├── .vscode
│   ├── tasks.json
│   ├── c_cpp_properties.json
│   ├── launch.json
│   └── settings.json
├── Final
│   ├── mutex_lock_prb.py
│   ├── peterson.py
│   ├── mutex_lock.py
│   ├── semaphore_producer.py
│   ├── semaphore_dining.py
│   ├── deadlock_detection.py
│   ├── peterson_alternative.py
│   ├── fcfs.py
│   ├── bankers.py
│   ├── fcfs_concurent.py
│   ├── priority_non.py
│   ├── sjf.py
│   ├── srt.py
│   ├── priority_prim.py
│   └── rr.py
├── same_python_file
│   ├── peterson_solution.py
│   ├── mutex_lock.py
│   ├── semaphore_producer_consumer.py
│   ├── semaphore_dining_philosopher.py
│   ├── deadlock_detection_alternative.py
│   ├── bankers_algorithm_alternative_3.py
│   ├── peterson_solution_alternative.py
│   ├── deadlock_detection.py
│   ├── first_come_first_serve.py
│   ├── bankers_algorithm_alternative_2.py
│   ├── bankers_algorithm_alternative_4.py
│   ├── semaphore_producer_consumer_alternative.py
│   ├── fcfs_with_concurent_process.py
│   ├── bankers_algorithm.py
│   ├── shortest_job_first_non_preemptive.py
│   ├── priority_non_primptive.py
│   ├── shortest_job_first_preemptive_SRT.py
│   ├── priority_primptive.py
│   └── round_robin.py
├── semaphore_dining_philosopher.ipynb
├── first_come_first_serve.ipynb
├── README.md
├── fcfs_with_concurent_process.ipynb
├── deadlock_detection.ipynb
├── shortest_job_first_non_preemptive.ipynb
├── shortest_job_first_preemptive_SRT.ipynb
├── priority_non_primptive.ipynb
├── semaphore_producer_consumer.ipynb
├── priority_primptive.ipynb
├── peterson_solution.ipynb
├── round_robin.ipynb
└── mutex_lock.ipynb
/.vscode/tasks.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "2.0.0",
3 | "tasks": [
4 | {
5 | "type": "cppbuild",
6 | "label": "C/C++: g++.exe build active file",
7 | "command": "C:\\x86_64-8.1.0-release-posix-seh-rt_v6-rev0\\mingw64\\bin\\g++.exe",
8 | "args": [
9 | "-fdiagnostics-color=always",
10 | "-g",
11 | "${file}",
12 | "-o",
13 | "${fileDirname}\\${fileBasenameNoExtension}.exe"
14 | ],
15 | "options": {
16 | "cwd": "${fileDirname}"
17 | },
18 | "problemMatcher": [
19 | "$gcc"
20 | ],
21 | "group": "build",
22 | "detail": "compiler: C:\\x86_64-8.1.0-release-posix-seh-rt_v6-rev0\\mingw64\\bin\\g++.exe"
23 | }
24 | ]
25 | }
--------------------------------------------------------------------------------
/.vscode/c_cpp_properties.json:
--------------------------------------------------------------------------------
1 | {
2 | "configurations": [
3 | {
4 | "name": "Win32",
5 | "includePath": [
6 | "${workspaceFolder}/**",
7 | "C:/x86_64-8.1.0-release-posix-seh-rt_v6-rev0/mingw64/lib/gcc/x86_64-w64-mingw32/8.1.0/include/c++/x86_64-w64-mingw32"
8 | ],
9 | "defines": [
10 | "_DEBUG",
11 | "UNICODE",
12 | "_UNICODE"
13 | ],
14 | "compilerPath": "C:\\x86_64-8.1.0-release-posix-seh-rt_v6-rev0\\mingw64\\bin\\g++.exe",
15 | "cStandard": "gnu17",
16 | "cppStandard": "gnu++17",
17 | "intelliSenseMode": "gcc-x64"
18 | }
19 | ],
20 | "version": 4
21 | }
--------------------------------------------------------------------------------
/Final/mutex_lock_prb.py:
--------------------------------------------------------------------------------
1 | from threading import Thread
2 | from time import sleep
3 |
4 |
5 | counter = 0
6 |
7 | # define a function that increases the value of the counter variable by a number:
8 | def increase(by):
9 | global counter
10 |
11 | local_counter = counter
12 | local_counter += by
13 |
14 | sleep(0.1)
15 |
16 | counter = local_counter
17 | print(f'counter={counter}\n')
18 |
19 |
20 | # create two threads.
21 | # first thread increases the counter by 10
22 | # second thread increases the counter by 20:
23 | t1 = Thread(target=increase, args=(10,))
24 | t2 = Thread(target=increase, args=(20,))
25 |
26 | # start the threads
27 | t1.start()
28 | t2.start()
29 |
30 |
31 | # wait for the threads to complete
32 | t1.join()
33 | t2.join()
34 |
35 |
36 | print(f'The final counter is {counter}')
--------------------------------------------------------------------------------
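
The listing above is the deliberately broken counter demo: both threads copy counter before either writes it back, so the final value ends up as 10 or 20 rather than 30. A minimal harness, written for this note and not part of the repository, that reruns the unsynchronized update several times and tallies the outcomes:

from threading import Thread
from time import sleep
from collections import Counter

def run_once():
    counter = 0
    def increase(by):
        nonlocal counter
        local_counter = counter + by   # read-modify ...
        sleep(0.01)                    # widen the race window, as in the file above
        counter = local_counter        # ... write back, overwriting the other thread's update
    threads = [Thread(target=increase, args=(by,)) for by in (10, 20)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return counter

# Tally the final counter over 20 runs; the lost update (10 or 20 instead of 30)
# shows up essentially every time because of the forced sleep.
print(Counter(run_once() for _ in range(20)))
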
/same_python_file/peterson_solution.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | cs = 0
5 | flag_0 = False
6 | flag_1 = False
7 | turn = 0
8 |
9 | def thread_0():
10 | global cs, flag_0, flag_1, turn
11 |
12 | flag_0 = True
13 | turn = 1
14 | while (flag_1 and turn == 1):
15 | continue
16 |
17 | for i in range(10):
18 | cs += 1
19 | print("Thread 0: cs =", cs)
20 | time.sleep(0.1)
21 |
22 | flag_0 = False
23 |
24 | def thread_1():
25 | global cs, flag_0, flag_1, turn
26 |
27 | flag_1 = True
28 | turn = 0
29 | while (flag_0 and turn == 0):
30 | continue
31 |
32 | for i in range(10):
33 | cs += 1000
34 | print("Thread 1: cs =", cs)
35 | time.sleep(0.1)
36 |
37 | flag_1 = False
38 |
39 | if __name__ == "__main__":
40 | t0 = threading.Thread(target=thread_0)
41 | t1 = threading.Thread(target=thread_1)
42 | t0.start()
43 | t1.start()
--------------------------------------------------------------------------------
/Final/peterson.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | # Shared variables
5 | turn = 0
6 | flag = [False, False]
7 |
8 | def process_0():
9 | global turn, flag
10 | flag[0] = True
11 | turn = 1
12 | while flag[1] and turn == 1:
13 | # Wait
14 | pass
15 | # Critical section
16 | print("Process 0 is in the critical section.")
17 | time.sleep(2) # Simulating some work inside the critical section
18 | # Exit section
19 | flag[0] = False
20 | print("Process 0 exited the critical section.\n")
21 |
22 | def process_1():
23 | global turn, flag
24 | flag[1] = True
25 | turn = 0
26 | while flag[0] and turn == 0:
27 | # Wait
28 | pass
29 | # Critical section
30 | print("Process 1 is in the critical section.")
31 | time.sleep(1) # Simulating some work inside the critical section
32 | # Exit section
33 | flag[1] = False
34 | print("Process 1 exited the critical section.\n")
35 |
36 | if __name__ == "__main__":
37 | thread_0 = threading.Thread(target=process_0)
38 | thread_1 = threading.Thread(target=process_1)
39 |
40 | thread_0.start()
41 | thread_1.start()
42 |
43 | thread_0.join()
44 | thread_1.join()
45 |
46 | print("Both processes have completed.")
47 |
--------------------------------------------------------------------------------
/Final/mutex_lock.py:
--------------------------------------------------------------------------------
1 | from threading import Thread, Lock
2 | from time import sleep
3 |
4 | # Initialize the global counter variable
5 | counter = 0
6 |
7 | # Define the thread function to increase the counter by a given value using a lock
8 | def increase(by, lock):
9 | global counter
10 |
11 | # Acquire the lock to ensure exclusive access to the shared counter
12 | lock.acquire()
13 |
14 | # Create a local copy of the counter to perform the update
15 | local_counter = counter
16 | local_counter += by
17 |
18 | # Simulate some time-consuming work using sleep
19 | sleep(0.1)
20 |
21 | # Update the global counter with the new value
22 | counter = local_counter
23 | print(f'counter={counter}')
24 |
25 | # Release the lock to allow other threads to access the shared counter
26 | lock.release()
27 |
28 | # Create a Lock object to synchronize access to the shared counter
29 | lock = Lock()
30 |
31 | # Create two threads, each incrementing the counter by a different value
32 | t1 = Thread(target=increase, args=(10, lock))
33 | t2 = Thread(target=increase, args=(20, lock))
34 |
35 | # Start the threads
36 | t1.start()
37 | t2.start()
38 |
39 | # Wait for the threads to complete their execution
40 | t1.join()
41 | t2.join()
42 |
43 | # Print the final value of the counter
44 | print(f'The final counter is {counter}')
45 |
--------------------------------------------------------------------------------
/same_python_file/mutex_lock.py:
--------------------------------------------------------------------------------
1 | from threading import Thread, Lock
2 | from time import sleep
3 |
4 | # Initialize the global counter variable
5 | counter = 0
6 |
7 | # Define the thread function to increase the counter by a given value using a lock
8 | def increase(by, lock):
9 | global counter
10 |
11 | # Acquire the lock to ensure exclusive access to the shared counter
12 | lock.acquire()
13 |
14 | # Create a local copy of the counter to perform the update
15 | local_counter = counter
16 | local_counter += by
17 |
18 | # Simulate some time-consuming work using sleep
19 | sleep(0.1)
20 |
21 | # Update the global counter with the new value
22 | counter = local_counter
23 | print(f'counter={counter}')
24 |
25 | # Release the lock to allow other threads to access the shared counter
26 | lock.release()
27 |
28 | # Create a Lock object to synchronize access to the shared counter
29 | lock = Lock()
30 |
31 | # Create two threads, each incrementing the counter by a different value
32 | t1 = Thread(target=increase, args=(10, lock))
33 | t2 = Thread(target=increase, args=(20, lock))
34 |
35 | # Start the threads
36 | t1.start()
37 | t2.start()
38 |
39 | # Wait for the threads to complete their execution
40 | t1.join()
41 | t2.join()
42 |
43 | # Print the final value of the counter
44 | print(f'The final counter is {counter}')
45 |
--------------------------------------------------------------------------------
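
The two mutex_lock.py copies above pair explicit lock.acquire() and lock.release() calls. An equivalent sketch using the with-statement form of Lock, which also releases the lock if an exception is raised inside the critical section (an alternative written for this note, not a change to the repo files):

from threading import Thread, Lock
from time import sleep

counter = 0
lock = Lock()

def increase(by):
    global counter
    with lock:                      # acquired here, released automatically on exit
        local_counter = counter + by
        sleep(0.1)                  # simulated work inside the critical section
        counter = local_counter
        print(f'counter={counter}')

threads = [Thread(target=increase, args=(by,)) for by in (10, 20)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(f'The final counter is {counter}')
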
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 |
8 | {
9 | "name": "(gdb) Launch",
10 | "type": "cppdbg",
11 | "request": "launch",
12 | "program": "${fileDirname}\\${fileBasenameNoExtension}.exe",
13 | "args": [],
14 | "stopAtEntry": false,
15 | "cwd": "${fileDirname}",
16 | "environment": [],
17 | "externalConsole": false,
18 | "MIMode": "gdb",
19 | "miDebuggerPath": "C:\\x86_64-8.1.0-release-posix-seh-rt_v6-rev0\\mingw64\\bin\\gdb.exe",
20 | "setupCommands": [
21 | {
22 | "description": "Enable pretty-printing for gdb",
23 | "text": "-enable-pretty-printing",
24 | "ignoreFailures": true
25 | },
26 | {
27 | "description": "Set Disassembly Flavor to Intel",
28 | "text": "-gdb-set disassembly-flavor intel",
29 | "ignoreFailures": true
30 | }
31 | ],
32 | "preLaunchTask": "C/C++: g++.exe build active file"
33 | }
34 |
35 | ]
36 | }
--------------------------------------------------------------------------------
/Final/semaphore_producer.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | mutex = threading.Semaphore(1)
5 | full = threading.Semaphore(0)
6 | empty = threading.Semaphore(3)
7 | x = 0
8 |
9 | def producer():
10 | global mutex, full, empty, x
11 | empty.acquire()
12 | mutex.acquire()
13 | x += 1
14 | print(f"Producer produces item {x}\n")
15 | mutex.release()
16 | full.release()
17 |
18 | def consumer():
19 | global mutex, full, empty, x
20 | full.acquire()
21 | mutex.acquire()
22 | print(f"Consumer consumes item {x}\n")
23 | x -= 1
24 | mutex.release()
25 | empty.release()
26 |
27 | def main():
28 | while True:
29 | print("1. PRODUCER\n2. CONSUMER\n3. EXIT")
30 | n = int(input("ENTER YOUR CHOICE: \n"))
31 | if n == 1:
32 | if empty._value != 0:
33 | producer_thread = threading.Thread(target=producer)
34 | producer_thread.start()
35 | else:
36 | print("BUFFER IS FULL")
37 | elif n == 2:
38 | if full._value != 0:
39 | consumer_thread = threading.Thread(target=consumer)
40 | consumer_thread.start()
41 | else:
42 | print("BUFFER IS EMPTY")
43 | elif n == 3:
44 | break
45 | else:
46 | print("Invalid choice. Please try again.")
47 |
48 | if __name__ == "__main__":
49 | main()
50 |
--------------------------------------------------------------------------------
/same_python_file/semaphore_producer_consumer.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | mutex = threading.Semaphore(1)
5 | full = threading.Semaphore(0)
6 | empty = threading.Semaphore(3)
7 | x = 0
8 |
9 | def producer():
10 | global mutex, full, empty, x
11 | empty.acquire()
12 | mutex.acquire()
13 | x += 1
14 | print(f"Producer produces item {x}\n")
15 | mutex.release()
16 | full.release()
17 |
18 | def consumer():
19 | global mutex, full, empty, x
20 | full.acquire()
21 | mutex.acquire()
22 | print(f"Consumer consumes item {x}\n")
23 | x -= 1
24 | mutex.release()
25 | empty.release()
26 |
27 | def main():
28 | while True:
29 | print("1. PRODUCER\n2. CONSUMER\n3. EXIT")
30 | n = int(input("ENTER YOUR CHOICE: \n"))
31 | if n == 1:
32 | if empty._value != 0:
33 | producer_thread = threading.Thread(target=producer)
34 | producer_thread.start()
35 | else:
36 | print("BUFFER IS FULL")
37 | elif n == 2:
38 | if full._value != 0:
39 | consumer_thread = threading.Thread(target=consumer)
40 | consumer_thread.start()
41 | else:
42 | print("BUFFER IS EMPTY")
43 | elif n == 3:
44 | break
45 | else:
46 | print("Invalid choice. Please try again.")
47 |
48 | if __name__ == "__main__":
49 | main()
50 |
--------------------------------------------------------------------------------
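
Both producer/consumer listings above decide whether to spawn a worker by reading empty._value and full._value, which are CPython implementation details rather than part of the documented Semaphore API. A sketch of the same menu loop that instead reserves the slot with a non-blocking acquire before starting the worker (an alternative written for this note, not taken from the repo):

import threading

mutex = threading.Semaphore(1)
full = threading.Semaphore(0)
empty = threading.Semaphore(3)
x = 0

def produce_slot():          # the empty slot was already reserved by the menu loop
    global x
    with mutex:
        x += 1
        print(f"Producer produces item {x}")
    full.release()

def consume_slot():          # the full slot was already reserved by the menu loop
    global x
    with mutex:
        print(f"Consumer consumes item {x}")
        x -= 1
    empty.release()

while True:
    choice = input("1. PRODUCER  2. CONSUMER  3. EXIT\nENTER YOUR CHOICE: ")
    if choice == "1":
        if empty.acquire(blocking=False):
            threading.Thread(target=produce_slot).start()
        else:
            print("BUFFER IS FULL")
    elif choice == "2":
        if full.acquire(blocking=False):
            threading.Thread(target=consume_slot).start()
        else:
            print("BUFFER IS EMPTY")
    elif choice == "3":
        break
    else:
        print("Invalid choice. Please try again.")
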
/Final/semaphore_dining.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 | import random
4 |
5 | NUM_PHILOSOPHERS = 5
6 | chopsticks = [threading.Semaphore(1) for _ in range(NUM_PHILOSOPHERS)]
7 |
8 | def philosopher(philosopher_id):
9 | # Get the left and right chopsticks for the philosopher
10 | left_chopstick = chopsticks[philosopher_id]
11 | right_chopstick = chopsticks[(philosopher_id + 1) % NUM_PHILOSOPHERS]
12 |
13 | while True:
14 | # Thinking
15 | print(f"Philosopher {philosopher_id} is thinking.")
16 | time.sleep(random.random())
17 |
18 | # Pick up left chopstick
19 | left_chopstick.acquire()
20 | print(f"Philosopher {philosopher_id} picked up the left chopstick.")
21 |
22 | # Try to pick up right chopstick without blocking
23 | if right_chopstick.acquire(blocking=False):
24 | # Pick up right chopstick and start eating
25 | print(f"Philosopher {philosopher_id} picked up the right chopstick.")
26 | print(f"Philosopher {philosopher_id} is eating.")
27 | time.sleep(random.random())
28 |
29 | # Release right chopstick after eating
30 | right_chopstick.release()
31 | print(f"Philosopher {philosopher_id} put down the right chopstick.")
32 |
33 | # Release left chopstick after eating or if right chopstick is not available
34 | left_chopstick.release()
35 | print(f"Philosopher {philosopher_id} put down the left chopstick.")
36 |
37 | if __name__ == "__main__":
38 | # Create philosopher threads and start them
39 | philosophers = [threading.Thread(target=philosopher, args=(i,)) for i in range(NUM_PHILOSOPHERS)]
40 | for p in philosophers:
41 | p.start()
42 |
43 | # Wait for all philosopher threads to complete
44 | for p in philosophers:
45 | p.join()
46 |
--------------------------------------------------------------------------------
/same_python_file/semaphore_dining_philosopher.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 | import random
4 |
5 | NUM_PHILOSOPHERS = 5
6 | chopsticks = [threading.Semaphore(1) for _ in range(NUM_PHILOSOPHERS)]
7 |
8 | def philosopher(philosopher_id):
9 | # Get the left and right chopsticks for the philosopher
10 | left_chopstick = chopsticks[philosopher_id]
11 | right_chopstick = chopsticks[(philosopher_id + 1) % NUM_PHILOSOPHERS]
12 |
13 | while True:
14 | # Thinking
15 | print(f"Philosopher {philosopher_id} is thinking.")
16 | time.sleep(random.random())
17 |
18 | # Pick up left chopstick
19 | left_chopstick.acquire()
20 | print(f"Philosopher {philosopher_id} picked up the left chopstick.")
21 |
22 | # Try to pick up right chopstick without blocking
23 | if right_chopstick.acquire(blocking=False):
24 | # Pick up right chopstick and start eating
25 | print(f"Philosopher {philosopher_id} picked up the right chopstick.")
26 | print(f"Philosopher {philosopher_id} is eating.")
27 | time.sleep(random.random())
28 |
29 | # Release right chopstick after eating
30 | right_chopstick.release()
31 | print(f"Philosopher {philosopher_id} put down the right chopstick.")
32 |
33 | # Release left chopstick after eating or if right chopstick is not available
34 | left_chopstick.release()
35 | print(f"Philosopher {philosopher_id} put down the left chopstick.")
36 |
37 | if __name__ == "__main__":
38 | # Create philosopher threads and start them
39 | philosophers = [threading.Thread(target=philosopher, args=(i,)) for i in range(NUM_PHILOSOPHERS)]
40 | for p in philosophers:
41 | p.start()
42 |
43 | # Wait for all philosopher threads to complete
44 | for p in philosophers:
45 | p.join()
46 |
--------------------------------------------------------------------------------
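
The dining-philosophers listings above avoid deadlock by trying the right chopstick with a non-blocking acquire and putting the left one back on failure. Another standard approach is resource ordering: every philosopher picks up the lower-numbered chopstick first, so a circular wait can never form. A minimal bounded sketch (assumption: three rounds per philosopher instead of an infinite loop; not part of the repo):

import threading
import time
import random

NUM_PHILOSOPHERS = 5
chopsticks = [threading.Semaphore(1) for _ in range(NUM_PHILOSOPHERS)]

def philosopher(philosopher_id):
    # Always acquire the lower-numbered chopstick first (resource ordering).
    first, second = sorted((philosopher_id, (philosopher_id + 1) % NUM_PHILOSOPHERS))
    for _ in range(3):
        print(f"Philosopher {philosopher_id} is thinking.")
        time.sleep(random.random())
        with chopsticks[first]:
            with chopsticks[second]:
                print(f"Philosopher {philosopher_id} is eating.")
                time.sleep(random.random())

philosophers = [threading.Thread(target=philosopher, args=(i,)) for i in range(NUM_PHILOSOPHERS)]
for p in philosophers:
    p.start()
for p in philosophers:
    p.join()
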
/Final/deadlock_detection.py:
--------------------------------------------------------------------------------
1 | def input_data():
2 | global n, r, max_claim, allocation, avail
3 |
4 | n = int(input("Enter the number of processes: "))
5 | r = int(input("Enter the number of resources: "))
6 |
7 | print("Enter the Maximum Claim Matrix:")
8 | max_claim = []
9 | for i in range(n):
10 | row = list(map(int, input().split()))
11 | max_claim.append(row)
12 |
13 | print("Enter the Allocation Matrix:")
14 | allocation = []
15 | for i in range(n):
16 | row = list(map(int, input().split()))
17 | allocation.append(row)
18 |
19 | print("Enter the Available Resources:")
20 | avail = list(map(int, input().split()))
21 |
22 |
23 | def show_data():
24 | global n, r, max_claim, allocation, avail
25 |
26 | print("Process\tAllocation\tMax\t\tAvailable")
27 | for i in range(n):
28 | print(f"P{i + 1}\t\t{allocation[i]}\t\t{max_claim[i]}\t\t{avail}")
29 |
30 |
31 | def is_safe_state():
32 | global n, r, max_claim, allocation, avail
33 |
34 | work = avail.copy()
35 | finish = [False] * n
36 |
37 | while True:
38 | found = False
39 | for i in range(n):
40 | if not finish[i] and all(work[j] >= max_claim[i][j] - allocation[i][j] for j in range(r)):
41 | found = True
42 | finish[i] = True
43 | work = [work[j] + allocation[i][j] for j in range(r)]
44 | break
45 |
46 | if not found:
47 | break
48 |
49 | return all(finish)
50 |
51 |
52 | def deadlock_detection():
53 | if is_safe_state():
54 | print("The system is in a safe state. No deadlock detected.")
55 | else:
56 | print("Deadlock detected. The system is in an unsafe state.")
57 |
58 |
59 | if __name__ == "__main__":
60 | print("********** Deadlock Detection Algo ************")
61 | input_data()
62 | show_data()
63 | deadlock_detection()
64 |
--------------------------------------------------------------------------------
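
A non-interactive way to exercise the functions above (a usage sketch with assumptions: Final/ is on the import path, e.g. the script is run from inside Final/; the matrices are the ones used in same_python_file/deadlock_detection.py):

import deadlock_detection as dd   # Final/deadlock_detection.py

# Set the module-level data directly instead of typing it at the prompts.
dd.n, dd.r = 5, 3
dd.max_claim = [[7, 5, 3], [3, 2, 2], [9, 0, 2], [2, 2, 2], [4, 3, 3]]
dd.allocation = [[0, 1, 0], [2, 0, 0], [3, 0, 2], [2, 1, 1], [0, 0, 2]]
dd.avail = [3, 3, 2]

dd.show_data()
dd.deadlock_detection()   # this state is safe, so no deadlock is reported
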
/same_python_file/deadlock_detection_alternative.py:
--------------------------------------------------------------------------------
1 | def input_data():
2 | global n, r, max_claim, allocation, avail
3 |
4 | n = int(input("Enter the number of processes: "))
5 | r = int(input("Enter the number of resources: "))
6 |
7 | print("Enter the Maximum Claim Matrix:")
8 | max_claim = []
9 | for i in range(n):
10 | row = list(map(int, input().split()))
11 | max_claim.append(row)
12 |
13 | print("Enter the Allocation Matrix:")
14 | allocation = []
15 | for i in range(n):
16 | row = list(map(int, input().split()))
17 | allocation.append(row)
18 |
19 | print("Enter the Available Resources:")
20 | avail = list(map(int, input().split()))
21 |
22 |
23 | def show_data():
24 | global n, r, max_claim, allocation, avail
25 |
26 | print("Process\tAllocation\tMax\t\tAvailable")
27 | for i in range(n):
28 | print(f"P{i + 1}\t\t{allocation[i]}\t\t{max_claim[i]}\t\t{avail}")
29 |
30 |
31 | def is_safe_state():
32 | global n, r, max_claim, allocation, avail
33 |
34 | work = avail.copy()
35 | finish = [False] * n
36 |
37 | while True:
38 | found = False
39 | for i in range(n):
40 | if not finish[i] and all(work[j] >= max_claim[i][j] - allocation[i][j] for j in range(r)):
41 | found = True
42 | finish[i] = True
43 | work = [work[j] + allocation[i][j] for j in range(r)]
44 | break
45 |
46 | if not found:
47 | break
48 |
49 | return all(finish)
50 |
51 |
52 | def deadlock_detection():
53 | if is_safe_state():
54 | print("The system is in a safe state. No deadlock detected.")
55 | else:
56 | print("Deadlock detected. The system is in an unsafe state.")
57 |
58 |
59 | if __name__ == "__main__":
60 | print("********** Deadlock Detection Algo ************")
61 | input_data()
62 | show_data()
63 | deadlock_detection()
64 |
--------------------------------------------------------------------------------
/same_python_file/bankers_algorithm_alternative_3.py:
--------------------------------------------------------------------------------
1 | def is_available(process_id, allocated, max_resources, need, available):
2 | # Check if all the available resources are greater than or equal to the process needs
3 | return all(need[process_id][i] <= available[i] for i in range(len(available)))
4 |
5 | def safe_sequence(processes, allocated, max_resources, need, available, safe=[]):
6 | # Check if a safe-sequence is found and display it, or recursively find more safe sequences
7 | if len(safe) == len(processes):
8 | # If a safe-sequence is found, display it
9 | print("Safe sequence:", "->".join("P" + str(p + 1) for p in safe))
10 | else:
11 | for i in range(len(processes)):
12 | if i not in safe and is_available(i, allocated, max_resources, need, available):
13 | # Mark the process and update available resources
14 | marked = safe + [i]
15 | new_available = [available[j] + allocated[i][j] for j in range(len(available))]
16 |
17 | # Recursively find safe sequences with the updated state
18 | safe_sequence(processes, allocated, max_resources, need, new_available, marked)
19 |
20 | # Driver code
21 | if __name__ == "__main__":
22 | # Define the processes, allocated resources, maximum resources, and available resources
23 | processes = [0, 1, 2, 3]
24 | allocated = [[0, 1, 0], [2, 0, 0], [3, 0, 2], [2, 1, 1]]
25 | max_resources = [[7, 5, 3], [3, 2, 2], [9, 0, 2], [2, 2, 2]]
26 | resources = [10, 5, 7]
27 |
28 | # Calculate the need matrix and available resources
29 | need = [[max_resources[i][j] - allocated[i][j] for j in range(len(resources))] for i in range(len(processes))]
30 | available = [resources[j] - sum(allocated[i][j] for i in range(len(processes))) for j in range(len(resources))]
31 |
32 | # Find and print all safe sequences
33 | print("Safe sequences are:")
34 | safe_sequence(processes, allocated, max_resources, need, available)
35 |
36 |     # Note: enumerating the sequences above is also how they are counted; there is
37 |     # no simple closed-form count (see the counting sketch after this listing).
39 |
--------------------------------------------------------------------------------
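
As the note in the driver above says, safe sequences have to be enumerated to be counted. A sketch that does so by returning a count from the recursion, using the same data as the driver (written for this note, not part of the file):

def count_safe_sequences(processes, allocated, need, available, safe=()):
    # A complete permutation of the processes is one safe sequence.
    if len(safe) == len(processes):
        return 1
    total = 0
    for i in range(len(processes)):
        if i not in safe and all(need[i][j] <= available[j] for j in range(len(available))):
            new_available = [available[j] + allocated[i][j] for j in range(len(available))]
            total += count_safe_sequences(processes, allocated, need, new_available, safe + (i,))
    return total

# Same data as the driver above.
processes = [0, 1, 2, 3]
allocated = [[0, 1, 0], [2, 0, 0], [3, 0, 2], [2, 1, 1]]
max_resources = [[7, 5, 3], [3, 2, 2], [9, 0, 2], [2, 2, 2]]
resources = [10, 5, 7]
need = [[max_resources[i][j] - allocated[i][j] for j in range(len(resources))] for i in range(len(processes))]
available = [resources[j] - sum(allocated[i][j] for i in range(len(processes))) for j in range(len(resources))]
print("Total safe sequences:", count_safe_sequences(processes, allocated, need, available))
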
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "files.associations": {
3 | "array": "cpp",
4 | "atomic": "cpp",
5 | "*.tcc": "cpp",
6 | "bitset": "cpp",
7 | "cctype": "cpp",
8 | "cfenv": "cpp",
9 | "charconv": "cpp",
10 | "chrono": "cpp",
11 | "cinttypes": "cpp",
12 | "clocale": "cpp",
13 | "cmath": "cpp",
14 | "codecvt": "cpp",
15 | "complex": "cpp",
16 | "condition_variable": "cpp",
17 | "csetjmp": "cpp",
18 | "csignal": "cpp",
19 | "cstdarg": "cpp",
20 | "cstddef": "cpp",
21 | "cstdint": "cpp",
22 | "cstdio": "cpp",
23 | "cstdlib": "cpp",
24 | "cstring": "cpp",
25 | "ctime": "cpp",
26 | "cuchar": "cpp",
27 | "cwchar": "cpp",
28 | "cwctype": "cpp",
29 | "deque": "cpp",
30 | "forward_list": "cpp",
31 | "list": "cpp",
32 | "unordered_map": "cpp",
33 | "unordered_set": "cpp",
34 | "vector": "cpp",
35 | "exception": "cpp",
36 | "algorithm": "cpp",
37 | "functional": "cpp",
38 | "iterator": "cpp",
39 | "map": "cpp",
40 | "memory": "cpp",
41 | "memory_resource": "cpp",
42 | "numeric": "cpp",
43 | "optional": "cpp",
44 | "random": "cpp",
45 | "ratio": "cpp",
46 | "regex": "cpp",
47 | "set": "cpp",
48 | "string": "cpp",
49 | "string_view": "cpp",
50 | "system_error": "cpp",
51 | "tuple": "cpp",
52 | "type_traits": "cpp",
53 | "utility": "cpp",
54 | "fstream": "cpp",
55 | "future": "cpp",
56 | "initializer_list": "cpp",
57 | "iomanip": "cpp",
58 | "iosfwd": "cpp",
59 | "iostream": "cpp",
60 | "istream": "cpp",
61 | "limits": "cpp",
62 | "mutex": "cpp",
63 | "new": "cpp",
64 | "ostream": "cpp",
65 | "scoped_allocator": "cpp",
66 | "shared_mutex": "cpp",
67 | "sstream": "cpp",
68 | "stdexcept": "cpp",
69 | "streambuf": "cpp",
70 | "thread": "cpp",
71 | "typeindex": "cpp",
72 | "typeinfo": "cpp",
73 | "valarray": "cpp"
74 | }
75 | }
--------------------------------------------------------------------------------
/Final/peterson_alternative.py:
--------------------------------------------------------------------------------
1 | import random
2 | import time
3 | import threading
4 | import logging
5 | import queue
6 |
7 | BSIZE = 8 # Buffer size
8 | PWT = 2 # Producer wait time limit
9 | CWT = 10 # Consumer wait time limit
10 | RT = 10 # Program run-time in seconds
11 |
12 | def myrand(n):
13 | return random.randint(1, n)
14 |
15 | def producer(queue, state):
16 | index = 0
17 |     while state[0]:  # check the shared flag so the main thread can stop us
18 | time.sleep(1)
19 | logging.info("\nProducer is ready now.")
20 | with queue.lock:
21 | if not queue.full():
22 | tempo = myrand(BSIZE * 3)
23 | logging.info(f"Job {tempo} has been produced")
24 | queue.put(tempo)
25 | logging.info(f"Buffer: {list(queue.queue)}")
26 | else:
27 | logging.info("Buffer is full, nothing can be produced!!!")
28 | wait_time = myrand(PWT)
29 | logging.info(f"Producer will wait for {wait_time} seconds")
30 | time.sleep(wait_time)
31 |
32 | def consumer(queue, state):
33 | time.sleep(5)
34 |     while state[0]:  # check the shared flag so the main thread can stop us
35 | time.sleep(1)
36 | logging.info("\nConsumer is ready now.")
37 | with queue.lock:
38 | if not queue.empty():
39 | job = queue.get()
40 | logging.info(f"Job {job} has been consumed")
41 | logging.info(f"Buffer: {list(queue.queue)}")
42 | else:
43 | logging.info("Buffer is empty, nothing can be consumed!!!")
44 | wait_time = myrand(CWT)
45 | logging.info(f"Consumer will sleep for {wait_time} seconds")
46 | time.sleep(wait_time)
47 |
48 | if __name__ == "__main__":
49 | logging.basicConfig(level=logging.INFO, format='%(message)s')
50 |
51 | shared_queue = queue.Queue(BSIZE)
52 | shared_queue.lock = threading.Lock()
53 |     state = [True]  # mutable flag: rebinding a plain bool would not be seen by the threads
54 |
55 | producer_thread = threading.Thread(target=producer, args=(shared_queue, state))
56 | consumer_thread = threading.Thread(target=consumer, args=(shared_queue, state))
57 |
58 | producer_thread.start()
59 | consumer_thread.start()
60 |
61 | time.sleep(RT)
62 |     state[0] = False  # signal both threads to stop
63 |
64 | producer_thread.join()
65 | consumer_thread.join()
66 |
67 | logging.info("\nThe clock ran out.")
68 |
--------------------------------------------------------------------------------
/same_python_file/peterson_solution_alternative.py:
--------------------------------------------------------------------------------
1 | import random
2 | import time
3 | import threading
4 | import logging
5 | import queue
6 |
7 | BSIZE = 8 # Buffer size
8 | PWT = 2 # Producer wait time limit
9 | CWT = 10 # Consumer wait time limit
10 | RT = 10 # Program run-time in seconds
11 |
12 | def myrand(n):
13 | return random.randint(1, n)
14 |
15 | def producer(queue, state):
16 | index = 0
17 |     while state[0]:  # check the shared flag so the main thread can stop us
18 | time.sleep(1)
19 | logging.info("\nProducer is ready now.")
20 | with queue.lock:
21 | if not queue.full():
22 | tempo = myrand(BSIZE * 3)
23 | logging.info(f"Job {tempo} has been produced")
24 | queue.put(tempo)
25 | logging.info(f"Buffer: {list(queue.queue)}")
26 | else:
27 | logging.info("Buffer is full, nothing can be produced!!!")
28 | wait_time = myrand(PWT)
29 | logging.info(f"Producer will wait for {wait_time} seconds")
30 | time.sleep(wait_time)
31 |
32 | def consumer(queue, state):
33 | time.sleep(5)
34 |     while state[0]:  # check the shared flag so the main thread can stop us
35 | time.sleep(1)
36 | logging.info("\nConsumer is ready now.")
37 | with queue.lock:
38 | if not queue.empty():
39 | job = queue.get()
40 | logging.info(f"Job {job} has been consumed")
41 | logging.info(f"Buffer: {list(queue.queue)}")
42 | else:
43 | logging.info("Buffer is empty, nothing can be consumed!!!")
44 | wait_time = myrand(CWT)
45 | logging.info(f"Consumer will sleep for {wait_time} seconds")
46 | time.sleep(wait_time)
47 |
48 | if __name__ == "__main__":
49 | logging.basicConfig(level=logging.INFO, format='%(message)s')
50 |
51 | shared_queue = queue.Queue(BSIZE)
52 | shared_queue.lock = threading.Lock()
53 |     state = [True]  # mutable flag: rebinding a plain bool would not be seen by the threads
54 |
55 | producer_thread = threading.Thread(target=producer, args=(shared_queue, state))
56 | consumer_thread = threading.Thread(target=consumer, args=(shared_queue, state))
57 |
58 | producer_thread.start()
59 | consumer_thread.start()
60 |
61 | time.sleep(RT)
62 |     state[0] = False  # signal both threads to stop
63 |
64 | producer_thread.join()
65 | consumer_thread.join()
66 |
67 | logging.info("\nThe clock ran out.")
68 |
--------------------------------------------------------------------------------
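
In the two alternative producer/consumer listings above, the stop flag has to be shared mutable state (here a one-element list) because rebinding a plain boolean in the main block would never be visible inside the threads. The standard-library way to express the same idea is threading.Event; a minimal sketch (not part of the repo):

import threading
import time

stop = threading.Event()

def worker(name):
    while not stop.is_set():
        print(f"{name} is working")
        time.sleep(1)

t = threading.Thread(target=worker, args=("producer",))
t.start()
time.sleep(3)
stop.set()      # the loop condition sees this on its next check
t.join()
print("The clock ran out.")
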
/same_python_file/deadlock_detection.py:
--------------------------------------------------------------------------------
1 | def is_safe_state(available, max_claim, allocation):
2 | # Get the number of resources and processes
3 | num_resources = len(available)
4 | num_processes = len(max_claim)
5 |
6 | # Create a copy of available resources (work) and initialize finish array
7 | work = available.copy()
8 | finish = [False] * num_processes
9 |
10 | while True:
11 | found = False
12 | # Iterate through each process
13 | for i in range(num_processes):
14 | # Check if the process is not finished and if it can be executed
15 | if not finish[i] and all(work[j] >= max_claim[i][j] - allocation[i][j] for j in range(num_resources)):
16 | found = True
17 | # Mark the process as finished
18 | finish[i] = True
19 | # Add the allocated resources to the work vector
20 | work = [work[j] + allocation[i][j] for j in range(num_resources)]
21 | break
22 |
23 | # If no process can be executed, break out of the loop
24 | if not found:
25 | break
26 |
27 | # Check if all processes have finished (i.e., system is in a safe state)
28 | return all(finish)
29 |
30 | def deadlock_detection(available, max_claim, allocation):
31 | # Check if the current state is safe or in a deadlock
32 | if is_safe_state(available, max_claim, allocation):
33 | print("The system is in a safe state. No deadlock detected.")
34 | else:
35 | print("Deadlock detected. The system is in an unsafe state.")
36 |
37 | if __name__ == "__main__":
38 | # Define the available resources, maximum claim of each process, and current allocation
39 | available_resources = [3, 3, 2]
40 | max_claim_per_process = [
41 | [7, 5, 3],
42 | [3, 2, 2],
43 | [9, 0, 2],
44 | [2, 2, 2],
45 | [4, 3, 3]
46 | ]
47 | current_allocation = [
48 | [0, 1, 0],
49 | [2, 0, 0],
50 | [3, 0, 2],
51 | [2, 1, 1],
52 | [0, 0, 2]
53 | ]
54 |
55 | # Perform deadlock detection
56 | deadlock_detection(available_resources, max_claim_per_process, current_allocation)
57 |
58 | # case 2
59 | # Define the deadlock scenario
60 | available_resources = [2, 2, 2]
61 | max_claim_per_process = [
62 | [3, 2, 2],
63 | [1, 2, 4],
64 | [4, 4, 3],
65 | [2, 3, 2]
66 | ]
67 | current_allocation = [
68 | [0, 1, 0],
69 | [0, 0, 1],
70 | [2, 1, 1],
71 | [1, 0, 0]
72 | ]
73 |
74 | # Perform deadlock detection
75 | deadlock_detection(available_resources, max_claim_per_process, current_allocation)
76 |
--------------------------------------------------------------------------------
/Final/fcfs.py:
--------------------------------------------------------------------------------
1 | process_working_sequence = []
2 | def fcfs_scheduling(processes):
3 | processes.sort(key=lambda p: p[1])
4 |
5 | current_time = 0
6 |
7 | waiting_time_total = 0
8 | turnaround_time_total = 0
9 | system_idle_time = 0
10 | timeline = []
11 |
12 |
13 | print("Process ID\tArrival Time\tBurst Time\tWaiting Time\tTurnaround Time")
14 |
15 | for process in processes:
16 | pid, arrival_time, burst_time = process
17 | process_working_sequence.append(pid)
18 |
19 | # Calculate system idle time
20 | if current_time < arrival_time:
21 | system_idle_time += arrival_time - current_time
22 | timeline.append(current_time)
23 | current_time = arrival_time
24 | timeline.append(current_time)
25 |
26 | waiting_time = max(0, current_time - arrival_time)
27 | turnaround_time = waiting_time + burst_time
28 | waiting_time_total += waiting_time
29 | turnaround_time_total += turnaround_time
30 |
31 | print(f"{pid}\t\t{arrival_time}\t\t{burst_time}\t\t{waiting_time}\t\t{turnaround_time}")
32 |
33 | # Add the print statements for the process execution
34 | # print(f"Process {pid} started execution at time {current_time}")
35 |
36 | # Update the current time to the completion time of the current process
37 | current_time += burst_time
38 | # print(f"Process {pid} completed execution at time {current_time}")
39 |
40 | timeline.append(current_time)
41 | avg_waiting_time = waiting_time_total / len(processes)
42 | avg_turnaround_time = turnaround_time_total / len(processes)
43 |
44 | # Print the average waiting time and average turnaround time
45 | print(f"\nAverage Waiting Time: {avg_waiting_time:.2f}")
46 | print(f"Average Turnaround Time: {avg_turnaround_time:.2f}")
47 |     print("System utilization:", (1 - (system_idle_time / current_time)) * 100, "%")
48 | print("Sequence of Processes:", process_working_sequence)
49 |
50 | print()
51 | for process in process_working_sequence:
52 | print("| " , process, " " , end="")
53 | print("|")
54 | for i in timeline:
55 | print(i , end=" ")
56 |
57 | n = int(input("Enter the number of processes: "))
58 | processes = []
59 |
60 | print("\nEnter Process Arrival Time and Burst Time")
61 | for i in range(n):
62 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
63 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
64 | processes.append((i+1, arrival_time, burst_time))
65 |
66 | fcfs_scheduling(processes)
67 |
68 |
69 |
70 |
71 |
72 |
73 | ''' Sample input: 3 processes; for each, arrival time then burst time:
74 | 3
75 | 0
76 | 8
77 | .4
78 | 4
79 | 1
80 | 1
81 |
82 |
83 | '''
--------------------------------------------------------------------------------
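
Working the sample input from the trailing comment of fcfs.py by hand (an illustrative check, not captured program output): P1 arrives at 0 and runs until 8, P2 arrives at 0.4 and starts at 8, P3 arrives at 1 and starts at 12; the CPU is never idle, so utilization is 100%.

# Hand check of the sample input (arrival/burst = 0/8, 0.4/4, 1/1).
bursts = (8, 4, 1)
waits = [0, 8 - 0.4, 12 - 1]                    # start time minus arrival time
turnarounds = [w + b for w, b in zip(waits, bursts)]
print(sum(waits) / 3, sum(turnarounds) / 3)     # about 6.20 and 10.53
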
/Final/bankers.py:
--------------------------------------------------------------------------------
1 | def input_data():
2 | global n, r, max_resources, allocated_resources, available_resources
3 |
4 | print("********** Banker's Algorithm ************")
5 | n = int(input("Enter the number of Processes: "))
6 |     r = int(input("Enter the number of resource instances: "))
7 |
8 | print("Enter the Max Matrix:")
9 | max_resources = []
10 | for i in range(n):
11 | max_resources.append(list(map(int, input().split())))
12 |
13 | print("Enter the Allocation Matrix:")
14 | allocated_resources = []
15 | for i in range(n):
16 | allocated_resources.append(list(map(int, input().split())))
17 |
18 | print("Enter the available Resources:")
19 | available_resources = list(map(int, input().split()))
20 |
21 |
22 | def show_data():
23 | global n, r, max_resources, allocated_resources, available_resources
24 |
25 | print("\nProcess\tAllocation\tMax\tAvailable")
26 | for i in range(n):
27 | print(f"\nP{i+1}\t {allocated_resources[i]} \t{max_resources[i]}", end=" ")
28 | if i == 0:
29 | print("\t", *available_resources, end=" ")
30 |
31 |
32 | def calculate_need():
33 | global n, r, max_resources, allocated_resources, need
34 |
35 | need = [[0] * r for _ in range(n)]
36 |
37 | for i in range(n):
38 | for j in range(r):
39 | need[i][j] = max_resources[i][j] - allocated_resources[i][j]
40 |
41 |
42 | def is_available(process_id):
43 | global need, available_resources
44 |
45 | for j in range(r):
46 | if need[process_id][j] > available_resources[j]:
47 | return False
48 |
49 | return True
50 |
51 |
52 | def banker_algorithm():
53 | global n, r, max_resources, allocated_resources, available_resources, need
54 |
55 | finish = [0] * n
56 | safe_sequence = []
57 | flag = True
58 |
59 | while flag:
60 | flag = False
61 | for i in range(n):
62 | if not finish[i] and is_available(i):
63 | for j in range(r):
64 | available_resources[j] += allocated_resources[i][j]
65 | finish[i] = 1
66 | flag = True
67 | safe_sequence.append(i)
68 |
69 | if len(safe_sequence) == n:
70 | print("\nSafe sequence:", end=" ")
71 | for i in safe_sequence:
72 | print(f"P{i+1}", end="")
73 | if i != safe_sequence[-1]:
74 | print(" ->", end="")
75 | print("\n\nThe system is in a safe state.")
76 | else:
77 | print("\nProcess P", end="")
78 | for i in range(n):
79 | if not finish[i]:
80 | print(i + 1, end="")
81 | if i != n - 1:
82 | print(" ->", end="")
83 | print(" is in deadlock.")
84 | print("\n\nThe system is in an unsafe state.")
85 |
86 |
87 | if __name__ == "__main__":
88 | input_data()
89 | show_data()
90 | calculate_need()
91 | banker_algorithm()
92 |
--------------------------------------------------------------------------------
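
A non-interactive way to exercise the Banker's functions above (a usage sketch with assumptions: Final/ is on the import path; the data is the five-process example used in same_python_file/bankers_algorithm_alternative_2.py):

import bankers   # Final/bankers.py

# Fill in the module-level data directly instead of answering the prompts.
bankers.n, bankers.r = 5, 3
bankers.max_resources = [[7, 5, 3], [3, 2, 2], [9, 0, 2], [2, 2, 2], [4, 3, 3]]
bankers.allocated_resources = [[0, 1, 0], [2, 0, 0], [3, 0, 2], [2, 1, 1], [0, 0, 2]]
bankers.available_resources = [3, 3, 2]

bankers.calculate_need()
bankers.banker_algorithm()   # prints a safe sequence for this (safe) state
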
/same_python_file/first_come_first_serve.py:
--------------------------------------------------------------------------------
1 | def fcfs_scheduling(processes):
2 | # Sort the processes based on their arrival time (ascending order)
3 | # This ensures that processes with earlier arrival times are scheduled first.
4 | processes.sort(key=lambda p: p[1])
5 |
6 | # Initialize the starting time of the first process as its arrival time
7 | current_time = processes[0][1]
8 |
9 | # Initialize variables to keep track of total waiting time and turnaround time
10 | waiting_time_total = 0
11 | turnaround_time_total = 0
12 |
13 | # Print the table header for the process details
14 | print("Process ID\tArrival Time\tBurst Time\tWaiting Time\tTurnaround Time")
15 |
16 | # Iterate through each process in the sorted order
17 | for process in processes:
18 | # Unpack the process details (process ID, arrival time, and burst time)
19 | pid, arrival_time, burst_time = process
20 |
21 | # Calculate the waiting time for the current process, ensuring it is non-negative
22 | waiting_time = max(0, current_time - arrival_time)
23 |
24 | # Calculate the turnaround time for the current process
25 | turnaround_time = waiting_time + burst_time
26 |
27 | # Update the total waiting time and turnaround time
28 | waiting_time_total += waiting_time
29 | turnaround_time_total += turnaround_time
30 |
31 | # Print the details for the current process
32 | print(f"{pid}\t\t{arrival_time}\t\t{burst_time}\t\t{waiting_time}\t\t{turnaround_time}")
33 |
34 | # Add the print statements for the process execution
35 | # print(f"Process {pid} started execution at time {current_time}")
36 | # Update the current time to the completion time of the current process
37 | current_time += burst_time
38 | # print(f"Process {pid} completed execution at time {current_time}")
39 |
40 |
41 |
42 | # Calculate the average waiting time and turnaround time for all processes
43 | n = len(processes)
44 | avg_waiting_time = waiting_time_total / n
45 | avg_turnaround_time = turnaround_time_total / n
46 |
47 | # Print the average waiting time and average turnaround time
48 | print(f"\nAverage Waiting Time: {avg_waiting_time:.2f}")
49 | print(f"Average Turnaround Time: {avg_turnaround_time:.2f}")
50 |
51 | if __name__ == "__main__":
52 | # Example usage with user input for the number of processes
53 | n = int(input("Enter the number of processes: "))
54 | processes = []
55 |
56 | # Prompt the user to enter arrival time and burst time for each process
57 | print("\nEnter Process Arrival Time and Burst Time")
58 | for i in range(n):
59 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
60 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
61 | processes.append((i+1, arrival_time, burst_time))
62 |
63 | # Call the FCFS scheduling function with the list of processes
64 | fcfs_scheduling(processes)
65 |
--------------------------------------------------------------------------------
/semaphore_dining_philosopher.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import threading\n",
10 | "import time\n",
11 | "import random\n",
12 | "\n",
13 | "NUM_PHILOSOPHERS = 5\n",
14 | "chopsticks = [threading.Semaphore(1) for _ in range(NUM_PHILOSOPHERS)]\n",
15 | "\n",
16 | "def philosopher(philosopher_id):\n",
17 | " # Get the left and right chopsticks for the philosopher\n",
18 | " left_chopstick = chopsticks[philosopher_id]\n",
19 | " right_chopstick = chopsticks[(philosopher_id + 1) % NUM_PHILOSOPHERS]\n",
20 | "\n",
21 | " while True:\n",
22 | " # Thinking\n",
23 | " print(f\"Philosopher {philosopher_id} is thinking.\")\n",
24 | " time.sleep(random.random())\n",
25 | "\n",
26 | " # Pick up left chopstick\n",
27 | " left_chopstick.acquire()\n",
28 | " print(f\"Philosopher {philosopher_id} picked up the left chopstick.\")\n",
29 | "\n",
30 | " # Try to pick up right chopstick without blocking\n",
31 | " if right_chopstick.acquire(blocking=False):\n",
32 | " # Pick up right chopstick and start eating\n",
33 | " print(f\"Philosopher {philosopher_id} picked up the right chopstick.\")\n",
34 | " print(f\"Philosopher {philosopher_id} is eating.\")\n",
35 | " time.sleep(random.random())\n",
36 | "\n",
37 | " # Release right chopstick after eating\n",
38 | " right_chopstick.release()\n",
39 | " print(f\"Philosopher {philosopher_id} put down the right chopstick.\")\n",
40 | " \n",
41 | " # Release left chopstick after eating or if right chopstick is not available\n",
42 | " left_chopstick.release()\n",
43 | " print(f\"Philosopher {philosopher_id} put down the left chopstick.\")\n",
44 | "\n",
45 | "if __name__ == \"__main__\":\n",
46 | " # Create philosopher threads and start them\n",
47 | " philosophers = [threading.Thread(target=philosopher, args=(i,)) for i in range(NUM_PHILOSOPHERS)]\n",
48 | " for p in philosophers:\n",
49 | " p.start()\n",
50 | "\n",
51 | " # Wait for all philosopher threads to complete\n",
52 | " for p in philosophers:\n",
53 | " p.join()\n"
54 | ]
55 | }
56 | ],
57 | "metadata": {
58 | "kernelspec": {
59 | "display_name": "Python 3",
60 | "language": "python",
61 | "name": "python3"
62 | },
63 | "language_info": {
64 | "codemirror_mode": {
65 | "name": "ipython",
66 | "version": 3
67 | },
68 | "file_extension": ".py",
69 | "mimetype": "text/x-python",
70 | "name": "python",
71 | "nbconvert_exporter": "python",
72 | "pygments_lexer": "ipython3",
73 | "version": "3.11.0"
74 | },
75 | "orig_nbformat": 4
76 | },
77 | "nbformat": 4,
78 | "nbformat_minor": 2
79 | }
80 |
--------------------------------------------------------------------------------
/same_python_file/bankers_algorithm_alternative_2.py:
--------------------------------------------------------------------------------
1 | def calculate_need(need, max_resources, allocated_resources):
2 | # Calculate the need matrix for each process
3 | for i in range(len(max_resources)):
4 | for j in range(len(max_resources[0])):
5 | # Need = Maximum resources required - Allocated resources
6 | need[i][j] = max_resources[i][j] - allocated_resources[i][j]
7 |
8 | def is_safe_state(processes, available_resources, max_resources, allocated_resources):
9 | # Initialize the need matrix with zeros
10 | need = [[0 for _ in range(len(available_resources))] for _ in range(len(processes))]
11 |
12 | # Calculate the need matrix for each process
13 | calculate_need(need, max_resources, allocated_resources)
14 |
15 | # Create a copy of available resources to keep track of available resources during simulation
16 | work = available_resources[:]
17 |
18 | # Create a list to keep track of finished processes, initially set to 0 (not finished)
19 | finish = [0] * len(processes)
20 |
21 | # Create an empty list to store the safe sequence of processes
22 | safe_sequence = []
23 |
24 | # Loop until all processes are finished or deadlock is detected
25 | while True:
26 | found = False
27 | for p in range(len(processes)):
28 | # Check if the process is not finished and if its needs can be satisfied with available resources
29 | if not finish[p] and all(need[p][j] <= work[j] for j in range(len(available_resources))):
30 | # Update the available resources by adding the allocated resources of the process
31 | work = [work[j] + allocated_resources[p][j] for j in range(len(available_resources))]
32 | # Mark the process as finished
33 | finish[p] = 1
34 | # Add the process to the safe sequence
35 | safe_sequence.append(p)
36 | found = True
37 |
38 | # If no process can finish in the current iteration, exit the loop
39 | if not found:
40 | break
41 |
42 | # If all processes are finished, the system is in a safe state
43 | if all(finish):
44 | print("System is in safe state.")
45 | print("Safe sequence is:", safe_sequence)
46 | return True
47 | else:
48 | print("System is not in safe state.")
49 | return False
50 |
51 | # Driver code
52 | if __name__ == "__main__":
53 | processes = [0, 1, 2, 3, 4]
54 | available_resources = [3, 3, 2]
55 |
56 | max_resources = [
57 | [7, 5, 3],
58 | [3, 2, 2],
59 | [9, 0, 2],
60 | [2, 2, 2],
61 | [4, 3, 3]
62 | ]
63 |
64 | allocated_resources = [
65 | [0, 1, 0],
66 | [2, 0, 0],
67 | [3, 0, 2],
68 | [2, 1, 1],
69 | [0, 0, 2]
70 | ]
71 |
72 | # Check if the system is in a safe state using Banker's Algorithm
73 | is_safe_state(processes, available_resources, max_resources, allocated_resources)
74 |
--------------------------------------------------------------------------------
/same_python_file/bankers_algorithm_alternative_4.py:
--------------------------------------------------------------------------------
1 | def input_data():
2 | global n, r, max_resources, allocated_resources, available_resources
3 |
4 | print("********** Banker's Algorithm ************")
5 | n = int(input("Enter the number of Processes: "))
6 |     r = int(input("Enter the number of resource instances: "))
7 |
8 | print("Enter the Max Matrix:")
9 | max_resources = []
10 | for i in range(n):
11 | max_resources.append(list(map(int, input().split())))
12 |
13 | print("Enter the Allocation Matrix:")
14 | allocated_resources = []
15 | for i in range(n):
16 | allocated_resources.append(list(map(int, input().split())))
17 |
18 | print("Enter the available Resources:")
19 | available_resources = list(map(int, input().split()))
20 |
21 |
22 | def show_data():
23 | global n, r, max_resources, allocated_resources, available_resources
24 |
25 | print("\nProcess\tAllocation\tMax\tAvailable")
26 | for i in range(n):
27 | print(f"\nP{i+1}\t {allocated_resources[i]} \t{max_resources[i]}", end=" ")
28 | if i == 0:
29 | print("\t", *available_resources, end=" ")
30 |
31 |
32 | def calculate_need():
33 | global n, r, max_resources, allocated_resources, need
34 |
35 | need = [[0] * r for _ in range(n)]
36 |
37 | for i in range(n):
38 | for j in range(r):
39 | need[i][j] = max_resources[i][j] - allocated_resources[i][j]
40 |
41 |
42 | def is_available(process_id):
43 | global need, available_resources
44 |
45 | for j in range(r):
46 | if need[process_id][j] > available_resources[j]:
47 | return False
48 |
49 | return True
50 |
51 |
52 | def banker_algorithm():
53 | global n, r, max_resources, allocated_resources, available_resources, need
54 |
55 | finish = [0] * n
56 | safe_sequence = []
57 | flag = True
58 |
59 | while flag:
60 | flag = False
61 | for i in range(n):
62 | if not finish[i] and is_available(i):
63 | for j in range(r):
64 | available_resources[j] += allocated_resources[i][j]
65 | finish[i] = 1
66 | flag = True
67 | safe_sequence.append(i)
68 |
69 | if len(safe_sequence) == n:
70 | print("\nSafe sequence:", end=" ")
71 | for i in safe_sequence:
72 | print(f"P{i+1}", end="")
73 | if i != safe_sequence[-1]:
74 | print(" ->", end="")
75 | print("\n\nThe system is in a safe state.")
76 | else:
77 | print("\nProcess P", end="")
78 | for i in range(n):
79 | if not finish[i]:
80 | print(i + 1, end="")
81 | if i != n - 1:
82 | print(" ->", end="")
83 | print(" is in deadlock.")
84 | print("\n\nThe system is in an unsafe state.")
85 |
86 |
87 | if __name__ == "__main__":
88 | input_data()
89 | show_data()
90 | calculate_need()
91 | banker_algorithm()
92 |
--------------------------------------------------------------------------------
/same_python_file/semaphore_producer_consumer_alternative.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 | import random
4 |
5 | # Buffer size
6 | BUFFER_SIZE = 5
7 |
8 | # Semaphore to control access to the buffer
9 | mutex = threading.Semaphore(1)
10 |
11 | # Semaphore to count the empty slots in the buffer
12 | empty = threading.Semaphore(BUFFER_SIZE)
13 |
14 | # Semaphore to count the number of items in the buffer
15 | full = threading.Semaphore(0)
16 |
17 | # Buffer to store items
18 | buffer = []
19 |
20 | # The producer function, responsible for producing items
21 | def producer():
22 | for _ in range(10):
23 | # Generate a random item to be produced
24 | item = random.randint(1, 100)
25 |
26 | # Acquire an empty slot in the buffer
27 | empty.acquire()
28 |
29 | # Acquire the mutex to ensure mutual exclusion when accessing the buffer
30 | mutex.acquire()
31 |
32 | # Add the item to the buffer
33 | buffer.append(item)
34 |
35 | # Print a message indicating the item is produced and the current buffer contents
36 | print(f"Producer: Produced item {item}. Buffer: {buffer}")
37 |
38 | # Release the mutex to allow other threads to access the buffer
39 | mutex.release()
40 |
41 | # Release a full slot in the buffer to signal that an item is available
42 | full.release()
43 |
44 | # Introduce a random delay to simulate variable production time
45 | time.sleep(random.uniform(0.1, 0.5))
46 |
47 | # The consumer function, responsible for consuming items
48 | def consumer():
49 | for _ in range(10):
50 | # Acquire a full slot in the buffer to check if an item is available for consumption
51 | full.acquire()
52 |
53 | # Acquire the mutex to ensure mutual exclusion when accessing the buffer
54 | mutex.acquire()
55 |
56 | # Remove the first item from the buffer
57 | item = buffer.pop(0)
58 |
59 | # Print a message indicating the item is consumed and the current buffer contents
60 | print(f"Consumer: Consumed item {item}. Buffer: {buffer}")
61 |
62 | # Release the mutex to allow other threads to access the buffer
63 | mutex.release()
64 |
65 | # Release an empty slot in the buffer to signal that a slot is available for production
66 | empty.release()
67 |
68 | # Introduce a random delay to simulate variable consumption time
69 | time.sleep(random.uniform(0.1, 0.5))
70 |
71 | if __name__ == "__main__":
72 | # Create producer and consumer threads
73 | producer_thread = threading.Thread(target=producer)
74 | consumer_thread = threading.Thread(target=consumer)
75 |
76 | # Start the threads
77 | producer_thread.start()
78 | consumer_thread.start()
79 |
80 | # Wait for the threads to complete
81 | producer_thread.join()
82 | consumer_thread.join()
83 |
84 | # Print a message indicating that the simulation is completed
85 | print("Producer-Consumer simulation completed.")
86 |
--------------------------------------------------------------------------------
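
The listing above builds the bounded buffer from three semaphores by hand. For comparison, the standard library's queue.Queue provides the same behavior directly: put() blocks while the buffer is full and get() blocks while it is empty. A sketch (not part of the repo):

import threading
import time
import random
import queue

buffer = queue.Queue(maxsize=5)   # Queue does the mutex/empty/full bookkeeping internally

def producer():
    for _ in range(10):
        item = random.randint(1, 100)
        buffer.put(item)          # blocks while the buffer is full
        print(f"Producer: Produced item {item}")
        time.sleep(random.uniform(0.1, 0.5))

def consumer():
    for _ in range(10):
        item = buffer.get()       # blocks while the buffer is empty
        print(f"Consumer: Consumed item {item}")
        time.sleep(random.uniform(0.1, 0.5))

threads = [threading.Thread(target=producer), threading.Thread(target=consumer)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print("Producer-Consumer simulation completed.")
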
/Final/fcfs_concurent.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import time
3 |
4 | def process_function(process_id, arrival_time, burst_time):
5 | print(f"Process {process_id} started execution at time {time.time()}")
6 | time.sleep(burst_time)
7 | print(f"Process {process_id} completed execution at time {time.time()}")
8 |
9 |
10 | def fcfs_scheduling(processes):
11 | # Sort the processes based on their arrival time (ascending order)
12 | # This ensures that processes with earlier arrival times are scheduled first.
13 | processes.sort(key=lambda p: p[1])
14 |
15 | # Initialize the starting time of the first process as its arrival time
16 | current_time = processes[0][1]
17 |
18 | # Initialize variables to keep track of total waiting time and turnaround time
19 | waiting_time_total = 0
20 | turnaround_time_total = 0
21 |
22 | # Print the table header for the process details
23 | print("Process ID\tArrival Time\tBurst Time\tWaiting Time\tTurnaround Time")
24 |
25 | # Iterate through each process in the sorted order
26 | for process in processes:
27 | # Unpack the process details (process ID, arrival time, and burst time)
28 | pid, arrival_time, burst_time = process
29 |
30 | # Calculate the waiting time for the current process, ensuring it is non-negative
31 | waiting_time = max(0, current_time - arrival_time)
32 |
33 | # Calculate the turnaround time for the current process
34 | turnaround_time = waiting_time + burst_time
35 |
36 | # Update the total waiting time and turnaround time
37 | waiting_time_total += waiting_time
38 | turnaround_time_total += turnaround_time
39 |
40 | # Print the details for the current process
41 | print(f"{pid}\t\t{arrival_time}\t\t{burst_time}\t\t{waiting_time}\t\t{turnaround_time}")
42 |
43 |
44 |
45 | # Add the print statements for the process execution
46 | # print(f"Process {pid} started execution at time {current_time}")
47 | # Update the current time to the completion time of the current process
48 | current_time += burst_time
49 | # print(f"Process {pid} completed execution at time {current_time}")
50 |
51 |
52 | # Calculate the average waiting time and turnaround time for all processes
53 | n = len(processes)
54 | avg_waiting_time = waiting_time_total / n
55 | avg_turnaround_time = turnaround_time_total / n
56 |
57 | # Print the average waiting time and average turnaround time
58 | print(f"\nAverage Waiting Time: {avg_waiting_time:.2f}")
59 | print(f"Average Turnaround Time: {avg_turnaround_time:.2f}")
60 |
61 | if __name__ == "__main__":
62 | # Example usage with user input for the number of processes
63 | n = int(input("Enter the number of processes: "))
64 | processes = []
65 |
66 | # Prompt the user to enter arrival time and burst time for each process
67 | print("\nEnter Process Arrival Time and Burst Time")
68 | for i in range(n):
69 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
70 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
71 | processes.append((i+1, arrival_time, burst_time))
72 |
73 |
74 | jobs = []
75 | for process in processes:
76 | process_id, arrival_time, burst_time = process
77 | p = multiprocessing.Process(target=process_function, args=(process_id, arrival_time, burst_time))
78 | jobs.append(p)
79 | p.start()
80 |
81 | for job in jobs:
82 | job.join()
83 |
84 | print("All processes completed execution.")
85 |
86 | # Call the FCFS scheduling function with the list of processes
87 | fcfs_scheduling(processes)
88 |
89 |
90 |
91 |
--------------------------------------------------------------------------------
/same_python_file/fcfs_with_concurent_process.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import time
3 |
4 | def process_function(process_id, arrival_time, burst_time):
5 | print(f"Process {process_id} started execution at time {time.time()}")
6 | time.sleep(burst_time)
7 | print(f"Process {process_id} completed execution at time {time.time()}")
8 |
9 |
10 | def fcfs_scheduling(processes):
11 | # Sort the processes based on their arrival time (ascending order)
12 | # This ensures that processes with earlier arrival times are scheduled first.
13 | processes.sort(key=lambda p: p[1])
14 |
15 | # Initialize the starting time of the first process as its arrival time
16 | current_time = processes[0][1]
17 |
18 | # Initialize variables to keep track of total waiting time and turnaround time
19 | waiting_time_total = 0
20 | turnaround_time_total = 0
21 |
22 | # Print the table header for the process details
23 | print("Process ID\tArrival Time\tBurst Time\tWaiting Time\tTurnaround Time")
24 |
25 | # Iterate through each process in the sorted order
26 | for process in processes:
27 | # Unpack the process details (process ID, arrival time, and burst time)
28 | pid, arrival_time, burst_time = process
29 |
30 | # Calculate the waiting time for the current process, ensuring it is non-negative
31 | waiting_time = max(0, current_time - arrival_time)
32 |
33 | # Calculate the turnaround time for the current process
34 | turnaround_time = waiting_time + burst_time
35 |
36 | # Update the total waiting time and turnaround time
37 | waiting_time_total += waiting_time
38 | turnaround_time_total += turnaround_time
39 |
40 | # Print the details for the current process
41 | print(f"{pid}\t\t{arrival_time}\t\t{burst_time}\t\t{waiting_time}\t\t{turnaround_time}")
42 |
43 |
44 |
45 | # Add the print statements for the process execution
46 | # print(f"Process {pid} started execution at time {current_time}")
47 | # Update the current time to the completion time of the current process
48 | current_time += burst_time
49 | # print(f"Process {pid} completed execution at time {current_time}")
50 |
51 |
52 | # Calculate the average waiting time and turnaround time for all processes
53 | n = len(processes)
54 | avg_waiting_time = waiting_time_total / n
55 | avg_turnaround_time = turnaround_time_total / n
56 |
57 | # Print the average waiting time and average turnaround time
58 | print(f"\nAverage Waiting Time: {avg_waiting_time:.2f}")
59 | print(f"Average Turnaround Time: {avg_turnaround_time:.2f}")
60 |
61 | if __name__ == "__main__":
62 | # Example usage with user input for the number of processes
63 | n = int(input("Enter the number of processes: "))
64 | processes = []
65 |
66 | # Prompt the user to enter arrival time and burst time for each process
67 | print("\nEnter Process Arrival Time and Burst Time")
68 | for i in range(n):
69 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
70 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
71 | processes.append((i+1, arrival_time, burst_time))
72 |
73 |
74 | jobs = []
75 | for process in processes:
76 | process_id, arrival_time, burst_time = process
77 | p = multiprocessing.Process(target=process_function, args=(process_id, arrival_time, burst_time))
78 | jobs.append(p)
79 | p.start()
80 |
81 | for job in jobs:
82 | job.join()
83 |
84 | print("All processes completed execution.")
85 |
86 | # Call the FCFS scheduling function with the list of processes
87 | fcfs_scheduling(processes)
88 |
89 |
90 |
91 |
--------------------------------------------------------------------------------
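
As a quick reference for the FCFS arithmetic above, here is a minimal standalone sketch (not part of the repository) that applies the same formulas, waiting time = max(0, current time - arrival) and turnaround time = waiting + burst, to two hypothetical processes:

```python
# Minimal FCFS metrics sketch; the two process tuples are hypothetical example values.
processes = [(1, 0.0, 3.0), (2, 1.0, 2.0)]   # (pid, arrival, burst)
processes.sort(key=lambda p: p[1])           # earliest arrival first
current = processes[0][1]                    # CPU is free at the first arrival
total_wait = total_tat = 0.0
for pid, arrival, burst in processes:
    wait = max(0.0, current - arrival)       # time spent in the ready queue
    tat = wait + burst                       # turnaround = waiting + burst
    total_wait += wait
    total_tat += tat
    current += burst                         # CPU frees up after this burst
print(total_wait / len(processes), total_tat / len(processes))   # 1.0 3.5
```
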
/first_come_first_serve.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "def fcfs_scheduling(processes):\n",
10 | " # Sort the processes based on their arrival time (ascending order)\n",
11 | " # This ensures that processes with earlier arrival times are scheduled first.\n",
12 | " processes.sort(key=lambda p: p[1])\n",
13 | "\n",
14 | " # Initialize the starting time of the first process as its arrival time\n",
15 | " current_time = processes[0][1]\n",
16 | "\n",
17 | " # Initialize variables to keep track of total waiting time and turnaround time\n",
18 | " waiting_time_total = 0\n",
19 | " turnaround_time_total = 0\n",
20 | "\n",
21 | " # Print the table header for the process details\n",
22 | " print(\"Process ID\\tArrival Time\\tBurst Time\\tWaiting Time\\tTurnaround Time\")\n",
23 | "\n",
24 | " # Iterate through each process in the sorted order\n",
25 | " for process in processes:\n",
26 | " # Unpack the process details (process ID, arrival time, and burst time)\n",
27 | " pid, arrival_time, burst_time = process\n",
28 | "\n",
29 | " # Calculate the waiting time for the current process, ensuring it is non-negative\n",
30 | " waiting_time = max(0, current_time - arrival_time)\n",
31 | "\n",
32 | " # Calculate the turnaround time for the current process\n",
33 | " turnaround_time = waiting_time + burst_time\n",
34 | "\n",
35 | " # Update the total waiting time and turnaround time\n",
36 | " waiting_time_total += waiting_time\n",
37 | " turnaround_time_total += turnaround_time\n",
38 | "\n",
39 | " # Print the details for the current process\n",
40 | " print(f\"{pid}\\t\\t{arrival_time}\\t\\t{burst_time}\\t\\t{waiting_time}\\t\\t{turnaround_time}\")\n",
41 | "\n",
42 | " # Add the print statements for the process execution\n",
43 | " # print(f\"Process {pid} started execution at time {current_time}\")\n",
44 | " # Update the current time to the completion time of the current process\n",
45 | " current_time += burst_time\n",
46 | " # print(f\"Process {pid} completed execution at time {current_time}\")\n",
47 | " \n",
48 | " \n",
49 | "\n",
50 | " # Calculate the average waiting time and turnaround time for all processes\n",
51 | " n = len(processes)\n",
52 | " avg_waiting_time = waiting_time_total / n\n",
53 | " avg_turnaround_time = turnaround_time_total / n\n",
54 | "\n",
55 | " # Print the average waiting time and average turnaround time\n",
56 | " print(f\"\\nAverage Waiting Time: {avg_waiting_time:.2f}\")\n",
57 | " print(f\"Average Turnaround Time: {avg_turnaround_time:.2f}\")\n",
58 | "\n",
59 | "if __name__ == \"__main__\":\n",
60 | " # Example usage with user input for the number of processes\n",
61 | " n = int(input(\"Enter the number of processes: \"))\n",
62 | " processes = []\n",
63 | "\n",
64 | " # Prompt the user to enter arrival time and burst time for each process\n",
65 | " print(\"\\nEnter Process Arrival Time and Burst Time\")\n",
66 | " for i in range(n):\n",
67 | " arrival_time = float(input(f\"P[{i+1}] Arrival Time: \"))\n",
68 | " burst_time = float(input(f\"P[{i+1}] Burst Time: \"))\n",
69 | " processes.append((i+1, arrival_time, burst_time))\n",
70 | "\n",
71 | " # Call the FCFS scheduling function with the list of processes\n",
72 | " fcfs_scheduling(processes)\n"
73 | ]
74 | }
75 | ],
76 | "metadata": {
77 | "kernelspec": {
78 | "display_name": "Python 3",
79 | "language": "python",
80 | "name": "python3"
81 | },
82 | "language_info": {
83 | "codemirror_mode": {
84 | "name": "ipython",
85 | "version": 3
86 | },
87 | "file_extension": ".py",
88 | "mimetype": "text/x-python",
89 | "name": "python",
90 | "nbconvert_exporter": "python",
91 | "pygments_lexer": "ipython3",
92 | "version": "3.11.0"
93 | },
94 | "orig_nbformat": 4
95 | },
96 | "nbformat": 4,
97 | "nbformat_minor": 2
98 | }
99 |
--------------------------------------------------------------------------------
/Final/priority_non.py:
--------------------------------------------------------------------------------
1 |
2 | def schedulingProcess(process_data):
3 | start_time = []
4 | exit_time = []
5 | s_time = 0
6 | process_working_sequence = []
7 | process_data.sort(key=lambda x: x[1])
8 |
9 | for i in range(len(process_data)):
10 | ready_queue = []
11 | temp = []
12 | normal_queue = []
13 |
14 | for j in range(len(process_data)):
15 | if (process_data[j][1] <= s_time) and (process_data[j][4] == 0):
16 | temp.extend([process_data[j][0], process_data[j][1], process_data[j][2], process_data[j][3]])
17 | ready_queue.append(temp)
18 | temp = []
19 | elif process_data[j][4] == 0:
20 | temp.extend([process_data[j][0], process_data[j][1], process_data[j][2], process_data[j][3]])
21 | normal_queue.append(temp)
22 | temp = []
23 |
24 | if len(ready_queue) != 0:
25 | ready_queue.sort(key=lambda x: x[3], reverse=True)
26 | start_time.append(s_time)
27 | s_time = s_time + ready_queue[0][2]
28 | exit_time.append(s_time)
29 | process_working_sequence.append(ready_queue[0][0])
30 | for k in range(len(process_data)):
31 | if process_data[k][0] == ready_queue[0][0]:
32 | break
33 | process_data[k][4] = 1
34 | process_data[k].append(s_time)
35 |
36 | elif len(ready_queue) == 0:
37 | if s_time < normal_queue[0][1]:
38 | s_time = normal_queue[0][1]
39 | start_time.append(s_time)
40 | s_time = s_time + normal_queue[0][2]
41 | exit_time.append(s_time)
42 | process_working_sequence.append(normal_queue[0][0])
43 | for k in range(len(process_data)):
44 | if process_data[k][0] == normal_queue[0][0]:
45 | break
46 | process_data[k][4] = 1
47 | process_data[k].append(s_time)
48 | t_time = calculateTurnaroundTime(process_data)
49 | w_time = calculateWaitingTime(process_data)
50 | printData(process_data, t_time, w_time, process_working_sequence)
51 |
52 |
53 | def calculateTurnaroundTime(process_data):
54 | total_turnaround_time = 0
55 | for i in range(len(process_data)):
56 | turnaround_time = process_data[i][5] - process_data[i][1]
57 | total_turnaround_time = total_turnaround_time + turnaround_time
58 | process_data[i].append(turnaround_time)
59 | average_turnaround_time = total_turnaround_time / len(process_data)
60 | return average_turnaround_time
61 |
62 |
63 | def calculateWaitingTime(process_data):
64 | total_waiting_time = 0
65 | for i in range(len(process_data)):
66 | waiting_time = process_data[i][6] - process_data[i][2]
67 | total_waiting_time = total_waiting_time + waiting_time
68 | process_data[i].append(waiting_time)
69 | average_waiting_time = total_waiting_time / len(process_data)
70 | return average_waiting_time
71 |
72 |
73 | def printData(process_data, average_turnaround_time, average_waiting_time, process_working_sequence):
74 | process_data.sort(key=lambda x: x[0])
75 |
76 | print("Process_ID\tArrival_Time\tBurst_Time\tPriority\tCompleted\tExit_Time\tTurnaround_Time\tWaiting_Time")
77 | for i in range(len(process_data)):
78 | for j in range(len(process_data[i])):
79 | print(process_data[i][j], end="\t\t")
80 | print()
81 | print(f'Average Turnaround Time: {average_turnaround_time}')
82 | print(f'Average Waiting Time: {average_waiting_time}')
83 | print("Sequence of Processes:", process_working_sequence)
84 |
85 |
86 |
87 | no_of_processes = int(input("Enter number of processes: "))
88 | process_data = []
89 |
90 | print("\nEnter Process Arrival Time and Burst Time")
91 | for i in range(no_of_processes):
92 | temporary = []
93 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
94 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
95 | priority = float(input(f"P[{i+1}] Priority: "))
96 | temporary.extend([i+1, arrival_time, burst_time, priority, 0])
97 | process_data.append(temporary)
98 |
99 | schedulingProcess(process_data)
100 |
101 | ### highest priority == higher number
102 |
103 |
104 | '''
105 |
106 | 5
107 | 0
108 | 2
109 | 2
110 | 0
111 | 1
112 | 1
113 | 0
114 | 8
115 | 4
116 | 0
117 | 4
118 | 2
119 | 0
120 | 5
121 | 3
122 |
123 |
124 |
125 |
126 | '''
127 |
128 |
--------------------------------------------------------------------------------
/Final/sjf.py:
--------------------------------------------------------------------------------
1 | # Define a function to perform the scheduling process
2 | def schedulingProcess(process_data):
3 | start_time = []
4 | exit_time = []
5 | current_time = 0
6 | process_data.sort(key=lambda x: x[1])
7 | process_working_sequence = []
8 |
9 |
10 | for i in range(len(process_data)):
11 | ready_queue = []
12 | temp = []
13 | normal_queue = []
14 |
15 | for j in range(len(process_data)):
16 | if (process_data[j][1] <= current_time) and (process_data[j][3] == 0):
17 | temp.extend([process_data[j][0], process_data[j][1], process_data[j][2]])
18 | ready_queue.append(temp)
19 | temp = []
20 | elif process_data[j][3] == 0:
21 | temp.extend([process_data[j][0], process_data[j][1], process_data[j][2]])
22 | normal_queue.append(temp)
23 | temp = []
24 |
25 | if len(ready_queue) != 0:
26 | ready_queue.sort(key=lambda x: x[2])
27 | start_time.append(current_time)
28 | current_time = current_time + ready_queue[0][2]
29 | exit_time.append(current_time)
30 | process_working_sequence.append(ready_queue[0][0])
31 | for k in range(len(process_data)):
32 | if process_data[k][0] == ready_queue[0][0]:
33 | break
34 | process_data[k][3] = 1
35 | process_data[k].append(current_time)
36 |
37 | elif len(ready_queue) == 0:
38 | if current_time < normal_queue[0][1]:
39 | current_time = normal_queue[0][1]
40 | start_time.append(current_time)
41 | current_time = current_time + normal_queue[0][2]
42 | exit_time.append(current_time)
43 | process_working_sequence.append(normal_queue[0][0])
44 | for k in range(len(process_data)):
45 | if process_data[k][0] == normal_queue[0][0]:
46 | break
47 | process_data[k][3] = 1
48 | process_data[k].append(current_time)
49 |
50 | t_time = calculateTurnaroundTime(process_data)
51 | w_time = calculateWaitingTime(process_data)
52 | printData(process_data, t_time, w_time , process_working_sequence , start_time , exit_time)
53 |
54 | def calculateTurnaroundTime(process_data):
55 | total_turnaround_time = 0
56 | for i in range(len(process_data)):
57 | turnaround_time = process_data[i][4] - process_data[i][1]
58 | total_turnaround_time = total_turnaround_time + turnaround_time
59 | process_data[i].append(turnaround_time)
60 | average_turnaround_time = total_turnaround_time / len(process_data)
61 | return average_turnaround_time
62 |
63 | def calculateWaitingTime(process_data):
64 | total_waiting_time = 0
65 | for i in range(len(process_data)):
66 | waiting_time = process_data[i][5] - process_data[i][2]
67 | total_waiting_time = total_waiting_time + waiting_time
68 | process_data[i].append(waiting_time)
69 | average_waiting_time = total_waiting_time / len(process_data)
70 | return average_waiting_time
71 |
72 | def printData(process_data, average_turnaround_time, average_waiting_time , process_working_sequence , start_time , exit_time):
73 | process_data.sort(key=lambda x: x[0])
74 | print("Process_ID\tArrival_Time\tBurst_Time\tCompleted\tExit_Time\tTurnaround_Time\tWaiting_Time")
75 |
76 | for i in range(len(process_data)):
77 | for j in range(len(process_data[i])):
78 | print(process_data[i][j], end=" ")
79 | print()
80 |
81 | print(f'Average Turnaround Time: {average_turnaround_time}')
82 | print(f'Average Waiting Time: {average_waiting_time}')
83 | print("Sequence of Processes:", process_working_sequence)
84 | for i in range(len(process_working_sequence)):
85 | print("|| " , process_working_sequence[i], " " , end="")
86 | print("||")
87 | for i in range(len(start_time)):
88 | print(start_time[i]," " , exit_time[i] , end=" ")
89 | print()
90 |
91 |
92 | no_of_processes = int(input("Enter number of processes: "))
93 | process_data = []
94 | print("\nEnter Process Arrival Time and Burst Time")
95 | for i in range(no_of_processes):
96 | temporary = []
97 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
98 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
99 | temporary.extend([i+1, arrival_time, burst_time, 0])
100 | process_data.append(temporary)
101 |
102 | schedulingProcess(process_data)
103 |
104 | '''
105 | 3
106 | 0
107 | 8
108 | .4
109 | 4
110 | 1
111 | 1
112 |
113 |
114 | '''
--------------------------------------------------------------------------------
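
As a sanity check on the sample input quoted in the docstring above (P1: arrival 0, burst 8; P2: arrival 0.4, burst 4; P3: arrival 1, burst 1), here is a minimal standalone trace of the same non-preemptive SJF policy (not part of the repository); it works out to P1 finishing at 8, P3 at 9, and P2 at 13:

```python
# Non-preemptive SJF trace for the docstring sample above.
procs = [(1, 0.0, 8.0), (2, 0.4, 4.0), (3, 1.0, 1.0)]   # (pid, arrival, burst)
t, done, order = 0.0, set(), []
while len(done) < len(procs):
    ready = [p for p in procs if p[1] <= t and p[0] not in done]
    if not ready:                            # CPU idle until the next arrival
        t = min(p[1] for p in procs if p[0] not in done)
        continue
    pid, arrival, burst = min(ready, key=lambda p: p[2])   # shortest burst runs next
    t += burst                               # run the chosen job to completion
    done.add(pid)
    order.append((pid, t))                   # (pid, completion time)
print(order)   # [(1, 8.0), (3, 9.0), (2, 13.0)]
```
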
/Final/srt.py:
--------------------------------------------------------------------------------
1 | def schedulingProcess(process_data):
2 | start_time = []
3 | exit_time = []
4 | s_time = 0
5 | process_working_sequence = []
6 | process_data.sort(key=lambda x: x[1])
7 |
8 | while True:
9 | ready_queue = []
10 | normal_queue = []
11 | temp = []
12 |
13 | for i in range(len(process_data)):
14 | if process_data[i][1] <= s_time and process_data[i][3] == 0:
15 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])
16 | ready_queue.append(temp)
17 | temp = []
18 | elif process_data[i][3] == 0:
19 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])
20 | normal_queue.append(temp)
21 | temp = []
22 |
23 | if len(ready_queue) == 0 and len(normal_queue) == 0:
24 | break
25 |
26 | if len(ready_queue) != 0:
27 | ready_queue.sort(key=lambda x: x[2])
28 | start_time.append(s_time)
29 | s_time = s_time + 1
30 | exit_time.append(s_time)
31 | process_working_sequence.append(ready_queue[0][0])
32 | for k in range(len(process_data)):
33 | if process_data[k][0] == ready_queue[0][0]:
34 | break
35 | process_data[k][2] = process_data[k][2] - 1
36 |
37 | if process_data[k][2] == 0:
38 | process_data[k][3] = 1
39 | process_data[k].append(s_time)
40 |
41 | if len(ready_queue) == 0:
42 | if s_time < normal_queue[0][1]:
43 | s_time = normal_queue[0][1]
44 | start_time.append(s_time)
45 | s_time = s_time + 1
46 | exit_time.append(s_time)
47 | process_working_sequence.append(normal_queue[0][0])
48 | for k in range(len(process_data)):
49 | if process_data[k][0] == normal_queue[0][0]:
50 | break
51 | process_data[k][2] = process_data[k][2] - 1
52 | if process_data[k][2] == 0:
53 | process_data[k][3] = 1
54 | process_data[k].append(s_time)
55 |
56 | t_time = calculateTurnaroundTime(process_data)
57 | w_time = calculateWaitingTime(process_data)
58 | printData(process_data, t_time, w_time, process_working_sequence , start_time , exit_time)
59 |
60 | def calculateTurnaroundTime(process_data):
61 | total_turnaround_time = 0
62 | for i in range(len(process_data)):
63 | turnaround_time = process_data[i][5] - process_data[i][1]
64 | total_turnaround_time = total_turnaround_time + turnaround_time
65 | process_data[i].append(turnaround_time)
66 | average_turnaround_time = total_turnaround_time / len(process_data)
67 | return average_turnaround_time
68 |
69 | def calculateWaitingTime(process_data):
70 | total_waiting_time = 0
71 | for i in range(len(process_data)):
72 | waiting_time = process_data[i][6] - process_data[i][4]
73 | total_waiting_time = total_waiting_time + waiting_time
74 | process_data[i].append(waiting_time)
75 | average_waiting_time = total_waiting_time / len(process_data)
76 | return average_waiting_time
77 |
78 | def printData(process_data, average_turnaround_time, average_waiting_time, process_working_sequence , start_time , exit_time):
79 | process_data.sort(key=lambda x: x[0])
80 | print("Process_ID\tArrival_Time\tRem_Burst_Time\tCompleted\tOrig_Burst_Time\tExit_time\tTurnaround_Time\tWaiting_Time")
81 |
82 | for i in range(len(process_data)):
83 | for j in range(len(process_data[i])):
84 | print(process_data[i][j], end="\t\t")
85 | print()
86 |
87 | print(f'Average Turnaround Time: {average_turnaround_time}')
88 | print(f'Average Waiting Time: {average_waiting_time}')
89 | print(f'Sequence of Process: {process_working_sequence}')
90 |
91 |
92 | no_of_processes = int(input("Enter number of processes: "))
93 | process_data = []
94 |
95 | print("\nEnter Process Arrival Time and Burst Time")
96 | for i in range(no_of_processes):
97 | temporary = []
98 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
99 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
100 | temporary.extend([i+1, arrival_time, burst_time, 0, burst_time])
101 | process_data.append(temporary)
102 |
103 | schedulingProcess(process_data)
104 |
105 |
106 | '''
107 | 3
108 | 0
109 | 3
110 | 5
111 | 7
112 | 2
113 | 14
114 |
115 | ANS:
116 | | P1 | P3 | P2 | P3 |
117 | 0 3 5 12 24
118 |
119 |
120 | '''
--------------------------------------------------------------------------------
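
The docstring above quotes an expected Gantt sequence (P1, P3, P2, P3 with boundaries 0, 3, 5, 12, 24) for its sample input. The following standalone sketch (not part of the repository) re-derives that sequence with a unit-time shortest-remaining-time simulation:

```python
# Unit-time SRT trace for the docstring sample above
# (P1: arrival 0, burst 3; P2: arrival 5, burst 7; P3: arrival 2, burst 14).
procs = {1: (0, 3), 2: (5, 7), 3: (2, 14)}          # pid -> (arrival, burst)
remaining = {pid: b for pid, (a, b) in procs.items()}
t, timeline = 0, []
while any(remaining.values()):
    ready = [pid for pid, (a, _) in procs.items() if a <= t and remaining[pid] > 0]
    if not ready:                                   # CPU idle until the next arrival
        t += 1
        continue
    pid = min(ready, key=lambda p: remaining[p])    # shortest remaining time runs this tick
    remaining[pid] -= 1
    timeline.append(pid)
    t += 1
print(timeline)   # P1 for 0-3, P3 for 3-5, P2 for 5-12, P3 for 12-24
```
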
/README.md:
--------------------------------------------------------------------------------
1 | # Operating System Code in Python 3
[Python 3](https://www.python.org/) [NumPy](https://pypi.org/project/numpy/) [SciPy](https://pypi.org/project/scipy/) [matplotlib](https://pypi.org/project/matplotlib/)
2 |
3 | ## Installations
4 |
5 | * **Python 3** can be downloaded from [here](https://www.python.org/downloads/). Make sure to check **Add Python 3.x to PATH** during installation.
6 | * **NumPy**, **SciPy**, **matplotlib**, and other libraries can be installed with:
7 | ```bash
8 | pip install numpy
9 | pip install scipy
10 | pip install matplotlib
11 | pip install library-name
12 | ```
13 | or, if you have multiple Python versions installed, use:
14 | ```bash
15 | py -3.9 -m pip install numpy
16 | py -3.11 -m pip install numpy
17 | .....
18 | ```
19 |
20 | ## Code for Academic Course CIT-322 (Operating System Sessional)
21 |
22 |
23 |
24 |
25 | ## `Mahabub Sir Part`
26 |
27 |
28 | ### First Come First Serve (FCFS) (With Different Arrival Time)
29 | [jupyter File](./first_come_first_serve.ipynb)
30 | [Python File](./same_python_file/first_come_first_serve.py)
31 | ### Shortest Job First Non-Preemptive (SJF) (With Different Arrival Time)
32 | [jupyter File](./shortest_job_first_non_preemptive.ipynb)
33 | [Python File](./same_python_file/shortest_job_first_non_preemptive.py)
34 | ### Shortest Job First Preemptive (SRT) (With Different Arrival Time)
35 | [jupyter File](./shortest_job_first_preemptive_SRT.ipynb)
36 | [Python File](./same_python_file/shortest_job_first_preemptive_SRT.py)
37 | ### Priority Preemptive (With Different Arrival Time)
38 | [jupyter File](./priority_primptive.ipynb)
39 | [Python File](./same_python_file/priority_primptive.py)
40 | ### Priority Non-Preemptive (With Different Arrival Time)
41 | [jupyter File](./priority_non_primptive.ipynb)
42 | [Python File](./same_python_file/priority_non_primptive.py)
43 | ### Round Robin (With Different Arrival Time)
44 | [jupyter File](./round_robin.ipynb)
45 | [Python File](./same_python_file/round_robin.py)
46 | ### FCFS With Concurrent Processes
47 | [jupyter File](./fcfs_with_concurent_process.ipynb)
48 | [Python File](./same_python_file/fcfs_with_concurent_process.py)
49 |
50 |
51 |
52 |
53 | ## `Masud Sir Part`
54 |
55 |
56 | ### Lab - 1 == Peterson Solution (Producer Consumer Problem)
57 | [jupyter File](./peterson_solution.ipynb)
58 | [Python File](./same_python_file/peterson_solution.py) / [Python File Alternative](./same_python_file/peterson_solution_alternative.py)
59 |
60 | ### Lab - 2 == Mutex Lock (Protect Critical-Sections & Prevent Race-Conditions)
61 | [jupyter File](./mutex_lock.ipynb)
62 | [Python File](./same_python_file/mutex_lock.py)
63 |
64 | ### Lab - 3 == Semaphore Solution in Process-Synchronization (Producer Consumer Problem)
65 | [jupyter File](./semaphore_producer_consumer.ipynb)
66 | [Python File](./same_python_file/semaphore_producer_consumer.py) / [Python File Alternative](./same_python_file/semaphore_producer_consumer_alternative.py)
67 |
68 | ### Lab - 4 == Semaphore Solution in Process-Synchronization (Dining Philosopher Problem)
69 | [jupyter File](./semaphore_dining_philosopher.ipynb)
70 | [Python File](./same_python_file/semaphore_dining_philosopher.py)
71 |
72 | ### Lab - 5 == Banker’s Algorithm (Deadlock Avoidance)
73 | [jupyter File](./bankers_algorithm.ipynb)
74 | [Python File](./same_python_file/bankers_algorithm.py) / [Python File Alternative 2](./same_python_file/bankers_algorithm_alternative_2.py) / [Python File Alternative 3](./same_python_file/bankers_algorithm_alternative_3.py) / [Python File Alternative 4](./same_python_file/bankers_algorithm_alternative_4.py)
75 |
76 | ### Lab - 6 == Deadlock Detection
77 | [jupyter File](./deadlock_detection.ipynb)
78 | [Python File](./same_python_file/deadlock_detection.py) / [Python File Alternative](./same_python_file/deadlock_detection_alternative.py)
79 |
80 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/fcfs_with_concurent_process.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import multiprocessing\n",
10 | "import time\n",
11 | "\n",
12 | "def process_function(process_id, arrival_time, burst_time):\n",
13 | " print(f\"Process {process_id} started execution at time {time.time()}\")\n",
14 | " time.sleep(burst_time)\n",
15 | " print(f\"Process {process_id} completed execution at time {time.time()}\")\n",
16 | "\n",
17 | "\n",
18 | "def fcfs_scheduling(processes):\n",
19 | " # Sort the processes based on their arrival time (ascending order)\n",
20 | " # This ensures that processes with earlier arrival times are scheduled first.\n",
21 | " processes.sort(key=lambda p: p[1])\n",
22 | "\n",
23 | " # Initialize the starting time of the first process as its arrival time\n",
24 | " current_time = processes[0][1]\n",
25 | "\n",
26 | " # Initialize variables to keep track of total waiting time and turnaround time\n",
27 | " waiting_time_total = 0\n",
28 | " turnaround_time_total = 0\n",
29 | "\n",
30 | " # Print the table header for the process details\n",
31 | " print(\"Process ID\\tArrival Time\\tBurst Time\\tWaiting Time\\tTurnaround Time\")\n",
32 | "\n",
33 | " # Iterate through each process in the sorted order\n",
34 | " for process in processes:\n",
35 | " # Unpack the process details (process ID, arrival time, and burst time)\n",
36 | " pid, arrival_time, burst_time = process\n",
37 | "\n",
38 | " # Calculate the waiting time for the current process, ensuring it is non-negative\n",
39 | " waiting_time = max(0, current_time - arrival_time)\n",
40 | "\n",
41 | " # Calculate the turnaround time for the current process\n",
42 | " turnaround_time = waiting_time + burst_time\n",
43 | "\n",
44 | " # Update the total waiting time and turnaround time\n",
45 | " waiting_time_total += waiting_time\n",
46 | " turnaround_time_total += turnaround_time\n",
47 | "\n",
48 | " # Print the details for the current process\n",
49 | " print(f\"{pid}\\t\\t{arrival_time}\\t\\t{burst_time}\\t\\t{waiting_time}\\t\\t{turnaround_time}\")\n",
50 | "\n",
51 | "\n",
52 | " \n",
53 | " # Add the print statements for the process execution\n",
54 | " # print(f\"Process {pid} started execution at time {current_time}\")\n",
55 | " # Update the current time to the completion time of the current process\n",
56 | " current_time += burst_time\n",
57 | " # print(f\"Process {pid} completed execution at time {current_time}\")\n",
58 | "\n",
59 | "\n",
60 | " # Calculate the average waiting time and turnaround time for all processes\n",
61 | " n = len(processes)\n",
62 | " avg_waiting_time = waiting_time_total / n\n",
63 | " avg_turnaround_time = turnaround_time_total / n\n",
64 | "\n",
65 | " # Print the average waiting time and average turnaround time\n",
66 | " print(f\"\\nAverage Waiting Time: {avg_waiting_time:.2f}\")\n",
67 | " print(f\"Average Turnaround Time: {avg_turnaround_time:.2f}\")\n",
68 | "\n",
69 | "if __name__ == \"__main__\":\n",
70 | " # Example usage with user input for the number of processes\n",
71 | " n = int(input(\"Enter the number of processes: \"))\n",
72 | " processes = []\n",
73 | "\n",
74 | " # Prompt the user to enter arrival time and burst time for each process\n",
75 | " print(\"\\nEnter Process Arrival Time and Burst Time\")\n",
76 | " for i in range(n):\n",
77 | " arrival_time = float(input(f\"P[{i+1}] Arrival Time: \"))\n",
78 | " burst_time = float(input(f\"P[{i+1}] Burst Time: \"))\n",
79 | " processes.append((i+1, arrival_time, burst_time))\n",
80 | " \n",
81 | " \n",
82 | " jobs = []\n",
83 | " for process in processes:\n",
84 | " process_id, arrival_time, burst_time = process\n",
85 | " p = multiprocessing.Process(target=process_function, args=(process_id, arrival_time, burst_time))\n",
86 | " jobs.append(p)\n",
87 | " p.start()\n",
88 | "\n",
89 | " for job in jobs:\n",
90 | " job.join()\n",
91 | "\n",
92 | " print(\"All processes completed execution.\")\n",
93 | "\n",
94 | " # Call the FCFS scheduling function with the list of processes\n",
95 | " fcfs_scheduling(processes)\n",
96 | "\n",
97 | "\n",
98 | "\n"
99 | ]
100 | }
101 | ],
102 | "metadata": {
103 | "language_info": {
104 | "name": "python"
105 | },
106 | "orig_nbformat": 4
107 | },
108 | "nbformat": 4,
109 | "nbformat_minor": 2
110 | }
111 |
--------------------------------------------------------------------------------
/same_python_file/bankers_algorithm.py:
--------------------------------------------------------------------------------
1 | class BankersAlgorithm:
2 | def __init__(self, max_resources, allocated_resources, total_resources):
3 | # Initialize the BankersAlgorithm class with the maximum resources required by each process,
4 | # currently allocated resources for each process, and total available resources in the system.
5 | self.max_resources = max_resources
6 | self.allocated_resources = allocated_resources
7 | self.total_resources = total_resources
8 | self.num_processes = len(max_resources) # Number of processes in the system
9 | self.num_resources = len(total_resources) # Number of resource types in the system
10 |
11 | # Calculate the available resources by subtracting the currently allocated resources from the total resources
12 | self.available_resources = [total_resources[j] - sum(allocated_resources[i][j] for i in range(self.num_processes))
13 | for j in range(self.num_resources)]
14 |
15 | def is_safe_state(self, request, process_id):
16 |         # Check if the requested resources exceed the remaining need (max claim minus already allocated) of the process
17 |         if any(request[j] > self.max_resources[process_id][j] - self.allocated_resources[process_id][j] for j in range(self.num_resources)):
18 | return False
19 |
20 | # Check if the requested resources exceed the available resources
21 | if any(request[j] > self.available_resources[j] for j in range(self.num_resources)):
22 | return False
23 |
24 | # Temporarily allocate the requested resources to check for safety
25 | for j in range(self.num_resources):
26 | self.available_resources[j] -= request[j]
27 | self.allocated_resources[process_id][j] += request[j]
28 |
29 | # Perform safety check
30 | is_safe = self.check_safety()
31 |
32 | # Rollback the temporary allocation
33 | for j in range(self.num_resources):
34 | self.available_resources[j] += request[j]
35 | self.allocated_resources[process_id][j] -= request[j]
36 |
37 | return is_safe
38 |
39 | def check_safety(self):
40 | # Safety check to determine if the current system state is safe or not
41 | work = self.available_resources[:] # Make a copy of available resources
42 | finish = [False] * self.num_processes # Keep track of finished processes
43 | safe_sequence = [] # To store the sequence of processes that can finish without deadlock
44 |
45 | # Iterate until all processes are finished or deadlock is detected
46 | while True:
47 | found = False
48 | for i in range(self.num_processes):
49 | # Check if the current process can finish based on its needs and available resources
50 | if not finish[i] and all(self.max_resources[i][j] - self.allocated_resources[i][j] <= work[j]
51 | for j in range(self.num_resources)):
52 | # If the process can finish, update the work and finish arrays
53 | work = [work[j] + self.allocated_resources[i][j] for j in range(self.num_resources)]
54 | finish[i] = True
55 | safe_sequence.append(i)
56 | found = True
57 | # If no process can finish in the current iteration, exit the loop
58 | if not found:
59 | break
60 |
61 | # If all processes are finished, the system is in a safe state
62 | return all(finish)
63 |
64 | # The main function to demonstrate the Banker's algorithm
65 | if __name__ == "__main__":
66 |
67 | # case 1
68 | max_resources = [
69 | [7, 5, 3],
70 | [3, 2, 2],
71 | [9, 0, 2],
72 | [2, 2, 2],
73 | [4, 3, 3]
74 | ]
75 |
76 | allocated_resources = [
77 | [0, 1, 0],
78 | [2, 0, 0],
79 | [3, 0, 2],
80 | [2, 1, 1],
81 | [0, 0, 2]
82 | ]
83 |
84 | total_resources = [10, 5, 7]
85 |
86 | banker = BankersAlgorithm(max_resources, allocated_resources, total_resources)
87 | request = [1, 0, 2]
88 | process_id = 1
89 |
90 | if banker.is_safe_state(request, process_id):
91 | print("Request is safe to be granted.")
92 | else:
93 | print("Request may lead to a deadlock and cannot be granted.")
94 |
95 |
96 | # case 2
97 | max_resources = [
98 | [7, 5, 3],
99 | [3, 2, 2],
100 | [9, 0, 2],
101 | ]
102 |
103 | allocated_resources = [
104 | [0, 1, 0],
105 | [2, 0, 0],
106 | [3, 0, 2],
107 | ]
108 |
109 | total_resources = [10, 5, 7]
110 |
111 | banker = BankersAlgorithm(max_resources, allocated_resources, total_resources)
112 | request = [5, 0, 0]
113 | process_id = 0
114 |
115 | if banker.is_safe_state(request, process_id):
116 | print("Request is safe to be granted.")
117 | else:
118 | print("Request may lead to a deadlock and cannot be granted.")
119 |
--------------------------------------------------------------------------------
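
To make the matrices in the demo above easier to follow, here is a small standalone sketch (not part of the repository) that computes the Need matrix (Max - Allocated) and the initial Available vector for case 1; these are exactly the quantities the safety check works with:

```python
# Need and Available for "case 1" above.
max_resources = [[7, 5, 3], [3, 2, 2], [9, 0, 2], [2, 2, 2], [4, 3, 3]]
allocated     = [[0, 1, 0], [2, 0, 0], [3, 0, 2], [2, 1, 1], [0, 0, 2]]
total         = [10, 5, 7]

# Need[i][j] = Max[i][j] - Allocated[i][j]
need = [[m - a for m, a in zip(m_row, a_row)]
        for m_row, a_row in zip(max_resources, allocated)]
# Available[j] = Total[j] - sum of column j of Allocated
available = [t - sum(row[j] for row in allocated) for j, t in enumerate(total)]

print(need)       # [[7, 4, 3], [1, 2, 2], [6, 0, 0], [0, 1, 1], [4, 3, 1]]
print(available)  # [3, 3, 2]
```
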
/Final/priority_prim.py:
--------------------------------------------------------------------------------
1 |
2 | def schedulingProcess(process_data):
3 | start_time = []
4 | exit_time = []
5 | s_time = 0
6 | process_working_sequence = []
7 | process_data.sort(key=lambda x: x[1])
8 |
9 |     while True:
10 | ready_queue = []
11 | normal_queue = []
12 | temp = []
13 | for i in range(len(process_data)):
14 | if process_data[i][1] <= s_time and process_data[i][4] == 0:
15 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][3],
16 | process_data[i][5]])
17 | ready_queue.append(temp)
18 | temp = []
19 | elif process_data[i][4] == 0:
20 |                 temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][3],
21 | process_data[i][5]])
22 | normal_queue.append(temp)
23 | temp = []
24 |
25 | if len(ready_queue) == 0 and len(normal_queue) == 0:
26 | break
27 |
28 | if len(ready_queue) != 0:
29 | ready_queue.sort(key=lambda x: x[3], reverse=True)
30 | start_time.append(s_time)
31 | s_time = s_time + 1
32 | exit_time.append(s_time)
33 | process_working_sequence.append(ready_queue[0][0])
34 | for k in range(len(process_data)):
35 | if process_data[k][0] == ready_queue[0][0]:
36 | break
37 | process_data[k][2] = process_data[k][2] - 1
38 | if process_data[k][2] == 0:
39 | process_data[k][4] = 1
40 | process_data[k].append(s_time)
41 |
42 | if len(ready_queue) == 0:
43 | normal_queue.sort(key=lambda x: x[1])
44 | if s_time < normal_queue[0][1]:
45 | s_time = normal_queue[0][1]
46 | start_time.append(s_time)
47 | s_time = s_time + 1
48 | exit_time.append(s_time)
49 | process_working_sequence.append(normal_queue[0][0])
50 | for k in range(len(process_data)):
51 | if process_data[k][0] == normal_queue[0][0]:
52 | break
53 | process_data[k][2] = process_data[k][2] - 1
54 | if process_data[k][2] == 0:
55 | process_data[k][4] = 1
56 | process_data[k].append(s_time)
57 |
58 | t_time = calculateTurnaroundTime(process_data)
59 | w_time = calculateWaitingTime(process_data)
60 | printData(process_data, t_time, w_time, process_working_sequence)
61 |
62 | def calculateTurnaroundTime(process_data):
63 | total_turnaround_time = 0
64 | for i in range(len(process_data)):
65 | # turnaround_time = process_data[i][6] - process_data[i][5]
66 | turnaround_time = process_data[i][6] - process_data[i][1]
67 | total_turnaround_time = total_turnaround_time + turnaround_time
68 | process_data[i].append(turnaround_time)
69 | average_turnaround_time = total_turnaround_time / len(process_data)
70 | return average_turnaround_time
71 |
72 | def calculateWaitingTime(process_data):
73 | total_waiting_time = 0
74 | for i in range(len(process_data)):
75 |         # waiting_time = turnaround_time - original_burst_time
76 |         waiting_time = process_data[i][7] - process_data[i][5]
77 | total_waiting_time = total_waiting_time + waiting_time
78 | process_data[i].append(waiting_time)
79 | average_waiting_time = total_waiting_time / len(process_data)
80 | return average_waiting_time
81 |
82 | def printData(process_data, average_turnaround_time, average_waiting_time, process_working_sequence):
83 | process_data.sort(key=lambda x: x[0])
84 | print("Process_ID\tArrival_Time\tRem_Burst_Time\tPriority\tCompleted\tOrig_Burst_Time\tExit_Time\tTurnaround_Time\tWaiting_Time")
85 |
86 | for i in range(len(process_data)):
87 | for j in range(len(process_data[i])):
88 | print(process_data[i][j], end=" ")
89 | print()
90 |
91 | print(f'Average Turnaround Time: {average_turnaround_time}')
92 | print(f'Average Waiting Time: {average_waiting_time}')
93 | print(f'Sequence of Process: {process_working_sequence}')
94 |
95 |
96 | no_of_processes = int(input("Enter number of processes: "))
97 | process_data = []
98 |
99 | print("\nEnter Process Arrival Time and Burst Time")
100 | for i in range(no_of_processes):
101 | temporary = []
102 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
103 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
104 | priority = float(input(f"P[{i+1}] Priority: "))
105 | temporary.extend([i+1, arrival_time, burst_time, priority, 0, burst_time])
106 | process_data.append(temporary)
107 |
108 | schedulingProcess(process_data)
109 |
110 |
111 | '''
112 | 5
113 | 0
114 | 3
115 | 9
116 | 1
117 | 4
118 | 10
119 | 2
120 | 6
121 | 8
122 | 3
123 | 4
124 | 6
125 | 5
126 | 2
127 | 1
128 |
129 | ANS:
130 |
131 | | P1 | P2 | P1 | P3 | P4 | P5 |
132 | 0 1 5 7 13 17 19
133 |
134 |
135 |
136 | '''
137 |
138 |
139 |
--------------------------------------------------------------------------------
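
As a check on the expected sequence quoted in the docstring above, here is a standalone unit-time preemptive-priority simulation of the same sample (not part of the repository), where a higher priority number wins the CPU:

```python
# Unit-time preemptive priority trace for the docstring sample above:
# P1(arrival 0, burst 3, prio 9), P2(1, 4, 10), P3(2, 6, 8), P4(3, 4, 6), P5(5, 2, 1).
procs = {1: (0, 3, 9), 2: (1, 4, 10), 3: (2, 6, 8), 4: (3, 4, 6), 5: (5, 2, 1)}
remaining = {pid: b for pid, (a, b, p) in procs.items()}
t, timeline = 0, []
while any(remaining.values()):
    ready = [pid for pid, (a, _, _) in procs.items() if a <= t and remaining[pid] > 0]
    if not ready:                                 # CPU idle until the next arrival
        t += 1
        continue
    pid = max(ready, key=lambda p: procs[p][2])   # highest priority number runs this tick
    remaining[pid] -= 1
    timeline.append(pid)
    t += 1
print(timeline)   # P1 for 0-1, P2 for 1-5, P1 for 5-7, P3 for 7-13, P4 for 13-17, P5 for 17-19
```
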
/same_python_file/shortest_job_first_non_preemptive.py:
--------------------------------------------------------------------------------
1 | class SJF:
2 |
3 | def processData(self, no_of_processes):
4 | process_data = []
5 | for i in range(no_of_processes):
6 | temporary = []
7 | process_id = int(input("Enter Process ID: "))
8 |
9 | arrival_time = float(input(f"Enter Arrival Time for Process {process_id}: "))
10 |
11 | burst_time = float(input(f"Enter Burst Time for Process {process_id}: "))
12 | temporary.extend([process_id, arrival_time, burst_time, 0])
13 | '''
14 | '0' is the state of the process. 0 means not executed and 1 means execution complete
15 | '''
16 | process_data.append(temporary)
17 | SJF.schedulingProcess(self, process_data)
18 |
19 | def schedulingProcess(self, process_data):
20 | start_time = []
21 | exit_time = []
22 | s_time = 0
23 | process_data.sort(key=lambda x: x[1])
24 | '''
25 | Sort processes according to the Arrival Time
26 | '''
27 | for i in range(len(process_data)):
28 | ready_queue = []
29 | temp = []
30 | normal_queue = []
31 |
32 | for j in range(len(process_data)):
33 | if (process_data[j][1] <= s_time) and (process_data[j][3] == 0):
34 | temp.extend([process_data[j][0], process_data[j][1], process_data[j][2]])
35 | ready_queue.append(temp)
36 | temp = []
37 | elif process_data[j][3] == 0:
38 | temp.extend([process_data[j][0], process_data[j][1], process_data[j][2]])
39 | normal_queue.append(temp)
40 | temp = []
41 |
42 | if len(ready_queue) != 0:
43 | ready_queue.sort(key=lambda x: x[2])
44 | '''
45 | Sort the processes according to the Burst Time
46 | '''
47 | start_time.append(s_time)
48 | s_time = s_time + ready_queue[0][2]
49 | e_time = s_time
50 | exit_time.append(e_time)
51 | for k in range(len(process_data)):
52 | if process_data[k][0] == ready_queue[0][0]:
53 | break
54 | process_data[k][3] = 1
55 | process_data[k].append(e_time)
56 |
57 | elif len(ready_queue) == 0:
58 | if s_time < normal_queue[0][1]:
59 | s_time = normal_queue[0][1]
60 | start_time.append(s_time)
61 | s_time = s_time + normal_queue[0][2]
62 | e_time = s_time
63 | exit_time.append(e_time)
64 | for k in range(len(process_data)):
65 | if process_data[k][0] == normal_queue[0][0]:
66 | break
67 | process_data[k][3] = 1
68 | process_data[k].append(e_time)
69 |
70 | t_time = SJF.calculateTurnaroundTime(self, process_data)
71 | w_time = SJF.calculateWaitingTime(self, process_data)
72 | SJF.printData(self, process_data, t_time, w_time)
73 |
74 |
75 | def calculateTurnaroundTime(self, process_data):
76 | total_turnaround_time = 0
77 | for i in range(len(process_data)):
78 | turnaround_time = process_data[i][4] - process_data[i][1]
79 | '''
80 | turnaround_time = completion_time - arrival_time
81 | '''
82 | total_turnaround_time = total_turnaround_time + turnaround_time
83 | process_data[i].append(turnaround_time)
84 | average_turnaround_time = total_turnaround_time / len(process_data)
85 | '''
86 | average_turnaround_time = total_turnaround_time / no_of_processes
87 | '''
88 | return average_turnaround_time
89 |
90 |
91 | def calculateWaitingTime(self, process_data):
92 | total_waiting_time = 0
93 | for i in range(len(process_data)):
94 | waiting_time = process_data[i][5] - process_data[i][2]
95 | '''
96 | waiting_time = turnaround_time - burst_time
97 | '''
98 | total_waiting_time = total_waiting_time + waiting_time
99 | process_data[i].append(waiting_time)
100 | average_waiting_time = total_waiting_time / len(process_data)
101 | '''
102 | average_waiting_time = total_waiting_time / no_of_processes
103 | '''
104 | return average_waiting_time
105 |
106 |
107 | def printData(self, process_data, average_turnaround_time, average_waiting_time):
108 | process_data.sort(key=lambda x: x[0])
109 | '''
110 | Sort processes according to the Process ID
111 | '''
112 | print("Process_ID Arrival_Time Burst_Time Completed Completion_Time Turnaround_Time Waiting_Time")
113 |
114 | for i in range(len(process_data)):
115 | for j in range(len(process_data[i])):
116 |
117 | print(process_data[i][j], end=" ")
118 | print()
119 |
120 | print(f'Average Turnaround Time: {average_turnaround_time}')
121 |
122 | print(f'Average Waiting Time: {average_waiting_time}')
123 |
124 |
125 | if __name__ == "__main__":
126 | no_of_processes = int(input("Enter number of processes: "))
127 | sjf = SJF()
128 | sjf.processData(no_of_processes)
129 |
130 |
131 | '''
132 | 3
133 | 1
134 | 0
135 | 8
136 | 2
137 | .4
138 | 4
139 | 3
140 | 1
141 | 1
142 |
143 |
144 | '''
--------------------------------------------------------------------------------
/same_python_file/priority_non_primptive.py:
--------------------------------------------------------------------------------
1 | # https://cppsecrets.com/users/1108979711510497121461151049710464115111109971051219746101100117/Python-Priority-Scheduling-Non-Pre-emptive-Algorithm-with-Different-Arrival-Time.php
2 |
3 |
4 | class Priority:
5 |
6 | def processData(self, no_of_processes):
7 | process_data = []
8 | for i in range(no_of_processes):
9 | temporary = []
10 | process_id = int(input("Enter Process ID: "))
11 |
12 | arrival_time = int(input(f"Enter Arrival Time for Process {process_id}: "))
13 |
14 | burst_time = int(input(f"Enter Burst Time for Process {process_id}: "))
15 |
16 | priority = int(input(f"Enter Priority for Process {process_id}: "))
17 |
18 | temporary.extend([process_id, arrival_time, burst_time, priority, 0])
19 | '''
20 | '0' is the state of the process. 0 means not executed and 1 means execution complete
21 | '''
22 | process_data.append(temporary)
23 | Priority.schedulingProcess(self, process_data)
24 |
25 |
26 | def schedulingProcess(self, process_data):
27 | start_time = []
28 | exit_time = []
29 | s_time = 0
30 | process_data.sort(key=lambda x: x[1])
31 | '''
32 | Sort processes according to the Arrival Time
33 | '''
34 | for i in range(len(process_data)):
35 | ready_queue = []
36 | temp = []
37 | normal_queue = []
38 | for j in range(len(process_data)):
39 | if (process_data[j][1] <= s_time) and (process_data[j][4] == 0):
40 | temp.extend([process_data[j][0], process_data[j][1], process_data[j][2], process_data[j][3]])
41 | ready_queue.append(temp)
42 | temp = []
43 | elif process_data[j][4] == 0:
44 | temp.extend([process_data[j][0], process_data[j][1], process_data[j][2], process_data[j][3]])
45 | normal_queue.append(temp)
46 | temp = []
47 | if len(ready_queue) != 0:
48 | ready_queue.sort(key=lambda x: x[3], reverse=True)
49 | '''
50 | Sort the processes according to the Priority, considering Higher the Value, Higher the Priority
51 | '''
52 | start_time.append(s_time)
53 | s_time = s_time + ready_queue[0][2]
54 | e_time = s_time
55 | exit_time.append(e_time)
56 | for k in range(len(process_data)):
57 | if process_data[k][0] == ready_queue[0][0]:
58 | break
59 | process_data[k][4] = 1
60 | process_data[k].append(e_time)
61 | elif len(ready_queue) == 0:
62 | if s_time < normal_queue[0][1]:
63 | s_time = normal_queue[0][1]
64 | start_time.append(s_time)
65 | s_time = s_time + normal_queue[0][2]
66 | e_time = s_time
67 | exit_time.append(e_time)
68 | for k in range(len(process_data)):
69 | if process_data[k][0] == normal_queue[0][0]:
70 | break
71 | process_data[k][4] = 1
72 | process_data[k].append(e_time)
73 | t_time = Priority.calculateTurnaroundTime(self, process_data)
74 | w_time = Priority.calculateWaitingTime(self, process_data)
75 | Priority.printData(self, process_data, t_time, w_time)
76 |
77 |
78 | def calculateTurnaroundTime(self, process_data):
79 | total_turnaround_time = 0
80 | for i in range(len(process_data)):
81 | turnaround_time = process_data[i][5] - process_data[i][1]
82 | '''
83 | turnaround_time = completion_time - arrival_time
84 | '''
85 | total_turnaround_time = total_turnaround_time + turnaround_time
86 | process_data[i].append(turnaround_time)
87 | average_turnaround_time = total_turnaround_time / len(process_data)
88 | '''
89 | average_turnaround_time = total_turnaround_time / no_of_processes
90 | '''
91 | return average_turnaround_time
92 |
93 |
94 | def calculateWaitingTime(self, process_data):
95 | total_waiting_time = 0
96 | for i in range(len(process_data)):
97 | waiting_time = process_data[i][6] - process_data[i][2]
98 | '''
99 | waiting_time = turnaround_time - burst_time
100 | '''
101 | total_waiting_time = total_waiting_time + waiting_time
102 | process_data[i].append(waiting_time)
103 | average_waiting_time = total_waiting_time / len(process_data)
104 | '''
105 | average_waiting_time = total_waiting_time / no_of_processes
106 | '''
107 | return average_waiting_time
108 |
109 |
110 | def printData(self, process_data, average_turnaround_time, average_waiting_time):
111 | process_data.sort(key=lambda x: x[0])
112 | '''
113 | Sort processes according to the Process ID
114 | '''
115 | print("Process_ID Arrival_Time Burst_Time Priority Completed Completion_Time Turnaround_Time Waiting_Time")
116 | for i in range(len(process_data)):
117 | for j in range(len(process_data[i])):
118 | print(process_data[i][j], end="\t\t")
119 | print()
120 | print(f'Average Turnaround Time: {average_turnaround_time}')
121 |
122 | print(f'Average Waiting Time: {average_waiting_time}')
123 |
124 |
125 | if __name__ == "__main__":
126 | no_of_processes = int(input("Enter number of processes: "))
127 | priority = Priority()
128 | priority.processData(no_of_processes)
129 |
130 | ### highest priority == higher number
131 |
132 |
133 | '''
134 | 5
135 | 1
136 | 0
137 | 2
138 | 2
139 | 2
140 | 0
141 | 1
142 | 1
143 | 3
144 | 0
145 | 8
146 | 4
147 | 4
148 | 0
149 | 4
150 | 2
151 | 5
152 | 0
153 | 5
154 | 3
155 |
156 |
157 |
158 |
159 | '''
160 |
161 |
--------------------------------------------------------------------------------
/same_python_file/shortest_job_first_preemptive_SRT.py:
--------------------------------------------------------------------------------
1 | class SJF:
2 |
3 | def processData(self, no_of_processes):
4 | process_data = []
5 | for i in range(no_of_processes):
6 | temporary = []
7 | process_id = int(input("Enter Process ID: "))
8 |
9 | arrival_time = float(input(f"Enter Arrival Time for Process {process_id}: "))
10 |
11 | burst_time = float(input(f"Enter Burst Time for Process {process_id}: "))
12 |
13 | temporary.extend([process_id, arrival_time, burst_time, 0, burst_time])
14 | '''
15 | '0' is the state of the process. 0 means not executed and 1 means execution complete
16 | '''
17 | process_data.append(temporary)
18 | SJF.schedulingProcess(self, process_data)
19 |
20 | def schedulingProcess(self, process_data):
21 | start_time = []
22 | exit_time = []
23 | s_time = 0
24 | sequence_of_process = []
25 | process_data.sort(key=lambda x: x[1])
26 | '''
27 | Sort processes according to the Arrival Time
28 | '''
29 |         while True:
30 | ready_queue = []
31 | normal_queue = []
32 | temp = []
33 | for i in range(len(process_data)):
34 | if process_data[i][1] <= s_time and process_data[i][3] == 0:
35 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])
36 | ready_queue.append(temp)
37 | temp = []
38 | elif process_data[i][3] == 0:
39 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])
40 | normal_queue.append(temp)
41 | temp = []
42 | if len(ready_queue) == 0 and len(normal_queue) == 0:
43 | break
44 | if len(ready_queue) != 0:
45 | ready_queue.sort(key=lambda x: x[2])
46 | '''
47 | Sort processes according to Burst Time
48 | '''
49 | start_time.append(s_time)
50 | s_time = s_time + 1
51 | e_time = s_time
52 | exit_time.append(e_time)
53 | sequence_of_process.append(ready_queue[0][0])
54 | for k in range(len(process_data)):
55 | if process_data[k][0] == ready_queue[0][0]:
56 | break
57 | process_data[k][2] = process_data[k][2] - 1
58 | if process_data[k][2] == 0: #If Burst Time of a process is 0, it means the process is completed
59 | process_data[k][3] = 1
60 | process_data[k].append(e_time)
61 | if len(ready_queue) == 0:
62 | if s_time < normal_queue[0][1]:
63 | s_time = normal_queue[0][1]
64 | start_time.append(s_time)
65 | s_time = s_time + 1
66 | e_time = s_time
67 | exit_time.append(e_time)
68 | sequence_of_process.append(normal_queue[0][0])
69 | for k in range(len(process_data)):
70 | if process_data[k][0] == normal_queue[0][0]:
71 | break
72 | process_data[k][2] = process_data[k][2] - 1
73 | if process_data[k][2] == 0: #If Burst Time of a process is 0, it means the process is completed
74 | process_data[k][3] = 1
75 | process_data[k].append(e_time)
76 | t_time = SJF.calculateTurnaroundTime(self, process_data)
77 | w_time = SJF.calculateWaitingTime(self, process_data)
78 | SJF.printData(self, process_data, t_time, w_time, sequence_of_process)
79 |
80 | def calculateTurnaroundTime(self, process_data):
81 | total_turnaround_time = 0
82 | for i in range(len(process_data)):
83 | turnaround_time = process_data[i][5] - process_data[i][1]
84 | '''
85 | turnaround_time = completion_time - arrival_time
86 | '''
87 | total_turnaround_time = total_turnaround_time + turnaround_time
88 | process_data[i].append(turnaround_time)
89 | average_turnaround_time = total_turnaround_time / len(process_data)
90 | '''
91 | average_turnaround_time = total_turnaround_time / no_of_processes
92 | '''
93 | return average_turnaround_time
94 |
95 | def calculateWaitingTime(self, process_data):
96 | total_waiting_time = 0
97 | for i in range(len(process_data)):
98 | waiting_time = process_data[i][6] - process_data[i][4]
99 | '''
100 | waiting_time = turnaround_time - burst_time
101 | '''
102 | total_waiting_time = total_waiting_time + waiting_time
103 | process_data[i].append(waiting_time)
104 | average_waiting_time = total_waiting_time / len(process_data)
105 | '''
106 | average_waiting_time = total_waiting_time / no_of_processes
107 | '''
108 | return average_waiting_time
109 |
110 | def printData(self, process_data, average_turnaround_time, average_waiting_time, sequence_of_process):
111 | process_data.sort(key=lambda x: x[0])
112 | '''
113 | Sort processes according to the Process ID
114 | '''
115 | print("Process_ID Arrival_Time Rem_Burst_Time Completed Orig_Burst_Time Completion_Time Turnaround_Time Waiting_Time")
116 |
117 | for i in range(len(process_data)):
118 | for j in range(len(process_data[i])):
119 |
120 | print(process_data[i][j], end="\t\t")
121 | print()
122 |
123 | print(f'Average Turnaround Time: {average_turnaround_time}')
124 |
125 | print(f'Average Waiting Time: {average_waiting_time}')
126 |
127 | print(f'Sequence of Process: {sequence_of_process}')
128 |
129 |
130 | if __name__ == "__main__":
131 | no_of_processes = int(input("Enter number of processes: "))
132 | sjf = SJF()
133 | sjf.processData(no_of_processes)
134 |
135 |
136 | '''
137 | 3
138 | 1
139 | 0
140 | 8
141 | 2
142 | .4
143 | 4
144 | 3
145 | 1
146 | 1
147 |
148 |
149 | '''
--------------------------------------------------------------------------------
/Final/rr.py:
--------------------------------------------------------------------------------
1 |
2 | def schedulingProcess(process_data, time_slice):
3 | start_time = []
4 | exit_time = []
5 | process_working_sequence = []
6 | ready_queue = []
7 | s_time = 0
8 | process_data.sort(key=lambda x: x[1])
9 |
10 |     while True:
11 | normal_queue = []
12 | temp = []
13 | for i in range(len(process_data)):
14 | if process_data[i][1] <= s_time and process_data[i][3] == 0:
15 | present = 0
16 | if len(ready_queue) != 0:
17 | for k in range(len(ready_queue)):
18 | if process_data[i][0] == ready_queue[k][0]:
19 | present = 1
20 |
21 | if present == 0:
22 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])
23 | ready_queue.append(temp)
24 | temp = []
25 |
26 | if len(ready_queue) != 0 and len(process_working_sequence) != 0:
27 | for k in range(len(ready_queue)):
28 | if ready_queue[k][0] == process_working_sequence[len(process_working_sequence) - 1]:
29 | ready_queue.insert((len(ready_queue) - 1), ready_queue.pop(k))
30 |
31 | elif process_data[i][3] == 0:
32 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])
33 | normal_queue.append(temp)
34 | temp = []
35 | if len(ready_queue) == 0 and len(normal_queue) == 0:
36 | break
37 |
38 | if len(ready_queue) != 0:
39 | if ready_queue[0][2] > time_slice:
40 | start_time.append(s_time)
41 | s_time = s_time + time_slice
42 | exit_time.append(s_time)
43 | process_working_sequence.append(ready_queue[0][0])
44 | for j in range(len(process_data)):
45 | if process_data[j][0] == ready_queue[0][0]:
46 | break
47 | process_data[j][2] = process_data[j][2] - time_slice
48 | ready_queue.pop(0)
49 | elif ready_queue[0][2] <= time_slice:
50 | start_time.append(s_time)
51 | s_time = s_time + ready_queue[0][2]
52 | exit_time.append(s_time)
53 | process_working_sequence.append(ready_queue[0][0])
54 | for j in range(len(process_data)):
55 | if process_data[j][0] == ready_queue[0][0]:
56 | break
57 | process_data[j][2] = 0
58 | process_data[j][3] = 1
59 | process_data[j].append(s_time)
60 | ready_queue.pop(0)
61 | elif len(ready_queue) == 0:
62 | if s_time < normal_queue[0][1]:
63 | s_time = normal_queue[0][1]
64 | if normal_queue[0][2] > time_slice:
65 | start_time.append(s_time)
66 | s_time = s_time + time_slice
67 | exit_time.append(s_time)
68 | process_working_sequence.append(normal_queue[0][0])
69 | for j in range(len(process_data)):
70 | if process_data[j][0] == normal_queue[0][0]:
71 | break
72 | process_data[j][2] = process_data[j][2] - time_slice
73 | elif normal_queue[0][2] <= time_slice:
74 | start_time.append(s_time)
75 | s_time = s_time + normal_queue[0][2]
76 | exit_time.append(s_time)
77 | process_working_sequence.append(normal_queue[0][0])
78 | for j in range(len(process_data)):
79 | if process_data[j][0] == normal_queue[0][0]:
80 | break
81 | process_data[j][2] = 0
82 | process_data[j][3] = 1
83 | process_data[j].append(s_time)
84 | t_time = calculateTurnaroundTime(process_data)
85 | w_time = calculateWaitingTime(process_data)
86 | printData(process_data, t_time, w_time, process_working_sequence)
87 |
88 | def calculateTurnaroundTime(process_data):
89 | total_turnaround_time = 0
90 | for i in range(len(process_data)):
91 | turnaround_time = process_data[i][5] - process_data[i][1]
92 | total_turnaround_time = total_turnaround_time + turnaround_time
93 | process_data[i].append(turnaround_time)
94 | average_turnaround_time = total_turnaround_time / len(process_data)
95 | return average_turnaround_time
96 |
97 | def calculateWaitingTime(process_data):
98 | total_waiting_time = 0
99 | for i in range(len(process_data)):
100 | waiting_time = process_data[i][6] - process_data[i][4]
101 | total_waiting_time = total_waiting_time + waiting_time
102 | process_data[i].append(waiting_time)
103 | average_waiting_time = total_waiting_time / len(process_data)
104 | return average_waiting_time
105 |
106 | def printData(process_data, average_turnaround_time, average_waiting_time, process_working_sequence):
107 | process_data.sort(key=lambda x: x[0])
108 | print("Process_ID\tArrival_Time\tRem_Burst_Time\tCompleted\tOriginal_Burst_Time\tExit_Time\tTurnaround_Time\tWaiting_Time")
109 |
110 | for i in range(len(process_data)):
111 | for j in range(len(process_data[i])):
112 |
113 | print(process_data[i][j], end=" ")
114 | print()
115 |
116 | print(f'Average Turnaround Time: {average_turnaround_time}')
117 | print(f'Average Waiting Time: {average_waiting_time}')
118 | print(f'Sequence of Processes: {process_working_sequence}')
119 |
120 |
121 | no_of_processes = int(input("Enter number of processes: "))
122 | process_data = []
123 |
124 | print("\nEnter Process Arrival Time and Burst Time")
125 | for i in range(no_of_processes):
126 | temporary = []
127 | arrival_time = float(input(f"P[{i+1}] Arrival Time: "))
128 | burst_time = float(input(f"P[{i+1}] Burst Time: "))
129 | temporary.extend([i+1, arrival_time, burst_time, 0, burst_time])
130 | process_data.append(temporary)
131 |
132 | time_slice = int(input("Enter Time Slice: "))
133 | schedulingProcess(process_data , time_slice)
134 |
135 | '''
136 |
137 | 5
138 | 0
139 | 2
140 | 0
141 | 1
142 | 0
143 | 8
144 | 0
145 | 4
146 | 0
147 | 5
148 |
149 |
150 |
151 | '''
--------------------------------------------------------------------------------
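
For comparison with the queue handling above, here is a compact round-robin sketch (not part of the repository) built on collections.deque. It reuses the burst times from the docstring sample (all arriving at time 0) but assumes a time slice of 2, since the docstring leaves the time slice unspecified:

```python
from collections import deque

remaining = {1: 2, 2: 1, 3: 8, 4: 4, 5: 5}   # pid -> burst time (docstring sample, all arrive at 0)
quantum = 2                                  # assumed time slice; the docstring does not give one
queue, t, finish = deque(remaining), 0, {}
while queue:
    pid = queue.popleft()
    run = min(quantum, remaining[pid])       # run one quantum or until the job finishes
    t += run
    remaining[pid] -= run
    if remaining[pid] == 0:
        finish[pid] = t                      # record completion time
    else:
        queue.append(pid)                    # back to the tail of the ready queue
print(finish)   # {1: 2, 2: 3, 4: 13, 5: 18, 3: 20}
```
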
/same_python_file/priority_primptive.py:
--------------------------------------------------------------------------------
1 | # https://cppsecrets.com/users/1108979711510497121461151049710464115111109971051219746101100117/Python-Priority-Scheduling-Preemeptive-Algorithm-with-Different-Arrival-Time.php
2 |
3 | class Priority:
4 |
5 | def processData(self, no_of_processes):
6 | process_data = []
7 | for i in range(no_of_processes):
8 | temporary = []
9 | process_id = int(input("Enter Process ID: "))
10 |
11 | arrival_time = int(input(f"Enter Arrival Time for Process {process_id}: "))
12 |
13 | burst_time = int(input(f"Enter Burst Time for Process {process_id}: "))
14 |
15 | priority = int(input(f"Enter Priority for Process {process_id}: "))
16 |
17 | temporary.extend([process_id, arrival_time, burst_time, priority, 0, burst_time])
18 | '''
19 | '0' is the state of the process. 0 means not executed and 1 means execution complete
20 | '''
21 | process_data.append(temporary)
22 | Priority.schedulingProcess(self, process_data)
23 |
24 | def schedulingProcess(self, process_data):
25 | start_time = []
26 | exit_time = []
27 | s_time = 0
28 | sequence_of_process = []
29 | process_data.sort(key=lambda x: x[1])
30 | '''
31 | Sort processes according to the Arrival Time
32 | '''
33 | while 1:
34 | ready_queue = []
35 | normal_queue = []
36 | temp = []
37 | for i in range(len(process_data)):
38 | if process_data[i][1] <= s_time and process_data[i][4] == 0:
39 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][3],
40 | process_data[i][5]])
41 | ready_queue.append(temp)
42 | temp = []
43 | elif process_data[i][4] == 0:
44 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4],
45 | process_data[i][5]])
46 | normal_queue.append(temp)
47 | temp = []
48 | if len(ready_queue) == 0 and len(normal_queue) == 0:
49 | break
50 | if len(ready_queue) != 0:
51 | ready_queue.sort(key=lambda x: x[3], reverse=True)
52 | start_time.append(s_time)
53 | s_time = s_time + 1
54 | e_time = s_time
55 | exit_time.append(e_time)
56 | sequence_of_process.append(ready_queue[0][0])
57 | for k in range(len(process_data)):
58 | if process_data[k][0] == ready_queue[0][0]:
59 | break
60 | process_data[k][2] = process_data[k][2] - 1
61 | if process_data[k][2] == 0: #if burst time is zero, it means process is completed
62 | process_data[k][4] = 1
63 | process_data[k].append(e_time)
64 | if len(ready_queue) == 0:
65 | normal_queue.sort(key=lambda x: x[1])
66 | if s_time < normal_queue[0][1]:
67 | s_time = normal_queue[0][1]
68 | start_time.append(s_time)
69 | s_time = s_time + 1
70 | e_time = s_time
71 | exit_time.append(e_time)
72 | sequence_of_process.append(normal_queue[0][0])
73 | for k in range(len(process_data)):
74 | if process_data[k][0] == normal_queue[0][0]:
75 | break
76 | process_data[k][2] = process_data[k][2] - 1
77 | if process_data[k][2] == 0: #if burst time is zero, it means process is completed
78 | process_data[k][4] = 1
79 | process_data[k].append(e_time)
80 | t_time = Priority.calculateTurnaroundTime(self, process_data)
81 | w_time = Priority.calculateWaitingTime(self, process_data)
82 | Priority.printData(self, process_data, t_time, w_time, sequence_of_process)
83 |
84 | def calculateTurnaroundTime(self, process_data):
85 | total_turnaround_time = 0
86 | for i in range(len(process_data)):
87 | # turnaround_time = process_data[i][6] - process_data[i][5]
88 | turnaround_time = process_data[i][6] - process_data[i][1]
89 | '''
90 | turnaround_time = completion_time - arrival_time
91 | '''
92 | total_turnaround_time = total_turnaround_time + turnaround_time
93 | process_data[i].append(turnaround_time)
94 | average_turnaround_time = total_turnaround_time / len(process_data)
95 | '''
96 | average_turnaround_time = total_turnaround_time / no_of_processes
97 | '''
98 | return average_turnaround_time
99 |
100 | def calculateWaitingTime(self, process_data):
101 | total_waiting_time = 0
102 | for i in range(len(process_data)):
103 |             # indices: [7] = turnaround time (appended above), [5] = original burst time
104 |             waiting_time = process_data[i][7] - process_data[i][5]
105 | '''
106 | waiting_time = turnaround_time - burst_time
107 | '''
108 | total_waiting_time = total_waiting_time + waiting_time
109 | process_data[i].append(waiting_time)
110 | average_waiting_time = total_waiting_time / len(process_data)
111 | '''
112 | average_waiting_time = total_waiting_time / no_of_processes
113 | '''
114 | return average_waiting_time
115 |
116 | def printData(self, process_data, average_turnaround_time, average_waiting_time, sequence_of_process):
117 | process_data.sort(key=lambda x: x[0])
118 | '''
119 | Sort processes according to the Process ID
120 | '''
121 | print("Process_ID Arrival_Time Rem_Burst_Time Priority Completed Orig_Burst_Time Completion_Time Turnaround_Time Waiting_Time")
122 | for i in range(len(process_data)):
123 | for j in range(len(process_data[i])):
124 |
125 | print(process_data[i][j], end=" ")
126 | print()
127 |
128 | print(f'Average Turnaround Time: {average_turnaround_time}')
129 |
130 | print(f'Average Waiting Time: {average_waiting_time}')
131 |
132 | print(f'Sequence of Process: {sequence_of_process}')
133 |
134 |
135 | if __name__ == "__main__":
136 | no_of_processes = int(input("Enter number of processes: "))
137 | priority = Priority()
138 | priority.processData(no_of_processes)
139 |
140 |
141 | ### highest priority == higher number
142 |
143 |
144 | '''
145 | 5
146 | 1
147 | 0
148 | 2
149 | 2
150 | 2
151 | 0
152 | 1
153 | 1
154 | 3
155 | 0
156 | 8
157 | 4
158 | 4
159 | 0
160 | 4
161 | 2
162 | 5
163 | 0
164 | 5
165 | 3
166 |
167 |
168 |
169 |
170 | '''
171 |
172 |
173 |
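174 | # ------------------------------------------------------------------
175 | # Illustrative sketch (not part of the original script): a compact,
176 | # non-interactive version of the same preemptive-priority idea on
177 | # hard-coded processes, so the per-time-unit selection logic can be
178 | # checked without typing input. Tuples are (pid, arrival, burst,
179 | # priority); a higher number means a higher priority, as above.
180 | # ------------------------------------------------------------------
181 | def preemptive_priority_demo(processes):
182 |     remaining = {pid: burst for pid, _, burst, _ in processes}
183 |     completion = {}
184 |     t = 0
185 |     while remaining:
186 |         # processes that have arrived and still need CPU time
187 |         ready = [p for p in processes if p[0] in remaining and p[1] <= t]
188 |         if not ready:
189 |             t += 1
190 |             continue
191 |         # run the highest-priority ready process for one time unit
192 |         pid = max(ready, key=lambda p: p[3])[0]
193 |         remaining[pid] -= 1
194 |         t += 1
195 |         if remaining[pid] == 0:
196 |             completion[pid] = t
197 |             del remaining[pid]
198 |     for pid, arrival, burst, _ in processes:
199 |         tat = completion[pid] - arrival   # turnaround = completion - arrival
200 |         wt = tat - burst                  # waiting = turnaround - burst
201 |         print(f"P{pid}: completion={completion[pid]}, turnaround={tat}, waiting={wt}")
202 | 
203 | 
204 | # Example with the sample input listed above (uncomment to run):
205 | # preemptive_priority_demo([(1, 0, 2, 2), (2, 0, 1, 1), (3, 0, 8, 4), (4, 0, 4, 2), (5, 0, 5, 3)])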
--------------------------------------------------------------------------------
/deadlock_detection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "def is_safe_state(available, max_claim, allocation):\n",
10 | " # Get the number of resources and processes\n",
11 | " num_resources = len(available)\n",
12 | " num_processes = len(max_claim)\n",
13 | "\n",
14 | " # Create a copy of available resources (work) and initialize finish array\n",
15 | " work = available.copy()\n",
16 | " finish = [False] * num_processes\n",
17 | "\n",
18 | " while True:\n",
19 | " found = False\n",
20 | " # Iterate through each process\n",
21 | " for i in range(num_processes):\n",
22 | " # Check if the process is not finished and if it can be executed\n",
23 | " if not finish[i] and all(work[j] >= max_claim[i][j] - allocation[i][j] for j in range(num_resources)):\n",
24 | " found = True\n",
25 | " # Mark the process as finished\n",
26 | " finish[i] = True\n",
27 | " # Add the allocated resources to the work vector\n",
28 | " work = [work[j] + allocation[i][j] for j in range(num_resources)]\n",
29 | " break\n",
30 | "\n",
31 | " # If no process can be executed, break out of the loop\n",
32 | " if not found:\n",
33 | " break\n",
34 | "\n",
35 | " # Check if all processes have finished (i.e., system is in a safe state)\n",
36 | " return all(finish)\n",
37 | "\n",
38 | "def deadlock_detection(available, max_claim, allocation):\n",
39 | " # Check if the current state is safe or in a deadlock\n",
40 | " if is_safe_state(available, max_claim, allocation):\n",
41 | " print(\"The system is in a safe state. No deadlock detected.\")\n",
42 | " else:\n",
43 | " print(\"Deadlock detected. The system is in an unsafe state.\")\n",
44 | "\n",
45 | "if __name__ == \"__main__\":\n",
46 | " # case 1\n",
47 | " # Define the available resources, maximum claim of each process, and current allocation\n",
48 | " available_resources = [3, 3, 2]\n",
49 | " max_claim_per_process = [\n",
50 | " [7, 5, 3],\n",
51 | " [3, 2, 2],\n",
52 | " [9, 0, 2],\n",
53 | " [2, 2, 2],\n",
54 | " [4, 3, 3]\n",
55 | " ]\n",
56 | " current_allocation = [\n",
57 | " [0, 1, 0],\n",
58 | " [2, 0, 0],\n",
59 | " [3, 0, 2],\n",
60 | " [2, 1, 1],\n",
61 | " [0, 0, 2]\n",
62 | " ]\n",
63 | "\n",
64 | " # Perform deadlock detection\n",
65 | " deadlock_detection(available_resources, max_claim_per_process, current_allocation)\n"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "# case 2\n",
75 | "if __name__ == \"__main__\":\n",
76 | " # Define the deadlock scenario\n",
77 | " available_resources = [2, 2, 2]\n",
78 | " max_claim_per_process = [\n",
79 | " [3, 2, 2],\n",
80 | " [1, 2, 4],\n",
81 | " [4, 4, 3],\n",
82 | " [2, 3, 2]\n",
83 | " ]\n",
84 | " current_allocation = [\n",
85 | " [0, 1, 0],\n",
86 | " [0, 0, 1],\n",
87 | " [2, 1, 1],\n",
88 | " [1, 0, 0]\n",
89 | " ]\n",
90 | "\n",
91 | " # Perform deadlock detection\n",
92 | " deadlock_detection(available_resources, max_claim_per_process, current_allocation)\n"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {},
98 | "source": [
99 | "### Alternative 2\n",
100 | "With input"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "def input_data():\n",
110 | " global n, r, max_claim, allocation, avail\n",
111 | "\n",
112 | " n = int(input(\"Enter the number of processes: \"))\n",
113 | " r = int(input(\"Enter the number of resources: \"))\n",
114 | "\n",
115 | " print(\"Enter the Maximum Claim Matrix:\")\n",
116 | " max_claim = []\n",
117 | " for i in range(n):\n",
118 | " row = list(map(int, input().split()))\n",
119 | " max_claim.append(row)\n",
120 | "\n",
121 | " print(\"Enter the Allocation Matrix:\")\n",
122 | " allocation = []\n",
123 | " for i in range(n):\n",
124 | " row = list(map(int, input().split()))\n",
125 | " allocation.append(row)\n",
126 | "\n",
127 | " print(\"Enter the Available Resources:\")\n",
128 | " avail = list(map(int, input().split()))\n",
129 | "\n",
130 | "\n",
131 | "def show_data():\n",
132 | " global n, r, max_claim, allocation, avail\n",
133 | "\n",
134 | " print(\"Process\\tAllocation\\tMax\\t\\tAvailable\")\n",
135 | " for i in range(n):\n",
136 | " print(f\"P{i + 1}\\t\\t{allocation[i]}\\t\\t{max_claim[i]}\\t\\t{avail}\")\n",
137 | "\n",
138 | "\n",
139 | "def is_safe_state():\n",
140 | " global n, r, max_claim, allocation, avail\n",
141 | "\n",
142 | " work = avail.copy()\n",
143 | " finish = [False] * n\n",
144 | "\n",
145 | " while True:\n",
146 | " found = False\n",
147 | " for i in range(n):\n",
148 | " if not finish[i] and all(work[j] >= max_claim[i][j] - allocation[i][j] for j in range(r)):\n",
149 | " found = True\n",
150 | " finish[i] = True\n",
151 | " work = [work[j] + allocation[i][j] for j in range(r)]\n",
152 | " break\n",
153 | "\n",
154 | " if not found:\n",
155 | " break\n",
156 | "\n",
157 | " return all(finish)\n",
158 | "\n",
159 | "\n",
160 | "def deadlock_detection():\n",
161 | " if is_safe_state():\n",
162 | " print(\"The system is in a safe state. No deadlock detected.\")\n",
163 | " else:\n",
164 | " print(\"Deadlock detected. The system is in an unsafe state.\")\n",
165 | "\n",
166 | "\n",
167 | "if __name__ == \"__main__\":\n",
168 | " print(\"********** Deadlock Detection Algo ************\")\n",
169 | " input_data()\n",
170 | " show_data()\n",
171 | " deadlock_detection()\n"
172 | ]
173 | }
174 | ],
175 | "metadata": {
176 | "kernelspec": {
177 | "display_name": "Python 3",
178 | "language": "python",
179 | "name": "python3"
180 | },
181 | "language_info": {
182 | "codemirror_mode": {
183 | "name": "ipython",
184 | "version": 3
185 | },
186 | "file_extension": ".py",
187 | "mimetype": "text/x-python",
188 | "name": "python",
189 | "nbconvert_exporter": "python",
190 | "pygments_lexer": "ipython3",
191 | "version": "3.11.0"
192 | },
193 | "orig_nbformat": 4
194 | },
195 | "nbformat": 4,
196 | "nbformat_minor": 2
197 | }
198 |
--------------------------------------------------------------------------------
/shortest_job_first_non_preemptive.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "class SJF:\n",
10 | "\n",
11 | " def processData(self, no_of_processes):\n",
12 | " process_data = []\n",
13 | " for i in range(no_of_processes):\n",
14 | " temporary = []\n",
15 | " process_id = int(input(\"Enter Process ID: \"))\n",
16 | "\n",
17 | " arrival_time = float(input(f\"Enter Arrival Time for Process {process_id}: \"))\n",
18 | "\n",
19 | " burst_time = float(input(f\"Enter Burst Time for Process {process_id}: \"))\n",
20 | " temporary.extend([process_id, arrival_time, burst_time, 0])\n",
21 | " '''\n",
22 | " '0' is the state of the process. 0 means not executed and 1 means execution complete\n",
23 | " '''\n",
24 | " process_data.append(temporary)\n",
25 | " SJF.schedulingProcess(self, process_data)\n",
26 | "\n",
27 | " def schedulingProcess(self, process_data):\n",
28 | " start_time = []\n",
29 | " exit_time = []\n",
30 | " s_time = 0\n",
31 | " process_data.sort(key=lambda x: x[1])\n",
32 | " '''\n",
33 | " Sort processes according to the Arrival Time\n",
34 | " '''\n",
35 | " for i in range(len(process_data)):\n",
36 | " ready_queue = []\n",
37 | " temp = []\n",
38 | " normal_queue = []\n",
39 | "\n",
40 | " for j in range(len(process_data)):\n",
41 | " if (process_data[j][1] <= s_time) and (process_data[j][3] == 0):\n",
42 | " temp.extend([process_data[j][0], process_data[j][1], process_data[j][2]])\n",
43 | " ready_queue.append(temp)\n",
44 | " temp = []\n",
45 | " elif process_data[j][3] == 0:\n",
46 | " temp.extend([process_data[j][0], process_data[j][1], process_data[j][2]])\n",
47 | " normal_queue.append(temp)\n",
48 | " temp = []\n",
49 | "\n",
50 | " if len(ready_queue) != 0:\n",
51 | " ready_queue.sort(key=lambda x: x[2])\n",
52 | " '''\n",
53 | " Sort the processes according to the Burst Time\n",
54 | " '''\n",
55 | " start_time.append(s_time)\n",
56 | " s_time = s_time + ready_queue[0][2]\n",
57 | " e_time = s_time\n",
58 | " exit_time.append(e_time)\n",
59 | " for k in range(len(process_data)):\n",
60 | " if process_data[k][0] == ready_queue[0][0]:\n",
61 | " break\n",
62 | " process_data[k][3] = 1\n",
63 | " process_data[k].append(e_time)\n",
64 | "\n",
65 | " elif len(ready_queue) == 0:\n",
66 | " if s_time < normal_queue[0][1]:\n",
67 | " s_time = normal_queue[0][1]\n",
68 | " start_time.append(s_time)\n",
69 | " s_time = s_time + normal_queue[0][2]\n",
70 | " e_time = s_time\n",
71 | " exit_time.append(e_time)\n",
72 | " for k in range(len(process_data)):\n",
73 | " if process_data[k][0] == normal_queue[0][0]:\n",
74 | " break\n",
75 | " process_data[k][3] = 1\n",
76 | " process_data[k].append(e_time)\n",
77 | "\n",
78 | " t_time = SJF.calculateTurnaroundTime(self, process_data)\n",
79 | " w_time = SJF.calculateWaitingTime(self, process_data)\n",
80 | " SJF.printData(self, process_data, t_time, w_time)\n",
81 | "\n",
82 | "\n",
83 | " def calculateTurnaroundTime(self, process_data):\n",
84 | " total_turnaround_time = 0\n",
85 | " for i in range(len(process_data)):\n",
86 | " turnaround_time = process_data[i][4] - process_data[i][1]\n",
87 | " '''\n",
88 | " turnaround_time = completion_time - arrival_time\n",
89 | " '''\n",
90 | " total_turnaround_time = total_turnaround_time + turnaround_time\n",
91 | " process_data[i].append(turnaround_time)\n",
92 | " average_turnaround_time = total_turnaround_time / len(process_data)\n",
93 | " '''\n",
94 | " average_turnaround_time = total_turnaround_time / no_of_processes\n",
95 | " '''\n",
96 | " return average_turnaround_time\n",
97 | "\n",
98 | "\n",
99 | " def calculateWaitingTime(self, process_data):\n",
100 | " total_waiting_time = 0\n",
101 | " for i in range(len(process_data)):\n",
102 | " waiting_time = process_data[i][5] - process_data[i][2]\n",
103 | " '''\n",
104 | " waiting_time = turnaround_time - burst_time\n",
105 | " '''\n",
106 | " total_waiting_time = total_waiting_time + waiting_time\n",
107 | " process_data[i].append(waiting_time)\n",
108 | " average_waiting_time = total_waiting_time / len(process_data)\n",
109 | " '''\n",
110 | " average_waiting_time = total_waiting_time / no_of_processes\n",
111 | " '''\n",
112 | " return average_waiting_time\n",
113 | "\n",
114 | "\n",
115 | " def printData(self, process_data, average_turnaround_time, average_waiting_time):\n",
116 | " process_data.sort(key=lambda x: x[0])\n",
117 | " '''\n",
118 | " Sort processes according to the Process ID\n",
119 | " '''\n",
120 | " print(\"Process_ID Arrival_Time Burst_Time Completed Completion_Time Turnaround_Time Waiting_Time\")\n",
121 | "\n",
122 | " for i in range(len(process_data)):\n",
123 | " for j in range(len(process_data[i])):\n",
124 | "\n",
125 | " print(process_data[i][j], end=\"\t\t\")\n",
126 | " print()\n",
127 | "\n",
128 | " print(f'Average Turnaround Time: {average_turnaround_time}')\n",
129 | "\n",
130 | " print(f'Average Waiting Time: {average_waiting_time}')\n",
131 | "\n",
132 | "\n",
133 | "if __name__ == \"__main__\":\n",
134 | " no_of_processes = int(input(\"Enter number of processes: \"))\n",
135 | " sjf = SJF()\n",
136 | " sjf.processData(no_of_processes)\n",
137 | "\n",
138 | "\n",
139 | "'''\n",
140 | "3\n",
141 | "1\n",
142 | "0\n",
143 | "8\n",
144 | "2\n",
145 | ".4\n",
146 | "4\n",
147 | "3\n",
148 | "1\n",
149 | "1\n",
150 | "\n",
151 | "\n",
152 | "'''"
153 | ]
154 | }
155 | ],
156 | "metadata": {
157 | "kernelspec": {
158 | "display_name": "Python 3",
159 | "language": "python",
160 | "name": "python3"
161 | },
162 | "language_info": {
163 | "name": "python",
164 | "version": "3.11.0"
165 | },
166 | "orig_nbformat": 4
167 | },
168 | "nbformat": 4,
169 | "nbformat_minor": 2
170 | }
171 |
--------------------------------------------------------------------------------
/shortest_job_first_preemptive_SRT.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Works only if arrival time and burst time are integers"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": null,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "class SJF:\n",
17 | "\n",
18 | " def processData(self, no_of_processes):\n",
19 | " process_data = []\n",
20 | " for i in range(no_of_processes):\n",
21 | " temporary = []\n",
22 | " process_id = int(input(\"Enter Process ID: \"))\n",
23 | "\n",
24 | " arrival_time = float(input(f\"Enter Arrival Time for Process {process_id}: \"))\n",
25 | "\n",
26 | " burst_time = float(input(f\"Enter Burst Time for Process {process_id}: \"))\n",
27 | "\n",
28 | " temporary.extend([process_id, arrival_time, burst_time, 0, burst_time])\n",
29 | " '''\n",
30 | " '0' is the state of the process. 0 means not executed and 1 means execution complete\n",
31 | " '''\n",
32 | " process_data.append(temporary)\n",
33 | " SJF.schedulingProcess(self, process_data)\n",
34 | "\n",
35 | " def schedulingProcess(self, process_data):\n",
36 | " start_time = []\n",
37 | " exit_time = []\n",
38 | " s_time = 0\n",
39 | " sequence_of_process = []\n",
40 | " process_data.sort(key=lambda x: x[1])\n",
41 | " '''\n",
42 | " Sort processes according to the Arrival Time\n",
43 | " '''\n",
44 | " while 1:\n",
45 | " ready_queue = []\n",
46 | " normal_queue = []\n",
47 | " temp = []\n",
48 | " for i in range(len(process_data)):\n",
49 | " if process_data[i][1] <= s_time and process_data[i][3] == 0:\n",
50 | " temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])\n",
51 | " ready_queue.append(temp)\n",
52 | " temp = []\n",
53 | " elif process_data[i][3] == 0:\n",
54 | " temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])\n",
55 | " normal_queue.append(temp)\n",
56 | " temp = []\n",
57 | " if len(ready_queue) == 0 and len(normal_queue) == 0:\n",
58 | " break\n",
59 | " if len(ready_queue) != 0:\n",
60 | " ready_queue.sort(key=lambda x: x[2])\n",
61 | " '''\n",
62 | " Sort processes according to Burst Time\n",
63 | " '''\n",
64 | " start_time.append(s_time)\n",
65 | " s_time = s_time + 1\n",
66 | " e_time = s_time\n",
67 | " exit_time.append(e_time)\n",
68 | " sequence_of_process.append(ready_queue[0][0])\n",
69 | " for k in range(len(process_data)):\n",
70 | " if process_data[k][0] == ready_queue[0][0]:\n",
71 | " break\n",
72 | " process_data[k][2] = process_data[k][2] - 1\n",
73 | " if process_data[k][2] == 0: #If Burst Time of a process is 0, it means the process is completed\n",
74 | " process_data[k][3] = 1\n",
75 | " process_data[k].append(e_time)\n",
76 | " if len(ready_queue) == 0:\n",
77 | " if s_time < normal_queue[0][1]:\n",
78 | " s_time = normal_queue[0][1]\n",
79 | " start_time.append(s_time)\n",
80 | " s_time = s_time + 1\n",
81 | " e_time = s_time\n",
82 | " exit_time.append(e_time)\n",
83 | " sequence_of_process.append(normal_queue[0][0])\n",
84 | " for k in range(len(process_data)):\n",
85 | " if process_data[k][0] == normal_queue[0][0]:\n",
86 | " break\n",
87 | " process_data[k][2] = process_data[k][2] - 1\n",
88 | " if process_data[k][2] == 0: #If Burst Time of a process is 0, it means the process is completed\n",
89 | " process_data[k][3] = 1\n",
90 | " process_data[k].append(e_time)\n",
91 | " t_time = SJF.calculateTurnaroundTime(self, process_data)\n",
92 | " w_time = SJF.calculateWaitingTime(self, process_data)\n",
93 | " SJF.printData(self, process_data, t_time, w_time, sequence_of_process)\n",
94 | "\n",
95 | " def calculateTurnaroundTime(self, process_data):\n",
96 | " total_turnaround_time = 0\n",
97 | " for i in range(len(process_data)):\n",
98 | " turnaround_time = process_data[i][5] - process_data[i][1]\n",
99 | " '''\n",
100 | " turnaround_time = completion_time - arrival_time\n",
101 | " '''\n",
102 | " total_turnaround_time = total_turnaround_time + turnaround_time\n",
103 | " process_data[i].append(turnaround_time)\n",
104 | " average_turnaround_time = total_turnaround_time / len(process_data)\n",
105 | " '''\n",
106 | " average_turnaround_time = total_turnaround_time / no_of_processes\n",
107 | " '''\n",
108 | " return average_turnaround_time\n",
109 | "\n",
110 | " def calculateWaitingTime(self, process_data):\n",
111 | " total_waiting_time = 0\n",
112 | " for i in range(len(process_data)):\n",
113 | " waiting_time = process_data[i][6] - process_data[i][4]\n",
114 | " '''\n",
115 | " waiting_time = turnaround_time - burst_time\n",
116 | " '''\n",
117 | " total_waiting_time = total_waiting_time + waiting_time\n",
118 | " process_data[i].append(waiting_time)\n",
119 | " average_waiting_time = total_waiting_time / len(process_data)\n",
120 | " '''\n",
121 | " average_waiting_time = total_waiting_time / no_of_processes\n",
122 | " '''\n",
123 | " return average_waiting_time\n",
124 | "\n",
125 | " def printData(self, process_data, average_turnaround_time, average_waiting_time, sequence_of_process):\n",
126 | " process_data.sort(key=lambda x: x[0])\n",
127 | " '''\n",
128 | " Sort processes according to the Process ID\n",
129 | " '''\n",
130 | " print(\"Process_ID Arrival_Time Rem_Burst_Time Completed Orig_Burst_Time Completion_Time Turnaround_Time Waiting_Time\")\n",
131 | " \n",
132 | " for i in range(len(process_data)):\n",
133 | " for j in range(len(process_data[i])):\n",
134 | "\n",
135 | " print(process_data[i][j], end=\"\\t\\t\")\n",
136 | " print()\n",
137 | "\n",
138 | " print(f'Average Turnaround Time: {average_turnaround_time}')\n",
139 | "\n",
140 | " print(f'Average Waiting Time: {average_waiting_time}')\n",
141 | "\n",
142 | " print(f'Sequence of Process: {sequence_of_process}')\n",
143 | "\n",
144 | "\n",
145 | "if __name__ == \"__main__\":\n",
146 | " no_of_processes = int(input(\"Enter number of processes: \"))\n",
147 | " sjf = SJF()\n",
148 | " sjf.processData(no_of_processes)\n",
149 | "\n",
150 | "\n",
151 | "'''\n",
152 | "3\n",
153 | "1\n",
154 | "0\n",
155 | "8\n",
156 | "2\n",
157 | ".4\n",
158 | "4\n",
159 | "3\n",
160 | "1\n",
161 | "1\n",
162 | "\n",
163 | "\n",
164 | "'''"
165 | ]
166 | }
167 | ],
168 | "metadata": {
169 | "kernelspec": {
170 | "display_name": "Python 3",
171 | "language": "python",
172 | "name": "python3"
173 | },
174 | "language_info": {
175 | "name": "python",
176 | "version": "3.11.0"
177 | },
178 | "orig_nbformat": 4
179 | },
180 | "nbformat": 4,
181 | "nbformat_minor": 2
182 | }
183 |
--------------------------------------------------------------------------------
/priority_non_primptive.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Works only if arrival time and burst time are integers\n",
8 | "### Higher number == higher priority"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "# https://cppsecrets.com/users/1108979711510497121461151049710464115111109971051219746101100117/Python-Priority-Scheduling-Non-Pre-emptive-Algorithm-with-Different-Arrival-Time.php\n",
18 | "\n",
19 | "\n",
20 | "class Priority:\n",
21 | "\n",
22 | " def processData(self, no_of_processes):\n",
23 | " process_data = []\n",
24 | " for i in range(no_of_processes):\n",
25 | " temporary = []\n",
26 | " process_id = int(input(\"Enter Process ID: \"))\n",
27 | "\n",
28 | " arrival_time = int(input(f\"Enter Arrival Time for Process {process_id}: \"))\n",
29 | "\n",
30 | " burst_time = int(input(f\"Enter Burst Time for Process {process_id}: \"))\n",
31 | "\n",
32 | " priority = int(input(f\"Enter Priority for Process {process_id}: \"))\n",
33 | "\n",
34 | " temporary.extend([process_id, arrival_time, burst_time, priority, 0])\n",
35 | " '''\n",
36 | " '0' is the state of the process. 0 means not executed and 1 means execution complete\n",
37 | " '''\n",
38 | " process_data.append(temporary)\n",
39 | " Priority.schedulingProcess(self, process_data)\n",
40 | "\n",
41 | "\n",
42 | " def schedulingProcess(self, process_data):\n",
43 | " start_time = []\n",
44 | " exit_time = []\n",
45 | " s_time = 0\n",
46 | " process_data.sort(key=lambda x: x[1])\n",
47 | " '''\n",
48 | " Sort processes according to the Arrival Time\n",
49 | " '''\n",
50 | " for i in range(len(process_data)):\n",
51 | " ready_queue = []\n",
52 | " temp = []\n",
53 | " normal_queue = []\n",
54 | " for j in range(len(process_data)):\n",
55 | " if (process_data[j][1] <= s_time) and (process_data[j][4] == 0):\n",
56 | " temp.extend([process_data[j][0], process_data[j][1], process_data[j][2], process_data[j][3]])\n",
57 | " ready_queue.append(temp)\n",
58 | " temp = []\n",
59 | " elif process_data[j][4] == 0:\n",
60 | " temp.extend([process_data[j][0], process_data[j][1], process_data[j][2], process_data[j][3]])\n",
61 | " normal_queue.append(temp)\n",
62 | " temp = []\n",
63 | " if len(ready_queue) != 0:\n",
64 | " ready_queue.sort(key=lambda x: x[3], reverse=True)\n",
65 | " '''\n",
66 | " Sort the processes according to the Priority, considering Higher the Value, Higher the Priority\n",
67 | " '''\n",
68 | " start_time.append(s_time)\n",
69 | " s_time = s_time + ready_queue[0][2]\n",
70 | " e_time = s_time\n",
71 | " exit_time.append(e_time)\n",
72 | " for k in range(len(process_data)):\n",
73 | " if process_data[k][0] == ready_queue[0][0]:\n",
74 | " break\n",
75 | " process_data[k][4] = 1\n",
76 | " process_data[k].append(e_time)\n",
77 | " elif len(ready_queue) == 0:\n",
78 | " if s_time < normal_queue[0][1]:\n",
79 | " s_time = normal_queue[0][1]\n",
80 | " start_time.append(s_time)\n",
81 | " s_time = s_time + normal_queue[0][2]\n",
82 | " e_time = s_time\n",
83 | " exit_time.append(e_time)\n",
84 | " for k in range(len(process_data)):\n",
85 | " if process_data[k][0] == normal_queue[0][0]:\n",
86 | " break\n",
87 | " process_data[k][4] = 1\n",
88 | " process_data[k].append(e_time)\n",
89 | " t_time = Priority.calculateTurnaroundTime(self, process_data)\n",
90 | " w_time = Priority.calculateWaitingTime(self, process_data)\n",
91 | " Priority.printData(self, process_data, t_time, w_time)\n",
92 | "\n",
93 | "\n",
94 | " def calculateTurnaroundTime(self, process_data):\n",
95 | " total_turnaround_time = 0\n",
96 | " for i in range(len(process_data)):\n",
97 | " turnaround_time = process_data[i][5] - process_data[i][1]\n",
98 | " '''\n",
99 | " turnaround_time = completion_time - arrival_time\n",
100 | " '''\n",
101 | " total_turnaround_time = total_turnaround_time + turnaround_time\n",
102 | " process_data[i].append(turnaround_time)\n",
103 | " average_turnaround_time = total_turnaround_time / len(process_data)\n",
104 | " '''\n",
105 | " average_turnaround_time = total_turnaround_time / no_of_processes\n",
106 | " '''\n",
107 | " return average_turnaround_time\n",
108 | "\n",
109 | "\n",
110 | " def calculateWaitingTime(self, process_data):\n",
111 | " total_waiting_time = 0\n",
112 | " for i in range(len(process_data)):\n",
113 | " waiting_time = process_data[i][6] - process_data[i][2]\n",
114 | " '''\n",
115 | " waiting_time = turnaround_time - burst_time\n",
116 | " '''\n",
117 | " total_waiting_time = total_waiting_time + waiting_time\n",
118 | " process_data[i].append(waiting_time)\n",
119 | " average_waiting_time = total_waiting_time / len(process_data)\n",
120 | " '''\n",
121 | " average_waiting_time = total_waiting_time / no_of_processes\n",
122 | " '''\n",
123 | " return average_waiting_time\n",
124 | "\n",
125 | "\n",
126 | " def printData(self, process_data, average_turnaround_time, average_waiting_time):\n",
127 | " process_data.sort(key=lambda x: x[0])\n",
128 | " '''\n",
129 | " Sort processes according to the Process ID\n",
130 | " '''\n",
131 | " print(\"Process_ID Arrival_Time Burst_Time Priority Completed Completion_Time Turnaround_Time Waiting_Time\")\n",
132 | " for i in range(len(process_data)):\n",
133 | " for j in range(len(process_data[i])):\n",
134 | " print(process_data[i][j], end=\"\\t\\t\")\n",
135 | " print()\n",
136 | " print(f'Average Turnaround Time: {average_turnaround_time}')\n",
137 | "\n",
138 | " print(f'Average Waiting Time: {average_waiting_time}')\n",
139 | "\n",
140 | "\n",
141 | "if __name__ == \"__main__\":\n",
142 | " no_of_processes = int(input(\"Enter number of processes: \"))\n",
143 | " priority = Priority()\n",
144 | " priority.processData(no_of_processes)\n",
145 | "\n",
146 | "### highest priority == higher number \n",
147 | "\n",
148 | "\n",
149 | "'''\n",
150 | "5\n",
151 | "1\n",
152 | "0\n",
153 | "2\n",
154 | "2\n",
155 | "2\n",
156 | "0\n",
157 | "1\n",
158 | "1\n",
159 | "3\n",
160 | "0\n",
161 | "8\n",
162 | "4\n",
163 | "4\n",
164 | "0\n",
165 | "4\n",
166 | "2\n",
167 | "5\n",
168 | "0\n",
169 | "5\n",
170 | "3\n",
171 | "\n",
172 | "\n",
173 | "\n",
174 | "\n",
175 | "'''\n",
176 | "\n"
177 | ]
178 | }
179 | ],
180 | "metadata": {
181 | "kernelspec": {
182 | "display_name": "Python 3",
183 | "language": "python",
184 | "name": "python3"
185 | },
186 | "language_info": {
187 | "codemirror_mode": {
188 | "name": "ipython",
189 | "version": 3
190 | },
191 | "file_extension": ".py",
192 | "mimetype": "text/x-python",
193 | "name": "python",
194 | "nbconvert_exporter": "python",
195 | "pygments_lexer": "ipython3",
196 | "version": "3.11.0"
197 | },
198 | "orig_nbformat": 4
199 | },
200 | "nbformat": 4,
201 | "nbformat_minor": 2
202 | }
203 |
--------------------------------------------------------------------------------
/semaphore_producer_consumer.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import threading\n",
10 | "import time\n",
11 | "\n",
12 | "# Declare three semaphore variables: mutex, full, and empty.\n",
13 | "# mutex is used to ensure mutual exclusion when accessing shared variables.\n",
14 | "# full represents the number of full slots in the buffer.\n",
15 | "# empty represents the number of empty slots in the buffer.\n",
16 | "mutex = threading.Semaphore(1)\n",
17 | "full = threading.Semaphore(0)\n",
18 | "empty = threading.Semaphore(3)\n",
19 | "\n",
20 | "# Variable to keep track of the item count.\n",
21 | "x = 0\n",
22 | "\n",
23 | "# The producer function, responsible for producing items.\n",
24 | "def producer():\n",
25 | " global mutex, full, empty, x\n",
26 | "\n",
27 | " # Acquire an empty slot in the buffer.\n",
28 | " empty.acquire()\n",
29 | "\n",
30 | " # Acquire the mutex to ensure mutual exclusion when accessing shared variables.\n",
31 | " mutex.acquire()\n",
32 | "\n",
33 | " # Increment the item count.\n",
34 | " x += 1\n",
35 | "\n",
36 | " # Print a message indicating the item is produced.\n",
37 | " print(f\"Producer produces item {x}\\n\")\n",
38 | "\n",
39 | " # Release the mutex to allow other threads to access shared variables.\n",
40 | " mutex.release()\n",
41 | "\n",
42 | " # Release a full slot in the buffer.\n",
43 | " full.release()\n",
44 | "\n",
45 | "# The consumer function, responsible for consuming items.\n",
46 | "def consumer():\n",
47 | " global mutex, full, empty, x\n",
48 | "\n",
49 | " # Acquire a full slot in the buffer.\n",
50 | " full.acquire()\n",
51 | "\n",
52 | " # Acquire the mutex to ensure mutual exclusion when accessing shared variables.\n",
53 | " mutex.acquire()\n",
54 | "\n",
55 | " # Print a message indicating the item is consumed.\n",
56 | " print(f\"Consumer consumes item {x}\\n\")\n",
57 | "\n",
58 | " # Decrement the item count.\n",
59 | " x -= 1\n",
60 | "\n",
61 | " # Release the mutex to allow other threads to access shared variables.\n",
62 | " mutex.release()\n",
63 | "\n",
64 | " # Release an empty slot in the buffer.\n",
65 | " empty.release()\n",
66 | "\n",
67 | "# The main function to start the producer-consumer simulation.\n",
68 | "def main():\n",
69 | " while True:\n",
70 | " # Display the menu for user selection.\n",
71 | " print(\"1. PRODUCER\\n2. CONSUMER\\n3. EXIT\")\n",
72 | " n = int(input(\"ENTER YOUR CHOICE: \\n\"))\n",
73 | "\n",
74 | " if n == 1:\n",
75 | " # If the buffer is not full, start a new producer thread.\n",
76 | " if empty._value != 0:\n",
77 | " producer_thread = threading.Thread(target=producer)\n",
78 | " producer_thread.start()\n",
79 | " else:\n",
80 | " print(\"BUFFER IS FULL\")\n",
81 | "\n",
82 | " elif n == 2:\n",
83 | " # If the buffer is not empty, start a new consumer thread.\n",
84 | " if full._value != 0:\n",
85 | " consumer_thread = threading.Thread(target=consumer)\n",
86 | " consumer_thread.start()\n",
87 | " else:\n",
88 | " print(\"BUFFER IS EMPTY\")\n",
89 | "\n",
90 | " elif n == 3:\n",
91 | " # Exit the program.\n",
92 | " break\n",
93 | "\n",
94 | " else:\n",
95 | " print(\"Invalid choice. Please try again.\")\n",
96 | "\n",
97 | "if __name__ == \"__main__\":\n",
98 | " main()\n"
99 | ]
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {},
104 | "source": [
105 | "#### Alternative"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "import threading\n",
115 | "import time\n",
116 | "import random\n",
117 | "\n",
118 | "# Buffer size\n",
119 | "BUFFER_SIZE = 5\n",
120 | "\n",
121 | "# Semaphore to control access to the buffer\n",
122 | "mutex = threading.Semaphore(1)\n",
123 | "\n",
124 | "# Semaphore to count the empty slots in the buffer\n",
125 | "empty = threading.Semaphore(BUFFER_SIZE)\n",
126 | "\n",
127 | "# Semaphore to count the number of items in the buffer\n",
128 | "full = threading.Semaphore(0)\n",
129 | "\n",
130 | "# Buffer to store items\n",
131 | "buffer = []\n",
132 | "\n",
133 | "# The producer function, responsible for producing items\n",
134 | "def producer():\n",
135 | " for _ in range(10):\n",
136 | " # Generate a random item to be produced\n",
137 | " item = random.randint(1, 100)\n",
138 | "\n",
139 | " # Acquire an empty slot in the buffer\n",
140 | " empty.acquire()\n",
141 | "\n",
142 | " # Acquire the mutex to ensure mutual exclusion when accessing the buffer\n",
143 | " mutex.acquire()\n",
144 | "\n",
145 | " # Add the item to the buffer\n",
146 | " buffer.append(item)\n",
147 | "\n",
148 | " # Print a message indicating the item is produced and the current buffer contents\n",
149 | " print(f\"Producer: Produced item {item}. Buffer: {buffer}\")\n",
150 | "\n",
151 | " # Release the mutex to allow other threads to access the buffer\n",
152 | " mutex.release()\n",
153 | "\n",
154 | " # Release a full slot in the buffer to signal that an item is available\n",
155 | " full.release()\n",
156 | "\n",
157 | " # Introduce a random delay to simulate variable production time\n",
158 | " time.sleep(random.uniform(0.1, 0.5))\n",
159 | "\n",
160 | "# The consumer function, responsible for consuming items\n",
161 | "def consumer():\n",
162 | " for _ in range(10):\n",
163 | " # Acquire a full slot in the buffer to check if an item is available for consumption\n",
164 | " full.acquire()\n",
165 | "\n",
166 | " # Acquire the mutex to ensure mutual exclusion when accessing the buffer\n",
167 | " mutex.acquire()\n",
168 | "\n",
169 | " # Remove the first item from the buffer\n",
170 | " item = buffer.pop(0)\n",
171 | "\n",
172 | " # Print a message indicating the item is consumed and the current buffer contents\n",
173 | " print(f\"Consumer: Consumed item {item}. Buffer: {buffer}\")\n",
174 | "\n",
175 | " # Release the mutex to allow other threads to access the buffer\n",
176 | " mutex.release()\n",
177 | "\n",
178 | " # Release an empty slot in the buffer to signal that a slot is available for production\n",
179 | " empty.release()\n",
180 | "\n",
181 | " # Introduce a random delay to simulate variable consumption time\n",
182 | " time.sleep(random.uniform(0.1, 0.5))\n",
183 | "\n",
184 | "if __name__ == \"__main__\":\n",
185 | " # Create producer and consumer threads\n",
186 | " producer_thread = threading.Thread(target=producer)\n",
187 | " consumer_thread = threading.Thread(target=consumer)\n",
188 | "\n",
189 | " # Start the threads\n",
190 | " producer_thread.start()\n",
191 | " consumer_thread.start()\n",
192 | "\n",
193 | " # Wait for the threads to complete\n",
194 | " producer_thread.join()\n",
195 | " consumer_thread.join()\n",
196 | "\n",
197 | " # Print a message indicating that the simulation is completed\n",
198 | " print(\"Producer-Consumer simulation completed.\")\n"
199 | ]
200 | }
201 | ],
202 | "metadata": {
203 | "kernelspec": {
204 | "display_name": "Python 3",
205 | "language": "python",
206 | "name": "python3"
207 | },
208 | "language_info": {
209 | "codemirror_mode": {
210 | "name": "ipython",
211 | "version": 3
212 | },
213 | "file_extension": ".py",
214 | "mimetype": "text/x-python",
215 | "name": "python",
216 | "nbconvert_exporter": "python",
217 | "pygments_lexer": "ipython3",
218 | "version": "3.11.0"
219 | },
220 | "orig_nbformat": 4
221 | },
222 | "nbformat": 4,
223 | "nbformat_minor": 2
224 | }
225 |
--------------------------------------------------------------------------------
/same_python_file/round_robin.py:
--------------------------------------------------------------------------------
1 | class RoundRobin:
2 |
3 | def processData(self, no_of_processes):
4 | process_data = []
5 | for i in range(no_of_processes):
6 | temporary = []
7 | process_id = int(input("Enter Process ID: "))
8 |
9 | arrival_time = float(input(f"Enter Arrival Time for Process {process_id}: "))
10 |
11 | burst_time = float(input(f"Enter Burst Time for Process {process_id}: "))
12 |
13 | temporary.extend([process_id, arrival_time, burst_time, 0, burst_time])
14 | '''
15 | '0' is the state of the process. 0 means not executed and 1 means execution complete
16 |
17 | '''
18 | process_data.append(temporary)
19 |
20 | time_slice = int(input("Enter Time Slice: "))
21 |
22 | RoundRobin.schedulingProcess(self, process_data, time_slice)
23 |
24 | def schedulingProcess(self, process_data, time_slice):
25 | start_time = []
26 | exit_time = []
27 | executed_process = []
28 | ready_queue = []
29 | s_time = 0
30 | process_data.sort(key=lambda x: x[1])
31 | '''
32 | Sort processes according to the Arrival Time
33 | '''
34 | while 1:
35 | normal_queue = []
36 | temp = []
37 | for i in range(len(process_data)):
38 | if process_data[i][1] <= s_time and process_data[i][3] == 0:
39 | present = 0
40 | if len(ready_queue) != 0:
41 | for k in range(len(ready_queue)):
42 | if process_data[i][0] == ready_queue[k][0]:
43 | present = 1
44 | '''
45 |                         The check above verifies that the next process is not already part of the ready_queue
46 | '''
47 | if present == 0:
48 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])
49 | ready_queue.append(temp)
50 | temp = []
51 | '''
52 |                         The block above adds a process to the ready_queue only if it is not already present
53 | '''
54 | if len(ready_queue) != 0 and len(executed_process) != 0:
55 | for k in range(len(ready_queue)):
56 | if ready_queue[k][0] == executed_process[len(executed_process) - 1]:
57 | ready_queue.insert((len(ready_queue) - 1), ready_queue.pop(k))
58 | '''
59 |                         The block above makes sure that the most recently executed process is moved to the end of the ready_queue
60 | '''
61 | elif process_data[i][3] == 0:
62 | temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])
63 | normal_queue.append(temp)
64 | temp = []
65 | if len(ready_queue) == 0 and len(normal_queue) == 0:
66 | break
67 | if len(ready_queue) != 0:
68 | if ready_queue[0][2] > time_slice:
69 | '''
70 | If process has remaining burst time greater than the time slice, it will execute for a time period equal to time slice and then switch
71 | '''
72 | start_time.append(s_time)
73 | s_time = s_time + time_slice
74 | e_time = s_time
75 | exit_time.append(e_time)
76 | executed_process.append(ready_queue[0][0])
77 | for j in range(len(process_data)):
78 | if process_data[j][0] == ready_queue[0][0]:
79 | break
80 | process_data[j][2] = process_data[j][2] - time_slice
81 | ready_queue.pop(0)
82 | elif ready_queue[0][2] <= time_slice:
83 | '''
84 | If a process has a remaining burst time less than or equal to time slice, it will complete its execution
85 | '''
86 | start_time.append(s_time)
87 | s_time = s_time + ready_queue[0][2]
88 | e_time = s_time
89 | exit_time.append(e_time)
90 | executed_process.append(ready_queue[0][0])
91 | for j in range(len(process_data)):
92 | if process_data[j][0] == ready_queue[0][0]:
93 | break
94 | process_data[j][2] = 0
95 | process_data[j][3] = 1
96 | process_data[j].append(e_time)
97 | ready_queue.pop(0)
98 | elif len(ready_queue) == 0:
99 | if s_time < normal_queue[0][1]:
100 | s_time = normal_queue[0][1]
101 | if normal_queue[0][2] > time_slice:
102 | '''
103 | If process has remaining burst time greater than the time slice, it will execute for a time period equal to time slice and then switch
104 | '''
105 | start_time.append(s_time)
106 | s_time = s_time + time_slice
107 | e_time = s_time
108 | exit_time.append(e_time)
109 | executed_process.append(normal_queue[0][0])
110 | for j in range(len(process_data)):
111 | if process_data[j][0] == normal_queue[0][0]:
112 | break
113 | process_data[j][2] = process_data[j][2] - time_slice
114 | elif normal_queue[0][2] <= time_slice:
115 | '''
116 | If a process has a remaining burst time less than or equal to time slice, it will complete its execution
117 | '''
118 | start_time.append(s_time)
119 | s_time = s_time + normal_queue[0][2]
120 | e_time = s_time
121 | exit_time.append(e_time)
122 | executed_process.append(normal_queue[0][0])
123 | for j in range(len(process_data)):
124 | if process_data[j][0] == normal_queue[0][0]:
125 | break
126 | process_data[j][2] = 0
127 | process_data[j][3] = 1
128 | process_data[j].append(e_time)
129 | t_time = RoundRobin.calculateTurnaroundTime(self, process_data)
130 | w_time = RoundRobin.calculateWaitingTime(self, process_data)
131 | RoundRobin.printData(self, process_data, t_time, w_time, executed_process)
132 |
133 | def calculateTurnaroundTime(self, process_data):
134 | total_turnaround_time = 0
135 | for i in range(len(process_data)):
136 | turnaround_time = process_data[i][5] - process_data[i][1]
137 | '''
138 | turnaround_time = completion_time - arrival_time
139 | '''
140 | total_turnaround_time = total_turnaround_time + turnaround_time
141 | process_data[i].append(turnaround_time)
142 | average_turnaround_time = total_turnaround_time / len(process_data)
143 | '''
144 | average_turnaround_time = total_turnaround_time / no_of_processes
145 | '''
146 | return average_turnaround_time
147 |
148 | def calculateWaitingTime(self, process_data):
149 | total_waiting_time = 0
150 | for i in range(len(process_data)):
151 | waiting_time = process_data[i][6] - process_data[i][4]
152 | '''
153 | waiting_time = turnaround_time - burst_time
154 | '''
155 | total_waiting_time = total_waiting_time + waiting_time
156 | process_data[i].append(waiting_time)
157 | average_waiting_time = total_waiting_time / len(process_data)
158 | '''
159 | average_waiting_time = total_waiting_time / no_of_processes
160 | '''
161 | return average_waiting_time
162 |
163 | def printData(self, process_data, average_turnaround_time, average_waiting_time, executed_process):
164 | process_data.sort(key=lambda x: x[0])
165 | '''
166 | Sort processes according to the Process ID
167 | '''
168 | print("Process_ID Arrival_Time Rem_Burst_Time Completed Original_Burst_Time Completion_Time Turnaround_Time Waiting_Time")
169 | for i in range(len(process_data)):
170 | for j in range(len(process_data[i])):
171 |
172 | print(process_data[i][j], end=" ")
173 | print()
174 |
175 | print(f'Average Turnaround Time: {average_turnaround_time}')
176 |
177 | print(f'Average Waiting Time: {average_waiting_time}')
178 |
179 | print(f'Sequence of Processes: {executed_process}')
180 |
181 |
182 | if __name__ == "__main__":
183 | no_of_processes = int(input("Enter number of processes: "))
184 | rr = RoundRobin()
185 | rr.processData(no_of_processes)
186 |
187 |
188 |
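189 | # ------------------------------------------------------------------
190 | # Illustrative sketch (not part of the original class): a minimal,
191 | # non-interactive round-robin simulation on hard-coded data, using a
192 | # deque as the ready queue. Tuples are (pid, arrival, burst); it
193 | # prints the same per-process metrics as printData above, so results
194 | # can be cross-checked without typing input.
195 | # ------------------------------------------------------------------
196 | from collections import deque
197 | 
198 | def round_robin_demo(processes, time_slice):
199 |     processes = sorted(processes, key=lambda p: p[1])   # by arrival time
200 |     remaining = {pid: burst for pid, _, burst in processes}
201 |     completion = {}
202 |     queue = deque()
203 |     t = 0
204 |     admitted = 0
205 |     while remaining:
206 |         # admit every process that has arrived by time t
207 |         while admitted < len(processes) and processes[admitted][1] <= t:
208 |             queue.append(processes[admitted][0])
209 |             admitted += 1
210 |         if not queue:
211 |             t = processes[admitted][1]                   # idle until the next arrival
212 |             continue
213 |         pid = queue.popleft()
214 |         run = min(time_slice, remaining[pid])
215 |         t += run
216 |         remaining[pid] -= run
217 |         # processes that arrived while pid was running go in before pid is re-queued
218 |         while admitted < len(processes) and processes[admitted][1] <= t:
219 |             queue.append(processes[admitted][0])
220 |             admitted += 1
221 |         if remaining[pid] == 0:
222 |             completion[pid] = t
223 |             del remaining[pid]
224 |         else:
225 |             queue.append(pid)
226 |     for pid, arrival, burst in processes:
227 |         tat = completion[pid] - arrival   # turnaround = completion - arrival
228 |         wt = tat - burst                  # waiting = turnaround - burst
229 |         print(f"P{pid}: completion={completion[pid]}, turnaround={tat}, waiting={wt}")
230 | 
231 | 
232 | # Example (uncomment to run):
233 | # round_robin_demo([(1, 0, 5), (2, 1, 4), (3, 2, 2), (4, 4, 1)], time_slice=2)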
--------------------------------------------------------------------------------
/priority_primptive.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Works only if arrival time and burst time are integers\n",
8 | "### Higher number == higher priority"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "# https://cppsecrets.com/users/1108979711510497121461151049710464115111109971051219746101100117/Python-Priority-Scheduling-Preemeptive-Algorithm-with-Different-Arrival-Time.php\n",
18 | "\n",
19 | "class Priority:\n",
20 | "\n",
21 | " def processData(self, no_of_processes):\n",
22 | " process_data = []\n",
23 | " for i in range(no_of_processes):\n",
24 | " temporary = []\n",
25 | " process_id = int(input(\"Enter Process ID: \"))\n",
26 | "\n",
27 | " arrival_time = int(input(f\"Enter Arrival Time for Process {process_id}: \"))\n",
28 | "\n",
29 | " burst_time = int(input(f\"Enter Burst Time for Process {process_id}: \"))\n",
30 | "\n",
31 | " priority = int(input(f\"Enter Priority for Process {process_id}: \"))\n",
32 | "\n",
33 | " temporary.extend([process_id, arrival_time, burst_time, priority, 0, burst_time])\n",
34 | " '''\n",
35 | " '0' is the state of the process. 0 means not executed and 1 means execution complete\n",
36 | " '''\n",
37 | " process_data.append(temporary)\n",
38 | " Priority.schedulingProcess(self, process_data)\n",
39 | "\n",
40 | " def schedulingProcess(self, process_data):\n",
41 | " start_time = []\n",
42 | " exit_time = []\n",
43 | " s_time = 0\n",
44 | " sequence_of_process = []\n",
45 | " process_data.sort(key=lambda x: x[1])\n",
46 | " '''\n",
47 | " Sort processes according to the Arrival Time\n",
48 | " '''\n",
49 | " while 1:\n",
50 | " ready_queue = []\n",
51 | " normal_queue = []\n",
52 | " temp = []\n",
53 | " for i in range(len(process_data)):\n",
54 | " if process_data[i][1] <= s_time and process_data[i][4] == 0:\n",
55 | " temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][3],\n",
56 | " process_data[i][5]])\n",
57 | " ready_queue.append(temp)\n",
58 | " temp = []\n",
59 | " elif process_data[i][4] == 0:\n",
60 | " temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4],\n",
61 | " process_data[i][5]])\n",
62 | " normal_queue.append(temp)\n",
63 | " temp = []\n",
64 | " if len(ready_queue) == 0 and len(normal_queue) == 0:\n",
65 | " break\n",
66 | " if len(ready_queue) != 0:\n",
67 | " ready_queue.sort(key=lambda x: x[3], reverse=True)\n",
68 | " start_time.append(s_time)\n",
69 | " s_time = s_time + 1\n",
70 | " e_time = s_time\n",
71 | " exit_time.append(e_time)\n",
72 | " sequence_of_process.append(ready_queue[0][0])\n",
73 | " for k in range(len(process_data)):\n",
74 | " if process_data[k][0] == ready_queue[0][0]:\n",
75 | " break\n",
76 | " process_data[k][2] = process_data[k][2] - 1\n",
77 | " if process_data[k][2] == 0: #if burst time is zero, it means process is completed\n",
78 | " process_data[k][4] = 1\n",
79 | " process_data[k].append(e_time)\n",
80 | " if len(ready_queue) == 0:\n",
81 | " normal_queue.sort(key=lambda x: x[1])\n",
82 | " if s_time < normal_queue[0][1]:\n",
83 | " s_time = normal_queue[0][1]\n",
84 | " start_time.append(s_time)\n",
85 | " s_time = s_time + 1\n",
86 | " e_time = s_time\n",
87 | " exit_time.append(e_time)\n",
88 | " sequence_of_process.append(normal_queue[0][0])\n",
89 | " for k in range(len(process_data)):\n",
90 | " if process_data[k][0] == normal_queue[0][0]:\n",
91 | " break\n",
92 | " process_data[k][2] = process_data[k][2] - 1\n",
93 | " if process_data[k][2] == 0: #if burst time is zero, it means process is completed\n",
94 | " process_data[k][4] = 1\n",
95 | " process_data[k].append(e_time)\n",
96 | " t_time = Priority.calculateTurnaroundTime(self, process_data)\n",
97 | " w_time = Priority.calculateWaitingTime(self, process_data)\n",
98 | " Priority.printData(self, process_data, t_time, w_time, sequence_of_process)\n",
99 | "\n",
100 | " def calculateTurnaroundTime(self, process_data):\n",
101 | " total_turnaround_time = 0\n",
102 | " for i in range(len(process_data)):\n",
103 | " # turnaround_time = process_data[i][6] - process_data[i][5]\n",
104 | " turnaround_time = process_data[i][6] - process_data[i][1]\n",
105 | " '''\n",
106 | " turnaround_time = completion_time - arrival_time\n",
107 | " '''\n",
108 | " total_turnaround_time = total_turnaround_time + turnaround_time\n",
109 | " process_data[i].append(turnaround_time)\n",
110 | " average_turnaround_time = total_turnaround_time / len(process_data)\n",
111 | " '''\n",
112 | " average_turnaround_time = total_turnaround_time / no_of_processes\n",
113 | " '''\n",
114 | " return average_turnaround_time\n",
115 | "\n",
116 | " def calculateWaitingTime(self, process_data):\n",
117 | " total_waiting_time = 0\n",
118 | " for i in range(len(process_data)):\n",
119 | "            # indices: [7] = turnaround time (appended above), [5] = original burst time\n",
120 | "            waiting_time = process_data[i][7] - process_data[i][5]\n",
121 | " '''\n",
122 | " waiting_time = turnaround_time - burst_time\n",
123 | " '''\n",
124 | " total_waiting_time = total_waiting_time + waiting_time\n",
125 | " process_data[i].append(waiting_time)\n",
126 | " average_waiting_time = total_waiting_time / len(process_data)\n",
127 | " '''\n",
128 | " average_waiting_time = total_waiting_time / no_of_processes\n",
129 | " '''\n",
130 | " return average_waiting_time\n",
131 | "\n",
132 | " def printData(self, process_data, average_turnaround_time, average_waiting_time, sequence_of_process):\n",
133 | " process_data.sort(key=lambda x: x[0])\n",
134 | " '''\n",
135 | " Sort processes according to the Process ID\n",
136 | " '''\n",
137 | " print(\"Process_ID Arrival_Time Rem_Burst_Time Priority Completed Orig_Burst_Time Completion_Time Turnaround_Time Waiting_Time\")\n",
138 | " for i in range(len(process_data)):\n",
139 | " for j in range(len(process_data[i])):\n",
140 | "\n",
141 | " print(process_data[i][j], end=\"\t\t\")\n",
142 | " print()\n",
143 | "\n",
144 | " print(f'Average Turnaround Time: {average_turnaround_time}')\n",
145 | "\n",
146 | " print(f'Average Waiting Time: {average_waiting_time}')\n",
147 | "\n",
148 | " print(f'Sequence of Process: {sequence_of_process}')\n",
149 | "\n",
150 | "\n",
151 | "if __name__ == \"__main__\":\n",
152 | " no_of_processes = int(input(\"Enter number of processes: \"))\n",
153 | " priority = Priority()\n",
154 | " priority.processData(no_of_processes)\n",
155 | "\n",
156 | "\n",
157 | "\n",
158 | "\n",
159 | "\n",
160 | "'''\n",
161 | "5\n",
162 | "1\n",
163 | "0\n",
164 | "2\n",
165 | "2\n",
166 | "2\n",
167 | "0\n",
168 | "1\n",
169 | "1\n",
170 | "3\n",
171 | "0\n",
172 | "8\n",
173 | "4\n",
174 | "4\n",
175 | "0\n",
176 | "4\n",
177 | "2\n",
178 | "5\n",
179 | "0\n",
180 | "5\n",
181 | "3\n",
182 | "\n",
183 | "\n",
184 | "\n",
185 | "\n",
186 | "'''\n",
187 | "\n",
188 | "\n"
189 | ]
190 | }
191 | ],
192 | "metadata": {
193 | "kernelspec": {
194 | "display_name": "Python 3",
195 | "language": "python",
196 | "name": "python3"
197 | },
198 | "language_info": {
199 | "codemirror_mode": {
200 | "name": "ipython",
201 | "version": 3
202 | },
203 | "file_extension": ".py",
204 | "mimetype": "text/x-python",
205 | "name": "python",
206 | "nbconvert_exporter": "python",
207 | "pygments_lexer": "ipython3",
208 | "version": "3.11.0"
209 | },
210 | "orig_nbformat": 4
211 | },
212 | "nbformat": 4,
213 | "nbformat_minor": 2
214 | }
215 |
--------------------------------------------------------------------------------
/peterson_solution.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "Peterson's Solution is a classic algorithm used to solve the critical-section problem in concurrent programming. The critical-section problem arises when multiple processes or threads need to access a shared resource (such as shared memory or a shared data structure) concurrently. To prevent data inconsistency and ensure the correctness of the program, mutual exclusion must be enforced, meaning only one process can access the shared resource at a time.\n",
8 | "\n",
9 | "Peterson's Solution is based on the idea of using shared variables to coordinate the access to the critical section. It is primarily designed for systems with two processes that need to access a shared resource. The algorithm ensures that only one process is in its critical section at any given time, thus providing mutual exclusion.\n",
10 | "\n",
11 | "The algorithm relies on the following shared variables:\n",
12 | "\n",
13 | "- `int turn`: A variable that indicates whose turn it is to enter the critical section.\n",
14 | "- `bool flag[2]`: An array of flags that each process uses to indicate its intention to enter the critical section. `flag[i]` is true if process `i` wants to enter the critical section.\n",
15 | "\n",
16 | "The key idea of Peterson's Solution is that before entering the critical section, each process sets its `flag` to true and then sets `turn` to its own process number. Then it enters a loop where it checks if the other process also wants to enter the critical section. If the other process is in the critical section (indicated by its `flag` being true), and it has the turn (indicated by `turn` being equal to the other process number), the current process waits (spins) until it can enter the critical section. If not, it proceeds to enter its critical section.\n",
17 | "\n",
18 | "The pseudocode for Peterson's Solution is as follows:\n",
19 | "\n",
20 | "``` python\n",
21 | "# Shared variables\n",
22 | "int turn = 0\n",
23 | "bool flag[2] = {false, false}\n",
24 | "\n",
25 | "# Process 0\n",
26 | "flag[0] = true\n",
27 | "turn = 1\n",
28 | "while flag[1] and turn == 1:\n",
29 | " # Wait\n",
30 | "# Critical section\n",
31 | "# Exit section\n",
32 | "flag[0] = false\n",
33 | "\n",
34 | "# Process 1\n",
35 | "flag[1] = true\n",
36 | "turn = 0\n",
37 | "while flag[0] and turn == 0:\n",
38 | " # Wait\n",
39 | "# Critical section\n",
40 | "# Exit section\n",
41 | "flag[1] = false\n",
42 | "```\n",
43 | "\n",
44 | "The key property of Peterson's Solution is that it provides mutual exclusion, meaning only one process can be in its critical section at a time. Additionally, the algorithm guarantees progress, ensuring that a process that wants to enter the critical section will eventually do so as long as the other process does not remain indefinitely in its critical section.\n",
45 | "\n",
46 | "It's important to note that while Peterson's Solution is a simple and elegant algorithm, it is not suitable for scenarios with more than two processes."
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "import threading\n",
56 | "import time\n",
57 | "\n",
58 | "cs = 0\n",
59 | "flag_0 = False\n",
60 | "flag_1 = False\n",
61 | "turn = 0\n",
62 | "\n",
63 | "def thread_0():\n",
64 | " global cs, flag_0, flag_1, turn\n",
65 | "\n",
66 | " flag_0 = True\n",
67 | " turn = 1\n",
68 | " while (flag_1 and turn == 1):\n",
69 | " continue\n",
70 | "\n",
71 | " for i in range(10):\n",
72 | " cs += 1\n",
73 | " print(\"Thread 0: cs =\", cs)\n",
74 | " time.sleep(0.1)\n",
75 | "\n",
76 | " flag_0 = False\n",
77 | "\n",
78 | "def thread_1():\n",
79 | " global cs, flag_0, flag_1, turn\n",
80 | "\n",
81 | " flag_1 = True\n",
82 | " turn = 0\n",
83 | " while (flag_0 and turn == 0):\n",
84 | " continue\n",
85 | "\n",
86 | " for i in range(10):\n",
87 | " cs += 1000\n",
88 | " print(\"Thread 1: cs =\", cs)\n",
89 | " time.sleep(0.1)\n",
90 | "\n",
91 | " flag_1 = False\n",
92 | "\n",
93 | "if __name__ == \"__main__\":\n",
94 | "\t\tt0 = threading.Thread(target=thread_0)\n",
95 | "\t\tt1 = threading.Thread(target=thread_1)\n",
96 | "\t\tt0.start()\n",
97 | "\t\tt1.start()"
98 | ]
99 | },
100 | {
101 | "cell_type": "markdown",
102 | "metadata": {},
103 | "source": [
104 | "### Alternative"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "metadata": {},
111 | "outputs": [],
112 | "source": [
113 | "import threading\n",
114 | "import time\n",
115 | "\n",
116 | "# Shared variables\n",
117 | "turn = 0\n",
118 | "flag = [False, False]\n",
119 | "\n",
120 | "def process_0():\n",
121 | " global turn, flag\n",
122 | " flag[0] = True\n",
123 | " turn = 1\n",
124 | " while flag[1] and turn == 1:\n",
125 | " # Wait\n",
126 | " pass\n",
127 | " # Critical section\n",
128 | " print(\"Process 0 is in the critical section.\")\n",
129 | " time.sleep(2) # Simulating some work inside the critical section\n",
130 | " # Exit section\n",
131 | " flag[0] = False\n",
132 | " print(\"Process 0 exited the critical section.\\n\")\n",
133 | "\n",
134 | "def process_1():\n",
135 | " global turn, flag\n",
136 | " flag[1] = True\n",
137 | " turn = 0\n",
138 | " while flag[0] and turn == 0:\n",
139 | " # Wait\n",
140 | " pass\n",
141 | " # Critical section\n",
142 | " print(\"Process 1 is in the critical section.\")\n",
143 | " time.sleep(1) # Simulating some work inside the critical section\n",
144 | " # Exit section\n",
145 | " flag[1] = False\n",
146 | " print(\"Process 1 exited the critical section.\\n\")\n",
147 | "\n",
148 | "if __name__ == \"__main__\":\n",
149 | " thread_0 = threading.Thread(target=process_0)\n",
150 | " thread_1 = threading.Thread(target=process_1)\n",
151 | "\n",
152 | " thread_0.start()\n",
153 | " thread_1.start()\n",
154 | "\n",
155 | " thread_0.join()\n",
156 | " thread_1.join()\n",
157 | "\n",
158 | " print(\"Both processes have completed.\")\n"
159 | ]
160 | },
161 | {
162 | "cell_type": "markdown",
163 | "metadata": {},
164 | "source": [
165 | "### Alternative"
166 | ]
167 | },
168 | {
169 | "cell_type": "markdown",
170 | "metadata": {},
171 | "source": [
172 | "The producer-consumer problem (or bounded buffer problem) describes two processes, the producer and the consumer, which share a common, fixed-size buffer used as a queue. Producers produce an item and put it into the buffer. If the buffer is already full then the producer will have to wait for an empty block in the buffer. Consumers consume an item from the buffer. If the buffer is already empty then the consumer will have to wait for an item in the buffer. Implement Peterson’s Algorithm for the two processes using shared memory such that there is mutual exclusion between them. The solution should have free from synchronization problems. "
173 | ]
174 | },
175 | {
176 | "cell_type": "code",
177 | "execution_count": null,
178 | "metadata": {},
179 | "outputs": [],
180 | "source": [
181 | "import random\n",
182 | "import time\n",
183 | "import threading\n",
184 | "import logging\n",
185 | "import queue\n",
186 | "\n",
187 | "BSIZE = 8 # Buffer size\n",
188 | "PWT = 2 # Producer wait time limit\n",
189 | "CWT = 10 # Consumer wait time limit\n",
190 | "RT = 10 # Program run-time in seconds\n",
191 | "\n",
192 | "def myrand(n):\n",
193 | " return random.randint(1, n)\n",
194 | "\n",
195 | "def producer(queue, state):\n",
196 | " index = 0\n",
197 | " while state:\n",
198 | " time.sleep(1)\n",
199 | " logging.info(\"\\nProducer is ready now.\")\n",
200 | " with queue.lock:\n",
201 | " if not queue.full():\n",
202 | " tempo = myrand(BSIZE * 3)\n",
203 | " logging.info(f\"Job {tempo} has been produced\")\n",
204 | " queue.put(tempo)\n",
205 | " logging.info(f\"Buffer: {list(queue.queue)}\")\n",
206 | " else:\n",
207 | " logging.info(\"Buffer is full, nothing can be produced!!!\")\n",
208 | " wait_time = myrand(PWT)\n",
209 | " logging.info(f\"Producer will wait for {wait_time} seconds\")\n",
210 | " time.sleep(wait_time)\n",
211 | "\n",
212 | "def consumer(queue, state):\n",
213 | " time.sleep(5)\n",
214 | " while state:\n",
215 | " time.sleep(1)\n",
216 | " logging.info(\"\\nConsumer is ready now.\")\n",
217 | " with queue.lock:\n",
218 | " if not queue.empty():\n",
219 | " job = queue.get()\n",
220 | " logging.info(f\"Job {job} has been consumed\")\n",
221 | " logging.info(f\"Buffer: {list(queue.queue)}\")\n",
222 | " else:\n",
223 | " logging.info(\"Buffer is empty, nothing can be consumed!!!\")\n",
224 | " wait_time = myrand(CWT)\n",
225 | " logging.info(f\"Consumer will sleep for {wait_time} seconds\")\n",
226 | " time.sleep(wait_time)\n",
227 | "\n",
228 | "if __name__ == \"__main__\":\n",
229 | " logging.basicConfig(level=logging.INFO, format='%(message)s')\n",
230 | "\n",
231 | " shared_queue = queue.Queue(BSIZE)\n",
232 | " shared_queue.lock = threading.Lock()\n",
233 | " state = True\n",
234 | "\n",
235 | " producer_thread = threading.Thread(target=producer, args=(shared_queue, state))\n",
236 | " consumer_thread = threading.Thread(target=consumer, args=(shared_queue, state))\n",
237 | "\n",
238 | " producer_thread.start()\n",
239 | " consumer_thread.start()\n",
240 | "\n",
241 | " time.sleep(RT)\n",
242 | " state = False\n",
243 | "\n",
244 | " producer_thread.join()\n",
245 | " consumer_thread.join()\n",
246 | "\n",
247 | " logging.info(\"\\nThe clock ran out.\")\n"
248 | ]
249 | }
250 | ],
251 | "metadata": {
252 | "kernelspec": {
253 | "display_name": "Python 3",
254 | "language": "python",
255 | "name": "python3"
256 | },
257 | "language_info": {
258 | "codemirror_mode": {
259 | "name": "ipython",
260 | "version": 3
261 | },
262 | "file_extension": ".py",
263 | "mimetype": "text/x-python",
264 | "name": "python",
265 | "nbconvert_exporter": "python",
266 | "pygments_lexer": "ipython3",
267 | "version": "3.11.0"
268 | },
269 | "orig_nbformat": 4
270 | },
271 | "nbformat": 4,
272 | "nbformat_minor": 2
273 | }
274 |
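A note on the last cell above: it synchronizes the shared buffer with a `threading.Lock` rather than with the flags-and-turn protocol described at the top of the notebook. For comparison, here is a minimal sketch of the two-thread Peterson protocol wrapped in a small helper class. The `PetersonMutex` class and the toy `worker` function are illustrative only (not part of the notebook), and the busy-wait relies on CPython's GIL for memory visibility; on weakly ordered hardware without the GIL it would also need memory barriers.

``` python
import threading
import time

class PetersonMutex:
    """Two-thread Peterson mutual exclusion; threads are identified as 0 and 1."""

    def __init__(self):
        self.flag = [False, False]  # flag[i]: thread i wants to enter
        self.turn = 0               # whose turn it is to yield

    def enter(self, i):
        other = 1 - i
        self.flag[i] = True   # announce intention
        self.turn = other     # give priority to the other thread
        # Spin while the other thread wants in and has the turn.
        while self.flag[other] and self.turn == other:
            pass

    def leave(self, i):
        self.flag[i] = False  # no longer interested

shared = []
mutex = PetersonMutex()

def worker(i):
    for _ in range(3):
        mutex.enter(i)
        shared.append((i, len(shared)))  # critical section
        mutex.leave(i)
        time.sleep(0.01)

threads = [threading.Thread(target=worker, args=(i,)) for i in (0, 1)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(shared)
```

Because the wait loop spins (and burns CPU), `threading.Lock` remains the practical choice in Python; the sketch is only meant to mirror the pseudocode at the top of the notebook.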
--------------------------------------------------------------------------------
/round_robin.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "### Works only if arrival time and burst time is in integer"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": null,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "class RoundRobin:\n",
17 | "\n",
18 | " def processData(self, no_of_processes):\n",
19 | " process_data = []\n",
20 | " for i in range(no_of_processes):\n",
21 | " temporary = []\n",
22 | " process_id = int(input(\"Enter Process ID: \"))\n",
23 | "\n",
24 | " arrival_time = float(input(f\"Enter Arrival Time for Process {process_id}: \"))\n",
25 | "\n",
26 | " burst_time = float(input(f\"Enter Burst Time for Process {process_id}: \"))\n",
27 | "\n",
28 | " temporary.extend([process_id, arrival_time, burst_time, 0, burst_time])\n",
29 | " '''\n",
30 | " '0' is the state of the process. 0 means not executed and 1 means execution complete\n",
31 | " \n",
32 | " '''\n",
33 | " process_data.append(temporary)\n",
34 | "\n",
35 | " time_slice = int(input(\"Enter Time Slice: \"))\n",
36 | "\n",
37 | " RoundRobin.schedulingProcess(self, process_data, time_slice)\n",
38 | "\n",
39 | " def schedulingProcess(self, process_data, time_slice):\n",
40 | " start_time = []\n",
41 | " exit_time = []\n",
42 | " executed_process = []\n",
43 | " ready_queue = []\n",
44 | " s_time = 0\n",
45 | " process_data.sort(key=lambda x: x[1])\n",
46 | " '''\n",
47 | " Sort processes according to the Arrival Time\n",
48 | " '''\n",
49 | " while 1:\n",
50 | " normal_queue = []\n",
51 | " temp = []\n",
52 | " for i in range(len(process_data)):\n",
53 | " if process_data[i][1] <= s_time and process_data[i][3] == 0:\n",
54 | " present = 0\n",
55 | " if len(ready_queue) != 0:\n",
56 | " for k in range(len(ready_queue)):\n",
57 | " if process_data[i][0] == ready_queue[k][0]:\n",
58 | " present = 1\n",
59 | " '''\n",
60 | " The above if loop checks that the next process is not a part of ready_queue\n",
61 | " '''\n",
62 | " if present == 0:\n",
63 | " temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])\n",
64 | " ready_queue.append(temp)\n",
65 | " temp = []\n",
66 | " '''\n",
67 | " The above if loop adds a process to the ready_queue only if it is not already present in it\n",
68 | " '''\n",
69 | " if len(ready_queue) != 0 and len(executed_process) != 0:\n",
70 | " for k in range(len(ready_queue)):\n",
71 | " if ready_queue[k][0] == executed_process[len(executed_process) - 1]:\n",
72 | " ready_queue.insert((len(ready_queue) - 1), ready_queue.pop(k))\n",
73 | " '''\n",
74 | " The above if loop makes sure that the recently executed process is appended at the end of ready_queue\n",
75 | " '''\n",
76 | " elif process_data[i][3] == 0:\n",
77 | " temp.extend([process_data[i][0], process_data[i][1], process_data[i][2], process_data[i][4]])\n",
78 | " normal_queue.append(temp)\n",
79 | " temp = []\n",
80 | " if len(ready_queue) == 0 and len(normal_queue) == 0:\n",
81 | " break\n",
82 | " if len(ready_queue) != 0:\n",
83 | " if ready_queue[0][2] > time_slice:\n",
84 | " '''\n",
85 | " If process has remaining burst time greater than the time slice, it will execute for a time period equal to time slice and then switch\n",
86 | " '''\n",
87 | " start_time.append(s_time)\n",
88 | " s_time = s_time + time_slice\n",
89 | " e_time = s_time\n",
90 | " exit_time.append(e_time)\n",
91 | " executed_process.append(ready_queue[0][0])\n",
92 | " for j in range(len(process_data)):\n",
93 | " if process_data[j][0] == ready_queue[0][0]:\n",
94 | " break\n",
95 | " process_data[j][2] = process_data[j][2] - time_slice\n",
96 | " ready_queue.pop(0)\n",
97 | " elif ready_queue[0][2] <= time_slice:\n",
98 | " '''\n",
99 | " If a process has a remaining burst time less than or equal to time slice, it will complete its execution\n",
100 | " '''\n",
101 | " start_time.append(s_time)\n",
102 | " s_time = s_time + ready_queue[0][2]\n",
103 | " e_time = s_time\n",
104 | " exit_time.append(e_time)\n",
105 | " executed_process.append(ready_queue[0][0])\n",
106 | " for j in range(len(process_data)):\n",
107 | " if process_data[j][0] == ready_queue[0][0]:\n",
108 | " break\n",
109 | " process_data[j][2] = 0\n",
110 | " process_data[j][3] = 1\n",
111 | " process_data[j].append(e_time)\n",
112 | " ready_queue.pop(0)\n",
113 | " elif len(ready_queue) == 0:\n",
114 | " if s_time < normal_queue[0][1]:\n",
115 | " s_time = normal_queue[0][1]\n",
116 | " if normal_queue[0][2] > time_slice:\n",
117 | " '''\n",
118 | " If process has remaining burst time greater than the time slice, it will execute for a time period equal to time slice and then switch\n",
119 | " '''\n",
120 | " start_time.append(s_time)\n",
121 | " s_time = s_time + time_slice\n",
122 | " e_time = s_time\n",
123 | " exit_time.append(e_time)\n",
124 | " executed_process.append(normal_queue[0][0])\n",
125 | " for j in range(len(process_data)):\n",
126 | " if process_data[j][0] == normal_queue[0][0]:\n",
127 | " break\n",
128 | " process_data[j][2] = process_data[j][2] - time_slice\n",
129 | " elif normal_queue[0][2] <= time_slice:\n",
130 | " '''\n",
131 | " If a process has a remaining burst time less than or equal to time slice, it will complete its execution\n",
132 | " '''\n",
133 | " start_time.append(s_time)\n",
134 | " s_time = s_time + normal_queue[0][2]\n",
135 | " e_time = s_time\n",
136 | " exit_time.append(e_time)\n",
137 | " executed_process.append(normal_queue[0][0])\n",
138 | " for j in range(len(process_data)):\n",
139 | " if process_data[j][0] == normal_queue[0][0]:\n",
140 | " break\n",
141 | " process_data[j][2] = 0\n",
142 | " process_data[j][3] = 1\n",
143 | " process_data[j].append(e_time)\n",
144 | " t_time = RoundRobin.calculateTurnaroundTime(self, process_data)\n",
145 | " w_time = RoundRobin.calculateWaitingTime(self, process_data)\n",
146 | " RoundRobin.printData(self, process_data, t_time, w_time, executed_process)\n",
147 | "\n",
148 | " def calculateTurnaroundTime(self, process_data):\n",
149 | " total_turnaround_time = 0\n",
150 | " for i in range(len(process_data)):\n",
151 | " turnaround_time = process_data[i][5] - process_data[i][1]\n",
152 | " '''\n",
153 | " turnaround_time = completion_time - arrival_time\n",
154 | " '''\n",
155 | " total_turnaround_time = total_turnaround_time + turnaround_time\n",
156 | " process_data[i].append(turnaround_time)\n",
157 | " average_turnaround_time = total_turnaround_time / len(process_data)\n",
158 | " '''\n",
159 | " average_turnaround_time = total_turnaround_time / no_of_processes\n",
160 | " '''\n",
161 | " return average_turnaround_time\n",
162 | "\n",
163 | " def calculateWaitingTime(self, process_data):\n",
164 | " total_waiting_time = 0\n",
165 | " for i in range(len(process_data)):\n",
166 | " waiting_time = process_data[i][6] - process_data[i][4]\n",
167 | " '''\n",
168 | " waiting_time = turnaround_time - burst_time\n",
169 | " '''\n",
170 | " total_waiting_time = total_waiting_time + waiting_time\n",
171 | " process_data[i].append(waiting_time)\n",
172 | " average_waiting_time = total_waiting_time / len(process_data)\n",
173 | " '''\n",
174 | " average_waiting_time = total_waiting_time / no_of_processes\n",
175 | " '''\n",
176 | " return average_waiting_time\n",
177 | "\n",
178 | " def printData(self, process_data, average_turnaround_time, average_waiting_time, executed_process):\n",
179 | " process_data.sort(key=lambda x: x[0])\n",
180 | " '''\n",
181 | " Sort processes according to the Process ID\n",
182 | " '''\n",
183 | " print(\"Process_ID Arrival_Time Rem_Burst_Time Completed Original_Burst_Time Completion_Time Turnaround_Time Waiting_Time\")\n",
184 | " for i in range(len(process_data)):\n",
185 | " for j in range(len(process_data[i])):\n",
186 | "\n",
187 | " print(process_data[i][j], end=\"\t\t\")\n",
188 | " print()\n",
189 | "\n",
190 | " print(f'Average Turnaround Time: {average_turnaround_time}')\n",
191 | "\n",
192 | " print(f'Average Waiting Time: {average_waiting_time}')\n",
193 | "\n",
194 | " print(f'Sequence of Processes: {executed_process}')\n",
195 | "\n",
196 | "\n",
197 | "if __name__ == \"__main__\":\n",
198 | " no_of_processes = int(input(\"Enter number of processes: \"))\n",
199 | " rr = RoundRobin()\n",
200 | " rr.processData(no_of_processes)\n",
201 | "\n",
202 | "\n"
203 | ]
204 | }
205 | ],
206 | "metadata": {
207 | "language_info": {
208 | "name": "python"
209 | },
210 | "orig_nbformat": 4
211 | },
212 | "nbformat": 4,
213 | "nbformat_minor": 2
214 | }
215 |
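As a quick sanity check on the metrics computed by `calculateTurnaroundTime` and `calculateWaitingTime`, the same formulas can be applied to a hand-traced schedule. The sketch below uses a hypothetical three-process workload with a time slice of 2 and the usual convention that new arrivals are queued ahead of the preempted process; the completion times come from that hand trace, not from the `RoundRobin` class above.

``` python
# Hand trace with quantum 2: P1 runs 0-2, P2 2-4, P3 4-6, P1 6-8,
# P2 8-10 (done), P3 10-11 (done), P1 11-12 (done).
processes = [
    {"pid": 1, "arrival": 0, "burst": 5, "completion": 12},
    {"pid": 2, "arrival": 1, "burst": 4, "completion": 10},
    {"pid": 3, "arrival": 2, "burst": 3, "completion": 11},
]

for p in processes:
    p["turnaround"] = p["completion"] - p["arrival"]  # turnaround = completion - arrival
    p["waiting"] = p["turnaround"] - p["burst"]       # waiting = turnaround - burst

print([(p["pid"], p["turnaround"], p["waiting"]) for p in processes])
print("Average turnaround:", sum(p["turnaround"] for p in processes) / len(processes))  # 10.0
print("Average waiting:", sum(p["waiting"] for p in processes) / len(processes))        # 6.0
```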
--------------------------------------------------------------------------------
/mutex_lock.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | " One Example"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "https://www.pythontutorial.net/python-concurrency/python-threading-lock/"
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "illustrates a race condition problem: Output will vary 10 & 20 each time you run , depending race"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "from threading import Thread\n",
31 | "from time import sleep\n",
32 | "\n",
33 | "\n",
34 | "counter = 0\n",
35 | "\n",
36 | "# define a function that increases the value of the counter variable by a number:\n",
37 | "def increase(by):\n",
38 | " global counter\n",
39 | "\n",
40 | " local_counter = counter\n",
41 | " local_counter += by\n",
42 | "\n",
43 | " sleep(0.1)\n",
44 | "\n",
45 | " counter = local_counter\n",
46 | " print(f'counter={counter}\\n')\n",
47 | "\n",
48 | "\n",
49 | "# create two threads. \n",
50 | "# first thread increases the counter by 10 \n",
51 | "# second thread increases the counter by 20:\n",
52 | "t1 = Thread(target=increase, args=(10,))\n",
53 | "t2 = Thread(target=increase, args=(20,))\n",
54 | "\n",
55 | "# start the threads\n",
56 | "t1.start()\n",
57 | "t2.start()\n",
58 | "\n",
59 | "\n",
60 | "# wait for the threads to complete\n",
61 | "t1.join()\n",
62 | "t2.join()\n",
63 | "\n",
64 | "\n",
65 | "print(f'The final counter is {counter}')"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "#### Solution using mutext lock\n",
73 | "#### Using a threading lock to prevent the race condition\n",
74 | "\n",
75 | "In Python, you can use the `Lock` class from the `threading` module to create a lock object:\n",
76 | "\n",
77 | "First, create an instance the Lock class:\n",
78 | "\n",
79 | "```python\n",
80 | "lock = Lock()\n",
81 | "```\n",
82 | "\n",
83 | "By default, the lock is unlocked until a thread acquires it.\n",
84 | "\n",
85 | "Second, acquire a lock by calling the acquire() method:\n",
86 | "\n",
87 | "```python\n",
88 | "lock.acquire()\n",
89 | "```\n",
90 | "\n",
91 | "Third, release the lock once the thread completes changing the shared variable:\n",
92 | "\n",
93 | "```python\n",
94 | "lock.release()\n",
95 | "```"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "from threading import Thread, Lock\n",
105 | "from time import sleep\n",
106 | "\n",
107 | "# Initialize the global counter variable\n",
108 | "counter = 0\n",
109 | "\n",
110 | "# Define the thread function to increase the counter by a given value using a lock\n",
111 | "def increase(by, lock):\n",
112 | " global counter\n",
113 | "\n",
114 | " # Acquire the lock to ensure exclusive access to the shared counter\n",
115 | " lock.acquire()\n",
116 | "\n",
117 | " # Create a local copy of the counter to perform the update\n",
118 | " local_counter = counter\n",
119 | " local_counter += by\n",
120 | "\n",
121 | " # Simulate some time-consuming work using sleep\n",
122 | " sleep(0.1)\n",
123 | "\n",
124 | " # Update the global counter with the new value\n",
125 | " counter = local_counter\n",
126 | " print(f'counter={counter}')\n",
127 | "\n",
128 | " # Release the lock to allow other threads to access the shared counter\n",
129 | " lock.release()\n",
130 | "\n",
131 | "# Create a Lock object to synchronize access to the shared counter\n",
132 | "lock = Lock()\n",
133 | "\n",
134 | "# Create two threads, each incrementing the counter by a different value\n",
135 | "t1 = Thread(target=increase, args=(10, lock))\n",
136 | "t2 = Thread(target=increase, args=(20, lock))\n",
137 | "\n",
138 | "# Start the threads\n",
139 | "t1.start()\n",
140 | "t2.start()\n",
141 | "\n",
142 | "# Wait for the threads to complete their execution\n",
143 | "t1.join()\n",
144 | "t2.join()\n",
145 | "\n",
146 | "# Print the final value of the counter\n",
147 | "print(f'The final counter is {counter}')\n"
148 | ]
149 | },
150 | {
151 | "cell_type": "markdown",
152 | "metadata": {},
153 | "source": [
154 | "It’s easier to use the lock object with the with statement to acquire and release the lock within a block of code:"
155 | ]
156 | },
157 | {
158 | "cell_type": "code",
159 | "execution_count": null,
160 | "metadata": {},
161 | "outputs": [],
162 | "source": [
163 | "from threading import Thread, Lock\n",
164 | "from time import sleep\n",
165 | "\n",
166 | "counter = 0\n",
167 | "\n",
168 | "def increase(by, lock):\n",
169 | " global counter\n",
170 | "\n",
171 | " # Use the 'with' statement to acquire and release the lock automatically\n",
172 | " with lock:\n",
173 | " # Create a local copy of the counter to perform the update\n",
174 | " local_counter = counter\n",
175 | " local_counter += by\n",
176 | "\n",
177 | " # Simulate some time-consuming work using sleep\n",
178 | " sleep(0.1)\n",
179 | "\n",
180 | " # Update the global counter with the new value\n",
181 | " counter = local_counter\n",
182 | " print(f'counter={counter}')\n",
183 | "\n",
184 | "lock = Lock()\n",
185 | "\n",
186 | "# create threads\n",
187 | "t1 = Thread(target=increase, args=(10, lock))\n",
188 | "t2 = Thread(target=increase, args=(20, lock))\n",
189 | "\n",
190 | "# start the threads\n",
191 | "t1.start()\n",
192 | "t2.start()\n",
193 | "\n",
194 | "# wait for the threads to complete\n",
195 | "t1.join()\n",
196 | "t2.join()\n",
197 | "\n",
198 | "# Print the final value of the counter\n",
199 | "print(f'The final counter is {counter}')\n"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "### Alternative (same but reworked code)"
207 | ]
208 | },
209 | {
210 | "cell_type": "markdown",
211 | "metadata": {},
212 | "source": [
213 | "Alternative code follows a more structured and object-oriented approach by encapsulating the shared counter and lock within a class. This approach is more modular and reusable, allowing you to easily create multiple instances of the Counter class with their own separate counters and locks, each capable of independent synchronization"
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": null,
219 | "metadata": {},
220 | "outputs": [],
221 | "source": [
222 | "from threading import Thread, Lock\n",
223 | "from time import sleep\n",
224 | "\n",
225 | "# Define a class to encapsulate the shared counter and lock\n",
226 | "class Counter:\n",
227 | " def __init__(self):\n",
228 | " # Initialize the counter value to 0\n",
229 | " self.value = 0\n",
230 | " # Create a lock object to synchronize access to the counter\n",
231 | " self.lock = Lock()\n",
232 | "\n",
233 | " # Method to increase the counter by a specified value\n",
234 | " def increase(self, by):\n",
235 | " # Acquire the lock to ensure exclusive access to the shared counter\n",
236 | " with self.lock:\n",
237 | " # Create a local variable to perform the update operation\n",
238 | " current_value = self.value\n",
239 | " # Increment the local variable by the specified value\n",
240 | " current_value += by\n",
241 | "\n",
242 | " # Simulate some time-consuming work using sleep\n",
243 | " sleep(0.1)\n",
244 | "\n",
245 | " # Update the shared counter with the new value\n",
246 | " self.value = current_value\n",
247 | " # Print the updated value of the counter\n",
248 | " print(f'counter={self.value}')\n",
249 | "\n",
250 | "# Main function\n",
251 | "def main():\n",
252 | " # Create an instance of the Counter class to manage the shared counter\n",
253 | " counter = Counter()\n",
254 | "\n",
255 | " # Create two threads with different increment values\n",
256 | " t1 = Thread(target=counter.increase, args=(10, ))\n",
257 | " t2 = Thread(target=counter.increase, args=(20, ))\n",
258 | "\n",
259 | " # Start the threads\n",
260 | " t1.start()\n",
261 | " t2.start()\n",
262 | "\n",
263 | " # Wait for the threads to complete their work\n",
264 | " t1.join()\n",
265 | " t2.join()\n",
266 | "\n",
267 | " # Print the final value of the counter after both threads have finished\n",
268 | " print(f'The final counter is {counter.value}')\n",
269 | "\n",
270 | "# Check if the script is run directly (not imported as a module)\n",
271 | "if __name__ == '__main__':\n",
272 | " # Call the main function to start the threads and perform the synchronization\n",
273 | " main()\n"
274 | ]
275 | },
276 | {
277 | "cell_type": "markdown",
278 | "metadata": {},
279 | "source": [
280 | " Another Example"
281 | ]
282 | },
283 | {
284 | "cell_type": "markdown",
285 | "metadata": {},
286 | "source": [
287 | "#### Synchronization Errors\n",
288 | "We will create two threads and give each of them a pointer towards a variable in the main containing an unsigned integer, count. Each thread will iterate a certain number of times (defined in the TIMES_TO_COUNT macro) and increment the count at each iteration. Since there are two threads, we will of course expect the final count to be exactly twice TIMES_TO_COUNT."
289 | ]
290 | },
291 | {
292 | "cell_type": "markdown",
293 | "metadata": {},
294 | "source": [
295 | "##### Solution using mutext lock"
296 | ]
297 | },
298 | {
299 | "cell_type": "code",
300 | "execution_count": null,
301 | "metadata": {},
302 | "outputs": [],
303 | "source": [
304 | "import threading\n",
305 | "import time\n",
306 | "\n",
307 | "# Each thread will count TIMES_TO_COUNT times\n",
308 | "TIMES_TO_COUNT = 21000\n",
309 | "\n",
310 | "class Counter:\n",
311 | " def __init__(self):\n",
312 | " self.count = 0\n",
313 | " self.lock = threading.Lock()\n",
314 | "\n",
315 | " def increment(self):\n",
316 | " with self.lock:\n",
317 | " self.count += 1\n",
318 | "\n",
319 | "def thread_routine(counter):\n",
320 | " # Each thread starts here\n",
321 | " tid = threading.current_thread().ident\n",
322 | " # Print the count before this thread starts iterating.\n",
323 | " # In order to read the value of count, we lock the mutex:\n",
324 | " with counter.lock:\n",
325 | " print(f\"Thread [{tid}]: Count at thread start = {counter.count}\")\n",
326 | " for i in range(TIMES_TO_COUNT):\n",
327 | " # Iterate TIMES_TO_COUNT times\n",
328 | " # Increment the counter at each iteration\n",
329 | " # Lock the mutex for the duration of the incrementation\n",
330 | " counter.increment()\n",
331 | " # Print the final count when this thread finishes its own count,\n",
332 | " # without forgetting to lock the mutex:\n",
333 | " with counter.lock:\n",
334 | " print(f\"Thread [{tid}]: Final count = {counter.count}\")\n",
335 | "\n",
336 | "def main():\n",
337 | " # Structure containing the threads' total count:\n",
338 | " counter = Counter()\n",
339 | "\n",
340 | " # Since each thread counts TIMES_TO_COUNT times and that\n",
341 | " # we have 2 threads, we expect the final count to be\n",
342 | " # 2 * TIMES_TO_COUNT:\n",
343 | " print(f\"Main: Expected count is {2 * TIMES_TO_COUNT}\")\n",
344 | " # Thread creation:\n",
345 | " t1 = threading.Thread(target=thread_routine, args=(counter,))\n",
346 | " print(f\"Main: Created first thread [{t1.ident}]\")\n",
347 | " t2 = threading.Thread(target=thread_routine, args=(counter,))\n",
348 | " print(f\"Main: Created second thread [{t2.ident}]\")\n",
349 | " # Thread starting:\n",
350 | " t1.start()\n",
351 | " t2.start()\n",
352 | " # Thread joining:\n",
353 | " t1.join()\n",
354 | " print(f\"Main: Joined first thread [{t1.ident}]\")\n",
355 | " t2.join()\n",
356 | " print(f\"Main: Joined second thread [{t2.ident}]\")\n",
357 | " # Final count evaluation:\n",
358 | " # (Here we can read the count without worrying about\n",
359 | " # the lock because all threads have been joined and\n",
360 | " # there can be no data race between threads)\n",
361 | " if counter.count != (2 * TIMES_TO_COUNT):\n",
362 | " print(f\"Main: ERROR! Total count is {counter.count}\")\n",
363 | " else:\n",
364 | " print(f\"Main: OK. Total count is {counter.count}\")\n",
365 | "\n",
366 | "if __name__ == \"__main__\":\n",
367 | " main()\n"
368 | ]
369 | }
370 | ],
371 | "metadata": {
372 | "kernelspec": {
373 | "display_name": "Python 3",
374 | "language": "python",
375 | "name": "python3"
376 | },
377 | "language_info": {
378 | "codemirror_mode": {
379 | "name": "ipython",
380 | "version": 3
381 | },
382 | "file_extension": ".py",
383 | "mimetype": "text/x-python",
384 | "name": "python",
385 | "nbconvert_exporter": "python",
386 | "pygments_lexer": "ipython3",
387 | "version": "3.11.0"
388 | },
389 | "orig_nbformat": 4
390 | },
391 | "nbformat": 4,
392 | "nbformat_minor": 2
393 | }
394 |
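One detail worth making explicit: if the code between `acquire()` and `release()` raises, the lock stays held and every other thread blocks forever. The `with lock:` form shown above handles this automatically; the sketch below is an equivalent `try/finally` version that also uses the optional `acquire()` timeout. The `add()` helper is hypothetical and only illustrates the pattern.

``` python
from threading import Lock

lock = Lock()
counter = 0

def add(by):
    global counter
    # acquire() accepts an optional timeout and returns False if the
    # lock could not be obtained within it.
    if not lock.acquire(timeout=5):
        raise TimeoutError("could not obtain the counter lock")
    try:
        counter += by      # critical section
    finally:
        lock.release()     # always runs, even if the update raises

add(10)
add(20)
print(f'The final counter is {counter}')  # 30
```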
--------------------------------------------------------------------------------