├── .gitignore ├── Unit_2 ├── readme.md ├── Insertion_Sort │ ├── c_Insertion_sort.c │ ├── Rust_Insertion_Sort.rs │ ├── cpp_Insertion_Sort.cpp │ └── python_insertion_sort.py ├── Quick_Sort │ ├── python_quick_Sort.py │ ├── rust_quick_sort.rs │ ├── cpp_quick_Sort.cpp │ └── c_Quick_sort.c ├── counting_Sort │ ├── python_counting_sort.py │ ├── rust_counting_sort.rs │ ├── cpp_counting_sort.cpp │ └── c_counting_sort.c ├── Heap_Sort │ ├── python_heap_sort.py │ ├── rust_heap_sort.rs │ ├── cpp_heap_sort.cpp │ └── c_Heap_sort.c ├── Radix_sort │ ├── python_radix_sort.py │ ├── rust_radix_sort.rs │ ├── cpp_radix_sort.cpp │ └── c_radix_sort.c └── Merge_Sort │ ├── Python_Merge_Sort.py │ ├── Rust_Merge_Sort.rs │ ├── C_Merge_Sort.c │ └── cpp_Merge_Sort.cpp ├── Unit_3_Greedy_Methods ├── 03_Optimal_Merge_Pattern │ ├── python_optimal_merge.py │ ├── rust_optimal_merge.rs │ ├── cpp_optimal_merge.cpp │ └── c_optimal_merge.c ├── 02_Fractional_knapsack_problem │ ├── python_fractional_knapsack.py │ ├── rust_fractional_knapsack.rs │ ├── c_fractional_knapsack.c │ └── cpp_fractional_knapsack.cpp ├── 05_kruskal_algorithm │ ├── python_kruskal_algorithm.py │ ├── cpp_kruskal_algorithm.cpp │ ├── rust_kruskal_algorithm.rs │ └── c_kruskal_algorithm.c ├── 06_Prim_algorithm │ ├── python_prim_algorithm.py │ ├── cpp_prim_algorithm.cpp │ ├── c_prim_algorithm.c │ └── rust_prim_algorithm.rs ├── 04_Dijkstra_Algorithm │ ├── python_dijkstra_algorithm.py │ ├── cpp_dijkstra_algorithm.cpp │ ├── rust_dijkstra_algorithm.rs │ └── c_Dijkstra_Algorithm.c └── 01_Huffman_Coding │ ├── cpp_huffman_coding.cpp │ ├── python_huffman_coding.py │ ├── rust_huffman_coding.rs │ └── c_huffman_coding.c ├── Unit_4_Dynamic_Programming ├── 03_0_1_Knapsack_problem │ ├── python_01_knapsack.py │ ├── rust_01_knapsack.rs │ ├── c_01_knapsack.c │ └── cpp_01_knapsack.cpp ├── 04_Floyd_Warshall_Algorithm │ ├── python_FW.py │ ├── rust_FW.rs │ ├── cpp_FW.cpp │ └── c_FW.c ├── 02_Longest_Common_Subsequence │ ├── python_LCS.py │ ├── c_LCS.c │ ├── rust_LCS.rs │ └── cpp_LCS.cpp └── 01_Matrix_Chain_Multiplication │ ├── python_MCM.py │ ├── rust_MCM.rs │ ├── cpp_MCM.cpp │ └── c_MCM.c ├── Unit_5 ├── String_Matching │ ├── python_KMP.py │ ├── rust_KMP.rs │ ├── C_KMP.c │ └── cpp_KMP.cpp └── Binary_Search_Tree │ ├── python_BST.py │ ├── cpp_BST.cpp │ ├── C_BST.c │ ├── rust_BST.rs │ ├── python_AVL.py │ ├── cpp_AVL.cpp │ ├── C_AVL.c │ └── rust_AVL.rs ├── README.md └── Lab_Files └── lab_stuffs.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | *.exe 3 | -------------------------------------------------------------------------------- /Unit_2/readme.md: -------------------------------------------------------------------------------- 1 | # Unit 2 - Analysis of Sorting Algorithms -------------------------------------------------------------------------------- /Unit_2/Insertion_Sort/c_Insertion_sort.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | 3 | void insertion_sort(int a[], int length){ 4 | for (int i = 1; i < length; i++){ 5 | int key = a[i]; 6 | int j = i - 1; 7 | 8 | // Shift elements larger than key one position to the right, then place key 9 | while (j >= 0 && a[j] > key){ 10 | a[j + 1] = a[j]; 11 | j--; 12 | } 13 | a[j + 1] = key; 14 | } 15 | } 16 | 17 | int main () { 18 | int a[] = {5, 2, 4, 6, 1, 3}; 19 | int n = sizeof(a) / sizeof(a[0]); 20 | 21 | insertion_sort(a, n); 22 | 23 | printf("Sorted array: "); 24 | for (int i = 0; i < n; i++) printf("%d ", a[i]); 25 | printf("\n"); 26 | return 0; 27 | } -------------------------------------------------------------------------------- /Unit_2/Quick_Sort/python_quick_Sort.py: -------------------------------------------------------------------------------- 1 | def quick_sort(arr): 2 | if len(arr) <= 1: 3 | return arr 4 | pivot = arr[len(arr) // 2] 5 | left = [x for x in arr if x < 
pivot] 6 | middle = [x for x in arr if x == pivot] 7 | right = [x for x in arr if x > pivot] 8 | return quick_sort(left) + middle + quick_sort(right) 9 | 10 | arr = [10, 7, 8, 9, 1, 5] 11 | sorted_arr = quick_sort(arr) 12 | print("Sorted array:", sorted_arr) 13 | -------------------------------------------------------------------------------- /Unit_2/counting_Sort/python_counting_sort.py: -------------------------------------------------------------------------------- 1 | def counting_sort(arr, max_value): 2 | count = [0] * (max_value + 1) 3 | output = [0] * len(arr) 4 | 5 | for num in arr: 6 | count[num] += 1 7 | 8 | for i in range(1, len(count)): 9 | count[i] += count[i - 1] 10 | 11 | for num in reversed(arr): 12 | output[count[num] - 1] = num 13 | count[num] -= 1 14 | 15 | for i in range(len(arr)): 16 | arr[i] = output[i] 17 | 18 | arr = [4, 2, 2, 8, 3, 3, 1] 19 | counting_sort(arr, max(arr)) 20 | print("Sorted array:", arr) 21 | -------------------------------------------------------------------------------- /Unit_2/Insertion_Sort/Rust_Insertion_Sort.rs: -------------------------------------------------------------------------------- 1 | 2 | fn main() { 3 | let mut arr = [64, 88, 51, 65, 90, 75, 34, 79, 46, 36]; 4 | println!("Unsorted array: {:?}", arr); 5 | 6 | insertion_sort(&mut arr); 7 | 8 | println!("Sorted array: {:?}", arr); 9 | } 10 | 11 | fn insertion_sort(arr: &mut [i32]) { 12 | let len = arr.len(); 13 | for i in 1..len { 14 | let key = arr[i]; 15 | let mut j = i as i32 - 1; 16 | 17 | while j >= 0 && arr[j as usize] > key { 18 | arr[(j + 1) as usize] = arr[j as usize]; 19 | j -= 1; 20 | } 21 | 22 | arr[(j + 1) as usize] = key; 23 | } 24 | } -------------------------------------------------------------------------------- /Unit_2/Quick_Sort/rust_quick_sort.rs: -------------------------------------------------------------------------------- 1 | fn quick_sort(arr: &mut [i32]) { 2 | if arr.len() <= 1 { 3 | return; 4 | } 5 | let pivot = partition(arr); 6 | quick_sort(&mut arr[0..pivot]); 7 | quick_sort(&mut arr[pivot + 1..]); 8 | } 9 | 10 | fn partition(arr: &mut [i32]) -> usize { 11 | let pivot_index = arr.len() - 1; 12 | let mut i = 0; 13 | for j in 0..pivot_index { 14 | if arr[j] < arr[pivot_index] { 15 | arr.swap(i, j); 16 | i += 1; 17 | } 18 | } 19 | arr.swap(i, pivot_index); 20 | i 21 | } 22 | 23 | fn main() { 24 | let mut arr = [10, 7, 8, 9, 1, 5]; 25 | quick_sort(&mut arr); 26 | println!("Sorted array: {:?}", arr); 27 | } 28 | -------------------------------------------------------------------------------- /Unit_2/Heap_Sort/python_heap_sort.py: -------------------------------------------------------------------------------- 1 | def heapify(arr, n, i): 2 | largest = i 3 | left = 2 * i + 1 4 | right = 2 * i + 2 5 | 6 | if left < n and arr[left] > arr[largest]: 7 | largest = left 8 | if right < n and arr[right] > arr[largest]: 9 | largest = right 10 | if largest != i: 11 | arr[i], arr[largest] = arr[largest], arr[i] 12 | heapify(arr, n, largest) 13 | 14 | def heap_sort(arr): 15 | n = len(arr) 16 | for i in range(n // 2 - 1, -1, -1): 17 | heapify(arr, n, i) 18 | for i in range(n - 1, 0, -1): 19 | arr[i], arr[0] = arr[0], arr[i] 20 | heapify(arr, i, 0) 21 | 22 | arr = [12, 11, 13, 5, 6, 7] 23 | heap_sort(arr) 24 | print("Sorted array:", arr) 25 | -------------------------------------------------------------------------------- /Unit_2/counting_Sort/rust_counting_sort.rs: -------------------------------------------------------------------------------- 1 | fn counting_sort(arr: 
&mut [usize], max_value: usize) { 2 | let mut count = vec![0; max_value + 1]; 3 | let mut output = vec![0; arr.len()]; 4 | 5 | for &num in arr.iter() { 6 | count[num] += 1; 7 | } 8 | 9 | for i in 1..=max_value { 10 | count[i] += count[i - 1]; 11 | } 12 | 13 | for &num in arr.iter().rev() { 14 | output[count[num] - 1] = num; 15 | count[num] -= 1; 16 | } 17 | 18 | arr.copy_from_slice(&output); 19 | } 20 | 21 | fn main() { 22 | let mut arr = [4, 2, 2, 8, 3, 3, 1]; 23 | let max_value = *arr.iter().max().unwrap(); 24 | counting_sort(&mut arr, max_value); 25 | println!("Sorted array: {:?}", arr); 26 | } 27 | -------------------------------------------------------------------------------- /Unit_2/Radix_sort/python_radix_sort.py: -------------------------------------------------------------------------------- 1 | def counting_sort_for_radix(arr, exp): 2 | n = len(arr) 3 | output = [0] * n 4 | count = [0] * 10 5 | 6 | for i in range(n): 7 | index = (arr[i] // exp) % 10 8 | count[index] += 1 9 | 10 | for i in range(1, 10): 11 | count[i] += count[i - 1] 12 | 13 | for i in range(n - 1, -1, -1): 14 | index = (arr[i] // exp) % 10 15 | output[count[index] - 1] = arr[i] 16 | count[index] -= 1 17 | 18 | for i in range(n): 19 | arr[i] = output[i] 20 | 21 | def radix_sort(arr): 22 | max_val = max(arr) 23 | exp = 1 24 | while max_val // exp > 0: 25 | counting_sort_for_radix(arr, exp) 26 | exp *= 10 27 | 28 | arr = [170, 45, 75, 90, 802, 24, 2, 66] 29 | radix_sort(arr) 30 | print("Sorted array:", arr) 31 | -------------------------------------------------------------------------------- /Unit_2/Heap_Sort/rust_heap_sort.rs: -------------------------------------------------------------------------------- 1 | fn heap_sort(arr: &mut [i32]) { 2 | let n = arr.len(); 3 | for i in (0..n / 2).rev() { 4 | heapify(arr, n, i); 5 | } 6 | for i in (1..n).rev() { 7 | arr.swap(0, i); 8 | heapify(arr, i, 0); 9 | } 10 | } 11 | 12 | fn heapify(arr: &mut [i32], n: usize, i: usize) { 13 | let mut largest = i; 14 | let left = 2 * i + 1; 15 | let right = 2 * i + 2; 16 | 17 | if left < n && arr[left] > arr[largest] { 18 | largest = left; 19 | } 20 | if right < n && arr[right] > arr[largest] { 21 | largest = right; 22 | } 23 | if largest != i { 24 | arr.swap(i, largest); 25 | heapify(arr, n, largest); 26 | } 27 | } 28 | 29 | fn main() { 30 | let mut arr = [12, 11, 13, 5, 6, 7]; 31 | heap_sort(&mut arr); 32 | println!("Sorted array: {:?}", arr); 33 | } 34 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/03_Optimal_Merge_Pattern/python_optimal_merge.py: -------------------------------------------------------------------------------- 1 | import heapq 2 | 3 | def optimal_merge(sizes): 4 | # Create a min-heap from the list of file sizes 5 | heapq.heapify(sizes) 6 | 7 | total_cost = 0 # Total cost of merging 8 | 9 | # Merge files until one file remains 10 | while len(sizes) > 1: 11 | # Take the two smallest files 12 | first = heapq.heappop(sizes) 13 | second = heapq.heappop(sizes) 14 | 15 | # Merge the two files 16 | merged_size = first + second 17 | total_cost += merged_size 18 | 19 | # Push the merged size back into the heap 20 | heapq.heappush(sizes, merged_size) 21 | 22 | return total_cost 23 | 24 | if __name__ == "__main__": 25 | sizes = [8, 4, 6, 12] 26 | 27 | min_cost = optimal_merge(sizes) 28 | print(f"Minimum cost to merge files: {min_cost}") 29 | -------------------------------------------------------------------------------- /Unit_2/counting_Sort/cpp_counting_sort.cpp: 
-------------------------------------------------------------------------------- 1 | #include <iostream> 2 | #include <vector> 3 | #include <algorithm> 4 | 5 | void countingSort(std::vector<int>& arr, int max_value) { 6 | std::vector<int> count(max_value + 1, 0); 7 | std::vector<int> output(arr.size()); 8 | 9 | for (int num : arr) 10 | count[num]++; 11 | 12 | for (int i = 1; i <= max_value; i++) 13 | count[i] += count[i - 1]; 14 | 15 | for (int i = arr.size() - 1; i >= 0; i--) { 16 | output[count[arr[i]] - 1] = arr[i]; 17 | count[arr[i]]--; 18 | } 19 | 20 | arr = output; 21 | } 22 | 23 | int main() { 24 | std::vector<int> arr = {4, 2, 2, 8, 3, 3, 1}; 25 | int max_value = *std::max_element(arr.begin(), arr.end()); 26 | countingSort(arr, max_value); 27 | std::cout << "Sorted array: "; 28 | for (int num : arr) std::cout << num << " "; 29 | return 0; 30 | } 31 | -------------------------------------------------------------------------------- /Unit_2/Insertion_Sort/cpp_Insertion_Sort.cpp: -------------------------------------------------------------------------------- 1 | #include <iostream> 2 | using namespace std; 3 | 4 | void insertionSort(int arr[], int n) { 5 | for (int i = 1; i < n; i++) { 6 | int key = arr[i]; 7 | int j = i - 1; 8 | 9 | 10 | while (j >= 0 && arr[j] > key) { 11 | arr[j + 1] = arr[j]; 12 | j--; 13 | } 14 | arr[j + 1] = key; 15 | } 16 | } 17 | 18 | void printArray(int arr[], int n) { 19 | for (int i = 0; i < n; i++) { 20 | cout << arr[i] << " "; 21 | } 22 | cout << endl; 23 | } 24 | 25 | int main() { 26 | int arr[] = {64, 88, 51, 65, 90, 75, 34, 79, 46, 36}; 27 | int n = sizeof(arr) / sizeof(arr[0]); 28 | 29 | cout << "Unsorted array: "; 30 | printArray(arr, n); 31 | 32 | insertionSort(arr, n); 33 | 34 | cout << "Sorted array: "; 35 | printArray(arr, n); 36 | 37 | return 0; 38 | } 39 | -------------------------------------------------------------------------------- /Unit_2/Insertion_Sort/python_insertion_sort.py: -------------------------------------------------------------------------------- 1 | def insertionSort(lst): 2 | for i in range(1, len(lst)): 3 | key = lst[i] 4 | j = i - 1 5 | while j >= 0 and key < lst[j]: 6 | lst[j + 1] = lst[j] 7 | j -= 1 8 | lst[j + 1] = key 9 | return lst 10 | 11 | if __name__ == '__main__': 12 | lst = [64, 88, 51, 65, 90, 75, 34, 79, 46, 36] 13 | print(insertionSort(lst)) 14 | 15 | # 'if __name__ == "__main__":' is used so that this code runs only when the script is executed directly. 16 | # If we import this script into another Python file, this block will not execute automatically. 17 | # This practice is useful when we want to reuse the code without any unwanted execution. 18 | # It keeps the script modular and maintainable, where tests or demo code run only on direct execution. 
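# A minimal, hypothetical illustration of the guard described above (assuming this file is importable as `python_insertion_sort` from the same directory):
#
#     import python_insertion_sort                            # importing triggers nothing under the guard, so no output
#     print(python_insertion_sort.insertionSort([3, 1, 2]))   # the function remains reusable -> [1, 2, 3]
#
# Running `python python_insertion_sort.py` directly, by contrast, executes the guarded block and prints the sorted list.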
19 | -------------------------------------------------------------------------------- /Unit_2/Quick_Sort/cpp_quick_Sort.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int partition(std::vector& arr, int low, int high) { 5 | int pivot = arr[high]; 6 | int i = low - 1; 7 | for (int j = low; j < high; j++) { 8 | if (arr[j] < pivot) { 9 | i++; 10 | std::swap(arr[i], arr[j]); 11 | } 12 | } 13 | std::swap(arr[i + 1], arr[high]); 14 | return i + 1; 15 | } 16 | 17 | void quickSort(std::vector& arr, int low, int high) { 18 | if (low < high) { 19 | int pi = partition(arr, low, high); 20 | quickSort(arr, low, pi - 1); 21 | quickSort(arr, pi + 1, high); 22 | } 23 | } 24 | 25 | int main() { 26 | std::vector arr = {10, 7, 8, 9, 1, 5}; 27 | quickSort(arr, 0, arr.size() - 1); 28 | std::cout << "Sorted array: "; 29 | for (int num : arr) std::cout << num << " "; 30 | return 0; 31 | } 32 | -------------------------------------------------------------------------------- /Unit_2/Heap_Sort/cpp_heap_sort.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | void heapify(std::vector& arr, int n, int i) { 5 | int largest = i; 6 | int left = 2 * i + 1; 7 | int right = 2 * i + 2; 8 | 9 | if (left < n && arr[left] > arr[largest]) 10 | largest = left; 11 | if (right < n && arr[right] > arr[largest]) 12 | largest = right; 13 | if (largest != i) { 14 | std::swap(arr[i], arr[largest]); 15 | heapify(arr, n, largest); 16 | } 17 | } 18 | 19 | void heapSort(std::vector& arr) { 20 | int n = arr.size(); 21 | for (int i = n / 2 - 1; i >= 0; i--) 22 | heapify(arr, n, i); 23 | for (int i = n - 1; i > 0; i--) { 24 | std::swap(arr[0], arr[i]); 25 | heapify(arr, i, 0); 26 | } 27 | } 28 | 29 | int main() { 30 | std::vector arr = {12, 11, 13, 5, 6, 7}; 31 | heapSort(arr); 32 | std::cout << "Sorted array: "; 33 | for (int num : arr) std::cout << num << " "; 34 | return 0; 35 | } 36 | -------------------------------------------------------------------------------- /Unit_2/Radix_sort/rust_radix_sort.rs: -------------------------------------------------------------------------------- 1 | fn counting_sort_for_radix(arr: &mut [i32], exp: i32) { 2 | let n = arr.len(); 3 | let mut output = vec![0; n]; 4 | let mut count = vec![0; 10]; 5 | 6 | for &num in arr.iter() { 7 | let index = (num / exp) % 10; 8 | count[index as usize] += 1; 9 | } 10 | 11 | for i in 1..10 { 12 | count[i] += count[i - 1]; 13 | } 14 | 15 | for &num in arr.iter().rev() { 16 | let index = (num / exp) % 10; 17 | output[count[index as usize] as usize - 1] = num; 18 | count[index as usize] -= 1; 19 | } 20 | 21 | arr.copy_from_slice(&output); 22 | } 23 | 24 | fn radix_sort(arr: &mut [i32]) { 25 | let max = *arr.iter().max().unwrap(); 26 | let mut exp = 1; 27 | while max / exp > 0 { 28 | counting_sort_for_radix(arr, exp); 29 | exp *= 10; 30 | } 31 | } 32 | 33 | fn main() { 34 | let mut arr = [170, 45, 75, 90, 802, 24, 2, 66]; 35 | radix_sort(&mut arr); 36 | println!("Sorted array: {:?}", arr); 37 | } 38 | -------------------------------------------------------------------------------- /Unit_2/counting_Sort/c_counting_sort.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | void countingSort(int arr[], int n, int max) { 5 | int count[max + 1]; 6 | int output[n]; 7 | 8 | memset(count, 0, sizeof(count)); 9 | 10 | for (int i = 0; i < n; i++) 11 | count[arr[i]]++; 12 | 13 | for (int 
i = 1; i <= max; i++) 14 | count[i] += count[i - 1]; 15 | 16 | for (int i = n - 1; i >= 0; i--) { 17 | output[count[arr[i]] - 1] = arr[i]; 18 | count[arr[i]]--; 19 | } 20 | 21 | for (int i = 0; i < n; i++) 22 | arr[i] = output[i]; 23 | } 24 | 25 | void printArray(int arr[], int size) { 26 | for (int i = 0; i < size; i++) 27 | printf("%d ", arr[i]); 28 | printf("\n"); 29 | } 30 | 31 | int main() { 32 | int arr[] = {4, 2, 2, 8, 3, 3, 1}; 33 | int n = sizeof(arr) / sizeof(arr[0]); 34 | int max = 8; // Assumes the maximum value in the array is known 35 | countingSort(arr, n, max); 36 | printf("Sorted array: "); 37 | printArray(arr, n); 38 | return 0; 39 | } 40 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/03_0_1_Knapsack_problem/python_01_knapsack.py: -------------------------------------------------------------------------------- 1 | def knapsack(W, weights, values, n): 2 | # Create a 2D array to store the maximum value for each weight limit 3 | K = [[0 for _ in range(W + 1)] for _ in range(n + 1)] 4 | 5 | # Build the K table in bottom-up manner 6 | for i in range(n + 1): 7 | for w in range(W + 1): 8 | if i == 0 or w == 0: 9 | K[i][w] = 0 # Base case: no items or weight 10 | elif weights[i - 1] <= w: 11 | K[i][w] = max(values[i - 1] + K[i - 1][w - weights[i - 1]], K[i - 1][w]) 12 | else: 13 | K[i][w] = K[i - 1][w] 14 | 15 | return K[n][W] # Return the maximum value that can be obtained 16 | 17 | if __name__ == "__main__": 18 | values = [60, 100, 120] # Values of items 19 | weights = [10, 20, 30] # Weights of items 20 | W = 50 # Maximum weight capacity 21 | n = len(values) # Number of items 22 | 23 | max_value = knapsack(W, weights, values, n) 24 | print(f"Maximum value in Knapsack = {max_value}") 25 | -------------------------------------------------------------------------------- /Unit_2/Quick_Sort/c_Quick_sort.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void swap(int *a, int *b) { 4 | int temp = *a; 5 | *a = *b; 6 | *b = temp; 7 | } 8 | 9 | int partition(int arr[], int low, int high) { 10 | int pivot = arr[high]; 11 | int i = low - 1; 12 | 13 | for (int j = low; j < high; j++) { 14 | if (arr[j] < pivot) { 15 | i++; 16 | swap(&arr[i], &arr[j]); 17 | } 18 | } 19 | swap(&arr[i + 1], &arr[high]); 20 | return i + 1; 21 | } 22 | 23 | void quickSort(int arr[], int low, int high) { 24 | if (low < high) { 25 | int pi = partition(arr, low, high); 26 | quickSort(arr, low, pi - 1); 27 | quickSort(arr, pi + 1, high); 28 | } 29 | } 30 | 31 | void printArray(int arr[], int size) { 32 | for (int i = 0; i < size; i++) 33 | printf("%d ", arr[i]); 34 | printf("\n"); 35 | } 36 | 37 | int main() { 38 | int arr[] = {10, 7, 8, 9, 1, 5}; 39 | int n = sizeof(arr) / sizeof(arr[0]); 40 | quickSort(arr, 0, n - 1); 41 | printf("Sorted array: "); 42 | printArray(arr, n); 43 | return 0; 44 | } 45 | -------------------------------------------------------------------------------- /Unit_2/Radix_sort/cpp_radix_sort.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | void countingSortForRadix(std::vector& arr, int exp) { 6 | int n = arr.size(); 7 | std::vector output(n); 8 | std::vector count(10, 0); 9 | 10 | for (int i = 0; i < n; i++) 11 | count[(arr[i] / exp) % 10]++; 12 | 13 | for (int i = 1; i < 10; i++) 14 | count[i] += count[i - 1]; 15 | 16 | for (int i = n - 1; i >= 0; i--) { 17 | output[count[(arr[i] / exp) % 10] 
- 1] = arr[i]; 18 | count[(arr[i] / exp) % 10]--; 19 | } 20 | 21 | for (int i = 0; i < n; i++) 22 | arr[i] = output[i]; 23 | } 24 | 25 | void radixSort(std::vector& arr) { 26 | int max_val = *max_element(arr.begin(), arr.end()); 27 | for (int exp = 1; max_val / exp > 0; exp *= 10) 28 | countingSortForRadix(arr, exp); 29 | } 30 | 31 | int main() { 32 | std::vector arr = {170, 45, 75, 90, 802, 24, 2, 66}; 33 | radixSort(arr); 34 | std::cout << "Sorted array: "; 35 | for (int num : arr) std::cout << num << " "; 36 | return 0; 37 | } 38 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/03_0_1_Knapsack_problem/rust_01_knapsack.rs: -------------------------------------------------------------------------------- 1 | fn knapsack(w: usize, weights: &[usize], values: &[usize], n: usize) -> usize { 2 | // Create a 2D vector to store the maximum value for each weight limit 3 | let mut k = vec![vec![0; w + 1]; n + 1]; 4 | 5 | // Build the k table in bottom-up manner 6 | for i in 0..=n { 7 | for j in 0..=w { 8 | if i == 0 || j == 0 { 9 | k[i][j] = 0; // Base case: no items or weight 10 | } else if weights[i - 1] <= j { 11 | k[i][j] = k[i - 1][j].max(values[i - 1] + k[i - 1][j - weights[i - 1]]); 12 | } else { 13 | k[i][j] = k[i - 1][j]; 14 | } 15 | } 16 | } 17 | 18 | k[n][w] // Return the maximum value that can be obtained 19 | } 20 | 21 | fn main() { 22 | let values = vec![60, 100, 120]; // Values of items 23 | let weights = vec![10, 20, 30]; // Weights of items 24 | let w = 50; // Maximum weight capacity 25 | let n = values.len(); // Number of items 26 | 27 | let max_value = knapsack(w, &weights, &values, n); 28 | println!("Maximum value in Knapsack = {}", max_value); 29 | } 30 | -------------------------------------------------------------------------------- /Unit_2/Heap_Sort/c_Heap_sort.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void swap(int *a, int *b) { 4 | int temp = *a; 5 | *a = *b; 6 | *b = temp; 7 | } 8 | 9 | void heapify(int arr[], int n, int i) { 10 | int largest = i; 11 | int left = 2 * i + 1; 12 | int right = 2 * i + 2; 13 | 14 | if (left < n && arr[left] > arr[largest]) 15 | largest = left; 16 | 17 | if (right < n && arr[right] > arr[largest]) 18 | largest = right; 19 | 20 | if (largest != i) { 21 | swap(&arr[i], &arr[largest]); 22 | heapify(arr, n, largest); 23 | } 24 | } 25 | 26 | void heapSort(int arr[], int n) { 27 | for (int i = n / 2 - 1; i >= 0; i--) 28 | heapify(arr, n, i); 29 | 30 | for (int i = n - 1; i >= 0; i--) { 31 | swap(&arr[0], &arr[i]); 32 | heapify(arr, i, 0); 33 | } 34 | } 35 | 36 | void printArray(int arr[], int size) { 37 | for (int i = 0; i < size; i++) 38 | printf("%d ", arr[i]); 39 | printf("\n"); 40 | } 41 | 42 | int main() { 43 | int arr[] = {12, 11, 13, 5, 6, 7}; 44 | int n = sizeof(arr) / sizeof(arr[0]); 45 | heapSort(arr, n); 46 | printf("Sorted array: "); 47 | printArray(arr, n); 48 | return 0; 49 | } 50 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/03_Optimal_Merge_Pattern/rust_optimal_merge.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BinaryHeap; 2 | 3 | fn optimal_merge(sizes: &mut Vec) -> i32 { 4 | // Create a max-heap using BinaryHeap and invert the values to use as a min-heap 5 | let mut min_heap: BinaryHeap = BinaryHeap::new(); 6 | 7 | // Push all file sizes into the min-heap 8 | for &size in sizes { 9 | 
min_heap.push(-size); // Invert to simulate min-heap behavior 10 | } 11 | 12 | let mut total_cost = 0; // Total cost of merging 13 | 14 | // Merge files until one file remains 15 | while min_heap.len() > 1 { 16 | // Take the two smallest files (inverted back to positive) 17 | let first = -min_heap.pop().unwrap(); 18 | let second = -min_heap.pop().unwrap(); 19 | 20 | // Merge the two files 21 | let merged_size = first + second; 22 | total_cost += merged_size; 23 | 24 | // Push the merged size back into the min-heap (inverted) 25 | min_heap.push(-merged_size); 26 | } 27 | 28 | total_cost 29 | } 30 | 31 | fn main() { 32 | let mut sizes = vec![8, 4, 6, 12]; 33 | 34 | let min_cost = optimal_merge(&mut sizes); 35 | println!("Minimum cost to merge files: {}", min_cost); 36 | } 37 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/03_Optimal_Merge_Pattern/cpp_optimal_merge.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | // Function to calculate the optimal merge cost 8 | int optimalMerge(vector& sizes) { 9 | // Min-heap to store file sizes 10 | priority_queue, greater> minHeap; 11 | 12 | // Push all file sizes into the min-heap 13 | for (int size : sizes) { 14 | minHeap.push(size); 15 | } 16 | 17 | int totalCost = 0; // Total cost of merging 18 | 19 | // Merge files until one file remains 20 | while (minHeap.size() > 1) { 21 | // Take the two smallest files 22 | int first = minHeap.top(); minHeap.pop(); 23 | int second = minHeap.top(); minHeap.pop(); 24 | 25 | // Merge the two files 26 | int mergedSize = first + second; 27 | totalCost += mergedSize; 28 | 29 | // Push the merged size back into the min-heap 30 | minHeap.push(mergedSize); 31 | } 32 | 33 | return totalCost; 34 | } 35 | 36 | int main() { 37 | vector sizes = {8, 4, 6, 12}; 38 | 39 | int minCost = optimalMerge(sizes); 40 | cout << "Minimum cost to merge files: " << minCost << endl; 41 | 42 | return 0; 43 | } 44 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/04_Floyd_Warshall_Algorithm/python_FW.py: -------------------------------------------------------------------------------- 1 | # Define a large constant for infinity 2 | INF = float('inf') 3 | 4 | def floyd_warshall(graph): 5 | # Number of vertices in the graph 6 | V = len(graph) 7 | 8 | # Initialize the distance matrix 9 | dist = [[graph[i][j] for j in range(V)] for i in range(V)] 10 | 11 | # Update the distances using the Floyd-Warshall algorithm 12 | for k in range(V): 13 | for i in range(V): 14 | for j in range(V): 15 | if dist[i][k] + dist[k][j] < dist[i][j]: 16 | dist[i][j] = dist[i][k] + dist[k][j] 17 | 18 | # Print the resulting shortest path matrix 19 | print_solution(dist) 20 | 21 | def print_solution(dist): 22 | print("The following matrix shows the shortest distances between every pair of vertices:") 23 | for i in range(len(dist)): 24 | for j in range(len(dist)): 25 | if dist[i][j] == INF: 26 | print("INF", end="\t") 27 | else: 28 | print(dist[i][j], end="\t") 29 | print() 30 | 31 | # Input graph represented as an adjacency matrix 32 | graph = [ 33 | [0, 3, INF, 7], 34 | [8, 0, 2, INF], 35 | [5, INF, 0, 1], 36 | [2, INF, INF, 0] 37 | ] 38 | 39 | floyd_warshall(graph) 40 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/03_0_1_Knapsack_problem/c_01_knapsack.c: 
-------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | 3 | int max(int a, int b) { 4 | return (a > b) ? a : b; 5 | } 6 | 7 | // Function to solve 0/1 Knapsack Problem 8 | int knapsack(int W, int weights[], int values[], int n) { 9 | int i, w; 10 | int K[n + 1][W + 1]; 11 | 12 | // Build the K table in bottom-up manner 13 | for (i = 0; i <= n; i++) { 14 | for (w = 0; w <= W; w++) { 15 | if (i == 0 || w == 0) { 16 | K[i][w] = 0; // Base case: no items or weight 17 | } else if (weights[i - 1] <= w) { 18 | K[i][w] = max(values[i - 1] + K[i - 1][w - weights[i - 1]], K[i - 1][w]); 19 | } else { 20 | K[i][w] = K[i - 1][w]; 21 | } 22 | } 23 | } 24 | 25 | return K[n][W]; // Return the maximum value that can be obtained 26 | } 27 | 28 | int main() { 29 | int values[] = {60, 100, 120}; // Values of items 30 | int weights[] = {10, 20, 30}; // Weights of items 31 | int W = 50; // Maximum weight capacity 32 | int n = sizeof(values) / sizeof(values[0]); // Number of items 33 | 34 | int max_value = knapsack(W, weights, values, n); 35 | printf("Maximum value in Knapsack = %d\n", max_value); 36 | 37 | return 0; 38 | } 39 | -------------------------------------------------------------------------------- /Unit_2/Radix_sort/c_radix_sort.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | 3 | int getMax(int arr[], int n) { 4 | int max = arr[0]; 5 | for (int i = 1; i < n; i++) 6 | if (arr[i] > max) 7 | max = arr[i]; 8 | return max; 9 | } 10 | 11 | void countingSortForRadix(int arr[], int n, int exp) { 12 | int output[n]; 13 | int count[10] = {0}; 14 | 15 | for (int i = 0; i < n; i++) 16 | count[(arr[i] / exp) % 10]++; 17 | 18 | for (int i = 1; i < 10; i++) 19 | count[i] += count[i - 1]; 20 | 21 | for (int i = n - 1; i >= 0; i--) { 22 | output[count[(arr[i] / exp) % 10] - 1] = arr[i]; 23 | count[(arr[i] / exp) % 10]--; 24 | } 25 | 26 | for (int i = 0; i < n; i++) 27 | arr[i] = output[i]; 28 | } 29 | 30 | void radixSort(int arr[], int n) { 31 | int max = getMax(arr, n); 32 | for (int exp = 1; max / exp > 0; exp *= 10) 33 | countingSortForRadix(arr, n, exp); 34 | } 35 | 36 | void printArray(int arr[], int size) { 37 | for (int i = 0; i < size; i++) 38 | printf("%d ", arr[i]); 39 | printf("\n"); 40 | } 41 | 42 | int main() { 43 | int arr[] = {170, 45, 75, 90, 802, 24, 2, 66}; 44 | int n = sizeof(arr) / sizeof(arr[0]); 45 | radixSort(arr, n); 46 | printf("Sorted array: "); 47 | printArray(arr, n); 48 | return 0; 49 | } 50 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/03_Optimal_Merge_Pattern/c_optimal_merge.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | #include <stdlib.h> 3 | 4 | // Function to compare two integers (used for sorting) 5 | int compare(const void* a, const void* b) { 6 | return (*(int*)a - *(int*)b); 7 | } 8 | 9 | // Function to calculate the optimal merge cost 10 | int optimal_merge(int sizes[], int n) { 11 | // Sort the array of sizes 12 | qsort(sizes, n, sizeof(int), compare); 13 | 14 | int total_cost = 0; // Total cost of merging 15 | int i; 16 | 17 | // Merge files until one file remains 18 | while (n > 1) { 19 | // Take the two smallest files 20 | int merged_size = sizes[0] + sizes[1]; 21 | 22 | // Update total cost 23 | total_cost += merged_size; 24 | 25 | // Shift sizes down and insert the merged file 26 | for (i = 2; i < n; i++) { 27 | sizes[i - 1] = sizes[i]; 28 | } 29 | sizes[n - 2] = merged_size; // 
Insert the merged size 30 | n--; // Decrease the number of files 31 | // Sort the array again 32 | qsort(sizes, n, sizeof(int), compare); 33 | } 34 | 35 | return total_cost; 36 | } 37 | 38 | int main() { 39 | int sizes[] = {8, 4, 6, 12}; 40 | int n = sizeof(sizes) / sizeof(sizes[0]); 41 | 42 | int min_cost = optimal_merge(sizes, n); 43 | printf("Minimum cost to merge files: %d\n", min_cost); 44 | return 0; 45 | } 46 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/03_0_1_Knapsack_problem/cpp_01_knapsack.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | using namespace std; 4 | 5 | // Function to solve 0/1 Knapsack Problem 6 | int knapsack(int W, const vector& weights, const vector& values, int n) { 7 | // Create a 2D array to store the maximum value for each weight limit 8 | vector> K(n + 1, vector(W + 1, 0)); 9 | 10 | // Build the K table in bottom-up manner 11 | for (int i = 0; i <= n; i++) { 12 | for (int w = 0; w <= W; w++) { 13 | if (i == 0 || w == 0) { 14 | K[i][w] = 0; // Base case: no items or weight 15 | } else if (weights[i - 1] <= w) { 16 | K[i][w] = max(values[i - 1] + K[i - 1][w - weights[i - 1]], K[i - 1][w]); 17 | } else { 18 | K[i][w] = K[i - 1][w]; 19 | } 20 | } 21 | } 22 | 23 | return K[n][W]; // Return the maximum value that can be obtained 24 | } 25 | 26 | int main() { 27 | vector values = {60, 100, 120}; // Values of items 28 | vector weights = {10, 20, 30}; // Weights of items 29 | int W = 50; // Maximum weight capacity 30 | int n = values.size(); // Number of items 31 | 32 | int max_value = knapsack(W, weights, values, n); 33 | cout << "Maximum value in Knapsack = " << max_value << endl; 34 | 35 | return 0; 36 | } 37 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/04_Floyd_Warshall_Algorithm/rust_FW.rs: -------------------------------------------------------------------------------- 1 | const INF: i32 = i32::MAX; // Define a constant for infinity 2 | 3 | fn floyd_warshall(graph: &mut Vec>) { 4 | let v = graph.len(); 5 | 6 | // Update the distances using the Floyd-Warshall algorithm 7 | for k in 0..v { 8 | for i in 0..v { 9 | for j in 0..v { 10 | if graph[i][k] != INF && graph[k][j] != INF { 11 | if graph[i][k] + graph[k][j] < graph[i][j] { 12 | graph[i][j] = graph[i][k] + graph[k][j]; 13 | } 14 | } 15 | } 16 | } 17 | } 18 | } 19 | 20 | fn print_solution(graph: &Vec>) { 21 | println!("The following matrix shows the shortest distances between every pair of vertices:"); 22 | for i in 0..graph.len() { 23 | for j in 0..graph.len() { 24 | if graph[i][j] == INF { 25 | print!("INF\t"); 26 | } else { 27 | print!("{}\t", graph[i][j]); 28 | } 29 | } 30 | println!(); 31 | } 32 | } 33 | 34 | fn main() { 35 | // Input graph represented as an adjacency matrix 36 | let mut graph = vec![ 37 | vec![0, 3, INF, 7], 38 | vec![8, 0, 2, INF], 39 | vec![5, INF, 0, 1], 40 | vec![2, INF, INF, 0], 41 | ]; 42 | 43 | floyd_warshall(&mut graph); 44 | print_solution(&graph); 45 | } 46 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/02_Longest_Common_Subsequence/python_LCS.py: -------------------------------------------------------------------------------- 1 | def lcs(X, Y): 2 | m = len(X) 3 | n = len(Y) 4 | 5 | # Create a 2D array to store lengths of longest common subsequence 6 | L = [[0] * (n + 1) for _ in range(m + 1)] 7 | 8 | # Build the LCS 
table 9 | for i in range(m + 1): 10 | for j in range(n + 1): 11 | if i == 0 or j == 0: 12 | L[i][j] = 0 # Base case 13 | elif X[i - 1] == Y[j - 1]: 14 | L[i][j] = L[i - 1][j - 1] + 1 # Characters match 15 | else: 16 | L[i][j] = max(L[i - 1][j], L[i][j - 1]) # Take the maximum 17 | 18 | # Length of LCS is in L[m][n] 19 | print(f"Length of Longest Common Subsequence: {L[m][n]}") 20 | 21 | # Backtrack to find the LCS string 22 | index = L[m][n] 23 | lcs_str = [''] * index # Create a list to store the LCS string 24 | # (no terminator is needed for a Python list) 25 | 26 | # Start from the bottom-right corner of the LCS table 27 | i, j = m, n 28 | while i > 0 and j > 0: 29 | if X[i - 1] == Y[j - 1]: 30 | lcs_str[index - 1] = X[i - 1] # Store the character 31 | i -= 1 32 | j -= 1 33 | index -= 1 34 | elif L[i - 1][j] > L[i][j - 1]: 35 | i -= 1 36 | else: 37 | j -= 1 38 | 39 | print("Longest Common Subsequence:", ''.join(lcs_str)) 40 | 41 | 42 | if __name__ == "__main__": 43 | X = "AGGTAB" 44 | Y = "GXTXAYB" 45 | lcs(X, Y) 46 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/02_Fractional_knapsack_problem/python_fractional_knapsack.py: -------------------------------------------------------------------------------- 1 | # Define an Item class with value and weight 2 | class Item: 3 | def __init__(self, value, weight): 4 | self.value = value 5 | self.weight = weight 6 | 7 | # Comparison function to sort items by value-to-weight ratio in descending order 8 | def get_ratio(item): 9 | return item.value / item.weight 10 | 11 | # Function to calculate the maximum value in the knapsack 12 | def fractional_knapsack(capacity, items): 13 | # Sort items by value-to-weight ratio in descending order 14 | items.sort(key=get_ratio, reverse=True) 15 | 16 | max_value = 0.0 # Maximum value in knapsack 17 | current_weight = 0 # Current weight in knapsack 18 | 19 | # Loop through sorted items 20 | for item in items: 21 | if current_weight + item.weight <= capacity: 22 | # If item fits, take it entirely 23 | current_weight += item.weight 24 | max_value += item.value 25 | else: 26 | # If item can't fit, take the fractional part 27 | remaining_weight = capacity - current_weight 28 | max_value += item.value * (remaining_weight / item.weight) 29 | break 30 | 31 | return max_value 32 | 33 | # Define knapsack capacity and items (value, weight) 34 | capacity = 50 35 | items = [Item(60, 10), Item(100, 20), Item(120, 30)] 36 | 37 | # Output the maximum value 38 | print("Maximum value in Knapsack =", fractional_knapsack(capacity, items)) 39 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/01_Matrix_Chain_Multiplication/python_MCM.py: -------------------------------------------------------------------------------- 1 | def print_optimal_parenthesis(s, i, j): 2 | if i == j: 3 | print(f"A{i}", end="") 4 | else: 5 | print("(", end="") 6 | print_optimal_parenthesis(s, i, s[i][j]) 7 | print_optimal_parenthesis(s, s[i][j] + 1, j) 8 | print(")", end="") 9 | 10 | def matrix_chain_order(p): 11 | n = len(p) - 1 # Number of matrices 12 | m = [[0] * n for _ in range(n)] # m[i][j] = minimum number of multiplications needed 13 | s = [[0] * n for _ in range(n)] # s[i][j] = index of the matrix after which the product is split 14 | 15 | # L is chain length 16 | for L in range(2, n + 1): # L = 2 to n 17 | for i in range(n - L + 1): 18 | j = i + L - 1 19 | m[i][j] = float('inf') # Set to infinity 20 | 21 | # Try all possible splits 22 | for k in range(i, j): 
23 | q = m[i][k] + m[k + 1][j] + p[i] * p[k + 1] * p[j + 1] 24 | if q < m[i][j]: 25 | m[i][j] = q 26 | s[i][j] = k # Store the split point 27 | 28 | print("Minimum number of multiplications:", m[0][n - 1]) 29 | print("Optimal Parenthesization:", end=" ") 30 | print_optimal_parenthesis(s, 0, n - 1) 31 | print() # For a new line 32 | 33 | # Example usage 34 | if __name__ == "__main__": 35 | # Dimensions of matrices 36 | # For example: A1 (10x30), A2 (30x5), A3 (5x60) 37 | arr = [10, 30, 5, 60] # p = [10, 30, 5, 60] 38 | matrix_chain_order(arr) 39 | -------------------------------------------------------------------------------- /Unit_5/String_Matching/python_KMP.py: -------------------------------------------------------------------------------- 1 | def computeLPSArray(pattern): 2 | pattern_length = len(pattern) 3 | lps = [0] * pattern_length # Longest Prefix Suffix array 4 | length = 0 # Length of the previous longest prefix suffix 5 | i = 1 6 | 7 | # Build the LPS array 8 | while i < pattern_length: 9 | if pattern[i] == pattern[length]: 10 | length += 1 11 | lps[i] = length 12 | i += 1 13 | else: 14 | # Mismatch after length matches 15 | if length != 0: 16 | length = lps[length - 1] # Use the previous lps value 17 | else: 18 | lps[i] = 0 19 | i += 1 20 | 21 | return lps 22 | 23 | 24 | def KMP(text, pattern): 25 | text_length = len(text) 26 | pattern_length = len(pattern) 27 | lps = computeLPSArray(pattern) # Preprocess the pattern to create the LPS array 28 | 29 | i = 0 # Index for text 30 | j = 0 # Index for pattern 31 | 32 | while i < text_length: 33 | if pattern[j] == text[i]: 34 | i += 1 35 | j += 1 36 | 37 | if j == pattern_length: 38 | print(f"Pattern found at index {i - j}") 39 | j = lps[j - 1] # Reset j using lps 40 | elif i < text_length and pattern[j] != text[i]: 41 | # Mismatch after j matches 42 | if j != 0: 43 | j = lps[j - 1] # Use the previous lps value 44 | else: 45 | i += 1 46 | 47 | 48 | # Example usage 49 | if __name__ == "__main__": 50 | text = "ABABDABACDABABCABAB" 51 | pattern = "ABABCABAB" 52 | 53 | KMP(text, pattern) 54 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/01_Matrix_Chain_Multiplication/rust_MCM.rs: -------------------------------------------------------------------------------- 1 | // No imports are needed here; i32::MAX is an associated constant of i32. 2 | 3 | fn print_optimal_parenthesis(s: &Vec<Vec<usize>>, i: usize, j: usize) { 4 | if i == j { 5 | print!("A{}", i); 6 | } else { 7 | print!("("); 8 | print_optimal_parenthesis(s, i, s[i][j]); 9 | print_optimal_parenthesis(s, s[i][j] + 1, j); 10 | print!(")"); 11 | } 12 | } 13 | 14 | fn matrix_chain_order(p: Vec<i32>) { 15 | let n = p.len() - 1; // Number of matrices 16 | let mut m = vec![vec![0; n]; n]; // m[i][j] = minimum number of multiplications needed 17 | let mut s = vec![vec![0; n]; n]; // s[i][j] = index of the matrix after which the product is split 18 | 19 | // L is chain length 20 | for L in 2..=n { // L = 2 to n 21 | for i in 0..=n - L { 22 | let j = i + L - 1; 23 | m[i][j] = i32::MAX; // Set to maximum value 24 | 25 | // Try all possible splits 26 | for k in i..j { 27 | let q = m[i][k] + m[k + 1][j] + p[i] * p[k + 1] * p[j + 1]; 28 | if q < m[i][j] { 29 | m[i][j] = q; 30 | s[i][j] = k; // Store the split point 31 | } 32 | } 33 | } 34 | } 35 | 36 | println!("Minimum number of multiplications: {}", m[0][n - 1]); 37 | print!("Optimal Parenthesization: "); 38 | print_optimal_parenthesis(&s, 0, n - 1); 39 | println!(); // New line 40 | } 41 | 42 | fn main() { 43 | // Dimensions of matrices 44 | // 
For example: A1 (10x30), A2 (30x5), A3 (5x60) 45 | let arr = vec![10, 30, 5, 60]; // p = [10, 30, 5, 60] 46 | matrix_chain_order(arr); 47 | } 48 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/02_Longest_Common_Subsequence/c_LCS.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int max(int a, int b) { 5 | return (a > b) ? a : b; 6 | } 7 | 8 | void lcs(const char *X, const char *Y) { 9 | int m = strlen(X); 10 | int n = strlen(Y); 11 | int L[m + 1][n + 1]; // LCS table 12 | 13 | // Building the LCS table 14 | for (int i = 0; i <= m; i++) { 15 | for (int j = 0; j <= n; j++) { 16 | if (i == 0 || j == 0) { 17 | L[i][j] = 0; // Base case 18 | } else if (X[i - 1] == Y[j - 1]) { 19 | L[i][j] = L[i - 1][j - 1] + 1; // Characters match 20 | } else { 21 | L[i][j] = max(L[i - 1][j], L[i][j - 1]); // Take the maximum 22 | } 23 | } 24 | } 25 | 26 | // Length of LCS is in L[m][n] 27 | printf("Length of Longest Common Subsequence: %d\n", L[m][n]); 28 | 29 | // Printing the LCS 30 | int index = L[m][n]; 31 | char lcs_str[index + 1]; 32 | lcs_str[index] = '\0'; // Null-terminate the LCS string 33 | 34 | // Backtrack to find the LCS string 35 | int i = m, j = n; 36 | while (i > 0 && j > 0) { 37 | if (X[i - 1] == Y[j - 1]) { 38 | lcs_str[index - 1] = X[i - 1]; // Store the character 39 | i--; 40 | j--; 41 | index--; 42 | } else if (L[i - 1][j] > L[i][j - 1]) { 43 | i--; 44 | } else { 45 | j--; 46 | } 47 | } 48 | 49 | printf("Longest Common Subsequence: %s\n", lcs_str); 50 | } 51 | 52 | int main() { 53 | const char *X = "AGGTAB"; 54 | const char *Y = "GXTXAYB"; 55 | lcs(X, Y); 56 | return 0; 57 | } 58 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/04_Floyd_Warshall_Algorithm/cpp_FW.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #define INF std::numeric_limits::max() // Define a constant for infinity 6 | 7 | void floydWarshall(std::vector>& graph) { 8 | int V = graph.size(); // Number of vertices 9 | 10 | // Update the distances using the Floyd-Warshall algorithm 11 | for (int k = 0; k < V; k++) { 12 | for (int i = 0; i < V; i++) { 13 | for (int j = 0; j < V; j++) { 14 | if (graph[i][k] != INF && graph[k][j] != INF) { 15 | if (graph[i][k] + graph[k][j] < graph[i][j]) { 16 | graph[i][j] = graph[i][k] + graph[k][j]; 17 | } 18 | } 19 | } 20 | } 21 | } 22 | } 23 | 24 | void printSolution(const std::vector>& graph) { 25 | std::cout << "The following matrix shows the shortest distances between every pair of vertices:\n"; 26 | for (int i = 0; i < graph.size(); i++) { 27 | for (int j = 0; j < graph.size(); j++) { 28 | if (graph[i][j] == INF) { 29 | std::cout << "INF\t"; 30 | } else { 31 | std::cout << graph[i][j] << "\t"; 32 | } 33 | } 34 | std::cout << "\n"; 35 | } 36 | } 37 | 38 | int main() { 39 | // Input graph represented as an adjacency matrix 40 | std::vector> graph = { 41 | {0, 3, INF, 7}, 42 | {8, 0, 2, INF}, 43 | {5, INF, 0, 1}, 44 | {2, INF, INF, 0} 45 | }; 46 | 47 | floydWarshall(graph); 48 | printSolution(graph); 49 | 50 | return 0; 51 | } 52 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/02_Fractional_knapsack_problem/rust_fractional_knapsack.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug)] 2 | struct Item { 3 | 
value: i32, 4 | weight: i32, 5 | } 6 | 7 | // Function to calculate value-to-weight ratio 8 | fn get_ratio(item: &Item) -> f64 { 9 | item.value as f64 / item.weight as f64 10 | } 11 | 12 | // Function to calculate the maximum value in the knapsack 13 | fn fractional_knapsack(capacity: i32, items: &mut Vec) -> f64 { 14 | // Sort items by value-to-weight ratio in descending order 15 | items.sort_by(|a, b| get_ratio(b).partial_cmp(&get_ratio(a)).unwrap()); 16 | 17 | let mut max_value = 0.0; // Maximum value in knapsack 18 | let mut current_weight = 0; // Current weight in knapsack 19 | 20 | // Loop through sorted items 21 | for item in items { 22 | if current_weight + item.weight <= capacity { 23 | // If the item can fit, take it entirely 24 | current_weight += item.weight; 25 | max_value += item.value as f64; 26 | } else { 27 | // If the item can't fit, take the fractional part 28 | let remaining_weight = capacity - current_weight; 29 | max_value += item.value as f64 * (remaining_weight as f64 / item.weight as f64); 30 | break; 31 | } 32 | } 33 | 34 | max_value 35 | } 36 | 37 | fn main() { 38 | let capacity = 50; // Maximum weight capacity of knapsack 39 | let mut items = vec![ 40 | Item { value: 60, weight: 10 }, 41 | Item { value: 100, weight: 20 }, 42 | Item { value: 120, weight: 30 }, 43 | ]; 44 | 45 | let max_value = fractional_knapsack(capacity, &mut items); 46 | println!("Maximum value in Knapsack = {:.2}", max_value); 47 | } 48 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/05_kruskal_algorithm/python_kruskal_algorithm.py: -------------------------------------------------------------------------------- 1 | class Edge: 2 | def __init__(self, u, v, weight): 3 | self.u = u 4 | self.v = v 5 | self.weight = weight 6 | 7 | def find(parent, i): 8 | if parent[i] != i: 9 | parent[i] = find(parent, parent[i]) # Path compression 10 | return parent[i] 11 | 12 | def union(parent, rank, x, y): 13 | rootX = find(parent, x) 14 | rootY = find(parent, y) 15 | 16 | if rootX != rootY: 17 | if rank[rootX] < rank[rootY]: 18 | parent[rootX] = rootY 19 | elif rank[rootX] > rank[rootY]: 20 | parent[rootY] = rootX 21 | else: 22 | parent[rootY] = rootX 23 | rank[rootX] += 1 24 | 25 | def kruskal(vertices, edges): 26 | edges.sort(key=lambda edge: edge.weight) # Sort edges by weight 27 | 28 | parent = [i for i in range(vertices)] 29 | rank = [0] * vertices 30 | 31 | print("Edges in the Minimum Spanning Tree:") 32 | mst_weight = 0 33 | 34 | for edge in edges: 35 | if find(parent, edge.u) != find(parent, edge.v): 36 | print(f"{edge.u} -- {edge.v} == {edge.weight}") 37 | union(parent, rank, edge.u, edge.v) 38 | mst_weight += edge.weight 39 | 40 | return mst_weight 41 | 42 | def main(): 43 | vertices = int(input("Enter the number of vertices: ")) 44 | edges = [] 45 | 46 | num_edges = int(input("Enter the number of edges: ")) 47 | print("Enter the edges (u v weight):") 48 | for _ in range(num_edges): 49 | u, v, weight = map(int, input().split()) 50 | edges.append(Edge(u, v, weight)) 51 | 52 | kruskal(vertices, edges) 53 | 54 | if __name__ == "__main__": 55 | main() 56 | -------------------------------------------------------------------------------- /Unit_2/Merge_Sort/Python_Merge_Sort.py: -------------------------------------------------------------------------------- 1 | # Do subarrays ko merge karne wali function 2 | def merge(arr, left, mid, right): 3 | n1 = mid - left + 1 4 | n2 = right - mid 5 | 6 | # Temporary arrays banayenge 7 | L = arr[left:mid+1] 8 
| R = arr[mid+1:right+1] 9 | 10 | # Merge process start karenge 11 | i = j = 0 12 | k = left 13 | 14 | # Dono subarrays ko merge karenge 15 | while i < len(L) and j < len(R): 16 | if L[i] <= R[j]: 17 | arr[k] = L[i] 18 | i += 1 19 | else: 20 | arr[k] = R[j] 21 | j += 1 22 | k += 1 23 | 24 | # Agar L[] mein kuch elements bache hain to unhe copy karenge 25 | while i < len(L): 26 | arr[k] = L[i] 27 | i += 1 28 | k += 1 29 | 30 | # Agar R[] mein kuch elements bache hain to unhe copy karenge 31 | while j < len(R): 32 | arr[k] = R[j] 33 | j += 1 34 | k += 1 35 | 36 | # Merge Sort ko implement karne wali function 37 | def mergeSort(arr, left, right): 38 | if left < right: 39 | mid = left + (right - left) // 2 40 | 41 | # Pehla aur doosra half ko sort karenge 42 | mergeSort(arr, left, mid) 43 | mergeSort(arr, mid + 1, right) 44 | 45 | # Sorted halves ko merge karenge 46 | merge(arr, left, mid, right) 47 | 48 | # Array ko print karne ki utility function 49 | def printArray(arr): 50 | for i in arr: 51 | print(i, end=" ") 52 | print() 53 | 54 | # Main code 55 | if __name__ == "__main__": 56 | arr = [12, 11, 13, 5, 6, 7] 57 | 58 | print("Given array hai") 59 | printArray(arr) 60 | 61 | mergeSort(arr, 0, len(arr) - 1) 62 | 63 | print("\nSorted array hai") 64 | printArray(arr) 65 | -------------------------------------------------------------------------------- /Unit_5/Binary_Search_Tree/python_BST.py: -------------------------------------------------------------------------------- 1 | class Node: 2 | def __init__(self, key): 3 | self.key = key 4 | self.left = None 5 | self.right = None 6 | 7 | class BST: 8 | def __init__(self): 9 | self.root = None 10 | 11 | def insert(self, key): 12 | if self.root is None: 13 | self.root = Node(key) 14 | else: 15 | self._insert(self.root, key) 16 | 17 | def _insert(self, node, key): 18 | if key < node.key: 19 | if node.left is None: 20 | node.left = Node(key) 21 | else: 22 | self._insert(node.left, key) 23 | elif key > node.key: 24 | if node.right is None: 25 | node.right = Node(key) 26 | else: 27 | self._insert(node.right, key) 28 | 29 | def inorder(self): 30 | return self._inorder(self.root) 31 | 32 | def _inorder(self, node): 33 | return self._inorder(node.left) + [node.key] + self._inorder(node.right) if node else [] 34 | 35 | def search(self, key): 36 | return self._search(self.root, key) 37 | 38 | def _search(self, node, key): 39 | if node is None or node.key == key: 40 | return node 41 | if key < node.key: 42 | return self._search(node.left, key) 43 | return self._search(node.right, key) 44 | 45 | # Example usage of BST 46 | bst = BST() 47 | bst.insert(50) 48 | bst.insert(30) 49 | bst.insert(20) 50 | bst.insert(40) 51 | bst.insert(70) 52 | bst.insert(60) 53 | bst.insert(80) 54 | 55 | print("In-order traversal of the BST:", bst.inorder()) 56 | 57 | key = 40 58 | if bst.search(key): 59 | print(f"{key} found in the BST.") 60 | else: 61 | print(f"{key} not found in the BST.") 62 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/02_Longest_Common_Subsequence/rust_LCS.rs: -------------------------------------------------------------------------------- 1 | fn lcs(x: &str, y: &str) { 2 | let m = x.len(); 3 | let n = y.len(); 4 | 5 | // Create a 2D vector to store lengths of longest common subsequence 6 | let mut l: Vec> = vec![vec![0; n + 1]; m + 1]; 7 | 8 | // Build the LCS table 9 | for i in 0..=m { 10 | for j in 0..=n { 11 | if i == 0 || j == 0 { 12 | l[i][j] = 0; // Base case 13 | } else if x.as_bytes()[i 
- 1] == y.as_bytes()[j - 1] { 14 | l[i][j] = l[i - 1][j - 1] + 1; // Characters match 15 | } else { 16 | l[i][j] = std::cmp::max(l[i - 1][j], l[i][j - 1]); // Take the maximum 17 | } 18 | } 19 | } 20 | 21 | // Length of LCS is in l[m][n] 22 | println!("Length of Longest Common Subsequence: {}", l[m][n]); 23 | 24 | // Backtrack to find the LCS string 25 | let mut index = l[m][n]; 26 | let mut lcs_str = vec!['\0'; index]; // Create a vector to store the LCS string 27 | 28 | // Start from the bottom-right corner of the LCS table 29 | let mut i = m; 30 | let mut j = n; 31 | 32 | while i > 0 && j > 0 { 33 | if x.as_bytes()[i - 1] == y.as_bytes()[j - 1] { 34 | lcs_str[index - 1] = x.as_bytes()[i - 1] as char; // Store the character 35 | i -= 1; 36 | j -= 1; 37 | index -= 1; 38 | } else if l[i - 1][j] > l[i][j - 1] { 39 | i -= 1; 40 | } else { 41 | j -= 1; 42 | } 43 | } 44 | 45 | // Print the LCS string 46 | println!("Longest Common Subsequence: {}", lcs_str.iter().collect::()); 47 | } 48 | 49 | fn main() { 50 | let x = "AGGTAB"; 51 | let y = "GXTXAYB"; 52 | lcs(x, y); 53 | } 54 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/04_Floyd_Warshall_Algorithm/c_FW.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #define V 4 // Number of vertices in the graph 5 | 6 | void printSolution(int dist[][V]); 7 | 8 | void floydWarshall(int graph[][V]) { 9 | // Create a 2D array to store the shortest path distances 10 | int dist[V][V], i, j, k; 11 | 12 | // Initialize the distance array with the given graph values 13 | for (i = 0; i < V; i++) { 14 | for (j = 0; j < V; j++) { 15 | dist[i][j] = graph[i][j]; 16 | } 17 | } 18 | 19 | // Update the distances using the Floyd-Warshall algorithm 20 | for (k = 0; k < V; k++) { 21 | for (i = 0; i < V; i++) { 22 | for (j = 0; j < V; j++) { 23 | if (dist[i][k] + dist[k][j] < dist[i][j]) { 24 | dist[i][j] = dist[i][k] + dist[k][j]; 25 | } 26 | } 27 | } 28 | } 29 | 30 | // Print the resulting shortest path matrix 31 | printSolution(dist); 32 | } 33 | 34 | void printSolution(int dist[][V]) { 35 | printf("The following matrix shows the shortest distances between every pair of vertices:\n"); 36 | for (int i = 0; i < V; i++) { 37 | for (int j = 0; j < V; j++) { 38 | if (dist[i][j] == INT_MAX) { 39 | printf("INF\t"); 40 | } else { 41 | printf("%d\t", dist[i][j]); 42 | } 43 | } 44 | printf("\n"); 45 | } 46 | } 47 | 48 | int main() { 49 | // Input graph represented as an adjacency matrix 50 | int graph[V][V] = { 51 | {0, 3, INT_MAX, 7}, 52 | {8, 0, 2, INT_MAX}, 53 | {5, INT_MAX, 0, 1}, 54 | {2, INT_MAX, INT_MAX, 0} 55 | }; 56 | 57 | floydWarshall(graph); 58 | 59 | return 0; 60 | } 61 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/02_Longest_Common_Subsequence/cpp_LCS.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | void lcs(const string &X, const string &Y) { 8 | int m = X.length(); 9 | int n = Y.length(); 10 | 11 | // Create a 2D array to store lengths of longest common subsequence 12 | vector> L(m + 1, vector(n + 1, 0)); 13 | 14 | // Build the LCS table 15 | for (int i = 0; i <= m; i++) { 16 | for (int j = 0; j <= n; j++) { 17 | if (i == 0 || j == 0) { 18 | L[i][j] = 0; // Base case 19 | } else if (X[i - 1] == Y[j - 1]) { 20 | L[i][j] = L[i - 1][j - 1] + 1; // Characters match 
21 | } else { 22 | L[i][j] = max(L[i - 1][j], L[i][j - 1]); // Take the maximum 23 | } 24 | } 25 | } 26 | 27 | // Length of LCS is in L[m][n] 28 | cout << "Length of Longest Common Subsequence: " << L[m][n] << endl; 29 | 30 | // Backtrack to find the LCS string 31 | int index = L[m][n]; 32 | string lcs_str(index, '\0'); // Create a string to store the LCS string 33 | 34 | // Start from the bottom-right corner of the LCS table 35 | int i = m, j = n; 36 | while (i > 0 && j > 0) { 37 | if (X[i - 1] == Y[j - 1]) { 38 | lcs_str[index - 1] = X[i - 1]; // Store the character 39 | i--; 40 | j--; 41 | index--; 42 | } else if (L[i - 1][j] > L[i][j - 1]) { 43 | i--; 44 | } else { 45 | j--; 46 | } 47 | } 48 | 49 | // Print the LCS string 50 | cout << "Longest Common Subsequence: " << lcs_str << endl; 51 | } 52 | 53 | int main() { 54 | string X = "AGGTAB"; 55 | string Y = "GXTXAYB"; 56 | lcs(X, Y); 57 | return 0; 58 | } 59 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/01_Matrix_Chain_Multiplication/cpp_MCM.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | void printOptimalParenthesis(const vector>& s, int i, int j) { 8 | if (i == j) { 9 | cout << "A" << i; 10 | } else { 11 | cout << "("; 12 | printOptimalParenthesis(s, i, s[i][j]); 13 | printOptimalParenthesis(s, s[i][j] + 1, j); 14 | cout << ")"; 15 | } 16 | } 17 | 18 | void matrixChainOrder(const vector& p) { 19 | int n = p.size() - 1; // Number of matrices 20 | vector> m(n, vector(n, 0)); // m[i][j] = minimum number of multiplications needed 21 | vector> s(n, vector(n, 0)); // s[i][j] = index of the matrix after which the product is split 22 | 23 | // L is chain length 24 | for (int L = 2; L <= n; ++L) { // L = 2 to n 25 | for (int i = 0; i < n - L + 1; ++i) { 26 | int j = i + L - 1; 27 | m[i][j] = numeric_limits::max(); // Set to maximum value 28 | 29 | // Try all possible splits 30 | for (int k = i; k < j; ++k) { 31 | int q = m[i][k] + m[k + 1][j] + p[i] * p[k + 1] * p[j + 1]; 32 | if (q < m[i][j]) { 33 | m[i][j] = q; 34 | s[i][j] = k; // Store the split point 35 | } 36 | } 37 | } 38 | } 39 | 40 | cout << "Minimum number of multiplications: " << m[0][n - 1] << endl; 41 | cout << "Optimal Parenthesization: "; 42 | printOptimalParenthesis(s, 0, n - 1); 43 | cout << endl; 44 | } 45 | 46 | int main() { 47 | // Dimensions of matrices 48 | // For example: A1 (10x30), A2 (30x5), A3 (5x60) 49 | vector arr = {10, 30, 5, 60}; // p = {10, 30, 5, 60} 50 | matrixChainOrder(arr); 51 | 52 | return 0; 53 | } 54 | -------------------------------------------------------------------------------- /Unit_4_Dynamic_Programming/01_Matrix_Chain_Multiplication/c_MCM.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | void printOptimalParenthesis(int i, int j, int n, int* s) { 5 | if (i == j) { 6 | printf("A%d", i); 7 | } else { 8 | printf("("); 9 | printOptimalParenthesis(i, *((s + i * n) + j), n, s); 10 | printOptimalParenthesis(*((s + i * n) + j) + 1, j, n, s); 11 | printf(")"); 12 | } 13 | } 14 | 15 | void matrixChainOrder(int p[], int n) { 16 | int m[n][n]; // m[i][j] = minimum number of multiplications needed to compute the matrix A[i]...A[j] 17 | int s[n][n]; // s[i][j] = index of the matrix after which the product is split 18 | 19 | for (int i = 1; i < n; i++) { 20 | m[i][i] = 0; // Cost is zero when multiplying one matrix 21 | 
} 22 | 23 | // L is chain length 24 | for (int L = 2; L < n; L++) { 25 | for (int i = 1; i < n - L + 1; i++) { 26 | int j = i + L - 1; 27 | m[i][j] = INT_MAX; // Set to maximum value 28 | 29 | // Try all possible splits 30 | for (int k = i; k < j; k++) { 31 | int q = m[i][k] + m[k + 1][j] + p[i - 1] * p[k] * p[j]; 32 | if (q < m[i][j]) { 33 | m[i][j] = q; 34 | s[i][j] = k; // Store the split point 35 | } 36 | } 37 | } 38 | } 39 | 40 | printf("Minimum number of multiplications: %d\n", m[1][n - 1]); 41 | printf("Optimal Parenthesization: "); 42 | printOptimalParenthesis(1, n - 1, n, (int *)s); 43 | printf("\n"); 44 | } 45 | 46 | int main() { 47 | // Dimensions of matrices 48 | // For example: A1 (10x30), A2 (30x5), A3 (5x60) 49 | int arr[] = {10, 30, 5, 60}; // p = [10, 30, 5, 60] 50 | int size = sizeof(arr) / sizeof(arr[0]); 51 | 52 | matrixChainOrder(arr, size); 53 | 54 | return 0; 55 | } 56 | 57 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/02_Fractional_knapsack_problem/c_fractional_knapsack.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | // Structure to represent an item 5 | struct Item { 6 | int value; 7 | int weight; 8 | }; 9 | 10 | // Comparison function for sorting items based on value-to-weight ratio 11 | int compare(const void* a, const void* b) { 12 | double r1 = (double)((struct Item*)a)->value / ((struct Item*)a)->weight; 13 | double r2 = (double)((struct Item*)b)->value / ((struct Item*)b)->weight; 14 | return (r1 < r2) ? 1 : -1; // Sort in descending order 15 | } 16 | 17 | // Function to calculate the maximum value in the knapsack 18 | double fractionalKnapsack(int capacity, struct Item* items, int n) { 19 | // Sort items by value-to-weight ratio 20 | qsort(items, n, sizeof(struct Item), compare); 21 | 22 | double maxValue = 0.0; // Maximum value in knapsack 23 | int currentWeight = 0; // Current weight in knapsack 24 | 25 | // Loop through sorted items 26 | for (int i = 0; i < n; i++) { 27 | if (currentWeight + items[i].weight <= capacity) { 28 | // If the item can fit, take it entirely 29 | currentWeight += items[i].weight; 30 | maxValue += items[i].value; 31 | } else { 32 | // If the item can't fit, take the fractional part 33 | int remainingWeight = capacity - currentWeight; 34 | maxValue += items[i].value * ((double)remainingWeight / items[i].weight); 35 | break; 36 | } 37 | } 38 | 39 | return maxValue; 40 | } 41 | 42 | int main() { 43 | int capacity = 50; // Maximum weight capacity of knapsack 44 | struct Item items[] = { {60, 10}, {100, 20}, {120, 30} }; 45 | int n = sizeof(items) / sizeof(items[0]); // Number of items 46 | 47 | printf("Maximum value in Knapsack = %.2f\n", fractionalKnapsack(capacity, items, n)); 48 | return 0; 49 | } 50 | -------------------------------------------------------------------------------- /Unit_5/String_Matching/rust_KMP.rs: -------------------------------------------------------------------------------- 1 | fn compute_lps_array(pattern: &str) -> Vec { 2 | let pattern_length = pattern.len(); 3 | let mut lps = vec![0; pattern_length]; // Longest Prefix Suffix array 4 | let mut length = 0; // Length of the previous longest prefix suffix 5 | let mut i = 1; 6 | 7 | // Build the LPS array 8 | while i < pattern_length { 9 | if pattern.as_bytes()[i] == pattern.as_bytes()[length] { 10 | length += 1; 11 | lps[i] = length; 12 | i += 1; 13 | } else { 14 | // Mismatch after length matches 15 | if length != 0 { 16 | length 
= lps[length - 1]; // Use the previous lps value 17 | } else { 18 | lps[i] = 0; 19 | i += 1; 20 | } 21 | } 22 | } 23 | 24 | lps 25 | } 26 | 27 | fn kmp(text: &str, pattern: &str) { 28 | let text_length = text.len(); 29 | let pattern_length = pattern.len(); 30 | let lps = compute_lps_array(pattern); // Preprocess the pattern to create the LPS array 31 | 32 | let mut i = 0; // Index for text 33 | let mut j = 0; // Index for pattern 34 | 35 | while i < text_length { 36 | if pattern.as_bytes()[j] == text.as_bytes()[i] { 37 | i += 1; 38 | j += 1; 39 | } 40 | 41 | if j == pattern_length { 42 | println!("Pattern found at index {}", i - j); 43 | j = lps[j - 1]; // Reset j using lps 44 | } else if i < text_length && pattern.as_bytes()[j] != text.as_bytes()[i] { 45 | // Mismatch after j matches 46 | if j != 0 { 47 | j = lps[j - 1]; // Use the previous lps value 48 | } else { 49 | i += 1; 50 | } 51 | } 52 | } 53 | } 54 | 55 | fn main() { 56 | let text = "ABABDABACDABABCABAB"; 57 | let pattern = "ABABCABAB"; 58 | 59 | kmp(text, pattern); 60 | } 61 | -------------------------------------------------------------------------------- /Unit_5/Binary_Search_Tree/cpp_BST.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace std; 3 | 4 | struct Node { 5 | int key; 6 | Node* left; 7 | Node* right; 8 | 9 | Node(int value) : key(value), left(nullptr), right(nullptr) {} 10 | }; 11 | 12 | class BST { 13 | public: 14 | Node* root; 15 | 16 | BST() : root(nullptr) {} 17 | 18 | void insert(int key) { 19 | root = insertNode(root, key); 20 | } 21 | 22 | Node* insertNode(Node* node, int key) { 23 | if (!node) return new Node(key); 24 | if (key < node->key) { 25 | node->left = insertNode(node->left, key); 26 | } else if (key > node->key) { 27 | node->right = insertNode(node->right, key); 28 | } 29 | return node; 30 | } 31 | 32 | void inorder() { 33 | inorderHelper(root); 34 | cout << endl; 35 | } 36 | 37 | void inorderHelper(Node* node) { 38 | if (node) { 39 | inorderHelper(node->left); 40 | cout << node->key << " "; 41 | inorderHelper(node->right); 42 | } 43 | } 44 | 45 | bool search(int key) { 46 | return searchNode(root, key); 47 | } 48 | 49 | bool searchNode(Node* node, int key) { 50 | if (!node) return false; 51 | if (node->key == key) return true; 52 | if (key < node->key) return searchNode(node->left, key); 53 | return searchNode(node->right, key); 54 | } 55 | }; 56 | 57 | // Example usage of BST 58 | int main() { 59 | BST bst; 60 | bst.insert(50); 61 | bst.insert(30); 62 | bst.insert(20); 63 | bst.insert(40); 64 | bst.insert(70); 65 | bst.insert(60); 66 | bst.insert(80); 67 | 68 | cout << "In-order traversal of the BST: "; 69 | bst.inorder(); 70 | 71 | int key = 40; 72 | if (bst.search(key)) { 73 | cout << key << " found in the BST." << endl; 74 | } else { 75 | cout << key << " not found in the BST." 
<< endl; 76 | } 77 | 78 | return 0; 79 | } 80 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/06_Prim_algorithm/python_prim_algorithm.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | def min_key(key, mst_set): 4 | """Utility function to find the vertex with the minimum key value.""" 5 | min_value = sys.maxsize 6 | min_index = -1 7 | 8 | for v in range(len(key)): 9 | if key[v] < min_value and not mst_set[v]: 10 | min_value = key[v] 11 | min_index = v 12 | 13 | return min_index 14 | 15 | def prim_mst(graph): 16 | """Function to implement Prim's algorithm to find the Minimum Spanning Tree.""" 17 | num_vertices = len(graph) 18 | parent = [-1] * num_vertices # Array to store constructed MST 19 | key = [sys.maxsize] * num_vertices # Key values used to pick the minimum weight edge 20 | mst_set = [False] * num_vertices # To represent the set of vertices included in MST 21 | 22 | # Start from the first vertex 23 | key[0] = 0 # Make key 0 so that this vertex is picked as the first vertex 24 | 25 | for _ in range(num_vertices - 1): 26 | # Pick the minimum key vertex from the set of vertices not yet included in MST 27 | u = min_key(key, mst_set) 28 | mst_set[u] = True # Add the picked vertex to the MST Set 29 | 30 | # Update key value and parent index of the adjacent vertices 31 | for v in range(num_vertices): 32 | # Update key only if graph[u][v] is smaller than key[v] 33 | if graph[u][v] > 0 and not mst_set[v] and graph[u][v] < key[v]: 34 | parent[v] = u 35 | key[v] = graph[u][v] 36 | 37 | # Print the constructed MST 38 | print("Edge \tWeight") 39 | for i in range(1, num_vertices): 40 | print(f"{parent[i]} -- {i} \t{graph[i][parent[i]]}") 41 | 42 | if __name__ == "__main__": 43 | # Adjacency matrix representation of the graph 44 | graph = [ 45 | [0, 2, 0, 6, 0], 46 | [2, 0, 3, 8, 5], 47 | [0, 3, 0, 0, 7], 48 | [6, 8, 0, 0, 9], 49 | [0, 5, 7, 9, 0] 50 | ] 51 | 52 | prim_mst(graph) 53 | -------------------------------------------------------------------------------- /Unit_5/Binary_Search_Tree/C_BST.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | // Node structure for Binary Search Tree 5 | struct Node { 6 | int key; 7 | struct Node *left; 8 | struct Node *right; 9 | }; 10 | 11 | // Function to create a new node 12 | struct Node* createNode(int key) { 13 | struct Node* newNode = (struct Node*)malloc(sizeof(struct Node)); 14 | newNode->key = key; 15 | newNode->left = newNode->right = NULL; 16 | return newNode; 17 | } 18 | 19 | // Function to insert a new key in the BST 20 | struct Node* insert(struct Node* root, int key) { 21 | if (root == NULL) { 22 | return createNode(key); 23 | } 24 | if (key < root->key) { 25 | root->left = insert(root->left, key); 26 | } else if (key > root->key) { 27 | root->right = insert(root->right, key); 28 | } 29 | return root; 30 | } 31 | 32 | // Function to perform in-order traversal 33 | void inorder(struct Node* root) { 34 | if (root != NULL) { 35 | inorder(root->left); 36 | printf("%d ", root->key); 37 | inorder(root->right); 38 | } 39 | } 40 | 41 | // Function to search a key in the BST 42 | struct Node* search(struct Node* root, int key) { 43 | if (root == NULL || root->key == key) { 44 | return root; 45 | } 46 | if (key < root->key) { 47 | return search(root->left, key); 48 | } 49 | return search(root->right, key); 50 | } 51 | 52 | // Main function to demonstrate BST operations 53 | 
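/*
 * Complexity note: insert() and search() each follow a single root-to-leaf path,
 * so both run in O(h) time, where h is the height of the tree. For randomly ordered
 * keys h is O(log n) on average, but inserting keys in sorted order degenerates the
 * tree into a linked list and the cost becomes O(n). The AVL implementations in this
 * folder avoid that worst case by rebalancing after every insertion.
 */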
int main() { 54 | struct Node* root = NULL; 55 | root = insert(root, 50); 56 | insert(root, 30); 57 | insert(root, 20); 58 | insert(root, 40); 59 | insert(root, 70); 60 | insert(root, 60); 61 | insert(root, 80); 62 | 63 | printf("In-order traversal of the BST: "); 64 | inorder(root); 65 | printf("\n"); 66 | 67 | int key = 40; 68 | if (search(root, key) != NULL) { 69 | printf("%d found in the BST.\n", key); 70 | } else { 71 | printf("%d not found in the BST.\n", key); 72 | } 73 | 74 | return 0; 75 | } 76 | -------------------------------------------------------------------------------- /Unit_5/String_Matching/C_KMP.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | // Function to preprocess the pattern and create the LPS array 5 | void computeLPSArray(char* pattern, int* lps, int patternLength) { 6 | int length = 0; // Length of the previous longest prefix suffix 7 | lps[0] = 0; // LPS[0] is always 0 8 | int i = 1; 9 | 10 | // Build the LPS array 11 | while (i < patternLength) { 12 | if (pattern[i] == pattern[length]) { 13 | length++; 14 | lps[i] = length; 15 | i++; 16 | } else { 17 | // Mismatch after length matches 18 | if (length != 0) { 19 | length = lps[length - 1]; // Use the previous lps value 20 | } else { 21 | lps[i] = 0; 22 | i++; 23 | } 24 | } 25 | } 26 | } 27 | 28 | // KMP algorithm to search for a pattern in a text 29 | void KMP(char* text, char* pattern) { 30 | int textLength = strlen(text); 31 | int patternLength = strlen(pattern); 32 | int lps[patternLength]; // Longest Prefix Suffix array 33 | 34 | // Preprocess the pattern to create the LPS array 35 | computeLPSArray(pattern, lps, patternLength); 36 | 37 | int i = 0; // Index for text 38 | int j = 0; // Index for pattern 39 | 40 | while (i < textLength) { 41 | if (pattern[j] == text[i]) { 42 | i++; 43 | j++; 44 | } 45 | 46 | if (j == patternLength) { 47 | printf("Pattern found at index %d\n", i - j); 48 | j = lps[j - 1]; // Reset j using lps 49 | } else if (i < textLength && pattern[j] != text[i]) { 50 | // Mismatch after j matches 51 | if (j != 0) { 52 | j = lps[j - 1]; // Use the previous lps value 53 | } else { 54 | i++; 55 | } 56 | } 57 | } 58 | } 59 | 60 | // Example usage 61 | int main() { 62 | char text[] = "ABABDABACDABABCABAB"; 63 | char pattern[] = "ABABCABAB"; 64 | 65 | KMP(text, pattern); 66 | 67 | return 0; 68 | } 69 | -------------------------------------------------------------------------------- /Unit_2/Merge_Sort/Rust_Merge_Sort.rs: -------------------------------------------------------------------------------- 1 | fn merge(arr: &mut [i32], left: usize, mid: usize, right: usize) { 2 | // Left aur right subarrays ka size nikalenge 3 | let n1 = mid - left + 1; 4 | let n2 = right - mid; 5 | 6 | // Temporary arrays banayenge 7 | let mut L = vec![0; n1]; 8 | let mut R = vec![0; n2]; 9 | 10 | // Data ko temporary arrays mein copy karenge 11 | for i in 0..n1 { 12 | L[i] = arr[left + i]; 13 | } 14 | for j in 0..n2 { 15 | R[j] = arr[mid + 1 + j]; 16 | } 17 | 18 | // Dono subarrays ko merge karenge 19 | let mut i = 0; 20 | let mut j = 0; 21 | let mut k = left; 22 | 23 | while i < n1 && j < n2 { 24 | if L[i] <= R[j] { 25 | arr[k] = L[i]; 26 | i += 1; 27 | } else { 28 | arr[k] = R[j]; 29 | j += 1; 30 | } 31 | k += 1; 32 | } 33 | 34 | // Agar L[] mein kuch elements bache hain to unhe copy karenge 35 | while i < n1 { 36 | arr[k] = L[i]; 37 | i += 1; 38 | k += 1; 39 | } 40 | 41 | // Agar R[] mein kuch elements bache hain to unhe copy karenge 42 | while j 
< n2 { 43 | arr[k] = R[j]; 44 | j += 1; 45 | k += 1; 46 | } 47 | } 48 | 49 | // Merge Sort ko implement karenge 50 | fn merge_sort(arr: &mut [i32], left: usize, right: usize) { 51 | if left < right { 52 | let mid = left + (right - left) / 2; 53 | 54 | // Pehla aur doosra half ko sort karenge 55 | merge_sort(arr, left, mid); 56 | merge_sort(arr, mid + 1, right); 57 | 58 | // Sorted halves ko merge karenge 59 | merge(arr, left, mid, right); 60 | } 61 | } 62 | 63 | // Array ko print karne ki utility function 64 | fn print_array(arr: &[i32]) { 65 | for &i in arr { 66 | print!("{} ", i); 67 | } 68 | println!(); 69 | } 70 | 71 | fn main() { 72 | let mut arr = [12, 11, 13, 5, 6, 7]; 73 | 74 | println!("Given array hai:"); 75 | print_array(&arr); 76 | 77 | merge_sort(&mut arr, 0, arr.len() - 1); 78 | 79 | println!("\nSorted array hai:"); 80 | print_array(&arr); 81 | } 82 | -------------------------------------------------------------------------------- /Unit_2/Merge_Sort/C_Merge_Sort.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | // Do subarrays ko merge karne wali function 4 | void merge(int arr[], int left, int mid, int right) { 5 | int n1 = mid - left + 1; 6 | int n2 = right - mid; 7 | 8 | // Temporary arrays banayenge 9 | int L[n1], R[n2]; 10 | 11 | // Temporary arrays mein data copy karenge 12 | for (int i = 0; i < n1; i++) 13 | L[i] = arr[left + i]; 14 | for (int j = 0; j < n2; j++) 15 | R[j] = arr[mid + 1 + j]; 16 | 17 | // Temporary arrays ko wapas original array mein merge karenge 18 | int i = 0, j = 0, k = left; 19 | while (i < n1 && j < n2) { 20 | if (L[i] <= R[j]) { 21 | arr[k] = L[i]; 22 | i++; 23 | } else { 24 | arr[k] = R[j]; 25 | j++; 26 | } 27 | k++; 28 | } 29 | 30 | // Agar L[] mein kuch elements bache hain to unhe copy karenge 31 | while (i < n1) { 32 | arr[k] = L[i]; 33 | i++; 34 | k++; 35 | } 36 | 37 | // Agar R[] mein kuch elements bache hain to unhe copy karenge 38 | while (j < n2) { 39 | arr[k] = R[j]; 40 | j++; 41 | k++; 42 | } 43 | } 44 | 45 | // Merge Sort ko implement karne wali function 46 | void mergeSort(int arr[], int left, int right) { 47 | if (left < right) { 48 | int mid = left + (right - left) / 2; 49 | 50 | // Pehla aur doosra half ko sort karenge 51 | mergeSort(arr, left, mid); 52 | mergeSort(arr, mid + 1, right); 53 | 54 | // Sorted halves ko merge karenge 55 | merge(arr, left, mid, right); 56 | } 57 | } 58 | 59 | // Array ko print karne ki utility function 60 | void printArray(int arr[], int size) { 61 | for (int i = 0; i < size; i++) 62 | printf("%d ", arr[i]); 63 | printf("\n"); 64 | } 65 | 66 | int main() { 67 | int arr[] = {12, 11, 13, 5, 6, 7}; 68 | int arr_size = sizeof(arr) / sizeof(arr[0]); 69 | 70 | printf("Given array hai \n"); 71 | printArray(arr, arr_size); 72 | 73 | mergeSort(arr, 0, arr_size - 1); 74 | 75 | printf("\nSorted array hai \n"); 76 | printArray(arr, arr_size); 77 | return 0; 78 | } 79 | -------------------------------------------------------------------------------- /Unit_2/Merge_Sort/cpp_Merge_Sort.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace std; 3 | 4 | // Do subarrays ko merge karne wali function 5 | void merge(int arr[], int left, int mid, int right) { 6 | int n1 = mid - left + 1; 7 | int n2 = right - mid; 8 | 9 | // Temporary arrays banayenge 10 | int L[n1], R[n2]; 11 | 12 | // Data ko temporary arrays mein copy karenge 13 | for (int i = 0; i < n1; i++) 14 | L[i] = arr[left + i]; 15 | for (int j = 
0; j < n2; j++) 16 | R[j] = arr[mid + 1 + j]; 17 | 18 | // Dono subarrays ko merge karenge 19 | int i = 0, j = 0, k = left; 20 | while (i < n1 && j < n2) { 21 | if (L[i] <= R[j]) { 22 | arr[k] = L[i]; 23 | i++; 24 | } else { 25 | arr[k] = R[j]; 26 | j++; 27 | } 28 | k++; 29 | } 30 | 31 | // Agar L[] mein kuch elements bache hain to unhe copy karenge 32 | while (i < n1) { 33 | arr[k] = L[i]; 34 | i++; 35 | k++; 36 | } 37 | 38 | // Agar R[] mein kuch elements bache hain to unhe copy karenge 39 | while (j < n2) { 40 | arr[k] = R[j]; 41 | j++; 42 | k++; 43 | } 44 | } 45 | 46 | // Merge Sort ko implement karenge 47 | void mergeSort(int arr[], int left, int right) { 48 | if (left < right) { 49 | int mid = left + (right - left) / 2; 50 | 51 | // Pehla aur doosra half ko sort karenge 52 | mergeSort(arr, left, mid); 53 | mergeSort(arr, mid + 1, right); 54 | 55 | // Sorted halves ko merge karenge 56 | merge(arr, left, mid, right); 57 | } 58 | } 59 | 60 | // Array ko print karne ki utility function 61 | void printArray(int arr[], int size) { 62 | for (int i = 0; i < size; i++) 63 | cout << arr[i] << " "; 64 | cout << endl; 65 | } 66 | 67 | int main() { 68 | int arr[] = {12, 11, 13, 5, 6, 7}; 69 | int arr_size = sizeof(arr) / sizeof(arr[0]); 70 | 71 | cout << "Given array hai" << endl; 72 | printArray(arr, arr_size); 73 | 74 | mergeSort(arr, 0, arr_size - 1); 75 | 76 | cout << "\nSorted array hai" << endl; 77 | printArray(arr, arr_size); 78 | 79 | return 0; 80 | } 81 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/04_Dijkstra_Algorithm/python_dijkstra_algorithm.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | V = 9 # Number of vertices in the graph 4 | 5 | def min_distance(dist, spt_set): 6 | """Find the vertex with the minimum distance value.""" 7 | min_dist = sys.maxsize # Infinite distance 8 | min_index = -1 9 | 10 | for v in range(V): 11 | if not spt_set[v] and dist[v] < min_dist: 12 | min_dist = dist[v] 13 | min_index = v 14 | 15 | return min_index 16 | 17 | def dijkstra(graph, src): 18 | """Implement Dijkstra's algorithm to find the shortest path.""" 19 | dist = [sys.maxsize] * V # Initialize distances to infinity 20 | spt_set = [False] * V # Shortest Path Tree set 21 | 22 | dist[src] = 0 # Distance from source to itself is always 0 23 | 24 | for _ in range(V - 1): 25 | u = min_distance(dist, spt_set) # Pick the minimum distance vertex 26 | spt_set[u] = True # Mark the picked vertex as processed 27 | 28 | for v in range(V): 29 | # Update dist[v] if and only if it is not in spt_set, 30 | # there is an edge from u to v, and the total weight of 31 | # the path from src to v through u is smaller than 32 | # the current value of dist[v] 33 | if (not spt_set[v] and graph[u][v] and 34 | dist[u] != sys.maxsize and 35 | dist[u] + graph[u][v] < dist[v]): 36 | dist[v] = dist[u] + graph[u][v] 37 | 38 | # Print the constructed distance array 39 | print("Vertex\tDistance from Source") 40 | for i in range(V): 41 | print(f"{i}\t\t{dist[i]}") 42 | 43 | if __name__ == "__main__": 44 | # Adjacency matrix representation of the graph 45 | graph = [ 46 | [0, 4, 0, 0, 0, 0, 0, 8, 0], 47 | [4, 0, 8, 0, 0, 0, 0, 0, 10], 48 | [0, 8, 0, 7, 0, 4, 0, 0, 2], 49 | [0, 0, 7, 0, 9, 14, 0, 0, 0], 50 | [0, 0, 0, 9, 0, 10, 0, 0, 0], 51 | [0, 0, 4, 14, 10, 0, 2, 0, 0], 52 | [0, 0, 0, 0, 0, 2, 0, 1, 6], 53 | [8, 0, 0, 0, 0, 0, 1, 0, 7], 54 | [0, 10, 2, 0, 0, 0, 6, 7, 0] 55 | ] 56 | 57 | source = 0 # Starting node 58 | 
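    # Complexity note: with an adjacency matrix and the linear min_distance() scan,
    # this implementation runs in O(V^2) time, which is acceptable for dense graphs.
    # For sparse graphs, an adjacency list combined with a binary heap lowers the cost
    # to O((V + E) log V); the Rust version in this folder (rust_dijkstra_algorithm.rs)
    # uses a BinaryHeap for the same purpose while still reading the adjacency matrix.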
dijkstra(graph, source) 59 | -------------------------------------------------------------------------------- /Unit_5/String_Matching/cpp_KMP.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | // Function to compute the Longest Prefix Suffix (LPS) array 8 | vector computeLPSArray(const string &pattern) { 9 | int pattern_length = pattern.length(); 10 | vector lps(pattern_length, 0); // Longest Prefix Suffix array 11 | int length = 0; // Length of the previous longest prefix suffix 12 | int i = 1; 13 | 14 | // Build the LPS array 15 | while (i < pattern_length) { 16 | if (pattern[i] == pattern[length]) { 17 | length++; 18 | lps[i] = length; 19 | i++; 20 | } else { 21 | // Mismatch after length matches 22 | if (length != 0) { 23 | length = lps[length - 1]; // Use the previous lps value 24 | } else { 25 | lps[i] = 0; 26 | i++; 27 | } 28 | } 29 | } 30 | 31 | return lps; 32 | } 33 | 34 | // Function to implement the KMP algorithm 35 | void KMP(const string &text, const string &pattern) { 36 | int text_length = text.length(); 37 | int pattern_length = pattern.length(); 38 | vector lps = computeLPSArray(pattern); // Preprocess the pattern to create the LPS array 39 | 40 | int i = 0; // Index for text 41 | int j = 0; // Index for pattern 42 | 43 | while (i < text_length) { 44 | if (pattern[j] == text[i]) { 45 | i++; 46 | j++; 47 | } 48 | 49 | if (j == pattern_length) { 50 | cout << "Pattern found at index " << i - j << endl; 51 | j = lps[j - 1]; // Reset j using lps 52 | } else if (i < text_length && pattern[j] != text[i]) { 53 | // Mismatch after j matches 54 | if (j != 0) { 55 | j = lps[j - 1]; // Use the previous lps value 56 | } else { 57 | i++; 58 | } 59 | } 60 | } 61 | } 62 | 63 | // Main function to test the KMP algorithm 64 | int main() { 65 | string text = "ABABDABACDABABCABAB"; 66 | string pattern = "ABABCABAB"; 67 | 68 | KMP(text, pattern); 69 | 70 | return 0; 71 | } 72 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/05_kruskal_algorithm/cpp_kruskal_algorithm.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | struct Edge { 8 | int u, v, weight; 9 | }; 10 | 11 | // Comparator function to sort edges by weight 12 | bool compareEdges(const Edge &a, const Edge &b) { 13 | return a.weight < b.weight; 14 | } 15 | 16 | class DisjointSet { 17 | private: 18 | vector parent, rank; 19 | 20 | public: 21 | DisjointSet(int n) { 22 | parent.resize(n); 23 | rank.resize(n, 0); 24 | for (int i = 0; i < n; i++) 25 | parent[i] = i; 26 | } 27 | 28 | int find(int i) { 29 | if (parent[i] != i) 30 | parent[i] = find(parent[i]); // Path compression 31 | return parent[i]; 32 | } 33 | 34 | void unionSets(int x, int y) { 35 | int rootX = find(x); 36 | int rootY = find(y); 37 | 38 | if (rootX != rootY) { 39 | if (rank[rootX] < rank[rootY]) { 40 | parent[rootX] = rootY; 41 | } else if (rank[rootX] > rank[rootY]) { 42 | parent[rootY] = rootX; 43 | } else { 44 | parent[rootY] = rootX; 45 | rank[rootX]++; 46 | } 47 | } 48 | } 49 | }; 50 | 51 | void kruskal(int vertices, vector &edges) { 52 | // Sort edges by weight 53 | sort(edges.begin(), edges.end(), compareEdges); 54 | 55 | DisjointSet ds(vertices); 56 | cout << "Edges in the Minimum Spanning Tree:\n"; 57 | int mst_weight = 0; 58 | 59 | for (const auto &edge : edges) { 60 | if (ds.find(edge.u) 
!= ds.find(edge.v)) { 61 | cout << edge.u << " -- " << edge.v << " == " << edge.weight << endl; 62 | ds.unionSets(edge.u, edge.v); 63 | mst_weight += edge.weight; 64 | } 65 | } 66 | } 67 | 68 | int main() { 69 | int vertices, num_edges; 70 | 71 | cout << "Enter the number of vertices: "; 72 | cin >> vertices; 73 | 74 | cout << "Enter the number of edges: "; 75 | cin >> num_edges; 76 | 77 | vector edges(num_edges); 78 | 79 | cout << "Enter the edges (u v weight):\n"; 80 | for (int i = 0; i < num_edges; ++i) { 81 | cin >> edges[i].u >> edges[i].v >> edges[i].weight; 82 | } 83 | 84 | kruskal(vertices, edges); 85 | 86 | return 0; 87 | } 88 | -------------------------------------------------------------------------------- /Unit_5/Binary_Search_Tree/rust_BST.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug)] 2 | struct Node { 3 | key: i32, 4 | left: Option>, 5 | right: Option>, 6 | } 7 | 8 | #[derive(Debug)] 9 | struct BST { 10 | root: Option>, 11 | } 12 | 13 | impl BST { 14 | fn new() -> Self { 15 | BST { root: None } 16 | } 17 | 18 | fn insert(&mut self, key: i32) { 19 | self.root = Self::insert_node(self.root.take(), key); 20 | } 21 | 22 | fn insert_node(node: Option>, key: i32) -> Option> { 23 | if let Some(mut n) = node { 24 | if key < n.key { 25 | n.left = Self::insert_node(n.left, key); 26 | } else if key > n.key { 27 | n.right = Self::insert_node(n.right, key); 28 | } 29 | Some(n) 30 | } else { 31 | Some(Box::new(Node { 32 | key, 33 | left: None, 34 | right: None, 35 | })) 36 | } 37 | } 38 | 39 | fn inorder(&self) -> Vec { 40 | let mut result = Vec::new(); 41 | Self::inorder_helper(&self.root, &mut result); 42 | result 43 | } 44 | 45 | fn inorder_helper(node: &Option>, result: &mut Vec) { 46 | if let Some(n) = node { 47 | Self::inorder_helper(&n.left, result); 48 | result.push(n.key); 49 | Self::inorder_helper(&n.right, result); 50 | } 51 | } 52 | 53 | fn search(&self, key: i32) -> bool { 54 | Self::search_node(&self.root, key) 55 | } 56 | 57 | fn search_node(node: &Option>, key: i32) -> bool { 58 | match node { 59 | Some(n) if n.key == key => true, 60 | Some(n) if key < n.key => Self::search_node(&n.left, key), 61 | Some(n) => Self::search_node(&n.right, key), 62 | None => false, 63 | } 64 | } 65 | } 66 | 67 | // Example usage of BST 68 | fn main() { 69 | let mut bst = BST::new(); 70 | bst.insert(50); 71 | bst.insert(30); 72 | bst.insert(20); 73 | bst.insert(40); 74 | bst.insert(70); 75 | bst.insert(60); 76 | bst.insert(80); 77 | 78 | println!("In-order traversal of the BST: {:?}", bst.inorder()); 79 | 80 | let key = 40; 81 | if bst.search(key) { 82 | println!("{} found in the BST.", key); 83 | } else { 84 | println!("{} not found in the BST.", key); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/02_Fractional_knapsack_problem/cpp_fractional_knapsack.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | // Structure for items 8 | struct Item { 9 | int value, weight; 10 | 11 | // Constructor 12 | Item(int v, int w) : value(v), weight(w) {} 13 | }; 14 | 15 | // Comparison function to sort items by value-to-weight ratio 16 | bool compare(Item a, Item b) { 17 | double r1 = (double)a.value / a.weight; 18 | double r2 = (double)b.value / b.weight; 19 | return r1 > r2; 20 | } 21 | 22 | // Function to calculate the maximum value in the knapsack 23 | double 
fractionalKnapsack(int W, vector& items) { 24 | // Sort items by value-to-weight ratio 25 | sort(items.begin(), items.end(), compare); 26 | 27 | double maxValue = 0.0; // Maximum value in knapsack 28 | int currentWeight = 0; // Current weight in knapsack 29 | 30 | // Loop through the sorted items 31 | for (auto& item : items) { 32 | if (currentWeight + item.weight <= W) { 33 | // If the item can fit in the knapsack, take it all 34 | currentWeight += item.weight; 35 | maxValue += item.value; 36 | } else { 37 | // If the item can't fit, take the fractional part 38 | int remainingWeight = W - currentWeight; 39 | maxValue += item.value * ((double)remainingWeight / item.weight); 40 | break; 41 | } 42 | } 43 | 44 | return maxValue; 45 | } 46 | 47 | int main() { 48 | int W = 50; // Maximum weight capacity of knapsack 49 | vector items = { {60, 10}, {100, 20}, {120, 30} }; 50 | 51 | cout << "Maximum value in Knapsack = " << fractionalKnapsack(W, items) << endl; 52 | return 0; 53 | } 54 | 55 | 56 | /* 57 | Explanation :- 58 | Item Struct: Represents each item, with fields for value and weight. 59 | 60 | compare Function: A custom comparison function to sort items by their value-to-weight ratio in descending order. 61 | 62 | fractionalKnapsack Function: 63 | Sorts the items by value-to-weight ratio. 64 | Adds items to the knapsack as long as they fit entirely within the weight capacity. 65 | If an item doesn’t completely fit, it adds a fractional part of the item proportional to the remaining weight capacity. 66 | 67 | Main Function: Initializes the knapsack's weight limit and items, calls the fractionalKnapsack function, and outputs the result. 68 | 69 | 70 | */ -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/04_Dijkstra_Algorithm/cpp_dijkstra_algorithm.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | #define V 9 // Number of vertices in the graph 8 | 9 | // Function to find the vertex with the minimum distance value 10 | int minDistance(const vector& dist, const vector& sptSet) { 11 | int min = INT_MAX, minIndex; 12 | 13 | for (int v = 0; v < V; v++) { 14 | if (!sptSet[v] && dist[v] <= min) { 15 | min = dist[v]; 16 | minIndex = v; 17 | } 18 | } 19 | return minIndex; 20 | } 21 | 22 | // Function to implement Dijkstra's algorithm 23 | void dijkstra(const vector>& graph, int src) { 24 | vector dist(V, INT_MAX); // Output array to hold the shortest distance from source 25 | vector sptSet(V, false); // Shortest Path Tree set 26 | 27 | // Distance from the source to itself is always 0 28 | dist[src] = 0; 29 | 30 | // Find shortest path for all vertices 31 | for (int count = 0; count < V - 1; count++) { 32 | // Pick the minimum distance vertex from the set of vertices 33 | int u = minDistance(dist, sptSet); 34 | 35 | // Mark the picked vertex as processed 36 | sptSet[u] = true; 37 | 38 | // Update dist value of the adjacent vertices of the picked vertex 39 | for (int v = 0; v < V; v++) { 40 | // Update dist[v] if and only if it is not in sptSet, there is an edge from u to v, 41 | // and the total weight of the path from src to v through u is smaller than the current value of dist[v] 42 | if (!sptSet[v] && graph[u][v] && dist[u] != INT_MAX && dist[u] + graph[u][v] < dist[v]) { 43 | dist[v] = dist[u] + graph[u][v]; 44 | } 45 | } 46 | } 47 | 48 | // Print the constructed distance array 49 | cout << "Vertex\tDistance from Source\n"; 50 | for 
(int i = 0; i < V; i++) {
51 | cout << i << "\t\t" << dist[i] << endl;
52 | }
53 | }
54 | 
55 | int main() {
56 | // Adjacency matrix representation of the graph
57 | vector<vector<int>> graph = {
58 | {0, 4, 0, 0, 0, 0, 0, 8, 0},
59 | {4, 0, 8, 0, 0, 0, 0, 0, 10},
60 | {0, 8, 0, 7, 0, 4, 0, 0, 2},
61 | {0, 0, 7, 0, 9, 14, 0, 0, 0},
62 | {0, 0, 0, 9, 0, 10, 0, 0, 0},
63 | {0, 0, 4, 14, 10, 0, 2, 0, 0},
64 | {0, 0, 0, 0, 0, 2, 0, 1, 6},
65 | {8, 0, 0, 0, 0, 0, 1, 0, 7},
66 | {0, 10, 2, 0, 0, 0, 6, 7, 0}
67 | };
68 | 
69 | int source = 0; // Starting node
70 | dijkstra(graph, source);
71 | 
72 | return 0;
73 | }
74 | 
--------------------------------------------------------------------------------
/Unit_3_Greedy_Methods/01_Huffman_Coding/cpp_huffman_coding.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <vector>
3 | #include <queue>
4 | #include <unordered_map>
5 | using namespace std;
6 | 
7 | // Node of Huffman Tree
8 | struct Node {
9 | char ch;
10 | int freq;
11 | Node* left;
12 | Node* right;
13 | 
14 | Node(char c, int f) : ch(c), freq(f), left(nullptr), right(nullptr) {}
15 | };
16 | 
17 | // Compare class for priority queue to sort nodes by frequency
18 | struct Compare {
19 | bool operator()(Node* a, Node* b) {
20 | return a->freq > b->freq;
21 | }
22 | };
23 | 
24 | // Function to generate Huffman codes by traversing the Huffman Tree
25 | void generateCodes(Node* root, string code, unordered_map<char, string>& huffmanCodes) {
26 | if (!root) return;
27 | 
28 | // If it's a leaf node, store the character and its code
29 | if (!root->left && !root->right) {
30 | huffmanCodes[root->ch] = code;
31 | }
32 | 
33 | // Traverse the left and right children
34 | generateCodes(root->left, code + "0", huffmanCodes);
35 | generateCodes(root->right, code + "1", huffmanCodes);
36 | }
37 | 
38 | // Function to build the Huffman Tree and return the root
39 | Node* buildHuffmanTree(const vector<char>& characters, const vector<int>& frequencies) {
40 | priority_queue<Node*, vector<Node*>, Compare> minHeap;
41 | 
42 | // Create a leaf node for each character and add it to the min heap
43 | for (size_t i = 0; i < characters.size(); ++i) {
44 | minHeap.push(new Node(characters[i], frequencies[i]));
45 | }
46 | 
47 | // Build the Huffman Tree
48 | while (minHeap.size() > 1) {
49 | // Remove the two nodes with the lowest frequency
50 | Node* left = minHeap.top(); minHeap.pop();
51 | Node* right = minHeap.top(); minHeap.pop();
52 | 
53 | // Create a new internal node with combined frequency
54 | Node* merged = new Node('\0', left->freq + right->freq);
55 | merged->left = left;
56 | merged->right = right;
57 | 
58 | // Add the new node back to the heap
59 | minHeap.push(merged);
60 | }
61 | 
62 | // The remaining node is the root of the Huffman Tree
63 | return minHeap.top();
64 | }
65 | 
66 | int main() {
67 | vector<char> characters = {'a', 'b', 'c', 'd', 'e', 'f'};
68 | vector<int> frequencies = {5, 9, 12, 13, 16, 45};
69 | 
70 | // Build Huffman Tree
71 | Node* root = buildHuffmanTree(characters, frequencies);
72 | 
73 | // Generate Huffman Codes
74 | unordered_map<char, string> huffmanCodes;
75 | generateCodes(root, "", huffmanCodes);
76 | 
77 | // Print Huffman Codes
78 | for (auto& pair : huffmanCodes) {
79 | cout << pair.first << ": " << pair.second << endl;
80 | }
81 | 
82 | return 0;
83 | }
84 | 
--------------------------------------------------------------------------------
/Unit_3_Greedy_Methods/04_Dijkstra_Algorithm/rust_dijkstra_algorithm.rs:
--------------------------------------------------------------------------------
1 | use std::cmp::Ordering;
2 | use
std::collections::BinaryHeap; 3 | use std::usize; 4 | 5 | const V: usize = 9; // Number of vertices in the graph 6 | 7 | #[derive(Debug, Eq, PartialEq)] 8 | struct Node { 9 | vertex: usize, 10 | distance: usize, 11 | } 12 | 13 | // Implementing Ord trait for Node to make it usable in BinaryHeap 14 | impl Ord for Node { 15 | fn cmp(&self, other: &Self) -> Ordering { 16 | other.distance.cmp(&self.distance) // Reverse order for min-heap 17 | } 18 | } 19 | 20 | // Implementing PartialOrd trait for Node 21 | impl PartialOrd for Node { 22 | fn partial_cmp(&self, other: &Self) -> Option { 23 | Some(self.cmp(other)) 24 | } 25 | } 26 | 27 | // Function to implement Dijkstra's algorithm 28 | fn dijkstra(graph: &Vec>, src: usize) { 29 | let mut dist = vec![usize::MAX; V]; // Initialize distances to infinity 30 | let mut spt_set = vec![false; V]; // Shortest Path Tree set 31 | 32 | dist[src] = 0; // Distance from source to itself is always 0 33 | 34 | let mut heap = BinaryHeap::new(); 35 | heap.push(Node { 36 | vertex: src, 37 | distance: 0, 38 | }); 39 | 40 | while let Some(Node { vertex: u, distance: d }) = heap.pop() { 41 | // If this distance is not the shortest recorded distance, skip 42 | if d > dist[u] { 43 | continue; 44 | } 45 | 46 | spt_set[u] = true; // Mark the picked vertex as processed 47 | 48 | for v in 0..V { 49 | if graph[u][v] != 0 && !spt_set[v] { // Check if there is an edge 50 | let new_distance = dist[u].saturating_add(graph[u][v]); 51 | 52 | // Only consider this new path if it's better 53 | if new_distance < dist[v] { 54 | dist[v] = new_distance; 55 | heap.push(Node { 56 | vertex: v, 57 | distance: new_distance, 58 | }); 59 | } 60 | } 61 | } 62 | } 63 | 64 | // Print the constructed distance array 65 | println!("Vertex\tDistance from Source"); 66 | for i in 0..V { 67 | println!("{}\t\t{}", i, dist[i]); 68 | } 69 | } 70 | 71 | fn main() { 72 | // Adjacency matrix representation of the graph 73 | let graph: Vec> = vec![ 74 | vec![0, 4, 0, 0, 0, 0, 0, 8, 0], 75 | vec![4, 0, 8, 0, 0, 0, 0, 0, 10], 76 | vec![0, 8, 0, 7, 0, 4, 0, 0, 2], 77 | vec![0, 0, 7, 0, 9, 14, 0, 0, 0], 78 | vec![0, 0, 0, 9, 0, 10, 0, 0, 0], 79 | vec![0, 0, 4, 14, 10, 0, 2, 0, 0], 80 | vec![0, 0, 0, 0, 0, 2, 0, 1, 6], 81 | vec![8, 0, 0, 0, 0, 0, 1, 0, 7], 82 | vec![0, 10, 2, 0, 0, 0, 6, 7, 0], 83 | ]; 84 | 85 | let source = 0; // Starting node 86 | dijkstra(&graph, source); 87 | } 88 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/01_Huffman_Coding/python_huffman_coding.py: -------------------------------------------------------------------------------- 1 | import heapq 2 | 3 | # Node class for the Huffman Tree 4 | class Node: 5 | def __init__(self, char, freq): 6 | self.char = char 7 | self.freq = freq 8 | self.left = None 9 | self.right = None 10 | 11 | # Overriding __lt__ to compare nodes based on frequency 12 | def __lt__(self, other): 13 | return self.freq < other.freq 14 | 15 | # Function to print the Huffman codes by traversing the Huffman Tree 16 | def print_codes(root, code=""): 17 | if root is None: 18 | return 19 | 20 | # If it's a leaf node, print the character and its code 21 | if root.char is not None: 22 | print(f"{root.char}: {code}") 23 | 24 | # Traverse the left and right children 25 | print_codes(root.left, code + "0") 26 | print_codes(root.right, code + "1") 27 | 28 | # Function to build the Huffman Tree and print codes 29 | def huffman_codes(characters, frequencies): 30 | # Create a priority queue (min-heap) and insert all characters 
with frequencies 31 | heap = [Node(characters[i], frequencies[i]) for i in range(len(characters))] 32 | heapq.heapify(heap) 33 | 34 | # Iterate until only one node remains in the heap (root of Huffman Tree) 35 | while len(heap) > 1: 36 | # Remove the two nodes with the lowest frequency 37 | left = heapq.heappop(heap) 38 | right = heapq.heappop(heap) 39 | 40 | # Create a new internal node with these two nodes as children and their sum as the frequency 41 | merged = Node(None, left.freq + right.freq) 42 | merged.left = left 43 | merged.right = right 44 | 45 | # Insert the new node back into the heap 46 | heapq.heappush(heap, merged) 47 | 48 | # The remaining node is the root of the Huffman Tree 49 | root = heapq.heappop(heap) 50 | 51 | # Print Huffman codes by traversing the tree 52 | print_codes(root) 53 | 54 | # Test data 55 | characters = ['a', 'b', 'c', 'd', 'e', 'f'] 56 | frequencies = [5, 9, 12, 13, 16, 45] 57 | 58 | # Generate Huffman Codes 59 | huffman_codes(characters, frequencies) 60 | 61 | 62 | 63 | ''' 64 | Explanation 65 | Node Class: The Node class represents a node in the Huffman Tree. It contains the character, frequency, and pointers to left and right children. 66 | 67 | print_codes Function: This function recursively traverses the Huffman Tree to print the codes for each character. 68 | 69 | huffman_codes Function: This function builds the Huffman Tree by: 70 | Creating nodes for each character and frequency. 71 | Using a min-heap (priority queue) to always merge the two nodes with the lowest frequency. 72 | Merging nodes until only the root node remains. 73 | 74 | Example Output: 75 | When you run the code with the provided test data, it should output something like this (note: the exact codes may vary based on frequency and tree structure): 76 | 77 | f: 0 78 | c: 100 79 | d: 101 80 | a: 1100 81 | b: 1101 82 | e: 111 83 | 84 | Each character is mapped to a binary code based on its position in the Huffman Tree. Characters with higher frequencies tend to have shorter codes. 
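Cost check, assuming the sample code lengths shown above (1, 3, 3, 4, 4 and 3 bits):
the weighted code length is 45*1 + 12*3 + 13*3 + 5*4 + 9*4 + 16*3 = 224 bits for the
100 symbols, compared with 300 bits if every one of the six characters were given a
fixed 3-bit code. Building the tree performs n - 1 merges, each costing O(log n) heap
operations, so the overall running time is O(n log n) for n distinct characters.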
85 | '''
--------------------------------------------------------------------------------
/Unit_3_Greedy_Methods/05_kruskal_algorithm/rust_kruskal_algorithm.rs:
--------------------------------------------------------------------------------
1 | use std::cmp::Ordering;
2 | 
3 | 
4 | #[derive(Debug, Clone)]
5 | struct Edge {
6 | u: usize,
7 | v: usize,
8 | weight: usize,
9 | }
10 | 
11 | // Implement Ord and PartialOrd for Edge to allow sorting by weight
12 | impl Ord for Edge {
13 | fn cmp(&self, other: &Self) -> Ordering {
14 | self.weight.cmp(&other.weight) // Ascending order: lightest edge first, as Kruskal requires
15 | }
16 | }
17 | 
18 | impl PartialOrd for Edge {
19 | fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
20 | Some(self.cmp(other))
21 | }
22 | }
23 | 
24 | impl PartialEq for Edge {
25 | fn eq(&self, other: &Self) -> bool {
26 | self.weight == other.weight
27 | }
28 | }
29 | 
30 | impl Eq for Edge {}
31 | 
32 | fn find(parent: &mut Vec<usize>, i: usize) -> usize {
33 | if parent[i] != i {
34 | parent[i] = find(parent, parent[i]); // Path compression
35 | }
36 | parent[i]
37 | }
38 | 
39 | fn union(parent: &mut Vec<usize>, rank: &mut Vec<usize>, x: usize, y: usize) {
40 | let root_x = find(parent, x);
41 | let root_y = find(parent, y);
42 | 
43 | if root_x != root_y {
44 | if rank[root_x] < rank[root_y] {
45 | parent[root_x] = root_y;
46 | } else if rank[root_x] > rank[root_y] {
47 | parent[root_y] = root_x;
48 | } else {
49 | parent[root_y] = root_x;
50 | rank[root_x] += 1;
51 | }
52 | }
53 | }
54 | 
55 | fn kruskal(vertices: usize, edges: Vec<Edge>) {
56 | let mut parent: Vec<usize> = (0..vertices).collect();
57 | let mut rank: Vec<usize> = vec![0; vertices];
58 | 
59 | let mut sorted_edges = edges.clone();
60 | sorted_edges.sort_unstable();
61 | 
62 | println!("Edges in the Minimum Spanning Tree:");
63 | 
64 | for edge in sorted_edges {
65 | if find(&mut parent, edge.u) != find(&mut parent, edge.v) {
66 | println!("{} -- {} == {}", edge.u, edge.v, edge.weight);
67 | union(&mut parent, &mut rank, edge.u, edge.v);
68 | }
69 | }
70 | }
71 | 
72 | fn main() {
73 | let mut vertices = String::new();
74 | let mut edges_input = String::new();
75 | 
76 | println!("Enter the number of vertices: ");
77 | std::io::stdin().read_line(&mut vertices).unwrap();
78 | let vertices: usize = vertices.trim().parse().unwrap();
79 | 
80 | println!("Enter the number of edges: ");
81 | std::io::stdin().read_line(&mut edges_input).unwrap();
82 | let num_edges: usize = edges_input.trim().parse().unwrap();
83 | 
84 | let mut edges: Vec<Edge> = Vec::new();
85 | 
86 | println!("Enter the edges (u v weight):");
87 | for _ in 0..num_edges {
88 | let mut edge_input = String::new();
89 | std::io::stdin().read_line(&mut edge_input).unwrap();
90 | let parts: Vec<usize> = edge_input.trim().split_whitespace()
91 | .map(|s| s.parse().unwrap()).collect();
92 | edges.push(Edge {
93 | u: parts[0],
94 | v: parts[1],
95 | weight: parts[2],
96 | });
97 | }
98 | 
99 | kruskal(vertices, edges);
100 | }
101 | 
--------------------------------------------------------------------------------
/Unit_3_Greedy_Methods/06_Prim_algorithm/cpp_prim_algorithm.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <vector>
3 | #include <limits>
4 | 
5 | 
6 | 
7 | using namespace std;
8 | 
9 | // Structure to represent an edge
10 | struct Edge {
11 | int vertex;
12 | int weight;
13 | };
14 | 
15 | // Class to represent the graph
16 | class Graph {
17 | private:
18 | int numVertices; // Number of vertices
19 | vector<vector<Edge>> adjList; // Adjacency list
20 | 
21
| public: 22 | Graph(int vertices); 23 | void addEdge(int src, int dest, int weight); 24 | void primMST(); 25 | }; 26 | 27 | // Constructor to initialize the graph 28 | Graph::Graph(int vertices) { 29 | numVertices = vertices; 30 | adjList.resize(vertices); 31 | } 32 | 33 | // Function to add an edge to the graph 34 | void Graph::addEdge(int src, int dest, int weight) { 35 | adjList[src].push_back({dest, weight}); 36 | adjList[dest].push_back({src, weight}); // Since the graph is undirected 37 | } 38 | 39 | // Function to find the vertex with the minimum key value 40 | int minKey(const vector& key, const vector& mstSet) { 41 | int minValue = numeric_limits::max(); 42 | int minIndex = -1; 43 | 44 | for (int v = 0; v < key.size(); v++) { 45 | if (!mstSet[v] && key[v] < minValue) { 46 | minValue = key[v]; 47 | minIndex = v; 48 | } 49 | } 50 | 51 | return minIndex; 52 | } 53 | 54 | // Function to implement Prim's algorithm 55 | void Graph::primMST() { 56 | vector parent(numVertices, -1); // Array to store constructed MST 57 | vector key(numVertices, numeric_limits::max()); // Key values to pick the minimum weight edge 58 | vector mstSet(numVertices, false); // To represent the set of vertices included in MST 59 | 60 | key[0] = 0; // Start from the first vertex 61 | 62 | for (int count = 0; count < numVertices - 1; count++) { 63 | // Pick the minimum key vertex from the set of vertices not yet included in MST 64 | int u = minKey(key, mstSet); 65 | mstSet[u] = true; // Add the picked vertex to the MST Set 66 | 67 | // Update key value and parent index of the adjacent vertices 68 | for (const Edge& edge : adjList[u]) { 69 | int v = edge.vertex; 70 | int weight = edge.weight; 71 | 72 | // Update key only if weight is smaller than current key[v] 73 | if (!mstSet[v] && weight < key[v]) { 74 | parent[v] = u; 75 | key[v] = weight; 76 | } 77 | } 78 | } 79 | 80 | // Print the constructed MST 81 | cout << "Edge \tWeight\n"; 82 | for (int i = 1; i < numVertices; i++) { 83 | cout << parent[i] << " -- " << i << "\t" << key[i] << endl; 84 | } 85 | } 86 | 87 | int main() { 88 | // Create a graph given in the example 89 | Graph graph(5); 90 | graph.addEdge(0, 1, 2); 91 | graph.addEdge(0, 3, 6); 92 | graph.addEdge(1, 2, 3); 93 | graph.addEdge(1, 3, 8); 94 | graph.addEdge(1, 4, 5); 95 | graph.addEdge(2, 4, 7); 96 | graph.addEdge(3, 4, 9); 97 | 98 | // Find the Minimum Spanning Tree 99 | graph.primMST(); 100 | 101 | return 0; 102 | } 103 | -------------------------------------------------------------------------------- /Unit_5/Binary_Search_Tree/python_AVL.py: -------------------------------------------------------------------------------- 1 | class AVLNode: 2 | def __init__(self, key): 3 | self.key = key 4 | self.left = None 5 | self.right = None 6 | self.height = 1 # New node is initially added at leaf 7 | 8 | class AVLTree: 9 | def insert(self, root, key): 10 | # Perform normal BST insert 11 | if root is None: 12 | return AVLNode(key) 13 | elif key < root.key: 14 | root.left = self.insert(root.left, key) 15 | else: 16 | root.right = self.insert(root.right, key) 17 | 18 | # Update height of this ancestor node 19 | root.height = 1 + max(self.getHeight(root.left), self.getHeight(root.right)) 20 | 21 | # Get the balance factor of this ancestor node to check whether this node became unbalanced 22 | balance = self.getBalance(root) 23 | 24 | # If this node becomes unbalanced, then there are 4 cases 25 | 26 | # Left Left Case 27 | if balance > 1 and key < root.left.key: 28 | return self.rightRotate(root) 29 | 30 | # 
Right Right Case 31 | if balance < -1 and key > root.right.key: 32 | return self.leftRotate(root) 33 | 34 | # Left Right Case 35 | if balance > 1 and key > root.left.key: 36 | root.left = self.leftRotate(root.left) 37 | return self.rightRotate(root) 38 | 39 | # Right Left Case 40 | if balance < -1 and key < root.right.key: 41 | root.right = self.rightRotate(root.right) 42 | return self.leftRotate(root) 43 | 44 | # Return the (unchanged) node pointer 45 | return root 46 | 47 | def leftRotate(self, z): 48 | y = z.right 49 | T2 = y.left 50 | 51 | # Perform rotation 52 | y.left = z 53 | z.right = T2 54 | 55 | # Update heights 56 | z.height = 1 + max(self.getHeight(z.left), self.getHeight(z.right)) 57 | y.height = 1 + max(self.getHeight(y.left), self.getHeight(y.right)) 58 | 59 | # Return the new root 60 | return y 61 | 62 | def rightRotate(self, z): 63 | y = z.left 64 | T3 = y.right 65 | 66 | # Perform rotation 67 | y.right = z 68 | z.left = T3 69 | 70 | # Update heights 71 | z.height = 1 + max(self.getHeight(z.left), self.getHeight(z.right)) 72 | y.height = 1 + max(self.getHeight(y.left), self.getHeight(y.right)) 73 | 74 | # Return the new root 75 | return y 76 | 77 | def getHeight(self, node): 78 | if not node: 79 | return 0 80 | return node.height 81 | 82 | def getBalance(self, node): 83 | if not node: 84 | return 0 85 | return self.getHeight(node.left) - self.getHeight(node.right) 86 | 87 | def inorder(self, root): 88 | return self._inorder(root) 89 | 90 | def _inorder(self, node): 91 | return self._inorder(node.left) + [node.key] + self._inorder(node.right) if node else [] 92 | 93 | # Example usage of AVL Tree 94 | avl_tree = AVLTree() 95 | root = None 96 | keys = [30, 20, 40, 10, 5, 3, 35] 97 | 98 | for key in keys: 99 | root = avl_tree.insert(root, key) 100 | 101 | print("In-order traversal of the AVL tree:", avl_tree.inorder(root)) 102 | -------------------------------------------------------------------------------- /Unit_5/Binary_Search_Tree/cpp_AVL.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | using namespace std; 3 | 4 | struct AVLNode { 5 | int key; 6 | AVLNode* left; 7 | AVLNode* right; 8 | int height; 9 | 10 | AVLNode(int value) : key(value), left(nullptr), right(nullptr), height(1) {} 11 | }; 12 | 13 | class AVLTree { 14 | public: 15 | AVLNode* root; 16 | 17 | AVLTree() : root(nullptr) {} 18 | 19 | void insert(int key) { 20 | root = insertNode(root, key); 21 | } 22 | 23 | AVLNode* insertNode(AVLNode* node, int key) { 24 | if (!node) return new AVLNode(key); 25 | if (key < node->key) { 26 | node->left = insertNode(node->left, key); 27 | } else { 28 | node->right = insertNode(node->right, key); 29 | } 30 | 31 | node->height = 1 + max(getHeight(node->left), getHeight(node->right)); 32 | 33 | int balance = getBalance(node); 34 | 35 | // Left Left Case 36 | if (balance > 1 && key < node->left->key) { 37 | return rightRotate(node); 38 | } 39 | 40 | // Right Right Case 41 | if (balance < -1 && key > node->right->key) { 42 | return leftRotate(node); 43 | } 44 | 45 | // Left Right Case 46 | if (balance > 1 && key > node->left->key) { 47 | node->left = leftRotate(node->left); 48 | return rightRotate(node); 49 | } 50 | 51 | // Right Left Case 52 | if (balance < -1 && key < node->right->key) { 53 | node->right = rightRotate(node->right); 54 | return leftRotate(node); 55 | } 56 | 57 | return node; 58 | } 59 | 60 | AVLNode* leftRotate(AVLNode* node) { 61 | AVLNode* y = node->right; 62 | AVLNode* T2 = y->left; 63 | 64 | y->left = node; 
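        // These two assignments perform the standard left rotation (T1 and T3 are just
        // the untouched subtrees, named here only for illustration):
        //
        //        node                    y
        //        /  \                   / \
        //      T1    y       ==>     node  T3
        //           / \              /  \
        //         T2   T3          T1    T2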
65 | node->right = T2; 66 | 67 | node->height = 1 + max(getHeight(node->left), getHeight(node->right)); 68 | y->height = 1 + max(getHeight(y->left), getHeight(y->right)); 69 | 70 | return y; 71 | } 72 | 73 | AVLNode* rightRotate(AVLNode* node) { 74 | AVLNode* y = node->left; 75 | AVLNode* T3 = y->right; 76 | 77 | y->right = node; 78 | node->left = T3; 79 | 80 | node->height = 1 + max(getHeight(node->left), getHeight(node->right)); 81 | y->height = 1 + max(getHeight(y->left), getHeight(y->right)); 82 | 83 | return y; 84 | } 85 | 86 | int getHeight(AVLNode* node) { 87 | return node ? node->height : 0; 88 | } 89 | 90 | int getBalance(AVLNode* node) { 91 | return node ? getHeight(node->left) - getHeight(node->right) : 0; 92 | } 93 | 94 | void inorder() { 95 | inorderHelper(root); 96 | cout << endl; 97 | } 98 | 99 | void inorderHelper(AVLNode* node) { 100 | if (node) { 101 | inorderHelper(node->left); 102 | cout << node->key << " "; 103 | inorderHelper(node->right); 104 | } 105 | } 106 | }; 107 | 108 | // Example usage of AVL Tree 109 | int main() { 110 | AVLTree avlTree; 111 | int keys[] = {30, 20, 40, 10, 5, 3, 35}; 112 | 113 | for (int key : keys) { 114 | avlTree.insert(key); 115 | } 116 | 117 | cout << "In-order traversal of the AVL tree: "; 118 | avlTree.inorder(); 119 | 120 | return 0; 121 | } 122 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # 🌟 Analysis and Design of Algorithms (ADA) - Multi-Language Implementations 4 | 5 | ![GitHub repo size](https://img.shields.io/github/repo-size/maxprogrammer007/ADA-Sem3-CSVTU) 6 | 7 | Welcome to the **Analysis and Design of Algorithms (ADA)** repository! This repository showcases the implementation of various algorithms covered in the ADA course in four powerful programming languages: **Rust, C++, C, and Python**. 8 | 9 | ## 📚 Course Overview 10 | 11 | The syllabus of the ADA course includes: 12 | 13 | - **Introduction to Algorithms** 14 | - **Analysis of Sorting Algorithms** 15 | - **Greedy Methods** 16 | - **Dynamic Programming** 17 | - **Other Relevant Topics** 18 | 19 | ## 🚀 Why This Repository? 20 | 21 | This repository is aimed at: 22 | 23 | - **Multi-Language Exposure**: Learning and comparing algorithm implementations in different languages. 24 | - **Deep Understanding**: Reinforcing your understanding of core algorithmic concepts. 25 | - **Code Reusability**: Providing ready-to-use algorithm templates in multiple languages. 26 | 27 | ## 🗂️ Algorithms To Be Covered 28 | 29 | 30 | 31 | ### Unit II: Analysis of Sorting Algorithms 32 | - Insertion Sort 33 | - Merge Sort 34 | - Quick Sort 35 | - Heap Sort 36 | - Counting Sort 37 | - Radix Sort 38 | 39 | ### Unit III: Greedy Methods 40 | - Huffman Coding 41 | - Fractional Knapsack Problem 42 | - Optimal Merge Pattern 43 | - Dijkstra's Algorithm 44 | - Kruskal's Algorithm 45 | - Prim's Algorithm 46 | 47 | ### Unit IV: Dynamic Programming 48 | - Matrix Chain Multiplication 49 | - Longest Common Subsequence 50 | - 0/1 Knapsack Problem 51 | - Floyd-Warshall Algorithm 52 | 53 | ### Unit V: Other Relevant Topics 54 | - Binary Search Tree 55 | - String Matching 56 | - Approximation Algorithms 57 | 58 | ## 🔧 How to Use 59 | 60 | 1. **Clone the Repository**: 61 | ```bash 62 | git clone https://github.com/yourusername/ADA-Sem3-CSVTU.git 63 | ``` 64 | 65 | 2. 
**Navigate to the Algorithm**: 66 | - Go to the folder corresponding to the language and algorithm you're interested in. 67 | 68 | 3. **Run the Code**: 69 | - Follow the instructions in the README of each language-specific folder to compile and run the code. 70 | 71 | ## 📊 Algorithm Implementations 72 | 73 | ### Insertion Sort Example 74 | - **Rust**: [View Code](https://github.com/maxprogrammer007/ADA-Sem3-CSVTU/blob/main/Unit_2/Insertion_Sort/Rust_Insertion_Sort.rs) 75 | - **C++**: [View Code](https://github.com/maxprogrammer007/ADA-Sem3-CSVTU/blob/main/Unit_2/Insertion_Sort/cpp_Insertion_Sort.cpp) 76 | - **C**: [View Code](https://github.com/maxprogrammer007/ADA-Sem3-CSVTU/blob/main/Unit_2/Insertion_Sort/c_Insertion_sort.c) 77 | - **Python**: [View Code](https://github.com/maxprogrammer007/ADA-Sem3-CSVTU/blob/main/Unit_2/Insertion_Sort/python_insertion_sort.py) 78 | 79 | _Similar structure will be followed for all other algorithms._ 80 | 81 | ## 🎯 Goals of the Repository 82 | 83 | - **Educational Resource**: To help students and developers understand and implement algorithms in different programming languages. 84 | - **Community Contribution**: Open to contributions and improvements from the community. 85 | 86 | ## 🤝 Contributing 87 | 88 | Contributions are welcome! If you have any improvements or new algorithm implementations to add, please feel free to make a pull request. 89 | 90 | 91 | 92 | ## 👤 Author 93 | 94 | **Abhinav SHukla** 95 | - [GitHub Profile](https://github.com/maxprogrammer007) 96 | - [LinkedIn](https://linkedin.com/in/maxprogrammer007) 97 | 98 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/06_Prim_algorithm/c_prim_algorithm.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #define V 5 // Number of vertices in the graph 6 | 7 | // Function to find the vertex with the minimum key value 8 | int minKey(int key[], bool mstSet[]) { 9 | int min = INT_MAX, min_index; 10 | 11 | for (int v = 0; v < V; v++) { 12 | if (mstSet[v] == false && key[v] < min) { 13 | min = key[v]; 14 | min_index = v; 15 | } 16 | } 17 | 18 | return min_index; 19 | } 20 | 21 | // Function to implement Prim's algorithm 22 | void primMST(int graph[V][V]) { 23 | int parent[V]; // Array to store constructed MST 24 | int key[V]; // Key values used to pick the minimum weight edge 25 | bool mstSet[V]; // To represent the set of vertices included in MST 26 | 27 | // Initialize all keys as infinite and mstSet as false 28 | for (int i = 0; i < V; i++) { 29 | key[i] = INT_MAX; 30 | mstSet[i] = false; 31 | } 32 | 33 | // Always include the first vertex in MST 34 | key[0] = 0; // Make key 0 so that this vertex is picked as the first vertex 35 | parent[0] = -1; // First node is always the root of the MST 36 | 37 | // The MST will have V vertices 38 | for (int count = 0; count < V - 1; count++) { 39 | // Pick the minimum key vertex from the set of vertices not yet included in MST 40 | int u = minKey(key, mstSet); 41 | 42 | // Add the picked vertex to the MST Set 43 | mstSet[u] = true; 44 | 45 | // Update key value and parent index of the adjacent vertices 46 | for (int v = 0; v < V; v++) { 47 | // Update key only if graph[u][v] is smaller than key[v] 48 | if (graph[u][v] && mstSet[v] == false && graph[u][v] < key[v]) { 49 | parent[v] = u; 50 | key[v] = graph[u][v]; 51 | } 52 | } 53 | } 54 | 55 | // Print the constructed MST 56 | printf("Edge \tWeight\n"); 57 | for (int i = 1; i < V; 
i++) { 58 | printf("%d -- %d \t%d\n", parent[i], i, graph[i][parent[i]]); 59 | } 60 | } 61 | 62 | int main() { 63 | // Adjacency matrix representation of the graph 64 | int graph[V][V] = { 65 | {0, 2, 0, 6, 0}, 66 | {2, 0, 3, 8, 5}, 67 | {0, 3, 0, 0, 7}, 68 | {6, 8, 0, 0, 9}, 69 | {0, 5, 7, 9, 0} 70 | }; 71 | 72 | primMST(graph); 73 | 74 | return 0; 75 | } 76 | 77 | /* 78 | 79 | Explanation 80 | Graph Representation: The graph is represented using an adjacency matrix graph[V][V], where V is the number of vertices. 81 | 82 | Key and MST Set: 83 | 84 | key[] array holds the minimum weight edge for each vertex. 85 | mstSet[] array keeps track of vertices included in the MST. 86 | minKey Function: This function finds the vertex with the minimum key value that hasn’t been included in the MST yet. 87 | 88 | Prim's Algorithm Implementation: 89 | 90 | The first vertex is initialized with a key value of 0 to start the MST. 91 | The algorithm iteratively picks the minimum key vertex, adds it to the MST, and updates the key values for its adjacent vertices. 92 | Output: The edges included in the MST along with their weights are printed. 93 | 94 | Example Output 95 | When you run this program, the output will display the edges included in the Minimum Spanning Tree along with their weights: 96 | 97 | Edge Weight 98 | 0 -- 1 2 99 | 1 -- 2 3 100 | 0 -- 3 6 101 | 1 -- 4 5 102 | 103 | 104 | */ -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/06_Prim_algorithm/rust_prim_algorithm.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BinaryHeap; 2 | use std::cmp::Ordering; 3 | 4 | #[derive(Debug, Clone)] // Clone is needed so vec![Vec::new(); vertices] can clone the empty adjacency lists 5 | struct Edge { 6 | vertex: usize, 7 | weight: usize, 8 | } 9 | 10 | // Implementing Ord trait for Edge to use it in the priority queue 11 | impl Ord for Edge { 12 | fn cmp(&self, other: &Self) -> Ordering { 13 | other.weight.cmp(&self.weight) // Reverse ordering for min-heap 14 | } 15 | } 16 | 17 | // Implementing PartialOrd trait for Edge 18 | impl PartialOrd for Edge { 19 | fn partial_cmp(&self, other: &Self) -> Option<Ordering> { 20 | Some(self.cmp(other)) 21 | } 22 | } 23 | 24 | // Implementing PartialEq trait for Edge 25 | impl PartialEq for Edge { 26 | fn eq(&self, other: &Self) -> bool { 27 | self.weight == other.weight 28 | } 29 | } 30 | 31 | // Implementing Eq trait for Edge 32 | impl Eq for Edge {} 33 | 34 | struct Graph { 35 | num_vertices: usize, 36 | adj_list: Vec<Vec<Edge>>, 37 | } 38 | 39 | impl Graph { 40 | // Create a new graph 41 | fn new(vertices: usize) -> Self { 42 | Graph { 43 | num_vertices: vertices, 44 | adj_list: vec![Vec::new(); vertices], 45 | } 46 | } 47 | 48 | // Add an edge to the graph 49 | fn add_edge(&mut self, src: usize, dest: usize, weight: usize) { 50 | self.adj_list[src].push(Edge { vertex: dest, weight }); 51 | self.adj_list[dest].push(Edge { vertex: src, weight }); // Undirected graph 52 | } 53 | 54 | // Function to implement Prim's algorithm 55 | fn prim_mst(&self) { 56 | let mut key = vec![usize::MAX; self.num_vertices]; // Key values to pick minimum weight edge 57 | let mut parent = vec![None; self.num_vertices]; // Array to store constructed MST 58 | let mut mst_set = vec![false; self.num_vertices]; // Track vertices included in MST 59 | let mut min_heap = BinaryHeap::new(); // Min-heap to pick the minimum weight edge 60 | 61 | key[0] = 0; // Start from the first vertex 62 | min_heap.push(Edge { vertex: 0, weight: 0 }); 63 | 64 | while let Some(Edge { vertex: u, weight: _ }) = 
min_heap.pop() { 65 | if mst_set[u] { 66 | continue; // Skip if already in MST 67 | } 68 | mst_set[u] = true; // Add vertex to MST 69 | 70 | for edge in &self.adj_list[u] { 71 | let v = edge.vertex; 72 | let weight = edge.weight; 73 | 74 | // Update key and parent if weight is smaller 75 | if !mst_set[v] && weight < key[v] { 76 | key[v] = weight; 77 | parent[v] = Some(u); 78 | min_heap.push(Edge { vertex: v, weight }); 79 | } 80 | } 81 | } 82 | 83 | // Print the constructed MST 84 | println!("Edge \tWeight"); 85 | for i in 1..self.num_vertices { 86 | if let Some(p) = parent[i] { 87 | println!("{} -- {} \t{}", p, i, key[i]); 88 | } 89 | } 90 | } 91 | } 92 | 93 | fn main() { 94 | // Create a graph given in the example 95 | let mut graph = Graph::new(5); 96 | graph.add_edge(0, 1, 2); 97 | graph.add_edge(0, 3, 6); 98 | graph.add_edge(1, 2, 3); 99 | graph.add_edge(1, 3, 8); 100 | graph.add_edge(1, 4, 5); 101 | graph.add_edge(2, 4, 7); 102 | graph.add_edge(3, 4, 9); 103 | 104 | // Find the Minimum Spanning Tree 105 | graph.prim_mst(); 106 | } 107 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/01_Huffman_Coding/rust_huffman_coding.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{BinaryHeap, HashMap}; 2 | use std::cmp::Ordering; 3 | use std::rc::Rc; 4 | use std::cell::RefCell; 5 | 6 | // Define the Huffman Tree Node 7 | #[derive(Debug, Clone)] 8 | struct Node { 9 | character: Option<char>, 10 | frequency: u32, 11 | left: Option<Rc<RefCell<Node>>>, 12 | right: Option<Rc<RefCell<Node>>>, 13 | } 14 | 15 | // Implement PartialEq and Eq for Node to make it usable in BinaryHeap 16 | impl PartialEq for Node { 17 | fn eq(&self, other: &Self) -> bool { 18 | self.frequency == other.frequency 19 | } 20 | } 21 | 22 | impl Eq for Node {} 23 | 24 | // Implement Ord and PartialOrd for Node to allow min-heap behavior in BinaryHeap 25 | impl Ord for Node { 26 | fn cmp(&self, other: &Self) -> Ordering { 27 | other.frequency.cmp(&self.frequency) 28 | } 29 | } 30 | 31 | impl PartialOrd for Node { 32 | fn partial_cmp(&self, other: &Self) -> Option<Ordering> { 33 | Some(self.cmp(other)) 34 | } 35 | } 36 | 37 | // Function to build the Huffman Tree 38 | fn build_huffman_tree(characters: &[char], frequencies: &[u32]) -> Rc<RefCell<Node>> { 39 | let mut heap = BinaryHeap::new(); 40 | 41 | // Insert all characters into the heap as individual nodes 42 | for i in 0..characters.len() { 43 | let node = Node { 44 | character: Some(characters[i]), 45 | frequency: frequencies[i], 46 | left: None, 47 | right: None, 48 | }; 49 | heap.push(Rc::new(RefCell::new(node))); 50 | } 51 | 52 | // Build the Huffman Tree 53 | while heap.len() > 1 { 54 | // Extract two nodes with the lowest frequency 55 | let left = heap.pop().unwrap(); 56 | let right = heap.pop().unwrap(); 57 | 58 | // Create a new internal node with the combined frequency 59 | let merged = Node { 60 | character: None, 61 | frequency: left.borrow().frequency + right.borrow().frequency, 62 | left: Some(left), 63 | right: Some(right), 64 | }; 65 | 66 | // Insert the new node back into the heap 67 | heap.push(Rc::new(RefCell::new(merged))); 68 | } 69 | 70 | // The remaining node is the root of the Huffman Tree 71 | heap.pop().unwrap() 72 | } 73 | 74 | // Function to generate Huffman Codes by traversing the Huffman Tree 75 | fn generate_codes(node: &Option<Rc<RefCell<Node>>>, code: String, codes: &mut HashMap<char, String>) { 76 | if let Some(n) = node { 77 | let node_ref = n.borrow(); 78 | 79 | // If it’s a leaf node, store the character and its code 
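// (Internal nodes built in build_huffman_tree have character == None, so only
// leaf nodes emit an entry; the accumulated string of 0s and 1s is that character's code.)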
80 | if let Some(character) = node_ref.character { 81 | codes.insert(character, code); 82 | } else { 83 | // Traverse left with code "0" 84 | generate_codes(&node_ref.left, format!("{}0", code), codes); 85 | // Traverse right with code "1" 86 | generate_codes(&node_ref.right, format!("{}1", code), codes); 87 | } 88 | } 89 | } 90 | 91 | fn main() { 92 | let characters = vec!['a', 'b', 'c', 'd', 'e', 'f']; 93 | let frequencies = vec![5, 9, 12, 13, 16, 45]; 94 | 95 | // Build the Huffman Tree 96 | let root = build_huffman_tree(&characters, &frequencies); 97 | 98 | // Generate the Huffman Codes 99 | let mut codes = HashMap::new(); 100 | generate_codes(&Some(root), String::new(), &mut codes); 101 | 102 | // Print the Huffman Codes 103 | for (character, code) in &codes { 104 | println!("{}: {}", character, code); 105 | } 106 | } 107 | 108 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/04_Dijkstra_Algorithm/c_Dijkstra_Algorithm.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #define V 9 // Number of vertices in the graph 6 | 7 | // Function to find the vertex with the minimum distance value 8 | int min_distance(int dist[], int spt_set[]) { 9 | int min = INT_MAX, min_index; 10 | 11 | for (int v = 0; v < V; v++) { 12 | if (spt_set[v] == 0 && dist[v] <= min) { 13 | min = dist[v]; 14 | min_index = v; 15 | } 16 | } 17 | return min_index; 18 | } 19 | 20 | // Function to implement Dijkstra's algorithm 21 | void dijkstra(int graph[V][V], int src) { 22 | int dist[V]; // Output array to hold the shortest distance from source 23 | int spt_set[V]; // Shortest Path Tree set 24 | 25 | // Initialize all distances as INFINITE and spt_set[] as false 26 | for (int i = 0; i < V; i++) { 27 | dist[i] = INT_MAX; 28 | spt_set[i] = 0; 29 | } 30 | 31 | // Distance from the source to itself is always 0 32 | dist[src] = 0; 33 | 34 | // Find shortest path for all vertices 35 | for (int count = 0; count < V - 1; count++) { 36 | // Pick the minimum distance vertex from the set of vertices 37 | int u = min_distance(dist, spt_set); 38 | 39 | // Mark the picked vertex as processed 40 | spt_set[u] = 1; 41 | 42 | // Update dist value of the adjacent vertices of the picked vertex 43 | for (int v = 0; v < V; v++) { 44 | // Update dist[v] if and only if it is not in spt_set, there is an edge from u to v, 45 | // and the total weight of the path from src to v through u is smaller than the current value of dist[v] 46 | if (!spt_set[v] && graph[u][v] && dist[u] != INT_MAX && dist[u] + graph[u][v] < dist[v]) { 47 | dist[v] = dist[u] + graph[u][v]; 48 | } 49 | } 50 | } 51 | 52 | // Print the constructed distance array 53 | printf("Vertex\tDistance from Source\n"); 54 | for (int i = 0; i < V; i++) { 55 | printf("%d\t\t%d\n", i, dist[i]); 56 | } 57 | } 58 | 59 | int main() { 60 | // Adjacency matrix representation of the graph 61 | int graph[V][V] = { 62 | {0, 4, 0, 0, 0, 0, 0, 8, 0}, 63 | {4, 0, 8, 0, 0, 0, 0, 0, 10}, 64 | {0, 8, 0, 7, 0, 4, 0, 0, 2}, 65 | {0, 0, 7, 0, 9, 14, 0, 0, 0}, 66 | {0, 0, 0, 9, 0, 10, 0, 0, 0}, 67 | {0, 0, 4, 14, 10, 0, 2, 0, 0}, 68 | {0, 0, 0, 0, 0, 2, 0, 1, 6}, 69 | {8, 0, 0, 0, 0, 0, 1, 0, 7}, 70 | {0, 10, 2, 0, 0, 0, 6, 7, 0} 71 | }; 72 | 73 | int source = 0; // Starting node 74 | dijkstra(graph, source); 75 | 76 | return 0; 77 | } 78 | 79 | 80 | /* 81 | 82 | Explanation 83 | Graph Representation: The graph is represented as an adjacency matrix. 
Each cell graph[i][j] holds the weight of the edge between vertex i and vertex j. A value of 0 indicates no edge exists. 84 | 85 | min_distance Function: This function finds the vertex with the minimum distance value that has not been processed yet. It iterates through all vertices and returns the index of the vertex with the smallest distance. 86 | 87 | dijkstra Function: 88 | 89 | Initializes the distance array (dist) to INT_MAX (infinity) and the shortest path tree set (spt_set) to 0 (false). 90 | Sets the distance from the source to itself as 0. 91 | For each vertex, it picks the minimum distance vertex, marks it as processed, and updates the distance values of its adjacent vertices. 92 | 93 | Main Function: Defines the graph using an adjacency matrix and calls the dijkstra function to compute and print the shortest distances from the source vertex to all other vertices. 94 | 95 | When you run this code, it will print the shortest distance from the source vertex (0) to all other vertices: 96 | 97 | Vertex Distance from Source 98 | 0 0 99 | 1 4 100 | 2 12 101 | 3 19 102 | 4 21 103 | 5 11 104 | 6 9 105 | 7 8 106 | 8 14 107 | 108 | */ -------------------------------------------------------------------------------- /Unit_5/Binary_Search_Tree/C_AVL.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | // Node structure for AVL Tree 5 | struct AVLNode { 6 | int key; 7 | struct AVLNode *left; 8 | struct AVLNode *right; 9 | int height; 10 | }; 11 | 12 | // Function to create a new AVL node 13 | struct AVLNode* createAVLNode(int key) { 14 | struct AVLNode* newNode = (struct AVLNode*)malloc(sizeof(struct AVLNode)); 15 | newNode->key = key; 16 | newNode->left = newNode->right = NULL; 17 | newNode->height = 1; // New node is initially added at leaf 18 | return newNode; 19 | } 20 | 21 | // Function to get the height of the tree 22 | int height(struct AVLNode* N) { 23 | if (N == NULL) return 0; 24 | return N->height; 25 | } 26 | 27 | // Function to get the balance factor of the node 28 | int getBalance(struct AVLNode* N) { 29 | if (N == NULL) return 0; 30 | return height(N->left) - height(N->right); 31 | } 32 | 33 | // Right rotate subtree rooted with y 34 | struct AVLNode* rightRotate(struct AVLNode* y) { 35 | struct AVLNode* x = y->left; 36 | struct AVLNode* T2 = x->right; 37 | 38 | // Perform rotation 39 | x->right = y; 40 | y->left = T2; 41 | 42 | // Update heights 43 | y->height = 1 + (height(y->left) > height(y->right) ? height(y->left) : height(y->right)); 44 | x->height = 1 + (height(x->left) > height(x->right) ? height(x->left) : height(x->right)); 45 | 46 | // Return new root 47 | return x; 48 | } 49 | 50 | // Left rotate subtree rooted with x 51 | struct AVLNode* leftRotate(struct AVLNode* x) { 52 | struct AVLNode* y = x->right; 53 | struct AVLNode* T2 = y->left; 54 | 55 | // Perform rotation 56 | y->left = x; 57 | x->right = T2; 58 | 59 | // Update heights 60 | x->height = 1 + (height(x->left) > height(x->right) ? height(x->left) : height(x->right)); 61 | y->height = 1 + (height(y->left) > height(y->right) ? 
height(y->left) : height(y->right)); 62 | 63 | // Return new root 64 | return y; 65 | } 66 | 67 | // Function to insert a key in the AVL tree 68 | struct AVLNode* insertAVL(struct AVLNode* node, int key) { 69 | // Normal BST insertion 70 | if (node == NULL) return createAVLNode(key); 71 | if (key < node->key) { 72 | node->left = insertAVL(node->left, key); 73 | } else if (key > node->key) { 74 | node->right = insertAVL(node->right, key); 75 | } else { // Duplicate keys are not allowed 76 | return node; 77 | } 78 | 79 | // Update height of this ancestor node 80 | node->height = 1 + (height(node->left) > height(node->right) ? height(node->left) : height(node->right)); 81 | 82 | // Get the balance factor of this ancestor node to check whether this node became unbalanced 83 | int balance = getBalance(node); 84 | 85 | // If this node becomes unbalanced, then there are 4 cases 86 | 87 | // Left Left Case 88 | if (balance > 1 && key < node->left->key) { 89 | return rightRotate(node); 90 | } 91 | 92 | // Right Right Case 93 | if (balance < -1 && key > node->right->key) { 94 | return leftRotate(node); 95 | } 96 | 97 | // Left Right Case 98 | if (balance > 1 && key > node->left->key) { 99 | node->left = leftRotate(node->left); 100 | return rightRotate(node); 101 | } 102 | 103 | // Right Left Case 104 | if (balance < -1 && key < node->right->key) { 105 | node->right = rightRotate(node->right); 106 | return leftRotate(node); 107 | } 108 | 109 | // Return the (unchanged) node pointer 110 | return node; 111 | } 112 | 113 | // Function to perform in-order traversal of AVL tree 114 | void inorderAVL(struct AVLNode* root) { 115 | if (root != NULL) { 116 | inorderAVL(root->left); 117 | printf("%d ", root->key); 118 | inorderAVL(root->right); 119 | } 120 | } 121 | 122 | // Main function to demonstrate AVL Tree operations 123 | int main() { 124 | struct AVLNode* root = NULL; 125 | root = insertAVL(root, 30); 126 | root = insertAVL(root, 20); 127 | root = insertAVL(root, 40); 128 | root = insertAVL(root, 10); 129 | root = insertAVL(root, 5); 130 | root = insertAVL(root, 3); 131 | root = insertAVL(root, 35); 132 | 133 | printf("In-order traversal of the AVL tree: "); 134 | inorderAVL(root); 135 | printf("\n"); 136 | 137 | return 0; 138 | } 139 | -------------------------------------------------------------------------------- /Unit_5/Binary_Search_Tree/rust_AVL.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug)] 2 | struct AVLNode { 3 | key: i32, 4 | left: Option>, 5 | right: Option>, 6 | height: i32, 7 | } 8 | 9 | #[derive(Debug)] 10 | struct AVLTree { 11 | root: Option>, 12 | } 13 | 14 | impl AVLTree { 15 | fn new() -> Self { 16 | AVLTree { root: None } 17 | } 18 | 19 | fn insert(&mut self, key: i32) { 20 | self.root = Self::insert_node(self.root.take(), key); 21 | } 22 | 23 | fn insert_node(node: Option>, key: i32) -> Option> { 24 | let mut node = match node { 25 | Some(n) => n, 26 | None => { 27 | return Some(Box::new(AVLNode { 28 | key, 29 | left: None, 30 | right: None, 31 | height: 1, 32 | })) 33 | } 34 | }; 35 | 36 | if key < node.key { 37 | node.left = Self::insert_node(node.left, key); 38 | } else { 39 | node.right = Self::insert_node(node.right, key); 40 | } 41 | 42 | node.height = 1 + Self::max(Self::get_height(&node.left), Self::get_height(&node.right)); 43 | 44 | let balance = Self::get_balance(&node); 45 | 46 | // Left Left Case 47 | if balance > 1 && key < node.left.as_ref().unwrap().key { 48 | return Self::right_rotate(node); 49 | } 50 | 51 | 
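// The three remaining rebalancing cases mirror the Left Left case above:
// balance > 1 means the left subtree is too tall, balance < -1 means the
// right subtree is too tall, and comparing the new key with the child's key
// decides whether a single rotation or a double rotation is required.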
// Right Right Case 52 | if balance < -1 && key > node.right.as_ref().unwrap().key { 53 | return Self::left_rotate(node); 54 | } 55 | 56 | // Left Right Case 57 | if balance > 1 && key > node.left.as_ref().unwrap().key { 58 | node.left = Some(Self::left_rotate(node.left.take().unwrap())); 59 | return Self::right_rotate(node); 60 | } 61 | 62 | // Right Left Case 63 | if balance < -1 && key < node.right.as_ref().unwrap().key { 64 | node.right = Some(Self::right_rotate(node.right.take().unwrap())); 65 | return Self::left_rotate(node); 66 | } 67 | 68 | Some(Box::new(node)) 69 | } 70 | 71 | fn left_rotate(node: Box) -> Option> { 72 | let mut y = node.right.unwrap(); 73 | let T2 = y.left.take(); 74 | 75 | y.left = Some(node); 76 | y.left.as_mut().unwrap().right = T2; 77 | 78 | y.left.as_mut().unwrap().height = 1 + Self::max( 79 | Self::get_height(&y.left.as_ref().unwrap().left), 80 | Self::get_height(&y.left.as_ref().unwrap().right), 81 | ); 82 | y.height = 1 + Self::max(Self::get_height(&y.left), Self::get_height(&y.right)); 83 | 84 | Some(y) 85 | } 86 | 87 | fn right_rotate(node: Box) -> Option> { 88 | let mut y = node.left.unwrap(); 89 | let T3 = y.right.take(); 90 | 91 | y.right = Some(node); 92 | y.right.as_mut().unwrap().left = T3; 93 | 94 | y.right.as_mut().unwrap().height = 1 + Self::max( 95 | Self::get_height(&y.right.as_ref().unwrap().left), 96 | Self::get_height(&y.right.as_ref().unwrap().right), 97 | ); 98 | y.height = 1 + Self::max(Self::get_height(&y.left), Self::get_height(&y.right)); 99 | 100 | Some(y) 101 | } 102 | 103 | fn get_height(node: &Option>) -> i32 { 104 | node.as_ref().map_or(0, |n| n.height) 105 | } 106 | 107 | fn get_balance(node: &AVLNode) -> i32 { 108 | Self::get_height(&node.left) - Self::get_height(&node.right) 109 | } 110 | 111 | fn max(a: i32, b: i32) -> i32 { 112 | if a > b { a } else { b } 113 | } 114 | 115 | fn inorder(&self) -> Vec { 116 | let mut result = Vec::new(); 117 | Self::inorder_helper(&self.root, &mut result); 118 | result 119 | } 120 | 121 | fn inorder_helper(node: &Option>, result: &mut Vec) { 122 | if let Some(n) = node { 123 | Self::inorder_helper(&n.left, result); 124 | result.push(n.key); 125 | Self::inorder_helper(&n.right, result); 126 | } 127 | } 128 | } 129 | 130 | // Example usage of AVL Tree 131 | fn main() { 132 | let mut avl_tree = AVLTree::new(); 133 | let keys = vec![30, 20, 40, 10, 5, 3, 35]; 134 | 135 | for &key in &keys { 136 | avl_tree.insert(key); 137 | } 138 | 139 | println!("In-order traversal of the AVL tree: {:?}", avl_tree.inorder()); 140 | } 141 | -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/05_kruskal_algorithm/c_kruskal_algorithm.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #define MAX 100 5 | #define INF 999999 6 | 7 | // Structure to represent an edge in the graph 8 | typedef struct { 9 | int u, v, weight; 10 | } Edge; 11 | 12 | // Structure to represent a subset for union-find 13 | typedef struct { 14 | int parent; 15 | int rank; 16 | } Subset; 17 | 18 | // Function to compare two edges (used for sorting) 19 | int compare(const void *a, const void *b) { 20 | Edge *edge1 = (Edge *)a; 21 | Edge *edge2 = (Edge *)b; 22 | return edge1->weight - edge2->weight; 23 | } 24 | 25 | // Function to find the subset of an element 26 | int find(Subset subsets[], int i) { 27 | if (subsets[i].parent != i) { 28 | subsets[i].parent = find(subsets, subsets[i].parent); 29 | } 30 | return subsets[i].parent; 
31 | } 32 | 33 | // Function to perform union of two subsets 34 | void union_sets(Subset subsets[], int x, int y) { 35 | int rootX = find(subsets, x); 36 | int rootY = find(subsets, y); 37 | 38 | if (rootX != rootY) { 39 | if (subsets[rootX].rank < subsets[rootY].rank) { 40 | subsets[rootX].parent = rootY; 41 | } else if (subsets[rootX].rank > subsets[rootY].rank) { 42 | subsets[rootY].parent = rootX; 43 | } else { 44 | subsets[rootY].parent = rootX; 45 | subsets[rootX].rank++; 46 | } 47 | } 48 | } 49 | 50 | // Function to implement Kruskal's Algorithm 51 | void kruskal(Edge edges[], int numEdges, int numVertices) { 52 | // Sort edges based on their weight 53 | qsort(edges, numEdges, sizeof(edges[0]), compare); 54 | 55 | // Create subsets for union-find 56 | Subset *subsets = (Subset *)malloc(numVertices * sizeof(Subset)); 57 | for (int v = 0; v < numVertices; ++v) { 58 | subsets[v].parent = v; 59 | subsets[v].rank = 0; 60 | } 61 | 62 | printf("Edges in the Minimum Spanning Tree:\n"); 63 | 64 | for (int i = 0; i < numEdges; ++i) { 65 | Edge currentEdge = edges[i]; 66 | int setU = find(subsets, currentEdge.u); 67 | int setV = find(subsets, currentEdge.v); 68 | 69 | // If including this edge does not cause a cycle 70 | if (setU != setV) { 71 | printf("%d -- %d == %d\n", currentEdge.u, currentEdge.v, currentEdge.weight); 72 | union_sets(subsets, setU, setV); 73 | } 74 | } 75 | 76 | free(subsets); 77 | } 78 | 79 | int main() { 80 | int numVertices, numEdges; 81 | 82 | printf("Enter the number of vertices: "); 83 | scanf("%d", &numVertices); 84 | printf("Enter the number of edges: "); 85 | scanf("%d", &numEdges); 86 | 87 | Edge *edges = (Edge *)malloc(numEdges * sizeof(Edge)); 88 | 89 | printf("Enter the edges (u v weight):\n"); 90 | for (int i = 0; i < numEdges; ++i) { 91 | scanf("%d %d %d", &edges[i].u, &edges[i].v, &edges[i].weight); 92 | } 93 | 94 | kruskal(edges, numEdges, numVertices); 95 | 96 | free(edges); 97 | return 0; 98 | } 99 | 100 | /* 101 | 102 | Explanation 103 | Graph Representation: The graph is represented as a list of edges, where each edge is represented by its two endpoints (u, v) and its weight. 104 | 105 | Edge Structure: An Edge structure is defined to hold the endpoints and the weight of each edge. 106 | 107 | Subset Structure: A Subset structure is used for the union-find algorithm, containing a parent and a rank for each vertex. 108 | 109 | Comparison Function: The compare function is used to sort the edges based on their weights using qsort. 110 | 111 | Find Function: The find function implements the path compression technique to find the root of the subset containing a particular vertex. 112 | 113 | Union Function: The union_sets function merges two subsets, ensuring that the smaller tree is added under the larger tree to keep the structure balanced. 114 | 115 | Kruskal's Algorithm: 116 | 117 | The edges are sorted by weight. 118 | For each edge, it checks whether including that edge would form a cycle using the union-find data structure. 119 | If it does not form a cycle, the edge is included in the MST. 120 | Main Function: The user inputs the number of vertices and edges, followed by the edges themselves, and the kruskal function is called to compute the MST. 
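Complexity: sorting the E edges dominates at O(E log E); each find/union call with path compression and union by rank is effectively constant (amortized inverse-Ackermann), so the overall running time is O(E log E), which is equivalent to O(E log V).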
121 | 122 | When you run this program, it prompts for the number of vertices and edges, as well as the edges themselves: 123 | 124 | Enter the number of vertices: 4 125 | Enter the number of edges: 5 126 | Enter the edges (u v weight): 127 | 0 1 10 128 | 0 2 6 129 | 0 3 5 130 | 1 3 15 131 | 2 3 4 132 | 133 | 134 | The output will display the edges included in the Minimum Spanning Tree: 135 | 136 | Edges in the Minimum Spanning Tree: 137 | 2 -- 3 == 4 138 | 0 -- 3 == 5 139 | 0 -- 1 == 10 140 | 141 | 142 | */ -------------------------------------------------------------------------------- /Unit_3_Greedy_Methods/01_Huffman_Coding/c_huffman_coding.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | // Huffman Tree node 5 | struct MinHeapNode { 6 | char data; 7 | unsigned freq; 8 | struct MinHeapNode *left, *right; 9 | }; 10 | 11 | // Min Heap structure 12 | struct MinHeap { 13 | unsigned size; 14 | unsigned capacity; 15 | struct MinHeapNode **array; 16 | }; 17 | 18 | // Create a new node with given character and frequency 19 | struct MinHeapNode* createNode(char data, unsigned freq) { 20 | struct MinHeapNode* temp = (struct MinHeapNode*)malloc(sizeof(struct MinHeapNode)); 21 | temp->left = temp->right = NULL; 22 | temp->data = data; 23 | temp->freq = freq; 24 | return temp; 25 | } 26 | 27 | // Create a min heap of given capacity 28 | struct MinHeap* createMinHeap(unsigned capacity) { 29 | struct MinHeap* minHeap = (struct MinHeap*)malloc(sizeof(struct MinHeap)); 30 | minHeap->size = 0; 31 | minHeap->capacity = capacity; 32 | minHeap->array = (struct MinHeapNode**)malloc(minHeap->capacity * sizeof(struct MinHeapNode*)); 33 | return minHeap; 34 | } 35 | 36 | // Swap two nodes 37 | void swapMinHeapNode(struct MinHeapNode** a, struct MinHeapNode** b) { 38 | struct MinHeapNode* t = *a; 39 | *a = *b; 40 | *b = t; 41 | } 42 | 43 | // Heapify at a given index 44 | void minHeapify(struct MinHeap* minHeap, int idx) { 45 | int smallest = idx; 46 | int left = 2 * idx + 1; 47 | int right = 2 * idx + 2; 48 | 49 | if (left < minHeap->size && minHeap->array[left]->freq < minHeap->array[smallest]->freq) 50 | smallest = left; 51 | 52 | if (right < minHeap->size && minHeap->array[right]->freq < minHeap->array[smallest]->freq) 53 | smallest = right; 54 | 55 | if (smallest != idx) { 56 | swapMinHeapNode(&minHeap->array[smallest], &minHeap->array[idx]); 57 | minHeapify(minHeap, smallest); 58 | } 59 | } 60 | 61 | // Check if size is 1 62 | int isSizeOne(struct MinHeap* minHeap) { 63 | return (minHeap->size == 1); 64 | } 65 | 66 | // Extract minimum value node 67 | struct MinHeapNode* extractMin(struct MinHeap* minHeap) { 68 | struct MinHeapNode* temp = minHeap->array[0]; 69 | minHeap->array[0] = minHeap->array[minHeap->size - 1]; 70 | --minHeap->size; 71 | minHeapify(minHeap, 0); 72 | return temp; 73 | } 74 | 75 | // Insert a new node to min heap 76 | void insertMinHeap(struct MinHeap* minHeap, struct MinHeapNode* minHeapNode) { 77 | ++minHeap->size; 78 | int i = minHeap->size - 1; 79 | 80 | while (i && minHeapNode->freq < minHeap->array[(i - 1) / 2]->freq) { 81 | minHeap->array[i] = minHeap->array[(i - 1) / 2]; 82 | i = (i - 1) / 2; 83 | } 84 | minHeap->array[i] = minHeapNode; 85 | } 86 | 87 | // Build a min heap 88 | void buildMinHeap(struct MinHeap* minHeap) { 89 | int n = minHeap->size - 1; 90 | for (int i = (n - 1) / 2; i >= 0; --i) 91 | minHeapify(minHeap, i); 92 | } 93 | 94 | // Check if node is leaf 95 | int isLeaf(struct MinHeapNode* root) { 
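    // A node is a leaf when it has neither a left nor a right child; in this tree,
    // leaves hold the original characters while internal '$' nodes only aggregate frequencies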
96 | return !(root->left) && !(root->right); 97 | } 98 | 99 | // Create min heap and insert all characters 100 | struct MinHeap* createAndBuildMinHeap(char data[], int freq[], int size) { 101 | struct MinHeap* minHeap = createMinHeap(size); 102 | 103 | for (int i = 0; i < size; ++i) 104 | minHeap->array[i] = createNode(data[i], freq[i]); 105 | minHeap->size = size; 106 | buildMinHeap(minHeap); 107 | return minHeap; 108 | } 109 | 110 | // Build Huffman Tree 111 | struct MinHeapNode* buildHuffmanTree(char data[], int freq[], int size) { 112 | struct MinHeapNode *left, *right, *top; 113 | struct MinHeap* minHeap = createAndBuildMinHeap(data, freq, size); 114 | 115 | while (!isSizeOne(minHeap)) { 116 | left = extractMin(minHeap); 117 | right = extractMin(minHeap); 118 | top = createNode('$', left->freq + right->freq); 119 | top->left = left; 120 | top->right = right; 121 | insertMinHeap(minHeap, top); 122 | } 123 | return extractMin(minHeap); 124 | } 125 | 126 | // Print Huffman Codes 127 | void printCodes(struct MinHeapNode* root, int arr[], int top) { 128 | if (root->left) { 129 | arr[top] = 0; 130 | printCodes(root->left, arr, top + 1); 131 | } 132 | if (root->right) { 133 | arr[top] = 1; 134 | printCodes(root->right, arr, top + 1); 135 | } 136 | if (isLeaf(root)) { 137 | printf("%c: ", root->data); 138 | for (int i = 0; i < top; ++i) 139 | printf("%d", arr[i]); 140 | printf("\n"); 141 | } 142 | } 143 | 144 | // Main function to build Huffman Tree and print codes 145 | void HuffmanCodes(char data[], int freq[], int size) { 146 | struct MinHeapNode* root = buildHuffmanTree(data, freq, size); 147 | int arr[100], top = 0; 148 | printCodes(root, arr, top); 149 | } 150 | 151 | int main() { 152 | char arr[] = {'a', 'b', 'c', 'd', 'e', 'f'}; 153 | int freq[] = {5, 9, 12, 13, 16, 45}; 154 | int size = sizeof(arr) / sizeof(arr[0]); 155 | 156 | HuffmanCodes(arr, freq, size); 157 | return 0; 158 | } 159 | 160 | 161 | /* 162 | Explanation: 163 | 164 | MinHeapNode and MinHeap structures are used to create the nodes of the Huffman Tree and the min heap, respectively. 165 | 166 | createNode creates a new node with character data and frequency. 167 | 168 | createAndBuildMinHeap creates a min heap and initializes it with characters and frequencies. 169 | 170 | buildHuffmanTree constructs the Huffman Tree by repeatedly combining the two lowest-frequency nodes until only one node remains, which becomes the root of the tree. 171 | printCodes traverses the Huffman Tree and prints the codes for each character. 172 | 173 | HuffmanCodes is the main function that builds the Huffman Tree and prints the codes. 174 | 175 | Output: 176 | When run, this program will output the Huffman codes for the characters provided in arr based on the frequencies in freq. 177 | 178 | */ -------------------------------------------------------------------------------- /Lab_Files/lab_stuffs.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Analysis and Design of Algorithms - Lab Component\n", 8 | "\n", 9 | "**Course Instructor:** Dr. JP Patra \n", 10 | "**Lab Guide:** Mrs. Thaneshwari Sahu \n", 11 | "\n", 12 | "Welcome to the lab component of the **Analysis and Design of Algorithms** course. 
This notebook covers a range of fundamental algorithms, each implemented in Python to demonstrate essential concepts in algorithm design, efficiency, and problem-solving techniques.\n", 13 | "\n", 14 | "The exercises in this lab aim to deepen understanding of:\n", 15 | "- **Sorting Algorithms:** Techniques such as insertion sort, merge sort, and bubble sort, focusing on their time complexities and applications.\n", 16 | "- **Divide and Conquer Approaches:** Including quicksort, maximum subarray problems, and matrix multiplication, illustrating how complex problems can be broken down into simpler subproblems.\n", 17 | "- **Dynamic Programming and Recursion:** Implementing algorithms for tasks like Fibonacci sequence generation and finding minimums in arrays and matrices.\n", 18 | "- **Data Structures in Search and Optimization:** Using binary search trees, binary search algorithms, and heap structures for efficient data handling.\n", 19 | "- **Graph Algorithms:** Dijkstra’s and Prim’s algorithms to solve shortest path and minimum spanning tree problems.\n", 20 | "\n", 21 | "Each lab exercise is organized to include a clear explanation of the problem, the Python implementation, and sample inputs/outputs to illustrate the algorithm in action. This structured approach will not only reinforce understanding but also help you develop a systematic approach to designing and analyzing algorithms.\n", 22 | "\n", 23 | "Let's dive into these exercises and explore the world of algorithms through code!\n" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "metadata": {}, 29 | "source": [ 30 | "# Lab 1: Sorting Algorithms\n", 31 | "\n", 32 | "In this lab, we will implement three fundamental sorting algorithms: **Insertion Sort**, **Merge Sort**, and **Bubble Sort**. Each of these algorithms has unique characteristics and performance profiles, making them suitable for different scenarios. This exercise will help us understand the mechanics and efficiency of each algorithm through hands-on coding.\n", 33 | "\n", 34 | "### Problem Statement\n", 35 | "\n", 36 | "Implement the following sorting algorithms:\n", 37 | "\n", 38 | "1. **Insertion Sort** \n", 39 | " A simple, intuitive sorting algorithm that builds the final sorted array one item at a time. It’s efficient for small data sets or partially sorted arrays.\n", 40 | "\n", 41 | "2. **Merge Sort** \n", 42 | " A classic example of the divide-and-conquer approach, merge sort divides the array into halves, recursively sorts each half, and then merges the sorted halves. Known for its consistent performance of \\(O(n \\log n)\\).\n", 43 | "\n", 44 | "3. **Bubble Sort** \n", 45 | " An elementary sorting algorithm where elements \"bubble\" to their correct positions through repeated swapping. 
Though not the most efficient for large arrays, it’s a good starting point for understanding sorting logic.\n", 46 | "\n", 47 | "### Objectives\n", 48 | "- Implement each sorting algorithm in Python.\n", 49 | "- Analyze the time and space complexities of each algorithm.\n", 50 | "- Compare their performance on small and large data sets to observe efficiency.\n", 51 | "\n", 52 | "Let's dive in and start coding these algorithms to see them in action!\n" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 1, 58 | "metadata": {}, 59 | "outputs": [ 60 | { 61 | "name": "stdout", 62 | "output_type": "stream", 63 | "text": [ 64 | "Original array: [12, 11, 13, 5, 6]\n", 65 | "Sorted array: [5, 6, 11, 12, 13]\n" 66 | ] 67 | } 68 | ], 69 | "source": [ 70 | "def insertion_sort(arr):\n", 71 | " # Traverse through 1 to len(arr)\n", 72 | " for i in range(1, len(arr)):\n", 73 | " key = arr[i]\n", 74 | " \n", 75 | " # Move elements of arr[0..i-1] that are greater than key\n", 76 | " # to one position ahead of their current position\n", 77 | " j = i - 1\n", 78 | " while j >= 0 and key < arr[j]:\n", 79 | " arr[j + 1] = arr[j]\n", 80 | " j -= 1\n", 81 | " arr[j + 1] = key\n", 82 | "\n", 83 | "# Example usage\n", 84 | "arr = [12, 11, 13, 5, 6]\n", 85 | "print(\"Original array:\", arr)\n", 86 | "insertion_sort(arr)\n", 87 | "print(\"Sorted array:\", arr)\n" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 2, 93 | "metadata": {}, 94 | "outputs": [ 95 | { 96 | "name": "stdout", 97 | "output_type": "stream", 98 | "text": [ 99 | "Original array: [12, 11, 13, 5, 6, 7]\n", 100 | "Sorted array: [5, 6, 7, 11, 12, 13]\n" 101 | ] 102 | } 103 | ], 104 | "source": [ 105 | "def merge_sort(arr):\n", 106 | " if len(arr) > 1:\n", 107 | " mid = len(arr) // 2 # Find the middle of the array\n", 108 | " left_half = arr[:mid] # Divide the array elements into two halves\n", 109 | " right_half = arr[mid:]\n", 110 | "\n", 111 | " # Recursively sort each half\n", 112 | " merge_sort(left_half)\n", 113 | " merge_sort(right_half)\n", 114 | "\n", 115 | " # Merging the sorted halves\n", 116 | " i = j = k = 0\n", 117 | "\n", 118 | " # Copy data to temp arrays left_half and right_half\n", 119 | " while i < len(left_half) and j < len(right_half):\n", 120 | " if left_half[i] < right_half[j]:\n", 121 | " arr[k] = left_half[i]\n", 122 | " i += 1\n", 123 | " else:\n", 124 | " arr[k] = right_half[j]\n", 125 | " j += 1\n", 126 | " k += 1\n", 127 | "\n", 128 | " # Checking if any element was left\n", 129 | " while i < len(left_half):\n", 130 | " arr[k] = left_half[i]\n", 131 | " i += 1\n", 132 | " k += 1\n", 133 | "\n", 134 | " while j < len(right_half):\n", 135 | " arr[k] = right_half[j]\n", 136 | " j += 1\n", 137 | " k += 1\n", 138 | "\n", 139 | "# Example usage\n", 140 | "arr = [12, 11, 13, 5, 6, 7]\n", 141 | "print(\"Original array:\", arr)\n", 142 | "merge_sort(arr)\n", 143 | "print(\"Sorted array:\", arr)\n" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 3, 149 | "metadata": {}, 150 | "outputs": [ 151 | { 152 | "name": "stdout", 153 | "output_type": "stream", 154 | "text": [ 155 | "Original array: [64, 34, 25, 12, 22, 11, 90]\n", 156 | "Sorted array: [11, 12, 22, 25, 34, 64, 90]\n" 157 | ] 158 | } 159 | ], 160 | "source": [ 161 | "def bubble_sort(arr):\n", 162 | " n = len(arr)\n", 163 | " # Traverse through all array elements\n", 164 | " for i in range(n):\n", 165 | " swapped = False\n", 166 | " # Last i elements are already sorted\n", 167 | " for j in range(0, n - i - 
1):\n", 168 | " # Swap if the element found is greater than the next element\n", 169 | " if arr[j] > arr[j + 1]:\n", 170 | " arr[j], arr[j + 1] = arr[j + 1], arr[j]\n", 171 | " swapped = True\n", 172 | " # If no two elements were swapped by inner loop, then break\n", 173 | " if not swapped:\n", 174 | " break\n", 175 | "\n", 176 | "# Example usage\n", 177 | "arr = [64, 34, 25, 12, 22, 11, 90]\n", 178 | "print(\"Original array:\", arr)\n", 179 | "bubble_sort(arr)\n", 180 | "print(\"Sorted array:\", arr)\n" 181 | ] 182 | }, 183 | { 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "# Lab 2: Maximum Subarray Sum Problem\n", 188 | "\n", 189 | "In this lab, we will implement the **Maximum Subarray Sum Problem**, an important problem in algorithm design often used to illustrate optimization techniques and dynamic programming.\n", 190 | "\n", 191 | "### Problem Statement\n", 192 | "\n", 193 | "Given an array of integers, the goal is to find the contiguous subarray (containing at least one number) that has the largest sum and return that sum.\n", 194 | "\n", 195 | "For example, given the array `[-2, 1, -3, 4, -1, 2, 1, -5, 4]`, the maximum sum of a contiguous subarray is `6`, achieved by the subarray `[4, -1, 2, 1]`.\n", 196 | "\n", 197 | "### Objectives\n", 198 | "- **Implement a solution** to find the maximum subarray sum.\n", 199 | "- **Explore different approaches** to solve this problem, such as:\n", 200 | " - Brute-force approach\n", 201 | " - Kadane’s Algorithm (an efficient solution with \\(O(n)\\) complexity)\n", 202 | "- **Analyze the time and space complexities** of each approach.\n", 203 | "- **Compare the performance** of each method on various input sizes to understand the trade-offs between simplicity and efficiency.\n", 204 | "\n", 205 | "Let's begin by implementing different methods to solve this problem and explore which is most effective for different array sizes!\n" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": 5, 211 | "metadata": {}, 212 | "outputs": [ 213 | { 214 | "name": "stdout", 215 | "output_type": "stream", 216 | "text": [ 217 | "Maximum Subarray Sum: 6\n" 218 | ] 219 | } 220 | ], 221 | "source": [ 222 | "def max_subarray_sum(arr):\n", 223 | " max_sum = arr[0]\n", 224 | " current_sum = arr[0]\n", 225 | "\n", 226 | " for i in range(1, len(arr)):\n", 227 | " current_sum = max(arr[i], current_sum + arr[i]) # Decide to start new subarray or add to current\n", 228 | " max_sum = max(max_sum, current_sum) # Track the maximum sum found\n", 229 | "\n", 230 | " return max_sum\n", 231 | "\n", 232 | "# Example usage\n", 233 | "arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\n", 234 | "print(\"Maximum Subarray Sum:\", max_subarray_sum(arr))\n" 235 | ] 236 | }, 237 | { 238 | "cell_type": "markdown", 239 | "metadata": {}, 240 | "source": [ 241 | "# Lab 3: Heap Sort Algorithm\n", 242 | "\n", 243 | "In this lab, we will implement the **Heap Sort Algorithm**, a popular and efficient sorting algorithm that uses a binary heap data structure. Heap sort is particularly valuable for its \\(O(n \\log n)\\) time complexity and in-place sorting capability, making it suitable for large data sets.\n", 244 | "\n", 245 | "### Problem Statement\n", 246 | "\n", 247 | "Implement **Heap Sort** to sort an array of elements in ascending order. The algorithm should:\n", 248 | "1. Build a max heap from the input data.\n", 249 | "2. Extract the maximum element from the heap and place it at the end of the array.\n", 250 | "3. 
Repeat the process until all elements are sorted.\n", 251 | "\n", 252 | "### Objectives\n", 253 | "- **Understand and implement** the heap data structure, focusing on max-heaps.\n", 254 | "- **Develop the Heap Sort Algorithm** using max-heap properties.\n", 255 | "- **Analyze the time and space complexity** of heap sort and discuss its advantages compared to other \\(O(n \\log n)\\) sorting algorithms, like merge sort and quicksort.\n", 256 | "- **Test and validate** the algorithm on various input sizes to evaluate its efficiency and effectiveness.\n", 257 | "\n", 258 | "Let’s proceed to code the heap sort algorithm and analyze its performance across different scenarios!\n" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": 6, 264 | "metadata": {}, 265 | "outputs": [ 266 | { 267 | "name": "stdout", 268 | "output_type": "stream", 269 | "text": [ 270 | "Original array: [12, 11, 13, 5, 6, 7]\n", 271 | "Sorted array: [5, 6, 7, 11, 12, 13]\n" 272 | ] 273 | } 274 | ], 275 | "source": [ 276 | "def heapify(arr, n, i):\n", 277 | " largest = i # Initialize largest as root\n", 278 | " left = 2 * i + 1 # Left child\n", 279 | " right = 2 * i + 2 # Right child\n", 280 | "\n", 281 | " # Check if left child exists and is greater than root\n", 282 | " if left < n and arr[left] > arr[largest]:\n", 283 | " largest = left\n", 284 | "\n", 285 | " # Check if right child exists and is greater than the largest so far\n", 286 | " if right < n and arr[right] > arr[largest]:\n", 287 | " largest = right\n", 288 | "\n", 289 | " # Change root if needed\n", 290 | " if largest != i:\n", 291 | " arr[i], arr[largest] = arr[largest], arr[i] # Swap\n", 292 | " heapify(arr, n, largest) # Recursively heapify the affected subtree\n", 293 | "\n", 294 | "def heap_sort(arr):\n", 295 | " n = len(arr)\n", 296 | "\n", 297 | " # Build max heap\n", 298 | " for i in range(n // 2 - 1, -1, -1):\n", 299 | " heapify(arr, n, i)\n", 300 | "\n", 301 | " # Extract elements one by one\n", 302 | " for i in range(n - 1, 0, -1):\n", 303 | " arr[i], arr[0] = arr[0], arr[i] # Swap\n", 304 | " heapify(arr, i, 0)\n", 305 | "\n", 306 | "# Example usage\n", 307 | "arr = [12, 11, 13, 5, 6, 7]\n", 308 | "print(\"Original array:\", arr)\n", 309 | "heap_sort(arr)\n", 310 | "print(\"Sorted array:\", arr)\n" 311 | ] 312 | }, 313 | { 314 | "cell_type": "markdown", 315 | "metadata": {}, 316 | "source": [ 317 | "# Lab 4: Quicksort and Kth Smallest Element\n", 318 | "\n", 319 | "In this lab, we will implement the **Quicksort Algorithm** and find the **Kth Smallest Element** in an array. Quicksort is a highly efficient sorting algorithm that employs a divide-and-conquer strategy, making it one of the fastest sorting algorithms for average cases.\n", 320 | "\n", 321 | "### Problem Statement\n", 322 | "\n", 323 | "1. **Quicksort Implementation** \n", 324 | " Implement the Quicksort algorithm to sort an array of elements in ascending order. The algorithm should:\n", 325 | " - Select a 'pivot' element from the array.\n", 326 | " - Partition the other elements into two sub-arrays: those less than the pivot and those greater than the pivot.\n", 327 | " - Recursively apply the same logic to the sub-arrays.\n", 328 | "\n", 329 | "2. **Finding the Kth Smallest Element** \n", 330 | " Using the Quicksort algorithm, find the Kth smallest element in an array. 
The Kth smallest element is the element that would be in the Kth position if the array were sorted.\n", 331 | "\n", 332 | "### Objectives\n", 333 | "- **Implement the Quicksort Algorithm** efficiently and understand its partitioning strategy.\n", 334 | "- **Develop a method** to find the Kth smallest element using the properties of Quicksort, ensuring an average time complexity of \\(O(n)\\).\n", 335 | "- **Analyze the time and space complexities** of the Quicksort algorithm and the Kth smallest element implementation.\n", 336 | "- **Test and validate** both implementations with various input sizes and K values to ensure accuracy and efficiency.\n", 337 | "\n", 338 | "Let's start coding the Quicksort algorithm and the Kth smallest element finder to enhance our understanding of these essential algorithmic concepts!\n" 339 | ] 340 | }, 341 | { 342 | "cell_type": "code", 343 | "execution_count": 7, 344 | "metadata": {}, 345 | "outputs": [ 346 | { 347 | "name": "stdout", 348 | "output_type": "stream", 349 | "text": [ 350 | "Sorted array: [3, 4, 7, 10, 15, 20]\n", 351 | "The 3th smallest element is: 7\n" 352 | ] 353 | } 354 | ], 355 | "source": [ 356 | "def quick_sort(arr):\n", 357 | " if len(arr) <= 1:\n", 358 | " return arr\n", 359 | " pivot = arr[len(arr) // 2] # Choose the middle element as pivot\n", 360 | " left = [x for x in arr if x < pivot] # Elements less than pivot\n", 361 | " middle = [x for x in arr if x == pivot] # Elements equal to pivot\n", 362 | " right = [x for x in arr if x > pivot] # Elements greater than pivot\n", 363 | " return quick_sort(left) + middle + quick_sort(right)\n", 364 | "\n", 365 | "def kth_smallest(arr, k):\n", 366 | " if k < 1 or k > len(arr):\n", 367 | " return None # k is out of bounds\n", 368 | " # Use quicksort to sort the array\n", 369 | " sorted_arr = quick_sort(arr)\n", 370 | " return sorted_arr[k - 1] # k-1 because list indexing starts from 0\n", 371 | "\n", 372 | "# Example usage\n", 373 | "arr = [7, 10, 4, 3, 20, 15]\n", 374 | "k = 3\n", 375 | "sorted_arr = quick_sort(arr)\n", 376 | "print(\"Sorted array:\", sorted_arr)\n", 377 | "print(f\"The {k}th smallest element is: {kth_smallest(arr, k)}\")\n" 378 | ] 379 | }, 380 | { 381 | "cell_type": "markdown", 382 | "metadata": {}, 383 | "source": [ 384 | "# Lab 5: Fibonacci Sequence, Linear Search, and Binary Search\n", 385 | "\n", 386 | "In this lab, we will implement three fundamental algorithms: the **Fibonacci Sequence**, **Linear Search**, and **Binary Search**. These algorithms are essential for understanding recursion, searching techniques, and algorithmic efficiency.\n", 387 | "\n", 388 | "### Problem Statement\n", 389 | "\n", 390 | "1. **Fibonacci Sequence** \n", 391 | " Implement a function to compute the Fibonacci sequence. You can use both iterative and recursive methods. The Fibonacci sequence is defined as:\n", 392 | " - \\( F(0) = 0 \\)\n", 393 | " - \\( F(1) = 1 \\)\n", 394 | " - \\( F(n) = F(n-1) + F(n-2) \\) for \\( n > 1 \\)\n", 395 | "\n", 396 | "2. **Linear Search** \n", 397 | " Implement a linear search algorithm that scans through an array to find a specified target element. The algorithm should return the index of the target if found or -1 if the target is not in the array.\n", 398 | "\n", 399 | "3. **Binary Search** \n", 400 | " Implement a binary search algorithm that efficiently finds the position of a target value within a sorted array. 
The algorithm should:\n", 401 | " - Continuously divide the search interval in half until the target value is found or the interval is empty.\n", 402 | "\n", 403 | "### Objectives\n", 404 | "- **Implement both iterative and recursive methods** for the Fibonacci sequence and understand their performance implications.\n", 405 | "- **Develop a linear search algorithm** and analyze its \\(O(n)\\) time complexity.\n", 406 | "- **Implement binary search** and analyze its \\(O(\\log n)\\) time complexity, emphasizing the importance of having a sorted array.\n", 407 | "- **Test and validate** each algorithm with various input sizes to evaluate their performance and correctness.\n", 408 | "\n", 409 | "Let’s begin coding these algorithms to explore their functionality and efficiency in solving common computational problems!\n" 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": 8, 415 | "metadata": {}, 416 | "outputs": [ 417 | { 418 | "name": "stdout", 419 | "output_type": "stream", 420 | "text": [ 421 | "The first 10 Fibonacci numbers are: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]\n" 422 | ] 423 | } 424 | ], 425 | "source": [ 426 | "def fibonacci(n):\n", 427 | " fib_sequence = []\n", 428 | " a, b = 0, 1\n", 429 | " for _ in range(n):\n", 430 | " fib_sequence.append(a)\n", 431 | " a, b = b, a + b\n", 432 | " return fib_sequence\n", 433 | "\n", 434 | "# Example usage\n", 435 | "n = 10\n", 436 | "print(f\"The first {n} Fibonacci numbers are: {fibonacci(n)}\")\n" 437 | ] 438 | }, 439 | { 440 | "cell_type": "code", 441 | "execution_count": 9, 442 | "metadata": {}, 443 | "outputs": [ 444 | { 445 | "name": "stdout", 446 | "output_type": "stream", 447 | "text": [ 448 | "Element 4 found at index: 3\n" 449 | ] 450 | } 451 | ], 452 | "source": [ 453 | "def linear_search(arr, target):\n", 454 | " for index, value in enumerate(arr):\n", 455 | " if value == target:\n", 456 | " return index # Return the index of the found element\n", 457 | " return -1 # Return -1 if the target is not found\n", 458 | "\n", 459 | "# Example usage\n", 460 | "arr = [5, 3, 8, 4, 2]\n", 461 | "target = 4\n", 462 | "result = linear_search(arr, target)\n", 463 | "if result != -1:\n", 464 | " print(f\"Element {target} found at index: {result}\")\n", 465 | "else:\n", 466 | " print(f\"Element {target} not found in the array.\")\n" 467 | ] 468 | }, 469 | { 470 | "cell_type": "code", 471 | "execution_count": 10, 472 | "metadata": {}, 473 | "outputs": [ 474 | { 475 | "name": "stdout", 476 | "output_type": "stream", 477 | "text": [ 478 | "Element 6 found at index: 5\n" 479 | ] 480 | } 481 | ], 482 | "source": [ 483 | "def binary_search(arr, target):\n", 484 | " left, right = 0, len(arr) - 1\n", 485 | " while left <= right:\n", 486 | " mid = left + (right - left) // 2 # Avoids potential overflow\n", 487 | " if arr[mid] == target:\n", 488 | " return mid # Return the index of the found element\n", 489 | " elif arr[mid] < target:\n", 490 | " left = mid + 1 # Target is in the right half\n", 491 | " else:\n", 492 | " right = mid - 1 # Target is in the left half\n", 493 | " return -1 # Return -1 if the target is not found\n", 494 | "\n", 495 | "# Example usage\n", 496 | "sorted_arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n", 497 | "target = 6\n", 498 | "result = binary_search(sorted_arr, target)\n", 499 | "if result != -1:\n", 500 | " print(f\"Element {target} found at index: {result}\")\n", 501 | "else:\n", 502 | " print(f\"Element {target} not found in the array.\")\n" 503 | ] 504 | }, 505 | { 506 | "cell_type": "markdown", 507 | "metadata": 
{}, 508 | "source": [ 509 | "# Lab 6: Dynamic Programming - Knapsack Problem\n", 510 | "\n", 511 | "In this lab, we will implement the **0/1 Knapsack Problem**, a classic example of dynamic programming that illustrates the principles of optimization and decision-making.\n", 512 | "\n", 513 | "### Problem Statement\n", 514 | "\n", 515 | "Given a set of items, each with a weight and a value, the objective is to determine the maximum value that can be carried in a knapsack of a given capacity. The challenge is that each item can either be included in the knapsack or excluded (hence the name 0/1 knapsack).\n", 516 | "\n", 517 | "#### Inputs:\n", 518 | "- An array of weights representing the weight of each item.\n", 519 | "- An array of values representing the value of each item.\n", 520 | "- A maximum capacity of the knapsack.\n", 521 | "\n", 522 | "#### Output:\n", 523 | "- The maximum total value that can be carried in the knapsack without exceeding its capacity.\n", 524 | "\n", 525 | "### Objectives\n", 526 | "- **Implement the 0/1 Knapsack Algorithm** using dynamic programming.\n", 527 | "- **Understand the transition formula** for dynamic programming and how to build the solution iteratively.\n", 528 | "- **Analyze the time and space complexities** of the algorithm, emphasizing the trade-offs involved in memory usage.\n", 529 | "- **Test the implementation** with various sets of items and knapsack capacities to validate the correctness and efficiency of the solution.\n", 530 | "\n", 531 | "Let’s dive into the world of dynamic programming by implementing the 0/1 Knapsack Problem and exploring its applications in optimization!\n" 532 | ] 533 | }, 534 | { 535 | "cell_type": "code", 536 | "execution_count": 12, 537 | "metadata": {}, 538 | "outputs": [ 539 | { 540 | "name": "stdout", 541 | "output_type": "stream", 542 | "text": [ 543 | "The minimum value in the matrix is: 1\n" 544 | ] 545 | } 546 | ], 547 | "source": [ 548 | "def find_minimum(matrix):\n", 549 | " if not matrix or not matrix[0]:\n", 550 | " return None # Handle empty matrix case\n", 551 | " min_value = matrix[0][0]\n", 552 | " for row in matrix:\n", 553 | " for value in row:\n", 554 | " if value < min_value:\n", 555 | " min_value = value\n", 556 | " return min_value\n", 557 | "\n", 558 | "# Example usage\n", 559 | "matrix = [\n", 560 | " [3, 5, 9],\n", 561 | " [1, 2, 8],\n", 562 | " [4, 7, 6]\n", 563 | "]\n", 564 | "min_value = find_minimum(matrix)\n", 565 | "print(f\"The minimum value in the matrix is: {min_value}\")\n" 566 | ] 567 | }, 568 | { 569 | "cell_type": "code", 570 | "execution_count": 13, 571 | "metadata": {}, 572 | "outputs": [ 573 | { 574 | "name": "stdout", 575 | "output_type": "stream", 576 | "text": [ 577 | "Row-wise minimums are: [3, 1, 4]\n" 578 | ] 579 | } 580 | ], 581 | "source": [ 582 | "def row_wise_minimum(matrix):\n", 583 | " if not matrix:\n", 584 | " return [] # Handle empty matrix case\n", 585 | " row_mins = []\n", 586 | " for row in matrix:\n", 587 | " row_mins.append(min(row)) # Find the minimum in the current row\n", 588 | " return row_mins\n", 589 | "\n", 590 | "# Example usage\n", 591 | "row_mins = row_wise_minimum(matrix)\n", 592 | "print(f\"Row-wise minimums are: {row_mins}\")\n" 593 | ] 594 | }, 595 | { 596 | "cell_type": "code", 597 | "execution_count": 14, 598 | "metadata": {}, 599 | "outputs": [ 600 | { 601 | "name": "stdout", 602 | "output_type": "stream", 603 | "text": [ 604 | "Column-wise minimums are: [1, 2, 6]\n" 605 | ] 606 | } 607 | ], 608 | "source": [ 609 | "def 
column_wise_minimum(matrix):\n", 610 | " if not matrix or not matrix[0]:\n", 611 | " return [] # Handle empty matrix case\n", 612 | " num_cols = len(matrix[0])\n", 613 | " col_mins = [float('inf')] * num_cols # Initialize column mins with infinity\n", 614 | "\n", 615 | " for row in matrix:\n", 616 | " for col in range(num_cols):\n", 617 | " if row[col] < col_mins[col]:\n", 618 | " col_mins[col] = row[col] # Update the minimum for the column\n", 619 | "\n", 620 | " return col_mins\n", 621 | "\n", 622 | "# Example usage\n", 623 | "col_mins = column_wise_minimum(matrix)\n", 624 | "print(f\"Column-wise minimums are: {col_mins}\")\n" 625 | ] 626 | }, 627 | { 628 | "cell_type": "markdown", 629 | "metadata": {}, 630 | "source": [ 631 | "# Lab 7: Graph Algorithms - Shortest Path and Minimum Spanning Tree\n", 632 | "\n", 633 | "In this lab, we will explore fundamental graph algorithms, specifically focusing on finding the **Shortest Path** using Dijkstra's Algorithm and constructing a **Minimum Spanning Tree (MST)** using Prim's Algorithm.\n", 634 | "\n", 635 | "### Problem Statement\n", 636 | "\n", 637 | "1. **Dijkstra's Algorithm for Shortest Path** \n", 638 | " Implement Dijkstra's Algorithm to find the shortest path from a starting vertex to all other vertices in a weighted graph. The graph can be represented using an adjacency matrix or adjacency list.\n", 639 | "\n", 640 | " **Inputs:**\n", 641 | " - A graph represented as an adjacency matrix or list.\n", 642 | " - A starting vertex.\n", 643 | "\n", 644 | " **Output:**\n", 645 | " - The shortest distances from the starting vertex to all other vertices.\n", 646 | "\n", 647 | "2. **Prim's Algorithm for Minimum Spanning Tree** \n", 648 | " Implement Prim's Algorithm to find the Minimum Spanning Tree of a connected, weighted graph. 
The MST is a subset of the edges that connect all vertices without cycles and with the minimum possible total edge weight.\n", 649 | "\n", 650 | " **Inputs:**\n", 651 | " - A connected graph represented as an adjacency matrix or list.\n", 652 | "\n", 653 | " **Output:**\n", 654 | " - The edges that comprise the Minimum Spanning Tree and its total weight.\n", 655 | "\n", 656 | "### Objectives\n", 657 | "- **Implement Dijkstra's Algorithm** and understand its greedy approach for finding the shortest path.\n", 658 | "- **Implement Prim's Algorithm** and explore how it efficiently finds the MST.\n", 659 | "- **Analyze the time and space complexities** of both algorithms, discussing their suitability for different types of graphs.\n", 660 | "- **Test and validate** the implementations on various graphs to ensure accuracy and performance.\n", 661 | "\n", 662 | "Let’s get started with implementing these essential graph algorithms to deepen our understanding of graph theory and its applications in real-world scenarios!\n" 663 | ] 664 | }, 665 | { 666 | "cell_type": "code", 667 | "execution_count": 15, 668 | "metadata": {}, 669 | "outputs": [ 670 | { 671 | "name": "stdout", 672 | "output_type": "stream", 673 | "text": [ 674 | "Result of Matrix Multiplication:\n", 675 | "[58, 64]\n", 676 | "[139, 154]\n" 677 | ] 678 | } 679 | ], 680 | "source": [ 681 | "def matrix_multiply(A, B):\n", 682 | " if len(A[0]) != len(B):\n", 683 | " raise ValueError(\"Number of columns in A must be equal to the number of rows in B\")\n", 684 | " \n", 685 | " # Initialize the result matrix with zeros\n", 686 | " result = [[0 for _ in range(len(B[0]))] for _ in range(len(A))]\n", 687 | " \n", 688 | " # Perform matrix multiplication\n", 689 | " for i in range(len(A)):\n", 690 | " for j in range(len(B[0])):\n", 691 | " for k in range(len(B)):\n", 692 | " result[i][j] += A[i][k] * B[k][j]\n", 693 | " \n", 694 | " return result\n", 695 | "\n", 696 | "# Example usage\n", 697 | "A = [[1, 2, 3],\n", 698 | " [4, 5, 6]]\n", 699 | "\n", 700 | "B = [[7, 8],\n", 701 | " [9, 10],\n", 702 | " [11, 12]]\n", 703 | "\n", 704 | "result = matrix_multiply(A, B)\n", 705 | "print(\"Result of Matrix Multiplication:\")\n", 706 | "for row in result:\n", 707 | " print(row)\n" 708 | ] 709 | }, 710 | { 711 | "cell_type": "code", 712 | "execution_count": 16, 713 | "metadata": {}, 714 | "outputs": [ 715 | { 716 | "name": "stdout", 717 | "output_type": "stream", 718 | "text": [ 719 | "Shortest paths from A: {'A': 0, 'B': 1, 'C': 3, 'D': 4}\n" 720 | ] 721 | } 722 | ], 723 | "source": [ 724 | "import heapq\n", 725 | "\n", 726 | "def dijkstra(graph, start):\n", 727 | " # Distance dictionary to store the shortest path to each vertex\n", 728 | " distances = {vertex: float('infinity') for vertex in graph}\n", 729 | " distances[start] = 0 # Distance from start to itself is 0\n", 730 | " \n", 731 | " priority_queue = [(0, start)] # (distance, vertex)\n", 732 | " \n", 733 | " while priority_queue:\n", 734 | " current_distance, current_vertex = heapq.heappop(priority_queue)\n", 735 | " \n", 736 | " # Nodes can only be added once to the queue\n", 737 | " if current_distance > distances[current_vertex]:\n", 738 | " continue\n", 739 | " \n", 740 | " # Explore neighbors\n", 741 | " for neighbor, weight in graph[current_vertex].items():\n", 742 | " distance = current_distance + weight\n", 743 | " \n", 744 | " # Only consider this new path if it's better\n", 745 | " if distance < distances[neighbor]:\n", 746 | " distances[neighbor] = distance\n", 747 | " 
heapq.heappush(priority_queue, (distance, neighbor))\n", 748 | " \n", 749 | " return distances\n", 750 | "\n", 751 | "# Example usage\n", 752 | "graph = {\n", 753 | " 'A': {'B': 1, 'C': 4},\n", 754 | " 'B': {'A': 1, 'C': 2, 'D': 5},\n", 755 | " 'C': {'A': 4, 'B': 2, 'D': 1},\n", 756 | " 'D': {'B': 5, 'C': 1}\n", 757 | "}\n", 758 | "\n", 759 | "shortest_paths = dijkstra(graph, 'A')\n", 760 | "print(\"Shortest paths from A:\", shortest_paths)\n" 761 | ] 762 | }, 763 | { 764 | "cell_type": "code", 765 | "execution_count": 17, 766 | "metadata": {}, 767 | "outputs": [ 768 | { 769 | "name": "stdout", 770 | "output_type": "stream", 771 | "text": [ 772 | "Minimum Spanning Tree edges (from, to, weight):\n", 773 | "('A', 'B', 1)\n", 774 | "('B', 'C', 2)\n", 775 | "('C', 'D', 1)\n" 776 | ] 777 | } 778 | ], 779 | "source": [ 780 | "def prim(graph, start):\n", 781 | " mst = [] # List to store edges of the minimum spanning tree\n", 782 | " visited = {start}\n", 783 | " edges = [\n", 784 | " (cost, start, to) \n", 785 | " for to, cost in graph[start].items()\n", 786 | " ]\n", 787 | " heapq.heapify(edges) # Create a priority queue of edges\n", 788 | "\n", 789 | " while edges:\n", 790 | " cost, frm, to = heapq.heappop(edges)\n", 791 | " \n", 792 | " if to not in visited:\n", 793 | " visited.add(to)\n", 794 | " mst.append((frm, to, cost))\n", 795 | "\n", 796 | " for to_next, cost_next in graph[to].items():\n", 797 | " if to_next not in visited:\n", 798 | " heapq.heappush(edges, (cost_next, to, to_next))\n", 799 | "\n", 800 | " return mst\n", 801 | "\n", 802 | "# Example usage\n", 803 | "graph_for_mst = {\n", 804 | " 'A': {'B': 1, 'C': 4},\n", 805 | " 'B': {'A': 1, 'C': 2, 'D': 5},\n", 806 | " 'C': {'A': 4, 'B': 2, 'D': 1},\n", 807 | " 'D': {'B': 5, 'C': 1}\n", 808 | "}\n", 809 | "\n", 810 | "mst = prim(graph_for_mst, 'A')\n", 811 | "print(\"Minimum Spanning Tree edges (from, to, weight):\")\n", 812 | "for edge in mst:\n", 813 | " print(edge)\n" 814 | ] 815 | }, 816 | { 817 | "cell_type": "markdown", 818 | "metadata": {}, 819 | "source": [ 820 | "# Lab 8: Prim’s Minimum Spanning Tree Algorithm\n", 821 | "\n", 822 | "In this lab, we will implement **Prim’s Minimum Spanning Tree (MST) Algorithm**, a classic algorithm in graph theory that helps us find a subset of edges in a weighted graph that connects all the vertices together without cycles and with the minimum possible total edge weight.\n", 823 | "\n", 824 | "### Problem Statement\n", 825 | "\n", 826 | "**Implement Prim’s Algorithm** to find the Minimum Spanning Tree of a connected, undirected graph. 
The algorithm works by maintaining a growing tree of vertices and repeatedly adding the smallest edge that connects a vertex in the tree to a vertex outside the tree.\n", 827 | "\n", 828 | "#### Inputs:\n", 829 | "- A connected graph represented as an adjacency matrix or adjacency list.\n", 830 | "- The starting vertex from which to begin constructing the MST.\n", 831 | "\n", 832 | "#### Output:\n", 833 | "- The edges that comprise the Minimum Spanning Tree and its total weight.\n", 834 | "\n", 835 | "### Objectives\n", 836 | "- **Understand and implement Prim’s Algorithm** to efficiently find the MST of a graph.\n", 837 | "- **Analyze the time and space complexities** of the algorithm, discussing its performance in different scenarios.\n", 838 | "- **Test and validate** the implementation on various graphs to ensure correctness and efficiency.\n", 839 | "\n", 840 | "Let’s begin implementing Prim’s Minimum Spanning Tree Algorithm and explore its applications in optimizing network design and other areas of graph theory!\n" 841 | ] 842 | }, 843 | { 844 | "cell_type": "code", 845 | "execution_count": 18, 846 | "metadata": {}, 847 | "outputs": [ 848 | { 849 | "name": "stdout", 850 | "output_type": "stream", 851 | "text": [ 852 | "Minimum Spanning Tree edges (from, to, weight):\n", 853 | "('A', 'B', 1)\n", 854 | "('B', 'C', 2)\n", 855 | "('C', 'D', 1)\n" 856 | ] 857 | } 858 | ], 859 | "source": [ 860 | "def prim(graph, start):\n", 861 | " mst = [] # List to store edges of the minimum spanning tree\n", 862 | " visited = {start}\n", 863 | " edges = [\n", 864 | " (cost, start, to) \n", 865 | " for to, cost in graph[start].items()\n", 866 | " ]\n", 867 | " heapq.heapify(edges) # Create a priority queue of edges\n", 868 | "\n", 869 | " while edges:\n", 870 | " cost, frm, to = heapq.heappop(edges)\n", 871 | " \n", 872 | " if to not in visited:\n", 873 | " visited.add(to)\n", 874 | " mst.append((frm, to, cost))\n", 875 | "\n", 876 | " for to_next, cost_next in graph[to].items():\n", 877 | " if to_next not in visited:\n", 878 | " heapq.heappush(edges, (cost_next, to, to_next))\n", 879 | "\n", 880 | " return mst\n", 881 | "\n", 882 | "# Example usage\n", 883 | "graph_for_mst = {\n", 884 | " 'A': {'B': 1, 'C': 4},\n", 885 | " 'B': {'A': 1, 'C': 2, 'D': 5},\n", 886 | " 'C': {'A': 4, 'B': 2, 'D': 1},\n", 887 | " 'D': {'B': 5, 'C': 1}\n", 888 | "}\n", 889 | "\n", 890 | "mst = prim(graph_for_mst, 'A')\n", 891 | "print(\"Minimum Spanning Tree edges (from, to, weight):\")\n", 892 | "for edge in mst:\n", 893 | " print(edge)\n" 894 | ] 895 | }, 896 | { 897 | "cell_type": "markdown", 898 | "metadata": {}, 899 | "source": [ 900 | "# Lab 9: Binary Search Tree and Binary Search Algorithm\n", 901 | "\n", 902 | "In this lab, we will implement a **Binary Search Tree (BST)** and the **Binary Search Algorithm**. These data structures and algorithms are fundamental for efficient searching, sorting, and data management.\n", 903 | "\n", 904 | "### Problem Statement\n", 905 | "\n", 906 | "1. 
**Binary Search Tree (BST)** \n", 907 | " Implement a Binary Search Tree that supports the following operations:\n", 908 | " - Insertion of a new node.\n", 909 | " - Searching for a node with a given value.\n", 910 | " - Deletion of a node.\n", 911 | " - Traversal of the tree (in-order, pre-order, post-order).\n", 912 | "\n", 913 | " **Inputs:**\n", 914 | " - Values to be inserted into the BST.\n", 915 | "\n", 916 | " **Output:**\n", 917 | " - The structure of the BST after each operation and the results of the traversal methods.\n", 918 | "\n", 919 | "2. **Binary Search Algorithm** \n", 920 | " Implement the Binary Search algorithm to find a target value in a sorted array. The algorithm should repeatedly divide the search interval in half until the target value is found or the interval is empty.\n", 921 | "\n", 922 | " **Inputs:**\n", 923 | " - A sorted array and a target value to search for.\n", 924 | "\n", 925 | " **Output:**\n", 926 | " - The index of the target value if found, or -1 if the target is not present in the array.\n", 927 | "\n", 928 | "### Objectives\n", 929 | "- **Understand and implement the BST** and its core operations, emphasizing its properties and advantages.\n", 930 | "- **Implement the Binary Search algorithm** and analyze its efficiency compared to linear search methods.\n", 931 | "- **Analyze the time and space complexities** of both the BST operations and the Binary Search algorithm.\n", 932 | "- **Test and validate** the implementations with various inputs to ensure correctness and performance.\n", 933 | "\n", 934 | "Let’s get started with implementing the Binary Search Tree and the Binary Search algorithm to enhance our understanding of data structures and searching techniques!\n" 935 | ] 936 | }, 937 | { 938 | "cell_type": "code", 939 | "execution_count": 19, 940 | "metadata": {}, 941 | "outputs": [ 942 | { 943 | "name": "stdout", 944 | "output_type": "stream", 945 | "text": [ 946 | "Inorder Traversal of BST: [1, 3, 5, 7, 8, 9, 10]\n", 947 | "Found 5 in BST.\n" 948 | ] 949 | } 950 | ], 951 | "source": [ 952 | "class TreeNode:\n", 953 | " def __init__(self, key):\n", 954 | " self.left = None\n", 955 | " self.right = None\n", 956 | " self.val = key\n", 957 | "\n", 958 | "class BinarySearchTree:\n", 959 | " def __init__(self):\n", 960 | " self.root = None\n", 961 | "\n", 962 | " def insert(self, key):\n", 963 | " if self.root is None:\n", 964 | " self.root = TreeNode(key)\n", 965 | " else:\n", 966 | " self._insert_recursive(self.root, key)\n", 967 | "\n", 968 | " def _insert_recursive(self, node, key):\n", 969 | " if key < node.val:\n", 970 | " if node.left is None:\n", 971 | " node.left = TreeNode(key)\n", 972 | " else:\n", 973 | " self._insert_recursive(node.left, key)\n", 974 | " else:\n", 975 | " if node.right is None:\n", 976 | " node.right = TreeNode(key)\n", 977 | " else:\n", 978 | " self._insert_recursive(node.right, key)\n", 979 | "\n", 980 | " def search(self, key):\n", 981 | " return self._search_recursive(self.root, key)\n", 982 | "\n", 983 | " def _search_recursive(self, node, key):\n", 984 | " if node is None or node.val == key:\n", 985 | " return node\n", 986 | " if key < node.val:\n", 987 | " return self._search_recursive(node.left, key)\n", 988 | " return self._search_recursive(node.right, key)\n", 989 | "\n", 990 | " def inorder_traversal(self):\n", 991 | " return self._inorder_recursive(self.root)\n", 992 | "\n", 993 | " def _inorder_recursive(self, node):\n", 994 | " result = []\n", 995 | " if node:\n", 996 | " result = 
self._inorder_recursive(node.left)\n", 997 | " result.append(node.val)\n", 998 | " result += self._inorder_recursive(node.right)\n", 999 | " return result\n", 1000 | "\n", 1001 | "# Example usage\n", 1002 | "bst = BinarySearchTree()\n", 1003 | "elements = [7, 3, 9, 1, 5, 8, 10]\n", 1004 | "for element in elements:\n", 1005 | " bst.insert(element)\n", 1006 | "\n", 1007 | "print(\"Inorder Traversal of BST:\", bst.inorder_traversal())\n", 1008 | "search_key = 5\n", 1009 | "result = bst.search(search_key)\n", 1010 | "if result:\n", 1011 | " print(f\"Found {search_key} in BST.\")\n", 1012 | "else:\n", 1013 | " print(f\"{search_key} not found in BST.\")\n" 1014 | ] 1015 | }, 1016 | { 1017 | "cell_type": "code", 1018 | "execution_count": 20, 1019 | "metadata": {}, 1020 | "outputs": [ 1021 | { 1022 | "name": "stdout", 1023 | "output_type": "stream", 1024 | "text": [ 1025 | "Element 5 found at index 2.\n" 1026 | ] 1027 | } 1028 | ], 1029 | "source": [ 1030 | "def binary_search(arr, target):\n", 1031 | " left, right = 0, len(arr) - 1\n", 1032 | " while left <= right:\n", 1033 | " mid = left + (right - left) // 2\n", 1034 | " \n", 1035 | " # Check if target is present at mid\n", 1036 | " if arr[mid] == target:\n", 1037 | " return mid\n", 1038 | " # If target is greater, ignore left half\n", 1039 | " elif arr[mid] < target:\n", 1040 | " left = mid + 1\n", 1041 | " # If target is smaller, ignore right half\n", 1042 | " else:\n", 1043 | " right = mid - 1\n", 1044 | " return -1 # Target not found\n", 1045 | "\n", 1046 | "# Example usage\n", 1047 | "sorted_array = [1, 3, 5, 7, 8, 9, 10]\n", 1048 | "target = 5\n", 1049 | "index = binary_search(sorted_array, target)\n", 1050 | "if index != -1:\n", 1051 | " print(f\"Element {target} found at index {index}.\")\n", 1052 | "else:\n", 1053 | " print(f\"Element {target} not found in the array.\")\n" 1054 | ] 1055 | }, 1056 | { 1057 | "cell_type": "markdown", 1058 | "metadata": {}, 1059 | "source": [ 1060 | "# Lab 10: Majority Element and Pattern Matching Algorithms\n", 1061 | "\n", 1062 | "In this lab, we will implement two important algorithms: the **Majority Element Algorithm** and **Pattern Matching Algorithms**. These algorithms are fundamental in data analysis and string processing.\n", 1063 | "\n", 1064 | "### Problem Statement\n", 1065 | "\n", 1066 | "1. **Majority Element Algorithm** \n", 1067 | " Implement an algorithm to find the **majority element** in an array. The majority element is the element that appears more than half the time in the array. If no such element exists, the algorithm should indicate this.\n", 1068 | "\n", 1069 | " **Inputs:**\n", 1070 | " - An array of integers.\n", 1071 | "\n", 1072 | " **Output:**\n", 1073 | " - The majority element if it exists, or a message indicating that no majority element is present.\n", 1074 | "\n", 1075 | "2. **Pattern Matching Algorithms** \n", 1076 | " Implement a pattern matching algorithm to find all occurrences of a given pattern in a text. 
You can choose to implement either the **Naive String Matching Algorithm** or the **Knuth-Morris-Pratt (KMP) Algorithm**.\n", 1077 | "\n", 1078 | " **Inputs:**\n", 1079 | " - A string (text) and a pattern (substring) to search for.\n", 1080 | "\n", 1081 | " **Output:**\n", 1082 | " - The starting indices of all occurrences of the pattern in the text.\n", 1083 | "\n", 1084 | "### Objectives\n", 1085 | "- **Understand and implement the Majority Element Algorithm**, analyzing its efficiency and possible approaches (e.g., Boyer-Moore Voting Algorithm).\n", 1086 | "- **Implement a pattern matching algorithm** and compare the efficiency of different methods.\n", 1087 | "- **Analyze the time and space complexities** of both algorithms.\n", 1088 | "- **Test and validate** the implementations with various inputs to ensure correctness and performance.\n", 1089 | "\n", 1090 | "Let’s dive into implementing the Majority Element and Pattern Matching algorithms to deepen our understanding of algorithmic techniques in data processing and string manipulation!\n" 1091 | ] 1092 | }, 1093 | { 1094 | "cell_type": "code", 1095 | "execution_count": 21, 1096 | "metadata": {}, 1097 | "outputs": [ 1098 | { 1099 | "name": "stdout", 1100 | "output_type": "stream", 1101 | "text": [ 1102 | "The majority element is: 3\n" 1103 | ] 1104 | } 1105 | ], 1106 | "source": [ 1107 | "def majority_element(nums):\n", 1108 | " candidate, count = None, 0\n", 1109 | " \n", 1110 | " # Phase 1: Find a candidate\n", 1111 | " for num in nums:\n", 1112 | " if count == 0:\n", 1113 | " candidate = num\n", 1114 | " count = 1\n", 1115 | " elif num == candidate:\n", 1116 | " count += 1\n", 1117 | " else:\n", 1118 | " count -= 1\n", 1119 | "\n", 1120 | " # Phase 2: Verify the candidate\n", 1121 | " count = sum(1 for num in nums if num == candidate)\n", 1122 | " if count > len(nums) // 2:\n", 1123 | " return candidate\n", 1124 | " return None\n", 1125 | "\n", 1126 | "# Example usage\n", 1127 | "nums = [3, 2, 3]\n", 1128 | "result = majority_element(nums)\n", 1129 | "if result is not None:\n", 1130 | " print(f\"The majority element is: {result}\")\n", 1131 | "else:\n", 1132 | " print(\"No majority element found.\")\n" 1133 | ] 1134 | }, 1135 | { 1136 | "cell_type": "code", 1137 | "execution_count": 22, 1138 | "metadata": {}, 1139 | "outputs": [ 1140 | { 1141 | "name": "stdout", 1142 | "output_type": "stream", 1143 | "text": [ 1144 | "Pattern found at index 10\n" 1145 | ] 1146 | } 1147 | ], 1148 | "source": [ 1149 | "def kmp_pattern_search(text, pattern):\n", 1150 | " # Preprocess the pattern to create the LPS array\n", 1151 | " lps = compute_lps(pattern)\n", 1152 | " m, n = len(pattern), len(text)\n", 1153 | " i = j = 0 # index for text and pattern\n", 1154 | "\n", 1155 | " while i < n:\n", 1156 | " if pattern[j] == text[i]:\n", 1157 | " i += 1\n", 1158 | " j += 1\n", 1159 | " \n", 1160 | " if j == m: # Found pattern\n", 1161 | " print(f\"Pattern found at index {i - j}\")\n", 1162 | " j = lps[j - 1]\n", 1163 | " elif i < n and pattern[j] != text[i]: # Mismatch after j matches\n", 1164 | " if j != 0:\n", 1165 | " j = lps[j - 1]\n", 1166 | " else:\n", 1167 | " i += 1\n", 1168 | "\n", 1169 | "def compute_lps(pattern):\n", 1170 | " lps = [0] * len(pattern)\n", 1171 | " length = 0 # length of the previous longest prefix suffix\n", 1172 | " i = 1\n", 1173 | "\n", 1174 | " while i < len(pattern):\n", 1175 | " if pattern[i] == pattern[length]:\n", 1176 | " length += 1\n", 1177 | " lps[i] = length\n", 1178 | " i += 1\n", 1179 | " else:\n", 1180 | " if 
length != 0:\n", 1181 | " length = lps[length - 1]\n", 1182 | " else:\n", 1183 | " lps[i] = 0\n", 1184 | " i += 1\n", 1185 | " return lps\n", 1186 | "\n", 1187 | "# Example usage\n", 1188 | "text = \"ababcabcabababd\"\n", 1189 | "pattern = \"ababd\"\n", 1190 | "kmp_pattern_search(text, pattern)\n" 1191 | ] 1192 | }, 1193 | { 1194 | "cell_type": "markdown", 1195 | "metadata": {}, 1196 | "source": [ 1197 | "# Lab 11: Divide and Conquer Algorithms - Multiplication, Counting Inversions, and Quicksort\n", 1198 | "\n", 1199 | "In this lab, we will explore the **Divide and Conquer** paradigm by implementing three important algorithms: **Multiplication of Numbers**, **Counting Inversions**, and **Quicksort**. These algorithms showcase the efficiency and power of the divide and conquer approach.\n", 1200 | "\n", 1201 | "### Problem Statement\n", 1202 | "\n", 1203 | "1. **Multiplication of Numbers Using Divide and Conquer** \n", 1204 | " Implement a multiplication algorithm that multiplies two numbers using the divide and conquer approach, specifically the **Karatsuba algorithm**. This method reduces the multiplication of two \\(n\\)-digit numbers to at most three multiplications of \\(n/2\\)-digit numbers.\n", 1205 | "\n", 1206 | " **Inputs:**\n", 1207 | " - Two non-negative integers.\n", 1208 | "\n", 1209 | " **Output:**\n", 1210 | " - The product of the two integers.\n", 1211 | "\n", 1212 | "2. **Counting Inversions** \n", 1213 | " Implement an algorithm to count the number of inversions in an array. An inversion is a pair of indices \\(i\\) and \\(j\\) such that \\(i < j\\) and \\(A[i] > A[j]\\). This can be efficiently done using a modified merge sort algorithm.\n", 1214 | "\n", 1215 | " **Inputs:**\n", 1216 | " - An array of integers.\n", 1217 | "\n", 1218 | " **Output:**\n", 1219 | " - The total number of inversions in the array.\n", 1220 | "\n", 1221 | "3. **Quicksort** \n", 1222 | " Implement the **Quicksort algorithm**, a highly efficient sorting algorithm that uses the divide and conquer technique to sort an array by partitioning it into smaller sub-arrays. 
It selects a pivot element and rearranges the array so that elements less than the pivot are on the left and elements greater than the pivot are on the right.\n", 1223 | "\n", 1224 | " **Inputs:**\n", 1225 | " - An array of integers.\n", 1226 | "\n", 1227 | " **Output:**\n", 1228 | " - The sorted array.\n", 1229 | "\n", 1230 | "### Objectives\n", 1231 | "- **Implement the Karatsuba algorithm** for efficient multiplication of large numbers.\n", 1232 | "- **Use a modified merge sort** to count inversions in an array and understand its implications in sorting.\n", 1233 | "- **Implement the Quicksort algorithm** and analyze its average and worst-case performance.\n", 1234 | "- **Analyze the time and space complexities** of all three algorithms.\n", 1235 | "- **Test and validate** the implementations with various inputs to ensure correctness and performance.\n", 1236 | "\n", 1237 | "Let’s dive into implementing these divide and conquer algorithms to strengthen our understanding of this powerful algorithmic strategy!\n" 1238 | ] 1239 | }, 1240 | { 1241 | "cell_type": "code", 1242 | "execution_count": 23, 1243 | "metadata": {}, 1244 | "outputs": [ 1245 | { 1246 | "name": "stdout", 1247 | "output_type": "stream", 1248 | "text": [ 1249 | "Karatsuba multiplication of 1234 and 5678 is: 7006652\n" 1250 | ] 1251 | } 1252 | ], 1253 | "source": [ 1254 | "def karatsuba(x, y):\n", 1255 | " # Base case for recursion\n", 1256 | " if x < 10 or y < 10:\n", 1257 | " return x * y\n", 1258 | "\n", 1259 | " # Calculate the size of the numbers\n", 1260 | " num1 = str(x)\n", 1261 | " num2 = str(y)\n", 1262 | " n = max(len(num1), len(num2))\n", 1263 | " half = n // 2\n", 1264 | "\n", 1265 | " # Split the digit sequences in the middle\n", 1266 | " a = x // 10**half\n", 1267 | " b = x % 10**half\n", 1268 | " c = y // 10**half\n", 1269 | " d = y % 10**half\n", 1270 | "\n", 1271 | " # 3 recursive calls\n", 1272 | " ac = karatsuba(a, c) # (a * c)\n", 1273 | " bd = karatsuba(b, d) # (b * d)\n", 1274 | " abcd = karatsuba(a + b, c + d) # (a + b) * (c + d)\n", 1275 | "\n", 1276 | " # Combine the results\n", 1277 | " return (ac * 10**(2 * half)) + ((abcd - ac - bd) * 10**half) + bd\n", 1278 | "\n", 1279 | "# Example usage\n", 1280 | "x = 1234\n", 1281 | "y = 5678\n", 1282 | "result = karatsuba(x, y)\n", 1283 | "print(f\"Karatsuba multiplication of {x} and {y} is: {result}\")\n" 1284 | ] 1285 | }, 1286 | { 1287 | "cell_type": "code", 1288 | "execution_count": 24, 1289 | "metadata": {}, 1290 | "outputs": [ 1291 | { 1292 | "name": "stdout", 1293 | "output_type": "stream", 1294 | "text": [ 1295 | "Number of inversions in the array: 5\n" 1296 | ] 1297 | } 1298 | ], 1299 | "source": [ 1300 | "def merge_and_count(arr, temp_arr, left, mid, right):\n", 1301 | " i = left # Starting index for left subarray\n", 1302 | " j = mid + 1 # Starting index for right subarray\n", 1303 | " k = left # Starting index to be sorted\n", 1304 | " inv_count = 0\n", 1305 | "\n", 1306 | " while i <= mid and j <= right:\n", 1307 | " if arr[i] <= arr[j]:\n", 1308 | " temp_arr[k] = arr[i]\n", 1309 | " i += 1\n", 1310 | " else:\n", 1311 | " temp_arr[k] = arr[j]\n", 1312 | " inv_count += (mid - i + 1) # Count inversions\n", 1313 | " j += 1\n", 1314 | " k += 1\n", 1315 | "\n", 1316 | " while i <= mid:\n", 1317 | " temp_arr[k] = arr[i]\n", 1318 | " i += 1\n", 1319 | " k += 1\n", 1320 | "\n", 1321 | " while j <= right:\n", 1322 | " temp_arr[k] = arr[j]\n", 1323 | " j += 1\n", 1324 | " k += 1\n", 1325 | "\n", 1326 | " for i in range(left, right + 1):\n", 1327 | " 
arr[i] = temp_arr[i]\n", 1328 | "\n", 1329 | " return inv_count\n", 1330 | "\n", 1331 | "def merge_sort_and_count(arr, temp_arr, left, right):\n", 1332 | " inv_count = 0\n", 1333 | " if left < right:\n", 1334 | " mid = (left + right) // 2\n", 1335 | "\n", 1336 | " inv_count += merge_sort_and_count(arr, temp_arr, left, mid)\n", 1337 | " inv_count += merge_sort_and_count(arr, temp_arr, mid + 1, right)\n", 1338 | " inv_count += merge_and_count(arr, temp_arr, left, mid, right)\n", 1339 | "\n", 1340 | " return inv_count\n", 1341 | "\n", 1342 | "# Example usage\n", 1343 | "arr = [1, 20, 6, 4, 5]\n", 1344 | "n = len(arr)\n", 1345 | "temp_arr = [0] * n\n", 1346 | "result = merge_sort_and_count(arr, temp_arr, 0, n - 1)\n", 1347 | "print(f\"Number of inversions in the array: {result}\")\n" 1348 | ] 1349 | }, 1350 | { 1351 | "cell_type": "code", 1352 | "execution_count": 25, 1353 | "metadata": {}, 1354 | "outputs": [ 1355 | { 1356 | "name": "stdout", 1357 | "output_type": "stream", 1358 | "text": [ 1359 | "Sorted array using quicksort: [1, 5, 7, 8, 9, 10]\n" 1360 | ] 1361 | } 1362 | ], 1363 | "source": [ 1364 | "def quicksort(arr):\n", 1365 | " if len(arr) <= 1:\n", 1366 | " return arr\n", 1367 | " else:\n", 1368 | " pivot = arr[len(arr) // 2] # Choosing the pivot\n", 1369 | " left = [x for x in arr if x < pivot] # Elements less than pivot\n", 1370 | " middle = [x for x in arr if x == pivot] # Elements equal to pivot\n", 1371 | " right = [x for x in arr if x > pivot] # Elements greater than pivot\n", 1372 | " return quicksort(left) + middle + quicksort(right)\n", 1373 | "\n", 1374 | "# Example usage\n", 1375 | "arr = [10, 7, 8, 9, 1, 5]\n", 1376 | "sorted_arr = quicksort(arr)\n", 1377 | "print(f\"Sorted array using quicksort: {sorted_arr}\")\n" 1378 | ] 1379 | }, 1380 | { 1381 | "cell_type": "markdown", 1382 | "metadata": {}, 1383 | "source": [ 1384 | "### Conclusion\n", 1385 | "\n", 1386 | "In this notebook, we have explored various fundamental algorithms implemented in Python, including multiplication using the divide and conquer method, counting inversions in an array, and the Quicksort sorting algorithm. Each implementation not only demonstrates the principles of these algorithms but also showcases their practical applications in solving common computational problems.\n", 1387 | "\n", 1388 | "Understanding and implementing these algorithms is crucial for developing efficient software and tackling real-world challenges in data manipulation and analysis. As you continue your journey in programming and algorithm design, consider experimenting with these implementations, optimizing them further, or applying them to more complex problems.\n", 1389 | "\n", 1390 | "Thank you for engaging with this notebook. Happy coding!\n" 1391 | ] 1392 | }, 1393 | { 1394 | "cell_type": "markdown", 1395 | "metadata": {}, 1396 | "source": [] 1397 | } 1398 | ], 1399 | "metadata": { 1400 | "kernelspec": { 1401 | "display_name": "Python 3", 1402 | "language": "python", 1403 | "name": "python3" 1404 | }, 1405 | "language_info": { 1406 | "codemirror_mode": { 1407 | "name": "ipython", 1408 | "version": 3 1409 | }, 1410 | "file_extension": ".py", 1411 | "mimetype": "text/x-python", 1412 | "name": "python", 1413 | "nbconvert_exporter": "python", 1414 | "pygments_lexer": "ipython3", 1415 | "version": "3.12.5" 1416 | } 1417 | }, 1418 | "nbformat": 4, 1419 | "nbformat_minor": 2 1420 | } 1421 | --------------------------------------------------------------------------------
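A note on the Lab 7/8 output specification: it asks for the edges of the Minimum Spanning Tree *and its total weight*, but the Prim cells above print only the edges. With the list of `(frm, to, cost)` tuples that the notebook's `prim()` returns, the total weight is a one-line sum. A minimal sketch (the `mst` value below is the result shown in the notebook for the example graph, reused here only so the snippet runs on its own):

```python
# Assumes mst is the list of (frm, to, cost) tuples returned by the notebook's prim(),
# e.g. for the example graph: [('A', 'B', 1), ('B', 'C', 2), ('C', 'D', 1)].
mst = [('A', 'B', 1), ('B', 'C', 2), ('C', 'D', 1)]

total_weight = sum(cost for _frm, _to, cost in mst)   # 1 + 2 + 1
print("Total MST weight:", total_weight)              # -> Total MST weight: 4
```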
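The Lab 9 problem statement also asks for node deletion and pre-/post-order traversals, which the sample `BinarySearchTree` in the notebook does not implement. Below is a minimal, standalone sketch of those operations; it is not taken from the notebook and only assumes the node shape used there (`val`, `left`, `right`). It is written as free functions for brevity; they could be folded into the `BinarySearchTree` class as private helpers.

```python
# Minimal sketch (not from the notebook): deletion and pre-/post-order traversal
# for a BST whose nodes have .val, .left and .right, matching Lab 9's TreeNode.

class TreeNode:
    def __init__(self, key):
        self.val = key
        self.left = None
        self.right = None

def insert(node, key):
    """Insert key into the subtree rooted at node and return its root."""
    if node is None:
        return TreeNode(key)
    if key < node.val:
        node.left = insert(node.left, key)
    else:
        node.right = insert(node.right, key)
    return node

def min_node(node):
    """Leftmost (smallest-key) node of a non-empty subtree."""
    while node.left is not None:
        node = node.left
    return node

def delete(node, key):
    """Delete key from the subtree rooted at node and return its new root."""
    if node is None:
        return None
    if key < node.val:
        node.left = delete(node.left, key)
    elif key > node.val:
        node.right = delete(node.right, key)
    else:
        # Zero or one child: splice the node out.
        if node.left is None:
            return node.right
        if node.right is None:
            return node.left
        # Two children: copy the in-order successor's value, then delete it.
        successor = min_node(node.right)
        node.val = successor.val
        node.right = delete(node.right, successor.val)
    return node

def preorder(node):
    """Root, left, right."""
    if node is None:
        return []
    return [node.val] + preorder(node.left) + preorder(node.right)

def postorder(node):
    """Left, right, root."""
    if node is None:
        return []
    return postorder(node.left) + postorder(node.right) + [node.val]

# Example usage (same keys as the notebook's BST example)
root = None
for key in [7, 3, 9, 1, 5, 8, 10]:
    root = insert(root, key)
print("Pre-order: ", preorder(root))    # [7, 3, 1, 5, 9, 8, 10]
print("Post-order:", postorder(root))   # [1, 5, 3, 8, 10, 9, 7]
root = delete(root, 9)                  # node with two children
print("Pre-order after deleting 9:", preorder(root))  # [7, 3, 1, 5, 10, 8]
```

The two-child case is the only subtle step: copying the in-order successor's value preserves the BST ordering invariant while reducing the problem to deleting a node with at most one child.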