├── 2 - SAT with solution .cpp ├── Aho Corasick.cpp ├── Articulartion Points .cpp ├── Articulation Bridge .cpp ├── BIT - Binary Indexed Tree(All variant , Sum+Xor).cpp ├── BIT- Binary Indexed Tree - 2D ( All Variant , sum+xor).cpp ├── Bellman Ford .cpp ├── Bi-connected Components.cpp ├── Big Integer.cpp ├── Binary Lifting (Sum Query between two vertex of a tree) .cpp ├── Centroid Decomposition .cpp ├── Cycle Printing in Directed Graph .cpp ├── DSU Modified(DSU+Ordered Set).cpp ├── DSU On Tree (nlogn) .cpp ├── DSU On Tree O(n log^2 n) .cpp ├── Dijkstra .cpp ├── Dynamic Programming Road Map ├── CSES DP.PNG ├── GFG Recursion and Backtracking.PNG ├── README.md ├── Shafayat blog.PNG ├── hackearth dp.PNG └── leetcode dp.PNG ├── EULER TOURS On Tree ( All variant).cpp ├── Euler Path_Circuit.cpp ├── Euler Tour Improved (FOR USE).cpp ├── FLOW1 - Max Flow (Ford-Fulkerson) O(E * max flow).cpp ├── FLOW2 - Max Flow (Edmonds-Karp) O(V * E^2).cpp ├── FLOW3 - Max Flow (for general use) (Dinic's algorithm) O(V^2 * E).cpp ├── FLOW4 - Minimum Cut.cpp ├── FLOW5 - Maximum Bipartite Matching HopcroftKarp.cpp ├── FLOW6 - Minimum Cost Maximum Flow (MCMF).cpp ├── FLOW7 - Minimum Cost Maximum Flow With Negative Cycle(MCMF).cpp ├── FLOW8 - Weighted Maximum Bipartite Matching (Hungarian algorithm).cpp ├── FLOW9 - Minimum Vertex Cover\Maximum Independent Set in Bipartite Graph (hopcroft-karp).cpp ├── GP Hash .cpp ├── Game Theory ( MEX , Sprague-Grundy Number Calculation).cpp ├── Gaussian Elimination.cpp ├── Heavy Light Decomposition ( Edge Update MINIMUM Query).cpp ├── Heavy Light Decomposition ( Sum Update Query on Vertex ).cpp ├── Heavy Light Decomposition (Sum Update Query on Edges ).cpp ├── Heavy Light Decomposition (Vertex Maximum Query with Single update).cpp ├── Inclusion Exclusion .cpp ├── Johnson’s algorithm for All-pairs shortest paths.cpp ├── LCA in O(1).cpp ├── LCA with Binary lifting.cpp ├── Longest Path In A DAG O(V+E).cpp ├── MO's Algorithm (K-th Minimum in Range).cpp ├── MST Krushkal .cpp ├── MST Prims .cpp ├── Matrix Exponentiation .cpp ├── Merge Sort Tree (k-th minimum in range) .cpp ├── Mobius Function.cpp ├── Monotonic Stack.cpp ├── Negative Cycle Printing .cpp ├── Order Multiset Template.cpp ├── Persistent Segment Tree (k-th minimum in range).cpp ├── Pollard Rho ( probabilistic Factorization) Algorithm.cpp ├── Prime Factorization .cpp ├── README.md ├── Segment Tree With Lazy Propagation.cpp ├── Shortest Path In A DAG O(V+E) .cpp ├── Sliding Window (Fixed,Variable Length Window Template).cpp ├── Strongly Connected Components to DAG.cpp ├── Suffix Array.cpp ├── Topological Sorting .cpp ├── Trie .cpp ├── Trie Xor (Min - Max).cpp ├── Why Dijkstra Fails For Negative Edges.md ├── XOR Basis.cpp ├── Z Function.cpp ├── zzzzzzz Me.png └── zzzzzzzz Batarang.png /2 - SAT with solution .cpp: -------------------------------------------------------------------------------- 1 | 2 | // 2- SAT 3 | // a = a and -a = a+n edge; // here n being the number of variable 4 | 5 | const int N = 1e5; 6 | vectorgraph[N]; 7 | vectorRgraph[N]; 8 | vectorforder; 9 | bool vis1[N]; 10 | bool vis2[N]; 11 | int SCC[N]; 12 | vectorassignment(N , false); 13 | 14 | void addedge(int a,int b) 15 | { 16 | graph[a].push_back(b); 17 | } 18 | void addRedge(int a,int b) 19 | { 20 | Rgraph[b].push_back(a); 21 | } 22 | 23 | void dfs1(int s) 24 | { 25 | vis1[s] = true; 26 | for(auto child:graph[s]) 27 | { 28 | if(vis1[child]) 29 | { 30 | continue; 31 | } 32 | dfs1(child); 33 | } 34 | forder.push_back(s); 35 | } 36 | 37 | void dfs2(int s,int sccno) 
38 | { 39 | vis2[s] = true; 40 | SCC[s] = sccno; 41 | for(auto child : Rgraph[s]) 42 | { 43 | if(vis2[child]) 44 | { 45 | continue; 46 | } 47 | dfs2(child,sccno); 48 | } 49 | } 50 | 51 | 52 | int32_t main() 53 | { 54 | ios::sync_with_stdio(0); 55 | cin.tie(0); 56 | 57 | int n ;// number of variable 58 | cin>>n; 59 | int m; // number of clause 60 | cin>>m; 61 | 62 | int a[m]; // first element of clause 63 | int b[m]; // second element of clause 64 | 65 | for(int i=0;i>a[i]; 68 | } 69 | for(int i=0;i>b[i]; 72 | } 73 | 74 | for(int i=0;i0 && b[i]> 0) // (a v b) = (-a -> b)^(-b -> a) 77 | { 78 | addedge(a[i]+n , b[i]); // (-a -> b) 79 | addRedge(a[i]+n , b[i]); 80 | addedge(b[i]+n , a[i]); //(-b -> a) 81 | addRedge(b[i]+n , a[i]); 82 | 83 | } 84 | else if(a[i]>0 && b[i]<0) // (a v -b) = (-a -> -b)^(b -> a) 85 | { 86 | addedge(a[i]+n , n-b[i]); // (-a -> -b) 87 | addRedge(a[i]+n , n-b[i]); 88 | addedge(-b[i], a[i]); // (b -> a) 89 | addRedge(-b[i] , a[i]); 90 | } 91 | else if(a[i]<0 && b[i]>0) // (-a v b) = (a -> b)^(-b -> -a) 92 | { 93 | addedge(-a[i] , b[i]); // (a -> b) 94 | addRedge(-a[i] , b[i]); 95 | addedge(b[i]+n, n-a[i]); // (-b -> -a) 96 | addRedge(b[i]+n , n-a[i]); 97 | } 98 | else //(-a v -b) = (a -> -b)^(b -> -a) 99 | { 100 | addedge(-a[i] , n-b[i]); // (a -> -b) 101 | addRedge(-a[i] , n-b[i]); 102 | addedge(-b[i] , n-a[i]); // (b -> -a) 103 | addRedge(-b[i] , n-a[i]); 104 | } 105 | } 106 | 107 | for(int i=1;i<=2*n;i++) 108 | { 109 | if(vis1[i]==false) 110 | { 111 | dfs1(i); 112 | } 113 | } 114 | 115 | reverse(forder.begin(),forder.end()); 116 | int cnt=0; 117 | for(auto x : forder) 118 | { 119 | if(vis2[x]==false) 120 | { 121 | cnt++; 122 | dfs2(x,cnt); 123 | } 124 | } 125 | 126 | 127 | bool ans = true; 128 | 129 | 130 | for(int i=1;i<=n;i++) 131 | { 132 | 133 | if(SCC[i]==SCC[i+n]) 134 | { 135 | ans = false; 136 | } 137 | assignment[i] = SCC[i+n] x) hole x = true else (x -> -x) hole x = false [for testing build a truth table] 138 | } 139 | 140 | 141 | 142 | if(ans==false) 143 | { 144 | cout<<"The given expression is NOT satisfiable."<node[N]; // it is equal to [[ int node[N][ALPH] ]]; Trie er moto 11 | const int root = 0; 12 | int avail = 0; 13 | 14 | //Suffix Link or Failure Link 15 | int fail[N]; 16 | int nxtWord[N]; 17 | int wordId[N]; 18 | 19 | 20 | void ini() 21 | { 22 | for(int i=0;i<=avail;i++) 23 | { 24 | node[i].clear(); 25 | } 26 | avail = 0; 27 | } 28 | 29 | int insertTrie(const string &s , int idx) 30 | { 31 | int cur = root; 32 | for(int i=0;iq; 56 | for(auto x : node[root]) 57 | { 58 | q.push(x.second); // root er child node er No. 59 | } 60 | while(!q.empty()) 61 | { 62 | int u = q.front(); 63 | q.pop(); 64 | for(auto [ch , v] : node[u]) // v = child node . ch = transition character 65 | { 66 | int f = fail[u]; // parent er fail link e gelam : jate sekan theke check korbo node ch e Transition hoy naki 67 | 68 | while(node[f][ch]==0 && f!=0) // jotokkon porjonto f No. node theke ch Transition exit's na kore totokkon failure link e jump kortei thakbo . f = 0 hoyegele off 69 | { 70 | f = fail[f]; 71 | } 72 | 73 | fail[v] = node[f][ch]; // jodi node[f][ch] ch na thake tahole auto f = 0 zero assign(ie. 
root ) map er karone 74 | 75 | if(wordId[fail[v]] !=-1) 76 | { 77 | nxtWord[v] = fail[v]; 78 | } 79 | else 80 | { 81 | nxtWord[v] = nxtWord[fail[v]]; 82 | } 83 | 84 | q.push(v); 85 | 86 | } 87 | } 88 | } 89 | 90 | 91 | vector> match(const string &text , int q) 92 | { 93 | vector> ret(q); 94 | 95 | int cur = root; 96 | 97 | for(int i=0;i>t; 127 | 128 | while(t--) 129 | { 130 | ini(); 131 | string text; 132 | cin>>text; 133 | int n; 134 | cin>>n; 135 | vectorid; 136 | for(int i=0;i>s; 140 | id.push_back(insertTrie(s,i)); 141 | } 142 | computeFailureLinkBFS(); 143 | 144 | auto matching = match(text,n); 145 | 146 | for(int i=0;igraph[N]; 4 | bool vis[N]; 5 | vectorparent(N,-1); 6 | int dis[N]; 7 | int low[N]; 8 | setAP; 9 | 10 | void dfs(int s , int time = 0) 11 | { 12 | time++; 13 | vis[s] = true; 14 | dis[s] = low[s] = time; 15 | int childno = 0; 16 | for(auto child : graph[s]) 17 | { 18 | if(parent[s]==child) 19 | { 20 | continue; 21 | } 22 | if(vis[child]) 23 | { 24 | low[s] = min(low[s],dis[child]); 25 | continue; 26 | } 27 | childno++; 28 | parent[child] = s; 29 | dfs(child, time); 30 | low[s] = min(low[s],low[child]); 31 | 32 | if(parent[s]!=-1 && dis[s]<=low[child]) // make sure s is not the root 33 | { 34 | AP.insert(s); 35 | } 36 | } 37 | if(s==0 && childno>1) // handling the root case 38 | { 39 | AP.insert(s); 40 | } 41 | 42 | return; 43 | } 44 | 45 | 46 | int32_t main() 47 | { 48 | ios::sync_with_stdio(0); 49 | cin.tie(0); 50 | int n,m; 51 | cin>>n>>m; 52 | for(int i=0;i>x>>y; 56 | graph[x].push_back(y); 57 | graph[y].push_back(x); 58 | } 59 | dfs(0); 60 | for(auto x : AP) 61 | { 62 | cout<graph[N]; 4 | bool vis[N]; 5 | vectorparent(N,-1); 6 | int dis[N]; 7 | int low[N]; 8 | set>bridge; 9 | 10 | void dfs(int s , int time = 0) 11 | { 12 | time++; 13 | vis[s] = true; 14 | dis[s] = low[s] = time; 15 | for(auto child : graph[s]) 16 | { 17 | if(parent[s]==child) 18 | { 19 | continue; 20 | } 21 | if(vis[child]) 22 | { 23 | low[s] = min(low[s],dis[child]); // backedge checking 24 | continue; 25 | } 26 | parent[child] = s; 27 | dfs(child, time); 28 | low[s] = min(low[s],low[child]); 29 | 30 | if(dis[s]>n>>m; 46 | for(int i=0;i>x>>y; 50 | graph[x].push_back(y); 51 | graph[y].push_back(x); 52 | } 53 | for(int i=0;i0 ; x-=(x&-x)) 23 | { 24 | sum+= BIT[x]; 25 | } 26 | return sum; 27 | } 28 | 29 | int32_t main() 30 | { 31 | ios::sync_with_stdio(0); 32 | cin.tie(0); 33 | 34 | 35 | int q; 36 | cin>>n>>q; 37 | for(int i=1;i<=n;i++) // BIT should be 1 based index 38 | { 39 | int a; 40 | cin>>a; 41 | update(i,a); 42 | } 43 | 44 | while(q--) 45 | { 46 | int a; 47 | cin>>a; 48 | if(a==1) 49 | { 50 | int b,c; 51 | cin>>b>>c; 52 | int val = query(b)-query(b-1); 53 | update(b,-val); 54 | update(b,c); //point update 55 | } 56 | else 57 | { 58 | int l,r; 59 | cin>>l>>r; 60 | 61 | cout< 78 | #include 79 | #include 80 | using namespace std; 81 | using namespace __gnu_pbds; 82 | #define endl "\n" 83 | #define int long long int 84 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 85 | 86 | const int N = 2e5+10; 87 | int BIT[N]; 88 | 89 | void update(int x,int num) 90 | { 91 | for(; x0 ; x-=(x&-x)) 107 | { 108 | ans += BIT[x]; 109 | } 110 | return ans; 111 | } 112 | 113 | int32_t main() 114 | { 115 | ios::sync_with_stdio(0); 116 | cin.tie(0); 117 | 118 | int n; 119 | cin>>n; 120 | for(int i=1;i<=n;i++) 121 | { 122 | int a; 123 | cin>>a; 124 | Update(i,i,a); 125 | } 126 | 127 | int q; 128 | cin>>q; 129 | 130 | while(q--) 131 | { 132 | int a; 133 | cin>>a; 134 | 
if(a==1) 135 | { 136 | int l,r,val; 137 | cin>>l>>r>>val; 138 | Update(l,r,val); 139 | } 140 | else 141 | { 142 | int pos; // 1 based 143 | cin>>pos; 144 | cout< 175 | #include 176 | #include 177 | using namespace std; 178 | using namespace __gnu_pbds; 179 | #define endl "\n" 180 | #define int long long int 181 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 182 | 183 | const int N = 2e5+100; 184 | int BIT1[N]; 185 | int BIT2[N]; 186 | 187 | void update(int BIT[] , int x , int num) 188 | { 189 | for(;x0;x-=(x&-x)) 207 | { 208 | sum += BIT[x]; 209 | } 210 | return sum; 211 | } 212 | 213 | int PrefixSum(int i) 214 | { 215 | return query(BIT1,i)*i-query(BIT2 , i); // For explainantion check cp algo 216 | } 217 | 218 | int RangeQuery(int l,int r) // You have to use this method : cant use other above 219 | { 220 | return PrefixSum(r)-PrefixSum(l-1); 221 | } 222 | 223 | 224 | 225 | int32_t main() 226 | { 227 | ios::sync_with_stdio(0); 228 | cin.tie(0); 229 | 230 | int n; 231 | cin>>n; 232 | for(int i=1;i<=n;i++) 233 | { 234 | int a; 235 | cin>>a; 236 | RangeUpdate(i,i,a); 237 | } 238 | 239 | int q; 240 | cin>>q; 241 | while(q--) 242 | { 243 | int a; 244 | cin>>a; 245 | if(a==1) 246 | { 247 | int l,r,val; 248 | cin>>l>>r>>val; 249 | RangeUpdate(l,r,val); 250 | } 251 | else 252 | { 253 | int l,r; 254 | cin>>l>>r; 255 | cout< 285 | #include 286 | #include 287 | using namespace std; 288 | using namespace __gnu_pbds; 289 | #define endl "\n" 290 | #define int long long int 291 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 292 | 293 | 294 | const int N = 2e5+100; 295 | int BIT1[N]; 296 | int BIT2[N]; 297 | 298 | void update(int BIT[] , int x , int num) 299 | { 300 | for(;x0;x-=(x&-x)) 318 | { 319 | sum ^= BIT[x]; 320 | } 321 | return sum; 322 | } 323 | 324 | int PrefixXor(int i) 325 | { 326 | return query(BIT1,i)*(i&1)^query(BIT2 , i); //In Range Sum Query BIT . query(BIT1,i)*i meant query(BIT1,i) appeared i times . So for Range Xor BIT we just need to check whether i is odd or even like before 327 | } 328 | 329 | int RangeQuery(int l,int r) // You have to use this method : cant use other above 330 | { 331 | return PrefixXor(r)^PrefixXor(l-1); 332 | } 333 | 334 | 335 | 336 | int32_t main() 337 | { 338 | ios::sync_with_stdio(0); 339 | cin.tie(0); 340 | 341 | int n; 342 | cin>>n; 343 | for(int i=1;i<=n;i++) 344 | { 345 | int a; 346 | cin>>a; 347 | RangeUpdate(i,i,a); 348 | } 349 | 350 | int q; 351 | cin>>q; 352 | while(q--) 353 | { 354 | int a; 355 | cin>>a; 356 | if(a==1) 357 | { 358 | int l,r,val; 359 | cin>>l>>r>>val; 360 | RangeUpdate(l,r,val); 361 | } 362 | else 363 | { 364 | int l,r; 365 | cin>>l>>r; 366 | cout<>n>>m; 18 | vectoredges; 19 | 20 | for(int i=0;i>x>>y>>w; 24 | edges.push_back({x,y,w}); 25 | } 26 | 27 | vectordis(n+1,inf); 28 | 29 | dis[0] = 0; 30 | dis[1] = 0; // distant of source is 0 . 
very important 31 | 32 | vectorpar(n+4,-1); 33 | 34 | bool neg_cycle = false; 35 | 36 | for(int i=0;ipath; 61 | // 62 | // int v = n; 63 | // 64 | // while(v!=-1) // path printing 65 | // { 66 | // path.push_back(v); 67 | // v = par[v]; 68 | // } 69 | // 70 | // reverse(path.begin() , path.end()); 71 | // 72 | // for(auto x : path) 73 | // { 74 | // cout<graph[N]; 4 | int low[N]; 5 | int dis[N]; 6 | vectorpar(N,-1); 7 | bool vis[N]; 8 | vector>p[N]; 9 | int no = 1; 10 | 11 | void dfs(int s, stack> &st , int time) 12 | { 13 | time++; 14 | vis[s] = true; 15 | low[s] = dis[s] = time; 16 | int childno = 0; 17 | for(auto child : graph[s]) 18 | { 19 | if(par[s]==child) 20 | { 21 | continue; 22 | } 23 | if(vis[child]) 24 | { 25 | low[s] = min(low[s] , dis[child]); 26 | if(dis[child]1) || (par[s]!=-1 && dis[s]<=low[child]) ) 42 | { 43 | while(st.top().first!=s || st.top().second!= child) 44 | { 45 | p[no].push_back({st.top().first ,st.top().second }); 46 | st.pop(); 47 | } 48 | p[no].push_back({st.top().first ,st.top().second }); 49 | st.pop(); 50 | no++; 51 | } 52 | } 53 | } 54 | 55 | 56 | int32_t main() 57 | { 58 | ios::sync_with_stdio(0); 59 | cin.tie(0); 60 | int n,m; 61 | cin>>n>>m; 62 | for(int i=0;i>x>>y; 66 | graph[x].push_back(y); 67 | graph[y].push_back(x); 68 | } 69 | stack>st; 70 | for(int i=0;i>graph[N]; 11 | bool vis[N]; 12 | const int L = log2(N); 13 | int up[N][L+1]; 14 | int sum[N]; 15 | int din[N]; 16 | int dout[N]; 17 | int tin; 18 | int tout; 19 | 20 | void dfs(int s,int par) 21 | { 22 | vis[s] = true; 23 | up[s][0] = par; 24 | 25 | din[s] = tin++; 26 | for(int i=1;i<=L;i++) 27 | { 28 | up[s][i] = up[ up[s][i-1] ][i-1]; 29 | } 30 | 31 | for(auto child : graph[s]) 32 | { 33 | if(vis[child.first]) 34 | { 35 | continue; 36 | } 37 | 38 | sum[child.first] = sum[s] + child.second; // here storing prefix sum 39 | 40 | dfs(child.first,s); 41 | } 42 | dout[s] = tout++; 43 | } 44 | 45 | bool is_ancestor(int u,int v) 46 | { 47 | if(din[u]<=din[v] && dout[u]>=dout[v]) 48 | { 49 | return true; 50 | } 51 | return false; 52 | } 53 | 54 | int lca(int u,int v) 55 | { 56 | if(is_ancestor(u,v)) 57 | { 58 | return u; 59 | } 60 | if(is_ancestor(v,u)) 61 | { 62 | return v; 63 | } 64 | for(int i=L;i>=0;i--) 65 | { 66 | if(!is_ancestor(up[u][i] , v)) 67 | { 68 | u = up[u][i]; 69 | } 70 | } 71 | return up[u][0]; 72 | } 73 | 74 | 75 | int32_t main() 76 | { 77 | ios::sync_with_stdio(0); 78 | cin.tie(0); 79 | int n; 80 | cin>>n; 81 | for(int i=0;i>x>>y>>w; 85 | graph[x].push_back({y,w}); 86 | graph[y].push_back({x,w}); 87 | } 88 | 89 | dfs(1,1); 90 | 91 | int q; 92 | cin>>q; 93 | while(q--) 94 | { 95 | int u,v; 96 | cin>>u>>v; 97 | 98 | int l = lca(u,v); 99 | 100 | if(l==u || l==v) 101 | { 102 | cout<graph[N]; 11 | int Time[N]; 12 | bool ok; 13 | bool flag; 14 | vectorans; 15 | int parent[N]; 16 | int start,finish; 17 | 18 | void dfs(int s,int tim) 19 | { 20 | vis[s] = 1; 21 | Time[s] = tim++; 22 | for(auto child: graph[s]) 23 | { 24 | if(vis[child]==0 ) 25 | { 26 | parent[child] = s; 27 | dfs(child,tim); 28 | if(ok) 29 | { 30 | return; 31 | } 32 | } 33 | if(vis[child]==1) 34 | { 35 | 36 | start = s; 37 | finish = child; 38 | ok = true; 39 | flag = true; 40 | return; 41 | } 42 | 43 | } 44 | 45 | vis[s] = 2; 46 | 47 | 48 | } 49 | 50 | int32_t main() 51 | { 52 | ios::sync_with_stdio(0); 53 | cin.tie(0); 54 | int n,m; 55 | cin>>n>>m; 56 | 57 | for(int i=0;i>x>>y; 61 | graph[x].push_back(y); 62 | } 63 | 64 | 65 | for(int i=1;i<=n;i++) 66 | { 67 | if(vis[i]==false) 68 | { 69 | dfs(i,0); 70 | if(flag) 71 | { 72 
| break; 73 | } 74 | } 75 | } 76 | 77 | 78 | if(flag) 79 | { 80 | 81 | ans.push_back(finish); 82 | 83 | while(finish!=start) 84 | { 85 | ans.push_back(start); 86 | start = parent[start]; 87 | } 88 | 89 | ans.push_back(finish); 90 | 91 | reverse(ans.begin(),ans.end()); 92 | 93 | 94 | cout< 5 | #include 6 | #include 7 | using namespace std; 8 | using namespace __gnu_pbds; 9 | #define endl "\n" 10 | #define int long long int 11 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 12 | 13 | //https://codeforces.com/gym/102625/problem/G 14 | 15 | 16 | //Modified dsu 17 | //dsu + ordered set 18 | // [ u -- v edge] lets say I want to get the size of [left part of u in the component tree and right part of v in the same component tree] 19 | 20 | 21 | const int N = 2e5+100; 22 | vectorgraph[N]; 23 | int in[N]; 24 | int out[N]; 25 | int tim; 26 | 27 | void dfs(int s , int p) 28 | { 29 | in[s] = tim++; 30 | 31 | for(auto child : graph[s]) 32 | { 33 | if(child==p) 34 | { 35 | continue; 36 | } 37 | dfs(child,s); 38 | } 39 | 40 | out[s] = tim; 41 | } 42 | 43 | 44 | int parent[N]; 45 | ordered_set sz[N]; 46 | 47 | void make(int v) 48 | { 49 | parent[v] = v; 50 | sz[v].insert(in[v]); 51 | } 52 | 53 | int Find(int v) 54 | { 55 | if(v==parent[v]) 56 | { 57 | return v; 58 | } 59 | 60 | return parent[v] = Find(parent[v]); 61 | } 62 | 63 | void Union(int a,int b) 64 | { 65 | a = Find(a); 66 | b = Find(b); 67 | 68 | if(a!=b) 69 | { 70 | if(sz[a].size() left_right_size(int u,int v) 86 | { 87 | bool flag = false; 88 | if(in[u]>in[v]) 89 | { 90 | flag = true; 91 | swap(u,v); 92 | } // u age dukbe 93 | 94 | u = Find(u); 95 | 96 | int a = sz[u].order_of_key(out[v]) - sz[u].order_of_key(in[v]); // This represent same component e v er subtree size . 
ba right part er size 97 | int b = sz[u].size()-a ; // this represent the component size of u or left part 98 | 99 | if(flag) 100 | { 101 | return {b,a}; // b hocche u er diker size and a hocche v er diker size 102 | } 103 | else 104 | { 105 | return {a,b}; // a hocce u er diker size and b hocce v er diker size 106 | } 107 | 108 | } 109 | 110 | 111 | int32_t main() 112 | { 113 | ios::sync_with_stdio(0); 114 | cin.tie(0); 115 | 116 | int n; 117 | cin>>n; 118 | 119 | vector>edges; 120 | 121 | sets; 122 | 123 | for(int i=1;i>x>>y>>w; 127 | edges.push_back({i,x,y,w}); 128 | graph[x].push_back(y); 129 | graph[y].push_back(x); 130 | 131 | s.insert(w); 132 | } 133 | 134 | dfs(1,0); 135 | 136 | sort(edges.begin() , edges.end() , [&](auto a, auto b){return a[3]ans(n+1); 148 | 149 | for(auto w : s) 150 | { 151 | int r = l; 152 | 153 | while(r+1res; 179 | 180 | for(int i=1;i>graph[N]; 4 | const int inf = 1e18; 5 | bool vis[N]; 6 | int dist[N]; 7 | 8 | 9 | void dijkstra(int ss) 10 | { 11 | dist[ss] = 0; 12 | priority_queue , vector> , greater> >pq; 13 | pq.push({0,ss}); 14 | 15 | while(!pq.empty()) 16 | { 17 | 18 | int father = pq.top().second; 19 | 20 | pq.pop(); 21 | 22 | 23 | //This line is very important for time saving 24 | if(vis[father]) 25 | { 26 | 27 | continue; 28 | } 29 | 30 | vis[father] = true; 31 | 32 | 33 | for(auto child : graph[father]) 34 | { 35 | int v = child.first; 36 | int vw = child.second; 37 | if(vis[v]) 38 | { 39 | continue; 40 | } 41 | if(dist[v]>dist[father]+vw) 42 | { 43 | dist[v] = dist[father]+vw; 44 | pq.push({dist[v] , v}); 45 | } 46 | } 47 | } 48 | 49 | 50 | } 51 | 52 | int32_t main() 53 | { 54 | ios::sync_with_stdio(0); 55 | cin.tie(0); 56 | int n,m; 57 | cin>>n>>m; 58 | 59 | for(int i=0;i>x>>y>>w; 63 | graph[x].push_back({y,w}); 64 | } 65 | for(int i=0;i Step - 01 : Watch Abdul Bari Sir's Video on Recursion 12 | It is very important to have a clear idea on how different types of recursion works . And also one should have clear idea on stack memory 13 | and how it is accessed and cleared through recursive process . You should watch BARI sir's video on recursion available on Youtube For this .
Here is a question for you: what happens, and more importantly why, in the following two codes?

```c
void print()
{
    char ch;
    cin>>ch;
    cout<<ch;
    print();
}
```

```c
void print()
{
    char ch;
    cin>>ch;
    print();
    cout<<ch;
}
```

*You should spend at most 2 days in this step*
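Once you have thought it through, here is a minimal bounded variant (my own driver, not part of the original exercise) that makes the difference visible: printing before the recursive call emits characters in input order, while printing after the call emits them in reverse as the call stack unwinds.

```c
#include <bits/stdc++.h>
using namespace std;

void printForward(const string &s, int i)      // prints s[i] BEFORE recursing
{
    if (i == (int)s.size()) return;
    cout << s[i];
    printForward(s, i + 1);
}

void printBackward(const string &s, int i)     // prints s[i] AFTER recursing, i.e. while the stack unwinds
{
    if (i == (int)s.size()) return;
    printBackward(s, i + 1);
    cout << s[i];
}

int main()
{
    printForward("abc", 0);   // abc
    cout << '\n';
    printBackward("abc", 0);  // cba
    cout << '\n';
}
```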


Step - 02 : Do 50 Easy and Medium problems on Recursion and Backtracking from GeeksForGeeks

Go to GFG, then go to topic-wise practice and select the topic Recursion and Backtracking.

Sort by difficulty and solve at least 50 problems (a tiny warm-up sketch follows below).

```c
After solving them you will have an excellent idea of how recursion and stack memory work, as well as backtracking.
```
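If you want a warm-up before the GFG problems, here is a minimal backtracking sketch of my own (not tied to any specific GFG problem): it prints every subset of an array by deciding, for each element, whether to skip it or take it, and then undoing the choice.

```c
#include <bits/stdc++.h>
using namespace std;

vector<int> a = {1, 2, 3};
vector<int> chosen;                       // the current partial subset

void backtrack(int i)
{
    if (i == (int)a.size())               // a decision has been made for every element
    {
        for (int x : chosen) cout << x << ' ';
        cout << '\n';
        return;
    }
    backtrack(i + 1);                     // choice 1: skip a[i]
    chosen.push_back(a[i]);               // choice 2: take a[i]
    backtrack(i + 1);
    chosen.pop_back();                    // undo the choice -- this is the "backtracking" step
}

int main()
{
    backtrack(0);                         // prints all 8 subsets of {1, 2, 3}
}
```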
*You should spend at most 1 week in this step*


Step - 03 : Do some Textbook Work

It is time to get yourself familiar with DP: the basic definitions, why DP, how DP, and so on.
You should read some blogs and articles on DP and try to build a mental picture of it.
Now you should code some of the most common and well-known dp problems (a small sketch of the first one follows at the end of this step):

```c
1. Coin change
2. LIS
3. LCS
4. Knapsack
5. etc.
```

Remember, you are not learning dp through them, just getting yourself ready.
If you are from Bangladesh, I also recommend that you follow Shafayat's blog in this step.

*I don't really know how long this step will take; take as long as you need.*
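To give a taste of what these textbook problems look like, here is a minimal memoized coin-change sketch (fewest coins reaching a target sum); the denominations and names are my own illustrative choices, not taken from any file in this repository.

```c
#include <bits/stdc++.h>
using namespace std;

const long long INF = 1e18;
vector<long long> coins = {1, 3, 4};     // available denominations
vector<long long> memo;                  // memo[s] = fewest coins summing to s, -1 = not computed yet

long long minCoins(long long s)
{
    if (s == 0) return 0;                // nothing left to pay
    if (memo[s] != -1) return memo[s];   // this subproblem is already solved

    long long best = INF;
    for (long long c : coins)
    {
        if (c > s) continue;
        long long sub = minCoins(s - c); // recursive relation: take coin c, solve the rest
        if (sub != INF) best = min(best, sub + 1);
    }
    return memo[s] = best;
}

int main()
{
    long long target = 6;
    memo.assign(target + 1, -1);
    long long ans = minCoins(target);
    cout << (ans == INF ? -1 : ans) << "\n";   // 2, using 3 + 3
}
```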


Step - 04 : Solve Problems From the AtCoder Educational DP Contest

```c
https://atcoder.jp/contests/dp
```

Almost all of the recent CP coders I know started their dp journey from this problem set.
You should solve as many as you can. If you get stuck, watch Errichto's video from the link below, where he has explained the solutions to all the problems.

```c
https://www.youtube.com/watch?v=FAQxdm0bTaw
```

Now you should turn to the CSES dp section and try to solve as many as you can. If you get stuck, don't waste too much time: watch the solution and move on.

```c
After this step you should have an abstract idea of how dp works. It is time to categorize your approach
to solving traditional dp problems. Move on to the next step.
```
*You should spend 2 weeks in this step.*


Step - 05 : Categorizing your approach to solving traditional dp problems

Follow the link below. You will reach a LeetCode article. Follow every detail of it blindly, trust me...

```c
https://leetcode.com/discuss/general-discussion/458695/dynamic-programming-patterns#Merging-Intervals
```

```c
Follow it blindly , every last detail of it . Trust me
```

Remember to focus on the recursive process rather than the iterative one, because that way you will be able to transform a problem into a recursive relation and then apply memoization (a small sketch of this pattern follows below). After a while you will realize that the recursive process is much easier than the iterative one (or maybe not).
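To make the "recursive relation first, memoization second" idea concrete, here is a minimal sketch of my own (not from the LeetCode article): counting paths in an n x m grid moving only right or down, written exactly as a recurrence plus a memo table.

```c
#include <bits/stdc++.h>
using namespace std;

long long memo[25][25];
bool seen[25][25];

// Recurrence: paths(r, c) = paths(r + 1, c) + paths(r, c + 1)
long long paths(int r, int c, int n, int m)
{
    if (r == n - 1 && c == m - 1) return 1;      // reached the bottom-right corner
    if (r >= n || c >= m) return 0;              // walked off the grid
    if (seen[r][c]) return memo[r][c];           // memoization: reuse the stored answer

    seen[r][c] = true;
    return memo[r][c] = paths(r + 1, c, n, m) + paths(r, c + 1, n, m);
}

int main()
{
    cout << paths(0, 0, 3, 3) << "\n";           // 6 paths in a 3x3 grid
}
```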
*You should spend around 2 weeks in this step*


Step - 06 : Time to do your own research and define your own approach to various dp problems

You should start solving dp problems from Codeforces. Remember one thing: most of the problems rated under 1700 can be solved using greedy even though they might carry a dp tag.
So don't try dp on them (or try, I think that is quite your own decision). The reason I say so is that, having recently learned dp, you will try a dp approach even on basic greedy problems and make them much more difficult than they are. However, this should fade with time and maturity.
*I don't know how long this step will take; take as long as you need*


Step - 07 : Learn Bitmask dp

Although it is a fairly advanced technique, I think you should learn it right away. Almost all problems that have n < 20 as a constraint are bitmask dp (a small sketch follows below).
I found the following article very helpful.

```c
https://www.hackerearth.com/practice/algorithms/dynamic-programming/bit-masking/tutorial/
```
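As a concrete illustration of the technique, here is a minimal bitmask-dp sketch for the classic assignment problem (assign n tasks to n people at minimum total cost), where the mask records which tasks are already taken; the cost matrix and names are my own example, not from the article above.

```c
#include <bits/stdc++.h>
using namespace std;

int n;
vector<vector<long long>> cost;      // cost[person][task]
vector<long long> dp;                // dp[mask] = best cost once the first popcount(mask) people are assigned

long long solve(int mask)
{
    int person = __builtin_popcount(mask);   // next unassigned person
    if (person == n) return 0;               // everyone is assigned
    if (dp[mask] != -1) return dp[mask];

    long long best = LLONG_MAX;
    for (int task = 0; task < n; task++)
        if (!(mask & (1 << task)))           // this task is still free
            best = min(best, cost[person][task] + solve(mask | (1 << task)));

    return dp[mask] = best;
}

int main()
{
    n = 3;
    cost = {{4, 1, 3},
            {2, 0, 5},
            {3, 2, 2}};
    dp.assign(1 << n, -1);
    cout << solve(0) << "\n";                // 5 : person 0 -> task 1, person 1 -> task 0, person 2 -> task 2
}
```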


Step - 08 : The Most Important Step

I am by no means an expert on dp, far from it actually.
168 | But I do know How it works while my journey through this road map . I can only hope that my writting was helpful to you 169 | .. 170 | If it was even a little bit helpful . 171 | 172 | ```c 173 | Please consider loggin in to your github and giving me a Star on my Competitive-Programming Repository. 174 | ``` 175 | 176 | Take care BYE . 177 | 178 | 179 | 180 | 181 | -------------------------------------------------------------------------------- /Dynamic Programming Road Map/Shafayat blog.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ArfatulAsif/Competitive-programming/2461dfe21a6b2cb81d9468e7a52685a904862807/Dynamic Programming Road Map/Shafayat blog.PNG -------------------------------------------------------------------------------- /Dynamic Programming Road Map/hackearth dp.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ArfatulAsif/Competitive-programming/2461dfe21a6b2cb81d9468e7a52685a904862807/Dynamic Programming Road Map/hackearth dp.PNG -------------------------------------------------------------------------------- /Dynamic Programming Road Map/leetcode dp.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ArfatulAsif/Competitive-programming/2461dfe21a6b2cb81d9468e7a52685a904862807/Dynamic Programming Road Map/leetcode dp.PNG -------------------------------------------------------------------------------- /Euler Path_Circuit.cpp: -------------------------------------------------------------------------------- 1 | Euler Path/Circuit : Visit all edges excatly once. 2 | 3 | Hamilton Path/Circuit: Visit all vertex excatly once. 4 | 5 | [Finding hamilton path/circuit is an NP - Complete problem] 6 | 7 | Euler Path/Circuit Undirected: 8 | 1.Euler Circuit: All vertices must have an even degree. 9 | 2.Euler Path: Exactly two vertices must have an odd degree (the path starts at one and ends at the other). 10 | 11 | 12 | Euler Path/Circuit Directed: 13 | 1. Euler Circuit: The graph must be strongly connected, and for every vertex, the in-degree must equal the out-degree. 14 | 15 | 2. Euler Path: The graph must be connected, Exactly one vertex must have out-degree - in-degree = 1 and exactly one vertex must have in-degree - out-degree = 1. All other vertices must have in-degree = out-degree. 16 | 17 | 18 | 19 | 20 | Hierholzer's algorithm For Undirected: 21 | 22 | 23 | // https://www.youtube.com/watch?v=8MpoO2zA2l4 [This video is for directed. But just remember that, instead of removing outdegree by 1, we will remove an edge. See the implementation of how we remove an edge. 24 | // Hierholzer's algorithm 25 | // Euler path/circuit in a undirected graph 26 | 27 | 28 | 29 | 30 | // https://cses.fi/problemset/task/1691 31 | 32 | const int N = 2e5+100; 33 | setgraph[N]; 34 | int degree[N]; 35 | vectorpath; 36 | bool vis[N]; 37 | 38 | void modified_dfs(int s) 39 | { 40 | 41 | while(!graph[s].empty()) // this is just to make sure if out[s], or out_degree of s becomes 0, we add it to path[] or solution, like stated in the heirhozler algoritm. 
42 | { 43 | int v = *graph[s].begin(); // For undirected graph, we consider each edge as an out_edge, so we remove it from count, so that we can get out[s] = 0 and add s to path 44 | 45 | graph[s].erase(v); // this is removing [s - v] edge from count 46 | graph[v].erase(s); // this is removing [s - v] edge from count 47 | 48 | modified_dfs(v); 49 | } 50 | 51 | path.push_back(s); 52 | 53 | 54 | } 55 | 56 | int32_t main() 57 | { 58 | ios::sync_with_stdio(0); 59 | cin.tie(0); 60 | 61 | int n,m; 62 | cin>>n>>m; 63 | 64 | for(int i=0;i>a>>b; 68 | graph[a].insert(b); 69 | graph[b].insert(a); 70 | degree[a]++; 71 | degree[b]++; 72 | } 73 | 74 | bool ok = true; 75 | 76 | for(int i=1;i<=n;i++) 77 | { 78 | if(degree[i]%2!=0) 79 | { 80 | ok = false; 81 | break; 82 | } 83 | } 84 | 85 | if(!ok) 86 | { 87 | cout<<"IMPOSSIBLE"<graph[N]; 132 | int in[N]; 133 | int out[N]; 134 | vectorpath; 135 | bool vis[N]; 136 | 137 | void modified_dfs(int s) 138 | { 139 | 140 | while(!graph[s].empty()) // this is just to make sure if out[s], or out_degree of s becomes 0, we add it to path[] or solution, like stated in the heirhozler algoritm 141 | { 142 | int v = graph[s].back(); 143 | graph[s].pop_back(); 144 | modified_dfs(v); 145 | } 146 | 147 | path.push_back(s); 148 | 149 | 150 | } 151 | 152 | int32_t main() 153 | { 154 | ios::sync_with_stdio(0); 155 | cin.tie(0); 156 | 157 | int n,m; 158 | cin>>n>>m; 159 | 160 | for(int i=0;i>a>>b; 164 | graph[a].push_back(b); 165 | out[a]++; 166 | in[b]++; 167 | } 168 | 169 | bool ok = true; 170 | 171 | for(int i=2;i 7 | #include 8 | #include 9 | using namespace std; 10 | using namespace __gnu_pbds; 11 | #define endl "\n" 12 | #define int long long int 13 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 14 | 15 | const int N = 1e5+100; 16 | vectorgraph[N]; 17 | bool vis[N]; 18 | int vis_time_vertex[N]; 19 | int in[N]; 20 | int out[N]; 21 | int tim; 22 | 23 | void dfs(int s) 24 | { 25 | vis[s] = true; 26 | in[s] = ++tim; 27 | vis_time_vertex[tim] = s; // ET array [Instead for 2*n , IT runs for O(n) time] 28 | 29 | for(auto child : graph[s]) 30 | { 31 | if(vis[child]) 32 | { 33 | continue; 34 | } 35 | dfs(child); 36 | } 37 | out[s] = tim; 38 | } 39 | 40 | 41 | int Tree[4*N]; 42 | 43 | void update(int node ,int lo ,int hi ,int pos ,int val) 44 | { 45 | if(lo==hi) 46 | { 47 | Tree[node] = val; 48 | return; 49 | } 50 | 51 | int mid = (lo+hi)>>1; 52 | 53 | if(pos<=mid) 54 | { 55 | update(node*2 , lo, mid , pos,val); 56 | } 57 | else 58 | { 59 | update(node*2+1 , mid+1 , hi , pos ,val); 60 | } 61 | 62 | Tree[node] = max(Tree[node*2] , Tree[node*2+1]); 63 | } 64 | 65 | 66 | int query(int node, int lo ,int hi,int i,int j) 67 | { 68 | if(hij) 69 | { 70 | return -1; 71 | } 72 | 73 | if(lo>=i && hi <= j) 74 | { 75 | return Tree[node]; 76 | } 77 | 78 | int mid = (lo+hi)>>1; 79 | int x = query(node*2 , lo , mid , i , j); 80 | int y = query(node*2+1 , mid+1 , hi, i, j); 81 | 82 | return max(x,y); 83 | } 84 | 85 | int32_t main() 86 | { 87 | ios::sync_with_stdio(0); 88 | cin.tie(0); 89 | 90 | int n,q; 91 | cin>>n>>q; 92 | vectorv; 93 | 94 | for(int i=0;i>a; 98 | v.push_back(a); 99 | } 100 | 101 | for(int i=0;i>x>>y; 105 | graph[x].push_back(y); 106 | graph[y].push_back(x); 107 | } 108 | 109 | 110 | dfs(1); 111 | 112 | for(int i=1;i<=n;i++) 113 | { 114 | update(1,1,n,i , v[vis_time_vertex[i]-1]); 115 | } 116 | 117 | while(q--) 118 | { 119 | int tt; 120 | cin>>tt; 121 | int node1; 122 | cin>>node1; 123 | vector>p; 124 | 125 | bool flag = false; 126 | 
127 | for(int i=0;i>v; 131 | 132 | if(in[v]= out[node1]) 133 | { 134 | flag = true; 135 | } 136 | 137 | if(in[v]>in[node1] && out[v] <= out[node1] ) 138 | { 139 | p.push_back({in[v] , out[v]}); 140 | } 141 | } 142 | 143 | if(flag) 144 | { 145 | cout<<-1<graph[N]; 24 | 25 | // This is for directed, for undirected make sure to include edges in both direction addEdge(u,v,c) and addEdge(v,u,c). 26 | void addEdge(int u, int v, int capacity, int id = -1) 27 | { 28 | graph[u].push_back({v, (int)graph[v].size(), capacity, id}); 29 | graph[v].push_back({u, (int)graph[u].size()-1, 0, -id}); 30 | } 31 | 32 | 33 | int dfs(int u, int sink, int flow, vector &visited) 34 | { 35 | if(u == sink) 36 | { 37 | return flow; 38 | } 39 | 40 | visited[u] = true; 41 | 42 | for(auto &[v, rev, residual_capacity, id] : graph[u]) 43 | { 44 | if(visited[v] || residual_capacity == 0) 45 | { 46 | continue; 47 | } 48 | 49 | int pathFlow = min(flow, residual_capacity); 50 | int pushedFlow = dfs(v, sink, pathFlow, visited); 51 | 52 | if(pushedFlow > 0) 53 | { 54 | residual_capacity -= pushedFlow; 55 | graph[v][rev].residual_capacity += pushedFlow; 56 | 57 | return pushedFlow; // we need to find only one augmenting path, so we return after any children who is giving pushFlow > 0 58 | } 59 | 60 | } 61 | 62 | return 0; // no augmenting path found 63 | } 64 | 65 | 66 | int FordFulkerson(int source ,int sink) 67 | { 68 | int maxFlow = 0; 69 | vectorvisited(N,false); 70 | 71 | while(true) 72 | { 73 | fill(visited.begin(),visited.end(),false); 74 | 75 | int flow = dfs(source, sink, INT_MAX, visited); 76 | 77 | if(flow == 0) // no augmenting path found 78 | { 79 | break; 80 | } 81 | 82 | maxFlow += flow; 83 | } 84 | 85 | return maxFlow; 86 | } 87 | 88 | 89 | int32_t main() 90 | { 91 | ios::sync_with_stdio(0); 92 | cin.tie(0); 93 | 94 | int n,m; 95 | cin>>n>>m; 96 | 97 | for(int i=0;i>u>>v>>c; 101 | addEdge(u,v,c); 102 | } 103 | 104 | cout<graph[N]; 27 | 28 | 29 | // This is for directed, for undirected make sure to include edges in both direction addEdge(u,v,c) and addEdge(v,u,c). 
30 | void addEdge(int u, int v, int capacity, int id = -1) 31 | { 32 | graph[u].push_back({v, (int)graph[v].size(), capacity, id}); 33 | graph[v].push_back({u, (int)graph[u].size()-1, 0, -id}); 34 | } 35 | 36 | int bfs(int source, int sink, vector> &parent) 37 | { 38 | for(int i=0;ivisited(N, false); 44 | 45 | visited[source] = true; 46 | queueq; 47 | q.push(source); 48 | 49 | int flow = INT_MAX; 50 | 51 | while(!q.empty()) 52 | { 53 | int u = q.front(); 54 | q.pop(); 55 | 56 | for(auto &[v, rev, residual_capacity, id] : graph[u]) 57 | { 58 | if(visited[v] || residual_capacity == 0) 59 | { 60 | continue; 61 | } 62 | 63 | flow = min(flow, residual_capacity); 64 | 65 | parent[v] = {u,rev}; // rev contains index of u in graph[v] 66 | 67 | visited[v] = true; 68 | 69 | if(v == sink) 70 | { 71 | return flow; 72 | } 73 | 74 | q.push(v); 75 | 76 | 77 | 78 | } 79 | } 80 | 81 | return 0; // no augmenting path 82 | } 83 | 84 | 85 | int Edmonds_Karp(int source, int sink) 86 | { 87 | int max_flow = 0; 88 | 89 | vector>parent(N, {-1,-1}); // parent[v] = {u, rev}, where rev contains index of u in graph[v] 90 | 91 | while(true) 92 | { 93 | int flow = bfs(source, sink, parent); 94 | 95 | if(flow == 0) 96 | { 97 | break; 98 | } 99 | 100 | max_flow += flow; 101 | 102 | int v = sink; 103 | 104 | while(v != source) 105 | { 106 | int u = parent[v].first; 107 | int rev = parent[v].second; 108 | 109 | graph[v][rev].residual_capacity += flow; 110 | graph[u][graph[v][rev].rev].residual_capacity -= flow; 111 | 112 | v = parent[v].first; 113 | } 114 | } 115 | 116 | return max_flow; 117 | } 118 | 119 | 120 | 121 | int32_t main() 122 | { 123 | ios::sync_with_stdio(0); 124 | cin.tie(0); 125 | 126 | int n,m; 127 | cin>>n>>m; 128 | 129 | for(int i=0;i>u>>v>>w; 133 | 134 | addEdge(u,v,w); 135 | } 136 | 137 | cout< graph[N]; 21 | int level[N]; 22 | int done[N]; // done[u] array is to keep track of which edges have already been tried for a given node during a DFS call. This ensures that we don't revisit the same edges unnecessarily and speeds up the process. 23 | 24 | 25 | // This is for directed, for undirected make sure to include edges in both direction addEdge(u,v,c) and addEdge(v,u,c). 
26 | void addEdge(int u, int v, int capacity, int id = -1) 27 | { 28 | graph[u].push_back({v, (int)graph[v].size(), capacity, id}); 29 | graph[v].push_back({u, (int)graph[u].size()-1, 0, -id}); 30 | } 31 | 32 | 33 | // BFS to build Level Graph 34 | bool bfs(int source, int sink) 35 | { 36 | fill(level, level+N, -1); 37 | level[source] = 0; 38 | queueq; 39 | q.push(source); 40 | 41 | while(!q.empty()) 42 | { 43 | int u = q.front(); 44 | q.pop(); 45 | 46 | for(auto &[v, rev, residual_capacity, id]: graph[u]) 47 | { 48 | if(level[v] != -1 || residual_capacity == 0) 49 | { 50 | continue; 51 | } 52 | 53 | level[v] = level[u] + 1; 54 | q.push(v); 55 | } 56 | } 57 | 58 | return level[sink] != -1; // sink is reachable = true 59 | } 60 | 61 | 62 | 63 | // DFS to send flow along augmenting paths 64 | int dfs(int u, int sink, int flow) 65 | { 66 | if(u == sink) 67 | { 68 | return flow; 69 | } 70 | 71 | for(; done[u] < graph[u].size(); done[u]++) 72 | { 73 | auto &[v, rev, residual_capacity, id] = graph[u][done[u]]; 74 | 75 | if(level[v] == level[u]+1 && residual_capacity > 0) // only traversing trough level graph 76 | { 77 | int pathFlow = min(flow, residual_capacity); 78 | int pushedFlow = dfs(v, sink, pathFlow); 79 | 80 | if(pushedFlow > 0) 81 | { 82 | residual_capacity -= pushedFlow; 83 | graph[v][rev].residual_capacity += pushedFlow; 84 | 85 | return pushedFlow; 86 | } 87 | } 88 | } 89 | 90 | return 0; 91 | } 92 | 93 | 94 | int Dinic(int source, int sink) 95 | { 96 | int max_flow = 0; 97 | 98 | while(bfs(source, sink)) // build the level graph 99 | { 100 | fill(done, done+N, 0); 101 | 102 | while(true) // traverse on the same level graph multiple time, which is efficiant than Edmonds-Karp. As we are blocking multiple edges(residual_capacity = 0) using the same level graph. 103 | { 104 | int flow = dfs(source, sink, inf); 105 | 106 | if(flow == 0) 107 | { 108 | break; 109 | } 110 | 111 | max_flow += flow; 112 | } 113 | 114 | } 115 | 116 | return max_flow; 117 | } 118 | 119 | 120 | int32_t main() 121 | { 122 | ios::sync_with_stdio(0); 123 | cin.tie(0); 124 | 125 | int n,m; 126 | cin>>n>>m; 127 | for(int i=0;i>u>>v>>c; 131 | addEdge(u,v,c); 132 | } 133 | 134 | cout< graph[N]; 14 | int level[N]; 15 | int done[N]; // done[u] array is to keep track of which edges have already been tried for a given node during a DFS call. This ensures that we don't revisit the same edges unnecessarily and speeds up the process. 16 | 17 | 18 | // This is for directed, for undirected make sure to include edges in both direction addEdge(u,v,c) and addEdge(v,u,c). 
19 | void addEdge(int u, int v, int capacity, int id = -1) 20 | { 21 | graph[u].push_back({v, (int)graph[v].size(), capacity, id}); 22 | graph[v].push_back({u, (int)graph[u].size()-1, 0, -id}); 23 | } 24 | 25 | 26 | // BFS to build Level Graph 27 | bool bfs(int source, int sink) 28 | { 29 | fill(level, level+N, -1); 30 | level[source] = 0; 31 | queueq; 32 | q.push(source); 33 | 34 | while(!q.empty()) 35 | { 36 | int u = q.front(); 37 | q.pop(); 38 | 39 | for(auto &[v, rev, residual_capacity, id]: graph[u]) 40 | { 41 | if(level[v] != -1 || residual_capacity == 0) 42 | { 43 | continue; 44 | } 45 | 46 | level[v] = level[u] + 1; 47 | q.push(v); 48 | } 49 | } 50 | 51 | return level[sink] != -1; // sink is reachable = true 52 | } 53 | 54 | 55 | // DFS to send flow along augmenting paths 56 | int dfs(int u, int sink, int flow) 57 | { 58 | if(u == sink) 59 | { 60 | return flow; 61 | } 62 | 63 | for(; done[u] < graph[u].size(); done[u]++) 64 | { 65 | auto &[v, rev, residual_capacity, id] = graph[u][done[u]]; 66 | 67 | if(level[v] == level[u]+1 && residual_capacity > 0) // only traversing trough level graph 68 | { 69 | int pathFlow = min(flow, residual_capacity); 70 | int pushedFlow = dfs(v, sink, pathFlow); 71 | 72 | if(pushedFlow > 0) 73 | { 74 | residual_capacity -= pushedFlow; 75 | graph[v][rev].residual_capacity += pushedFlow; 76 | 77 | return pushedFlow; 78 | } 79 | } 80 | } 81 | 82 | return 0; 83 | } 84 | 85 | 86 | int Dinic(int source, int sink) 87 | { 88 | int max_flow = 0; 89 | 90 | while(bfs(source, sink)) // build the level graph 91 | { 92 | fill(done, done+N, 0); 93 | 94 | while(true) // traverse on the same level graph multiple time, which is efficiant than Edmonds-Karp. As we are blocking multiple edges(residual_capacity = 0) using the same level graph. 95 | { 96 | int flow = dfs(source, sink, inf); 97 | 98 | if(flow == 0) 99 | { 100 | break; 101 | } 102 | 103 | max_flow += flow; 104 | } 105 | 106 | } 107 | 108 | return max_flow; 109 | } 110 | 111 | 112 | 113 | // For minimum cut, edges on minimum cut will have residual capacity = 0. 114 | // So if we run a BFS from source and reach vertices through edges that still have residual capacity > 0. We will eventually stop after finding residual capacity = 0 edges. 115 | // Now visited ones are Vs and unvisited once are Vt in a minimum cut X = (Vs, Vt), where Vs includes source and Vt includes sink. 
116 | 117 | 118 | // Finds all reachable nodes from the source in the residual graph after dinic is called 119 | void FindReachable_BFS(int source, vector &visited) 120 | { 121 | queueq; 122 | q.push(source); 123 | visited[source] = true; 124 | 125 | while(!q.empty()) 126 | { 127 | int u = q.front(); 128 | q.pop(); 129 | 130 | for(auto &[v, rev, residual_capacity, id]: graph[u]) 131 | { 132 | if(visited[v] == true || residual_capacity == 0) 133 | { 134 | continue; 135 | } 136 | 137 | visited[v] = true; 138 | q.push(v); 139 | } 140 | } 141 | } 142 | 143 | vector> FindMinCut(int source, int sink) 144 | { 145 | vector visited(N, false); 146 | 147 | FindReachable_BFS(source, visited); 148 | 149 | vector> Edges; 150 | 151 | for(int u=1; u < N; u++) 152 | { 153 | if(visited[u]) 154 | { 155 | for(auto &[v, rev, residual_capacity, id]: graph[u]) 156 | { 157 | if(visited[v] == false && residual_capacity == 0) 158 | { 159 | Edges.push_back({u,v}); 160 | } 161 | } 162 | } 163 | } 164 | 165 | return Edges; 166 | } 167 | 168 | 169 | 170 | int32_t main() 171 | { 172 | ios::sync_with_stdio(0); 173 | cin.tie(0); 174 | 175 | int n,m; 176 | cin>>n>>m; 177 | 178 | map, bool> mp; 179 | 180 | for(int i=0;i>u>>v; 184 | 185 | mp[{u,v}] = true; 186 | 187 | addEdge(u,v,1); 188 | addEdge(v,u,1); // the problem is for undirected 189 | } 190 | 191 | cout<> Cut_edges = FindMinCut(1, n); 194 | 195 | 196 | set>ans; // Since this is a undirected flow network, we need to remove all the duplicates , and only consider edges like input 197 | 198 | for(auto x : Cut_edges) 199 | { 200 | if(mp[{x.first, x.second}]) 201 | { 202 | ans.insert({x.first, x.second}); 203 | } 204 | else 205 | { 206 | ans.insert({x.second, x.first}); 207 | } 208 | } 209 | 210 | 211 | for(auto x : ans) 212 | { 213 | cout< left_i_is_connected_to_right; 15 | vector right_i_is_connected_to_left; 16 | vector level; 17 | vector> graph; 18 | 19 | HopcroftKarp(int _n, int _m) 20 | { 21 | left_size = _n; 22 | right_size = _m; 23 | int p = _n + _m + 1; 24 | graph.resize(p); 25 | left_i_is_connected_to_right.resize(p, 0); 26 | right_i_is_connected_to_left.resize(p, 0); 27 | level.resize(p, 0); 28 | } 29 | 30 | void addEdge(int u, int v) 31 | { 32 | graph[u].push_back(v + left_size); // right vertex is increased by left_size here.. 
33 | } 34 | 35 | bool bfs() 36 | { 37 | queue q; 38 | 39 | for (int u = 1; u <= left_size; u++) 40 | { 41 | if (!left_i_is_connected_to_right[u]) 42 | { 43 | level[u] = 0; 44 | q.push(u); 45 | } 46 | else 47 | { 48 | level[u] = inf; 49 | } 50 | } 51 | 52 | level[0] = inf; 53 | 54 | while (!q.empty()) 55 | { 56 | int u = q.front(); 57 | q.pop(); 58 | 59 | for (auto v : graph[u]) 60 | { 61 | if (level[right_i_is_connected_to_left[v]] == inf) 62 | { 63 | level[right_i_is_connected_to_left[v]] = level[u] + 1; 64 | q.push(right_i_is_connected_to_left[v]); 65 | } 66 | } 67 | } 68 | 69 | return level[0] != inf; 70 | } 71 | 72 | bool dfs(int u) 73 | { 74 | if (!u) 75 | { 76 | return true; 77 | } 78 | 79 | for (auto v : graph[u]) 80 | { 81 | if (level[right_i_is_connected_to_left[v]] == level[u] + 1 && dfs(right_i_is_connected_to_left[v])) 82 | { 83 | left_i_is_connected_to_right[u] = v; 84 | right_i_is_connected_to_left[v] = u; 85 | return true; 86 | } 87 | } 88 | 89 | level[u] = inf; 90 | 91 | return false; 92 | } 93 | 94 | int MaximumBipartiteMatching() 95 | { 96 | int ans = 0; 97 | 98 | while (bfs()) 99 | { 100 | for (int u = 1; u <= left_size; u++) 101 | { 102 | if (!left_i_is_connected_to_right[u] && dfs(u)) 103 | { 104 | ans++; 105 | } 106 | } 107 | } 108 | 109 | return ans; 110 | } 111 | 112 | vector> GetMatchingEdges() 113 | { 114 | vector> matching; 115 | 116 | for (int u = 1; u <= left_size; u++) 117 | { 118 | if (left_i_is_connected_to_right[u]) 119 | { 120 | matching.push_back({u, left_i_is_connected_to_right[u] - left_size}); // subtract left_size to get original right-side vertex 121 | } 122 | } 123 | 124 | return matching; 125 | } 126 | }; 127 | 128 | 129 | 130 | int32_t main() 131 | { 132 | ios::sync_with_stdio(0); 133 | cin.tie(0); 134 | 135 | int left_size, right_size, edges; 136 | 137 | cin >> left_size >> right_size >> edges; 138 | 139 | HopcroftKarp HK(left_size, right_size); 140 | 141 | for (int i = 0; i < edges; i++) 142 | { 143 | int u, v; 144 | cin >> u >> v; 145 | 146 | HK.addEdge(u, v); 147 | } 148 | 149 | cout << HK.MaximumBipartiteMatching() << endl; 150 | 151 | vector> matching = HK.GetMatchingEdges(); 152 | 153 | for (auto x : matching) 154 | { 155 | cout << x.first << " " << x.second << endl; 156 | } 157 | 158 | return 0; 159 | } 160 | 161 | // https://cses.fi/problemset/task/1696/ 162 | -------------------------------------------------------------------------------- /FLOW6 - Minimum Cost Maximum Flow (MCMF).cpp: -------------------------------------------------------------------------------- 1 | 2 | // This is similar to maximum flow algorithms. 3 | // In maximum flow algorithms, in each iteration, we used to find an augmenting path using DFS/BFS and then perform flow augmentation through its edges 4 | 5 | // But in Minimum cost maximum flow algorithm, 6 | // In each iteration, instead of finding just an augmenting path, we will find minimum cost augmenting path. [minimum cost augmenting path = cost of sending each unit of flow through this path is minimum among all the remaining augmenting paths] 7 | 8 | // So, for MCMF, in each iteration, we will use Dijkstra to find minimum cost augmenting path, then do flow augmentation through it's edges. 9 | // And that's it. 10 | 11 | 12 | 13 | 14 | // Adjusting Dijkstra to accomodate negative edges: 15 | // For max flow algorithms we consider edges in reverse to undo flow if needed. In this case, we are gonna have to undo the cost too. 
That's why for reverse edge, cost is negative "graph[v].push_back({u, (int)graph[u].size()-1, 0, -cost, -id});" 16 | // Also, for some problems cost of edges might be given negative in the input. 17 | // As we know dijkstra algorithm does not works for negative edges, we need to make adjustment to the edge weight's so that they become non-negative. 18 | // We shall use the same techniques that is used in johnson's algorithm for all pair shortest path in O(V^2 log V + VE) [https://github.com/ArfatulAsif/Competitive-programming/blob/main/Johnson%E2%80%99s%20algorithm%20for%20All-pairs%20shortest%20paths.cpp] 19 | // Using a bellman-ford to shift edges , new weight = w(u, v) + h[u] - h[v]. in this case u = source 20 | // Here the potential function h[] is used to "normalize" the graph by shifting the edge weights in such a way that all edges have non-negative weights. 21 | // Then perform Repeated Dijkstra 22 | // After each Dijkstra retrieve distance(u,v) = dijkstra(u, v) + h[v] - h[u]. u = source. 23 | // Here distance(source,sink) = minimum cost of sending 1 unit of flow from source to sink 24 | // Then update the potential function h[]. And repeat Dijkstra untill no augmenting path found/target_flow achieved. 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | // Property of this code: 33 | // Works for both directed, undirected and with negative cost (in the input) too. 34 | // Complexity: O(min(E^2 *V log V, E logV * flow)) 35 | // This code does not works if there is a negative cycle in the input. [that's why It Normally does not work for Maximum Cost maximum Flow, as taking -cost with edges might form negative cycles] 36 | 37 | 38 | 39 | 40 | const int N = 1000; 41 | 42 | struct MCMF 43 | { 44 | 45 | struct edge 46 | { 47 | int v; // to 48 | int rev; //Reverse edge index in graph[v] 49 | int residual_capacity; // Residual capacity of the edge 50 | int cost; // Cost per unit of flow through the edge 51 | int edge_id; // Edge ID (optional, if needed) 52 | }; 53 | 54 | vectorgraph[N]; 55 | 56 | int n; 57 | int source, sink; 58 | 59 | vector dist; 60 | vectorh; // potential array 61 | 62 | vector> parent; // parent[v] = {u, rev} = {parent node u, parent node u's index in graph[v]} 63 | 64 | 65 | MCMF(int _n) 66 | { 67 | n = _n+1; 68 | } 69 | 70 | // This is for directed, for undirected make sure to include edges in both direction addEdge(u,v) and addEdge(v,u). 
71 | void addEdge(int u, int v, int capacity, int cost, int id = -1) 72 | { 73 | graph[u].push_back({v, (int)graph[v].size(), capacity, cost, id}); 74 | graph[v].push_back({u, (int)graph[u].size()-1, 0, -cost, -id}); 75 | } 76 | 77 | 78 | bool BellmanFord() 79 | { 80 | dist.assign(n, inf); 81 | dist[source] = 0; 82 | vector>AllEdges; // {u, edge(v,rev, residual_capacity, cost, id)} 83 | 84 | for(int i=0;i dist[u]+cost && residual_capacity>0) 104 | { 105 | dist[v] = dist[u]+cost; 106 | flag = true; 107 | } 108 | 109 | } 110 | 111 | if(flag == false) 112 | { 113 | break; 114 | } 115 | } 116 | 117 | for(auto &e: AllEdges) 118 | { 119 | int u = e.first; 120 | auto &[v, rev, residual_capacity, cost, id] = e.second; 121 | 122 | if(dist[v] > dist[u]+cost && residual_capacity>0) 123 | { 124 | dist[v] = dist[u]+cost; 125 | 126 | return false; // neg cycle exists 127 | } 128 | 129 | } 130 | 131 | for(int i=0;i, vector>, greater>> pq; 150 | 151 | dist[source] = 0; 152 | 153 | pq.push({0, source}); 154 | 155 | while(!pq.empty()) 156 | { 157 | int u = pq.top().second; 158 | int u_cost = pq.top().first; // cost of 1 unit of flow from source to u 159 | pq.pop(); 160 | 161 | if(u_cost != dist[u]) 162 | { 163 | continue; 164 | } 165 | 166 | for(auto &[v, rev, residual_capacity, cost, id]: graph[u]) 167 | { 168 | int adjusted_weight = cost + h[u] - h[v]; // this is always non_negative, even if h[u] or h[v] is negative. 169 | 170 | if(dist[v] > dist[u]+adjusted_weight && residual_capacity > 0) 171 | { 172 | dist[v] = dist[u]+adjusted_weight; 173 | parent[v] = {u, rev}; // {u, position of u in graph[v]} 174 | pq.push({dist[v], v}); 175 | } 176 | } 177 | } 178 | 179 | for(int i=0;i MinimumCostMaximumFlow(int _source, int _sink, int target_flow = inf) 201 | { 202 | source = _source; 203 | sink = _sink; 204 | 205 | int total_flow_sent = 0; 206 | int total_cost = 0; 207 | 208 | h.assign(n, 0); 209 | 210 | if(!BellmanFord()) // using bellman-Ford for adjusting weight to accomodate negative neg weight edges 211 | { 212 | return {inf,inf}; // Either sink is not reachable or negative cycle exists 213 | } 214 | 215 | while( total_flow_sent < target_flow) 216 | { 217 | if(!Dijkstra()) // Use Dijkstra to find shortest augmenting path 218 | { 219 | // If no augmenting pathfound 220 | break; 221 | } 222 | 223 | 224 | int v = sink; 225 | int augmenting_path_flow = inf; 226 | int cost_per_unit_of_flow_from_source_to_sink = dist[sink]; 227 | 228 | while(v != source) 229 | { 230 | int u = parent[v].first; 231 | int rev = parent[v].second; 232 | 233 | int idx_of_v_in_u = graph[v][rev].rev; 234 | 235 | int residual_capacity = graph[u][idx_of_v_in_u].residual_capacity; 236 | 237 | augmenting_path_flow = min(augmenting_path_flow, residual_capacity); 238 | 239 | v = parent[v].first; 240 | 241 | } 242 | 243 | 244 | //adjust the residual capacity 245 | 246 | v = sink; 247 | while(v != source) 248 | { 249 | int u = parent[v].first; 250 | int rev = parent[v].second; 251 | 252 | int idx_of_v_in_u = graph[v][rev].rev; 253 | 254 | graph[u][idx_of_v_in_u].residual_capacity -= augmenting_path_flow; 255 | graph[v][rev].residual_capacity += augmenting_path_flow; 256 | 257 | v = parent[v].first; 258 | 259 | } 260 | 261 | 262 | if(total_flow_sent + augmenting_path_flow >= target_flow) 263 | { 264 | int take = target_flow - total_flow_sent; 265 | total_cost += (take*cost_per_unit_of_flow_from_source_to_sink); 266 | total_flow_sent = target_flow; 267 | break; 268 | } 269 | else 270 | { 271 | total_flow_sent += augmenting_path_flow; 272 | 273 | 
total_cost += (augmenting_path_flow*cost_per_unit_of_flow_from_source_to_sink); 274 | } 275 | 276 | } 277 | 278 | 279 | if(total_flow_sent < target_flow) 280 | { 281 | return {inf, inf}; 282 | } 283 | 284 | 285 | return {total_flow_sent, total_cost}; 286 | } 287 | 288 | 289 | }; 290 | 291 | 292 | 293 | int32_t main() 294 | { 295 | ios::sync_with_stdio(0); 296 | cin.tie(0); 297 | 298 | int n,m,k; 299 | cin>>n>>m>>k; 300 | 301 | MCMF mcmf(n); 302 | 303 | for(int i=0;i>u>>v>>c>>cost; 307 | 308 | mcmf.addEdge(u,v,c,cost); // if undirect then add mcmf.addEdge(v,u,c,cost) too 309 | } 310 | 311 | 312 | 313 | int ans = mcmf.MinimumCostMaximumFlow(1,n,k).second; 314 | 315 | if(ans == inf) 316 | { 317 | cout<<-1< * log(V * max_edge_cost)) = O( V^3 * log(V * C)) 3 | // Really fast in practice, 3e4 edges are fine. 4 | // Operates on integers, costs are multiplied by N!! 5 | 6 | 7 | 8 | 9 | // this is shohage vais code, I didnot understand this ... When I do I will write one. 10 | 11 | #include 12 | using namespace std; 13 | 14 | // source: dacin21 15 | template 16 | struct mcSFlow { 17 | struct Edge { 18 | cost_t c; 19 | flow_t f; 20 | int to, rev; 21 | Edge(int _to, cost_t _c, flow_t _f, int _rev): c(_c), f(_f), to(_to), rev(_rev) {} 22 | }; 23 | static constexpr cost_t INFCOST = numeric_limits::max() / 2; 24 | cost_t eps; 25 | int N, S, T; 26 | vector > G; 27 | vector isq, cur; 28 | vector ex; 29 | vector h; 30 | mcSFlow(int _N, int _S, int _T): eps(0), N(_N), S(_S), T(_T), G(_N) {} 31 | void add_edge(int a, int b, cost_t cost, flow_t cap) { 32 | assert(cap >= 0); 33 | assert(a >= 0 && a < N && b >= 0 && b < N); 34 | if(a == b) { 35 | assert(cost >= 0); 36 | return; 37 | } 38 | cost *= N; 39 | eps = max(eps, abs(cost)); 40 | G[a].emplace_back(b, cost, cap, G[b].size()); 41 | G[b].emplace_back(a, -cost, 0, G[a].size() - 1); 42 | } 43 | void add_flow(Edge& e, flow_t f) { 44 | Edge &back = G[e.to][e.rev]; 45 | if (!ex[e.to] && f) 46 | hs[h[e.to]].push_back(e.to); 47 | e.f -= f; 48 | ex[e.to] += f; 49 | back.f += f; 50 | ex[back.to] -= f; 51 | } 52 | vector > hs; 53 | vector co; 54 | flow_t max_flow() { 55 | ex.assign(N, 0); 56 | h.assign(N, 0); 57 | hs.resize(2 * N); 58 | co.assign(2 * N, 0); 59 | cur.assign(N, 0); 60 | h[S] = N; 61 | ex[T] = 1; 62 | co[0] = N - 1; 63 | for(auto &e : G[S]) add_flow(e, e.f); 64 | if(hs[0].size()) 65 | for (int hi = 0; hi >= 0;) { 66 | int u = hs[hi].back(); 67 | hs[hi].pop_back(); 68 | while (ex[u] > 0) { // discharge u 69 | if (cur[u] == G[u].size()) { 70 | h[u] = 1e9; 71 | for(unsigned int i = 0; i < G[u].size(); ++i) { 72 | auto &e = G[u][i]; 73 | if (e.f && h[u] > h[e.to] + 1) { 74 | h[u] = h[e.to] + 1, cur[u] = i; 75 | } 76 | } 77 | if (++co[h[u]], !--co[hi] && hi < N) 78 | for(int i = 0; i < N; ++i) 79 | if (hi < h[i] && h[i] < N) { 80 | --co[h[i]]; 81 | h[i] = N + 1; 82 | } 83 | hi = h[u]; 84 | } else if (G[u][cur[u]].f && h[u] == h[G[u][cur[u]].to] + 1) 85 | add_flow(G[u][cur[u]], min(ex[u], G[u][cur[u]].f)); 86 | else ++cur[u]; 87 | } 88 | while (hi >= 0 && hs[hi].empty()) --hi; 89 | } 90 | return -ex[S]; 91 | } 92 | void push(Edge &e, flow_t amt) { 93 | if(e.f < amt) amt = e.f; 94 | e.f -= amt; 95 | ex[e.to] += amt; 96 | G[e.to][e.rev].f += amt; 97 | ex[G[e.to][e.rev].to] -= amt; 98 | } 99 | void relabel(int vertex) { 100 | cost_t newHeight = -INFCOST; 101 | for(unsigned int i = 0; i < G[vertex].size(); ++i) { 102 | Edge const&e = G[vertex][i]; 103 | if(e.f && newHeight < h[e.to] - e.c) { 104 | newHeight = h[e.to] - e.c; 105 | cur[vertex] = i; 106 | } 107 | } 
108 | h[vertex] = newHeight - eps; 109 | } 110 | static constexpr int scale = 2; 111 | pair minCostMaxFlow() { 112 | cost_t retCost = 0; 113 | for(int i = 0; i < N; ++i) 114 | for(Edge &e : G[i]) 115 | retCost += e.c * (e.f); 116 | //find max-flow 117 | flow_t retFlow = max_flow(); 118 | h.assign(N, 0); 119 | ex.assign(N, 0); 120 | isq.assign(N, 0); 121 | cur.assign(N, 0); 122 | queue q; 123 | for(; eps; eps >>= scale) { 124 | //refine 125 | fill(cur.begin(), cur.end(), 0); 126 | for(int i = 0; i < N; ++i) 127 | for(auto &e : G[i]) 128 | if(h[i] + e.c - h[e.to] < 0 && e.f) push(e, e.f); 129 | for(int i = 0; i < N; ++i) { 130 | if(ex[i] > 0) { 131 | q.push(i); 132 | isq[i] = 1; 133 | } 134 | } 135 | // make flow feasible 136 | while(!q.empty()) { 137 | int u = q.front(); 138 | q.pop(); 139 | isq[u] = 0; 140 | while(ex[u] > 0) { 141 | if(cur[u] == G[u].size()) 142 | relabel(u); 143 | for(unsigned int &i = cur[u], max_i = G[u].size(); i < max_i; ++i) { 144 | Edge &e = G[u][i]; 145 | if(h[u] + e.c - h[e.to] < 0) { 146 | push(e, ex[u]); 147 | if(ex[e.to] > 0 && isq[e.to] == 0) { 148 | q.push(e.to); 149 | isq[e.to] = 1; 150 | } 151 | if(ex[u] == 0) break; 152 | } 153 | } 154 | } 155 | } 156 | if(eps > 1 && eps >> scale == 0) { 157 | eps = 1 << scale; 158 | } 159 | } 160 | for(int i = 0; i < N; ++i) { 161 | for(Edge &e : G[i]) { 162 | retCost -= e.c * (e.f); 163 | } 164 | } 165 | return make_pair(retFlow, retCost / 2 / N); 166 | } 167 | flow_t getFlow(Edge const &e) { 168 | return G[e.to][e.rev].f; 169 | } 170 | }; 171 | -------------------------------------------------------------------------------- /FLOW8 - Weighted Maximum Bipartite Matching (Hungarian algorithm).cpp: -------------------------------------------------------------------------------- 1 | 2 | /* Hungarian Algorithm: 3 | - Complexity: O(V^3), optimized for both minimum and maximum cost maximum matching. 4 | - Minimum Cost Maximum Matching: Uses original costs. [ Also set, c[i][j] = inf] 5 | - Maximum Cost Maximum Matching: Negates the costs and returns the negated result. 
[ add_edge(u, v, -cost) and cout<<(-M.WeightedBipartiteMatching())< q; 22 | int start, finish, n; 23 | 24 | Hungarian() {} 25 | 26 | Hungarian(int n1, int n2): n(max(n1, n2)) 27 | { 28 | for (int i = 1; i <= n; ++i) 29 | { 30 | fy[i] = left_i_connected_to_right[i] = right_i_connected_to_left[i] = 0; 31 | 32 | for (int j = 1; j <= n; ++j) 33 | { 34 | c[i][j] = 0; // Set to 0 for maximum cost matching 35 | } 36 | } 37 | } 38 | 39 | void add_edge(int u, int v, int cost) 40 | { 41 | c[u][v] = min(c[u][v], cost); 42 | } 43 | 44 | inline int getC(int u, int v) 45 | { 46 | return c[u][v] - fx[u] - fy[v]; 47 | } 48 | 49 | void initBFS() 50 | { 51 | while (!q.empty()) q.pop(); 52 | q.push(start); 53 | 54 | for (int i = 0; i <= n; ++i) trace[i] = 0; 55 | 56 | for (int v = 1; v <= n; ++v) 57 | { 58 | d[v] = getC(start, v); 59 | arg[v] = start; 60 | } 61 | finish = 0; 62 | } 63 | 64 | void findAugPath() 65 | { 66 | while (!q.empty()) 67 | { 68 | int u = q.front(); 69 | q.pop(); 70 | for (int v = 1; v <= n; ++v) 71 | { 72 | if (!trace[v]) 73 | { 74 | int w = getC(u, v); 75 | if (!w) 76 | { 77 | trace[v] = u; 78 | if (!right_i_connected_to_left[v]) 79 | { 80 | finish = v; 81 | return; 82 | } 83 | q.push(right_i_connected_to_left[v]); 84 | } 85 | if (d[v] > w) 86 | { 87 | d[v] = w; 88 | arg[v] = u; 89 | } 90 | } 91 | } 92 | } 93 | } 94 | 95 | void subX_addY() 96 | { 97 | int delta = inf; 98 | for (int v = 1; v <= n; ++v) 99 | { 100 | if (!trace[v] && d[v] < delta) 101 | { 102 | delta = d[v]; 103 | } 104 | } 105 | 106 | fx[start] += delta; 107 | 108 | for (int v = 1; v <= n; ++v) 109 | { 110 | if (trace[v]) 111 | { 112 | int u = right_i_connected_to_left[v]; 113 | fy[v] -= delta; 114 | fx[u] += delta; 115 | } 116 | else 117 | { 118 | d[v] -= delta; 119 | } 120 | } 121 | for (int v = 1; v <= n; ++v) 122 | { 123 | if (!trace[v] && !d[v]) 124 | { 125 | trace[v] = arg[v]; 126 | if (!right_i_connected_to_left[v]) 127 | { 128 | finish = v; 129 | return; 130 | } 131 | q.push(right_i_connected_to_left[v]); 132 | } 133 | } 134 | } 135 | 136 | void Enlarge() 137 | { 138 | do { 139 | int u = trace[finish]; 140 | int next = left_i_connected_to_right[u]; 141 | left_i_connected_to_right[u] = finish; 142 | right_i_connected_to_left[finish] = u; 143 | finish = next; 144 | } while (finish); 145 | } 146 | 147 | int WeightedBipartiteMatching() 148 | { 149 | for (int u = 1; u <= n; ++u) 150 | { 151 | fx[u] = c[u][1]; 152 | for (int v = 1; v <= n; ++v) 153 | { 154 | fx[u] = min(fx[u], c[u][v]); 155 | } 156 | } 157 | for (int v = 1; v <= n; ++v) 158 | { 159 | fy[v] = c[1][v] - fx[1]; 160 | 161 | for (int u = 1; u <= n; ++u) 162 | { 163 | fy[v] = min(fy[v], c[u][v] - fx[u]); 164 | } 165 | } 166 | for (int u = 1; u <= n; ++u) 167 | { 168 | start = u; 169 | initBFS(); 170 | while (!finish) 171 | { 172 | findAugPath(); 173 | if (!finish) subX_addY(); 174 | } 175 | Enlarge(); 176 | } 177 | int ans = 0; 178 | for (int i = 1; i <= n; ++i) 179 | { 180 | if (c[i][left_i_connected_to_right[i]] != inf) ans += c[i][left_i_connected_to_right[i]]; 181 | else left_i_connected_to_right[i] = 0; 182 | } 183 | return ans; 184 | } 185 | 186 | 187 | vector> FindBipartiteMatching(int left_size, int right_size) 188 | { 189 | vector>matching; 190 | 191 | for(int i=1;i<=left_size;i++) 192 | { 193 | if(left_i_connected_to_right[i]!=0) 194 | { 195 | matching.push_back({i,left_i_connected_to_right[i]}); 196 | } 197 | } 198 | 199 | // Or we can use below part, either one is fine. 
200 | // for(int i=1;i<=right_size;i++) 201 | // { 202 | // if(right_i_connected_to_left[i] != 0) 203 | // { 204 | // matching.push_back({right_i_connected_to_left[i], i}); 205 | // } 206 | // } 207 | 208 | return matching; 209 | } 210 | }; 211 | 212 | 213 | 214 | 215 | int32_t main() 216 | { 217 | ios::sync_with_stdio(0); 218 | cin.tie(0); 219 | 220 | int t; 221 | cin>>t; 222 | for(int tt=1;tt<=t;tt++) 223 | { 224 | int n; 225 | cin>>n; 226 | 227 | int left_size, right_size; 228 | 229 | left_size = right_size = n; 230 | 231 | Hungarian M(left_size,right_size); 232 | 233 | vectorv1,v2; 234 | for(int i=0;i>a; 238 | v1.push_back(a); 239 | } 240 | 241 | for(int i=0;i>a; 245 | v2.push_back(a); 246 | } 247 | 248 | for(int i=0;iv2[j]) 253 | { 254 | M.add_edge(i+1,j+1, -2); // -cost, so that it becomes maximum cost maximum bipartite matching . For minimum cost, dont negate 255 | } 256 | if(v1[i] == v2[j]) 257 | { 258 | M.add_edge(i+1,j+1,-1); 259 | } 260 | } 261 | } 262 | 263 | 264 | cout<<"Case "< left_i_is_connected_to_right; 8 | vector right_i_is_connected_to_left; 9 | vector level; 10 | vector> graph; 11 | 12 | HopcroftKarp(int _n, int _m) 13 | { 14 | left_size = _n; 15 | right_size = _m; 16 | int p = _n + _m + 1; 17 | graph.resize(p); 18 | left_i_is_connected_to_right.resize(p, 0); 19 | right_i_is_connected_to_left.resize(p, 0); 20 | level.resize(p, 0); 21 | } 22 | 23 | void addEdge(int u, int v) 24 | { 25 | graph[u].push_back(v + left_size); // right vertex is increased by left_size here.. 26 | } 27 | 28 | bool bfs() 29 | { 30 | queue q; 31 | 32 | for (int u = 1; u <= left_size; u++) 33 | { 34 | if (!left_i_is_connected_to_right[u]) 35 | { 36 | level[u] = 0; 37 | q.push(u); 38 | } 39 | else 40 | { 41 | level[u] = inf; 42 | } 43 | } 44 | 45 | level[0] = inf; 46 | 47 | while (!q.empty()) 48 | { 49 | int u = q.front(); 50 | q.pop(); 51 | 52 | for (auto v : graph[u]) 53 | { 54 | if (level[right_i_is_connected_to_left[v]] == inf) 55 | { 56 | level[right_i_is_connected_to_left[v]] = level[u] + 1; 57 | q.push(right_i_is_connected_to_left[v]); 58 | } 59 | } 60 | } 61 | 62 | return level[0] != inf; 63 | } 64 | 65 | bool dfs(int u) 66 | { 67 | if (!u) 68 | { 69 | return true; 70 | } 71 | 72 | for (auto v : graph[u]) 73 | { 74 | if (level[right_i_is_connected_to_left[v]] == level[u] + 1 && dfs(right_i_is_connected_to_left[v])) 75 | { 76 | left_i_is_connected_to_right[u] = v; 77 | right_i_is_connected_to_left[v] = u; 78 | return true; 79 | } 80 | } 81 | 82 | level[u] = inf; 83 | 84 | return false; 85 | } 86 | 87 | int MaximumBipartiteMatching() 88 | { 89 | int ans = 0; 90 | 91 | while (bfs()) 92 | { 93 | for (int u = 1; u <= left_size; u++) 94 | { 95 | if (!left_i_is_connected_to_right[u] && dfs(u)) 96 | { 97 | ans++; 98 | } 99 | } 100 | } 101 | 102 | return ans; 103 | } 104 | 105 | vector> GetMatchingEdges() 106 | { 107 | vector> matching; 108 | 109 | for (int u = 1; u <= left_size; u++) 110 | { 111 | if (left_i_is_connected_to_right[u]) 112 | { 113 | matching.push_back({u, left_i_is_connected_to_right[u] - left_size}); // subtract left_size to get original right-side vertex 114 | } 115 | } 116 | 117 | return matching; 118 | } 119 | 120 | vector> FindMinimumVertexCover() 121 | { 122 | vector> min_vertex_cover; 123 | vector visited_left(left_size+1); 124 | vector visited_right(right_size+1); 125 | 126 | queueq; 127 | 128 | for(int i=1;i <= left_size; i++) 129 | { 130 | if(!left_i_is_connected_to_right[i]) // selecting unmatched vertices from left side. 
[unmatched: those vertices that are not end points of matching edges] 131 | { 132 | visited_left[i] = true; 133 | q.push(i); 134 | } 135 | } 136 | 137 | while(!q.empty()) 138 | { 139 | int left = q.front(); 140 | q.pop(); 141 | 142 | for(auto v : graph[left]) 143 | { 144 | if(v <= left_size) // v is not a right vertex 145 | { 146 | continue; 147 | } 148 | 149 | int right = v - left_size; 150 | 151 | // left -------unmatched_edge-------> right. 152 | if(visited_right[right] == false) 153 | { 154 | visited_right[right] = true; 155 | 156 | int matched_left = right_i_is_connected_to_left[right+left_size]; 157 | 158 | // matched_left <---------matched_edge----------right. 159 | if(matched_left!=0 && visited_left[matched_left] == false) 160 | { 161 | visited_left[matched_left] = true; 162 | q.push(matched_left); 163 | } 164 | 165 | } 166 | 167 | } 168 | } 169 | 170 | 171 | // Taking unmarked vertices from left 172 | for(int i=1;i<=left_size;i++) 173 | { 174 | if(visited_left[i]==false) 175 | { 176 | min_vertex_cover.push_back({i, 'L'}); 177 | } 178 | } 179 | 180 | 181 | // Taking marked vertices from right 182 | for(int i=1;i<=right_size;i++) 183 | { 184 | if(visited_right[i] == true) 185 | { 186 | min_vertex_cover.push_back({i,'R'}); 187 | } 188 | } 189 | 190 | return min_vertex_cover; 191 | } 192 | 193 | vector> FindMaximumIndependentSet() 194 | { 195 | vector> max_independent_set; 196 | vector visited_left(left_size+1); 197 | vector visited_right(right_size+1); 198 | 199 | queueq; 200 | 201 | for(int i=1;i <= left_size; i++) 202 | { 203 | if(!left_i_is_connected_to_right[i]) // selecting unmatched vertices from left side. [unmatched: those vertices that are not end points of matching edges] 204 | { 205 | visited_left[i] = true; 206 | q.push(i); 207 | } 208 | } 209 | 210 | while(!q.empty()) 211 | { 212 | int left = q.front(); 213 | q.pop(); 214 | 215 | for(auto v : graph[left]) 216 | { 217 | if(v <= left_size) // v is not a right vertex 218 | { 219 | continue; 220 | } 221 | 222 | int right = v - left_size; 223 | 224 | // left -------unmatched_edge-------> right. 225 | if(visited_right[right] == false) 226 | { 227 | visited_right[right] = true; 228 | 229 | int matched_left = right_i_is_connected_to_left[right+left_size]; 230 | 231 | // matched_left <---------matched_edge----------right. 232 | if(matched_left!=0 && visited_left[matched_left] == false) 233 | { 234 | visited_left[matched_left] = true; 235 | q.push(matched_left); 236 | } 237 | 238 | } 239 | 240 | } 241 | } 242 | 243 | 244 | //Exact opposite of Minimum Vertex Cover 245 | 246 | // Taking marked vertices from left 247 | for(int i=1;i<=left_size;i++) 248 | { 249 | if(visited_left[i]==true) 250 | { 251 | max_independent_set.push_back({i, 'L'}); 252 | } 253 | } 254 | 255 | 256 | // Taking unmarked vertices from right 257 | for(int i=1;i<=right_size;i++) 258 | { 259 | if(visited_right[i] == false) 260 | { 261 | max_independent_set.push_back({i,'R'}); 262 | } 263 | } 264 | 265 | return max_independent_set; 266 | } 267 | }; 268 | 269 | 270 | int32_t main() 271 | { 272 | ios::sync_with_stdio(0); 273 | cin.tie(0); 274 | 275 | int left_size, right_size, edges; 276 | 277 | cin>>left_size>>right_size>>edges; 278 | 279 | 280 | HopcroftKarp HK(left_size,right_size); 281 | 282 | for(int i=0;i>u>>v; 286 | 287 | HK.addEdge(u,v); 288 | 289 | } 290 | 291 | 292 | HK.MaximumBipartiteMatching(); // we need to call it First....... 
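// Why the matching has to be computed first: by Konig's theorem, in a bipartite graph
// the size of a minimum vertex cover equals the size of a maximum matching, and a
// maximum independent set is exactly the complement of a minimum vertex cover.
// FindMinimumVertexCover() / FindMaximumIndependentSet() rebuild these sets from the
// matching arrays (left_i_is_connected_to_right / right_i_is_connected_to_left) by a
// BFS over alternating paths, so they only give correct answers once the matching exists.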
293 | 294 | vector> min_vertex_cover = HK.FindMinimumVertexCover(); 295 | 296 | for(auto x : min_vertex_cover) 297 | { 298 | cout<> 30)) * 0xbf58476d1ce4e5b9; 9 | x = (x ^ (x >> 27)) * 0x94d049bb133111eb; 10 | return x ^ (x >> 31); 11 | } 12 | 13 | size_t operator()(uint64_t x) const { 14 | static const uint64_t FIXED_RANDOM = chrono::steady_clock::now().time_since_epoch().count(); 15 | return splitmix64(x + FIXED_RANDOM); 16 | } 17 | }; //For long long int to long long int 18 | 19 | 20 | typedef gp_hash_tabletable; 21 | 22 | 23 | 24 | 25 | 26 | 27 | // 28 | //struct chash { 29 | // int operator()(pair x) const { return x.first* 31 + x.second; } 30 | //}; 31 | //gp_hash_table, int, chash> table; 32 | 33 | 34 | //This was for {pair to int} mapping 35 | 36 | 37 | int32_t main() 38 | { 39 | ios::sync_with_stdio(0); 40 | cin.tie(0); 41 | 42 | //gp_hash_table m; can do this too 43 | 44 | table m; 45 | 46 | 47 | m[1]++; 48 | m[2]++; 49 | m[1]++; 50 | m[1]++; 51 | 52 | cout< 8 | #include 9 | #include 10 | using namespace std; 11 | using namespace __gnu_pbds; 12 | #define endl "\n" 13 | #define int long long int 14 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 15 | 16 | const int N = 1e4+100; 17 | int dp[N]; 18 | int calculateMex(unordered_set &s) 19 | { 20 | int mex = 0; 21 | while(s.find(mex)!=s.end()) 22 | { 23 | mex++; 24 | } 25 | return mex; 26 | } 27 | 28 | int grundy(int n) //depending on the problem this function can be very different ....... Look at the light oj various Sprague-Grundy Number problems 29 | { 30 | if(n<3) 31 | { 32 | dp[n] = 0; 33 | return 0; 34 | } 35 | if(dp[n]!=-1) 36 | { 37 | return dp[n]; 38 | } 39 | 40 | int left = n/2; 41 | 42 | if(n%2==0) 43 | { 44 | left--; 45 | } 46 | 47 | unordered_sets; 48 | 49 | for(int i=1;i<=left;i++) 50 | { 51 | s.insert(grundy(i)^grundy(n-i)); 52 | } 53 | 54 | dp[n] = calculateMex(s); 55 | 56 | return dp[n]; 57 | } 58 | 59 | int32_t main() 60 | { 61 | ios::sync_with_stdio(0); 62 | cin.tie(0); 63 | 64 | int t; 65 | cin>>t; 66 | 67 | memset(dp,-1,sizeof(dp)); 68 | 69 | for(int tt=1;tt<=t;tt++) 70 | { 71 | int n; 72 | cin>>n; 73 | 74 | int Xor = 0; 75 | 76 | for(int i=0;i>a; 80 | 81 | Xor ^= grundy(a); 82 | } 83 | 84 | cout<<"Case "< 5 | #include 6 | #include 7 | using namespace std; 8 | using namespace __gnu_pbds; 9 | #define endl "\n" 10 | #define int long long int 11 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 12 | 13 | 14 | const double eps = 1e-9; 15 | 16 | int Gauss(vector> a, vector &ans) 17 | { 18 | int n = (int)a.size(), m = (int)a[0].size() - 1; 19 | vector pos(m, -1); 20 | double det = 1; 21 | int rank = 0; 22 | 23 | 24 | for (int col = 0, row = 0; col < m && row < n; ++col) 25 | { 26 | int mx = row; 27 | for (int i = row; i < n; i++) 28 | { 29 | if (fabs(a[i][col]) > fabs(a[mx][col])) 30 | { 31 | mx = i; 32 | } 33 | 34 | } 35 | 36 | if (fabs(a[mx][col]) < eps) 37 | { 38 | det = 0; 39 | continue; 40 | } 41 | for (int i = col; i <= m; i++) 42 | { 43 | swap(a[row][i], a[mx][i]); 44 | } 45 | 46 | if (row != mx) 47 | { 48 | det = -det; 49 | } 50 | 51 | det *= a[row][col]; 52 | pos[col] = row; 53 | for (int i = 0; i < n; i++) 54 | { 55 | if (i != row && fabs(a[i][col]) > eps) 56 | { 57 | double c = a[i][col] / a[row][col]; 58 | for (int j = col; j <= m; j++) 59 | { 60 | a[i][j] -= a[row][j] * c; 61 | } 62 | 63 | } 64 | } 65 | ++row; 66 | ++rank; 67 | } 68 | ans.assign(m, 0); 69 | for (int i = 0; i < m; i++) 70 | { 71 | if (pos[i] != 
-1) 72 | { 73 | ans[i] = a[pos[i]][m] / a[pos[i]][i]; 74 | } 75 | 76 | } 77 | for (int i = 0; i < n; i++) 78 | { 79 | double sum = 0; 80 | for (int j = 0; j < m; j++) 81 | { 82 | sum += ans[j] * a[i][j]; 83 | } 84 | 85 | if (fabs(sum - a[i][m]) > eps) 86 | { 87 | return -1; // no solution 88 | } 89 | 90 | } 91 | for (int i = 0; i < m; i++) 92 | { 93 | if (pos[i] == -1) 94 | { 95 | return 2; // infinte solutions 96 | } 97 | 98 | } 99 | 100 | return 1; // unique solution 101 | } 102 | 103 | 104 | 105 | 106 | int32_t main() 107 | { 108 | ios::sync_with_stdio(0); 109 | cin.tie(0); 110 | 111 | 112 | int n, m; 113 | cin >> n >> m; 114 | 115 | vector> v(n); 116 | for (int i = 0; i < n; i++) 117 | { 118 | for (int j = 0; j <= m; j++) // m-th colunm contain the value of constant 119 | { 120 | double x; 121 | cin >> x; 122 | v[i].push_back(x); 123 | } 124 | } 125 | vector ans; 126 | 127 | int k = Gauss(v, ans); 128 | 129 | if (k) 130 | { 131 | for (int i = 0; i < n; i++) 132 | { 133 | cout << fixed << setprecision(5) << ans[i] << ' '; 134 | } 135 | 136 | } 137 | else 138 | { 139 | cout << "no solution"<graph[N]; 12 | int level[N]; 13 | 14 | 15 | //LCA Part 16 | const int L = log2(N); 17 | int lca_din[N]; 18 | int lca_dout[N]; 19 | int up[N][L+1]; 20 | int tim_lca; 21 | 22 | 23 | 24 | 25 | //SEGMENT Tree Part 26 | int n; // to be taken input as no of vertex 27 | struct node 28 | { 29 | int val; 30 | int lazy; 31 | } Tree[4*N] ; 32 | int ara[N]; 33 | int tin[N]; // most important array , in this array vertex on same heavy chain will have consecutive visiting time , [for vertex no it will give visiting no ] [ segment tree is built on visiting no] 34 | // tin[v] = v visiting time 35 | int tout[N]; 36 | int vis_tim_vertex[N]; // visiting time dile vertex no return korbe [for build() in segment tree ] 37 | int nxt[N]; //for DSU part 38 | int subtree[N]; 39 | int tim; 40 | 41 | 42 | 43 | 44 | //LCA 45 | 46 | void dfs(int s,int par,int lev) 47 | { 48 | lca_din[s] = tim_lca++; 49 | up[s][0] = par; 50 | level[s] = lev; 51 | 52 | for(int i=1;i<=L;i++) 53 | { 54 | up[s][i] = up[up[s][i-1]][i-1]; 55 | } 56 | 57 | for(auto child: graph[s]) 58 | { 59 | if(child==par) 60 | { 61 | continue; 62 | } 63 | dfs(child,s,lev+1); 64 | } 65 | lca_dout[s] = tim_lca++; 66 | 67 | } 68 | 69 | bool is_ancestor(int u,int v) 70 | { 71 | return (lca_din[u]<=lca_din[v] && lca_dout[u]>=lca_dout[v]); 72 | } 73 | 74 | int lca(int u,int v) 75 | { 76 | if(is_ancestor(u,v)) 77 | { 78 | return u; 79 | } 80 | if(is_ancestor(v,u)) 81 | { 82 | return v; 83 | } 84 | 85 | for(int i=L;i>=0;i--) 86 | { 87 | if(!is_ancestor(up[u][i] , v)) 88 | { 89 | u = up[u][i]; 90 | } 91 | } 92 | return up[u][0]; 93 | } 94 | 95 | 96 | 97 | 98 | //Heavy edge on graph[s][0] 99 | 100 | void dfs1(int s, int par) 101 | { 102 | 103 | subtree[s] = 1; 104 | 105 | for(auto &child : graph[s]) // & child pathate hobe 106 | { 107 | if(child==par) 108 | { 109 | continue; 110 | } 111 | 112 | dfs1(child,s); 113 | 114 | subtree[s] += subtree[child]; 115 | 116 | if(subtree[child]>subtree[graph[s][0]]) 117 | { 118 | swap(child,graph[s][0]); // &child pathate hobe for swap operation 119 | } 120 | 121 | } 122 | 123 | } 124 | 125 | 126 | 127 | 128 | //Heavy light decomposition er DSU part 129 | void dfs_hld(int s,int par) 130 | { 131 | 132 | tin[s] = ++tim; 133 | vis_tim_vertex[tim] = s; 134 | 135 | for(auto child : graph[s]) 136 | { 137 | if(child==par) 138 | { 139 | continue; 140 | } 141 | 142 | if(child==graph[s][0]) 143 | { 144 | nxt[child] = nxt[s]; // heavy child gulake ekta 
dsu chain he haralam , nxt[child] = chain er up most vertex return korbe 145 | } 146 | else 147 | { 148 | nxt[child] = child; 149 | } 150 | 151 | dfs_hld(child,s); 152 | 153 | } 154 | tout[s] = tim; 155 | } 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | void propagate(int node ,int lo,int hi) 164 | { 165 | int temp = Tree[node].lazy; 166 | 167 | Tree[node*2].lazy += temp; 168 | Tree[node*2+1].lazy += temp; 169 | 170 | int mid = (lo+hi)>>1; 171 | 172 | Tree[node*2].val += (mid-lo+1)*temp; // lazy pawar sathey sathey value node e add kore dilam jate query er somoy add kora na lage 173 | Tree[node*2+1].val += (hi-(mid+1)+1)*temp; // lazy pawar sathey sathey value node e add kore dilam jate query er somoy add kora na lage 174 | 175 | Tree[node].lazy = 0; 176 | } 177 | 178 | 179 | void build(int node,int lo,int hi) // indexing visiting time diye 180 | { 181 | if(lo==hi) 182 | { 183 | Tree[node].val = ara[vis_tim_vertex[lo]]; // lo th time e je vertex visit hobe te holo vis_tim_vertex[lo] 184 | return; 185 | } 186 | 187 | int mid = (lo+hi)>>1; 188 | 189 | build(node*2,lo,mid); 190 | build(node*2+1,mid+1,hi); 191 | 192 | Tree[node].val = Tree[node*2].val + Tree[node*2+1].val; 193 | } 194 | 195 | 196 | void update(int node,int lo,int hi,int i,int j ,int val) 197 | { 198 | if(Tree[node].lazy>0) 199 | { 200 | propagate(node,lo,hi); 201 | } 202 | if(hij) 203 | { 204 | return; 205 | } 206 | if(lo>=i && hi<=j) 207 | { 208 | Tree[node].val += (hi-lo+1)*val; 209 | Tree[node].lazy += val; 210 | return; 211 | } 212 | 213 | int mid = (lo+hi)>>1; 214 | 215 | update(node*2 , lo , mid , i, j,val); 216 | update(node*2+1 , mid+1 , hi, i, j, val); 217 | 218 | Tree[node].val = Tree[node*2].val + Tree[node*2+1].val; 219 | 220 | } 221 | 222 | int query(int node,int lo,int hi,int i,int j) 223 | { 224 | if(Tree[node].lazy>0) 225 | { 226 | propagate(node,lo,hi); 227 | } 228 | if(hij) 229 | { 230 | return 0; 231 | } 232 | if(lo>=i && hi<=j) 233 | { 234 | return Tree[node].val; 235 | } 236 | 237 | int mid = (lo+hi)>>1; 238 | 239 | int x = query(node*2,lo,mid,i,j); 240 | int y = query(node*2+1,mid+1,hi,i,j); 241 | 242 | return x+y; 243 | } 244 | 245 | 246 | 247 | 248 | 249 | //Queries Started 250 | 251 | int dist(int u,int v) // given unweighted edges 252 | { 253 | return level[u]+level[v] - 2*level[lca(u,v)]; 254 | } 255 | 256 | 257 | 258 | 259 | int query_up(int l,int u) // l is ancestor of u 260 | { 261 | int res = 0; 262 | while(nxt[u]!=nxt[l]) 263 | { 264 | res += query(1,1,n, tin[nxt[u]] , tin[u]); 265 | u = up[nxt[u]][0]; //next heavy chain e jump 266 | } 267 | 268 | res += query(1,1, n , tin[l] , tin[u]); // l included 269 | 270 | return res; 271 | } 272 | 273 | int sum_query_hld(int u,int v) 274 | { 275 | if(is_ancestor(u,v)) 276 | { 277 | return query_up(u,v); 278 | } 279 | if(is_ancestor(v,u)) 280 | { 281 | return query_up(v,u); 282 | } 283 | 284 | int l = lca(u,v); 285 | int x = query_up(l,u); 286 | int y = query_up(l,v); 287 | 288 | return x+y - query_up(l,l); // l would be counted twice since it is vertex query 289 | } 290 | 291 | 292 | void update_up(int l,int u,int val) // l is ancestor of u 293 | { 294 | while(nxt[u]!=nxt[l]) 295 | { 296 | update(1,1,n,tin[nxt[u]],tin[u],val); 297 | u = up[nxt[u]][0]; 298 | } 299 | update(1,1,n,tin[l],tin[u],val); // including l 300 | } 301 | 302 | void update_query_hld(int u,int v,int val) // u to v er path e thaka sob vertex er value er sathe val add 303 | { 304 | if(is_ancestor(u,v)) 305 | { 306 | update_up(u,v,val); 307 | return; 308 | } 309 | if(is_ancestor(v,u)) 
310 | { 311 | update_up(v,u,val); 312 | return; 313 | } 314 | int l = lca(u,v); 315 | update_up(l,u,val); 316 | update_up(l,v,val); 317 | 318 | update_up(l,l,-val); // l updated twice thats why ekber minus 319 | 320 | } 321 | 322 | 323 | void hld() 324 | { 325 | dfs(1,1,1); // for LCA up[1][0] = 1; for is_ancestor to work properly 326 | 327 | dfs1(1,0); 328 | 329 | dfs_hld(1,0); 330 | 331 | build(1,1,n); 332 | } 333 | 334 | 335 | 336 | 337 | 338 | int32_t main() 339 | { 340 | ios::sync_with_stdio(0); 341 | cin.tie(0); 342 | 343 | cin>>n; 344 | 345 | for(int i=1;i<=n;i++) 346 | { 347 | cin>>ara[i]; 348 | } 349 | 350 | for(int i=0;i>x>>y; 354 | graph[x].push_back(y); 355 | graph[y].push_back(x); 356 | } 357 | 358 | hld(); 359 | 360 | int q; 361 | cin>>q; 362 | 363 | cout<<"Ans to the queries: "<>a; 369 | if(a==1) 370 | { 371 | int u,v,x; 372 | cin>>u>>v>>x; 373 | update_query_hld(u,v,x); 374 | } 375 | else 376 | { 377 | int u, v; 378 | cin>>u>>v; 379 | cout<prime; 11 | 12 | void pre() 13 | { 14 | memset(sieve , true, sizeof(sieve)); 15 | 16 | for(int i=2;i fact(int n) 36 | { 37 | mapm; 38 | 39 | for(int i=0;i1) 48 | { 49 | m[n]++; 50 | } 51 | 52 | return m; 53 | } 54 | 55 | int32_t main() 56 | { 57 | ios::sync_with_stdio(0); 58 | cin.tie(0); 59 | 60 | pre(); 61 | 62 | int t; 63 | cin>>t; 64 | 65 | while(t--) 66 | { 67 | int a,m; 68 | cin>>a>>m; 69 | 70 | int test = __gcd(a,m); 71 | 72 | mapfact1; 73 | mapfact2; 74 | 75 | fact1 = fact(m); 76 | 77 | fact2 = fact(test); 78 | 79 | if(test!=1) 80 | { 81 | for(auto x : fact2) 82 | { 83 | fact1[x.first]-=x.second; 84 | } 85 | } 86 | 87 | 88 | vectorv; 89 | 90 | for(auto x : fact1) 91 | { 92 | if(x.second!=0) 93 | { 94 | v.push_back(x.first); 95 | } 96 | } 97 | 98 | int ans = (a+m-1)/test - (a-1)/test ; 99 | 100 | for(int i=1;i<(1<> graph[N], altered_graph[N]; 17 | 18 | int dist[N]; 19 | int vis[N]; 20 | 21 | void dijkstra(int ss, int V) 22 | { 23 | for (int i = 0; i < V; i++) 24 | { 25 | dist[i] = inf; 26 | vis[i] = false; 27 | } 28 | 29 | dist[ss] = 0; 30 | priority_queue, vector>, greater>> pq; 31 | pq.push({0, ss}); 32 | 33 | while (!pq.empty()) 34 | { 35 | int father = pq.top().second; 36 | pq.pop(); 37 | 38 | if (vis[father]) continue; 39 | vis[father] = true; 40 | 41 | for (auto child : altered_graph[father]) 42 | { 43 | int v = child.first; 44 | int vw = child.second; 45 | if (vis[v]) continue; 46 | if (dist[v] > dist[father] + vw) 47 | { 48 | dist[v] = dist[father] + vw; 49 | pq.push({dist[v], v}); 50 | } 51 | } 52 | } 53 | } 54 | 55 | vector bellmanFord(int V) 56 | { 57 | vector dist(V, inf); 58 | dist[V - 1] = 0; 59 | 60 | for (int i = 0; i < V - 1; i++) 61 | { 62 | for (int u = 0; u < V; u++) 63 | { 64 | for (auto edge : graph[u]) 65 | { 66 | int v = edge.first; 67 | int weight = edge.second; 68 | if (dist[u] != inf && dist[u] + weight < dist[v]) 69 | { 70 | dist[v] = dist[u] + weight; 71 | } 72 | } 73 | } 74 | } 75 | 76 | for (int u = 0; u < V; u++) { 77 | for (auto edge : graph[u]) { 78 | int v = edge.first; 79 | int weight = edge.second; 80 | if (dist[u] != inf && dist[u] + weight < dist[v]) 81 | { 82 | cout << "Negative weight cycle detected!" 
<< endl; 83 | exit(0); 84 | } 85 | } 86 | } 87 | 88 | return dist; 89 | } 90 | 91 | void johnsonAlgorithm(int V) { 92 | for (int i = 0; i < V; i++) 93 | { 94 | graph[V].push_back({i, 0}); 95 | } 96 | 97 | vector h = bellmanFord(V + 1); // The potential function h[] is used to "normalize" the graph by shifting the edge weights in such a way that all edges have non-negative weights 98 | 99 | for (int i = 0; i < V; i++) 100 | { 101 | for (auto& edge : graph[i]) 102 | { 103 | int v = edge.first; 104 | int weight = edge.second; 105 | altered_graph[i].push_back({v, weight + h[i] - h[v]}); 106 | } 107 | } 108 | 109 | for (int u = 0; u < V; u++) 110 | { 111 | cout << "Shortest Distance from vertex " << u << ":"<> input = { 136 | {0, -5, 2, 3}, 137 | {0, 0, 4, 0}, 138 | {0, 0, 0, 1}, 139 | {0, 0, 0, 0} 140 | }; 141 | 142 | for (int i = 0; i < 4; i++) 143 | { 144 | for (int j = 0; j < 4; j++) 145 | { 146 | if (input[i][j] != 0) 147 | { 148 | graph[i].push_back({j, input[i][j]}); 149 | } 150 | } 151 | } 152 | 153 | johnsonAlgorithm(4); 154 | 155 | return 0; 156 | } 157 | -------------------------------------------------------------------------------- /LCA in O(1).cpp: -------------------------------------------------------------------------------- 1 | 2 | 3 | // ============= Euler Tour type - 03 ========== LCA in O(1) ============ 4 | 5 | // ET3 = 1 2 3 2 4 6 4 7 4 2 5 2 1 8 9 8 10 8 1 6 | 7 | // in this tour technique (child - father - child) is always maintained , so between 2 child , their father shall be present (not grand father though) 8 | // So query(u,v) means : 9 | // u first appear in ET3 and v first appear in ET3 er moddhe jetar depth lowest setai LCA ; 10 | // we find this using sparse table O(1) , segment tree O(logn) 11 | 12 | 13 | const int N = 1e5; 14 | vectorgraph[N]; 15 | vectorET3; 16 | vectorp(N,-1); //p[v] = v first appear in ET3; 17 | bool vis[N]; 18 | int dept[N]; 19 | const int L = log2(N); 20 | int st[N][L+1]; 21 | int Log2[N]; 22 | 23 | void dfs(int s) 24 | { 25 | vis[s] = true; 26 | ET3.push_back(s); 27 | for(auto child: graph[s]) 28 | { 29 | if(vis[child]) 30 | { 31 | continue; 32 | } 33 | dept[child] = dept[s]+1; 34 | 35 | dfs(child); 36 | 37 | ET3.push_back(s); 38 | } 39 | } 40 | 41 | void pre() 42 | { 43 | Log2[0] = 0; 44 | Log2[1] = 0; 45 | Log2[2] = 1; 46 | for(int i=3;i>n; 94 | for(int i=0;i>x>>y; 98 | graph[x].push_back(y); 99 | graph[y].push_back(x); 100 | } 101 | 102 | dfs(1); 103 | 104 | for(int i=0;i>q; 117 | 118 | 119 | cout<<"LCAs : "<>u>>v; 124 | 125 | 126 | int a = min(p[v],p[u]); // p[u] first pos of u , p[v] first pos of v in ET3; 127 | int b = max(p[v],p[u]); 128 | 129 | int i = Log2[b-a+1]; 130 | 131 | int vertex1 = st[a][i]; 132 | int vertex2 = st[b-(1<graph[N]; 5 | bool vis[N]; 6 | int din[N]; 7 | int dout[N]; 8 | int tin = 0; 9 | const int L = log2(N); 10 | int up[N][L+1]; 11 | 12 | void dfs(int s,int par) 13 | { 14 | vis[s] = true; 15 | din[s] = tin++; 16 | 17 | up[s][0] = par; 18 | 19 | for(int i=1;i<=L;i++) 20 | { 21 | up[s][i] = up[ up[s][i-1] ][i-1]; // Here is the binary lifting is occuring try for this graph ( 1->2 , 2->3 , 3->4 , 4->5,5->6,6->7,7->8,8->9,9->10) then s = 10 er jonno eta draw kore buja 22 | } 23 | 24 | 25 | for(auto child : graph[s]) 26 | { 27 | if(vis[child]) 28 | { 29 | continue; 30 | } 31 | dfs(child,s); 32 | } 33 | dout[s] = tin++; 34 | } 35 | 36 | bool is_ancestor(int u,int v) // u is ancestor of v 37 | { 38 | if(din[u]<=din[v] && dout[u]>=dout[v]) 39 | { 40 | return true; 41 | } 42 | return false; 43 | } 44 | 45 | 46 | 47 
| int lca(int u,int v) 48 | { 49 | if(is_ancestor(u,v)) 50 | { 51 | return u; 52 | } 53 | if(is_ancestor(v,u)) 54 | { 55 | return v; 56 | } 57 | for(int i=L;i>=0;i--) 58 | { 59 | if(!is_ancestor(up[u][i] ,v)) // ancestor na hole jump dissi [up[1][0] = 1 na hole ei part kaj korbe na] 60 | { 61 | u = up[u][i]; 62 | } 63 | } 64 | return up[u][0]; 65 | 66 | } 67 | 68 | 69 | int32_t main() 70 | { 71 | ios::sync_with_stdio(0); 72 | cin.tie(0); 73 | int n,m; 74 | cin>>n>>m; 75 | 76 | for(int i=0;i>x>>y; 80 | graph[x].push_back(y); 81 | graph[y].push_back(x); 82 | } 83 | 84 | dfs(1,1); // parent of 1 should be 1 i.e. up[1][0] = 1; 85 | 86 | int q; 87 | cin>>q; 88 | while(q--) 89 | { 90 | int u,v; 91 | cin>>u>>v; 92 | cout<graph[N]; 22 | int in[N]; 23 | vectordis(N,-inf); 24 | int parent[N]; 25 | 26 | int32_t main() 27 | { 28 | ios::sync_with_stdio(0); 29 | cin.tie(0); 30 | int n,m; 31 | cin>>n>>m; 32 | for(int i=0;i>x>>y; 36 | graph[x].push_back(y); 37 | in[y]++; 38 | } 39 | queueq; 40 | 41 | for(int i=1;i<=n;i++) 42 | { 43 | if(in[i]==0) 44 | { 45 | q.push(i); 46 | } 47 | } 48 | vectortopo; 49 | while(!q.empty()) 50 | { 51 | int father = q.front(); 52 | topo.push_back(father); 53 | q.pop(); 54 | 55 | 56 | for(auto child: graph[father]) 57 | { 58 | in[child]--; 59 | if(in[child]==0) 60 | { 61 | q.push(child); 62 | } 63 | } 64 | } 65 | 66 | dis[1] = 0; 67 | 68 | for(auto s : topo) 69 | { 70 | if(dis[s]!=-inf) 71 | { 72 | for(auto child: graph[s]) 73 | { 74 | if(dis[child]ans; 96 | 97 | while(v!=0) 98 | { 99 | ans.push_back(v); 100 | v = parent[v]; 101 | } 102 | reverse(ans.begin(),ans.end()); 103 | 104 | cout<Q; 32 | 33 | 34 | //All NUMBERS are different thats why this solution works using Ordered_SET 35 | 36 | void results(vector &v , int m) 37 | { 38 | 39 | block = sqrt(v.size()+0.0); // USE block = 666; way faster 40 | 41 | sort(Q.begin() , Q.end() , cmp); 42 | 43 | int currL = 0; 44 | int currR = 0; 45 | 46 | 47 | 48 | ordered_set S; 49 | 50 | for(int i=0;iL) 58 | { 59 | S.insert(v[currL-1]); 60 | currL--; 61 | } 62 | while(currR<=R) 63 | { 64 | S.insert(v[currR]); 65 | currR++; 66 | } 67 | 68 | while(currLR+1) 76 | { 77 | int a = v[currR-1]; 78 | S.erase(a); 79 | currR--; 80 | } 81 | 82 | 83 | ans[idx] = *S.find_by_order(k-1); // 0 based bole k-1 will refer to k-th 84 | 85 | } 86 | 87 | 88 | } 89 | 90 | 91 | int32_t main() 92 | { 93 | ios::sync_with_stdio(0); 94 | cin.tie(0); 95 | 96 | int n,m; 97 | cin>>n>>m; 98 | 99 | vectorv; 100 | 101 | for(int i=0;i>a; 105 | v.push_back(a); 106 | } 107 | 108 | for(int g=0;g>i>>j>>k; 112 | Q.push_back({i-1,j-1,k,g}); 113 | } 114 | 115 | results(v,m); 116 | 117 | for(int i=0;i>>edge; 6 | vector , int>>MST; 7 | int mstCost = 0; 8 | 9 | 10 | void make(int v) 11 | { 12 | parent[v] = v; 13 | sz[v] = 1; 14 | } 15 | 16 | int find(int v) 17 | { 18 | if(v==parent[v]) 19 | { 20 | return v; 21 | } 22 | return parent[v] = find(parent[v]); 23 | } 24 | 25 | void Union(int a,int b) 26 | { 27 | a = find(a); 28 | b = find(b); 29 | if(a!=b) 30 | { 31 | if(sz[a]>n>>m; 63 | for(int i=0;i>x>>y>>w; 67 | edge.push_back({w , { x , y }}); 68 | } 69 | sort(edge.begin() , edge.end()); 70 | for(int i=1;i<=n;i++) // 1 based 71 | { 72 | make(i); 73 | } 74 | Krushkal(); 75 | 76 | cout<<"----------"<>graph[N]; 5 | vectorkey(N , inf); 6 | vectorparent(N,-1); 7 | int mstCost = 0; 8 | int vis[N]; 9 | 10 | void Prims(int s) 11 | { 12 | priority_queue< pair , vector> , greater> > pq; 13 | key[s] = 0; 14 | pq.push({0,s}); 15 | parent[s] = -1; 16 | while(!pq.empty()) 17 | { 18 | int father = 
pq.top().second; 19 | 20 | if(vis[father]==false) 21 | { 22 | mstCost += pq.top().first; 23 | vis[father] = true; 24 | } 25 | 26 | pq.pop(); 27 | for(auto child : graph[father]) 28 | { 29 | if(vis[child.first]) 30 | { 31 | continue; 32 | } 33 | if(child.second < key[child.first]) // Note e leka ase why , tar por oo fresh mind a 2,1 ta mST draw kore buja jay . Try the graph attach below 34 | { 35 | key[child.first] = child.second; 36 | parent[child.first] = father; 37 | pq.push({child.second , child.first}); 38 | } 39 | } 40 | } 41 | return; 42 | } 43 | 44 | int32_t main() 45 | { 46 | ios::sync_with_stdio(0); 47 | cin.tie(0); 48 | int n,m; 49 | cin>>n>>m; 50 | for(int i=0;i>x>>y>>w; 54 | graph[x].push_back({y,w}); 55 | graph[y].push_back({x,w}); 56 | } 57 | Prims(1); 58 | cout<<"------"<>Multiply (vector> &one , vector> &two) //pass them as reference to save time 5 | { 6 | vector>res; 7 | int D = one.size(); 8 | for(int i=0;itemp; 11 | for(int j=0;j> Mat_Ex(vector> Mat , int b) //don't pass Mat as reference 30 | { 31 | vector> ans 32 | { 33 | {1,0}, 34 | {0,1} 35 | }; //Identity Matrix {Adjust its dimension according to Mat 36 | 37 | while(b>0) 38 | { 39 | if(b&1) 40 | { 41 | ans = Multiply(ans,Mat); 42 | } 43 | Mat = Multiply(Mat,Mat); 44 | b>>=1; 45 | } 46 | 47 | return ans; 48 | 49 | } 50 | 51 | int32_t main() 52 | { 53 | ios::sync_with_stdio(0); 54 | cin.tie(0); 55 | 56 | // For finding nth fibbonacci 57 | vector>Mat{ 58 | {1,1}, 59 | {1,0} 60 | }; 61 | 62 | int n; 63 | 64 | cin>>n; //n>2 65 | 66 | cout<Tree[4*N]; // each node will store (indexes) in sorted order 12 | vector>A; 13 | 14 | 15 | void build(int node,int lo,int hi) 16 | { 17 | if(lo==hi) 18 | { 19 | Tree[node].push_back(A[lo].second); // indexex will be stored at every node in sorted order 20 | return; 21 | } 22 | 23 | int mid = (lo+hi)>>1; 24 | 25 | build(node*2,lo,mid); 26 | build(node*2+1 , mid+1,hi); 27 | 28 | merge(Tree[node*2].begin() , Tree[node*2].end() , Tree[node*2+1].begin() , Tree[node*2+1].end() , back_inserter(Tree[node])); // this merge fuction works excatly like merge in mergeSort [watch gfg] 29 | 30 | 31 | } 32 | 33 | int query(int node ,int lo,int hi,int i,int j,int k) 34 | { 35 | if(lo==hi) 36 | { 37 | return Tree[node][0]; 38 | } 39 | int mid = (lo+hi)>>1; 40 | 41 | int a = lower_bound(Tree[node*2].begin() , Tree[node*2].end() , i)-Tree[node*2].begin(); 42 | int b = upper_bound(Tree[node*2].begin() , Tree[node*2].end() , j)-Tree[node*2].begin(); 43 | 44 | int M = b-a; // [i:j] range er moddhe koyta indexes ase in Tree[node*2] Tee 45 | 46 | if(M>=k) 47 | { 48 | //Tree[node*2] tee thaka indexes er songkha greater than K hoy taile k-th minimum er index will exits here 49 | return query(node*2 , lo , mid , i , j , k); 50 | } 51 | else 52 | { 53 | //let say k = 6 and M = 4 : 54 | //er ortho hocce [i:j] range er 4 ta indexes exists kore Tree[node*2] te 55 | 56 | // baki 2 ta indexes exits here : k-M 57 | 58 | //Keep in mind that elements at the right childs > elements at the left childs . 
thats why we have to pass k-M here to get k-th minimum number er index 59 | 60 | return query(node*2+1 , mid+1 , hi , i , j , k-M); 61 | } 62 | 63 | 64 | } 65 | 66 | 67 | int32_t main() 68 | { 69 | ios::sync_with_stdio(0); 70 | cin.tie(0); 71 | 72 | int n , q; 73 | cin>>n>>q; 74 | 75 | vectorv; 76 | for(int i=0;i>a; 80 | v.push_back(a); 81 | } 82 | 83 | for(int i=0;i>i>>j>>k; //i,j 1-based 96 | 97 | int kthminimumdx = query(1,0,n-1,i-1,j-1,k); // pass 0-based 98 | 99 | cout<>n; 73 | 74 | vectorv; 75 | for(int i=0;i>a; 79 | v.push_back(a); 80 | arr[a]++; 81 | } 82 | 83 | pre(); 84 | 85 | for(int d = N-50; d >= 1; d--) 86 | { 87 | int count = 0; 88 | for(int i = d; i 19 | #include 20 | #include 21 | using namespace std; 22 | using namespace __gnu_pbds; 23 | #define endl "\n" 24 | #define int long long int 25 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 26 | 27 | //Next Greater Element 28 | //Monotonic non-increasing stack 29 | 30 | int32_t main() 31 | { 32 | ios::sync_with_stdio(0); 33 | cin.tie(0); 34 | 35 | int n; 36 | cin>>n; 37 | vectorv; 38 | 39 | for(int i=0;i>a; 43 | v.push_back(a); 44 | } 45 | vectornxtGreater(n,-1); // keep the indices 46 | 47 | stacks; 48 | 49 | for(int i=0;i 76 | #include 77 | #include 78 | using namespace std; 79 | using namespace __gnu_pbds; 80 | #define endl "\n" 81 | #define int long long int 82 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 83 | 84 | //Previous Greater Element 85 | //Monotonic strickly decreasing stack 86 | 87 | int32_t main() 88 | { 89 | ios::sync_with_stdio(0); 90 | cin.tie(0); 91 | 92 | int n; 93 | cin>>n; 94 | vectorv; 95 | 96 | for(int i=0;i>a; 100 | v.push_back(a); 101 | } 102 | 103 | vectorprevGreater(n,-1);//keeps the indexes 104 | 105 | stackst; 106 | 107 | for(int i=0;i 138 | #include 139 | #include 140 | using namespace std; 141 | using namespace __gnu_pbds; 142 | #define endl "\n" 143 | #define int long long int 144 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 145 | 146 | //Next Smaller Element 147 | //Monotonic non-decreasing stack 148 | 149 | int32_t main() 150 | { 151 | ios::sync_with_stdio(0); 152 | cin.tie(0); 153 | 154 | int n; 155 | cin>>n; 156 | vectorv; 157 | 158 | for(int i=0;i>a; 162 | v.push_back(a); 163 | } 164 | 165 | vectornxtSmaller(n,-1); // indexes 166 | 167 | stackst; 168 | 169 | for(int i=0;i v[i]) 172 | { 173 | 174 | nxtSmaller[st.top()] = i; 175 | st.pop(); 176 | } 177 | st.push(i); 178 | } 179 | 180 | for(int i=0;i 197 | #include 198 | #include 199 | using namespace std; 200 | using namespace __gnu_pbds; 201 | #define endl "\n" 202 | #define int long long int 203 | #define ordered_set tree< int, null_type, less, rb_tree_tag,tree_order_statistics_node_update> 204 | 205 | //Previous smaller element 206 | //Monotonic increasing stack 207 | 208 | int32_t main() 209 | { 210 | ios::sync_with_stdio(0); 211 | cin.tie(0); 212 | 213 | int n; 214 | cin>>n; 215 | vectorv; 216 | 217 | for(int i=0;i>a; 221 | v.push_back(a); 222 | } 223 | 224 | vectorprevSmaller(n,-1); //indexes 225 | 226 | stackst; 227 | for(int i=0;i=v[i]) 230 | { 231 | st.pop(); 232 | } 233 | if(!st.empty()) 234 | { 235 | prevSmaller[i] = st.top(); 236 | } 237 | st.push(i); 238 | } 239 | 240 | for(int i=0;i>n>>m; 16 | vector>edges; 17 | 18 | for(int i=0;i>x>>y>>w; 22 | edges.push_back({x,y,w}); 23 | } 24 | int take; 25 | 26 | for(int i=1;idis[u]+w) 32 | { 33 | dis[v] = dis[u]+w; 34 | take = v; 35 | 
relaxtant[v] = u; 36 | } 37 | } 38 | } 39 | 40 | if(take==-1) 41 | { 42 | cout<<"NO"<ans; 62 | 63 | for(int u = v ; ; u = relaxtant[u]) 64 | { 65 | ans.push_back(u); 66 | if(u==v && ans.size()>1) 67 | { 68 | break; 69 | } 70 | } 71 | 72 | reverse(ans.begin(),ans.end()); 73 | 74 | for(auto x : ans) 75 | { 76 | cout< 10 | #include 11 | #include 12 | using namespace std; 13 | #define endl "\n" 14 | #define inf 1000000000000000000 15 | #define int long long int 16 | 17 | namespace __gnu_pbds{ 18 | typedef tree, 21 | rb_tree_tag, 22 | tree_order_statistics_node_update> ordered_set; 23 | } 24 | using namespace __gnu_pbds; 25 | 26 | 27 | void Insert(ordered_set &s,int x){ //this function inserts one more occurrence of (x) into the set. 28 | 29 | s.insert(x); 30 | 31 | } 32 | 33 | 34 | bool Exist(ordered_set &s,int x){ //this function checks weather the value (x) exists in the set or not. 35 | 36 | if((s.upper_bound(x))==s.end()){ 37 | return 0; 38 | } 39 | return ((*s.upper_bound(x))==x); 40 | 41 | } 42 | 43 | 44 | void Erase(ordered_set &s,int x){ //this function erases one occurrence of the value (x). 45 | 46 | if(Exist(s,x)){ 47 | s.erase(s.upper_bound(x)); 48 | } 49 | 50 | } 51 | 52 | 53 | int FirstIdx(ordered_set &s,int x){ //this function returns the first index of the value (x)..(0 indexing). 54 | 55 | if(!Exist(s,x)){ 56 | return -1; 57 | } 58 | return (s.order_of_key(x)); 59 | 60 | } 61 | 62 | 63 | int Value(ordered_set &s,int idx){ //this function returns the value at the index (idx)..(0 indexing). 64 | 65 | return (*s.find_by_order(idx)); 66 | 67 | } 68 | 69 | 70 | int LastIdx(ordered_set &s,int x){ //this function returns the last index of the value (x)..(0 indexing). 71 | 72 | if(!Exist(s,x)){ 73 | return -1; 74 | } 75 | if(Value(s,(int)s.size()-1)==x){ 76 | return (int)(s.size())-1; 77 | } 78 | return FirstIdx(s,*s.lower_bound(x))-1; 79 | 80 | } 81 | 82 | 83 | int Count(ordered_set &s,int x){ //this function returns the number of occurrences of the value (x). 84 | 85 | if(!Exist(s,x)){ 86 | return 0; 87 | } 88 | return LastIdx(s,x)-FirstIdx(s,x)+1; 89 | 90 | } 91 | 92 | 93 | void Clear(ordered_set &s){ //this function clears all the elements from the set. 94 | 95 | s.clear(); 96 | 97 | } 98 | 99 | 100 | int Size(ordered_set &s){ //this function returns the size of the set. 
101 | 102 | return (int)(s.size()); 103 | 104 | } 105 | 106 | 107 | 108 | 109 | int32_t main() 110 | { 111 | ios::sync_with_stdio(0); 112 | cin.tie(0); 113 | 114 | // 115 | 116 | 117 | } 118 | -------------------------------------------------------------------------------- /Persistent Segment Tree (k-th minimum in range).cpp: -------------------------------------------------------------------------------- 1 | //Works for non-distinct elements too [unlike mo's algo] 2 | 3 | const int M = 5000+100; 4 | const int N = 100000+100; 5 | int arr[N]; 6 | 7 | struct node 8 | { 9 | int val; 10 | node *left; 11 | node *right; 12 | 13 | node(node *l , node *r , int v) 14 | { 15 | left = l; 16 | right = r; 17 | val = v; 18 | } 19 | 20 | }; 21 | 22 | node *version[M]; 23 | 24 | //initially building an empty segment tree 25 | 26 | void build(node *n , int lo,int hi) 27 | { 28 | if(lo==hi) 29 | { 30 | n->val = 0; 31 | return; 32 | } 33 | n->left = new node(NULL,NULL,0); 34 | n->right = new node(NULL,NULL,0); 35 | 36 | int mid = (lo+hi)>>1; 37 | 38 | build(n->left,lo,mid); 39 | build(n->right,mid+1,hi); 40 | 41 | n->val = 0; 42 | 43 | } 44 | 45 | void update(node *prev, node *cur ,int lo,int hi,int pos,int val) 46 | { 47 | if(pos>hi || poshi) 48 | { 49 | return; 50 | } 51 | if(lo==hi) 52 | { 53 | cur->val = prev->val; //If elements are not distinct , we need to add previous values too : else just cur->val = 1 would works 54 | cur->val += val; 55 | 56 | return; 57 | } 58 | int mid = (lo+hi)>>1; 59 | 60 | if(pos<=mid) 61 | { 62 | cur->right = prev->right; 63 | cur->left = new node(NULL , NULL,0); 64 | update(prev->left , cur->left , lo,mid,pos,val); 65 | } 66 | else 67 | { 68 | cur->left = prev->left; 69 | cur->right = new node(NULL , NULL,0); 70 | update(prev->right , cur->right , mid+1,hi,pos,val); 71 | } 72 | 73 | cur->val = cur->left->val + cur->right->val; 74 | 75 | 76 | } 77 | 78 | int query(node *j , node *i ,int lo,int hi,int k) 79 | { 80 | if(lo==hi) 81 | { 82 | return lo; 83 | } 84 | int M = (j->left->val) - (i->left->val); 85 | 86 | 87 | int mid = (lo+hi)>>1; 88 | 89 | 90 | if(M>=k) 91 | { 92 | return query(j->left,i->left,lo,mid,k); 93 | } 94 | else 95 | { 96 | return query(j->right,i->right,mid+1,hi,k-M); 97 | } 98 | } 99 | 100 | int32_t main() 101 | { 102 | ios::sync_with_stdio(0); 103 | cin.tie(0); 104 | int n,m; 105 | cin>>n>>m; 106 | 107 | version[0] = new node(NULL,NULL,0); 108 | 109 | 110 | build(version[0],1,n); // empty segment tree 111 | 112 | vectorv; 113 | 114 | for(int i=1;i<=n;i++) 115 | { 116 | int a; 117 | cin>>a; 118 | arr[i] = a; 119 | v.push_back(a); 120 | } 121 | sort(v.begin(),v.end()); 122 | 123 | for(int i=1;i<=n;i++) 124 | { 125 | arr[i] = lower_bound(v.begin(),v.end(),arr[i])-v.begin() + 1; // converting all the elements to (0 < arr[i] <= n) 126 | //Here arr[i] = arr[i]-th element in vector v; //THis makes sure we getting the position rather than the number . 
This will make sure Histogram fenwick tree < N size 127 | } 128 | 129 | for(int i=1;i<=n;i++) 130 | { 131 | version[i] = new node(NULL,NULL,0); // this line must be here : if we allocate space for versions before here we shall get RUN TIME ERROR due to unused allocations 132 | update(version[i-1],version[i],1,n,arr[i],1); 133 | } 134 | 135 | while(m--) 136 | { 137 | int l,r,k; 138 | cin>>l>>r>>k; 139 | 140 | int idx = query(version[r],version[l-1],1,n,k); 141 | 142 | 143 | //(idx - 1)-th element in vector V is the ans : [think the Fenwick tree implementation of Order statistices tree 144 | 145 | cout<= 1e15 [ which requres less than 1e6 moves ] 6 | [There is a 90% plus probability that using 1e6 Random number we shall get a divisor] 7 | 8 | 9 | int cnt = 0; 10 | int Pollard_Rho(int N) 11 | { 12 | if(N==1) 13 | { 14 | return N; 15 | } 16 | if(N%2==0) 17 | { 18 | return 2; 19 | } 20 | 21 | srand(time(NULL)); 22 | 23 | int x = (rand()%(N-2))+2; 24 | 25 | int y = x; 26 | 27 | int d = 1; 28 | 29 | int c = (rand()%(N-1))+1; 30 | 31 | while(d==1) 32 | { 33 | //Tortoise : Slow pointer 34 | x = (((x%N)*(x%N))%N + c)%N; 35 | 36 | //Hare : Fast Pointer 37 | y = (((y%N)*(y%N))%N + c)%N; 38 | y = (((y%N)*(y%N))%N + c)%N; 39 | 40 | d = __gcd( abs(x-y) , N); 41 | 42 | ++cnt; 43 | 44 | if(d==N) // x mod N = y mod N . which means [ abs(x-y) = N ] eta hole Notun kore abar algorithm chalate hobe 45 | { 46 | //Mane Pollard rho loop e pore gese 47 | 48 | //Notun iteration dorkar hobe 49 | return Pollard_Rho(N); 50 | } 51 | 52 | 53 | } 54 | 55 | return d; 56 | } 57 | 58 | int32_t main() 59 | { 60 | ios::sync_with_stdio(0); 61 | cin.tie(0); 62 | 63 | 64 | int N = 223007; 65 | 66 | N = N*N*N; 67 | //cin>>N; 68 | 69 | int d = Pollard_Rho(N); 70 | 71 | cout<<"The Number : "<fact(int n) 34 | { 35 | mapm; 36 | 37 | while(n>1) 38 | { 39 | int a = lp[n]; 40 | m[a]++; 41 | n/=lp[n]; 42 | } 43 | if(n>1) 44 | { 45 | m[n]++; 46 | } 47 | 48 | return m; 49 | 50 | } 51 | 52 | 53 | int32_t main() 54 | { 55 | 56 | pre(); // Remember to declare this in the main function 57 | 58 | 59 | int n; 60 | cin>>n; 61 | 62 | mapm; 63 | m = fact(n); 64 | 65 | for(auto x: m) 66 | { 67 | cout< LinkedIn
2 | 3 | In this repository I have stored some of the most used intermediate and advanced level algorithms in competitive programming. I have written all the code myself, after learning these algorithms from various blogs. The code is written with strong modularity in mind, so that whenever I need to recall a process or an algorithm I can do so just by looking at the code. I have also added comments wherever they were necessary. 5 | 6 | *These codes are written in a way which makes them very easy to understand for a beginner or an intermediate level coder.*
7 | 8 | I strongly hope that an intermediate level programmer will benefit highly from my library , as there isn't much resources available for advanced topic's . 9 | 10 | I am currently in the enrichment process of my library . Hope to include almost all the algorithms used by an intermediate or an advanced level cp coder. 11 | 12 | ```c 13 | #include 14 | int main() 15 | { 16 | printf("Happy Coding"); 17 | return 0; 18 | } 19 | ``` 20 | 21 | -------------------------------------------------------------------------------- /Segment Tree With Lazy Propagation.cpp: -------------------------------------------------------------------------------- 1 | //Arfatul Islam Asif 2 | 3 | //Sum update Query 4 | 5 | const int N = 1e5+100; 6 | 7 | struct node 8 | { 9 | int val; 10 | int lazy; 11 | 12 | } Tree[4*N]; 13 | 14 | int ara[N]; 15 | 16 | 17 | void propagate(int node,int lo,int hi) 18 | { 19 | int temp = Tree[node].lazy; 20 | 21 | Tree[node*2].lazy += temp; 22 | Tree[node*2+1].lazy += temp; 23 | Tree[node].lazy = 0; 24 | 25 | int mid = (lo+hi)>>1; 26 | 27 | Tree[node*2].val += (mid-lo+1)*temp; // lazy pawar sathe sathei sob node e add kore disi , tai query korar somoy node er Lazy add korte hobe na 28 | Tree[node*2+1].val +=(hi-(mid+1)+1)*temp; // // lazy pawar sathe sathei sob node e add kore disi , tai query korar somoy node er Lazy add korte hobe na 29 | } 30 | 31 | 32 | void build(int node,int lo,int hi) 33 | { 34 | if(lo==hi) 35 | { 36 | Tree[node].val = ara[lo]; 37 | return; 38 | } 39 | 40 | int mid = (lo+hi)>>1; 41 | 42 | build(node*2,lo,mid); 43 | build(node*2+1 , mid+1,hi); 44 | 45 | Tree[node].val = Tree[node*2].val + Tree[node*2+1].val; 46 | } 47 | 48 | void update(int node,int lo,int hi,int i,int j , int val) 49 | { 50 | if(Tree[node].lazy>0) 51 | { 52 | propagate(node,lo,hi); 53 | } 54 | if(hij) 55 | { 56 | return; 57 | } 58 | if(lo>=i && hi<=j) 59 | { 60 | Tree[node].lazy += val; 61 | Tree[node].val += (hi-lo+1)*val; // ekhanei sob node e add kore disi , tai query korar somoy node er Lazy add korte hobe na 62 | 63 | return; 64 | } 65 | 66 | int mid = (lo+hi)>>1; 67 | update(node*2,lo,mid,i,j,val); 68 | update(node*2+1,mid+1,hi,i,j,val); 69 | 70 | Tree[node].val = Tree[node*2].val + Tree[node*2+1].val; 71 | } 72 | 73 | int query(int node,int lo,int hi,int i,int j) 74 | { 75 | if(Tree[node].lazy>0) 76 | { 77 | propagate(node,lo,hi); 78 | } 79 | if(hij) 80 | { 81 | return 0; 82 | } 83 | if(lo>=i && hi<=j) 84 | { 85 | return Tree[node].val; 86 | } 87 | int mid = (lo+hi)>>1; 88 | 89 | int x = query(node*2,lo,mid,i,j); 90 | int y = query(node*2+1,mid+1,hi,i,j); 91 | 92 | return x+y; 93 | 94 | } 95 | 96 | 97 | int32_t main() 98 | { 99 | ios::sync_with_stdio(0); 100 | cin.tie(0); 101 | 102 | int n; 103 | cin>>n; 104 | for(int i=1;i<=n;i++) 105 | { 106 | cin>>ara[i]; 107 | } 108 | 109 | build(1,1,n); 110 | 111 | int q; 112 | cin>>q; 113 | while(q--) 114 | { 115 | int a; 116 | cin>>a; 117 | if(a==1) 118 | { 119 | int i,j,x; 120 | cin>>i>>j>>x; 121 | update(1,1,n,i,j,x); 122 | } 123 | else 124 | { 125 | int i,j; 126 | cin>>i>>j; 127 | cout<>graph[N]; 8 | int in[N]; 9 | vectordis(N,inf); 10 | int parent[N]; 11 | 12 | int32_t main() 13 | { 14 | ios::sync_with_stdio(0); 15 | cin.tie(0); 16 | int n,m; 17 | cin>>n>>m; 18 | for(int i=0;i>x>>y>>w; 22 | graph[x].push_back({y,w}); 23 | in[y]++; 24 | } 25 | queueq; 26 | 27 | for(int i=1;i<=n;i++) 28 | { 29 | if(in[i]==0) 30 | { 31 | q.push(i); 32 | } 33 | } 34 | vectortopo; 35 | while(!q.empty()) 36 | { 37 | int father = q.front(); 38 | 
topo.push_back(father); 39 | q.pop(); 40 | 41 | 42 | for(auto child: graph[father]) 43 | { 44 | in[child.first]--; 45 | if(in[child.first]==0) 46 | { 47 | q.push(child.first); 48 | } 49 | } 50 | } 51 | 52 | dis[1] = 0; 53 | 54 | for(auto s : topo) 55 | { 56 | if(dis[s]!=inf) 57 | { 58 | for(auto child: graph[s]) 59 | { 60 | if(dis[child.first]>dis[s]+child.second) 61 | { 62 | parent[child.first] = s; 63 | dis[child.first] = dis[s]+child.second; 64 | } 65 | } 66 | } 67 | } 68 | 69 | 70 | 71 | if(dis[n]==inf) 72 | { 73 | cout<<"IMPOSSIBLE"<ans; 82 | 83 | while(v!=0) 84 | { 85 | ans.push_back(v); 86 | v = parent[v]; 87 | } 88 | reverse(ans.begin(),ans.end()); 89 | 90 | 91 | cout<<"dis[n] " < &mp, int k, int add) 91 | { 92 | mp[add]++; 93 | bool ok = mp.size() <= k; 94 | mp[add]--; 95 | if (mp[add] == 0) 96 | { 97 | mp.erase(add); 98 | } 99 | return ok; 100 | } 101 | 102 | int subArrayWithAtmostK(vector &nums, int k) 103 | { 104 | int j = 0; 105 | int ans = 0; 106 | unordered_map mp; 107 | 108 | mp[nums[j]]++; 109 | 110 | for (int i = 0; i < nums.size(); i++) 111 | { 112 | while (j + 1 < nums.size() && isValid(mp, k, nums[j + 1])) 113 | { 114 | mp[nums[j + 1]]++; 115 | j++; // j porjont neya [j included] 116 | } 117 | 118 | if (mp.size() <= k) 119 | { 120 | ans += j - i + 1; // j included. 121 | } 122 | 123 | mp[nums[i]]--; 124 | if (mp[nums[i]] == 0) 125 | { 126 | mp.erase(nums[i]); 127 | } 128 | } 129 | 130 | return ans; 131 | } 132 | int subarraysWithKDistinct(vector &nums, int k) 133 | { 134 | return subArrayWithAtmostK(nums, k) - subArrayWithAtmostK(nums, k - 1); 135 | 136 | // Sub Array With excatly K different integers = Sub Array With AtMost K different integers - Sub Array With AtMost (K-1) different integers 137 | } 138 | }; 139 | 140 | 141 | ===================== Variable length Sliding Window ============== Examples problem(Sub-Array Counting : excalty k property) 142 | 143 | https://leetcode.com/problems/count-number-of-nice-subarrays/?envType=list&envId=p96cfcjg 144 | 145 | class Solution 146 | { 147 | public: 148 | bool isValid(int odd, int k, int num) 149 | { 150 | odd += (num & 1); 151 | return odd <= k; 152 | } 153 | int atMostK(vector &nums, int k) 154 | { 155 | int j = 0; 156 | int ans = 0; 157 | int odd = (nums[0] & 1); 158 | for (int i = 0; i < nums.size(); i++) 159 | { 160 | while (j + 1 < nums.size() && isValid(odd, k, nums[j + 1])) 161 | { 162 | j++; 163 | odd += (nums[j] & 1); // j included . 
which means j porjonto 164 | } 165 | 166 | if (odd <= k) 167 | { 168 | ans += j - i + 1; // j included 169 | } 170 | 171 | odd -= (nums[i] & 1); 172 | } 173 | 174 | return ans; 175 | } 176 | 177 | int numberOfSubarrays(vector &nums, int k) 178 | { 179 | return atMostK(nums, k) - atMostK(nums, k - 1); //= Excatly k 180 | } 181 | }; 182 | 183 | 184 | -------------------------------------------------------------------------------- /Strongly Connected Components to DAG.cpp: -------------------------------------------------------------------------------- 1 | //Arfatul Islam Asif 2 | //This programme can be used for both finding SCC's and converting a directed graph to DAG 3 | 4 | const int N = 1000; 5 | vectorgraph[N]; 6 | vectorRgraph[N]; 7 | bool vis1[N]; 8 | bool vis2[N]; 9 | int group_id[N]; 10 | vectorforder; 11 | 12 | void dfs1(int s) 13 | { 14 | vis1[s] = true; 15 | for(auto child : graph[s]) 16 | { 17 | if(vis1[child]) 18 | { 19 | continue; 20 | } 21 | dfs1(child); 22 | } 23 | forder.push_back(s); 24 | } 25 | void dfs2(int s , int no) 26 | { 27 | vis2[s] = true; 28 | group_id[s] = no; 29 | for(auto child : Rgraph[s]) 30 | { 31 | if(vis2[child]) 32 | { 33 | continue; 34 | } 35 | dfs2(child , no); 36 | } 37 | return; 38 | } 39 | 40 | 41 | 42 | 43 | int32_t main() 44 | { 45 | ios::sync_with_stdio(0); 46 | cin.tie(0); 47 | int n,m; 48 | cin>>n>>m; 49 | for(int i=0;i>x>>y; 53 | graph[x].push_back(y); 54 | Rgraph[y].push_back(x); 55 | } 56 | for(int i=0;iDAG[N]; 74 | //SCC to DAG convertion 75 | for(int i=0;igraph[N]; 6 | bool vis[N]; 7 | int in[N]; 8 | 9 | int32_t main() 10 | { 11 | ios::sync_with_stdio(0); 12 | cin.tie(0); 13 | 14 | int n,m; 15 | cin>>n>>m; 16 | 17 | for(int i=0;i>x>>y; 21 | graph[x].push_back(y); 22 | in[y]++; 23 | } 24 | queueq; 25 | for(int i=1;i<=n;i++) 26 | { 27 | if(in[i]==0) 28 | { 29 | q.push(i); 30 | } 31 | } 32 | vectorans; 33 | while(!q.empty()) 34 | { 35 | int father = q.front(); 36 | vis[father] = true; 37 | ans.push_back(father); 38 | q.pop(); 39 | for(auto child : graph[father]) 40 | { 41 | if(vis[child]) 42 | { 43 | continue; 44 | } 45 | in[child]--; 46 | if(in[child]==0) 47 | { 48 | q.push(child); 49 | vis[child] = true; 50 | } 51 | } 52 | } 53 | 54 | 55 | for(int i=1;i<=n;i++) 56 | { 57 | if(vis[i]==false) 58 | { 59 | cout<<"IMPOSSIBLE"< 0) 72 | { 73 | cur = node[cur][s[i]-'a']; 74 | } 75 | else 76 | { 77 | return false; 78 | } 79 | } 80 | 81 | return true; 82 | } 83 | 84 | 85 | bool Search(string s) 86 | { 87 | int cur = root; 88 | for(int i=0;i 0) 91 | { 92 | cur = node[cur][s[i]-'a']; 93 | } 94 | else 95 | { 96 | return false; 97 | } 98 | } 99 | 100 | return (is_end[cur]>0); 101 | } 102 | 103 | 104 | 105 | int32_t main() 106 | { 107 | ios::sync_with_stdio(0); 108 | cin.tie(0); 109 | 110 | 111 | ini(); 112 | 113 | int n; 114 | cin>>n; 115 | 116 | while(n--) 117 | { 118 | string s; 119 | cin>>s; 120 | Insert(s); 121 | } 122 | 123 | string a; 124 | cin>>a; 125 | 126 | cout<>b; 132 | 133 | Remove(b); // asuming b is present in trie 134 | 135 | cout< 2 e ekti edge directed , then er information store hobe cnt[2] Te ::::::::::: ebong 2 = node[1][] is like 1 = parent[2] er moto in directed tree :: age amra parent diye jump kortm : Now children diye jump] 7 | int root = 0; //which means Edge info is actually stored in the 2nd child : and edges er characterictistics is (2 = node[1][]) refers to (1) ---> (2) edge 8 | int avail = 0; 9 | int cnt[N]; 10 | 11 | 12 | void ini() 13 | { 14 | root = 0; 15 | avail = 0; 16 | for(int i=0;i=0;i--) // MSB must be at the top 1 
28 | { 29 | if(x &(1<=0;i--) // MSB must be at the top 1 60 | { 61 | if(x &(1<=0;i--) // MSB must be at the top 1 79 | { 80 | if(x&(1<0) // ei edge er info is being stored in cur = node[cur][0] 83 | { 84 | ans |= (1<0) 96 | { 97 | ans |= (1<=0;i--) 117 | { 118 | if(x&(1<0) 121 | { 122 | cur = node[cur][1]; 123 | } 124 | else 125 | { 126 | ans |=(1<0) 134 | { 135 | cur = node[cur][0]; 136 | } 137 | else 138 | { 139 | ans |=(1<=2 156 | cin>>n; 157 | 158 | vectorv; 159 | 160 | ini(); 161 | 162 | //Insert(0); // IN some problem you might need to use this where you have the choice of choosing any number of element : i.e n==1 is allowed 163 | 164 | for(int i=0;i>a; 168 | v.push_back(a); 169 | Insert(a); 170 | } 171 | 172 | int mx = 0; 173 | 174 | for(int i=0;i 2 ----------> 3 18 | \ / 19 | \ 4 1 / 20 | \ / 21 | \ / 22 | \> 4 2), dist[3] = 2 (1 -> 2 -> 3), dist[4] = 3 (1 -> 2 -> 3 -> 4). 26 | 27 | **If we increase weight of all edges by a constant value, lets say 1, will the relative distance (include edges) remained same?** 28 | 29 | ``` 30 | 2 2 31 | 1 -----------> 2 ----------> 3 32 | \ / 33 | \ 5 2 / 34 | \ / 35 | \ / 36 | \> 4 2), dist[3] = 4 (1 -> 2 -> 3), dist[4] = 5 (1 -> 4). 40 | **Look dist[4] has changed its relative order** 41 | 42 | 43 |
44 |
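**Why the order changes:** adding a constant c to every edge increases a path's total weight by c times the number of edges on that path, so paths with more edges are penalized more. Here the three-edge path 1 -> 2 -> 3 -> 4 grows from 3 to 6, while the single edge 1 -> 4 grows only from 4 to 5 — which is exactly why dist[4] switches to the direct edge.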
45 | 46 | ## Reweight each edge of the graph as w'(u, v) = w(u, v) + potential[u] - potential[v], using Bellman-Ford to compute the potentials and eliminate negative weights, then apply Dijkstra's algorithm to find the shortest paths from the source to every other vertex. 47 | 48 |
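For any path u0 -> u1 -> ... -> uk the new weights telescope:

```
w'(path) = w(path) + potential[u0] - potential[uk]
```

so every path between the same pair of vertices is shifted by the same amount, and the shortest paths themselves do not change — only their lengths, which are easy to shift back. Choosing potential[v] as the Bellman-Ford distance from an added zero-weight super source guarantees potential[v] <= potential[u] + w(u, v) for every edge, i.e. w'(u, v) >= 0, which is what makes Dijkstra safe to run.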
49 | **It will works: The question is where we can use it?** 50 | **Answer: Jonson's algorithm for all pair shortest path** 51 | 52 | ## Jonson's algorithm for all pair shortest path: 53 | **Step 1: Adjust negative edges using bellman-ford.** 54 | **Step 2: Run dijkstra for each vertex to find all pair shortest path** 55 | 56 | ``` 57 | This is better than floyed-warshall O(V^3). As it's complexity is O(V2log V + VE) 58 | ``` 59 | 60 | 61 | 62 | # By the way, it will not work if negative cycle exists. 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /XOR Basis.cpp: -------------------------------------------------------------------------------- 1 | // Intermediary 2 | // Young kid on the block 3 | // AIAsif try's Continuing the journey 4 | #include 5 | #include 6 | #include 7 | using namespace std; 8 | using namespace __gnu_pbds; 9 | #define endl "\n" 10 | #define int long long int 11 | #define ordered_set tree, rb_tree_tag, tree_order_statistics_node_update> 12 | 13 | 14 | Basis: A set B of vectors in a vector space V is called a basis (pl.: bases) if every element of V may be written in a unique way as a finite linear combination of elements of B. The coefficients of this linear combination are referred to as components or coordinates of the vector with respect to B. The elements of a basis are called basis vectors. 15 | 16 | class Basis 17 | { 18 | vector a; 19 | 20 | public: 21 | void insert(int x) 22 | { 23 | for (auto &i : a) 24 | 25 | x = min(x, x ^ i); //for row echelon form get the smallest bit which is not equals to 0 means this bit is controlled by this 26 | if (!x) 27 | return; 28 | for (auto &i : a) 29 | if ((i ^ x) < i) 30 | i ^= x; 31 | a.push_back(x); 32 | sort(a.begin(), a.end()); 33 | } 34 | bool can(int x) 35 | { 36 | for (auto &i : a) 37 | x = min(x, x ^ i); 38 | return !x; 39 | } 40 | int maxxor(int x = 0) 41 | { 42 | for (auto &i : a) 43 | x = max(x, x ^ i); 44 | return x; 45 | } 46 | int minxor(int x = 0) // always returning zero . If you are not considering empty set then take care of it... 47 | { 48 | for (auto &i : a) 49 | x = min(x, x ^ i); 50 | return x; 51 | } 52 | int kth(int k) //1st is 0 . which means empty set is also coounted .. if you don't want to count empty set then take care of the condition whether zero can be made using existing 53 | { // 1st is 0 54 | int sz = (int)a.size(); 55 | if (k > (1LL << sz)) 56 | return -1; 57 | k--; 58 | int ans = 0; 59 | for (int i = 0; i < sz; i++) 60 | if (k >> i & 1) 61 | ans ^= a[i]; 62 | return ans; 63 | } 64 | 65 | } t; 66 | 67 | int32_t main() 68 | { 69 | ios::sync_with_stdio(0); 70 | cin.tie(0); 71 | 72 | int n; 73 | cin>>n; 74 | for(int i=0;i>a; 78 | t.insert(a); 79 | } 80 | 81 | 82 | cout< z_function(const string &s) 10 | { 11 | int n = s.size(); 12 | vectorz(n); 13 | int l = 0; 14 | int r = 0; 15 | for(int i=1;ir) 26 | { 27 | l = i; 28 | r = i+z[i]; 29 | } 30 | } 31 | 32 | return z; 33 | } 34 | 35 | 36 | int stringCompression(const string &s) //length of smallest string t such s = t+t+..+t ; Or s is the concatenation of one or more t; 37 | { 38 | vectorz = z_function(s); 39 | 40 | for(int i=1;i