├── _misc ├── tick.cc ├── dictionary.cc └── parse_args.hh ├── _note └── heuristic_search.md ├── other ├── sorting_network.cc ├── coordinate_compression.cc ├── gregorian_calendar.cc ├── all_nearest_smaller_values.cc ├── unweighted_interval_scheduling.cc ├── weighted_interval_scheduling.cc ├── poker_hands.cc ├── cube.cc └── knapsack_expcore.cc ├── data_structure ├── union_find2.cc ├── minmax_heap.cc ├── skew_heap.cc ├── initializable_array.cc ├── sparse_table.cc ├── persistent_heap.cc ├── fenwick_tree_2d.cc ├── sqrt_array.cc ├── persistent_union_find.cc ├── disjoint_sparse_table.cc ├── partially_persistent_union_find.cc ├── leftist_heap.cc ├── persistent_rope.cc └── randomized_binary_search_tree.cc ├── README.md ├── combinatorics ├── next_radix.cc ├── permutation_hash.cc └── permutation_index.cc ├── __WIP └── equivalence.cc ├── number_theory ├── number_theoretic_function.txt ├── primes.cc ├── divisor_sigma.cc ├── mobius_mu.cc └── euler_phi.cc ├── machine_learning ├── roc-auc.cc ├── bayesian_bradley_terry.py └── bradley_terry.cc ├── numeric ├── find_min_unimodal.cc ├── derivative.cc ├── ODE_runge_kutta.cc ├── dual_number.cc ├── integrate.cc └── ODE_dormand_prince.cc ├── geometry ├── covered_range.cc ├── bk_tree.cc ├── coordinate_domination.cc ├── !circle.cc ├── rectangle_union.cc └── convex_hull.cc ├── string ├── infix_to_postfix.cc ├── sunday.cc ├── suffix_array.cc ├── palindromic_tree.cc ├── boyer_moore.cc ├── knuth_morris_pratt.cc └── earley.cc ├── graph ├── kruskal.cc ├── is_bipartite.cc ├── bipartite_matching.cc ├── is_claw_free.cc ├── is_graphic.cc ├── kcore.cc ├── topological_sort.cc ├── maximum_flow_ford_fulkerson.cc ├── is_cograph.cc ├── maximum_flow_edmonds_karp.cc ├── least_common_ancestor_doubling.cc ├── cycle_enumeration.cc ├── chromatic_number.cc ├── least_common_ancestor_sparsetable.cc ├── eulerian_path_undirected.cc ├── prufer_code.cc ├── transitive_reduction_dag.cc ├── bipartite_matching_HK.cc ├── least_common_ancestor_tarjan.cc ├── strongly_connected_component_kosaraju.cc ├── strongly_connected_component_gabow.cc ├── least_common_ancestor_heavylight.cc ├── strongly_connected_component_tarjan.cc ├── link_cut_tree.cc ├── minimum_feedback_arc_set.cc ├── betweenness_centrality.cc └── maximum_flow_dinic.cc ├── dynamic_programming ├── longest_zigzag_subsequence.cc ├── longest_increasing_subsequence.cc ├── minimum_coin_change.cc ├── knapsack.cc └── rod_cutting.cc └── math ├── quadratic_equation.cc ├── lattice_below_line.cc ├── permanent.cc ├── linear_recursion.cc ├── fast_fourier_transform.cc └── SimplexMethodLP.cc /_misc/tick.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | double tick() { 4 | static double old; 5 | 6 | struct timeval tv; 7 | gettimeofday(&tv, NULL); 8 | double now = tv.tv_sec + tv.tv_usec * 1e-6, ret = now; 9 | 10 | ret -= old; 11 | old = now; 12 | return ret; 13 | } 14 | -------------------------------------------------------------------------------- /_misc/dictionary.cc: -------------------------------------------------------------------------------- 1 | template 2 | struct dictionary { 3 | unordered_map dict; 4 | vector idict; 5 | size_t id(T s) { 6 | if (!dict.count(s)) { 7 | dict[s] = idict.size(); 8 | idict.push_back(s); 9 | } 10 | return dict[s]; 11 | } 12 | T value(size_t id) { 13 | return idict[id]; 14 | } 15 | size_t size() const { 16 | return dict.size(); 17 | } 18 | }; 19 | -------------------------------------------------------------------------------- /_note/heuristic_search.md: 
-------------------------------------------------------------------------------- 1 | Heuristic Search Algorithms 2 | =========================== 3 | 4 | Overview 5 | -------- 6 | 7 | Basically, there are three choices: 8 | 9 | + A* 10 | + IDA* (iterative deepening A*) 11 | + RBFS (recursive best first search) 12 | 13 | If the state space is sufficiently small, use A*. 14 | Otherwise, use IDA* or RBFS. 15 | If good solutions are spreaded among search pathes, use IDA*. 16 | Otherwise, i.e., good solutions are condensed, use RBFS. 17 | 18 | TODO 19 | -------------------------------------------------------------------------------- /other/sorting_network.cc: -------------------------------------------------------------------------------- 1 | // Optimal Sorting (Sorting Network) 2 | // Twice faster than std::sort 3 | // 4 | // Reference: 5 | // http://www.angelfire.com/blog/ronz/Articles/sn2-13_16_horz_2.gif 6 | 7 | #define SW(a,b) if (a > b) swap(a, b); 8 | #define SORT3(a,b,c) SW(b,c)SW(a,b)SW(b,c) 9 | #define SORT4(a,b,c,d) SW(a,b)SW(c,d)SW(b,d)SW(a,c)SW(b,c) 10 | #define SORT5(a,b,c,d,e) \ 11 | SW(b,c)SW(d,e)SW(b,d)SW(a,c)SW(c,e)SW(a,d)SW(a,b)SW(c,d)SW(b,c) 12 | #define SORT6(a,b,c,d,e,f) SW(a,b)SW(a,b)SW(c,d)SW(e,f)SW(a,c)SW(d,f)\ 13 | SW(b,e)SW(a,b)SW(c,d)SW(e,f)SW(b,c)SW(d,e)SW(c,d) 14 | -------------------------------------------------------------------------------- /_misc/parse_args.hh: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | namespace parse_args { 8 | std::unordered_map args; 9 | void init(int argc, char *argv[]) { 10 | for (int i = 1; argv[i]; ++i) { 11 | char cmd[256], param[256]; 12 | std::sscanf(argv[i], "%[^=]=%s", cmd, param); 13 | args[cmd] = param; 14 | } 15 | } 16 | bool has(std::string s) { 17 | return args.count(s); 18 | } 19 | template 20 | T get(std::string s, T x = T()) { 21 | std::stringstream ss(args[s]); 22 | T value; 23 | ss >> value; 24 | return value; 25 | } 26 | }; 27 | -------------------------------------------------------------------------------- /data_structure/union_find2.cc: -------------------------------------------------------------------------------- 1 | struct union_find { 2 | int components; 3 | vector root, next, size; 4 | void clear(int n) { 5 | components = n; 6 | root.resize(n); 7 | iota(root.begin(), root.end(), 0); 8 | next.assign(n, -1); 9 | size.assign(n, 1); 10 | } 11 | bool find(int x, int y) { return root[x] == root[y]; } 12 | bool unite(int x, int y) { 13 | if ((x = root[x]) == (y = root[y])) return false; 14 | if (size[x] < size[y]) swap(x, y); 15 | size[x] += size[y]; 16 | for (int z; y >= 0; y = z) { 17 | z = next[y]; 18 | next[y] = next[x]; 19 | next[x] = y; 20 | root[y] = x; 21 | } 22 | --components; 23 | return true; 24 | } 25 | }; 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # C++ implementations of algorithms 2 | 3 | These are my C++ implementations of algorithms, 4 | which are written for studying/understanding algorithms. 5 | 6 | These codes are published in **public domain.** 7 | You can use the codes for *any purpose without any warranty*. 
8 | 9 | 10 | author: Takanori MAEHARA (web: http://www.prefield.com, e-mail: maehara@prefield.com, twitter: @tmaehara) 11 | 12 | # Recruiting 13 | 14 | I am a researcher at *RIKEN Center for Advanced Intelligence Project*, which is a Japanese governmental academic research institute. I am working on discrete algorithms (including topics in this github repository). We are hiring *strong programmers* who has some achievements in some competitions. If you are interested in, please feel free to contact me. 15 | -------------------------------------------------------------------------------- /combinatorics/next_radix.cc: -------------------------------------------------------------------------------- 1 | // 2 | // 0 0 0 3 | // 1 0 0 4 | // 2 0 0 5 | // 0 1 0 6 | // 1 1 0 7 | // 2 1 0 8 | // ... 9 | // 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | using namespace std; 17 | 18 | #define fst first 19 | #define snd second 20 | #define all(c) ((c).begin()), ((c).end()) 21 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 22 | 23 | template 24 | bool next_radix(It begin, It end, int base) { 25 | for (It cur = begin; cur != end; ++cur) { 26 | if ((*cur += 1) >= base) *cur = 0; 27 | else return true; 28 | } 29 | return false; 30 | } 31 | 32 | int main() { 33 | vector a(3, 0); 34 | do { 35 | for (int i = 0; i < 3; ++i) cout << a[i]; 36 | cout << endl; 37 | } while (next_radix(all(a), 2)); 38 | } 39 | -------------------------------------------------------------------------------- /__WIP/equivalence.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | 9 | #define fst first 10 | #define snd second 11 | #define all(c) ((c).begin()), ((c).end()) 12 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 13 | 14 | // specify operator == 15 | template 16 | void equivalence(vector x, F eq) { 17 | vector parent(x.size()); 18 | for (int i = ; i < x.size(); ++i) { 19 | parent[i] = i; 20 | for (int j = 0; j < i; ++j) { 21 | parent[j] = parent[parent[j]]; 22 | if (eq(x[i], x[j])) parent[parent[parent[j]]] = i; 23 | } 24 | } 25 | for (int i = 0; i < x.size(); ++i) parent[i] = parent[parent[i]]; 26 | 27 | for (int i = 0; i < parent.size(); ++i) 28 | printf("%d %d \n", x[i], parent[i]); 29 | } 30 | 31 | int main() { 32 | vector x = {3,1,4,1,5,9,2,6,5,3,5,8,9}; 33 | equivalence(x, [&](int a, int b) { return a == b; }); 34 | } 35 | -------------------------------------------------------------------------------- /number_theory/number_theoretic_function.txt: -------------------------------------------------------------------------------- 1 | A function f is called "number theoretic" or "multiplicative" if f(nm) = f(n) f(m) where n and m are coprime. 2 | 3 | Any number theoretic function can be computed in 4 | 1) O(sqrt(n)) for a certain n. 5 | 2) O((hi-lo) loglog hi) for n in [lo,hi). 6 | 7 | 8 | 9 | 1) 10 | f(n) = 1; 11 | for p = 1 ... sqrt(n): 12 | if n % p == 0: 13 | q = 1 14 | while (n % k == 0) 15 | q *= p; 16 | n /= p; 17 | f(n) *= f(q) // q is a prime power 18 | 19 | Complexity: 20 | increment of p: O(sqrt(n)) 21 | division by p: O(log n) 22 | ==> O(sqrt(n)). 
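A concrete C++ sketch of scheme 1), using Euler's totient phi as the example multiplicative function
(phi(q) = q - q/p for a prime power q = p^e); illustrative only — see also number_theory/euler_phi.cc:

long long euler_phi(long long n) {
  long long f = 1;
  for (long long p = 2; p * p <= n; ++p) {   // trial division up to sqrt(n)
    if (n % p != 0) continue;
    long long q = 1;
    while (n % p == 0) { q *= p; n /= p; }   // extract the prime power q = p^e
    f *= q - q / p;                          // phi(q) for a prime power q
  }
  if (n > 1) f *= n - 1;                     // one prime factor > sqrt(n) may remain
  return f;
}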
23 | 24 | 2) 25 | f(n) = 1 for all n in [lo, hi) 26 | r(n) = n for all n in [lo, hi) 27 | for p in primes: 28 | for n in [lo,hi) such that n%p == 0: 29 | if r(n) < p: break; 30 | q = 1 31 | while r(n) % p == 0: 32 | q *= p 33 | r(n) /= p 34 | f(n) *= f(q) 35 | 36 | Complexity: 37 | division by p: sum of exponents in n!, which is known to be O(n log log n) 38 | ==> O(n log log n). 39 | -------------------------------------------------------------------------------- /machine_learning/roc-auc.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | using namespace std; 4 | 5 | double trapezoid(double x1, double x2, double y1, double y2) { 6 | return (y2+y1)/2 * abs(x2-x1); 7 | } 8 | 9 | double auc(vector test, vector pred) { 10 | int n = test.size(); 11 | assert(n == pred.size()); 12 | 13 | vector idx(n); 14 | for (int i = 0; i < n; ++i) idx[i] = i; 15 | sort(idx.begin(), idx.end(), [&](int i, int j) { return pred[i] > pred[j]; }); 16 | 17 | double a = 0.0; 18 | double fp = 0, tp = 0, fp_prev = 0, tp_prev = 0; 19 | double prev_score = -1.0/0.0; 20 | for (int i: idx) { 21 | if (pred[i] != prev_score) { 22 | a += trapezoid(fp, fp_prev, tp, tp_prev); 23 | prev_score = pred[i]; 24 | fp_prev = fp; 25 | tp_prev = tp; 26 | } 27 | if (test[i] == 1) { 28 | tp += 1; 29 | } else { 30 | fp += 1; 31 | } 32 | } 33 | a += trapezoid(fp, fp_prev, tp, tp_prev); 34 | return a / (tp * fp); 35 | } 36 | int main() { 37 | vector test = {0, 1, 0, 1, 1}; 38 | vector pred = {0.2, 0.3, 0.4, 0.5, 0.6}; 39 | cout << auc(test, pred) << endl; 40 | } 41 | -------------------------------------------------------------------------------- /numeric/find_min_unimodal.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Minimum of unimodal function (goldsection search) 3 | // 4 | // Description: 5 | // Unimodal function is a function that has unique peak. 
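//   Golden-section note (illustrative): with r = 2/(3+sqrt(5)) ~= 0.382, the two
//   interior points b = a + r*(d-a) and c = d - r*(d-a) are placed so that when
//   one end of [a,d] is discarded, the surviving interior point can be reused;
//   each iteration therefore costs a single new function evaluation and shrinks
//   the bracket by a constant factor of about 0.618.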
6 | // 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | using namespace std; 14 | 15 | #define fst first 16 | #define snd second 17 | #define all(c) ((c).begin()), ((c).end()) 18 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 19 | 20 | template 21 | double find_min(F f, double a, double d, double eps = 1e-8) { 22 | const double r = 2 / (3 + sqrt(5.)); 23 | double b = a + r*(d-a), c = d - r*(d-a), fb = f(b), fc = f(c); 24 | while (d - a > eps) { 25 | if (fb > fc) { // '<': maximum, '>': minimum 26 | a = b; b = c; c = d - r * (d - a); 27 | fb = fc; fc = f(c); 28 | } else { 29 | d = c; c = b; b = a + r * (d - a); 30 | fc = fb; fb = f(b); 31 | } 32 | } 33 | return c; 34 | } 35 | 36 | int main() { 37 | cout << find_min([](double x) { return x*x; }, -1, 1) << endl; 38 | } 39 | -------------------------------------------------------------------------------- /other/coordinate_compression.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Coordinate Compression 3 | // 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | using namespace std; 11 | 12 | #define fst first 13 | #define snd second 14 | #define all(c) ((c).begin()), ((c).end()) 15 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 16 | 17 | template 18 | struct coordinate_compression { 19 | vector xs, ys; // O(n log n) 20 | coordinate_compression(vector xs) : xs(xs), ys(xs) { 21 | sort(all(ys)); 22 | ys.erase(unique(all(ys)), ys.end()); 23 | } 24 | int index(T a) const { // O(log n) 25 | auto it = lower_bound(all(ys), a); 26 | if (it == ys.end() || *it != a) return -1; 27 | return distance(ys.begin(), it); 28 | } 29 | int value(int k) const { return ys[k]; } // k in [0, ys.size()) 30 | int size() const { return ys.size(); } 31 | }; 32 | 33 | 34 | int main() { 35 | vector x = {3,1,4,1,5,9}; 36 | coordinate_compression compressor(x); 37 | for (int i = 0; i , class G = greater> 16 | struct minmax_heap { 17 | priority_queue, G> minh, minp; 18 | priority_queue, L> maxh, maxp; 19 | void normalize() { 20 | while (!minp.empty() && minp.top() == minh.top()) { 21 | minp.pop(); 22 | minh.pop(); 23 | } 24 | while (!maxp.empty() && maxp.top() == maxh.top()) { 25 | maxp.pop(); 26 | maxh.pop(); 27 | } 28 | } 29 | void push(T x) { minh.push(x); maxh.push(x); } 30 | T min() { normalize(); return minh.top(); } 31 | T max() { normalize(); return maxh.top(); } 32 | void pop_min() { normalize(); maxp.push(minh.top()); minh.pop(); } 33 | void pop_max() { normalize(); minp.push(maxh.top()); maxh.pop(); } 34 | }; 35 | -------------------------------------------------------------------------------- /geometry/covered_range.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Covered Range 3 | // 4 | // Description: 5 | // Given a set of intervals [l_j, r_j). 6 | // Find a measure of union of the intervals. 7 | // 8 | // Algorithm: 9 | // Plane sweep. 10 | // 11 | // Complexity: 12 | // O(n log n). 13 | // 14 | // Verified: 15 | // SPOJ18531 16 | // 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | using namespace std; 24 | 25 | #define fst first 26 | #define snd second 27 | #define all(c) ((c).begin()), ((c).end()) 28 | 29 | // measure of union of x[j]. 
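// e.g. the intervals {[0,3), [2,5), [4,6)} (see main below) cover [0,6): measure 6.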
30 | template 31 | T covered_range(vector> x) { 32 | typedef pair event; 33 | vector es; 34 | for (int i = 0; i < x.size(); ++i) { 35 | es.push_back({x[i].fst, i}); 36 | es.push_back({x[i].snd,~i}); 37 | } 38 | sort(all(es)); 39 | int c = 0; 40 | T a = es[0].fst, ans = 0; 41 | for (auto e: es) { 42 | if (c > 0) ans += e.fst - a; 43 | if (e.snd >= 0) ++c; 44 | else --c; 45 | a = e.fst; 46 | } 47 | return ans; 48 | } 49 | 50 | int main() { 51 | vector> x; 52 | x.push_back({0,3}); 53 | x.push_back({2,5}); 54 | x.push_back({4,6}); 55 | cout << covered_range(x) << endl; 56 | } 57 | -------------------------------------------------------------------------------- /string/infix_to_postfix.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Convert infix notation to postfix notation 3 | // 4 | // Description: 5 | // 1 * 2 + 3 * (4 + 5) <- infix notation 6 | // 1 2 * 3 4 5 + * + <- postfix notation 7 | // Postfix notation is easy to evaluate. 8 | // 9 | // Algorithm: 10 | // Shunting-yard algorithm by Dijkstra. 11 | // 12 | // Verified: 13 | // SPOJ 4: ONP - Transform the Expression 14 | // 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | using namespace std; 25 | 26 | string infix_to_postfix(string s) { 27 | s += '_'; // terminal symbol 28 | stringstream ss; 29 | vector op = {'_'}; 30 | auto rank = [](char c) { return string("(^/*-+)").find(c); }; 31 | for (char c: s) { 32 | if (isalnum(c)) ss << c; 33 | else { 34 | for (; op.back() != '('; op.pop_back()) { 35 | if (rank(op.back()) >= rank(c)) break; 36 | ss << op.back(); 37 | } 38 | if (c == ')') op.pop_back(); 39 | else op.push_back(c); 40 | } 41 | } 42 | return ss.str(); 43 | } 44 | 45 | int main() { 46 | int ncase; scanf("%d", &ncase); 47 | for (int icase = 0; icase < ncase; ++icase) { 48 | char s[1024]; scanf("%s", s); 49 | printf("%s\n", infix_to_postfix(s).c_str()); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /data_structure/skew_heap.cc: -------------------------------------------------------------------------------- 1 | // Skew Heap 2 | // 3 | // Description: 4 | // Heap data structure with the following operations. 5 | // 6 | // 1. push a value O(log n) 7 | // 2. pop the smallest value O(log n) 8 | // 3. merge two heaps O(log n + log m) 9 | // 4. add a value to all elements O(1) 10 | // 11 | 12 | struct skew_heap { 13 | struct node { 14 | node *ch[2]; 15 | int key; 16 | int delta; 17 | } *root; 18 | skew_heap() : root(0) { } 19 | void propagate(node *a) { 20 | a->key += a->delta; 21 | if (a->ch[0]) a->ch[0]->delta += a->delta; 22 | if (a->ch[1]) a->ch[1]->delta += a->delta; 23 | a->delta = 0; 24 | } 25 | node *merge(node *a, node *b) { 26 | if (!a || !b) return a ? 
a : b; 27 | propagate(a); propagate(b); 28 | if (a->key > b->key) swap(a, b); // min heap 29 | a->ch[1] = merge(b, a->ch[1]); 30 | swap(a->ch[0], a->ch[1]); 31 | return a; 32 | } 33 | void push(int key) { 34 | node *n = new node(); 35 | n->ch[0] = n->ch[1] = 0; 36 | n->key = key; n->delta = 0; 37 | root = merge(root, n); 38 | } 39 | void pop() { 40 | propagate(root); 41 | node *temp = root; 42 | root = merge(root->ch[0], root->ch[1]); 43 | } 44 | int top() { 45 | propagate(root); 46 | return root->key; 47 | } 48 | bool empty() { 49 | return !root; 50 | } 51 | void add(int delta) { 52 | if (root) root->delta += delta; 53 | } 54 | void merge(skew_heap x) { // destroy x 55 | root = merge(root, x.root); 56 | } 57 | }; 58 | -------------------------------------------------------------------------------- /numeric/derivative.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Numerical Derivative by Ridder's method. 3 | // 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | using namespace std; 13 | 14 | #define fst first 15 | #define snd second 16 | #define all(c) ((c).begin()), ((c).end()) 17 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 18 | 19 | template 20 | double differentiate(F f, double x, double eps = 1e-8) { 21 | const int n = 10; 22 | const double alpha = 1.4; 23 | double h = 1e-2, a[n][n], ans = 1.0/0.0, err = 1.0/0.0; 24 | 25 | a[0][0] = (f(x + h) - f(x - h)) / (2 * h); 26 | for (int i = 1; i < n; ++i) { 27 | h /= alpha; 28 | a[0][i] = (f(x + h) - f(x - h))/(2 * h); 29 | double fac = alpha * alpha; 30 | for (int j = 1; j <= i; ++j) { 31 | a[j][i] = (a[j-1][i] * fac - a[j-1][i-1])/(fac - 1.0); 32 | fac *= alpha * alpha; 33 | double errt = max(fabs(a[j][i] - a[j-1][i]), fabs(a[j][i] - a[j-1][i-1])); 34 | if (errt <= err) { 35 | err = errt; 36 | ans = a[j][i]; 37 | if (err < eps) return ans; 38 | } 39 | } 40 | if (fabs(a[i][i] - a[i-1][i-1]) >= 2 * err) break; 41 | } 42 | return ans; 43 | } 44 | 45 | double f(double x) { 46 | return exp(-x*x); 47 | } 48 | double df(double x) { 49 | return -2 * x * exp(-x*x); 50 | } 51 | 52 | int main() { 53 | for (int i = 0; i < 10; ++i) { 54 | double x = rand() / (1.0 + RAND_MAX); 55 | cout << differentiate(f, x) - df(x) << endl; 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /graph/kruskal.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Minimum Spanning Tree (Kruskal) 3 | // 4 | // 5 | // Description 6 | // Finding minimum spanning tree. 7 | // 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | using namespace std; 14 | 15 | #define fst first 16 | #define snd second 17 | #define all(c) ((c).begin()), ((c).end()) 18 | 19 | struct edge { 20 | int src, dst; 21 | int weight; 22 | }; 23 | struct graph { 24 | int n; 25 | vector edges; 26 | graph(int n = 0) : n(n) { } 27 | void add_edge(int src, int dst, int weight) { 28 | n = max(n, max(src, dst)+1); 29 | edges.push_back({src, dst, weight}); 30 | } 31 | vector p; 32 | int root(int i) { 33 | return p[i] < 0 ? 
i : p[i] = root(p[i]); 34 | } 35 | bool unite(int i, int j) { 36 | if ((i = root(i)) == (j = root(j))) return false; 37 | if (p[i] > p[j]) swap(i, j); 38 | p[i] += p[j]; p[j] = i; 39 | return true; 40 | } 41 | int kruskal() { 42 | p.assign(n, -1); 43 | sort(all(edges), [](edge x, edge y) { 44 | return x.weight < y.weight; 45 | }); 46 | int result = 0; 47 | for (auto e: edges) 48 | if (unite(e.src, e.dst)) 49 | result += e.weight; 50 | return result; 51 | } 52 | }; 53 | 54 | graph random_graph(int n, int d) { 55 | graph g(n); 56 | for (int i = 0; i < n; ++i) { 57 | for (int k = 0; k < d; ++k) { 58 | int j = rand() % n; 59 | g.add_edge(i, j, rand() % n); 60 | } 61 | } 62 | return g; 63 | } 64 | int main() { 65 | auto g = random_graph(100000, 100); 66 | cout << "[kruskal] " << g.kruskal() << endl; 67 | } 68 | -------------------------------------------------------------------------------- /other/gregorian_calendar.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Calendar 3 | // 4 | // Description: 5 | // A code for converting gregorian date <=> julian day number. 6 | // 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | using namespace std; 15 | 16 | #define fst first 17 | #define snd second 18 | #define all(c) ((c).begin()), ((c).end()) 19 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 20 | 21 | struct gregorian_date { 22 | int y, m, d; 23 | gregorian_date(int y, int m, int d) : y(y), m(m), d(d) { } 24 | 25 | // from julian day number to gregorian date 26 | gregorian_date(int x) { 27 | int e = 4*x + 4*((((4*x+274277)/146097)*3)/4) + 5455; 28 | int h = 5*((e% 1461) / 4) + 2; 29 | d = (h%153)/5 + 1; 30 | m = (h/153+2)%12 + 1; 31 | y = (e/1461) + (14-m)/12 - 4716; 32 | } 33 | // number of days from the epoch 34 | int julian_day_number() const { 35 | int a = (14-m)/12, Y = y+4800-a, M = m+12*a-3; 36 | return d + (153*M+2)/5 + 365*Y + (Y/4) - (Y/100) + (Y/400) - 32045; 37 | } 38 | // week of the day 39 | int day_of_week() const { 40 | return (julian_day_number() + 1) % 7; 41 | } 42 | bool is_leap() const { 43 | return (y % 4 == 0 && y % 100 != 0) || y % 400 == 0; 44 | } 45 | }; 46 | ostream &operator<<(ostream &os, const gregorian_date &g) { 47 | os << g.y << "/" << g.m << "/" << g.d; 48 | return os; 49 | } 50 | 51 | int main() { 52 | int JDN = gregorian_date(2000, 1, 1).julian_day_number(); 53 | cout << JDN << endl; 54 | auto g = gregorian_date(JDN); 55 | cout << g << endl; 56 | } 57 | -------------------------------------------------------------------------------- /data_structure/initializable_array.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Initializable Array 3 | // 4 | // Description: 5 | // It allows the following operations in O(1) time. 6 | // - init(a): initialize xs[i] = a for all i 7 | // - xs[i]: return xs[i] 8 | // - set(i, a): set xs[i] = a 9 | // The important operation is "init", which usually 10 | // requires O(n) time. By maintaining timestamps, 11 | // we can "emulate" the initialization. 12 | // 13 | // Complexity: 14 | // O(1). 15 | // 16 | // References: 17 | // J. Bentley (1986): Programming pearls. Addison-Wesley. 
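//   Concretely (illustrative note): to[0..b) is a stack of the indices written
//   since the last init(); index i holds a real value iff from[i] < b and
//   to[from[i]] == i, which is exactly the chain() test below. Hence init()
//   only has to reset b = 0, in O(1) time.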
18 | // 19 | #include 20 | using namespace std; 21 | 22 | #define fst first 23 | #define snd second 24 | #define all(c) ((c).begin()), ((c).end()) 25 | 26 | template 27 | struct InitializableArray { 28 | T initv, *value; 29 | size_t b, *from, *to; 30 | InitializableArray(int n) { 31 | value = new T[n]; 32 | from = new size_t[n]; 33 | to = new size_t[n]; 34 | } 35 | bool chain(int i) { 36 | int j = from[i]; 37 | return j < b && to[j] == i; 38 | } 39 | void init(T a) { 40 | initv = a; 41 | b = 0; 42 | } 43 | T operator[](int i) { 44 | return chain(i) ? value[i] : initv; 45 | } 46 | void set(int i, T a) { 47 | if (!chain(i)) { 48 | from[i] = b; 49 | to[b++] = i; 50 | } 51 | value[i] = a; 52 | } 53 | }; 54 | 55 | int main() { 56 | InitializableArray a(3); 57 | cout << a.value[0] << endl; 58 | a.init(0); 59 | a.init(2); 60 | a.set(1, 5); 61 | cout << a[0] << endl; 62 | cout << a[1] << endl; 63 | cout << a[2] << endl; 64 | a.set(2, 3); 65 | a.init(0); 66 | } 67 | -------------------------------------------------------------------------------- /other/all_nearest_smaller_values.cc: -------------------------------------------------------------------------------- 1 | // 2 | // All nearest smaller values 3 | // 4 | // Description: 5 | // Compute nearest smaller values for all i: 6 | // nearest_smaller[i] = argmax { j : j < i, a[j] < a[i] }. 7 | // 8 | // Algorithm: 9 | // Barbay-Fischer-Navarro's simple algorithm. 10 | // 11 | // Complexity: 12 | // O(n). 13 | // 14 | // References: 15 | // J. Barbay, J. Fischer, and G. Navarro (2012): 16 | // LRM-Trees: Compressed indices, adaptive sorting, and compressed permutations. 17 | // Theoretical Computer Science, vol. 459, pp. 26-41. 18 | // 19 | // Verified: 20 | // SPOJ277, SPOJ1805 21 | // 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | using namespace std; 28 | 29 | #define fst first 30 | #define snd second 31 | #define all(c) ((c).begin()), ((c).end()) 32 | 33 | template 34 | vector nearest_smallers(const vector &x) { 35 | vector z; 36 | for (int i = 0; i < x.size(); ++i) { 37 | int j = i-1; 38 | while (j >= 0 && x[j] >= x[i]) j = z[j]; 39 | z.push_back(j); 40 | } 41 | return z; 42 | } 43 | 44 | 45 | int main() { 46 | for (int n; scanf("%d", &n) == 1 && n > 0; ) { 47 | vector x(n); 48 | for (int i = 0; i < n; ++i) 49 | scanf("%lld", &x[i]); 50 | auto v = nearest_smallers(x); 51 | reverse(all(x)); 52 | auto u = nearest_smallers(x); 53 | reverse(all(x)); 54 | reverse(all(u)); 55 | long long best = 0; 56 | for (int i = 0; i < n; ++i) { 57 | long long area = x[i] * (n - v[i] - u[i] - 2); 58 | best = max(best, area); 59 | } 60 | printf("%lld\n", best); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /machine_learning/bayesian_bradley_terry.py: -------------------------------------------------------------------------------- 1 | # 2 | # Bayesian version of Bradley-Terry model 3 | # 4 | # Reference: 5 | # Ruby C. Weng and Chih-Jen Lin (2011): 6 | # A Bayesian approximation method for online ranking. 
7 | # Jornal on Machine Learning Research, vol.12, pp.267--300 8 | # (no-team version of Algorithm 1) 9 | # 10 | 11 | import math 12 | import random 13 | from collections import defaultdict 14 | import matplotlib.pyplot as plt 15 | 16 | beta = 25.0/6.0 17 | kappa = 0.0001 18 | mu = defaultdict(lambda: 25.0) 19 | sigma = defaultdict(lambda: 25.0/3.0) 20 | 21 | # exp(a) / (exp(a) + exp(b)) 22 | def logit(a, b): 23 | return 1.0 / (1.0 + math.exp(b - a)) 24 | def c(i,q): 25 | return (sigma[i]**2 + sigma[q]**2 + 2.0 * beta**2)**0.5 26 | def p(i,q): 27 | return logit(mu[i]/c(i,q), mu[q]/c(i,q)) 28 | def gamma(i,q): 29 | return sigma[i] / c(i,q) 30 | 31 | # result: {player} -> Int (smaller is better) 32 | def update(result): 33 | for i in result: 34 | Omega = 0 35 | Delta = 0 36 | for q in result: 37 | if i == q: continue 38 | if result[i] < result[q]: s = 1.0 39 | if result[i] > result[q]: s = 0.0 40 | if result[i] == result[q]: s = 0.5 41 | Omega += sigma[i]**2 / c(i,q) * (s - p(i,q)) 42 | Delta += gamma(i,q) * sigma[i]**2 / c(i,q)**2 * p(i,q) * p(q,i) 43 | mu[i] += Omega 44 | sigma[i] *= max(1.0 - Delta, kappa) 45 | 46 | # verification 47 | out = [] 48 | for iter in range(10000): 49 | if random.random() < 0.25: 50 | update({'a': 1, 'b': 2}) 51 | else: 52 | update({'a': 2, 'b': 1}) 53 | out.append(p('a', 'b')) 54 | plt.plot(out) 55 | plt.show() 56 | -------------------------------------------------------------------------------- /data_structure/sparse_table.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Sparse Table for Range Minimum Query 3 | // 4 | // Description: 5 | // The sparse table stores 6 | // table[h][i] = min(x[i], ..., x[i+2^h-1]) 7 | // to solve 8 | // RMQ(i,j) = min { x[i], ..., x[j-1] }. 9 | // 10 | // Algorithm: 11 | // table[h+1][i] = min(table[h][i], table[h][i+2^h]). 12 | // RMQ(i,j) = min(table[h][i], table[h][j-2^h-1]. 13 | // 14 | // Complexity: 15 | // O(n log n) for construction, 16 | // O(1) for query. 17 | // 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | using namespace std; 27 | 28 | #define fst first 29 | #define snd second 30 | #define all(c) ((c).begin()), ((c).end()) 31 | 32 | template 33 | struct sparse_table { 34 | const vector &x; 35 | vector> table; 36 | int argmin(int i, int j) { return x[i] < x[j] ? i : j; } 37 | sparse_table(const vector &x) : x(x) { 38 | int logn = sizeof(int)*__CHAR_BIT__-1-__builtin_clz(x.size()); 39 | table.assign(logn+1, vector(x.size())); 40 | iota(all(table[0]), 0); 41 | for (int h = 0; h+1 <= logn; ++h) 42 | for (int i = 0; i+(1< a = {5,3,8,2,1,5,6}; 53 | int n = a.size(); 54 | sparse_table ST(a); 55 | 56 | for (int i = 0; i < n; ++i) { 57 | for (int j = i+1; j <= n; ++j) { 58 | cout << ST.range_min(i, j) << " "; 59 | } 60 | cout << endl; 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /graph/is_bipartite.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Bipartite graph recognition 3 | // 4 | // Description: 5 | // A graph is bipartite if there is a 2-partition 6 | // such that there are no edges in the same components. 7 | // 8 | // Algorithm: 9 | // Depth first search. 
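//   (Greedily 2-color the vertices along the search; an edge whose endpoints
//   receive the same color certifies an odd cycle, so the graph is not bipartite.)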
10 | // 11 | // Verified: 12 | // SPOJ3337 13 | // 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | using namespace std; 23 | 24 | struct graph { 25 | int n; 26 | vector> adj; 27 | graph(int n = 0) : n(n), adj(n) { } 28 | void add_edge(int src, int dst) { 29 | n = max(n, max(src, dst)+1); 30 | adj.resize(n); 31 | adj[src].push_back(dst); 32 | adj[dst].push_back(src); 33 | } 34 | }; 35 | bool is_bipartite(graph g) { 36 | vector color(g.n, -1); 37 | for (int u = 0; u < g.n; ++u) { 38 | if (color[u] != -1) continue; 39 | color[u] = 0; 40 | for (vector S = {u}; !S.empty(); ) { 41 | int v = S.back(); S.pop_back(); 42 | for (auto w: g.adj[v]) { 43 | if (color[w] == color[v]) return false; 44 | if (color[w] == -1) { 45 | color[w] = !color[v]; 46 | S.push_back(w); 47 | } 48 | } 49 | } 50 | } 51 | return true; 52 | } 53 | 54 | 55 | int main() { 56 | int ncase; 57 | scanf("%d", &ncase); 58 | for (int icase = 0; icase < ncase; ++icase) { 59 | printf("Scenario #%d:\n", icase+1); 60 | int n, m; 61 | scanf("%d %d", &n, &m); 62 | graph g(n); 63 | for (int i = 0; i < m; ++i) { 64 | int u, v; 65 | scanf("%d %d", &u, &v); 66 | g.add_edge(u-1, v-1); 67 | } 68 | if (is_bipartite(g)) printf("No suspicious bugs found!\n"); 69 | else printf("Suspicious bugs found!\n"); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /graph/bipartite_matching.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Ford-Fulkerson' maximum bipartite matching 3 | // 4 | // Description: 5 | // Compute the maximum cardinality matching for bipartite graph. 6 | // 7 | // Algorithm: 8 | // Ford-Fulkerson type (DFS-based) augmentaing path algorithm. 9 | // 10 | // Complexity: 11 | // O(m n) time 12 | // 13 | // Verified: 14 | // AOJ Matching 15 | // 16 | // Note: 17 | // TLE in SPOJ 4206: Fast Maximum Matching 18 | // 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | using namespace std; 30 | #define all(c) (c).begin(), (c).end() 31 | 32 | struct graph { 33 | int L, R; 34 | vector> adj; 35 | graph(int L, int R) : L(L), R(R), adj(L+R) { } 36 | void add_edge(int u, int v) { 37 | adj[u].push_back(v+L); 38 | adj[v+L].push_back(u); 39 | } 40 | int maximum_matching() { 41 | vector visited(L), mate(L+R, -1); 42 | function augment = [&](int u) { // DFS 43 | if (visited[u]) return false; 44 | visited[u] = true; 45 | for (int w: adj[u]) { 46 | int v = mate[w]; 47 | if (v < 0 || augment(v)) { 48 | mate[u] = w; 49 | mate[w] = u; 50 | return true; 51 | } 52 | } 53 | return false; 54 | }; 55 | int match = 0; 56 | for (int u = 0; u < L; ++u) { 57 | fill(all(visited), 0); 58 | if (augment(u)) ++match; 59 | } 60 | return match; 61 | } 62 | }; 63 | 64 | int main() { 65 | int L, R, m; 66 | scanf("%d %d %d", &L, &R, &m); 67 | graph g(L, R); 68 | for (int i = 0; i < m; ++i) { 69 | int u, v; 70 | scanf("%d %d", &u, &v); 71 | g.add_edge(u, v); 72 | } 73 | printf("%d\n", g.maximum_matching()); 74 | } 75 | -------------------------------------------------------------------------------- /dynamic_programming/longest_zigzag_subsequence.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Longest ZigZag Subsequence 3 | // 4 | // Description: 5 | // 6 | // A sequence xs is zigzag if x[i] < x[i+1], x[i+1] > x[i+2], for all i 7 | // (initial direction can be arbitrary). 
The maximum length zigzag 8 | // subsequence is computed in O(n) time by a greedy method. 9 | // 10 | // First, we contract contiguous same numbers. Then, the number of 11 | // peaks corresponds to the longest zig-zag subsequence. 12 | // 13 | #include 14 | 15 | using namespace std; 16 | 17 | #define fst first 18 | #define snd second 19 | #define all(c) ((c).begin()), ((c).end()) 20 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 21 | 22 | template 23 | int longestZigZagSubsequence(vector xs) { 24 | int n = xs.size(), len = 1, prev = -1; 25 | for (int i = 0, j; i < n; i = j) { 26 | for (j = i+1; j < n && xs[i] == xs[j]; ++j); 27 | if (j < n) { 28 | int sign = (xs[i] < xs[j]); 29 | if (prev != sign) ++len; 30 | prev = sign; 31 | } 32 | } 33 | return len; 34 | } 35 | 36 | // DP for verification 37 | template 38 | int longestZigZagSubsequenceN(vector A) { 39 | int n = A.size(); 40 | vector> Z(n, vector(2)); 41 | Z[0][0] = 1; 42 | Z[0][1] = 1; 43 | int best = 1; 44 | for(int i = 1; i < n; i++){ 45 | for(int j = i-1; j>= 0; j--){ 46 | if(A[j] < A[i]) Z[i][0] = max(Z[j][1]+1, Z[i][0]); 47 | if(A[j] > A[i]) Z[i][1] = max(Z[j][0]+1, Z[i][1]); 48 | } 49 | best = max(best, max(Z[i][0],Z[i][1])); 50 | } 51 | return best; 52 | } 53 | 54 | int main() { 55 | for (int seed = 0; seed < 10000; ++seed) { 56 | srand(seed); 57 | int n = 100; 58 | vector a(n); 59 | for (int i = 0; i < n; ++i) { 60 | a[i] = rand() % n; 61 | } 62 | assert(longestZigZagSubsequence(a) == longestZigZagSubsequenceN(a)); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /dynamic_programming/longest_increasing_subsequence.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Longest increasing subsequence 3 | // 4 | // Description: 5 | // We are given a sequence x[1,n]. 6 | // 7 | // Algorithm: 8 | // During iterations for k = 1, ..., n, 9 | // we maintain two arrays, length and tail: 10 | // length[k] = length of LIS ending a[k], 11 | // tail[l] = last element in LIS with length l, 12 | // where 13 | // tail[length[k]] = k. 14 | // Since tail is a decreasing sequence, length[k] can be 15 | // computed in O(log n) time by binary search. 16 | // (using lower_bound, it finds a strict LIS, and 17 | // using upper_bound, it finds a weak LIS.) 18 | // 19 | // Complexity: 20 | // O(n log n). 21 | // 22 | // Comment: 23 | // The algorithm finds a last element (in dictionary order of index). 24 | // For other order of the sequence, it is useful to work with the bucket 25 | // bucket[l] = { k : length[k] == l }. 
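// Illustration: for x = {1,1,1} a strict LIS has length 1, whereas the weak
// (non-decreasing) variant has length 3.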
26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | using namespace std; 33 | 34 | #define fst first 35 | #define snd second 36 | #define all(c) ((c).begin()), ((c).end()) 37 | 38 | template 39 | vector longest_increasing_subsequence(const vector &x) { 40 | int n = x.size(); 41 | vector length(n); 42 | vector tail; 43 | for (int k = 0; k < n; ++k) { 44 | length[k] = distance(tail.begin(), upper_bound(all(tail), k, 45 | [&](int i, int j) { return x[i] < x[j]; } 46 | )); 47 | if (length[k] == tail.size()) tail.push_back(k); 48 | else tail[length[k]] = k; 49 | } 50 | int m = *max_element(all(length)); 51 | vector y(m+1); 52 | for (int i = n-1; i >= 0; --i) 53 | if (length[i] == m) y[m--] = x[i]; 54 | return y; 55 | } 56 | 57 | 58 | int main() { 59 | vector x = {3,1,4,2}; 60 | auto y = lis(x); 61 | for (auto a: y) cout << a << " "; cout << endl; 62 | } 63 | -------------------------------------------------------------------------------- /other/unweighted_interval_scheduling.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Interval Scheduling (unweighted) 3 | // 4 | // Description: 5 | // We are given a set of intervals [b_i, e_i], i = 1, ..., n. 6 | // Find a set of disjoint intervals with maximum cardinality. 7 | // 8 | // Algorithm: 9 | // Greedy. Sort by e_i and then take the intervals greedily. 10 | // 11 | // Consider an optimal scheduling and I = [b, e] be the first interval. 12 | // If there exists an interval I' = [b', e'] with e' < e, then 13 | // we can replace I by I' without increasing the cardinality. 14 | // This shows that there exists a solution that contains the interval 15 | // with the earliest end point. 16 | // 17 | // Complexity: 18 | // O(n log n). 19 | // 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | using namespace std; 28 | 29 | #define fst first 30 | #define snd second 31 | #define all(c) ((c).begin()), ((c).end()) 32 | 33 | template 34 | struct unweighted_interval_scheduling { 35 | struct interval { T b, e; }; 36 | vector is; 37 | void add_interval(T b, T e) { 38 | is.push_back({b, e}); 39 | } 40 | T max_scheduling() { 41 | sort(all(is), [](interval x, interval y) { 42 | return x.e < y.e; 43 | }); 44 | int score = 0; 45 | T sweep = is[0].b - 1; 46 | for (int i = 0; i < is.size(); ++i) { 47 | if (is[i].b >= sweep) { 48 | ++score; 49 | sweep = is[i].e; 50 | } 51 | } 52 | return score; 53 | } 54 | }; 55 | 56 | int main() { 57 | int ncase; scanf("%d", &ncase); 58 | for (int icase = 0; icase < ncase; ++icase) { 59 | int n; scanf("%d", &n); 60 | unweighted_interval_scheduling is; 61 | for (int i = 0; i < n; ++i) { 62 | int u, v; 63 | scanf("%d %d", &u, &v); 64 | is.add_interval(u, v); 65 | } 66 | printf("%d\n", is.max_scheduling()); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /math/quadratic_equation.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Solving a quadratic equation ax^2 + bx + c == 0 3 | // 4 | // Description 5 | // The solution is given by x = (-b \pm sqrt(D)) / 2a, where D = b^2 - 4ac. 
6 | // To avoid the numerical errors, it should be computed by 7 | // if b > 0 then x1 = (-b - sqrt(D))/2a 8 | // otherwise x1 = (-b + sqrt(D))/2a 9 | // and 10 | // x2 = c/(a1*x1) 11 | // 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | using namespace std; 21 | #define fst first 22 | #define snd second 23 | #define all(c) ((c).begin()), ((c).end()) 24 | 25 | double quad_eqn(double a, double b, double c) { // assuming a != 0 26 | double D = b*b - 4*a*c, x1, x2; 27 | if (b > 0) x1 = (-b - sqrt(D))/(2*a); 28 | else x1 = (-b + sqrt(D))/(2*a); 29 | x2 = c / (a * x1); 30 | return max(x1, x2); 31 | } 32 | 33 | 34 | // verify: SPOJ22329 35 | 36 | typedef long long ll; 37 | // number of eggs in x days for the hens with ability d 38 | ll egg(ll d, ll x) { 39 | ll k = quad_eqn(1, 2*d-1, -2*x); 40 | while (k*k + (2*d-1)*k <= 2*x) ++k; 41 | while (k*k + (2*d-1)*k > 2*x) --k; 42 | return k; 43 | } 44 | ll eggs(vector &ds, ll x) { 45 | ll ans = 0; 46 | for (auto d: ds) ans += egg(d, x); 47 | return ans; 48 | } 49 | 50 | int main() { 51 | int ncase; scanf("%d", &ncase); 52 | for (int icase = 0; icase < ncase; ++icase) { 53 | ll n, k; scanf("%lld %lld", &n, &k); 54 | vector ds(n); 55 | for (int i = 0; i < n; ++i) 56 | scanf("%lld", &ds[i]); 57 | 58 | ll lo = 0, hi = 1; // x <= lo ==> eggs < k; x >= hi ==> eggs >= k 59 | while (eggs(ds, hi) < k) { 60 | lo = hi; 61 | hi *= 2; 62 | } 63 | while (lo+1 < hi) { 64 | ll mi = (lo + hi) / 2; 65 | if (eggs(ds, mi) < k) lo = mi; 66 | else hi = mi; 67 | } 68 | printf("%lld\n", hi); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /graph/is_claw_free.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Claw-free graph recognition 3 | // 4 | // Description: 5 | // A graph is claw-free if it contains no claws, i.e., 6 | // o--o--o 7 | // | 8 | // o 9 | // as a subgraph. In other words, in a claw-free graph, 10 | // any neighbors N(v) in contains no triangles. 11 | // 12 | // Algorithm: 13 | // We test the triangle-freeness in each N(v) of G~. 14 | // Here, we can use that |N(v)| <= 2 sqrt(m) due to the 15 | // Turan's theorem (hand-shaking type theorem). 16 | // 17 | // Complexity: 18 | // O(n d^3). Here, d <= 2 sqrt(m). 19 | // Note that d^3 can be reduced to d^\omega by using 20 | // fast matrix multiplication. 
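//   (Equivalently: G is claw-free iff no neighborhood N(v) contains an
//   independent set of size 3, i.e. the complement of the subgraph induced by
//   N(v) is triangle-free; the nested loops below search for such a triple.)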
21 | // 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | using namespace std; 30 | 31 | #define fst first 32 | #define snd second 33 | #define all(c) ((c).begin()), ((c).end()) 34 | 35 | struct graph { 36 | int n; 37 | vector> adj; 38 | graph(int n) : n(n), adj(n) { } 39 | void add_edge(int src, int dst) { 40 | adj[src].push_back(dst); 41 | adj[dst].push_back(src); 42 | } 43 | }; 44 | bool is_claw_free(graph g) { 45 | int threshold = 0; 46 | vector> N(g.n); 47 | for (int s = 0; s < g.n; ++s) { 48 | threshold += g.adj[s].size(); 49 | for (int v: g.adj[s]) N[s].insert(v); 50 | } 51 | threshold = 2 * sqrt(threshold); 52 | for (int s = 0; s < g.n; ++s) { 53 | vector &nbh = g.adj[s]; 54 | if (nbh.size() > threshold) return false; // Turan's theorem 55 | for (int i = 0; i < nbh.size(); ++i) 56 | for (int j = i+1; j < nbh.size(); ++j) 57 | if (!N[nbh[j]].count(nbh[i])) 58 | for (int k = i+2; k < nbh.size(); ++k) 59 | if (!N[nbh[k]].count(nbh[i]) && !N[nbh[k]].count(nbh[j])) 60 | return false; 61 | } 62 | return true; 63 | } 64 | 65 | int main() { 66 | 67 | } 68 | -------------------------------------------------------------------------------- /string/sunday.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Sunday string matching 3 | // 4 | // Description: 5 | // It processes a pattern string to find 6 | // all occurrence of a given text. 7 | // 8 | // Algorithm: 9 | // It simplifies a boyer-moore algorithm. 10 | // 11 | // Complexity: 12 | // O(nm) in worst case; but fastest in a random case. 13 | // 14 | // Verified: 15 | // SPOJ 21524 16 | // 17 | // Comment: 18 | // We recommend you to use KMP or BM in programming contest 19 | // because, there may be some "worst case" instances. 20 | // You can use this algorithm in more "practical" use. 21 | // 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | using namespace std; 30 | 31 | #define fst first 32 | #define snd second 33 | #define all(c) ((c).begin()), ((c).end()) 34 | 35 | struct sunday { 36 | int m; 37 | const char *p; 38 | vector skip; 39 | sunday(const char *p) : p(p), m(strlen(p)) { 40 | skip.assign(0x100, m+1); 41 | for (int i = 0; i < m; ++i) 42 | skip[p[i]] = m - i; 43 | } 44 | vector match(const char s[]) { 45 | int n = strlen(s); 46 | vector occur; 47 | for (int i = 0; i <= n - m; ) { 48 | if (memcmp(p, s + i, m) == 0) { 49 | /* match at s[i, ..., i+m-1] */ 50 | occur.push_back(i); 51 | } 52 | i += skip[s[i + m]]; 53 | } 54 | return occur; 55 | } 56 | }; 57 | 58 | int main() { 59 | int ncase; scanf("%d", &ncase); 60 | for (int icase = 0; icase < ncase; ++icase) { 61 | if (icase > 0) printf("\n"); 62 | char s[1000010], p[1000010]; 63 | scanf("%s %s", s, p); 64 | sunday M(p); 65 | auto v = M.match(s); 66 | if (v.empty()) { 67 | printf("Not Found\n"); 68 | } else { 69 | printf("%d\n", v.size()); 70 | for (int i = 0; i < v.size(); ++i) { 71 | if (i > 0) printf(" "); 72 | printf("%d", v[i]+1); 73 | } 74 | printf("\n"); 75 | } 76 | } 77 | } 78 | 79 | -------------------------------------------------------------------------------- /combinatorics/permutation_hash.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Permutation Hash 3 | // 4 | // Description: 5 | // hash_perm gives one-to-one correspondence between 6 | // permutations over [0,n) and the integer less than n!. 7 | // unhash_perm is the inverse function of hash_perm. 
8 | // 9 | // Algorithm: 10 | // The idea is based on the Fisher-Yates shuffle algorithm: 11 | // while n > 1: 12 | // swap(x[n-1], x[rand() % n]); 13 | // --n; 14 | // For an integer given by a factorial number system: 15 | // hash = d_0 (n-1)! + d_1 (n-2)! + ... + d_{n-1} 0! 16 | // The algorithm computes 17 | // while n > 1: 18 | // swap(x[n-1], x[d_{n-1}] 19 | // --n; 20 | // 21 | // Complexity: 22 | // O(n) time, O(n) space. 23 | // 24 | // Verification: 25 | // self. 26 | 27 | 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | 35 | using namespace std; 36 | 37 | #define fst first 38 | #define snd second 39 | #define all(c) ((c).begin()), ((c).end()) 40 | 41 | typedef long long ll; 42 | 43 | vector unhash_perm(ll r, int n) { 44 | vector x(n); 45 | iota(all(x), 0); 46 | for (; n > 0; --n) { 47 | swap(x[n-1], x[r % n]); 48 | r /= n; 49 | } 50 | return x; 51 | } 52 | ll hash_perm(vector x) { 53 | int n = x.size(); 54 | vector y(n); 55 | for (int i = 0; i < n; ++i) y[x[i]] = i; 56 | ll c = 0, fac = 1; 57 | for (; n > 1; --n) { 58 | c += fac * x[n-1]; fac *= n; 59 | swap(x[n-1], x[y[n-1]]); 60 | swap(y[n-1], y[x[y[n-1]]]); 61 | } 62 | return c; 63 | } 64 | 65 | int main() { 66 | int n = 9; 67 | vector x(n); 68 | iota(all(x), 0); 69 | do { 70 | ll r = hash_perm(x); 71 | cout << r << ": "; 72 | auto a = unhash_perm(r, n); 73 | for (int i = 0; i < n; ++i) { 74 | cout << a[i] << " "; 75 | if (a[i] != x[i]) exit(-1); 76 | } 77 | cout << endl; 78 | } while (next_permutation(all(x))); 79 | return 0; 80 | } 81 | -------------------------------------------------------------------------------- /math/lattice_below_line.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Number of lattice points below a line 3 | // 4 | // Description: 5 | // 6 | // Let a, b, n, m be nonnegative integers. The task is to compute 7 | // sum_{i in [0,n)} floor((a + ib)/m). 8 | // 9 | // We compute this quantity in two directions alternately. 10 | // First, let 11 | // a = (a/m) m + (a%m), 12 | // b = (b/m) m + (b%m). 13 | // Then the quantity is 14 | // sum [(a/m)+i*(b/m)] + floor(((a%m) + i(b%m))/m) 15 | // Here, the first term is analytically evaluated. 16 | // The second term is zero if b%m == 0. Otherwise, the task is 17 | // reduced to compute 18 | // sum_{i in [0,n)} floor((a + ib)/m) 19 | // where a < m, b < m. By changing the axes, this quantity is 20 | // sum_{i in [0,n')} floor((a' + ib')/m') 21 | // where 22 | // n' = (a + b n) / m, 23 | // a' = (a + b n) % m, 24 | // b' = m, 25 | // m' = b. 26 | // 27 | // We evaluate the number of iterations. Since the computation 28 | // between b and m is the same as the one of the Euclidean 29 | // algorithm. Thus it terminates in O(log m) time. 30 | // 31 | // Complexity: 32 | // 33 | // O(log m). 
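//   Sanity check: n = 3, a = 1, b = 2, m = 3 gives
//   floor(1/3) + floor(3/3) + floor(5/3) = 0 + 1 + 1 = 2.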
34 | // 35 | // Verified: 36 | // 37 | // Somewhere 38 | 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | 45 | using namespace std; 46 | 47 | #define fst first 48 | #define snd second 49 | #define all(c) ((c).begin()), ((c).end()) 50 | 51 | // 52 | // sum_{0<=i= 0, a >= 0, b >= 0 53 | // 54 | // 55 | using Int = long long; 56 | Int latticeBelowLine(Int n, Int a, Int b, Int m) { 57 | Int ans = 0; 58 | while (m) { 59 | ans += (n-1)*n/2*(b/m) + n*(a/m); 60 | a %= m; 61 | b %= m; 62 | auto z = (a+b*n); 63 | a = z%m; 64 | n = z/m; 65 | swap(b, m); 66 | } 67 | return ans; 68 | } 69 | 70 | int main() { 71 | srand(time(0)); 72 | Int a = rand(), b = rand(), n = rand(), m = rand(); 73 | cout << latticeBelowLine(n, a, b, m) << endl; 74 | } 75 | -------------------------------------------------------------------------------- /data_structure/persistent_heap.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Persistent Heap (based on randomized meldable heap) 3 | // 4 | // Description: 5 | // Meldable heap with O(1) copy. 6 | // 7 | // Algorithm: 8 | // It is a persistence version of randomized meldable heap. 9 | // 10 | // Complexity: 11 | // O(log n) time/space for each operations. 12 | // 13 | // 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | using namespace std; 29 | 30 | #define fst first 31 | #define snd second 32 | #define all(c) ((c).begin()), ((c).end()) 33 | 34 | template 35 | struct persistent_heap { 36 | struct node { 37 | T x; 38 | node *l, *r; 39 | } *root; 40 | persistent_heap() : root(0) { } 41 | node *merge(node *a, node *b) { 42 | if (!a || !b) return a ? a : b; 43 | if (a->x > b->x) swap(a, b); 44 | if (rand() % 2) return new node({a->x, merge(a->l, b), a->r}); 45 | else return new node({a->x, a->l, merge(a->r, b)}); 46 | } 47 | void merge(persistent_heap b) { root = merge(root, b.root); } 48 | void push(T x) { root = merge(root, new node({x})); } 49 | void pop() { root = merge(root->l, root->r); } 50 | T top() const { return root->x; } 51 | }; 52 | 53 | 54 | int main() { 55 | priority_queue, greater> que; 56 | persistent_heap heap; 57 | int n = 10000; 58 | for (int i = 0; i < n; ++i) { 59 | int x = rand(); 60 | heap.push(x); 61 | que.push(x); 62 | } 63 | auto tmp_que = que; 64 | auto tmp_heap = heap; 65 | while (!que.empty()) { 66 | if (heap.top() != que.top()) cout << "***" << endl; 67 | que.pop(); 68 | heap.pop(); 69 | } 70 | heap = tmp_heap; 71 | que = tmp_que; 72 | 73 | while (!que.empty()) { 74 | if (heap.top() != que.top()) cout << "***" << endl; 75 | que.pop(); 76 | heap.pop(); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /other/weighted_interval_scheduling.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Interval Scheduling (weighted) 3 | // 4 | // Description: 5 | // We are given a set of intervals [b_i, e_i] with weight w_i, i = 1, ..., n. 6 | // Find a set of disjoint intervals with maximum weight. 7 | // 8 | // Algorithm: 9 | // Dynamic programming. Suppose e_1 <= e_2 <= ... <= e_n. 10 | // Let f(k) be the optimal score for intervals {1, ..., k}. 11 | // Then, we have 12 | // f(k) = max( f(p(k)), f(k-1) ), 13 | // where 14 | // p(k) = max { j : e_j <= b_k }. 15 | // p(k) is computed in O(log n). Thereore the above DP is 16 | // performed in O(n log n). 
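//   (With the weights written out, the recurrence is
//      f(k) = max( f(p(k)) + w_k, f(k-1) ),
//   which is what max_scheduling() below computes.)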
17 | // 18 | // Complexity: 19 | // O(n log n). 20 | // 21 | // Verified: 22 | // SPOJ11515. 23 | 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | using namespace std; 31 | 32 | #define fst first 33 | #define snd second 34 | #define all(c) ((c).begin()), ((c).end()) 35 | 36 | template 37 | struct weighted_interval_scheduling { 38 | struct interval { T b, e, w; }; 39 | vector is; 40 | void add_interval(T b, T e, T w) { 41 | is.push_back({b, e, w}); 42 | } 43 | T max_scheduling() { 44 | int n = is.size(); 45 | sort(all(is), [](interval x, interval y) { return x.e < y.e; }); 46 | vector p(n); 47 | for (int i = 0; i < n; ++i) { 48 | auto cond = [](T key, interval x) { return key < x.e; }; 49 | p[i] = upper_bound(all(is), is[i].b, cond) - is.begin(); 50 | } 51 | vector f(n+1); 52 | for (int i = 0; i < n; ++i) 53 | f[i+1] = max(f[p[i]] + is[i].w, f[i]); 54 | return f[n]; 55 | } 56 | }; 57 | 58 | int main() { 59 | int ncase; scanf("%d", &ncase); 60 | for (int icase = 0; icase < ncase; ++icase) { 61 | int n; scanf("%d", &n); 62 | weighted_interval_scheduling is; 63 | for (int i = 0; i < n; ++i) { 64 | int u, v; 65 | scanf("%d %d", &u, &v); 66 | is.add_interval(u, v, 1); 67 | } 68 | printf("%d\n", is.max_scheduling()); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /numeric/ODE_runge_kutta.cc: -------------------------------------------------------------------------------- 1 | // 2 | // The 4th order Runge-Kutta ODE solver 3 | // 4 | // Description: 5 | // It numerically solves an ordinary differential equation 6 | // dx/dt = f(t, x) 7 | // 8 | // Algorithm: 9 | // The 4th order Runge-Kutta algorithm. Let 10 | // k1 = h f(t, x) 11 | // k2 = h f(t+h/2, x+k1/2) 12 | // k3 = h f(t+h/2, x+k2/2) 13 | // k4 = h f(t+h, x+k3) 14 | // Then 15 | // x(t+h) = x(t) + (k1 + 2 k2 + 2 k3 + k4) / 6. 16 | // 17 | // This is the most commonly used ODE solver, 18 | // and referred as *THE* Runge Kutta method or RK4. 19 | // RK4 with sufficiently small step size gives usually 20 | // sufficient result in solving non-stiff ODEs. 
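//   (The test in main() below integrates dx/dt = t*x with x(0) = 1 over [0,1];
//   the exact solution is x(t) = exp(t^2/2), so both solvers are compared
//   against exp(1/2).)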
21 | // 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | using namespace std; 30 | 31 | #define fst first 32 | #define snd second 33 | #define all(c) ((c).begin()), ((c).end()) 34 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 35 | 36 | 37 | template 38 | double runge_kutta(F f, double t, double tend, double x) { 39 | const double EPS = 1e-5; 40 | for (double h = EPS; t < tend; ) { 41 | if (t + h >= tend) h = tend - t; 42 | double k1 = h * f(t , x ); 43 | double k2 = h * f(t + h/2, x + k1/2); 44 | double k3 = h * f(t + h/2, x + k2/2); 45 | double k4 = h * f(t + h , x + k3 ); 46 | x += (k1 + 2 * k2 + 2 * k3 + k4) / 6; 47 | t += h; // (t, x) 48 | } 49 | return x; 50 | } 51 | 52 | // for comparison 53 | template 54 | double euler(F f, double t, double tend, double x) { 55 | const double EPS = 1e-5; 56 | for (double h = EPS; t < tend; ) { 57 | if (t + h >= tend) h = tend - t; 58 | x += h * f(t, x); 59 | t += h; 60 | } 61 | return x; 62 | } 63 | 64 | int main() { 65 | auto f = [](double t, double x) { 66 | return t * x; 67 | }; 68 | printf("%f\n", runge_kutta(f, 0, 1, 1)); 69 | printf("%f\n", euler(f, 0, 1, 1)); 70 | printf("%f\n", exp(1.0/2.0)); 71 | } 72 | 73 | -------------------------------------------------------------------------------- /data_structure/fenwick_tree_2d.cc: -------------------------------------------------------------------------------- 1 | // 2 | // 2D Fenwick Tree 3 | // 4 | // Description: 5 | // A data structure that allows 6 | // add(k,a): x[k] += a 7 | // sum(k): sum { x[i] : i <= k } 8 | // where k denotes a point in [0,n)x[0,m). 9 | // 10 | // Algorithm: 11 | // Fenwick tree of Fenwick tree. 12 | // 13 | // Complexity: 14 | // O((log n)^2) time, O(n^2) space. 15 | // 16 | // Verified: 17 | // SPOJ1029 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | using namespace std; 25 | 26 | #define fst first 27 | #define snd second 28 | #define all(c) ((c).begin()), ((c).end()) 29 | 30 | template 31 | struct fenwick_tree_2d { 32 | vector> x; 33 | fenwick_tree_2d(int n, int m) : x(n, vector(m)) { } 34 | void add(int k1, int k2, int a) { // x[k] += a 35 | for (; k1 < x.size(); k1 |= k1+1) 36 | for (int k=k2; k < x[k1].size(); k |= k+1) x[k1][k] += a; 37 | } 38 | T sum(int k1, int k2) { // return x[0] + ... 
+ x[k] 39 | T s = 0; 40 | for (; k1 >= 0; k1 = (k1&(k1+1))-1) 41 | for (int k=k2; k >= 0; k = (k&(k+1))-1) s += x[k1][k]; 42 | return s; 43 | } 44 | }; 45 | 46 | void doit() { 47 | int n; scanf("%d", &n); 48 | fenwick_tree_2d FT(n,n); 49 | while (1) { 50 | char cmd[5]; 51 | scanf("%s", cmd); 52 | switch (cmd[1]) { 53 | case 'E': { 54 | int i, j, a; 55 | scanf("%d %d %d", &i, &j, &a); 56 | int b = FT.sum(i, j) + FT.sum(i-1,j-1) - FT.sum(i-1,j) - FT.sum(i,j-1); 57 | FT.add(i, j, a - b); 58 | } 59 | break; 60 | case 'U': { 61 | int i, j, k, l; 62 | scanf("%d %d %d %d", &i, &j, &k, &l); 63 | int b = FT.sum(k, l) + FT.sum(i-1,j-1) - FT.sum(i-1,l) - FT.sum(k,j-1); 64 | printf("%d\n", b); 65 | break; 66 | } 67 | case 'N': { 68 | printf("\n"); 69 | return; 70 | } 71 | } 72 | } 73 | } 74 | int main() { 75 | int cases; 76 | scanf("%d", &cases); 77 | for (int icases = 0; icases < cases; ++icases) doit(); 78 | } 79 | -------------------------------------------------------------------------------- /string/suffix_array.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Suffix Array (Manber and Myers' O(n (log n)^2)) 3 | // 4 | // Description: 5 | // For a string s, its suffix array is a lexicographically sorted 6 | // list of suffixes of s. For example, for s = "abbab", its SA is 7 | // 0 ab 8 | // 1 abbab 9 | // 2 b 10 | // 3 bab 11 | // 4 bbab 12 | // 13 | // Algorithm: 14 | // Manber and Myers' doubling algorithm. 15 | // Suppose that suffixes are sorted by their first h characters. 16 | // Then, the comparison of first 2h characters is computed by 17 | // suf(i) <_2h suf(j) == if (suf(i) !=_h suf(j)) suf(i) <_h suf(j) 18 | // else suf(i+h) <_h suf(j+h) 19 | // 20 | // Complexity: 21 | // O(n (log n)^2). 22 | // If we use radix sort instead of standard sort, 23 | // we obtain an O(n log n) algorithm. However, it does not improve 24 | // practical performance so much. 25 | // 26 | // Verify: 27 | // SPOJ 6409: SARRAY (80 pt) 28 | // 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | 37 | using namespace std; 38 | 39 | #define fst first 40 | #define snd second 41 | #define all(c) ((c).begin()), ((c).end()) 42 | 43 | struct suffix_array { 44 | int n; 45 | vector x; 46 | suffix_array(const char *s) : n(strlen(s)), x(n) { 47 | vector r(n), t(n); 48 | for (int i = 0; i < n; ++i) r[x[i] = i] = s[i]; 49 | for (int h = 1; t[n-1] != n-1; h *= 2) { 50 | auto cmp = [&](int i, int j) { 51 | if (r[i] != r[j]) return r[i] < r[j]; 52 | return i+h < n && j+h < n ? r[i+h] < r[j+h] : i > j; 53 | }; 54 | sort(all(x), cmp); 55 | for (int i = 0; i+1 < n; ++i) t[i+1] = t[i] + cmp(x[i], x[i+1]); 56 | for (int i = 0; i < n; ++i) r[x[i]] = t[i]; 57 | } 58 | } 59 | int operator[](int i) const { return x[i]; } 60 | }; 61 | 62 | int main() { 63 | char s[100010]; 64 | scanf("%s", s); 65 | suffix_array sary(s); 66 | for (int i = 0; i < sary.n; ++i) 67 | printf("%d\n", sary[i]); 68 | } 69 | -------------------------------------------------------------------------------- /math/permanent.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Permanent 3 | // 4 | // Description: 5 | // Permanent of n x n matrix A is defined by 6 | // perm(A) := \sum_{sigma: permutation} \prod_{i=1}^n A_{i sigma(i)}. 7 | // Note that the determinant is defined by 8 | // det(A) := \sum_{sigma: permutation} sgn(sigma) \prod_{i=1}^n A_{i sigma(i)}. 9 | // Thus, they differ only in the sgn(sigma) factor.
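// For a 2 x 2 matrix A = [[a, b], [c, d]] (added illustration),
//   perm(A) = a d + b c, whereas det(A) = a d - b c.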
10 | // Computing permanent is known to be #P-hard (even for mod 3) 11 | // 12 | // Algorithm: 13 | // Ryser's inclusion exclusion princple. Let V = {1, ..., n}. 14 | // perm(A) = sum_{S subseteq V} (-1)^{|V - S|} a(S), 15 | // where 16 | // a(S) = prod_{j in V} sum_{i in S} A_{i j}. 17 | // 18 | // Complexity: 19 | // O(n 2^n). 20 | // 21 | // Verified: 22 | // SPOJ423 23 | // 24 | // References: 25 | // H. J. Ryser (1963): 26 | // Combinatorial Mathematics. 27 | // The Mathematical Association of America. 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | 36 | using namespace std; 37 | 38 | #define fst first 39 | #define snd second 40 | #define all(c) ((c).begin()), ((c).end()) 41 | 42 | typedef long long ll; 43 | typedef vector vec; 44 | typedef vector mat; 45 | ll permanent(mat A) { 46 | int n = A.size(); 47 | vector a(n); // row sum 48 | vector io(n); // included or not 49 | ll perm = 0; 50 | for (int i = 1; i < (1<=1 13 | 14 | // Complexity: 15 | // For m number of coins and n amount of money: 16 | // O(mn) for time 17 | // O(n) for space 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | using namespace std; 25 | 26 | // find the minimum number of coin changes and print the witness 27 | int coin_change(int coins[], int cn, int money){ 28 | int table[money+1]; 29 | table[0] = 0; 30 | int pred[money+1]; 31 | for (int i=0; i<=money;i++){ 32 | pred[i] = 0; 33 | } 34 | for (int j=1; j<=money;j++){ 35 | table[j] = INT_MAX; 36 | } 37 | for (int i=1;i<=money;i++){ 38 | int mini = table[i]; 39 | for (int j=0; j= coins[j]){ 41 | mini = min(mini, table[i-coins[j]]+1); 42 | pred[i] = j; 43 | } 44 | } 45 | table[i] = mini; 46 | } 47 | int m = money; 48 | while (m != 0){ 49 | cout<<"change coin: "< d_p >= ... >= d_n. Such p is obtained by a 17 | // binary search; therefore it runs in O(log n) time. 18 | // Therefore the total complexity is O(n log n). 19 | // 20 | // Verified: 21 | // UVA 11414: Dream 22 | // UVA 10720: Graph Construction 23 | 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | using namespace std; 31 | 32 | #define fst first 33 | #define snd second 34 | #define all(c) ((c).begin()), ((c).end()) 35 | 36 | bool is_graphic(vector d) { 37 | int n = d.size(); 38 | sort(all(d), greater()); 39 | vector s(n+1); 40 | for (int i = 0; i < n; ++i) s[i+1] = s[i] + d[i]; 41 | if (s[n] % 2) return false; 42 | for (int k = 1; k <= n; ++k) { 43 | int p = distance(d.begin(), 44 | lower_bound(d.begin()+k, d.end(), k, greater())); 45 | if (s[k] > k * (p-1) + s[n] - s[p]) return false; 46 | } 47 | return true; 48 | } 49 | 50 | // UVA 10720: Graph Construction 51 | /* 52 | int main() { 53 | int ncase; scanf("%d", &ncase); 54 | for (int icase = 0; icase < ncase; ++icase) { 55 | int n; scanf("%d", &n); 56 | vector d(n); 57 | for (int i = 0; i < n; ++i) 58 | scanf("%d", &d[i]); 59 | printf("%s\n", (is_graphic(d) ? "Yes" : "No")); 60 | } 61 | } 62 | */ 63 | int main() { 64 | for (int n; scanf("%d", &n); ) { 65 | if (n == 0) break; 66 | vector d(n); 67 | for (int i = 0; i < n; ++i) 68 | scanf("%d", &d[i]); 69 | printf("%s\n", (is_graphic(d) ? "Possible" : "Not possible")); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /graph/kcore.cc: -------------------------------------------------------------------------------- 1 | // 2 | // k-Core Decomposition 3 | // 4 | // Description: 5 | // This finds a k-Core decomposition of given graph G. 
6 | // Here, k-core decomposition is a layered decomposition 7 | // V \supset C_1 \supset C_2 \supset ... \supset C_m 8 | // such that each C_k is a maximal subgraph with minimum degree >= k. 9 | // The largest such k is known as the degeneracy of the graph. 10 | // 11 | // Algorithm: 12 | // Greedy. Repeatedly pick a vertex u of smallest remaining degree and 13 | // remove u together with its incident edges. 14 | // 15 | // Complexity: 16 | // O(m log n). 17 | // 18 | // References: 19 | // S. B. Seidman (1983): 20 | // Network structure and minimum degree. 21 | // Social Networks, vol. 5, 269-287. 22 | // 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | using namespace std; 31 | 32 | struct edge { 33 | int src, dst; 34 | }; 35 | struct k_core_decomposition { 36 | vector edges; 37 | void add_edge(int src, int dst) { 38 | edges.push_back({src, dst}); 39 | } 40 | int n; 41 | vector> adj; 42 | void make_graph(int n_ = 0) { 43 | n = n_; 44 | for (auto e: edges) 45 | n = max(n, max(e.src, e.dst)+1); 46 | adj.resize(n); 47 | for (auto e: edges) 48 | adj[e.src].push_back(e); 49 | } 50 | // The subgraph C_k := { v : kindex[v] >= k } is a maximal subgraph with minimum degree >= k 51 | vector kindex; 52 | int solve() { 53 | typedef pair node; 54 | priority_queue, greater> Q; 55 | kindex.assign(n, -1); 56 | vector degree(n); 57 | for (int u = 0; u < n; ++u) 58 | Q.push({degree[u] = adj[u].size(), u}); 59 | while (!Q.empty()) { 60 | auto p = Q.top(); Q.pop(); 61 | if (degree[p.second] < p.first) continue; 62 | kindex[p.second] = degree[p.second]; 63 | for (edge e: adj[p.second]) 64 | if (kindex[e.dst] < 0) 65 | Q.push({--degree[e.dst], e.dst}); 66 | } 67 | return *max_element(kindex.begin(), kindex.end()); 68 | } 69 | }; 70 | 71 | int main() { 72 | cout << plus()(2,3) << endl; 73 | } 74 | -------------------------------------------------------------------------------- /data_structure/sqrt_array.cc: -------------------------------------------------------------------------------- 1 | // 2 | // SQRT Array 3 | // 4 | // Description: 5 | // An array with o(n) deletion and insertion 6 | // 7 | // Algorithm: 8 | // Decompose array into O(sqrt(n)) subarrays. 9 | // Then every operation is performed in O(sqrt(n)). 10 | // 11 | // Complexity: 12 | // O(sqrt(n)); however, due to the small constant factor, 13 | // it is comparable with binary search trees. 14 | // If only deletion is required, it is a better choice.
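// Example (added illustration): with n = 9 the array is stored as roughly
// sqrt(9) = 3 blocks of 3 elements; operator[], insert and erase first
// skip O(sqrt(n)) whole blocks and then touch O(sqrt(n)) elements inside
// a single block, and insert re-splits a block once it grows past 2 sqrt(n).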
15 | // 16 | // Verified: 17 | // erase: SPOJ16016 18 | // 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | using namespace std; 26 | 27 | #define fst first 28 | #define snd second 29 | #define all(c) ((c).begin()), ((c).end()) 30 | 31 | template 32 | struct sqrt_array { 33 | int n; 34 | vector> x; 35 | sqrt_array(int n) : n(n) { 36 | int sqrtn = sqrt(n); 37 | for (int t = n; t > 0; t -= sqrtn) 38 | x.push_back(vector(min(t, sqrtn))); 39 | } 40 | void erase(int k) { 41 | --n; 42 | int i = 0; 43 | for (; k >= x[i].size(); k -= x[i++].size()); 44 | x[i].erase(x[i].begin()+k); 45 | if (x[i].empty()) x.erase(x.begin()+i); 46 | } 47 | void insert(int k, T a = T()) { 48 | if (n++ == 0) x.push_back({}); 49 | int i = 0; 50 | for (; i < x.size() && k >= x[i].size(); k -= x[i++].size()); 51 | if (i == x.size()) x[--i].push_back(a); 52 | else x[i].insert(x[i].begin()+k, a); 53 | int sqrtn = sqrt(n); 54 | if (x[i].size() > 2*sqrtn) { 55 | vector y(x[i].begin()+sqrtn, x[i].end()); 56 | x[i].resize(sqrtn); 57 | x.insert(x.begin()+i+1, y); 58 | } 59 | } 60 | T &operator[](int k) { 61 | int i = 0; 62 | for (; k >= x[i].size(); k -= x[i++].size()); 63 | return x[i][k]; 64 | } 65 | int size() const { return n; } 66 | }; 67 | 68 | int main() { 69 | int n = 100; 70 | sqrt_array x(0); 71 | cout << "here" << endl; 72 | for (int i = 0; i < n; ++i) 73 | x.insert(rand() % max(x.size(), 1), i); 74 | for (int i = 0; i < n; ++i) 75 | x[i] = i; 76 | for (int i = 0; i < n; ++i) 77 | x.erase(rand() % x.size()); 78 | } 79 | -------------------------------------------------------------------------------- /graph/topological_sort.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Topological Sort 3 | // 4 | // 5 | // Description: 6 | // 7 | // Let G = (V, E) be a graph. An ordering ord: [n] -> V is a topological 8 | // ordering if i > j then there is no edge from ord[i] to ord[j]. 9 | // G has a topological ordering if and only if G is DAG. 10 | // 11 | // A topological order can be obtained in O(n + m) time by using 12 | // an iterative method (Kahn's algorithm) or a recursive method 13 | // (by Tarjan's algorithm). The following implementation is a 14 | // Kahn's algorithm. 15 | // 16 | // Note that if you want to find the all topological orders, 17 | // 18 | // 19 | // Complexity: 20 | // 21 | // O(n + m) 22 | // 23 | // 24 | // Verified: 25 | // 26 | // AOJ GPL_4_B 27 | // 28 | // References: 29 | // 30 | // Arthur B. Kahn (1962): 31 | // "Topological sorting of large networks". 32 | // Communications of the ACM, 5 (11): 558--562. 33 | // 34 | #include 35 | using namespace std; 36 | 37 | #define fst first 38 | #define snd second 39 | #define all(c) ((c).begin()), ((c).end()) 40 | 41 | struct Graph { 42 | int n; 43 | vector> adj; 44 | Graph(int n) : n(n), adj(n) { } 45 | void addEdge(int u, int v) { 46 | adj[u].push_back(v); 47 | } 48 | }; 49 | 50 | // return empty list if g has no topological order 51 | vector topologicalSort(Graph g) { 52 | vector deg(g.n); 53 | for (int u = 0; u < g.n; ++u) 54 | for (int v: g.adj[u]) ++deg[v]; 55 | vector stack; 56 | for (int u = 0; u < g.n; ++u) 57 | if (!deg[u]) stack.push_back(u); 58 | 59 | vector order; 60 | while (!stack.empty()) { 61 | int u = stack.back(); stack.pop_back(); 62 | order.push_back(u); 63 | for (int v: g.adj[u]) 64 | if (!--deg[v]) stack.push_back(v); 65 | } 66 | return order.size() == g.n ? 
order : vector(); 67 | } 68 | 69 | int main() { 70 | int n, m; cin >> n >> m; 71 | Graph g(n); 72 | for (int i = 0; i < m; ++i) { 73 | int u, v; cin >> u >> v; 74 | g.addEdge(u, v); 75 | } 76 | auto ord = topologicalSort(g); 77 | for (int i = 0; i < ord.size(); ++i) { 78 | if (i > 0) cout << " "; 79 | cout << ord[i]; 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /dynamic_programming/knapsack.cc: -------------------------------------------------------------------------------- 1 | // 2 | // 0/1 Knapsack Problem (Dynamic Programming) 3 | // 4 | // Description: 5 | // We are given a set of items with profit p_i and weight w_i. 6 | // The problem is to find a subset of items that maximizes 7 | // the total profit under the total weight less than some capacity c. 8 | // 9 | // 1) c is small ==> weight DP 10 | // 2) p is small ==> price DP 11 | // 3) both are large ==> branch and bound. 12 | // 13 | // Algorithm: 14 | // weight DP: 15 | // Let F[a,i] be the max profit for weight <= a using items 1 ... i. 16 | // Then we have 17 | // F[a, i] = max(F[a, i-1], F[a-w[i], i-1] + p[i]). 18 | // The solution is F[c,n]. 19 | // 20 | // Profit DP: 21 | // Let F[a,i] be the min weight for profit >= a using items 1 ... i. 22 | // Then we have 23 | // F[a, i] = min(F[a, i-1], F[a-p[i], i-1] + w[i]). 24 | // The solution is max { a : F[a,n] <= c } 25 | // 26 | // Complexity: 27 | // O(n c) for weight DP. 28 | // O(n (sum p)) for profit DP. 29 | // 30 | // Verified: 31 | // SPOJ3321. 32 | 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | using namespace std; 39 | 40 | // weight DP 41 | // Complexity: O(nc) 42 | // 43 | // F[a] := maximum profit for weight >= a 44 | // 45 | int knapsackW(vector p, vector w, int c) { 46 | int n = w.size(); 47 | vector F(c+1); 48 | for (int i = 0; i < n; ++i) 49 | for (int a = c; a >= w[i]; --a) 50 | F[a] = max(F[a], F[a-w[i]] + p[i]); 51 | return F[c]; 52 | } 53 | 54 | // Profit DP 55 | // Complexity: O(n sum p) 56 | // 57 | // F[a] := minimum weight for profit a 58 | // 59 | int knapsackP(vector p, vector w, int c) { 60 | int n = p.size(), P = accumulate(all(p), 0); 61 | vector F(P+1, c+1); F[0] = 0; 62 | for (int i = 0; i < n; ++i) 63 | for (int a = P; a >= p[i]; --a) 64 | F[a] = min(F[a], F[a-p[i]] + w[i]); 65 | for (int a = P; a >= 0; --a) 66 | if (F[a] <= c) return a; 67 | } 68 | 69 | 70 | int main() { 71 | vector p = {3,1,4,1,5,9}; 72 | vector w = {2,6,5,3,5,8}; 73 | int c = 10; 74 | 75 | cout << knapsackW(p, w, c) << endl; 76 | cout << knapsackP(p, w, c) << endl; 77 | } 78 | -------------------------------------------------------------------------------- /data_structure/persistent_union_find.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Persistence Union Find 3 | // 4 | // Description: 5 | // Use persistent array instead of standard array in union find data structure 6 | // 7 | // Complexity: 8 | // O(a* T(n)), where T(n) is a complexity of persistent array 9 | // 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | using namespace std; 22 | 23 | #define fst first 24 | #define snd second 25 | 26 | template 27 | struct persistent_array { 28 | const int n; 29 | T *arr; 30 | vector> op; 31 | persistent_array(int n, T x = T(0)) : n(n) { 32 | arr = new T[n]; 33 | fill(arr, arr+n, x); 34 | } 35 | const T& get(int k) { 36 | for (int i = op.size()-1; i >= 0; --i) 37 | if 
(op[i].fst == k) return op[i].snd; 38 | return arr[k]; 39 | } 40 | const T& set(int k, const T &x) { 41 | op.push_back({k, x}); 42 | if (op.size()*op.size() > n) { 43 | T *new_arr = new T[n]; 44 | copy(arr, arr+n, new_arr); 45 | arr = new_arr; 46 | for (int i = 0; i < op.size(); ++i) 47 | arr[op[i].fst] = op[i].snd; 48 | op.clear(); 49 | } 50 | return x; 51 | } 52 | }; 53 | 54 | struct persistent_union_find { 55 | persistent_array p; 56 | persistent_union_find(int n) : p(n, -1) { } 57 | bool unite(int u, int v) { 58 | if ((u = root(u)) == (v = root(v))) return false; 59 | if (p.get(u) > p.get(v)) swap(u, v); 60 | p.set(u, p.get(u) + p.get(v)); p.set(v, u); 61 | return true; 62 | } 63 | bool find(int u, int v) { return root(u) == root(v); } 64 | int root(int u) { return p.get(u) < 0 ? u : p.set(u, root(p.get(u))); } 65 | int size(int u) { return -p.get(root(u)); } 66 | }; 67 | 68 | int main() { 69 | persistent_union_find uf(8); 70 | 71 | uf.unite(0,1); 72 | uf.unite(1,2); 73 | uf.unite(2,3); 74 | 75 | uf.unite(4,5); 76 | uf.unite(5,6); 77 | uf.unite(6,7); 78 | 79 | persistent_union_find tmp = uf; 80 | 81 | cout << uf.find(0,7) << endl; 82 | uf.unite(3,4); 83 | cout << uf.find(0,7) << endl; 84 | 85 | cout << tmp.find(0,7) << endl; 86 | } 87 | -------------------------------------------------------------------------------- /string/palindromic_tree.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Palindromic Tree 3 | // 4 | // Description: 5 | // It is a tre data structure for a string s such that 6 | // 1) each node corresponds to a palindromic substring, 7 | // 2) next[p][c] is a link from "p" to "cpc" 8 | // 3) suf[p] is a maximal palindromic suffix of p 9 | // 10 | // Algorithm: 11 | // Manacher-like construction 12 | // 13 | // Complexity: 14 | // O(n) 15 | // 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | using namespace std; 24 | 25 | #define fst first 26 | #define snd second 27 | #define all(c) ((c).begin()), ((c).end()) 28 | 29 | struct palindromic_tree { 30 | vector> next; 31 | vector suf, len; 32 | int new_node() { 33 | next.push_back(vector(256,-1)); 34 | suf.push_back(0); 35 | len.push_back(0); 36 | return next.size() - 1; 37 | } 38 | palindromic_tree(char *s) { 39 | len[new_node()] = -1; 40 | len[new_node()] = 0; 41 | int t = 1; 42 | for (int i = 0; s[i]; ++i) { 43 | int p = t; 44 | for (; i-1-len[p] < 0 || s[i-1-len[p]] != s[i]; p = suf[p]); 45 | if ((t = next[p][s[i]]) >= 0) continue; 46 | t = new_node(); 47 | len[t] = len[p] + 2; 48 | next[p][s[i]] = t; 49 | if (len[t] == 1) { 50 | suf[t] = 1; // EMPTY 51 | } else { 52 | p = suf[p]; 53 | for (; i-1-len[p] < 0 || s[i-1-len[p]] != s[i]; p = suf[p]); 54 | suf[t] = next[p][s[i]]; 55 | } 56 | } 57 | } 58 | void display() { 59 | vector buf; 60 | function rec = [&](int p) { 61 | if (len[p] > 0) { 62 | for (int i = buf.size()-1; i >= 0; --i) cout << buf[i]; 63 | for (int i = len[p] % 2; i < buf.size(); ++i) cout << buf[i]; 64 | cout << endl; 65 | } 66 | for (int a = 0; a < 256; ++a) { 67 | if (next[p][a] >= 0) { 68 | buf.push_back(a); 69 | rec(next[p][a]); 70 | buf.pop_back(); 71 | } 72 | } 73 | }; 74 | rec(0); rec(1); 75 | } 76 | }; 77 | 78 | char s[30010]; 79 | int main() { 80 | int k; 81 | scanf("%d %s", &k, s); 82 | palindromic_tree T(s); 83 | T.display(); 84 | } 85 | -------------------------------------------------------------------------------- /number_theory/primes.cc: 
-------------------------------------------------------------------------------- 1 | // 2 | // Generate primes 3 | // 4 | // Description: 5 | // Generating primes from 1 to n. 6 | // 7 | // Algorithm: 8 | // Segmented sieve. It first enumerates small primes (upto sqrt(n)) 9 | // Then it sieve the rest numbers. 10 | // 11 | // Complexity: 12 | // O(n log log n) 13 | // 14 | // Verified: 15 | // SPOJ2, SPOJ503 16 | // 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | using namespace std; 28 | 29 | #define fst first 30 | #define snd second 31 | #define all(c) c.begin(), c.end() 32 | 33 | typedef long long ll; 34 | vector primes(ll lo, ll hi) { // primes in [lo, hi) 35 | const ll M = 1 << 14, SQR = 1 << 16; 36 | vector composite(M), small_composite(SQR); 37 | 38 | vector> sieve; 39 | for (ll i = 3; i < SQR; i+=2) { 40 | if (!small_composite[i]) { 41 | ll k = i*i + 2*i*max(0.0, ceil((lo - i*i)/(2.0*i))); 42 | sieve.push_back({2*i, k}); 43 | for (ll j = i*i; j < SQR; j += 2*i) 44 | small_composite[j] = 1; 45 | } 46 | } 47 | vector ps; 48 | if (lo <= 2) { ps.push_back(2); lo = 3; } 49 | for (ll k = lo|1, low = lo; low < hi; low += M) { 50 | ll high = min(low + M, hi); 51 | fill(all(composite), 0); 52 | for (auto &z: sieve) 53 | for (; z.snd < high; z.snd += z.fst) 54 | composite[z.snd - low] = 1; 55 | for (; k < high; k+=2) 56 | if (!composite[k - low]) ps.push_back(k); 57 | } 58 | return ps; 59 | } 60 | vector primes(ll n) { // primes in [0,n) 61 | return primes(0,n); 62 | } 63 | 64 | // SPOJ503 65 | int main() { 66 | int n; scanf("%d", &n); 67 | for (int i = 0; i < n; ++i) { 68 | ll lo, hi; 69 | scanf("%lld %lld", &lo, &hi); 70 | auto x = primes(lo, hi+1); 71 | for (ll p: x) printf("%lld\n", p); 72 | } 73 | } 74 | /* 75 | // SPOJ2 76 | int main() { 77 | int n; scanf("%d", &n); 78 | for (int i = 0; i < n; ++i) { 79 | if (i > 0) printf("\n"); 80 | ll lo, hi; 81 | scanf("%lld %lld", &lo, &hi); 82 | auto x = primes(lo, hi+1); 83 | for (ll p: x) printf("%lld\n", p); 84 | } 85 | } 86 | */ 87 | -------------------------------------------------------------------------------- /geometry/bk_tree.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Burkhard-Keller Tree (metric tree) 3 | // 4 | // Description: 5 | // Let V be a (finite) set, and d: V x V -> R be a metric. 6 | // BK tree supports the following operations: 7 | // - insert(p): insert a point p, O((log n)^2) 8 | // - traverse(p,d): enumerate all q with d(p,q) <= d 9 | // 10 | // Remark: 11 | // To delete elements and/or rebalance the tree, 12 | // we can use the same technique as the scapegoat tree. 13 | // 14 | // Reference 15 | // W. Burkhard and R. Keller (1973): 16 | // Some approaches to best-match file searching, 17 | // Communications of the ACM, vol. 16, issue. 4, pp. 230--236. 
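// Note (added): traverse prunes children by the triangle inequality;
// if d = dist(n->p, p), a subtree hanging off an edge of length e can
// contain a point within dmax of p only when |e - d| <= dmax, which is
// exactly the test applied to n->ch in the loop below.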
18 | // 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | using namespace std; 31 | 32 | #define fst first 33 | #define snd second 34 | typedef pair PII; 35 | int dist(PII a, PII b) { return max(abs(a.fst-b.fst),abs(a.snd-b.snd)); } 36 | void process(PII a) { printf("%d %d\n", a.fst,a.snd); } 37 | 38 | template 39 | struct bk_tree { 40 | typedef int dist_type; 41 | struct node { 42 | T p; 43 | unordered_map ch; 44 | } *root; 45 | bk_tree() : root(0) { } 46 | 47 | node *insert(node *n, T p) { 48 | if (!n) { n = new node(); n->p = p; return n; } 49 | dist_type d = dist(n->p, p); 50 | n->ch[d] = insert(n->ch[d], p); 51 | return n; 52 | } 53 | void insert(T p) { root = insert(root, p); } 54 | void traverse(node *n, T p, dist_type dmax) { 55 | if (!n) return; 56 | dist_type d = dist(n->p, p); 57 | if (d < dmax) { 58 | process(n->p); // write your process 59 | } 60 | for (auto i: n->ch) 61 | if (-dmax <= i.fst - d && i.fst - d <= dmax) 62 | traverse(i.snd, p, dmax); 63 | } 64 | void traverse(T p, dist_type dmax) { traverse(root, p, dmax); } 65 | }; 66 | 67 | int main() { 68 | 69 | bk_tree B; 70 | for (int i = 0; i < 10; ++i) 71 | for (int j = 0; j < 10; ++j) 72 | B.insert(PII(i,j)); 73 | 74 | B.traverse( PII(0,0), 2 ); 75 | 76 | cout << endl; 77 | 78 | B.traverse( PII(3,3), 2 ); 79 | 80 | cout << endl; 81 | } 82 | -------------------------------------------------------------------------------- /numeric/dual_number.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Automatic Differentiation by Dual Numbers 3 | // 4 | // Description: 5 | // 6 | // Dual number is an extended real number of the form a + epsilon b, 7 | // where epsilon is the first order infinitesimal (i.e, epsilon^2 = 0). 8 | // By overloading each function for dual numbers, as in the code, 9 | // we can obtain the derivative of f at a by evaluating f(a + epsilon). 10 | // 11 | // Complexity: 12 | // 13 | // Linear in composition depth. 
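// Worked example (added illustration): arithmetic uses epsilon^2 = 0, e.g.
//   (a + eps b)(c + eps d) = a c + eps (a d + b c),
// so evaluating f(x) = x*x at x = 3 + eps yields 9 + 6 eps,
// and f'(3) = 6 is read off the infinitesimal part.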
14 | // 15 | #include 16 | 17 | using namespace std; 18 | 19 | #define fst first 20 | #define snd second 21 | #define all(c) ((c).begin()), ((c).end()) 22 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 23 | 24 | using Real = double; 25 | struct DualNumber { 26 | Real a, b; // a + epsilon b 27 | DualNumber(Real a = 0, Real b = 0) : a(a), b(b) { } 28 | DualNumber &operator+=(DualNumber x) { b+=x.b; a+=x.a; return *this; } 29 | DualNumber &operator*=(DualNumber x) { b=b*x.a+a*x.b; a*=x.a; return *this; } 30 | DualNumber operator+() const { return *this; } 31 | DualNumber operator-() const { return {-a, -b}; } 32 | DualNumber inv() const { return {1.0/a, -b/(a*a)}; } 33 | DualNumber &operator-=(DualNumber x) { return *this += -x; } 34 | DualNumber &operator/=(DualNumber x) { return *this *= x.inv(); } 35 | }; 36 | DualNumber operator+(DualNumber x, DualNumber y) { return x += y; } 37 | DualNumber operator-(DualNumber x, DualNumber y) { return x -= y; } 38 | DualNumber operator*(DualNumber x, DualNumber y) { return x *= y; } 39 | DualNumber operator/(DualNumber x, DualNumber y) { return x /= y; } 40 | 41 | // define functions with its derivative 42 | DualNumber pow(DualNumber x, Real e) { return {pow(x.a,e),x.b*pow(x.a,e-1)}; } 43 | DualNumber sqrt(DualNumber x) { return pow(x,0.5); } 44 | DualNumber exp(DualNumber x) { return {exp(x.a),x.b*exp(x.a)}; } 45 | DualNumber cos(DualNumber x) { return {cos(x.a),-x.b*sin(x.a)}; } 46 | DualNumber sin(DualNumber x) { return {sin(x.a),x.b*cos(x.a)}; } 47 | DualNumber tan(DualNumber x) { return sin(x)/cos(x); } 48 | DualNumber log(DualNumber x) { return {log(x.a),x.b/x.a}; } 49 | 50 | int main() { 51 | DualNumber x = 3, y = 4; 52 | auto f = [&](DualNumber x) { 53 | return sin(x*x) + cos(exp(x)) + tan(x); 54 | }; 55 | x.b = 1; // set infinitesimal part 56 | cout << f(x).b << endl; 57 | } 58 | 59 | -------------------------------------------------------------------------------- /data_structure/disjoint_sparse_table.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Disjoint Sparse Table 3 | // 4 | // Description: 5 | // 6 | // Let `otimes` be a binary associative operator. 7 | // The disjoint sparse table is a data structure for a 8 | // sequence xs that admits a query 9 | // prod(i,j) = xs[i] `otimes` ... `otimes` xs[j-1] 10 | // in time O(1). 11 | // 12 | // The structure is a segment tree whose node maintains 13 | // prod(i,m) and prod(m,j) for all i, j in the segment. 14 | // Then prod(i,j) is evaluated by finding the node that 15 | // splits [i,j) and returning prod(i,m)*prod(m,j). 
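// Example (added illustration): for xs = {3,1,4,1,5,1} with otimes = max,
// prod(2,6) = max(xs[2..5]); the level whose split point m = 4 separates
// the endpoints is chosen, and the answer is
//   otimes(prod(2,4), prod(4,6)) = max(4, 5) = 5.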
16 | // 17 | // Complexity: 18 | // 19 | // preprocessing O(n log n) 20 | // query O(1) 21 | // 22 | #include 23 | 24 | using namespace std; 25 | 26 | #define fst first 27 | #define snd second 28 | #define all(c) ((c).begin()), ((c).end()) 29 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 30 | 31 | template 32 | struct DisjointSparseTable { 33 | vector> ys; 34 | Op otimes; 35 | DisjointSparseTable(vector xs, Op otimes_) : otimes(otimes_) { 36 | int n = 1; 37 | while (n <= xs.size()) n *= 2; 38 | xs.resize(n); 39 | ys.push_back(xs); 40 | for (int h = 1; ; ++h) { 41 | int range = (2 << h), half = (range /= 2); 42 | if (range > n) break; 43 | ys.push_back(xs); 44 | for (int i = half; i < n; i += range) { 45 | for (int j = i-2; j >= i-half; --j) 46 | ys[h][j] = otimes(ys[h][j], ys[h][j+1]); 47 | for (int j = i+1; j < min(n, i+half); ++j) 48 | ys[h][j] = otimes(ys[h][j-1], ys[h][j]); 49 | } 50 | } 51 | } 52 | T prod(int i, int j) { // [i, j) query 53 | --j; 54 | int h = sizeof(int)*__CHAR_BIT__-1-__builtin_clz(i ^ j); 55 | return otimes(ys[h][i], ys[h][j]); 56 | } 57 | }; 58 | template 59 | auto makeDisjointSparseTable(vector xs, Op op) { 60 | return DisjointSparseTable(xs, op); 61 | } 62 | 63 | int main() { 64 | vector xs = {3,1,4,1,5,1}; 65 | int n = xs.size(); 66 | auto otimes = [](int a, int b) { return max(a, b); }; 67 | auto dst = makeDisjointSparseTable(xs, otimes); 68 | 69 | for (int i = 0; i < n; ++i) { 70 | for (int j = i+1; j <= n; ++j) { 71 | cout << i << " " << j << " " << dst.prod(i, j) << " "; 72 | int a = xs[i]; 73 | for (int k = i+1; k < j; ++k) 74 | a = otimes(a, xs[k]); 75 | cout << a << endl; 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /data_structure/partially_persistent_union_find.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Partially Persistent Union Find 3 | // 4 | // Description: 5 | // It is a persistent version of union find data structure. 6 | // It allows us to apply "find" to the all versions and 7 | // "unite" to the latest version. 8 | // 9 | // Complexity: 10 | // O(log n) for each query. 
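// Example (added illustration): after unite(0,1) at time 1 and
// unite(1,2) at time 2,
//   find(0,2,1) == false and find(0,2,2) == true,
// i.e. find/size may be asked against any past version, while unite
// always extends the newest one.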
11 | // 12 | // Verified: 13 | // CODE THANKS FESTIVAL 2017, Union Set: 14 | // https://code-thanks-festival-2017-open.contest.atcoder.jp/tasks/code_thanks_festival_2017_h 15 | // 16 | 17 | #include 18 | using namespace std; 19 | 20 | #define fst first 21 | #define snd second 22 | #define all(c) ((c).begin()), ((c).end()) 23 | using namespace std; 24 | 25 | struct PartiallyPersistentUnionFind { 26 | vector>> parent; // (parent index, modified time) 27 | int now = 0; // time = 0 is the initial state 28 | PartiallyPersistentUnionFind(int n) : parent(n, {{-1,0}}) { } 29 | bool unite(int u, int v) { 30 | ++now; 31 | u = root(u, now); v = root(v, now); 32 | if (u == v) return false; 33 | if (parent[u].back().fst > parent[v].back().fst) swap(u, v); 34 | parent[u].push_back({parent[u].back().fst+parent[v].back().fst, now}); 35 | parent[v].push_back({u, now}); 36 | return true; 37 | } 38 | bool find(int u, int v, int t) { return root(u, t) == root(v, t); } 39 | int root(int u, int t) { 40 | if (parent[u].back().fst >= 0 && parent[u].back().snd <= t) 41 | return root(parent[u].back().fst, t); 42 | return u; 43 | } 44 | int size(int u, int t) { 45 | u = root(u, t); 46 | int lo = 0, hi = parent[u].size(); 47 | while (lo + 1 < hi) { 48 | int mi = (lo + hi) / 2; 49 | if (parent[u][mi].snd <= t) lo = mi; 50 | else hi = mi; 51 | } 52 | return -parent[u][lo].fst; 53 | } 54 | }; 55 | 56 | int main() { 57 | int n, m, q, a, b; 58 | scanf("%d %d", &n, &m); 59 | 60 | PartiallyPersistentUnionFind uf(n); 61 | for (int i = 0; i < m; ++i) { 62 | scanf("%d %d", &a, &b); --a; --b; 63 | uf.unite(a, b); 64 | } 65 | scanf("%d", &q); 66 | for (int i = 0; i < q; ++i) { 67 | scanf("%d %d", &a, &b); --a; --b; 68 | int lo = 0, hi = uf.now+1; 69 | while (lo+1 < hi) { 70 | int mi = (lo + hi) / 2; 71 | if (uf.find(a, b, mi)) hi = mi; 72 | else lo = mi; 73 | } 74 | if (hi == uf.now+1) printf("-1\n"); 75 | else printf("%d\n", hi); 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /data_structure/leftist_heap.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Leftist Heap 3 | // 4 | // Description: 5 | // 6 | // Leftist heap is a heap data structure that allows 7 | // the meld (merge) operation in O(log n) time. 8 | // Use this for persistent heaps. 9 | // 10 | // Complexity: 11 | // 12 | // O(1) for top, O(log n) for push/pop/meld 13 | // 14 | // g++ -std=c++17 -O3 -fmax-errors=1 -fsanitize=undefined 15 | #include 16 | 17 | using namespace std; 18 | 19 | #define fst first 20 | #define snd second 21 | #define all(c) ((c).begin()), ((c).end()) 22 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 23 | 24 | template 25 | struct LeftistHeap { 26 | struct Node { 27 | T key; 28 | Node *left = 0, *right = 0; 29 | int dist = 0; 30 | } *root = 0; 31 | static Node *merge(Node *x, Node *y) { 32 | if (!x) return y; 33 | if (!y) return x; 34 | if (x->key > y->key) swap(x, y); 35 | x->right = merge(x->right, y); 36 | if (!x->left || x->left->dist < x->dist) swap(x->left, x->right); 37 | x->dist = (x->right ? x->right->dist : 0) + 1; 38 | return x; 39 | } 40 | void push(T key) { root = merge(root, new Node({key})); } 41 | void pop() { root = merge(root->left, root->right); } 42 | T top() { return root->key; } 43 | }; 44 | 45 | // 46 | // Persistent Implementaiton. 
(allow copy) 47 | // 48 | template 49 | struct PersistentLeftistHeap { 50 | struct Node { 51 | T key; 52 | Node *left = 0, *right = 0; 53 | int dist = 0; 54 | } *root = 0; 55 | static Node *merge(Node *x, Node *y) { 56 | if (!x) return y; 57 | if (!y) return x; 58 | if (x->key > y->key) swap(x, y); 59 | x = new Node(*x); 60 | x->right = merge(x->right, y); 61 | if (!x->left || x->left->dist < x->dist) swap(x->left, x->right); 62 | x->dist = (x->right ? x->right->dist : 0) + 1; 63 | return x; 64 | } 65 | void push(T key) { root = merge(root, new Node({key})); } 66 | void pop() { root = merge(root->left, root->right); } 67 | T top() { return root->key; } 68 | }; 69 | 70 | int main() { 71 | PersistentLeftistHeap heap; 72 | heap.push(3); 73 | heap.push(1); 74 | heap.push(4); 75 | heap.push(1); 76 | heap.push(5); 77 | cout << heap.top() << endl; heap.pop(); 78 | cout << heap.top() << endl; heap.pop(); 79 | auto temp = heap; 80 | cout << heap.top() << endl; heap.pop(); 81 | cout << heap.top() << endl; heap.pop(); 82 | cout << temp.top() << endl; temp.pop(); 83 | cout << temp.top() << endl; temp.pop(); 84 | } 85 | -------------------------------------------------------------------------------- /dynamic_programming/rod_cutting.cc: -------------------------------------------------------------------------------- 1 | // Rod Cutting Problem 2 | // Description: 3 | // Given an array of values and a rod length, find the maximum value we can get by doing the rod cutting 4 | // Algorithm: 5 | // let di denotes the maximum value we can get from cutting a rod of length i, vi denotes the value of a cut of length i 6 | // then 7 | // di = 0, when i = 0; 8 | // di = max(di-k + vk) for 1<=k<=i, when i>=1 9 | // Complexity: 10 | // let m be the number of different kinds of cut, let n be the rod length 11 | // time complexity: O(mn) 12 | // Space complexity: O(n) 13 | 14 | #include 15 | #include 16 | 17 | using namespace std; 18 | 19 | // output the maximum values we can get 20 | int rod_cut(int v[], int vsize, int len){ 21 | int values[len+1]; 22 | values[0] = 0; 23 | int cur_max; 24 | for (int i=1; i<=len; i++){ 25 | cur_max = -1; 26 | for (int j=1; j<=i; j++){ 27 | if (j cur_max){ 28 | cur_max = values[i-j] + v[j]; 29 | } 30 | } 31 | values[i] = cur_max; 32 | } 33 | return values[len]; 34 | } 35 | 36 | // output the patterns of cutting that can generates the maximum value 37 | // pred[] keeps track of the cut of each length 38 | // pred[i] denotes the cut made when the rod length is i 39 | int* rod_cutting_witness(int v[], int vsize, int len){ 40 | int values[len+1]; 41 | values[0] = 0; 42 | int cur_max; 43 | int *witness = new int [len+1]; 44 | witness[0] = 0; 45 | for (int i=1; i<=len; i++){ 46 | cur_max = -1; 47 | for (int j=1; j<=i; j++){ 48 | if (j cur_max){ 49 | cur_max = values[i-j]+v[j]; 50 | values[i] = cur_max; 51 | witness[i] = j; 52 | } 53 | } 54 | } 55 | int m = len; 56 | while (m != 0){ 57 | if (witness[m] < m){ 58 | cout<<"cut: "<R. 6 | // The algorithm finds a maximum flow. 7 | // 8 | // Algorithm: 9 | // Ford-Fulkerson's augmenting path algorithm 10 | // 11 | // Complexity: 12 | // O(m F), where F is the maximum flow value. 13 | // 14 | // Verified: 15 | // AOJ GRL_6_A: Maximum Flow 16 | // 17 | // Reference: 18 | // B. H. Korte and J. Vygen (2008): 19 | // Combinatorial Optimization: Theory and Algorithms. 20 | // Springer Berlin Heidelberg. 
21 | // 22 | 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | using namespace std; 30 | 31 | #define fst first 32 | #define snd second 33 | #define all(c) ((c).begin()), ((c).end()) 34 | 35 | const int INF = 1 << 30; 36 | struct graph { 37 | typedef long long flow_type; 38 | struct edge { 39 | int src, dst; 40 | flow_type capacity, flow; 41 | size_t rev; 42 | }; 43 | int n; 44 | vector> adj; 45 | graph(int n) : n(n), adj(n) { } 46 | void add_edge(int src, int dst, flow_type capacity) { 47 | adj[src].push_back({src, dst, capacity, 0, adj[dst].size()}); 48 | adj[dst].push_back({dst, src, 0, 0, adj[src].size()-1}); 49 | } 50 | int max_flow(int s, int t) { 51 | vector visited(n); 52 | function augment = [&](int u, flow_type cur) { 53 | if (u == t) return cur; 54 | visited[u] = true; 55 | for (auto &e: adj[u]) { 56 | if (!visited[e.dst] && e.capacity > e.flow) { 57 | flow_type f = augment(e.dst, min(e.capacity - e.flow, cur)); 58 | if (f > 0) { 59 | e.flow += f; 60 | adj[e.dst][e.rev].flow -= f; 61 | return f; 62 | } 63 | } 64 | } 65 | return flow_type(0); 66 | }; 67 | for (int u = 0; u < n; ++u) 68 | for (auto &e: adj[u]) e.flow = 0; 69 | 70 | flow_type flow = 0; 71 | while (1) { 72 | fill(all(visited), false); 73 | flow_type f = augment(s, INF); 74 | if (f == 0) break; 75 | flow += f; 76 | } 77 | return flow; 78 | } 79 | }; 80 | 81 | int main() { 82 | for (int n, m; scanf("%d %d", &n, &m) == 2; ) { 83 | graph g(n); 84 | for (int i = 0; i < m; ++i) { 85 | int u, v, w; 86 | scanf("%d %d %d", &u, &v, &w); 87 | g.add_edge(u, v, w); 88 | } 89 | printf("%d\n", g.max_flow(0, n-1)); 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /other/poker_hands.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Poker Hands 3 | // 4 | // Description: 5 | // It determines the hand of the poker. 6 | // 7 | // Algorithm: 8 | // Naive. 
9 | // 10 | // Verified: 11 | // UVA 10315 Poker Hands ( 12 | // 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | using namespace std; 21 | 22 | #define fst first 23 | #define snd second 24 | #define all(c) ((c).begin()), ((c).end()) 25 | 26 | struct card { 27 | int rank, suit; 28 | card(int rank, int suit) : rank(rank), suit(suit) { } 29 | card(char s[]) { // s is, i.e., "7H" 30 | for (rank = 0; s[0] != "_A23456789TJQK"[rank]; ++rank); 31 | for (suit = 0; s[1] != "CDHS"[suit]; ++suit); 32 | } 33 | }; 34 | 35 | enum { 36 | HIGHEST_CARD, ONE_PAIR, TWO_PAIR, THREE_OF_A_KIND, 37 | STRAIGHT, FLUSH, FULL_HOUSE, FOUR_OF_A_KIND, STRAIGHT_FLUSH }; 38 | pair> poker_hand(vector cs) { 39 | vector code, freq(30); 40 | for (card c: cs) freq[c.rank+13] = freq[c.rank] += 1; 41 | for (int i = 14; i >= 2; --i) 42 | if (freq[i]) code.push_back(i); 43 | stable_sort(all(code), [&](int i, int j) { return freq[i] > freq[j]; }); 44 | 45 | bool straight = false, flush = true; 46 | for (int i = 1, j; i <= 10; ++i) { // beginning of straight (10 == 'X') 47 | for (j = 0; j < 5 && freq[i+j]; ++j); 48 | if (j == 5) straight = true; 49 | } 50 | for (int i = 1; i < 5; ++i) 51 | if (cs[0].suit != cs[i].suit) flush = false; 52 | 53 | if (straight && flush) return {STRAIGHT_FLUSH, code}; 54 | if (freq[code[0]] == 4) return {FOUR_OF_A_KIND, code}; 55 | if (freq[code[0]] == 3 && freq[code[1]] == 2) return {FULL_HOUSE, code}; 56 | if (flush) return {FLUSH, code}; 57 | if (straight) return {STRAIGHT, code}; 58 | if (freq[code[0]] == 3) return {THREE_OF_A_KIND, code}; 59 | if (freq[code[0]] == 2 && freq[code[1]] == 2) return {TWO_PAIR, code}; 60 | if (freq[code[0]] == 2) return {ONE_PAIR, code}; 61 | return {HIGHEST_CARD, code}; 62 | }; 63 | 64 | int main() { 65 | while (1) { 66 | vector cs[2]; 67 | pair> res[2]; 68 | for (int k = 0; k < 2; ++k) { 69 | for (int i = 0; i < 5; ++i) { 70 | char s[120]; 71 | if (scanf("%s", s) != 1) return 0;; 72 | cs[k].push_back(card(s)); 73 | } 74 | res[k] = poker_hand(cs[k]); 75 | } 76 | if (res[0] < res[1]) printf("White wins.\n"); 77 | else if (res[0] > res[1]) printf("Black wins.\n"); 78 | else printf("Tie.\n"); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /math/linear_recursion.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Linear Recurrence Solver 3 | // 4 | // Description: 5 | // Consider 6 | // x[i+n] = a[0] x[i] + a[1] x[i+1] + ... + a[n-1] x[i+n-1]. 7 | // with initial solution x[0], x[1], ..., x[n-1]. 8 | // We compute k-th term of x in O(n^2 log k) time. 9 | // 10 | // Algorithm: 11 | // Since x[k] is linear in x[0], ..., x[n-1], 12 | // there exists function f: Z -> R^n such that 13 | // x[k] = f(k)[0] x[0] + ... + f(k)[n-1] x[n-1]. 14 | // Here, f satisfies the following identities: 15 | // x[2k] = f(k)[0] x[k] + ... + f(k)[n-1] x[k+n-1] 16 | // = f(k)[0] (f(k)[0] x[0] + ... + f(k)[n-1] x[n-1]) 17 | // + f(k)[1] (f(k)[0] x[1] + ... + f(k)[n-1] x[n-1+1]) 18 | // + ... 
19 | // = sum_{0 <= i, j < n} f(k)[i] f(k)[j] x[i+j], // and the terms x[i+j] with i+j >= n are reduced back to x[0..n-1] // by the recurrence; hence f(2k) (and, with a one-step shift, f(2k+1)) // is obtained from f(k) in O(n^2) time, i.e., O(n^2 log k) overall. 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | 41 | using namespace std; 42 | 43 | #define fst first 44 | #define snd second 45 | #define all(c) ((c).begin()), ((c).end()) 46 | 47 | int linear_recurrence(vector a, vector x, int k) { 48 | int n = a.size(); 49 | vector t(2*n+1); 50 | 51 | function (int)> rec = [&](int k) { 52 | vector c(n); 53 | if (k < n) c[k] = 1; 54 | else { 55 | vector b = rec(k / 2); 56 | fill(all(t), 0); 57 | for (int i = 0; i < n; ++i) 58 | for (int j = 0; j < n; ++j) 59 | t[i+j+(k&1)] += b[i]*b[j]; 60 | for (int i = 2*n-1; i >= n; --i) 61 | for (int j = 0; j < n; j++) 62 | t[i-n+j] += a[j]*t[i]; 63 | for (int i = 0; i < n; ++i) 64 | c[i] = t[i]; 65 | } 66 | return c; 67 | }; 68 | vector c = rec(k); 69 | int ans = 0; 70 | for (int i = 0; i < x.size(); ++i) 71 | ans += c[i]*x[i]; 72 | return ans; 73 | } 74 | 75 | 76 | int main() { 77 | // x[n+3] = x[n] + 2*x[n+1] + 3*x[n+2]; 78 | // x[0] = 6, x[1] = 5, x[2] = 4. 79 | // 10-th term = 220696 80 | cout << linear_recurrence({1,2,3}, {6,5,4}, 10) << endl; 81 | } 82 | -------------------------------------------------------------------------------- /graph/is_cograph.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Cograph Recognition 3 | // 4 | // Description: 5 | // Cographs are recursively defined as follows: 6 | // (1) A singleton is a cograph 7 | // (2) A disjoint union of cographs is a cograph 8 | // (3) A complement of a cograph is a cograph 9 | // Cographs are also characterized by P_4-free graphs. 10 | // 11 | // Algorithm: 12 | // By using the above constructive characterization, we have 13 | // G is a cograph iff all connected components of G~ are cographs. 14 | // This yields a polynomial time algorithm. 15 | // Note that there is an O(n + m) algorithm for this problem 16 | // by using lexicographic BFS. 17 | // 18 | // Complexity: 19 | // O(n^3) in the worst case.
20 | // 21 | // Verify: 22 | // POJ3236: Michelle's Evaluation 23 | // 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | using namespace std; 34 | 35 | #define fst first 36 | #define snd second 37 | #define all(c) ((c).begin()), ((c).end()) 38 | 39 | struct graph { 40 | int n; 41 | vector > adj; 42 | graph(int n) : n(n), adj(n) { } 43 | void add_edge(int src, int dst) { 44 | adj[src].push_back(dst); 45 | adj[dst].push_back(src); 46 | } 47 | }; 48 | 49 | bool is_cograph(graph g, int depth = 0) { 50 | if (g.n <= 3) return true; 51 | for (auto &nbh: g.adj) sort(all(nbh)); 52 | vector index(g.n, -1); 53 | for (int i = 0; i < g.n; ++i) { 54 | if (index[i] >= 0) continue; 55 | int size = 0; 56 | index[i] = size++; 57 | vector S(1,i), comps; 58 | while (!S.empty()) { 59 | int j = S.back(); S.pop_back(); 60 | comps.push_back(j); 61 | for (int k: g.adj[j]) { 62 | if (index[k] < 0) { 63 | index[k] = size++; 64 | S.push_back(k); 65 | } 66 | } 67 | } 68 | graph h(size); 69 | for (int j = 0; j < comps.size(); ++j) 70 | for (int k = j+1; k < comps.size(); ++k) 71 | if (!binary_search(all(g.adj[comps[j]]), comps[k])) 72 | h.add_edge(index[comps[j]], index[comps[k]]); 73 | if (depth > 0 && g.n == h.n) return false; // both G and G' are connected 74 | if (!is_cograph(h, depth+1)) return false; // some component is not a cograph 75 | } 76 | return true; 77 | } 78 | 79 | int main() { 80 | int n, m; 81 | scanf("%d %d", &n, &m); 82 | 83 | graph g(n); 84 | for (int i = 0; i < m; ++i) { 85 | int u, v; 86 | scanf("%d %d", &u, &v); 87 | g.add_edge(u-1, v-1); 88 | } 89 | printf("%s\n", (is_cograph_n(g) ? "Yes" : "No")); 90 | } 91 | -------------------------------------------------------------------------------- /graph/maximum_flow_edmonds_karp.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Maximum Flow (Edmonds-Karp) 3 | // 4 | // Description: 5 | // Given a directed network G = (V, E) with edge capacity c: E->R. 6 | // The algorithm finds a maximum flow. 7 | // 8 | // Algorithm: 9 | // Edmonds-Karp shortest augmenting path algorithm. 10 | // 11 | // Complexity: 12 | // O(n m^2) 13 | // 14 | // Verified: 15 | // AOJ GRL_6_A: Maximum Flow 16 | // 17 | // Reference: 18 | // B. H. Korte and J. Vygen (2008): 19 | // Combinatorial Optimization: Theory and Algorithms. 20 | // Springer Berlin Heidelberg. 
21 | // 22 | 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | using namespace std; 31 | 32 | #define fst first 33 | #define snd second 34 | #define all(c) ((c).begin()), ((c).end()) 35 | 36 | const int INF = 1 << 30; 37 | struct graph { 38 | int n; 39 | struct edge { 40 | int src, dst; 41 | int capacity, residue; 42 | size_t rev; 43 | }; 44 | edge &rev(edge e) { return adj[e.dst][e.rev]; }; 45 | 46 | vector> adj; 47 | graph(int n) : n(n), adj(n) { } 48 | void add_edge(int src, int dst, int capacity) { 49 | adj[src].push_back({src, dst, capacity, 0, adj[dst].size()}); 50 | adj[dst].push_back({dst, src, 0, 0, adj[src].size()-1}); 51 | } 52 | int max_flow(int s, int t) { 53 | for (int u = 0; u < n; ++u) 54 | for (auto &e: adj[u]) e.residue = e.capacity; 55 | int total = 0; 56 | while (1) { 57 | vector prev(n, -1); prev[s] = -2; 58 | queue que; que.push(s); 59 | while (!que.empty() && prev[t] == -1) { 60 | int u = que.front(); que.pop(); 61 | for (edge &e: adj[u]) { 62 | if (prev[e.dst] == -1 && e.residue > 0) { 63 | prev[e.dst] = e.rev; 64 | que.push(e.dst); 65 | } 66 | } 67 | } 68 | if (prev[t] == -1) break; 69 | int inc = INF; 70 | for (int u = t; u != s; u = adj[u][prev[u]].dst) 71 | inc = min(inc, rev(adj[u][prev[u]]).residue); 72 | for (int u = t; u != s; u = adj[u][prev[u]].dst) { 73 | adj[u][prev[u]].residue += inc; 74 | rev(adj[u][prev[u]]).residue -= inc; 75 | } 76 | total += inc; 77 | } // { u : visited[u] == true } is s-side 78 | return total; 79 | } 80 | }; 81 | 82 | int main() { 83 | for (int n, m; scanf("%d %d", &n, &m) == 2; ) { 84 | graph g(n); 85 | for (int i = 0; i < m; ++i) { 86 | int u, v, w; 87 | scanf("%d %d %d", &u, &v, &w); 88 | g.add_edge(u, v, w); 89 | } 90 | printf("%d\n", g.max_flow(0, n-1)); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /geometry/coordinate_domination.cc: -------------------------------------------------------------------------------- 1 | // 2 | // 3D Coordinate-Wise Domination 3 | // 4 | // Description: 5 | // 6 | // Point (x,y,z) dominates (x',y',z') if 7 | // x < x', y < y', and z < z' 8 | // holds. Kung-Luccio-Preparata proposed an algorithm to compute 9 | // the all set of dominating points in O(n log n) time. 10 | // 11 | // It maintains a data structure to check the domination in (y,z) plane, 12 | // and proceed the points in the decreasing order of x. 13 | // By the processing order, the new point is never dominated by the latter 14 | // points and is dominated by the previous points if its (y,z) coordinate 15 | // is dominated by them. 16 | // 17 | // Complexity: 18 | // 19 | // O(n log n). By using this method recursively, 20 | // we can solve d-dimensional domination in O(n log^{d-2} n). 21 | // 22 | // Reference: 23 | // 24 | // Hsiang-Tsung Kung, Fabrizio Luccio, Franco P. Preparata (1975): 25 | // "On finding the maxima of a set of vectors." Journal of the ACM, 26 | // vol.22, no.4, pp.469-476. 
27 | // 28 | 29 | #include 30 | using namespace std; 31 | 32 | #define fst first 33 | #define snd second 34 | #define all(c) ((c).begin()), ((c).end()) 35 | 36 | using Real = int; 37 | struct Point { 38 | Real x, y, z; 39 | bool operator < (Point p) const { 40 | if (x != p.x) return x < p.x; 41 | if (y != p.y) return y < p.y; 42 | return z < p.z; 43 | } 44 | }; 45 | int domination(vector xs) { 46 | multiset frontier; 47 | auto bad = [&](multiset::iterator it) { 48 | auto jt = next(it); 49 | if (jt == frontier.end()) return false; 50 | return it->y < jt->y && it->z < jt->z; 51 | }; 52 | int n = xs.size(); 53 | sort(all(xs)); 54 | int count = 0; 55 | for (int i = n-1; i >= 0; --i) { 56 | auto proc = [&] { 57 | if (i < n-1 && xs[i].x == xs[i+1].x) return true; 58 | Point p(xs[i]); p.x = 0; 59 | auto it = frontier.insert(p); 60 | if (bad(it)) { 61 | frontier.erase(it); 62 | return false; 63 | } else { 64 | while (it != frontier.begin() && bad(prev(it))) 65 | frontier.erase(prev(it)); 66 | return true; 67 | } 68 | }; 69 | if (proc()) ++count; 70 | } 71 | return count; 72 | } 73 | 74 | 75 | int main() { 76 | int ncase; scanf("%d", &ncase); 77 | for (int icase = 0; icase < ncase; ++icase) { 78 | int n; scanf("%d", &n); 79 | vector ps(n); 80 | for (int i = 0; i < n; ++i) { 81 | scanf("%d %d %d", &ps[i].x, &ps[i].y, &ps[i].z); 82 | ps[i].x = -ps[i].x; 83 | ps[i].y = -ps[i].y; 84 | ps[i].z = -ps[i].z; 85 | } 86 | printf("%d\n", domination(ps)); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /graph/least_common_ancestor_doubling.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Least common ancestor by doubling 3 | // 4 | // Description: 5 | // For a rooted tree T, LCA(u,v) is a vertex u 6 | // that is the deepest node that is a common ancestor of u and v. 7 | // 8 | // Algorithm: 9 | // It stores all 2^k-ancestors for all vertices. 10 | // Then LCA can be solved in O(log n) time. 
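// Query sketch (added): lca(u,v) first lifts the deeper vertex by the
// binary representation of depth[v]-depth[u]; if the two vertices now
// coincide, that vertex is the answer, otherwise both are lifted by the
// largest powers of two that keep them distinct and the common parent
// parent[0][u] is returned.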
11 | // 12 | // Complexity: 13 | // O(n) for preprocessing, 14 | // O(log n) for query 15 | // 16 | // Verified: 17 | // AOJ GRL_5C 18 | // 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | using namespace std; 26 | 27 | #define fst first 28 | #define snd second 29 | #define all(c) ((c).begin()), ((c).end()) 30 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 31 | 32 | 33 | struct tree { 34 | int n, logn; 35 | vector> adj; 36 | tree(int n) : n(n), adj(n) { 37 | logn = 1; 38 | for (int k = 1; k < n; k *= 2) ++logn; 39 | } 40 | void add_edge(int s, int t) { 41 | adj[s].push_back(t); 42 | adj[t].push_back(s); 43 | } 44 | vector> parent; 45 | vector rank, depth; 46 | void rootify(int r) { 47 | parent.assign(logn, vector(n, r)); 48 | rank.resize(n); 49 | depth.assign(n, 0); 50 | int id = 0; 51 | function dfs = [&](int u, int p) { 52 | rank[u] = id++; 53 | parent[0][u] = p; 54 | for (int i = 0; i+1 < logn; ++i) 55 | parent[i+1][u] = parent[i][parent[i][u]]; 56 | for (int v: adj[u]) 57 | if (v != p) { depth[v] = depth[u]+1; dfs(v, u); } 58 | }; dfs(r, r); 59 | } 60 | 61 | int lca(int u, int v) { 62 | if (depth[u] > depth[v]) swap(u, v); 63 | for (int i = depth[v]-depth[u], k = 0; i; i /= 2) { 64 | if (i & 1) v = parent[k][v]; 65 | ++k; 66 | } 67 | if (u == v) return u; 68 | for (int i = logn-1; i >= 0; --i) { 69 | if (parent[i][u] != parent[i][v]) { 70 | u = parent[i][u]; 71 | v = parent[i][v]; 72 | } 73 | } 74 | return parent[0][u]; 75 | } 76 | }; 77 | 78 | int main() { 79 | int n; 80 | scanf("%d", &n); 81 | tree T(n); 82 | for (int u = 0; u < n ; ++u) { 83 | int k; 84 | scanf("%d", &k); 85 | for (int j = 0; j < k; ++j) { 86 | int v; 87 | scanf("%d", &v); 88 | T.add_edge(u, v); 89 | } 90 | } 91 | T.rootify(0); 92 | int q; 93 | scanf("%d", &q); 94 | for (int i = 0; i < q; ++i) { 95 | int u, v; 96 | scanf("%d %d", &u, &v); 97 | printf("%d\n", T.lca(u, v)); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /graph/cycle_enumeration.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Hawick and James' cycle enumeration 3 | // 4 | // Description: 5 | // For a directed graph, it enumerates all cycles 6 | // 7 | // Algorithm: 8 | // Hawick and James's implementation of Johnson algorithm. 9 | // 10 | // Complexity 11 | // O(n+m) average time for each cycles, 12 | // O(n+m) space. 13 | // 14 | // References: 15 | // K. A. Hawick and H. A. James (2007): 16 | // Enumerating circuits and loops in graphs with self-arcs and multiple-arcs. 17 | // in Proceedings of International Conference on Foundations of Computer Science, 18 | // pp.14--20. 19 | // 20 | // D. B. Johnson (1975): 21 | // Finding all the elementary circuits of a directed graph. 22 | // SIAM Journal on Computing, vol.4, pp.77--84. 
23 | // 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | using namespace std; 32 | 33 | #define fst first 34 | #define snd second 35 | #define all(c) ((c).begin()), ((c).end()) 36 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 37 | 38 | struct graph { 39 | int n; 40 | vector> adj; 41 | graph(int n) : n(n), adj(n) { } 42 | void add_edge(int u, int v) { 43 | adj[u].push_back(v); 44 | } 45 | 46 | void all_cycles() { 47 | vector S; 48 | for (int s = 0; s < n; ++s) { 49 | vector blocked(n); 50 | vector> B(n); 51 | 52 | function unblock = [&](int u) { 53 | blocked[u] = false; 54 | for (int v: B[u]) 55 | if (blocked[v]) unblock(v); 56 | B[u].clear(); 57 | }; 58 | function rec = [&](int u) { 59 | bool is_cycle = false; 60 | blocked[u] = true; 61 | S.push_back(u); 62 | for (int v: adj[u]) { 63 | if (v < s) continue; 64 | if (v == s) { 65 | is_cycle = true; // S forms a cycle 66 | cout << "found cycle" << endl; 67 | for (auto w: S) cout << " " << w; 68 | cout << endl; 69 | } else if (!blocked[v] && rec(v)) is_cycle = true; 70 | if (is_cycle) unblock(u); 71 | else { 72 | for (int v: adj[u]) { 73 | if (v < s) continue; 74 | if (!B[v].count(u)) B[v].insert(u); 75 | } 76 | } 77 | } 78 | S.pop_back(); 79 | return is_cycle; 80 | }; rec(s); 81 | } 82 | } 83 | }; 84 | 85 | int main() { 86 | int n = 4; 87 | graph g(n); 88 | g.add_edge(0,1); 89 | g.add_edge(1,2); 90 | g.add_edge(2,0); 91 | g.add_edge(1,3); 92 | g.add_edge(3,0); 93 | g.add_edge(2,3); 94 | 95 | g.all_cycles(); 96 | } 97 | -------------------------------------------------------------------------------- /graph/chromatic_number.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Exact Algorithm for Chromatic Number 3 | // 4 | // Description: 5 | // 6 | // A vertex coloring is an assignment of colors to the vertices 7 | // such that no adjacent vertices have a same color. The smallest 8 | // number of colors for a vertex coloring is called the chromatic 9 | // number. Computing the chromatic number is NP-hard. 10 | // 11 | // We can compute the chromatic number by the inclusion-exlusion 12 | // principle. The complexity is O(poly(n) 2^n). The following 13 | // implementation runs in O(n 2^n) but is a Monte-Carlo algorithm 14 | // since it takes modulos to avoid multiprecision numbers. 15 | // 16 | // Complexity: 17 | // 18 | // O(n 2^n) 19 | // 20 | // References: 21 | // 22 | // Andreas Bjorklund and Thore Husfeldt (2006): 23 | // "Inclusion--Exclusion Algorithms for Counting Set Partitions." 24 | // in Proceedings of the 47th Annual Symposium on Foundations of 25 | // Computer Science, pp. 575--582. 
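// Key identity (added, following the reference above): G is k-colorable iff
//   sum_{S subseteq V} (-1)^{|V \ S|} ind(S)^k > 0,
// where ind(S) counts the independent sets contained in S; the code
// evaluates this sum modulo a large prime, whence the Monte-Carlo caveat.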
26 | // 27 | #include 28 | 29 | using namespace std; 30 | 31 | #define fst first 32 | #define snd second 33 | #define all(c) ((c).begin()), ((c).end()) 34 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 35 | 36 | struct Graph { 37 | int n; 38 | vector> adj; 39 | Graph(int n) : n(n), adj(n) { } 40 | void addEdge(int u, int v) { 41 | adj[u].push_back(v); 42 | adj[v].push_back(u); 43 | } 44 | }; 45 | 46 | int chromaticNumber(Graph g) { 47 | const int N = 1 << g.n; 48 | vector nbh(g.n); 49 | for (int u = 0; u < g.n; ++u) 50 | for (int v: g.adj[u]) 51 | nbh[u] |= (1 << v); 52 | 53 | int ans = g.n; 54 | for (int d: {7}) { // ,11,21,33,87,93}) { 55 | long long mod = 1e9 + d; 56 | vector ind(N), aux(N, 1); 57 | ind[0] = 1; 58 | for (int S = 1; S < N; ++S) { 59 | int u = __builtin_ctz(S); 60 | ind[S] = ind[S^(1<> 1); // gray-code 66 | aux[S] = (aux[S] * ind[S]) % mod; 67 | chi += (i & 1) ? aux[S] : -aux[S]; 68 | } 69 | if (chi % mod) ans = k; 70 | } 71 | } 72 | return ans; 73 | } 74 | 75 | int main() { 76 | int n = 6; 77 | Graph g(n); 78 | g.addEdge(0,1); 79 | g.addEdge(1,2); 80 | g.addEdge(2,3); 81 | g.addEdge(0,2); 82 | g.addEdge(3,4); 83 | g.addEdge(4,5); 84 | g.addEdge(5,0); 85 | // 0 86 | // 1 5 87 | // 88 | // 2 4 89 | // 3 90 | /* 91 | for (int i = 0; i < n; ++i) 92 | for (int j = 0; j < i; ++j) 93 | g.addEdge(i, j); 94 | */ 95 | cout << chromaticNumber(g) << endl; 96 | } 97 | -------------------------------------------------------------------------------- /combinatorics/permutation_index.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Permutation Index 3 | // 4 | // Description: 5 | // index_perm computes the lexicographical index of the permutation x of [0,n), 6 | // i.e., it gives k such that x = next_permutation^k [0,n). 7 | // 8 | // unindex_perm is the inverse function of index_perm. 9 | // 10 | // Algorithm: 11 | // We represent index in the factorial number system: 12 | // index = d_0 (n-1)! + d_1 (n-2)! + ... + d_{n-1} 0! 13 | // 14 | // index_perm: 15 | // for i = 0, ..., n-1: 16 | // d[i] = the number of active numbers j < x[i] 17 | // deactivate x[i] 18 | // 19 | // unindex_perm: 20 | // for i = 0, ..., n-1: 21 | // x[i] = d[i]-th active element 22 | // deactivate x[i] 23 | // 24 | // Both can be computed in O(n log n) time by using Fenwick tree. 25 | // 26 | // Complexity: 27 | // O(n log n) time, O(n) space. 28 | // 29 | // Note: 30 | // It correctly works for n <= 20 since 20! < 2^64. 31 | // 32 | // References: 33 | // B. Bonet (2008): 34 | // Efficient algorithms to rank and unrank permutations in lexicographic order. 35 | // In Proceedings of the AAAI Workshop on Search in AI and Robotics, pp.18--23. 
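// Worked example (added illustration): for n = 3 and x = (1,2,0),
// the factorial digits are d = (1,1,0), so
//   index = 1*2! + 1*1! + 0*0! = 3,
// which matches x = 120 being the 3rd (0-origin) permutation of {0,1,2}
// in lexicographic order: 012, 021, 102, 120, 201, 210.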
36 | 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | 44 | using namespace std; 45 | 46 | #define fst first 47 | #define snd second 48 | #define all(c) ((c).begin()), ((c).end()) 49 | 50 | typedef long long ll; 51 | 52 | ll index_perm(vector x) { 53 | ll r = 0; 54 | vector a(x.size()+1); 55 | for (int i = 0; i < x.size(); ++i) { 56 | int s = x[i]; 57 | for (int k = x[i]; k > 0; k &= k-1) s -= a[k]; 58 | r = (x.size() - i) * r + s; 59 | for (int k = x[i]+1; k < x.size(); k += k&-k) ++a[k]; 60 | } 61 | return r; 62 | } 63 | vector unindex_perm(ll r, int n) { 64 | vector d(n), x(n, n); 65 | for (int i = n-1; i >= 0; --i) { 66 | d[i] = r % (n - i); r /= (n - i); 67 | } 68 | vector a(n+1); 69 | for (int k = 1; k <= n; ++k) a[k] = k & -k; 70 | for (int i = 0; i < n; ++i) { 71 | for (int s: {1,2,4,8,16}) x[i] |= (x[i] >> s); 72 | for (int p = ++x[i]; p > 0; p >>= 1, x[i] |= p) 73 | if (x[i] <= n && a[x[i]] <= d[i]) d[i] -= a[x[i]]; else x[i] ^= p; 74 | for (int k = x[i]+1; k < x.size(); k += k&-k) --a[k]; 75 | } 76 | return x; 77 | } 78 | 79 | int main() { 80 | int n = 20; 81 | vector x(n); 82 | iota(all(x), 0); 83 | for (int i = 0; i < 100; ++i) { 84 | random_shuffle(all(x)); 85 | ll r = index_perm(x); 86 | auto a = unindex_perm(r, n); 87 | for (int i = 0; i < n; ++i) { 88 | if (a[i] != x[i]) { 89 | printf("wrong\n"); 90 | } 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /graph/least_common_ancestor_sparsetable.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Least common ancestor by euler tour + sparse table 3 | // 4 | // Description: 5 | // For a rooted tree T, LCA(u,v) is a vertex u 6 | // that is the deepest node that is a common ancestor of u and v. 7 | // 8 | // Algorithm: 9 | // This first finds an euler tour of the tree. 10 | // Then, RMQ(pos[u], pos[v]) = LCA(u, v), where 11 | // RMQ is the range minimum query between i and j, 12 | // where the weight is defined by the depth of the node. 13 | // 14 | // Complexity: 15 | // O(n log n) for preprocessing, 16 | // O(1) for query 17 | // 18 | // Verified: 19 | // AOJ GRL_5C 20 | // 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | using namespace std; 28 | 29 | #define fst first 30 | #define snd second 31 | #define all(c) ((c).begin()), ((c).end()) 32 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 33 | 34 | 35 | struct tree { 36 | int n; 37 | vector> adj; 38 | tree(int n) : n(n), adj(n) { } 39 | void add_edge(int s, int t) { 40 | adj[s].push_back(t); 41 | adj[t].push_back(s); 42 | } 43 | vector pos, tour, depth; 44 | vector> table; 45 | int argmin(int i, int j) { return depth[i] < depth[j] ? i : j; } 46 | void rootify(int r) { 47 | pos.resize(n); 48 | function dfs = [&](int u, int p, int d) { 49 | pos[u] = depth.size(); 50 | tour.push_back(u); 51 | depth.push_back(d); 52 | for (int v: adj[u]) { 53 | if (v != p) { 54 | dfs(v, u, d+1); 55 | tour.push_back(u); 56 | depth.push_back(d); 57 | } 58 | } 59 | }; dfs(r, r, 0); 60 | int logn = sizeof(int)*__CHAR_BIT__-1-__builtin_clz(tour.size()); // log2 61 | table.resize(logn+1, vector(tour.size())); 62 | iota(all(table[0]), 0); 63 | for (int h = 0; h < logn; ++h) 64 | for (int i = 0; i+(1< j) swap(i, j); 69 | int h = sizeof(int)*__CHAR_BIT__-1-__builtin_clz(j-i); // = log2 70 | return i == j ? 
u : tour[argmin(table[h][i], table[h][j-(1< 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | using namespace std; 18 | 19 | #define fst first 20 | #define snd second 21 | #define all(c) ((c).begin()), ((c).end()) 22 | 23 | typedef complex point; 24 | namespace std { 25 | bool operator < (point p, point q) { 26 | if (real(p) != real(q)) return real(p) < real(q); 27 | return imag(p) < imag(q); 28 | } 29 | }; 30 | double dot(point p, point q) { return real(conj(p) * q); } 31 | double cross(point p, point q) { return imag(conj(p) * q); } 32 | double EPS = 1e-8; 33 | int sign(double x) { 34 | if (x < -EPS) return -1; 35 | if (x > +EPS) return +1; 36 | return 0; 37 | } 38 | 39 | struct circle { point p; double r; }; 40 | struct line { point p, q; }; 41 | 42 | vector intersect(circle C, circle D) { 43 | double d = abs(C.p - D.p); 44 | if (sign(d - C.r + D.r) > 0) return {}; // too far 45 | if (sign(d - abs(C.r - D.r)) <= 0) return {}; // too close 46 | double a = (C.r*C.r - D.r*D.r + d*d)/(2*d); 47 | double h = sqrt(C.r*C.r - a*a); 48 | point v = (C.p - D.p) / d; 49 | if (sign(h) == 0) return {C.p + v*a}; // touch 50 | return {C.p + v*a + point(0,1)*v*h, // intersect 51 | C.p + v*a - point(0,1)*v*h}; 52 | } 53 | // 54 | // Intersection of line L and circle C. 55 | // 56 | // Let p(t) = L.p + t (L.q - L.p). We solve 57 | // |p(t) - C.p|^2 = C.r^2 58 | // By letting u = L.p - L.q, v = L.p - C.p, the above is 59 | // t^2 (u,u) + 2 t (u,v) + (v,v) = C.r^2 60 | // ~~a~~ ~~b~~ ~~c~~ 61 | // Thus 62 | // det = b^2 - ac, 63 | // t in { (b + sqrt(det))/a, c/(b + sqrt(det)) } 64 | // 65 | vector intersect(line L, circle C) { 66 | point u = L.p - L.q, v = L.p - C.p; 67 | double a = norm(u), b = dot(u,v), c = norm(v), 68 | det = b*b - a*c, r = b + sqrt(max(det, 0.0)); 69 | if (sign(det) < 0) return {}; // no solution 70 | if (sign(det) == 0) return {L.p - b/a * u}; // touch 71 | return {L.p - (b + sqrt(det))/a*u, 72 | L.p - c/(b + sqrt(det))*u}; 73 | } 74 | 75 | // 76 | // Tangent point(s) of point p and circle C 77 | // 78 | // Let q be a tangent point. 79 | // The angle between q-p-c.p is 80 | // sin(t) = r/|p - c.p|. 81 | // and the solution is 82 | // p + (c.p - p) * exp(\pm it). 83 | // 84 | // Verified: SPOJ18531 85 | // 86 | vector tangent(point p, circle c) { 87 | double sin2 = c.r*c.r/norm(p - c.p); 88 | if (sign(1-sin2) < 0) return {}; 89 | if (sign(1-sin2) == 0) return {p}; 90 | point z(sqrt(1-sin2), sqrt(sin2)); 91 | return {p+(c.p-p)*conj(z), p+(c.p-p)*z}; 92 | } 93 | 94 | int main() { 95 | } 96 | -------------------------------------------------------------------------------- /numeric/integrate.cc: -------------------------------------------------------------------------------- 1 | 2 | // 3 | // Numerical Integration (Adaptive Gauss--Lobatto formula) 4 | // 5 | // Description: 6 | // Gauss--Lobatto formula is a numerical integrator 7 | // that is exact for polynomials of degree <= 2n+1. 8 | // Adaptive Gauss--Lobatto recursively decomposes the 9 | // domain and computes integral by using G-L formula. 10 | // 11 | // Algorithm: 12 | // Above. 13 | // 14 | // Complexity: 15 | // O(#pieces) for a piecewise polynomials. 16 | // In general, it converges in O(1/n^6) for smooth functions. 17 | // For (possibly) non-smooth functions, this is the best integrator. 18 | // 19 | // Verified: 20 | // AOJ 2034 21 | // 22 | // References: 23 | // W. Gander and W. 
Gautschi (2000): 24 | // Adaptive quadrature - revisited. 25 | // BIT Numerical Mathematics, vol.40, no.1, pp.84--101. 26 | // 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | 34 | using namespace std; 35 | 36 | #define fst first 37 | #define snd second 38 | #define all(c) ((c).begin()), ((c).end()) 39 | 40 | template 41 | double integrate(F f, double lo, double hi, double eps = 1e-8) { 42 | const double th = eps / 1e-14; // (= eps / machine_epsilon) 43 | function rec = 44 | [&](double x0, double x6, double y0, double y6, int d) { 45 | const double a = sqrt(2.0/3.0), b = 1.0 / sqrt(5.0); 46 | double x3 = (x0 + x6)/2, y3 = f(x3), h = (x6 - x0)/2; 47 | double x1 = x3-a*h, x2 = x3-b*h, x4 = x3+b*h, x5 = x3+a*h; 48 | double y1 = f(x1), y2 = f(x2), y4 = f(x4), y5 = f(x5); 49 | double I1 = (y0+y6 + 5*(y2+y4)) * (h/6); 50 | double I2 = (77*(y0+y6) + 432*(y1+y5) + 625*(y2+y4) + 672*y3) * (h/1470); 51 | if (x3 + h == x3 || d > 50) return 0.0; 52 | if (d > 4 && th + (I1-I2) == th) return I2; // avoid degeneracy 53 | return (double)(rec(x0, x1, y0, y1, d+1) + rec(x1, x2, y1, y2, d+1) 54 | + rec(x2, x3, y2, y3, d+1) + rec(x3, x4, y3, y4, d+1) 55 | + rec(x4, x5, y4, y5, d+1) + rec(x5, x6, y5, y6, d+1)); 56 | }; 57 | return rec(lo, hi, f(lo), f(hi), 0); 58 | } 59 | 60 | int main() { 61 | for (int n; scanf("%d", &n); ) { 62 | if (n == 0) break; 63 | double r; 64 | scanf("%lf\n", &r); 65 | 66 | vector> p(n); 67 | for (int i = 0; i < n; ++i) 68 | scanf("%lf %lf", &p[i].fst, &p[i].snd); 69 | function f = [&](double x) { 70 | if (x < p[0].fst) return 0.0; 71 | if (x >= p.back().fst) return 0.0; 72 | for (int i = 0; i+1 < n; ++i) 73 | if (x < p[i+1].fst) 74 | return p[i].snd + (p[i+1].snd-p[i].snd)/(p[i+1].fst-p[i].fst)*(x-p[i].fst); 75 | }; 76 | function g = [&](double x) { 77 | return f(x) * f(x + r); 78 | }; 79 | printf("%.12f\n", integrate(g, p[0].fst-100, p.back().fst+100)); 80 | } 81 | } 82 | 83 | -------------------------------------------------------------------------------- /graph/eulerian_path_undirected.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Undirected Eulerian Path (Hierholzer's algorithm) 3 | // 4 | // Description: 5 | // A path (walk) P is Eulerian if P visits all edges exactly once. 6 | // A graph admits Eulerian path if and only if there are exactly two 7 | // odd-degree vertices (they are initial and terminal of a path). 8 | // 9 | // Algorithm: 10 | // Hierholzer's algorithm performs DFS from the initial vertex. 
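// The DFS consumes each traversed edge (marking its reverse as used) and appends
// a vertex to `path` only once all of its remaining edges are exhausted, so the
// finished path, read in either direction, traverses every edge exactly once;
// the splicing of sub-tours in Hierholzer's method happens implicitly through
// the recursion. An Eulerian path exists iff the graph has 0 or 2 odd-degree
// vertices (and all edges lie in one component); with 2, the path must start at
// an odd-degree vertex, which is how eulerian_path() below picks its start.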
11 | // 12 | // Complexity: 13 | // O(n + m) 14 | // 15 | // Verified: 16 | // UVA 10054 The Necklace (tour) 17 | // 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | using namespace std; 26 | 27 | #define fst first 28 | #define snd second 29 | #define all(c) ((c).begin()), ((c).end()) 30 | 31 | struct graph { 32 | int n; 33 | struct edge { int src, dst, rev; }; 34 | vector> adj; 35 | graph() : n(0) { } 36 | //graph(int n) : n(n), adj(n) { } 37 | 38 | void add_edge(int src, int dst) { 39 | n = max(n, max(src, dst)+1); 40 | if (adj.size() < n) adj.resize(n); 41 | adj[src].push_back({src, dst, (int)adj[dst].size()}); 42 | adj[dst].push_back({dst, src, (int)adj[src].size()-1}); 43 | } 44 | 45 | // destructive 46 | vector path; 47 | void visit(int u) { 48 | while (!adj[u].empty()) { 49 | auto e = adj[u].back(); 50 | adj[u].pop_back(); 51 | if (e.src >= 0) { 52 | adj[e.dst][e.rev].src = -1; 53 | visit(e.dst); 54 | } 55 | } 56 | path.push_back(u); 57 | } 58 | vector eulerian_path() { 59 | int m = 0, s = -1; 60 | for (int u = 0; u < n; ++u) { 61 | m += adj[u].size(); 62 | if (adj[u].size() % 2 == 1) s = u; 63 | } 64 | path.clear(); if (s >= 0) visit(s); 65 | if (path.size() != m/2 + 1) return {}; 66 | return path; 67 | } 68 | vector eulerian_tour() { 69 | int m = 0, s = 0; 70 | for (int u = 0; u < n; ++u) { 71 | m += adj[u].size(); 72 | if (adj[u].size() > 0) s = u; 73 | } 74 | path.clear(); visit(s); 75 | if (path.size() != m/2 + 1 || path[0] != path.back()) return {}; 76 | return path; 77 | } 78 | }; 79 | 80 | 81 | int main() { 82 | int ncase; 83 | scanf("%d", &ncase); 84 | for (int icase = 0; icase < ncase; ++icase) { 85 | if (icase > 0) printf("\n"); 86 | printf("Case #%d\n", icase+1); 87 | 88 | int m; 89 | scanf("%d", &m); 90 | graph g; 91 | for (int i = 0; i < m; ++i) { 92 | int s, t; 93 | scanf("%d %d", &s, &t); 94 | g.add_edge(s-1, t-1); 95 | } 96 | auto path = g.eulerian_tour(); 97 | if (path.empty()) { 98 | printf("some beads may be lost\n"); 99 | } else { 100 | for (int i = 0; i+1 < path.size(); ++i) 101 | printf("%d %d\n", path[i]+1, path[i+1]+1); 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /graph/prufer_code.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Prufer Code in Linear Time 3 | // 4 | // Description: 5 | // Prufer code gives one-to-one correspondence 6 | // between labeled trees and integers of length n-2. 7 | // This immediately shows the Cayley theorem: 8 | // the number of labeled trees is n^{n-2}. 9 | // The algorithm computes the Prufer code in linear 10 | // time. 11 | // 12 | // Complexity: 13 | // O(n) 14 | // 15 | // Reference: 16 | // Xiaodong Wang, Lei Wang, and Yingjie Wui (2009): 17 | // "An Optimal Algorithm for Prufer Codes", 18 | // Journal of Software Engineering and Applications, 19 | // vol.2, 111--115. 
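// Worked example: for the tree with edges (0,3),(1,3),(2,3),(3,4),(4,5) -- the
// one built in main() below -- repeatedly removing the smallest-labeled leaf and
// recording its neighbor gives the Prufer code 3 3 3 4 (length n-2 = 4):
//   remove 0 -> record 3, remove 1 -> record 3, remove 2 -> record 3,
//   remove 3 -> record 4, and stop with {4,5} remaining.
// labeledTreeToCode should return exactly this sequence, and codeToLabeledTree
// maps it back to the same edge set.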
20 | // 21 | 22 | #include 23 | 24 | using namespace std; 25 | 26 | #define fst first 27 | #define snd second 28 | #define all(c) ((c).begin()), ((c).end()) 29 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 30 | 31 | struct Tree { 32 | int n; 33 | vector> adj; 34 | Tree(int n) : n(n), adj(n) { } 35 | void addEdge(int u, int v) { 36 | adj[u].push_back(v); 37 | adj[v].push_back(u); 38 | } 39 | }; 40 | 41 | vector labeledTreeToCode(Tree T) { 42 | vector deg(T.n), parent(T.n, -1), code; 43 | function dfs = [&](int u) { 44 | deg[u] = T.adj[u].size(); 45 | for (int v: T.adj[u]) { 46 | if (v != parent[u]) { 47 | parent[v] = u; 48 | dfs(v); 49 | } 50 | } 51 | }; dfs(T.n-1); 52 | 53 | int index = -1; 54 | while (deg[++index] != 1); 55 | for (int u = index, i = 0; i < T.n-2; ++i) { 56 | int v = parent[u]; 57 | code.push_back(v); 58 | if (--deg[v] == 1 && v < index) { 59 | u = v; 60 | } else { 61 | while (deg[++index] != 1); 62 | u = index; 63 | } 64 | } 65 | return code; 66 | } 67 | 68 | Tree codeToLabeledTree(vector code) { 69 | int n = code.size() + 2; 70 | Tree T(n); 71 | vector deg(n, 1); 72 | for (int i = 0; i < n-2; ++i) 73 | ++deg[code[i]]; 74 | 75 | int index = -1; 76 | while (deg[++index] != 1); 77 | for (int u = index, i = 0; i < n-2; ++i) { 78 | int v = code[i]; 79 | T.addEdge(u, v); 80 | --deg[u]; --deg[v]; 81 | if (deg[v] == 1 && v < index) { 82 | u = v; 83 | } else { 84 | while (deg[++index] != 1); 85 | u = index; 86 | } 87 | } 88 | for (int u = 0; u < n-1; ++u) 89 | if (deg[u] == 1) T.addEdge(u, n-1); 90 | return T; 91 | } 92 | 93 | int main() { 94 | Tree T(6); 95 | T.addEdge(0, 3); 96 | T.addEdge(1, 3); 97 | T.addEdge(2, 3); 98 | T.addEdge(3, 4); 99 | T.addEdge(4, 5); 100 | auto code = labeledTreeToCode(T); 101 | for (int u: code) { 102 | cout << u << " "; 103 | } 104 | cout << endl; 105 | 106 | Tree G = codeToLabeledTree(code); 107 | for (int u = 0; u < G.adj.size(); ++u) { 108 | for (int v: G.adj[u]) { 109 | if (u < v) cout << u << " " << v << endl; 110 | } 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /math/fast_fourier_transform.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Fast Fourier Transformation 3 | // 4 | // Description: 5 | // Given a complex sequence a[0,n), where n is a power of two. 6 | // Compute 7 | // A[k] = sum_k a[k] E^k 8 | // where E = exp(2 pi i / n). 9 | // 10 | // Algorithm: 11 | // Cooley-Turkey's algorithm. 12 | // 13 | // Complexity: 14 | // O(n log n). 15 | // 16 | // Verified: 17 | // SPOJ235. 
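// Usage note: to convolve two sequences (e.g. the big-number multiplication in
// main() below), pad both to a power of two at least |a|+|b|-1, transform,
// multiply pointwise, transform back with sign = -1, and divide by N.
// A minimal sketch, assuming the typedef C and fft() defined in this file:
//
//   vector<C> convolve(vector<C> a, vector<C> b) {
//     int N = 1;
//     while (N < (int)(a.size() + b.size() - 1)) N *= 2;
//     a.resize(N); b.resize(N);
//     fft(a, +1); fft(b, +1);
//     for (int i = 0; i < N; ++i) a[i] *= b[i];
//     fft(a, -1);
//     for (int i = 0; i < N; ++i) a[i] /= N;   // unnormalized inverse transform
//     return a;
//   }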
18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | using namespace std; 28 | 29 | typedef complex C; 30 | void fft(vector &a, int sign = 1) { 31 | int n = a.size(); // n should be a power of two 32 | double theta = 8 * sign * atan(1.0) / n; 33 | for (int i = 0, j = 1; j < n - 1; ++j) { 34 | for (int k = n >> 1; k > (i ^= k); k >>= 1); 35 | if (j < i) swap(a[i], a[j]); 36 | } 37 | for (int m, mh = 1; (m = mh << 1) <= n; mh = m) { 38 | int irev = 0; 39 | for (int i = 0; i < n; i += m) { 40 | C w = exp(C(0, theta*irev)); 41 | for (int k = n >> 2; k > (irev ^= k); k >>= 1); 42 | for (int j = i; j < mh + i; ++j) { 43 | int k = j + mh; 44 | C x = a[j] - a[k]; 45 | a[j] += a[k]; 46 | a[k] = w * x; 47 | } 48 | } 49 | } 50 | } 51 | 52 | 53 | const int WIDTH = 5; 54 | const long long RADIX = 100000; // = 10^WIDTH 55 | 56 | vector parse(const char s[]) { 57 | int n = strlen(s); 58 | int m = (n + WIDTH-1) / WIDTH; 59 | vector v(m); 60 | for (int i = 0; i < m; ++i) { 61 | int b = n - WIDTH * i, x = 0; 62 | for (int a = max(0, b - WIDTH); a < b; ++a) 63 | x = x * 10 + s[a] - '0'; 64 | v[i] = x; 65 | } 66 | return v; 67 | } 68 | 69 | void print(const vector &v) { 70 | int i, N = v.size(); 71 | vector digits(N + 1, 0); 72 | long double err = 0; 73 | 74 | for (i = 0; i < N; i++) { 75 | digits[i] = (long long)(v[i].real() + 0.5); 76 | } 77 | long long c = 0; 78 | for (i = 0; i < N; i++) { 79 | c += digits[i]; 80 | digits[i] = c % RADIX; 81 | c /= RADIX; 82 | } 83 | for (i = N-1; i > 0 && digits[i] == 0; i--); 84 | printf("%lld", digits[i]); 85 | for (i--; i >= 0; i--) 86 | printf("%.*lld", WIDTH, digits[i]); 87 | printf("\n"); 88 | } 89 | 90 | char a[310000], b[310000]; 91 | int main() { 92 | 93 | int T; scanf("%d", &T); 94 | while (T--) { 95 | scanf("%s %s", a, b); 96 | vector A = parse(a); 97 | vector B = parse(b); 98 | 99 | int N = 1; 100 | while (N < max(A.size(), B.size())) N *= 2; 101 | N *= 2; 102 | A.resize(N); 103 | B.resize(N); 104 | 105 | fft(A, +1); 106 | fft(B, +1); 107 | for (int i = 0; i < N; i++) A[i] *= B[i]; 108 | fft(A, -1); 109 | for (int i = 0; i < N; i++) A[i] /= N; 110 | 111 | print(A); 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /other/cube.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Cube container 3 | // 4 | // Descrption: 5 | // It has a cube container which has six faces on 6 | // front, up, down, left, right, and bottom. 7 | // It admits the following three rotations 8 | // rotX: front -> up -> back -> down 9 | // rotY: left -> up -> right -> down 10 | // rotZ: left -> front -> right -> down 11 | // 12 | // Algorithm: 13 | // Trivial. 
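// The three operations are quarter turns about the left-right axis (rotX), the
// front-back axis (rotY), and the up-down axis (rotZ). A cube has 24 orientations
// (6 choices for the face on top, times 4 spins about the vertical axis), and
// next_roll() below is intended to step through all 24 of them by spinning with
// rotZ and interleaving rotX/rotY flips, as its usage comment shows.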
14 | // 15 | // Verified: 16 | // SPOJ 21526 17 | // 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | using namespace std; 25 | 26 | #define fst first 27 | #define snd second 28 | #define all(c) ((c).begin()), ((c).end()) 29 | 30 | template 31 | struct cube { 32 | T F, B, U, D, L, R; 33 | void rotX() { T x = D; D = B; B = U; U = F; F = x; } // FUBD -> DFUB 34 | void rotY() { T x = D; D = R; R = U; U = L; L = x; } // LURD -> DLUR 35 | void rotZ() { T x = B; B = R; R = F; F = L; L = x; } // LFRB -> BLFR 36 | 37 | // usage: 38 | // for (int i = 0; i < 24; ++i) { 39 | // /* do something */ 40 | // c.next_roll(); 41 | // } 42 | // 43 | // or 44 | // 45 | // do { 46 | // /* do something */ 47 | // } while (c.next_roll()); 48 | bool next_roll() { 49 | static int it = 0; 50 | rotZ(); 51 | if (it % 8 == 3) rotX(); 52 | if (it % 8 == 7) rotY(); 53 | return it = (it == 23 ? 0 : it+1); 54 | } 55 | bool operator==(cube c) const { 56 | for (int k = 0; k < 6; ++k) { 57 | if (U != c.U || F != c.F) continue; 58 | for (int i = 0; i < 4; ++i) { 59 | if (L == c.L && F == c.F && R == c.R && L == c.R) return true; 60 | c.rotZ(); 61 | } 62 | if (k % 2) c.rotY(); else c.rotZ(); 63 | } 64 | return false; 65 | } 66 | }; 67 | template 68 | ostream &operator<<(ostream &ofs, cube c) { 69 | return (ofs << c.F << c.B << c.U << c.D << c.L << c.R); 70 | } 71 | 72 | void test() { 73 | cube c = {1,2,3,4,5,6}; 74 | do { 75 | cout << c << endl; 76 | } while (c.next_roll()); 77 | } 78 | 79 | int main() { 80 | test(); 81 | return 0; 82 | 83 | int ncase; scanf("%d", &ncase); 84 | for (int icase = 0; icase < ncase; ++icase) { 85 | cube c; 86 | c.F = 0; c.U = 1; c.D = 2; c.L = 3; c.R = 4; c.B = 5; 87 | char s[6][1024]; 88 | for (int i = 0; i < 6; ++i) 89 | scanf("%s", s[i]); 90 | int q; scanf("%d", &q); 91 | for (int i = 0; i < q; ++i) { 92 | char t[1024]; 93 | int k; 94 | scanf("%s %d", t, &k); 95 | k %= 4; 96 | if (t[0] == 'X') { 97 | while (k--) c.rotX(); 98 | } else if (t[0] == 'Y') { 99 | while (k--) c.rotY(); 100 | } else { 101 | while (k--) c.rotZ(); 102 | } 103 | } 104 | printf("%s %s %s %s %s %s\n", 105 | s[c.F], s[c.U], s[c.D], s[c.L], s[c.R], s[c.B]); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /graph/transitive_reduction_dag.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Transitive Reduction of DAG 3 | // 4 | // Description: 5 | // A transitive reduction of a graph G = (V, E) is a 6 | // a graph H = (V, F) such that transitive closures of 7 | // H and G are the same. 8 | // There are possibly many transitive reductions with 9 | // the fewest edges, and finding one of them is NP-hard. 10 | // On he other hand, if a graph is directed acyclic, 11 | // its transitive reduction uniquely exists and can be 12 | // found in O(nm) time. 13 | // Note that transitive closure and reduction have the 14 | // same time complexity on DAG. 15 | // 16 | // Algorithm: 17 | // For each vertex u, compute longest path distance from u 18 | // to v in adj[u]. Then, remove all edges (u,v) with d(u,v) > 1. 19 | // 20 | // Complexity: 21 | // O(nm). Usually the coefficient is not so large. 
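// Worked example: for the DAG with edges 0->1, 1->2, 0->2, the longest-path
// distance from 0 to 2 is 2, so the edge 0->2 is redundant and gets removed;
// the reduction keeps exactly {0->1, 1->2}. A tiny check, assuming the graph
// struct defined below:
//
//   graph g(3);
//   g.add_edge(0, 1); g.add_edge(1, 2); g.add_edge(0, 2);
//   g.transitive_reduction();   // g.adj[0] should now contain only 1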
22 | // 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | using namespace std; 31 | 32 | #define fst first 33 | #define snd second 34 | #define all(c) ((c).begin()), ((c).end()) 35 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 36 | 37 | struct graph { // DAG 38 | int n; 39 | vector> adj; 40 | graph(int n) : n(n), adj(n) { } 41 | void add_edge(int i, int j) { 42 | adj[i].push_back(j); 43 | } 44 | 45 | void transitive_reduction() { 46 | vector ord, d(n, -1); 47 | function rec = [&](int u) { 48 | d[u] = 0; 49 | for (int v: adj[u]) 50 | if (d[v] < 0) rec(v); 51 | ord.push_back(u); 52 | for (int v: ord) d[v] = 0; 53 | for (int i = ord.size()-1; i >= 0; --i) 54 | for (int w: adj[ord[i]]) 55 | d[w] = max(d[w], d[ord[i]] + 1); 56 | 57 | adj[u].erase( 58 | remove_if(all(adj[u]), [&](int v) { return d[v] > 1; }), 59 | adj[u].end() 60 | ); 61 | }; 62 | for (int u = 0; u < n; ++u) 63 | if (d[u] < 0) rec(u); 64 | } 65 | }; 66 | 67 | 68 | // === tick a time === 69 | #include 70 | double tick() { 71 | static clock_t oldtick; 72 | clock_t newtick = clock(); 73 | double diff = 1.0*(newtick - oldtick) / CLOCKS_PER_SEC; 74 | oldtick = newtick; 75 | return diff; 76 | } 77 | 78 | int main() { 79 | int n = 10000, m = 100 * n; 80 | set> edges; 81 | while (edges.size() < m) { 82 | int u = rand() % n, v = rand() % n; 83 | if (u == v) continue; 84 | if (u > v) swap(u, v); 85 | edges.insert({u, v}); 86 | } 87 | graph g(n); 88 | for (auto p: edges) 89 | g.add_edge(p.fst, p.snd); 90 | 91 | 92 | tick(); 93 | g.transitive_reduction(); 94 | cout << tick() << endl; 95 | 96 | int mm = 0; 97 | for (int u = 0; u < n; ++u) 98 | mm += g.adj[u].size(); 99 | cout << m << " ==> " << mm << " ( " << mm/double(m) << ")" << endl; 100 | } 101 | -------------------------------------------------------------------------------- /data_structure/persistent_rope.cc: -------------------------------------------------------------------------------- 1 | // Persistent Rope 2 | // 3 | // Description: 4 | // Rope is a binary tree data structure to maintains a sequence. 5 | // 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | using namespace std; 21 | 22 | #define fst first 23 | #define snd second 24 | #define all(c) ((c).begin()), ((c).end()) 25 | 26 | 27 | struct rope { 28 | struct node { 29 | char v; 30 | node *l, *r; 31 | int s; 32 | node(char v, node *l, node *r) : v(v), l(l), r(r) { 33 | s = 1 + (l ? l->s : 0) + (r ? r->s : 0); 34 | } 35 | } *root; 36 | node *join(node *a, node *b) { 37 | auto R = [](int a, int b) { return rand() % (a + b) < a; }; 38 | if (!a || !b) return a ? a : b; 39 | if (R(a->s, b->s)) return new node(a->v, a->l, join(a->r, b)); 40 | else return new node(b->v, join(a, b->l), b->r); 41 | } 42 | pair split(node *a, int s) { 43 | if (!a || s <= 0) return {0, a}; 44 | if (a->s <= s) return {a, 0}; 45 | if (a->l && s <= a->l->s) { 46 | auto p = split(a->l, s); 47 | return {p.fst, new node(a->v, p.snd, a->r)}; 48 | } else { 49 | auto p = split(a->r, s - (a->l ? 
a->l->s : 0) - 1); 50 | return {new node(a->v, a->l, p.fst), p.snd}; 51 | } 52 | } 53 | pair cut(node *a, int l, int r) { // (sub, rest) 54 | if (l >= r) return {0, a}; 55 | auto p = split(a, l), q = split(p.snd, r - l); 56 | return {q.fst, join(p.fst, q.snd)}; 57 | } 58 | rope(const char s[]) { 59 | function build = [&](int l, int r) { 60 | if (l >= r) return (node*)0; 61 | int m = (l + r) / 2; 62 | return new node(s[m], build(l, m), build(m+1, r)); 63 | }; 64 | root = build(0, strlen(s)); 65 | } 66 | rope(rope::node *r) : root(r) { } 67 | int size() const { return root ? root->s : 0; } 68 | rope insert(int k, const char s[]) { 69 | auto p = split(root, k); 70 | return {join(p.fst, join(rope(s).root, p.snd))}; 71 | } 72 | rope substr(int l, int r) { return {cut(root, l, r).fst}; } 73 | rope erase(int l, int r) { return {cut(root, l, r).snd}; } 74 | char at(int k) const { 75 | function rec = [&](node *a) { 76 | int s = a->l ? a->l->s : 0; 77 | if (k == s) return a->v; 78 | if (k < s) return rec(a->l); 79 | k -= s+1; return rec(a->r); 80 | }; 81 | return rec(root); 82 | } 83 | string str() const { 84 | stringstream ss; 85 | function rec = [&](node *a) { 86 | if (!a) return; 87 | rec(a->l); ss << a->v; rec(a->r); 88 | }; rec(root); 89 | return ss.str(); 90 | } 91 | }; 92 | 93 | int main() { 94 | rope a("abcde"), b("ABCDE"); 95 | for (int i = 0; i < 5; ++i) { 96 | cout << a.at(i) << " "; 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /number_theory/divisor_sigma.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Divisor function 3 | // 4 | // Description: 5 | // sigma(n) = sum[n % d == 0] d 6 | // equivalently, 7 | // sigma(p^k) = 1 + p + p^2 + ... + p^k 8 | // with multiplicative. 9 | // 10 | // Complexity: 11 | // divisor_sigma(n): O(sqrt(n)) by trial division. 12 | // divisor_sigma(lo,hi): O((hi-lo) loglog(hi)) by prime sieve. 
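// Worked example: sigma(12) = 1+2+3+4+6+12 = 28; by multiplicativity, since
// 12 = 2^2 * 3, the same value is (1+2+4)*(1+3) = 7*4 = 28, which is the
// sigma(p^k) = 1 + p + ... + p^k factorization the sieve version below exploits.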
13 | // 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | using namespace std; 21 | 22 | #define fst first 23 | #define snd second 24 | #define all(c) ((c).begin()), ((c).end()) 25 | 26 | typedef long long ll; 27 | ll divisor_sigma(ll n) { 28 | ll sigma = 0, d = 1; 29 | for (; d*d < n; ++d) 30 | if (n % d == 0) sigma += d + n/d; 31 | if (d*d == n) sigma += d; 32 | return sigma; 33 | } 34 | vector primes(ll lo, ll hi) { // primes in [lo, hi) 35 | const ll M = 1 << 14, SQR = 1 << 16; 36 | vector composite(M), small_composite(SQR); 37 | 38 | vector> sieve; 39 | for (ll i = 3; i < SQR; i+=2) { 40 | if (!small_composite[i]) { 41 | ll k = i*i + 2*i*max(0.0, ceil((lo - i*i)/(2.0*i))); 42 | sieve.push_back({2*i, k}); 43 | for (ll j = i*i; j < SQR; j += 2*i) 44 | small_composite[j] = 1; 45 | } 46 | } 47 | vector ps; 48 | if (lo <= 2) { ps.push_back(2); lo = 3; } 49 | for (ll k = lo|1, low = lo; low < hi; low += M) { 50 | ll high = min(low + M, hi); 51 | fill(all(composite), 0); 52 | for (auto &z: sieve) 53 | for (; z.snd < high; z.snd += z.fst) 54 | composite[z.snd - low] = 1; 55 | for (; k < high; k+=2) 56 | if (!composite[k - low]) ps.push_back(k); 57 | } 58 | return ps; 59 | } 60 | vector primes(ll n) { // primes in [0,n) 61 | return primes(0,n); 62 | } 63 | vector divisor_sigma(ll lo, ll hi) { // sigma(n) for all n in [lo, hi) 64 | vector ps = primes(sqrt(hi)+1); 65 | vector res(hi-lo), sigma(hi-lo, 1); 66 | iota(all(res), lo); 67 | 68 | for (ll p: ps) { 69 | for (ll k = ((lo+(p-1))/p)*p; k < hi; k += p) { 70 | ll b = 1; 71 | while (res[k-lo] > 1 && res[k-lo] % p == 0) { 72 | res[k-lo] /= p; 73 | b = 1 + b * p; 74 | } 75 | sigma[k-lo] *= b; 76 | } 77 | } 78 | for (ll k = lo; k < hi; ++k) 79 | if (res[k-lo] > 1) 80 | sigma[k-lo] *= (1 + res[k-lo]); 81 | return sigma; // sigma[k-lo] = sigma(k) 82 | } 83 | 84 | int main() { 85 | for (int i = 0; i < 17; ++i) 86 | cout << divisor_sigma(i) << " "; 87 | cout << endl; 88 | 89 | auto x = divisor_sigma(0, 17); 90 | for (int i = 0; i < 17; ++i) 91 | cout << x[i] << " "; 92 | cout << endl; 93 | 94 | for (int iter = 0; iter < 100; ++iter) { 95 | int lo = rand(), hi = lo + rand(); 96 | auto x = divisor_sigma(lo, hi); 97 | for (int n = lo; n < hi; ++n) 98 | if (x[n-lo] != divisor_sigma(n)) cout << "!!" << endl; 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /graph/bipartite_matching_HK.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Hopcroft-Karp's maximum cardinality bipartite matching 3 | // 4 | // Description: 5 | // Compute the maximum cardinality matching for bipartite graph. 6 | // 7 | // Algorithm: 8 | // The algorithm iterates following procedures: 9 | // (1) BFS from the source to get the distance to the sink. 10 | // If not reachable, there are no augment path hence break. 11 | // (2) Find vertex disjoint shortest augment paths by DFS. 12 | // It can be shown that the outer-loop is atmost O(\sqrt{n}) times 13 | // therefore the whole complexity is O(m \sqrt{n}). 14 | // Note that this is a specialzation of Dinic's maximum flow. 15 | // 16 | // 17 | // Complexity: 18 | // O(m \sqrt{n}) time 19 | // 20 | // Verified: 21 | // SPOJ 4206: Fast Maximum Matching 22 | // 23 | // References: 24 | // J. E. Hopcroft and R. M. Karp (1973): 25 | // An n^5/2 algorithm for maximum matchings in bipartite graphs. 26 | // SIAM Journal on Computing, vol.2, no.4, pp.225-231. 
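// Usage sketch, assuming the graph struct below: left vertices are 0..L-1 and
// right vertices are 0..R-1, passed separately to add_edge (the +L offset is
// handled internally).
//
//   graph g(3, 3);                     // L = 3, R = 3
//   g.add_edge(0, 0); g.add_edge(0, 1);
//   g.add_edge(1, 1); g.add_edge(2, 2);
//   int m = g.maximum_matching();      // expected: 3, e.g. {0-0, 1-1, 2-2}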
27 | // 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | using namespace std; 39 | 40 | struct graph { 41 | int L, R; 42 | vector> adj; 43 | graph(int L, int R) : L(L), R(R), adj(L+R) { } 44 | void add_edge(int u, int v) { 45 | adj[u].push_back(v+L); 46 | adj[v+L].push_back(u); 47 | } 48 | int maximum_matching() { 49 | vector level(L), mate(L+R, -1); 50 | 51 | function levelize = [&]() { // BFS 52 | queue Q; 53 | for (int u = 0; u < L; ++u) { 54 | level[u] = -1; 55 | if (mate[u] < 0) { 56 | level[u] = 0; 57 | Q.push(u); 58 | } 59 | } 60 | while (!Q.empty()) { 61 | int u = Q.front(); Q.pop(); 62 | for (int w: adj[u]) { 63 | int v = mate[w]; 64 | if (v < 0) return true; 65 | if (level[v] < 0) { 66 | level[v] = level[u] + 1; 67 | Q.push(v); 68 | } 69 | } 70 | } 71 | return false; 72 | }; 73 | function augment = [&](int u) { // DFS 74 | for (int w: adj[u]) { 75 | int v = mate[w]; 76 | if (v < 0 || (level[v] > level[u] && augment(v))) { 77 | mate[u] = w; 78 | mate[w] = u; 79 | return true; 80 | } 81 | } 82 | return false; 83 | }; 84 | int match = 0; 85 | while (levelize()) 86 | for (int u = 0; u < L; ++u) 87 | if (mate[u] < 0 && augment(u)) 88 | ++match; 89 | return match; 90 | } 91 | }; 92 | 93 | int main() { 94 | int L, R, m; 95 | scanf("%d %d %d", &L, &R, &m); 96 | graph g(L, R); 97 | for (int i = 0; i < m; ++i) { 98 | int u, v; 99 | scanf("%d %d", &u, &v); 100 | g.add_edge(u, v); 101 | } 102 | printf("%d\n", g.maximum_matching()); 103 | } 104 | -------------------------------------------------------------------------------- /graph/least_common_ancestor_tarjan.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Offline least common ancestor 3 | // 4 | // Description 5 | // For a rooted tree T, LCA(u,v) is a vertex u 6 | // that is the deepest node that is a common ancestor of u and v. 7 | // It computes all lcas of (u_j, v_j) for v = 1, ..., q. 8 | // 9 | // Algorithm 10 | // Tarjan's dfs and union-find. 11 | // 12 | // Complexity: 13 | // O((m+q) a(n)), where a(n) is the inverse Ackermann function. 14 | // 15 | // Verified: 16 | // SPOJ14932 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | using namespace std; 25 | 26 | #define fst first 27 | #define snd second 28 | #define all(c) ((c).begin()), ((c).end()) 29 | 30 | struct graph { 31 | int n; 32 | vector> adj; 33 | graph(int n = 0) : n(n), adj(n) { } 34 | void add_edge(int src, int dst) { 35 | n = max(n, max(src, dst)+1); 36 | adj.resize(n); 37 | adj[src].push_back(dst); 38 | } 39 | struct query { int u, v, a; }; 40 | struct union_find { 41 | vector p; 42 | union_find(int n) : p(n, -1) { }; 43 | bool unite(int u, int v) { 44 | if ((u = root(u)) == (v = root(v))) return false; 45 | if (p[u] > p[v]) swap(u, v); 46 | p[u] += p[v]; p[v] = u; 47 | return true; 48 | } 49 | int root(int u) { return p[u] < 0 ? 
u : p[u] = root(p[u]); } 50 | }; 51 | void lca(vector &queries) { 52 | vector> Q(n); 53 | for (auto &q: queries) { 54 | Q[q.u].push_back(&q); 55 | Q[q.v].push_back(&q); 56 | } 57 | union_find uf(n); 58 | vector anc(n), color(n); 59 | iota(all(anc), 0); 60 | function rec = [&](int u) { 61 | for (auto v: adj[u]) { 62 | rec(v); 63 | uf.unite(u, v); 64 | anc[uf.root(u)] = u; 65 | } 66 | color[u] = 1; 67 | for (auto it: Q[u]) { 68 | if (it->u != u) swap(it->u, it->v); 69 | if (color[it->v] == 1) it->a = anc[uf.root(it->v)]; 70 | } 71 | }; 72 | vector deg(n); 73 | for (int u = 0; u < n; ++u) 74 | for (auto v: adj[u]) 75 | ++deg[v]; 76 | for (int u = 0; u < n; ++u) 77 | if (deg[u] == 0) rec(u); 78 | } 79 | }; 80 | 81 | int main() { 82 | int ncase; scanf("%d", &ncase); 83 | for (int icase = 0; icase < ncase; ++icase) { 84 | printf("Case %d:\n", icase+1); 85 | int n; scanf("%d", &n); 86 | graph g(n); 87 | for (int i = 0; i < n; ++i) { 88 | int k; scanf("%d", &k); 89 | for (int j = 0; j < k; ++j) { 90 | int l; scanf("%d", &l); 91 | g.add_edge(i, l-1); 92 | } 93 | } 94 | int q; scanf("%d", &q); 95 | vector queries; 96 | for (int i = 0; i < q; ++i) { 97 | int u, v; scanf("%d %d", &u, &v); 98 | queries.push_back({u-1, v-1, -1}); 99 | } 100 | g.lca(queries); 101 | for (auto q: queries) 102 | printf("%d\n", q.a+1); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /geometry/rectangle_union.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Area of Union of Rectangles (Bentley) 3 | // 4 | // Description: 5 | // For a given set of rectangles, it gives the area of the union. 6 | // This problem is sometines called the Klee's measure problem [Klee'77]. 7 | // 8 | // Algorithm: 9 | // Bentley's plane-sweep algorithm [Bentley'77]. 10 | // We first apply the coordinate compression technique. 11 | // Then the y-structure, which is called measure tree, is simply implemented 12 | // by using segment tree data structure. 13 | // 14 | // Complexity: 15 | // O(n log n) time and O(n) space. 16 | // 17 | // Verify: 18 | // LightOJ 1120: Rectangle Union 19 | // 20 | // References: 21 | // 22 | // V. Klee (1977): 23 | // Can the measure of \cup[a_i, b_i] be computed in less than O(n \log n) steps? 24 | // American Mathematical Monthly, vol.84, pp. 284--285. 25 | // 26 | // J. L. Bentley (1977): 27 | // Algorithms for Klee's rectangle problems. 28 | // Unpublished notes, Computer Science Department, Carnegie Mellon University. 
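// Usage sketch, assuming the rectangle struct and rectangle_area() below
// (coordinates are {xl, yl, xh, yh}):
//
//   vector<rectangle> rs = {{0, 0, 2, 2}, {1, 1, 3, 3}};
//   long long a = rectangle_area(rs);   // expected: 4 + 4 - 1 = 7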
29 | // 30 | 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | using namespace std; 39 | 40 | #define fst first 41 | #define snd second 42 | #define all(c) ((c).begin()), ((c).end()) 43 | 44 | struct rectangle { int xl, yl, xh, yh; }; 45 | long long rectangle_area(vector rs) { 46 | vector ys; // coordinate compression 47 | for (int i = 0; i < rs.size(); ++i) { 48 | ys.push_back(rs[i].yl); 49 | ys.push_back(rs[i].yh); 50 | } 51 | sort(all(ys)); ys.erase(unique(all(ys)), ys.end()); 52 | 53 | int n = ys.size(); // measure tree 54 | vector C(8*n), A(8*n); 55 | function aux = 56 | [&](int a, int b, int c, int l, int r, int k) { 57 | if ((a = max(a,l)) >= (b = min(b,r))) return; 58 | if (a == l && b == r) C[k] += c; 59 | else { 60 | aux(a, b, c, l, (l+r)/2, 2*k+1); 61 | aux(a, b, c, (l+r)/2, r, 2*k+2); 62 | } 63 | if (C[k]) A[k] = ys[r] - ys[l]; 64 | else A[k] = A[2*k+1] + A[2*k+2]; 65 | }; 66 | 67 | struct event { int x, l, h, c; }; // plane sweep 68 | vector es; 69 | for (auto r: rs) { 70 | int l = distance(ys.begin(), lower_bound(all(ys), r.yl)); 71 | int h = distance(ys.begin(), lower_bound(all(ys), r.yh)); 72 | es.push_back({r.xl, l, h, +1}); 73 | es.push_back({r.xh, l, h, -1}); 74 | } 75 | sort(all(es), [](event a, event b) { return a.x != b.x ? a.x < b.x : a.c > b.c; }); 76 | long long area = 0, prev = 0; 77 | for (auto &e: es) { 78 | area += (e.x - prev) * A[0]; 79 | prev = e.x; 80 | aux(e.l,e.h,e.c,0,n,0); 81 | } 82 | return area; 83 | } 84 | 85 | 86 | int main() { 87 | int ncase; scanf("%d", &ncase); 88 | for (int icase = 0; icase < ncase; ++icase) { 89 | int n; scanf("%d", &n); 90 | vector rs(n); 91 | for (int i = 0; i < n; ++i) 92 | scanf("%d %d %d %d", &rs[i].xl, &rs[i].yl, &rs[i].xh, &rs[i].yh); 93 | printf("Case %d: %lld\n", icase+1, rectangle_area(rs)); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /data_structure/randomized_binary_search_tree.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Randomized Binary Search Tree (merge-split) 3 | // 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | using namespace std; 17 | 18 | #define fst first 19 | #define snd second 20 | #define all(c) ((c).begin()), ((c).end()) 21 | 22 | template 23 | struct randomized_binary_search_tree { 24 | struct node { 25 | T x; 26 | node *l, *r; 27 | int s; 28 | } *root; 29 | randomized_binary_search_tree() : root(0) { } 30 | 31 | node *update(node *t) { 32 | if (!t) return t; 33 | t->s = 1; 34 | if (t->l) t->s += t->l->s; 35 | if (t->r) t->s += t->r->s; 36 | return t; 37 | } 38 | node *make_node(const T&x, node *l = 0, node *r = 0) { 39 | return update(new node({x, l, r})); 40 | } 41 | node *merge(node *a, node *b) { 42 | if (!a || !b) return a ? 
a : b; 43 | if (rand() % (a->s + b->s) < a->s) { 44 | a->r = merge(a->r, b); 45 | return update(a); 46 | } else { 47 | b->l = merge(a, b->l); 48 | return update(b); 49 | } 50 | } 51 | pair split(node *a, const T &x) { 52 | if (!a) return {0, 0}; 53 | if (a->x < x) { 54 | auto p = split(a->r, x); 55 | a->r = p.fst; 56 | return {update(a), p.snd}; 57 | } else { 58 | auto p = split(a->l, x); 59 | a->l = p.snd; 60 | return {p.fst, update(a)}; 61 | } 62 | } 63 | void insert(const T& x) { 64 | auto p = split(root, x); 65 | root = merge(merge(p.fst, make_node(x)), p.snd); 66 | } 67 | node *remove(node *t, const T &x) { 68 | if (!t) return t; 69 | if (t->x == x) return merge(t->l, t->r); 70 | if (t->x < x) t->r = remove(t->r, x); 71 | else t->l = remove(t->l, x); 72 | return update(t); 73 | } 74 | void remove(const T& x) { 75 | root = remove(root, x); 76 | } 77 | node *find(const T &x) { 78 | node *t = root; 79 | while (t && t->x != x) 80 | t = (t->x < x ? t->r : t->l); 81 | return t; 82 | } 83 | }; 84 | 85 | #include 86 | double tick() { 87 | static clock_t oldtick; 88 | clock_t newtick = clock(); 89 | double diff = 1.0*(newtick - oldtick) / CLOCKS_PER_SEC; 90 | oldtick = newtick; 91 | return diff; 92 | } 93 | 94 | int main() { 95 | const int n = 10000; 96 | tick(); 97 | multiset S; 98 | randomized_binary_search_tree T; 99 | for (int i = 0; i < n; ++i) { 100 | int x = rand() % 100; 101 | T.insert(x); 102 | S.insert(x); 103 | } 104 | for (int i = 0; i < n; ++i) { 105 | int x = rand() % 100; 106 | T.remove(x); 107 | auto it = S.find(x); 108 | if (it != S.end()) S.erase(it); 109 | } 110 | for (int i = 0; i < n; ++i) { 111 | int x = rand() % 100; 112 | if ((T.find(x) != 0) != (S.find(x) != S.end())) cout << "!" << endl; 113 | } 114 | cout << "tick: " << tick() << endl; 115 | } 116 | -------------------------------------------------------------------------------- /graph/strongly_connected_component_kosaraju.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Kosaraju's strongly connected component 3 | // 4 | // Description: 5 | // For a graph G = (V, E), u and v are strongly connected if 6 | // there are paths u -> v and v -> u. This defines an equivalent 7 | // relation, and its equivalent class is called a strongly 8 | // connected component. 9 | // 10 | // Algorithm: 11 | // Kosaraju's algorithm performs DFS on G and rev(G). 12 | // First DFS finds topological ordering of SCCs, and 13 | // the second DFS extracts components. 14 | // 15 | // Complexity: 16 | // O(n + m) 17 | // 18 | // Verified: 19 | // SPOJ 6818 20 | // 21 | // References: 22 | // A. V. Aho, J. E. Hopcroft, and J. D. Ullman (1983): 23 | // Data Structures and Algorithms, 24 | // Addison-Wesley. 
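// Note: because the first DFS pushes a vertex only after all its descendants,
// the second pass (over ord in reverse, on the reversed graph) peels off SCCs
// starting from a source component of G; the returned list is therefore in
// topological order of the condensation, i.e. every edge of G goes from a
// component that appears no later in the list than the component of its head.
// main() below only uses component ids, but this ordering is handy for DP over
// the DAG of components.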
25 | // 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | 39 | using namespace std; 40 | 41 | #define fst first 42 | #define snd second 43 | #define all(c) ((c).begin()), ((c).end()) 44 | 45 | 46 | struct graph { 47 | int n; 48 | vector> adj, rdj; 49 | graph(int n) : n(n), adj(n), rdj(n) { } 50 | void add_edge(int src, int dst) { 51 | adj[src].push_back(dst); 52 | rdj[dst].push_back(src); 53 | } 54 | 55 | vector> strongly_connected_components() { // kosaraju 56 | vector ord, visited(n); 57 | vector> scc; 58 | function>&, vector&)> dfs 59 | = [&](int u, vector> &adj, vector &out) { 60 | visited[u] = true; 61 | for (int v: adj[u]) 62 | if (!visited[v]) dfs(v, adj, out); 63 | out.push_back(u); 64 | }; 65 | for (int u = 0; u < n; ++u) 66 | if (!visited[u]) dfs(u, adj, ord); 67 | fill(all(visited), false); 68 | for (int i = n-1; i >= 0; --i) 69 | if (!visited[ord[i]]) 70 | scc.push_back({}), dfs(ord[i], rdj, scc.back()); 71 | return scc; 72 | } 73 | }; 74 | 75 | int main() { 76 | int n, m; 77 | scanf("%d %d", &n, &m); 78 | graph g(n); 79 | for (int k = 0; k < m; ++k) { 80 | int i, j; 81 | scanf("%d %d", &i, &j); 82 | g.add_edge(i-1, j-1); 83 | } 84 | 85 | vector> scc = g.strongly_connected_components(); 86 | vector outdeg(scc.size()); 87 | vector id(n); 88 | for (int i = 0; i < scc.size(); ++i) 89 | for (int u: scc[i]) id[u] = i; 90 | for (int u = 0; u < n; ++u) 91 | for (int v: g.adj[u]) 92 | if (id[u] != id[v]) ++outdeg[id[u]]; 93 | 94 | if (count(all(outdeg), 0) != 1) { 95 | printf("0\n"); 96 | } else { 97 | int i = find(all(outdeg), 0) - outdeg.begin(); 98 | sort(all(scc[i])); 99 | printf("%d\n%d", scc[i].size(), scc[i][0]+1); 100 | for (int j = 1; j < scc[i].size(); ++j) 101 | printf(" %d", scc[i][j]+1); 102 | printf("\n"); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /geometry/convex_hull.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Convex hull of 2D points 3 | // 4 | // Description: 5 | // Find a convex hull of point sets. 6 | // 7 | // Algorithm: 8 | // Andrew's monotone chain. 9 | // 10 | // References: 11 | // A. M. Andrew (1979): 12 | // Another efficient algorithm for convex hulls in two dimensions. 13 | // Information Processing Letters, vol.9, pp.216-219. 14 | // 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | using namespace std; 28 | 29 | #define ALL(c) c.begin(), c.end() 30 | #define FOR(i,c) for(typeof(c.begin())i=c.begin();i!=c.end();++i) 31 | #define REP(i,n) for(int i=0;i Point; 37 | #define X real() 38 | #define Y imag() 39 | Value dot(Point a, Point b) { return real(conj(a)*b); } 40 | Value cross(Point a, Point b) { return imag(conj(a)*b); } 41 | Value dist2(Point a, Point b) { return dot(a-b, a-b); } 42 | 43 | int ccw(Point a, Point b, Point c) { 44 | b -= a; c -= a; 45 | if (cross(b,c) > 0) return +1; // counter clockwise 46 | if (cross(b,c) < 0) return -1; // clockwise 47 | if (dot(b,c) < 0) return +2; // c--a--b on line 48 | if (dot(b,b) < dot(c,c)) return -2; // a--b--c on lne 49 | return 0; 50 | } 51 | 52 | // Convex Hull 53 | // 54 | // Algorithm: 55 | // Andrew's monotone chain 56 | namespace std { 57 | bool operator < (Point a, Point b) { // bottom-left 58 | return a.Y != b.Y ? 
a.Y < b.Y : a.X < b.X; 59 | } 60 | } 61 | vector convexHull(vector p) { 62 | int n = p.size(), k = 0; 63 | vector h(2*n); 64 | sort(ALL(p)); 65 | for (int i = 0; i < n; h[k++] = p[i++]) 66 | while (k >= 2 && ccw(h[k-2], h[k-1], p[i]) <= 0) --k; 67 | for (int i = n-2, t = k+1; i >= 0; h[k++] = p[i--]) 68 | while (k >= t && ccw(h[k-2], h[k-1], p[i]) <= 0) --k; 69 | return vector(h.begin(), h.begin() + k - (k > 1)); 70 | } 71 | 72 | 73 | // SPOJ 26: Build the Fence 74 | #define prev(p, i) ((i)-1>=0 ? p[(i)-1]: p[(i)-1+p.size()]) 75 | #define curr(p, i) ((i) < p.size() ? p[i] : p[(i) - p.size()]) 76 | #define next(p, i) ((i)+1 dic; 78 | void solve() { 79 | static int _count; 80 | if (_count++ > 0) printf("\n"); 81 | 82 | dic.clear(); 83 | int n; scanf("%d", &n); 84 | vector p(n); 85 | REP(i,n) { 86 | scanf("%lld %lld", &p[i].X, &p[i].Y); 87 | if (dic[p[i]] == 0) dic[p[i]] = i+1; 88 | } 89 | vector ch = convexHull(p); 90 | double len = 0; 91 | vector out; 92 | REP(i, ch.size()) len += sqrt(dist2(curr(ch,i), next(ch,i))); 93 | printf("%.2lf\n", len); 94 | REP(i, ch.size()) { 95 | if (i > 0) printf(" "); 96 | printf("%d", dic[ch[i]]); 97 | } 98 | printf("\n"); 99 | } 100 | 101 | int main() { 102 | int T; scanf("%d", &T); 103 | while (T--) { 104 | solve(); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /graph/strongly_connected_component_gabow.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Gabow's strongly connected component 3 | // 4 | // Description: 5 | // For a graph G = (V, E), u and v are strongly connected if 6 | // there are paths u -> v and v -> u. This defines an equivalent 7 | // relation, and its equivalent class is called a strongly 8 | // connected component. 9 | // 10 | // 11 | // Algorithm: 12 | // Gabow's double stack algorithm. 13 | // This is simpler than Tarjan's algorithm. 14 | // In my opinion, use Gabow or Kosaraju for SCC problems. 15 | // 16 | // Complexity: 17 | // O(n + m) 18 | // 19 | // Verified: 20 | // SPOJ 6818 21 | // 22 | // References: 23 | // H. N. Gabow (2000): 24 | // Path-based depth first search strong and biconnected components. 25 | // Information Processing Letters, vol.74 no.3-4, pp.107-114. 
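// Note on the two stacks: S holds the vertices of the current DFS path that have
// not yet been assigned to a component, and B holds indices into S marking the
// candidate component roots. An edge to a vertex v still on S pops boundaries
// from B down to I[v], merging the candidate components above it; when the DFS
// of u finishes with I[u] == B.back(), u is a component root and everything above
// it on S is popped off as one strongly connected component.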
26 | // 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | 40 | using namespace std; 41 | 42 | #define fst first 43 | #define snd second 44 | #define all(c) ((c).begin()), ((c).end()) 45 | 46 | 47 | struct graph { 48 | int n; 49 | vector> adj; 50 | graph(int n) : n(n), adj(n) { } 51 | void add_edge(int src, int dst) { 52 | adj[src].push_back(dst); 53 | } 54 | 55 | vector> strongly_connected_components() { 56 | vector> scc; 57 | vector S, B, I(n); 58 | function dfs = [&](int u) { 59 | B.push_back(I[u] = S.size()); 60 | S.push_back(u); 61 | for (int v: adj[u]) { 62 | if (!I[v]) dfs(v); 63 | else while (I[v] < B.back()) B.pop_back(); 64 | } 65 | if (I[u] == B.back()) { 66 | scc.push_back({}); 67 | B.pop_back(); 68 | for (; I[u] < S.size(); S.pop_back()) { 69 | scc.back().push_back(S.back()); 70 | I[S.back()] = n + scc.size(); 71 | } 72 | } 73 | }; 74 | for (int u = 0; u < n; ++u) 75 | if (!I[u]) dfs(u); 76 | return scc; // I[u] - n is the index of u 77 | } 78 | }; 79 | 80 | int main() { 81 | int n, m; 82 | scanf("%d %d", &n, &m); 83 | graph g(n); 84 | for (int k = 0; k < m; ++k) { 85 | int i, j; 86 | scanf("%d %d", &i, &j); 87 | g.add_edge(i-1, j-1); 88 | } 89 | 90 | vector> scc = g.strongly_connected_components(); 91 | vector outdeg(scc.size()); 92 | vector id(n); 93 | for (int i = 0; i < scc.size(); ++i) 94 | for (int u: scc[i]) id[u] = i; 95 | for (int u = 0; u < n; ++u) 96 | for (int v: g.adj[u]) 97 | if (id[u] != id[v]) ++outdeg[id[u]]; 98 | 99 | if (count(all(outdeg), 0) != 1) { 100 | printf("0\n"); 101 | } else { 102 | int i = find(all(outdeg), 0) - outdeg.begin(); 103 | sort(all(scc[i])); 104 | printf("%d\n%d", scc[i].size(), scc[i][0]+1); 105 | for (int j = 1; j < scc[i].size(); ++j) 106 | printf(" %d", scc[i][j]+1); 107 | printf("\n"); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /number_theory/mobius_mu.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Mobius Mu 3 | // 4 | // Description: 5 | // mu(n) = 1 if n is square-free, even number of prime factors 6 | // -1 if ... odd ... 7 | // 0 if n has a squared prime factor. 8 | // equivalently, multiplicative with 9 | // mu(p^k) = 1 if k = 0 10 | // -1 if k = 1 11 | // 0 if k > 1. 12 | // 13 | // Complexity: 14 | // mobius_mu(n): O(sqrt(n)) by trial division. 15 | // mobius_mu(lo,hi): O((hi-lo) loglog(hi)) by prime sieve. 16 | // 17 | // Verified: 18 | // 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | using namespace std; 27 | 28 | #define fst first 29 | #define snd second 30 | #define all(c) ((c).begin()), ((c).end()) 31 | 32 | typedef long long ll; 33 | 34 | ll mobius_mu(ll n) { 35 | if (n == 0) return 0; 36 | ll mu = 1; 37 | for (ll x = 2; x*x <= n; ++x) { 38 | if (n % x == 0) { 39 | mu = -mu; 40 | n /= x; 41 | if (n % x == 0) return 0; 42 | } 43 | } 44 | return n > 1 ? 
-mu : mu; 45 | } 46 | 47 | vector primes(ll lo, ll hi) { // primes in [lo, hi) 48 | const ll M = 1 << 14, SQR = 1 << 16; 49 | vector composite(M), small_composite(SQR); 50 | 51 | vector> sieve; 52 | for (ll i = 3; i < SQR; i+=2) { 53 | if (!small_composite[i]) { 54 | ll k = i*i + 2*i*max(0.0, ceil((lo - i*i)/(2.0*i))); 55 | sieve.push_back({2*i, k}); 56 | for (ll j = i*i; j < SQR; j += 2*i) 57 | small_composite[j] = 1; 58 | } 59 | } 60 | vector ps; 61 | if (lo <= 2) { ps.push_back(2); lo = 3; } 62 | for (ll k = lo|1, low = lo; low < hi; low += M) { 63 | ll high = min(low + M, hi); 64 | fill(all(composite), 0); 65 | for (auto &z: sieve) 66 | for (; z.snd < high; z.snd += z.fst) 67 | composite[z.snd - low] = 1; 68 | for (; k < high; k+=2) 69 | if (!composite[k - low]) ps.push_back(k); 70 | } 71 | return ps; 72 | } 73 | vector primes(ll n) { // primes in [0,n) 74 | return primes(0,n); 75 | } 76 | vector mobius_mu(ll lo, ll hi) { // phi(n) for all n in [lo, hi) 77 | vector ps = primes(sqrt(hi)+1); 78 | vector res(hi-lo), mu(hi-lo, 1); 79 | iota(all(res), lo); 80 | 81 | for (ll p: ps) { 82 | for (ll k = ((lo+(p-1))/p)*p; k < hi; k += p) { 83 | mu[k-lo] = -mu[k-lo]; 84 | if (res[k-lo] % p == 0) { 85 | res[k-lo] /= p; 86 | if (res[k-lo] % p == 0) { 87 | mu[k-lo] = 0; 88 | res[k-lo] = 1; 89 | } 90 | } 91 | } 92 | } 93 | for (ll k = lo; k < hi; ++k) { 94 | if (res[k-lo] > 1) 95 | mu[k-lo] = -mu[k-lo]; 96 | } 97 | return mu; // mu[k-lo] = mu(k) 98 | } 99 | 100 | int main() { 101 | for (int iter = 0; iter < 1000; ++iter) { 102 | int lo = rand(), hi = lo + rand(); 103 | auto x = mobius_mu(lo, hi); 104 | for (int i = lo; i < hi; ++i) { 105 | if (x[i-lo] != mobius_mu(i)) { 106 | cout << lo << " " << hi << " " << mobius_mu(i) << " " << x[i-lo] << endl; 107 | } 108 | } 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /graph/least_common_ancestor_heavylight.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Least common ancestor by heavy-light decomposition 3 | // 4 | // Description: 5 | // For a rooted tree T, LCA(u,v) is a vertex u 6 | // that is the deepest node that is a common ancestor of u and v. 7 | // 8 | // Algorithm: 9 | // A heavy-light decomposition finds a partition of a tree into 10 | // a set of paths that is recursively defined as follows: 11 | // 1) root is contained in a "heavy" path that spans root-to-child 12 | // 2) other childs are linked by "light" edges and the subtrees are 13 | // recursively decomposed by the heavy-light decomposition. 14 | // Here, all subtrees have fewer nodes than the "heavy" path. 15 | // 16 | // If two nodes u and v are located in the same heavy-path, LCA is 17 | // immediately obtained (shallower node is the LCA). Otherwise, 18 | // climbing light links until these are located in the same heavy-path. 19 | // By construction, only O(log n) light-link-climb is enough to reach 20 | // the root of the tree; thus it gives O(log n) algorithm. 
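// The O(log n) bound comes from the light edges: the heavy child is the child
// with the largest subtree, so any light child's subtree has at most half the
// vertices of its parent's subtree. Climbing a light edge therefore at least
// doubles the subtree size, so any root-to-node path contains at most log2(n)
// light edges -- which bounds the iterations of the while loop in lca() below.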
21 | // 22 | // Complexity: 23 | // O(n) for preprocessing, 24 | // O(log n) for query 25 | // 26 | // Verified: 27 | // AOJ GRL_5C 28 | // 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | 35 | using namespace std; 36 | 37 | #define fst first 38 | #define snd second 39 | #define all(c) ((c).begin()), ((c).end()) 40 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 41 | 42 | 43 | struct tree { 44 | int n; 45 | vector> adj; 46 | tree(int n) : n(n), adj(n) { } 47 | void add_edge(int s, int t) { 48 | adj[s].push_back(t); 49 | adj[t].push_back(s); 50 | } 51 | vector size, depth, head, parent; 52 | void rootify(int r) { 53 | size = depth = parent = head = vector(n, -1); 54 | function dfs = [&](int u, int p) { 55 | parent[u] = p; 56 | depth[u] = depth[p]+1; 57 | for (int v: adj[u]) 58 | if (v != p) size[u] += dfs(v, u); 59 | return ++size[u]; 60 | }; dfs(r, r); 61 | function dec = [&](int u, int s) { 62 | head[u] = s; 63 | int z = -1; 64 | for (int v: adj[u]) 65 | if (head[v] < 0 && (z < 0 || size[z] < size[v])) z = v; 66 | for (int v: adj[u]) 67 | if (head[v] < 0) dec(v, v == z ? s : v); 68 | }; dec(r, r); 69 | } 70 | int lca(int u, int v) { 71 | while (head[u] != head[v]) 72 | if (depth[head[u]] < depth[head[v]]) v = parent[head[v]]; 73 | else u = parent[head[u]]; 74 | return depth[u] < depth[v] ? u : v; 75 | } 76 | }; 77 | 78 | int main() { 79 | int n; 80 | scanf("%d", &n); 81 | tree T(n); 82 | for (int u = 0; u < n ; ++u) { 83 | int k; 84 | scanf("%d", &k); 85 | for (int j = 0; j < k; ++j) { 86 | int v; 87 | scanf("%d", &v); 88 | T.add_edge(u, v); 89 | } 90 | } 91 | T.rootify(0); 92 | int q; 93 | scanf("%d", &q); 94 | for (int i = 0; i < q; ++i) { 95 | int u, v; 96 | scanf("%d %d", &u, &v); 97 | printf("%d\n", T.lca(u, v)); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /other/knapsack_expcore.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Knapsack Problem (branch-and-bound with expanding core) 3 | // 4 | // Description: 5 | // We are given a set of items with profit p_i and weight w_i. 6 | // The problem is to find a subset of items that maximizes 7 | // the total profit under the total weight less than some capacity c. 8 | // 9 | // 1) c is small ==> capacity DP 10 | // 2) p is small ==> price DP 11 | // 3) both are large ==> branch and bound. 12 | // 13 | // Algorithm: 14 | // Branch and bound with expanding core. 15 | // We first sort the items by p_i/w_i in descending order. 16 | // Then, the fractional solution is given by selecting 17 | // integral {1 ... b-1} and fractional b. 18 | // Branch-and-bound method recursively finds a solution for b = 0 and 1. 19 | // 20 | // For an efficient implementation, we maintain a interval [s,t]; 21 | // which means that all items i <= s is selected, and all items j >= t 22 | // is not selected. The algorithm recursively select s or discard t. 23 | // 24 | // Intuitively, the algorithm enumerates all possibilities in [s,t]. 25 | // The set of items in [s,t] is called "core." 26 | // 27 | // Complexity: 28 | // O(2^c), where c is the size of core. Basically, c is not so large. 29 | // 30 | // Verified: 31 | // SPOJ3321. 32 | // 33 | // References: 34 | // H. Kellerer, U. Pferschy, and D. Pisinger (2004): 35 | // Knapsack problems. 36 | // Springer Science & Business Media. 
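// Note on the pruning test: det(p - z - 1, w - c, is[t].p, is[t].w) < 0 is, as
// far as I read it, the Dantzig upper bound written without divisions. In the
// feasible branch (w <= c) it is equivalent to
//     p + (c - w) * p_t / w_t  <=  z,
// i.e. even filling the residual capacity fractionally with the densest remaining
// item cannot beat the incumbent z, so the subtree is cut; the infeasible branch
// (w > c) applies the symmetric bound while removing items from the left end of
// the core.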
37 | 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | #include 46 | #include 47 | 48 | using namespace std; 49 | 50 | #define all(c) c.begin(),c.end() 51 | 52 | template 53 | struct knapsack { 54 | T c; 55 | struct item { T p, w; }; // price/weight 56 | vector is; 57 | void add_item(T p, T w) { 58 | is.push_back({p, w}); 59 | } 60 | T det(T a, T b, T c, T d) { 61 | return a * d - b * c; 62 | } 63 | T z; 64 | void expbranch(T p, T w, int s, int t) { 65 | if (w <= c) { 66 | if (p >= z) z = p; 67 | for (; t < is.size(); ++t) { 68 | if (det(p - z - 1, w - c, is[t].p, is[t].w) < 0) return; 69 | expbranch(p + is[t].p, w + is[t].w, s, t + 1); 70 | } 71 | } else { 72 | for (; s >= 0; --s) { 73 | if (det(p - z - 1, w - c, is[s].p, is[s].w) < 0) return; 74 | expbranch(p - is[s].p, w - is[s].w, s - 1, t); 75 | } 76 | } 77 | } 78 | T solve() { 79 | sort(all(is), [](const item &a, const item &b) { 80 | return a.p * b.w > a.w * b.p; 81 | }); 82 | T p = 0, w = 0; 83 | z = 0; 84 | int b = 0; 85 | for (; b < is.size() && w <= c; ++b) { 86 | p += is[b].p; 87 | w += is[b].w; 88 | } 89 | expbranch(p, w, b-1, b); 90 | return z; 91 | } 92 | }; 93 | 94 | int main() { 95 | int s, n; 96 | scanf("%d %d", &s, &n); 97 | knapsack solver; 98 | solver.c = s; 99 | for (int i = 0; i < n; ++i) { 100 | int v, w; 101 | scanf("%d %d", &w, &v); 102 | solver.add_item(v, w); 103 | } 104 | printf("%d\n", solver.solve()); 105 | } 106 | -------------------------------------------------------------------------------- /graph/strongly_connected_component_tarjan.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Tarjan's strongly connected component 3 | // 4 | // Description: 5 | // For a graph G = (V, E), u and v are strongly connected if 6 | // there are paths u -> v and v -> u. This defines an equivalent 7 | // relation, and its equivalent class is called a strongly 8 | // connected component. 9 | // 10 | // Algorithm: 11 | // Tarjan's single stack algorithm. 12 | // This does not need to keep reverse edges; 13 | // thus it is memory efficient than Kosaraju's algorithm. 14 | // 15 | // Complexity: 16 | // O(n + m) 17 | // 18 | // Verified: 19 | // SPOJ 6818 20 | // 21 | // References: 22 | // R. E. Tarjan (1972): 23 | // Depth-first search and linear graph algorithms. 24 | // SIAM Journal on Computing, vol.1, no.2, pp.146--160. 25 | // 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | 39 | using namespace std; 40 | 41 | #define fst first 42 | #define snd second 43 | #define all(c) ((c).begin()), ((c).end()) 44 | 45 | struct graph { 46 | int n; 47 | vector> adj; 48 | graph(int n) : n(n), adj(n) { } 49 | void add_edge(int src, int dst) { 50 | adj[src].push_back(dst); 51 | } 52 | 53 | vector> strongly_connected_components() { 54 | vector open, id(n); 55 | vector> scc; 56 | int t = -n-1; 57 | auto argmin = [&](int u, int v) { return id[u] < id[v] ? 
u : v; }; 58 | function dfs = [&](int u) { 59 | open.push_back(u); 60 | id[u] = t++; 61 | int w = u; 62 | for (int v: adj[u]) { 63 | if (id[v] == 0) w = argmin(w, dfs(v)); 64 | else if (id[v] < 0) w = argmin(w, v); 65 | } 66 | if (w == u) { 67 | scc.push_back({}); 68 | while (1) { 69 | int v = open.back(); 70 | open.pop_back(); 71 | id[v] = scc.size(); 72 | scc.back().push_back(v); 73 | if (u == v) break; 74 | } 75 | } 76 | return w; 77 | }; 78 | for (int u = 0; u < n; ++u) 79 | if (id[u] == 0) dfs(u); 80 | return scc; 81 | } 82 | }; 83 | 84 | int main() { 85 | int n, m; 86 | scanf("%d %d", &n, &m); 87 | graph g(n); 88 | for (int k = 0; k < m; ++k) { 89 | int i, j; 90 | scanf("%d %d", &i, &j); 91 | g.add_edge(i-1, j-1); 92 | } 93 | 94 | vector> scc = g.strongly_connected_components(); 95 | vector outdeg(scc.size()); 96 | vector id(n); 97 | for (int i = 0; i < scc.size(); ++i) 98 | for (int u: scc[i]) id[u] = i; 99 | for (int u = 0; u < n; ++u) 100 | for (int v: g.adj[u]) 101 | if (id[u] != id[v]) ++outdeg[id[u]]; 102 | 103 | if (count(all(outdeg), 0) != 1) { 104 | printf("0\n"); 105 | } else { 106 | int i = find(all(outdeg), 0) - outdeg.begin(); 107 | sort(all(scc[i])); 108 | printf("%d\n%d", scc[i].size(), scc[i][0]+1); 109 | for (int j = 1; j < scc[i].size(); ++j) 110 | printf(" %d", scc[i][j]+1); 111 | printf("\n"); 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /graph/link_cut_tree.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Link Cut Tree (Sleator-Tarjan) 3 | // 4 | // Description: 5 | // It maintains rooted arborescences with the following operations 6 | // link(u,v) : add link from u to v 7 | // cut(u) : cut link from u (to the root direction) 8 | // lca(u,v) : least common ancestor of u and v 9 | // 10 | // Algorithm: 11 | // Classify links into solid and dashed. 12 | // Here, each vertex has at most one solid link. 13 | // Then, maintain solid paths by splay trees, 14 | // which are sorted by the depth of the vertices. 15 | // see: http://planarity.org/Klein_splay_trees_and_link-cut_trees.pdf 16 | // 17 | // Complexity: 18 | // O(log n), amortized. 19 | // 20 | // References: 21 | // D. D. Sleator and R. E. Tarjan (1983): 22 | // A Data Structure for Dynamic Trees. 23 | // Journal of Computer and System Sciences, vol. 26, no. 3, pp. 362-391.
24 | // 25 | // Verified: AOJ Spaceships 26 | 27 | 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | using namespace std; 34 | 35 | struct link_cut_tree { 36 | struct node { 37 | node *child[2], *parent; 38 | }; 39 | bool is_root(node *x) { 40 | return !x->parent || (x->parent->child[0] != x 41 | && x->parent->child[1] != x); 42 | } 43 | int dir(node *x) { return x->parent && x->parent->child[1] == x; } 44 | void rot(node* t) { 45 | node *p = t->parent; 46 | int d = dir(t); 47 | p->child[d] = t->child[!d]; 48 | if (p->child[d]) p->child[d]->parent = p; 49 | if (!is_root(p)) p->parent->child[dir(p)] = t; 50 | t->parent = p->parent; 51 | t->child[!d] = p; 52 | p->parent = t; 53 | } 54 | void splay(node *x) { 55 | while (!is_root(x)) { 56 | if (!is_root(x->parent)) { 57 | if (dir(x) == dir(x->parent)) rot(x->parent); 58 | else rot(x); 59 | } 60 | rot(x); 61 | } 62 | } 63 | node *expose(node *x) { 64 | node *r = 0; 65 | for (node *p = x; p; p = p->parent) { 66 | splay(p); 67 | p->child[1] = r; 68 | r = p; 69 | } 70 | splay(x); 71 | return r; 72 | } 73 | 74 | vector ns; 75 | link_cut_tree(int n) : ns(n) { 76 | for (int i = 0; i < n; ++i) 77 | ns[i].child[0] = ns[i].child[1] = ns[i].parent = 0; 78 | } 79 | void link(int x, int y) { 80 | expose(&ns[x]); 81 | expose(&ns[y]); 82 | ns[y].child[1] = &ns[x]; 83 | ns[x].parent = &ns[y]; 84 | } 85 | void cut(int x) { 86 | expose(&ns[x]); 87 | node *y = ns[x].child[0]; 88 | ns[x].child[0] = y->parent = 0; 89 | } 90 | int lca(int x, int y) { 91 | expose(&ns[x]); 92 | node *u = expose(&ns[y]); 93 | return ns[x].parent ? u - &ns[0] : -1; 94 | } 95 | }; 96 | 97 | int main() { 98 | int n, q, t, a, b; 99 | scanf("%d %d", &n, &q); 100 | 101 | link_cut_tree LCT(n); 102 | for (int i = 0; i < q; ++i) { 103 | scanf("%d", &t); 104 | if (t == 1) { 105 | scanf("%d %d", &a, &b); 106 | LCT.link(a-1, b-1); 107 | } else if (t == 2) { 108 | scanf("%d", &a); 109 | LCT.cut(a-1); 110 | } else { 111 | scanf("%d %d", &a, &b); 112 | int c = LCT.lca(a-1, b-1); 113 | printf("%d\n", c < 0 ? c : c+1); 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /graph/minimum_feedback_arc_set.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Minimum Feedback Arc Set 3 | // 4 | // Description: 5 | // We are given a directed weighted graph G = (V, E; w). 6 | // A set of edges F is said to be a feedback arc set if 7 | // (V, E-F) has no cycles. 8 | // The problem is to find a minimum weight feedback arc set. 9 | // 10 | // Algorithm: 11 | // The min FAS problem is equivalent to finding an ordering p 12 | // that minimizes the total weight of inconsistent edges: 13 | // \sum { w(u,v) : u <_p v }. 14 | // This gives a dynamic programming solution. 15 | // For a subset S of V, let f(S) be the value of MFAS for G(S). 16 | // Then we have 17 | // f(S) = min { f(S-u) + sum { w(u,v) : v in S } }. 18 | // This means that the optimal solution for S is obtained by 19 | // adding the largest element u to the optimal solution for S-u. 20 | // 21 | // Complexity: 22 | // O(m 2^n). 23 | // 24 | // References: 25 | // V. Raman and S. Saurabh (2007): 26 | // Improved fixed parameter tractable algorithms for two edge 27 | // problems: MAXCUT and MAXDAG. 28 | // Information Processing Letters, vol. 104, pp. 65--72.
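//
// Example (added sanity check of the recurrence): for the directed
// triangle 0 -> 1 -> 2 -> 0 with w(0,1) = 1, w(1,2) = 2, w(2,0) = 3,
// every ordering leaves at least one arc of the cycle inconsistent,
// so the minimum feedback arc set is {(0,1)} with total weight 1.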
29 | 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | 36 | using namespace std; 37 | 38 | #define fst first 39 | #define snd second 40 | #define all(c) ((c).begin()), ((c).end()) 41 | 42 | 43 | template 44 | struct graph { 45 | struct edge { 46 | int src, dst; 47 | T weight; 48 | }; 49 | int n; 50 | vector> adj; 51 | T inf; 52 | graph(int n) : n(n), adj(n), inf(0) { } 53 | void add_edge(int src, int dst, T weight) { 54 | adj[src].push_back({src, dst, weight}); 55 | inf += weight; 56 | } 57 | 58 | T min_feedback_arc_set() { 59 | vector f(1< pi(n); iota(all(pi), 0); 79 | do { 80 | T ans = 0; 81 | for (int u = 0; u < n; ++u) 82 | for (edge e: adj[u]) 83 | if (pi[e.src] < pi[e.dst]) 84 | ans += e.weight; 85 | opt = min(opt, ans); 86 | } while (next_permutation(all(pi))); 87 | return opt; 88 | } 89 | }; 90 | 91 | int main() { 92 | for (int seed = 0; seed < 1000; ++seed) { 93 | srand(seed); 94 | int n = 7; 95 | graph g(n); 96 | for (int i = 0; i < n; ++i) { 97 | for (int j = 0; j < n; ++j) { 98 | if (i == j) continue; 99 | int w = rand() % 10; 100 | g.add_edge(i, j, w); 101 | // cout << i << " " << j << " " << w << endl; 102 | } 103 | } 104 | int a = g.min_feedback_arc_set(); 105 | int b = g.min_feedback_arc_set_naive(); 106 | if (a != b) { 107 | cout << seed << endl; 108 | cout << "DP = " << a << endl; 109 | cout << "naive = " << b << endl; 110 | break; 111 | } 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /graph/betweenness_centrality.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Betweenness centrality of undirected unweighted graph (Brandes) 3 | // 4 | // Description: 5 | // 6 | // Compute betweenness centrality, defined by 7 | // f(v) := \sum_{s \ne v \ne t} |{s-t shortest paths that contain v}| / |{s-t shortest paths}| 8 | // 9 | // Algorithm: 10 | // 11 | // Brandes's algorithm, O(nm) time, O(m) space. 12 | // 13 | // References: 14 | // 15 | // U. Brandes (2001): A faster algorithm for betweenness centrality. 16 | // Journal of Mathematical Sociology, vol.25, pp.163–177.
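//
// Example (added, traced against the code below): on the path graph
// 0 - 1 - 2, vertex 1 lies on the unique shortest path between the two
// endpoints; since the accumulation below runs once per source vertex,
// the returned centrality is 2 for vertex 1 and 0 for the endpoints.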
17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | using namespace std; 32 | 33 | #define fst first 34 | #define snd second 35 | #define all(c) ((c).begin()), ((c).end()) 36 | 37 | struct edge { 38 | size_t src, dst; 39 | }; 40 | struct graph { 41 | vector edges; 42 | void add_edge(size_t src, size_t dst) { 43 | edges.push_back({src, dst}); 44 | } 45 | size_t n; 46 | vector> adj; 47 | void make_graph(int n_ = 0) { 48 | n = n_; 49 | for (auto e: edges) 50 | n = max(n, max(e.src, e.dst)+1); 51 | adj.resize(n); 52 | for (auto e: edges) { 53 | adj[e.src].push_back(e); 54 | swap(e.src, e.dst); 55 | adj[e.src].push_back(e); 56 | } 57 | } 58 | 59 | vector betweeness_centrality() { 60 | vector centrality(n); 61 | 62 | for (size_t s = 0; s < n; ++s) { 63 | vector S; 64 | vector sigma(n); sigma[s] = 1; 65 | vector dist(n, -1); dist[s] = 0; 66 | queue que; que.push(s); 67 | while (!que.empty()) { 68 | size_t u = que.front(); 69 | S.push_back(u); 70 | que.pop(); 71 | for (auto e: adj[u]) { 72 | if (dist[e.dst] < 0) { 73 | dist[e.dst] = dist[e.src] + 1; 74 | que.push(e.dst); 75 | } 76 | if (dist[e.dst] == dist[e.src] + 1) { 77 | sigma[e.dst] += sigma[e.src]; 78 | } 79 | } 80 | } 81 | vector delta(n); 82 | while (!S.empty()) { 83 | size_t u = S.back(); 84 | S.pop_back(); 85 | for (auto e: adj[u]) { 86 | if (dist[e.dst] == dist[e.src] + 1) { 87 | delta[e.src] += sigma[e.src] / sigma[e.dst] * (1 + delta[e.dst]); 88 | } 89 | } 90 | if (u != s) centrality[u] += delta[u]; 91 | } 92 | } 93 | return centrality; 94 | } 95 | }; 96 | 97 | int main() { 98 | graph g; 99 | g.add_edge( 0, 1 ); 100 | g.add_edge( 0, 2 ); 101 | g.add_edge( 0, 3 ); 102 | 103 | g.add_edge( 1, 4 ); 104 | g.add_edge( 1, 5 ); 105 | g.add_edge( 4, 5 ); 106 | g.add_edge( 4, 6 ); 107 | g.add_edge( 5, 6 ); 108 | 109 | g.add_edge( 2, 7 ); 110 | g.add_edge( 2, 8 ); 111 | g.add_edge( 7, 8 ); 112 | g.add_edge( 7, 9 ); 113 | g.add_edge( 8, 9 ); 114 | 115 | g.add_edge( 3,10 ); 116 | g.add_edge( 3,11 ); 117 | g.add_edge(10,11 ); 118 | g.add_edge(10,12 ); 119 | g.add_edge(11,12 ); 120 | 121 | g.make_graph(); 122 | g.betweeness_centrality(); 123 | } 124 | 125 | -------------------------------------------------------------------------------- /math/SimplexMethodLP.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Parametric Self-Dual Simplex method 3 | // 4 | // Description: 5 | // Solve a canonical LP: 6 | // min. c x 7 | // s.t. A x <= b 8 | // x >= 0 9 | // 10 | // 11 | // Algorithm: 12 | // Parametric self-dual simplex method. 13 | // 14 | // 15 | // Complexity: 16 | // O(n+m) iterations on average. 17 | // 18 | // 19 | // References: 20 | // 21 | // - G. B. Dantzig (1963): 22 | // Linear Programming and Extensions. 23 | // Princeton University Press. 24 | // 25 | // - R. J. Vanderbei (2007): 26 | // Linear programming: Foundations and Extensions. 27 | // 3rd eds., Springer. 
28 | // 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | using namespace std; 39 | 40 | #define ALL(c) c.begin(), c.end() 41 | #define FOR(i,c) for(typeof(c.begin())i=c.begin();i!=c.end();++i) 42 | #define REP(i,n) for(int i=0;i= -EPS) return -T[m][n+m]; // optimal 63 | 64 | if (t < T[q][n+m]) { // tight on c -> primal update 65 | REP(j, m) if (T[j][p] >= EPS) 66 | if (T[j][p]*(T[q][n+m]-t) >= T[q][p]*(T[j][n+m]-t)) q = j; 67 | if (T[q][p] <= EPS) return INF; // primal infeasible 68 | } else { // tight on b -> dual update 69 | REP(i, n+m+1) T[q][i] *= -1; 70 | REP(i, n+m) if (T[q][i] >= EPS) 71 | if (T[q][i]*(T[m][p]-t) >= T[q][p]*(T[m][i]-t)) p = i; 72 | if (T[q][p] <= EPS) return -INF; // dual infeasible 73 | } 74 | REP(i, m+n+1) if (i != p) T[q][i] /= T[q][p]; T[q][p] = 1; // pivot(q,p) 75 | REP(j, m+1) if (j != q) { 76 | double alpha = T[j][p]; 77 | REP(i, n+m+1) T[j][i] -= T[q][i] * alpha; 78 | } 79 | } 80 | } 81 | 82 | #include 83 | int doit(int seed) { 84 | // verify with matlab 85 | cout << "seed = " << seed << endl; 86 | srand(seed); 87 | 88 | int n = 7, m = 3; 89 | double c[n]; 90 | double A[m*n]; 91 | double b[m]; 92 | REP(i, n) c[i] = (rand() % 21) - 5; 93 | REP(i, m) b[i] = (rand() % 21) - 5; 94 | REP(i, n) REP(j, m) A[j*n+i] = (rand() % 21) - 5; 95 | 96 | cout << "c = ["; 97 | REP(i,n) cout << c[i] << (i == n-1 ? "];" : ","); cout << endl; 98 | cout << "b = ["; 99 | REP(i,m) cout << b[i] << (i == m-1 ? "];" : ";"); cout << endl; 100 | cout << "A = ["; 101 | REP(j, m) { 102 | REP(i, n) cout << A[j*n+i] << (i == n-1 ? j == m-1 ? "];" : ";" : ","); 103 | } 104 | cout << endl; 105 | cout << "[sol,opt] = linprog(c,[A;-eye(size(c,2))],[b;zeros(size(c,2),1)])" << endl; 106 | cout << endl; 107 | 108 | cout << simplexMethodPD(c, n, b, m, A) << endl; 109 | } 110 | 111 | int main() { doit( time(0) ); } 112 | -------------------------------------------------------------------------------- /string/boyer_moore.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Boyer-Moore string matching 3 | // 4 | // Description: 5 | // It processes a pattern string to find 6 | // all occurrence of a given text. 7 | // 8 | // Algorithm: 9 | // It matches a pattern string from the last to the front. 10 | // If the string is random, this can skip many comparison. 11 | // 12 | // Complexity: 13 | // O(n + |occur|) with O(m) preprocessing. 14 | // In a random case, it reduced to O(n/m + |occur|). 15 | // 16 | // Verified: 17 | // SPOJ 21524 18 | // 19 | // Comment: 20 | // BM is often considered faster than KMP. 21 | // However, in the programming contest setting, 22 | // these are equally fast. 
23 | // 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | using namespace std; 32 | 33 | #define fst first 34 | #define snd second 35 | #define all(c) ((c).begin()), ((c).end()) 36 | 37 | struct boyer_moore { 38 | int m; 39 | const char *p; 40 | vector skip, next; 41 | boyer_moore(const char *p) : p(p), m(strlen(p)) { 42 | skip.resize(0x100); // bad char heuristics 43 | for (int i = 0; i < m; ++i) 44 | skip[p[i]] = m - i - 1; 45 | 46 | vector g(m, m); // good suffix heuristics 47 | next.resize(m); 48 | for (int i = 0; i < m; ++i) 49 | next[i] = 2*m-i-1; 50 | for (int i = m-1, j = m; i >= 0; --i, --j) { 51 | g[i] = j; 52 | while (j < m && p[j] != p[i]) { 53 | next[j] = min(next[j], m-i-1); 54 | j = g[j]; 55 | } 56 | } 57 | } 58 | vector match(const char s[]) { 59 | int n = strlen(s); 60 | vector occur; 61 | for (int i = m-1; i < n; ) { 62 | int j = m-1; 63 | while (j >= 0 && s[i] == p[j]) --i, --j; 64 | if (j < 0) { 65 | /* match at s[i+1, ..., i+m] */ 66 | occur.push_back(i+1); 67 | i += m + 1; 68 | } else i += max(skip[s[i]], next[j]); 69 | } 70 | return occur; 71 | } 72 | }; 73 | 74 | 75 | // for comparison 76 | struct knuth_morris_pratt { 77 | int m; 78 | const char *p; 79 | vector fail; 80 | knuth_morris_pratt(const char *p) : p(p), m(strlen(p)) { 81 | fail.resize(m+1, -1); 82 | for (int i = 1, j = -1; i <= m; ++i) { 83 | while (j >= 0 && p[j] != p[i-1]) j = fail[j]; 84 | fail[i] = ++j; 85 | } 86 | } 87 | vector match(const char *s) { 88 | int n = strlen(s); 89 | vector occur; 90 | for (int i = 0, k = 0; i < n; ++i) { 91 | while (k >= 0 && s[i] != p[k]) k = fail[k]; 92 | if (++k == m) { 93 | /* match at s[i-m+1 ... i] */ 94 | occur.push_back(i-m+1); 95 | } 96 | } 97 | return occur; 98 | } 99 | }; 100 | 101 | int main() { 102 | int ncase; scanf("%d", &ncase); 103 | for (int icase = 0; icase < ncase; ++icase) { 104 | if (icase > 0) printf("\n"); 105 | char s[1000010], p[1000010]; 106 | scanf("%s %s", s, p); 107 | boyer_moore M(p); 108 | auto v = M.match(s); 109 | if (v.empty()) { 110 | printf("Not Found\n"); 111 | } else { 112 | printf("%d\n", v.size()); 113 | for (int i = 0; i < v.size(); ++i) { 114 | if (i > 0) printf(" "); 115 | printf("%d", v[i]+1); 116 | } 117 | printf("\n"); 118 | } 119 | } 120 | } 121 | 122 | -------------------------------------------------------------------------------- /string/knuth_morris_pratt.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Knuth-Morris-Pratt string matching 3 | // 4 | // Description: 5 | // It preprocesses a pattern string to find 6 | // all occurrences of the pattern in a given text. 7 | // 8 | // Algorithm: 9 | // It constructs an automaton that accepts the pattern, 10 | // and then feeds the text into the automaton. 11 | // 12 | // Complexity: 13 | // O(n + |occur|) with O(m) preprocessing. 14 | // Unlike Boyer-Moore, it reads every text character, so the bound does not improve on random inputs. 15 | // 16 | // Verified: 17 | // SPOJ 21524 18 | // 19 | // Comment: 20 | // KMP is often considered slower than BM. 21 | // However, in the programming contest setting, 22 | // they are equally fast.
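//
// Example (added for illustration): for the pattern p = "abab" the
// table built below is fail = {-1, 0, 0, 1, 2}; e.g. fail[4] = 2
// because the longest proper border (a prefix that is also a suffix)
// of "abab" is "ab".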
23 | // 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | using namespace std; 32 | 33 | #define fst first 34 | #define snd second 35 | #define all(c) ((c).begin()), ((c).end()) 36 | 37 | struct knuth_morris_pratt { 38 | int m; 39 | const char *p; 40 | vector fail; 41 | knuth_morris_pratt(const char *p) : p(p), m(strlen(p)) { 42 | fail.resize(m+1, -1); 43 | for (int i = 1, j = -1; i <= m; ++i) { 44 | while (j >= 0 && p[j] != p[i-1]) j = fail[j]; 45 | fail[i] = ++j; 46 | } 47 | } 48 | vector match(const char *s) { 49 | int n = strlen(s); 50 | vector occur; 51 | for (int i = 0, k = 0; i < n; ++i) { 52 | while (k >= 0 && s[i] != p[k]) k = fail[k]; 53 | if (++k == m) { 54 | /* match at s[i-m+1 ... i] */ 55 | occur.push_back(i-m+1); 56 | } 57 | } 58 | return occur; 59 | } 60 | }; 61 | 62 | 63 | // for comparison: boyer moore 64 | struct boyer_moore { 65 | int m; 66 | const char *p; 67 | vector skip, next; 68 | boyer_moore(const char *p) : p(p), m(strlen(p)) { 69 | skip.resize(0x100); // bad char heuristics 70 | for (int i = 0; i < m; ++i) 71 | skip[p[i]] = m - i - 1; 72 | 73 | vector g(m, m); // good suffix heuristics 74 | next.resize(m); 75 | for (int i = 0; i < m; ++i) 76 | next[i] = 2*m-i-1; 77 | for (int i = m-1, j = m; i >= 0; --i, --j) { 78 | g[i] = j; 79 | while (j < m && p[j] != p[i]) { 80 | next[j] = min(next[j], m-i-1); 81 | j = g[j]; 82 | } 83 | } 84 | } 85 | vector match(const char s[]) { 86 | int n = strlen(s); 87 | vector occur; 88 | for (int i = m-1; i < n; ) { 89 | int j = m-1; 90 | while (j >= 0 && s[i] == p[j]) --i, --j; 91 | if (j < 0) { 92 | /* match at s[i+1, ..., i+m] */ 93 | occur.push_back(i+1); 94 | i += m + 1; 95 | } else i += max(skip[s[i]], next[j]); 96 | } 97 | return occur; 98 | } 99 | }; 100 | 101 | int main() { 102 | int ncase; scanf("%d", &ncase); 103 | for (int icase = 0; icase < ncase; ++icase) { 104 | if (icase > 0) printf("\n"); 105 | char s[1000010], p[1000010]; 106 | scanf("%s %s", s, p); 107 | knuth_morris_pratt M(p); 108 | auto v = M.match(s); 109 | if (v.empty()) { 110 | printf("Not Found\n"); 111 | } else { 112 | printf("%d\n", v.size()); 113 | for (int i = 0; i < v.size(); ++i) { 114 | if (i > 0) printf(" "); 115 | printf("%d", v[i]+1); 116 | } 117 | printf("\n"); 118 | } 119 | } 120 | } 121 | 122 | -------------------------------------------------------------------------------- /numeric/ODE_dormand_prince.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Dormand-Prince ODE solver 3 | // 4 | // Description: 5 | // It numerically solves an ordinary differential equation 6 | // dx/dt = f(t, x) 7 | // 8 | // Algorithm: 9 | // Dormand and Prince's adaptive step-size Runge-Kutta, 10 | // which is used in Matlab as "ode45" function. 11 | // This performs 4th and 5th-order Runge-Kutta methods 12 | // to estimate the optimal step size. 13 | // 14 | // References: 15 | // J. R. Dormand and P. J. Prince (1980): 16 | // A family of embedded Runge-Kutta formulae. 17 | // Journal of Computational and Applied Mathematics, vol.6, no.1, pp.13--26. 
18 | // 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | using namespace std; 27 | 28 | #define fst first 29 | #define snd second 30 | #define all(c) ((c).begin()), ((c).end()) 31 | #define TEST(s) if (!(s)) { cout << __LINE__ << " " << #s << endl; exit(-1); } 32 | 33 | template 34 | double dormand_prince(F f, double t, double tend, double x) { 35 | double c[] = {0, 1./5, 3./10, 4./5, 8./9, 1, 1}; 36 | double a[7][7] = { 37 | {0}, 38 | {1./5}, 39 | {3./40, 9./40}, 40 | {44./45, -56./15, 32./9}, 41 | {19372./6561, -25360./2187, 64448./6561, -212./729}, 42 | {9017./3168, -355./33, 46732./5247, 49./176, -5103./18656}, 43 | {35./384, 0, 500./1113, 125./192, -2187./6784, 11./84} 44 | }; 45 | double b[] = {5179./57600, 0, 7571./16695, 393./640, -92097./339200, 187./2100, 1./40}; 46 | double e[] = {71./57600, 0, -71./16695, 71./1920, -17253./339200, 22./525, -1./40}; 47 | 48 | const double EPS = 1e-5; 49 | double h = EPS; 50 | while (t < tend) { 51 | if (t + h >= tend) h = tend - t; 52 | double k[7]; 53 | for (int i = 0; i < 7; ++i) { 54 | double u = 0; 55 | for (int j = 0; j < i; ++j) 56 | u += a[i][j] * k[j]; 57 | k[i] = h * f(t + c[i] * h, x + u); 58 | } 59 | double err = 0; 60 | for (int i = 0; i < 7; ++i) x += b[i] * k[i]; 61 | t += h; // (t, x) 62 | 63 | for (int i = 0; i < 7; ++i) err += e[i] * k[i]; 64 | double s = pow(EPS * h / (2 * abs(err)), 1./5); 65 | s = min(max(s, 1./4), 4.); 66 | h = s * h; 67 | } 68 | return x; 69 | } 70 | 71 | // for comparison 72 | template 73 | double runge_kutta(F f, double t, double tend, double x) { 74 | const double EPS = 1e-5; 75 | for (double h = EPS; t < tend; ) { 76 | if (t + h >= tend) h = tend - t; 77 | double k1 = h * f(t , x ); 78 | double k2 = h * f(t + h/2, x + k1/2); 79 | double k3 = h * f(t + h/2, x + k2/2); 80 | double k4 = h * f(t + h , x + k3 ); 81 | x += (k1 + 2 * k2 + 2 * k3 + k4) / 6; 82 | t += h; // (t, x) 83 | } 84 | return x; 85 | } 86 | 87 | // for comparison 88 | template 89 | double euler(F f, double t, double tend, double x) { 90 | const double EPS = 1e-5; 91 | for (double h = EPS; t < tend; ) { 92 | if (t + h >= tend) h = tend - t; 93 | x += h * f(t, x); 94 | t += h; 95 | } 96 | return x; 97 | } 98 | 99 | int main() { 100 | auto f = [](double t, double x) { 101 | return t * x; 102 | }; 103 | printf("%f\n", dormand_prince(f, 0, 1, 1)); 104 | printf("%f\n", runge_kutta(f, 0, 1, 1)); 105 | printf("%f\n", euler(f, 0, 1, 1)); 106 | printf("%f\n", exp(1.0/2.0)); 107 | } 108 | -------------------------------------------------------------------------------- /number_theory/euler_phi.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Euler Phi (Totient Function) 3 | // 4 | // Description: 5 | // phi(n) = #{ k <= n : k is coprime to n } 6 | // = n (1 - 1/p1) ... (1 - 1/pm). 7 | // or equivalently 8 | // phi(p^k) = (p-1) p^{k-1}. 9 | // with multiplicative. 10 | // 11 | // Complexity: 12 | // euler_phi(n): O(sqrt(n)) by trial division. 13 | // euler_phi(lo,hi): O((hi-lo) loglog(hi)) by prime sieve. 14 | // 15 | // Verified: 16 | // SPOJ 22268 17 | // 18 | // Note: 19 | // Complexity of sieve ver. equals to the sum of exponents in hi!/lo!. 20 | // This is known to be O(hi loglog hi - lo loglog lo). 
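//
// Example (added for illustration): phi(12) = 12 (1 - 1/2)(1 - 1/3) = 4,
// and indeed {1, 5, 7, 11} are exactly the integers in [1, 12] coprime to 12.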
21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | using namespace std; 29 | 30 | #define fst first 31 | #define snd second 32 | #define all(c) ((c).begin()), ((c).end()) 33 | 34 | typedef long long ll; 35 | 36 | ll euler_phi(ll n) { 37 | if (n == 0) return 0; 38 | ll ans = n; 39 | for (ll x = 2; x*x <= n; ++x) { 40 | if (n % x == 0) { 41 | ans -= ans / x; 42 | while (n % x == 0) n /= x; 43 | } 44 | } 45 | if (n > 1) ans -= ans / n; 46 | return ans; 47 | } 48 | 49 | vector primes(ll lo, ll hi) { // primes in [lo, hi) 50 | const ll M = 1 << 14, SQR = 1 << 16; 51 | vector composite(M), small_composite(SQR); 52 | 53 | vector> sieve; 54 | for (ll i = 3; i < SQR; i+=2) { 55 | if (!small_composite[i]) { 56 | ll k = i*i + 2*i*max(0.0, ceil((lo - i*i)/(2.0*i))); 57 | sieve.push_back({2*i, k}); 58 | for (ll j = i*i; j < SQR; j += 2*i) 59 | small_composite[j] = 1; 60 | } 61 | } 62 | vector ps; 63 | if (lo <= 2) { ps.push_back(2); lo = 3; } 64 | for (ll k = lo|1, low = lo; low < hi; low += M) { 65 | ll high = min(low + M, hi); 66 | fill(all(composite), 0); 67 | for (auto &z: sieve) 68 | for (; z.snd < high; z.snd += z.fst) 69 | composite[z.snd - low] = 1; 70 | for (; k < high; k+=2) 71 | if (!composite[k - low]) ps.push_back(k); 72 | } 73 | return ps; 74 | } 75 | vector primes(ll n) { // primes in [0,n) 76 | return primes(0,n); 77 | } 78 | vector euler_phi(ll lo, ll hi) { // phi(n) for all n in [lo, hi) 79 | vector ps = primes(sqrt(hi)+1); 80 | vector res(hi-lo), phi(hi-lo, 1); 81 | iota(all(res), lo); 82 | 83 | for (ll p: ps) { 84 | for (ll k = ((lo+(p-1))/p)*p; k < hi; k += p) { 85 | if (res[k-lo] < p) continue; 86 | phi[k-lo] *= (p - 1); 87 | res[k-lo] /= p; 88 | while (res[k-lo] > 1 && res[k-lo] % p == 0) { 89 | phi[k-lo] *= p; 90 | res[k-lo] /= p; 91 | } 92 | } 93 | } 94 | for (ll k = lo; k < hi; ++k) { 95 | if (res[k-lo] > 1) 96 | phi[k-lo] *= (res[k-lo]-1); 97 | } 98 | return phi; // phi[k-lo] = phi(k) 99 | } 100 | 101 | // === tick a time === 102 | #include 103 | double tick() { 104 | static clock_t oldtick; 105 | clock_t newtick = clock(); 106 | double diff = 1.0*(newtick - oldtick) / CLOCKS_PER_SEC; 107 | oldtick = newtick; 108 | return diff; 109 | } 110 | 111 | int main() { 112 | 113 | for (int iter = 0; iter < 100; ++iter) { 114 | int lo = rand(), hi = lo + rand(); 115 | auto x = euler_phi(lo, hi); 116 | for (int n = lo; n < hi; ++n) 117 | if (x[n-lo] != euler_phi(n)) cout << "!!" << endl; 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /graph/maximum_flow_dinic.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Dinic's maximum flow 3 | // 4 | // Description: 5 | // Given a directed network G = (V, E) with edge capacity c: E->R. 6 | // The algorithm finds a maximum flow. 7 | // 8 | // Algorithm: 9 | // Dinic's blocking flow algorithm. 10 | // 11 | // Complexity: 12 | // O(n^2 m), but very fast in practice. 13 | // In particular, for a unit capacity graph, 14 | // it runs in O(m min{m^{1/2}, n^{2/3}}). 15 | // 16 | // Verified: 17 | // SPOJ FASTFLOW 18 | // 19 | // Reference: 20 | // E. A. Dinic (1970): 21 | // Algorithm for solution of a problem of maximum flow in networks with power estimation. 22 | // Soviet Mathematics Doklady, vol. 11, pp. 1277-1280. 23 | // 24 | // B. H. Korte and J. Vygen (2008): 25 | // Combinatorial Optimization: Theory and Algorithms. 26 | // Springer Berlin Heidelberg. 
27 | // 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | 36 | using namespace std; 37 | 38 | #define fst first 39 | #define snd second 40 | #define all(c) ((c).begin()), ((c).end()) 41 | 42 | const long long INF = (1ll << 50); 43 | struct graph { 44 | typedef long long flow_type; 45 | struct edge { 46 | int src, dst; 47 | flow_type capacity, flow; 48 | size_t rev; 49 | }; 50 | int n; 51 | vector> adj; 52 | graph(int n) : n(n), adj(n) { } 53 | void add_edge(int src, int dst, flow_type capacity) { 54 | adj[src].push_back({src, dst, capacity, 0, adj[dst].size()}); 55 | adj[dst].push_back({dst, src, 0, 0, adj[src].size()-1}); 56 | } 57 | flow_type max_flow(int s, int t) { 58 | vector level(n), iter(n); 59 | function levelize = [&]() { // forward levelize 60 | level.assign(n, -1); level[s] = 0; 61 | queue Q; Q.push(s); 62 | while (!Q.empty()) { 63 | int u = Q.front(); Q.pop(); 64 | if (u == t) break; 65 | for (auto &e: adj[u]) { 66 | if (e.capacity > e.flow && level[e.dst] < 0) { 67 | Q.push(e.dst); 68 | level[e.dst] = level[u] + 1; 69 | } 70 | } 71 | } 72 | return level[t]; 73 | }; 74 | function augment = [&](int u, flow_type cur) { 75 | if (u == t) return cur; 76 | for (int &i = iter[u]; i < adj[u].size(); ++i) { 77 | edge &e = adj[u][i], &r = adj[e.dst][e.rev]; 78 | if (e.capacity > e.flow && level[u] < level[e.dst]) { 79 | flow_type f = augment(e.dst, min(cur, e.capacity - e.flow)); 80 | if (f > 0) { 81 | e.flow += f; 82 | r.flow -= f; 83 | return f; 84 | } 85 | } 86 | } 87 | return flow_type(0); 88 | }; 89 | for (int u = 0; u < n; ++u) // initialize 90 | for (auto &e: adj[u]) e.flow = 0; 91 | 92 | flow_type flow = 0; 93 | while (levelize() >= 0) { 94 | fill(all(iter), 0); 95 | for (flow_type f; (f = augment(s, INF)) > 0; ) 96 | flow += f; 97 | } 98 | return flow; 99 | } 100 | }; 101 | 102 | int main() { 103 | for (int n, m; scanf("%d %d", &n, &m) == 2; ) { 104 | graph g(n); 105 | for (int i = 0; i < m; ++i) { 106 | int u, v, w; 107 | scanf("%d %d %d", &u, &v, &w); 108 | //g.add_edge(u, v, w); 109 | g.add_edge(u-1, v-1, w); 110 | } 111 | printf("%lld\n", g.max_flow(0, n-1)); 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /machine_learning/bradley_terry.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Bradley-Terry model for pairwise comparison 3 | // 4 | // Description: 5 | // Consider pairwise comparisons between n players. 6 | // This model assumes that each player i has a strength w_i, 7 | // and player i beats player j with probability w_i/(w_i + w_j). 8 | // The algorithm estimates the strengths from comparison data. 9 | // 10 | // Algorithm: 11 | // We maximize the log-likelihood: 12 | // L(w) := sum ( log(w_i) - log(w_i + w_j) ). 13 | // We derive an iterative algorithm. Let w be a current solution. 14 | // We observe that the second term is an obstruction to maximization 15 | // because log(w_i + w_j) is concave. We approximate this term by 16 | // log(w_i' + w_j') <= log(w_i + w_j) + (w_i' + w_j')/(w_i + w_j) - 1. 17 | // Then, we have a minorization 18 | // L(w') >= sum ( log(w_i') - (w_i' + w_j')/(w_i + w_j) ) + const. 19 | // Here, the right hand side has a unique maximum that is given by 20 | // w_i' = n_i ( sum 1/(w_i + w_j) )^{-1}, where n_i is the number of wins of player i. 21 | // By iterating this process, w converges to a local optimal solution.
22 | // Under mild assumptions, the likelihood has a unique optimal solution; 23 | // therefore this algorithm converges to a global optimal solution. 24 | // 25 | // Complexity: 26 | // O(n^2) per iteration. The number of iterations is usually small. 27 | // 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | 35 | using namespace std; 36 | 37 | #define fst first 38 | #define snd second 39 | #define all(c) ((c).begin()), ((c).end()) 40 | 41 | struct bradley_terry { 42 | int n; 43 | vector w; 44 | vector> a; 45 | bradley_terry(int n) : n(n), w(n,1) { regularize(); } 46 | 47 | // regularization avoids no-match pairs 48 | void regularize() { 49 | a.assign(n, vector(n, 1)); 50 | for (int i = 0; i < n; ++i) 51 | a[i][i] = n-1; 52 | } 53 | 54 | // win beats lose num times 55 | void add_match(int win, int lose, int num = 1) { 56 | a[win][lose] += num; 57 | a[win][win] += num; 58 | } 59 | 60 | // estimate the strengths 61 | void learning() { 62 | for (int iter = 0; iter < 100; ++iter) { 63 | double norm = 0; 64 | vector z(n); 65 | for (int i = 0; i < n; ++i) { 66 | double sum = 0; 67 | for (int j = 0; j < n; ++j) 68 | if (i != j) sum += (a[i][j] + a[j][i]) / (w[i] + w[j]); 69 | z[i] = a[i][i] / sum; 70 | norm += z[i]; 71 | } 72 | double err = 0; 73 | for (int i = 0; i < n; ++i) { 74 | err += abs(w[i] - z[i] / norm); 75 | w[i] = z[i] / norm; 76 | } 77 | if (err < 1e-6) break; 78 | } 79 | } 80 | 81 | }; 82 | 83 | 84 | // 2014, NPB (Nippon Professional Baseball) Central League 85 | int data[6][6] = { 86 | { 0, 13, 13, 16, 11, 13}, // Giants 87 | {11, 0, 14, 12, 16, 13}, // Tigers 88 | {10, 10, 0, 14, 15, 16}, // Carp 89 | { 8, 11, 10, 0, 14, 11}, // Dragons 90 | {13, 8, 8, 9, 0, 16}, // BayStars 91 | {11, 11, 8, 12, 8, 0}, // Swallows 92 | }; 93 | 94 | int main() { 95 | int n = 6; 96 | bradley_terry BT(n); 97 | for (int i = 0; i < n; ++i) { 98 | for (int j = 0; j < n; ++j) { 99 | BT.add_match(i, j, data[i][j]); 100 | } 101 | } 102 | BT.learning(); 103 | 104 | for (int i = 0; i < n; ++i) { 105 | cout << BT.w[i] << endl; 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /string/earley.cc: -------------------------------------------------------------------------------- 1 | // 2 | // Earley Parser 3 | // 4 | // Description: 5 | // We are given a CFG, e.g., 6 | // A -> B 7 | // A -> aAa|bAb 8 | // B -> aa|bb 9 | // B -> a|b 10 | // It determines whether a given string is generated by the CFG. 11 | // 12 | // Algorithm: 13 | // Earley algorithm. It generates all states with memoisation. 14 | // Here, a state is given by (rule, pos-in-rule, pos-in-text). 15 | // 16 | // Complexity: 17 | // O(|G|^2 n^3) in the worst case. 18 | // If a grammar is simple, it is usually reduced to O(|G|^2 n^2). 19 | // 20 | // Remark: 21 | // For simplicity, this implementation does not allow the 22 | // epsilon rule. Please expand epsilon rules by hand. 23 | // (TODO!)
24 | // 25 | // 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | using namespace std; 33 | 34 | #define fst first 35 | #define snd second 36 | #define all(c) ((c).begin()), ((c).end()) 37 | 38 | struct earley_parser { 39 | vector terminal; 40 | vector>> grammar; 41 | int add_symbol(char c = 0) { 42 | terminal.push_back(c); 43 | grammar.push_back({}); 44 | return grammar.size()-1; 45 | } 46 | void add_grammar(int A, vector As) { 47 | As.push_back(0); 48 | grammar[A].push_back(As); 49 | } 50 | earley_parser() { add_symbol(); add_symbol(); } 51 | bool parse(const char s[], int init) { 52 | int n = strlen(s); 53 | struct state { int a, k, p, i; }; 54 | vector>> chart(n+1, vector>(grammar.size())); 55 | auto enqueue = [&](vector &curr, const state &S) { 56 | for (auto &T: curr) 57 | if (T.a == S.a && T.k == S.k && T.p == S.p && T.i == S.i) return; 58 | curr.push_back(S); 59 | }; 60 | auto symbol = [&](const state &S) { return grammar[S.a][S.k][S.p]; }; 61 | grammar[1] = { {init, 0} }; 62 | vector curr = {{1, 0, 0, 0}}, next; 63 | for (int k = 0; k <= n; ++k) { 64 | for (int i = 0; i < curr.size(); ++i) { 65 | state S = curr[i]; 66 | int B = symbol(S); 67 | if (B) { 68 | if (!terminal[B]) { 69 | for (int j = 0; j < grammar[B].size(); ++j) 70 | enqueue(curr, {B, j, 0, k}); 71 | } else if (terminal[B] == s[k]) { 72 | enqueue(next, {S.a, S.k, S.p+1, S.i}); 73 | } 74 | } else { 75 | for (auto &T: chart[S.i][S.a]) 76 | enqueue(curr, {T.a, T.k, T.p+1, T.i}); 77 | } 78 | } 79 | for (auto &T: curr) 80 | chart[k][symbol(T)].push_back(T); 81 | curr.swap(next); 82 | next.clear(); 83 | } 84 | for (auto &T: chart[n][0]) 85 | if (T.a == 1) return true; 86 | return false; 87 | } 88 | }; 89 | 90 | int main() { 91 | earley_parser parser; 92 | // A -> B 93 | // A -> aAa|bAb 94 | // B -> aa|bb 95 | // B -> a|b 96 | int A = parser.add_symbol(); 97 | int B = parser.add_symbol(); 98 | int a = parser.add_symbol('a'); 99 | int b = parser.add_symbol('b'); 100 | parser.add_grammar(A, {B}); 101 | parser.add_grammar(A, {a,A,a}); 102 | parser.add_grammar(A, {b,A,b}); 103 | parser.add_grammar(B, {a}); 104 | parser.add_grammar(B, {b}); 105 | parser.add_grammar(B, {a,a}); 106 | parser.add_grammar(B, {b,b}); 107 | for (char s[1024]; cin >> s; ) { 108 | cout << parser.parse(s, A) << endl; 109 | } 110 | } 111 | --------------------------------------------------------------------------------