├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── src ├── caching.rs ├── graph │ ├── connectivity.rs │ ├── flow.rs │ ├── mod.rs │ └── util.rs ├── li_chao.rs ├── lib.rs ├── math │ ├── fft.rs │ ├── mod.rs │ └── num.rs ├── order.rs ├── range_query │ ├── README.md │ ├── dynamic_arq.rs │ ├── mod.rs │ ├── specs.rs │ ├── sqrt_decomp.rs │ └── static_arq.rs ├── rng.rs ├── scanner.rs └── string_proc.rs └── tests └── codeforces343d.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | **/*.rs.bk 3 | Cargo.lock 4 | .idea/ 5 | .vscode/ 6 | *.code-workspace -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | #- 1.58.0 # Version currently supported by Codeforces 4 | - stable 5 | - beta 6 | - nightly 7 | before_script: 8 | - rustup component add clippy 9 | script: 10 | - cargo test --verbose 11 | - cargo clippy -- -D warnings 12 | matrix: 13 | allow_failures: 14 | - rust: nightly 15 | fast_finish: true 16 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "contest-algorithms" 3 | version = "0.3.1-alpha.1" 4 | authors = ["Aram Ebtekar"] 5 | edition = "2024" 6 | 7 | description = "Common algorithms and data structures for programming contests" 8 | repository = "https://github.com/EbTech/rust-algorithms" 9 | readme = "README.md" 10 | keywords = ["competitive", "programming", "codeforces"] 11 | categories = ["algorithms", "data-structures"] 12 | license = "MIT" 13 | 14 | [badges] 15 | travis-ci = { repository = "EbTech/rust-algorithms", branch = "master" } 16 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Aram Ebtekar 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Contest Algorithms in Rust 2 | 3 | [![Crates.io Version](https://img.shields.io/crates/v/contest-algorithms.svg)](https://crates.io/crates/contest-algorithms) 4 | [![Documentation](https://docs.rs/contest-algorithms/badge.svg)](https://docs.rs/contest-algorithms) 5 | [![license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/bevyengine/bevy/blob/master/LICENSE) 6 | [![Crates.io Downloads](https://img.shields.io/crates/d/contest-algorithms.svg)](https://crates.io/crates/contest-algorithms) 7 | [![Build Status](https://travis-ci.org/EbTech/rust-algorithms.svg?branch=master)](https://travis-ci.org/EbTech/rust-algorithms) 8 | [![Gitter](https://badges.gitter.im/rust-algos/community.svg)](https://gitter.im/rust-algos/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) 9 | 10 | A collection of classic data structures and algorithms, emphasizing usability, beauty and clarity over full generality. As such, this should be viewed not as a blackbox *library*, but as a whitebox *cookbook* demonstrating the design and implementation of algorithms. I hope it will be useful to students and educators, as well as fans of algorithmic programming contests. 11 | 12 | This repository is distributed under the [MIT License](LICENSE). Contest submissions need not include the license text. Enjoy! 13 | 14 | ## For Students and Educators 15 | 16 | When learning a new algorithm or data structure, it's often helpful to see or play with a concrete implementation. As such, this repository catalogues several classic algorithms in their simplest forms. 17 | 18 | In addition, the Rust language has outstanding pedagogical attributes. Its compiler acts as a teacher, enforcing strict discipline while pointing to clearer ways to structure one's logic. 
19 | 20 | ## For Programming Contests 21 | 22 | The original intent of this project was to build a reference for use in programming contests. As a result, it contains algorithms that are frequently useful to have in one's toolkit, with an emphasis on code that is concise and easy to modify under time pressure. 23 | 24 | Most competitive programmers rely on C++ for its fast execution time. However, it's notoriously unsafe, diverting a considerable share of the contestant's time and attention on mistake prevention and debugging. Java is the next most popular choice, offering a little safety at some expense to speed of coding and execution. 25 | 26 | To my delight, I found that Rust eliminates entire classes of bugs, while reducing visual clutter to make the rest easier to spot. And, it's *fast*. There's a learning curve, to be sure. However, a proficient Rust programmer stands to gain a competitive advantage as well as a more pleasant experience! 27 | 28 | Some contest sites and online judges that support Rust: 29 | - [Codeforces](https://codeforces.com) 30 | - [CodeChef](https://www.codechef.com) 31 | - [AtCoder](https://atcoder.jp) 32 | - [Kattis](https://open.kattis.com/help/rust) 33 | - [SPOJ](https://www.spoj.com/) 34 | - [LeetCode](https://leetcode.com/contest) 35 | - [HackerRank](https://www.hackerrank.com/contests) 36 | - [Timus](http://acm.timus.ru/help.aspx?topic=rust) 37 | 38 | For help in getting started, you may check out [some of my past submissions](https://codeforces.com/contest/1168/submission/55200038) (requires login). 39 | 40 | ## Programming Language Advocacy 41 | 42 | My other goal is to appeal to developers who feel limited by ancient (yet still mainstream) programming languages, by demonstrating the power of modern techniques. 43 | 44 | Rather than try to persuade you with words, this repository aims to show by example. 
If you'd like to learn the language, I recommend [the official book](https://doc.rust-lang.org/book/) or [Programming Rust](https://www.amazon.com/Programming-Rust-Fast-Systems-Development-dp-1492052590/dp/1492052590). 45 | 46 | # Contents 47 | 48 | ## [Graphs](src/graph/) 49 | 50 | ### [Graph representations](src/graph/mod.rs) 51 | 52 | - Integer index-based adjacency list representation 53 | - Disjoint set union 54 | 55 | ### [Elementary graph algorithms](src/graph/util.rs) 56 | 57 | - Euler path and tour 58 | - Kruskal's minimum spanning tree 59 | - Dijkstra's single-source shortest paths 60 | - DFS pre-order traversal 61 | 62 | ### [Connected components](src/graph/connectivity.rs) 63 | 64 | - Connected components 65 | - Strongly connected components 66 | - Bridges and 2-edge-connected components 67 | - Articulation points and 2-vertex-connected components 68 | - Topological sort 69 | - 2-SAT solver 70 | 71 | ### [Network flows](src/graph/flow.rs) 72 | 73 | - Dinic's blocking maximum flow 74 | - Minimum cut 75 | - Hopcroft-Karp bipartite matching 76 | - Minimum cost maximum flow 77 | 78 | ## [Math](src/math/) 79 | 80 | ### [Number theory](src/math/mod.rs) 81 | 82 | - Greatest common divisor 83 | - Canonical solution to Bezout's identity 84 | - Miller's primality test 85 | 86 | ### [Generic FFT](src/math/fft.rs) 87 | 88 | - Fast Fourier transform 89 | - Number theoretic transform 90 | - Convolution 91 | 92 | ### [Arithmetic](src/math/num.rs) 93 | 94 | - Rational numbers 95 | - Complex numbers 96 | - Linear algebra 97 | - Safe modular arithmetic 98 | 99 | ## [Ordering and search](src/order.rs) 100 | 101 | - Comparator for `PartialOrd` 102 | - Binary search: drop-in replacements for C++ `lower_bound()`/`upper_bound()` 103 | - Merge and mergesort 104 | - Coordinate compression 105 | - Online convex hull trick (update and query the upper envelope of a set of lines) 106 | 107 | ## [Associative range query](src/range_query) 108 | 109 | - Statically allocated binary 
indexed ARQ tree (a.k.a. generic segtree with lazy propagation) 110 | - Dynamically allocated ARQ tree, optionally sparse and persistent 111 | - Mo's algorithm (a.k.a. query square root decomposition) 112 | 113 | ## [Scanner](src/scanner.rs) 114 | 115 | - Utility for reading input data ergonomically 116 | - File and standard I/O examples 117 | 118 | ## [String processing](src/string_proc.rs) 119 | 120 | - Generic trie 121 | - Knuth-Morris-Pratt single-pattern string matching 122 | - Aho-Corasick multi-pattern string matching 123 | - Suffix array: O(n log n) construction using counting sort 124 | - Longest common prefix 125 | - Manacher's linear-time palindrome search 126 | 127 | -------------------------------------------------------------------------------- /src/caching.rs: -------------------------------------------------------------------------------- 1 | //! Basic Cacher struct which stores a closure and a hashmap. 2 | //! The hashmap stores key value pairs representing previous 3 | //! function calls. 4 | //! 5 | //! When the Cacher function is run, it first does a lookup 6 | //! to see if the value has already been calculated. If it has, 7 | //! it returns that value. If it hasn't, it calculates the value, 8 | //! adds it to the hashmap, and returns it. 9 | 10 | use std::collections::HashMap; 11 | 12 | /// The Cacher struct (Memoization) stores a function and a Hashmap. 13 | /// The HashMap keeps track of previous input and output for the function so 14 | /// that it only ever has to be called once per input. Use for expensive functions. 
15 | pub struct Cacher 16 | where 17 | F: Fn(U) -> V, 18 | U: std::cmp::Eq + std::hash::Hash + Copy, 19 | V: Copy, 20 | { 21 | calculation: F, 22 | values: HashMap, 23 | } 24 | 25 | impl Cacher 26 | where 27 | F: Fn(U) -> V, 28 | U: std::cmp::Eq + std::hash::Hash + Copy, 29 | V: Copy, 30 | { 31 | /// Constructor for the Cacher 32 | /// # Examples 33 | /// ``` 34 | /// # use contest_algorithms::caching::Cacher; 35 | /// let mut squared = Cacher::new(|n: u32| n*n); 36 | /// ``` 37 | pub fn new(calculation: F) -> Cacher { 38 | Cacher { 39 | calculation, 40 | values: HashMap::new(), 41 | } 42 | } 43 | 44 | /// Performs a lookup into the HashMap to see if the value has already 45 | /// been calculated. If it has, returns the value. If it has not, 46 | /// calls the function, stores the value, then returns the value. 47 | /// # Examples 48 | /// ``` 49 | /// # use contest_algorithms::caching::Cacher; 50 | /// let mut squared = Cacher::new(|n: u32| n*n); 51 | /// 52 | /// // This is where we call the function 53 | /// let sixteen = squared.call(4); 54 | /// ``` 55 | // TODO: whenever Rust's Entry API gains the ability to take ownership of 56 | // arg only when necessary, this method should follow the same practice. 57 | // Also, Cacher should implement Fn(U)->V once this is possible. 58 | pub fn call(&mut self, arg: U) -> V { 59 | let calc = &self.calculation; 60 | *self.values.entry(arg).or_insert_with_key(|&key| calc(key)) 61 | } 62 | 63 | /// Calls the function without performing a lookup and replaces 64 | /// the old return value with the new one, and returns it. 65 | /// Potentially useful if the function reads from a file or RNG 66 | /// whose state may have changed. 67 | // TODO: if there's state, FnMut seems more appropriate. 
68 | pub fn call_and_replace(&mut self, arg: U) -> V { 69 | let new_val = (self.calculation)(arg); 70 | self.values.insert(arg, new_val); 71 | new_val 72 | } 73 | } 74 | 75 | #[cfg(test)] 76 | mod tests { 77 | use super::*; 78 | 79 | #[test] 80 | fn test_cacher_basically_works() { 81 | let mut word_len = Cacher::new(|word: &str| word.len()); 82 | let hello = word_len.call("hello"); 83 | 84 | // Test function returns correctly 85 | assert_eq!(hello, 5); 86 | 87 | // Test HashMap is correct length 88 | assert_eq!(word_len.values.len(), 1); 89 | 90 | // Test HashMap has correct value after one insert 91 | let mut test_map = HashMap::new(); 92 | test_map.insert("hello", 5); 93 | assert_eq!(word_len.values, test_map); 94 | 95 | // Test HashMap has correct value after duplicate insert 96 | word_len.call("hello"); 97 | assert_eq!(word_len.values, test_map); 98 | 99 | // Test HashMap has correct values after unique input 100 | word_len.call("wazzup"); 101 | test_map.insert("wazzup", 6); 102 | assert_eq!(word_len.values, test_map); 103 | } 104 | 105 | #[test] 106 | fn test_cacher_speed() { 107 | // Simulate a function that takes 1 second to complete 108 | let mut func = Cacher::new(|x| { 109 | std::thread::sleep(std::time::Duration::from_millis(100)); 110 | x * x 111 | }); 112 | 113 | // Would take 10 minutes without caching 114 | for _ in 0..6000 { 115 | assert_eq!(25, func.call(5)); 116 | } 117 | } 118 | 119 | #[test] 120 | fn test_call_and_replace() { 121 | use std::time::Instant; 122 | 123 | let mut func = Cacher::new(|_param: usize| Instant::now()); 124 | let first_instant = func.call(0); 125 | let lookup_instant = func.call(0); 126 | 127 | assert_eq!(first_instant, lookup_instant); 128 | assert_eq!(1, func.values.len()); 129 | 130 | let second_instant = func.call_and_replace(0); 131 | assert_eq!(1, func.values.len()); 132 | assert_ne!(second_instant, lookup_instant); 133 | } 134 | } 135 | -------------------------------------------------------------------------------- 
/src/graph/connectivity.rs: -------------------------------------------------------------------------------- 1 | //! Graph connectivity structures. 2 | use super::Graph; 3 | 4 | /// Helper struct that carries data needed for the depth-first searches in 5 | /// ConnectivityGraph's constructor. 6 | struct ConnectivityData { 7 | time: usize, 8 | vis: Box<[usize]>, 9 | low: Box<[usize]>, 10 | v_stack: Vec, 11 | e_stack: Vec, 12 | } 13 | 14 | impl ConnectivityData { 15 | fn new(num_v: usize) -> Self { 16 | Self { 17 | time: 0, 18 | vis: vec![0; num_v].into_boxed_slice(), 19 | low: vec![0; num_v].into_boxed_slice(), 20 | v_stack: vec![], 21 | e_stack: vec![], 22 | } 23 | } 24 | 25 | fn visit(&mut self, u: usize) { 26 | self.time += 1; 27 | self.vis[u] = self.time; 28 | self.low[u] = self.time; 29 | self.v_stack.push(u); 30 | } 31 | 32 | fn lower(&mut self, u: usize, val: usize) { 33 | if self.low[u] > val { 34 | self.low[u] = val 35 | } 36 | } 37 | } 38 | 39 | /// Represents the decomposition of a graph into any of its constituent parts: 40 | /// 41 | /// - Connected components (CC), 42 | /// - Strongly connected components (SCC), 43 | /// - 2-edge-connected components (2ECC), 44 | /// - 2-vertex-connected components (2VCC) 45 | /// 46 | /// Multiple-edges and self-loops are correctly handled. 47 | pub struct ConnectivityGraph<'a> { 48 | /// Immutable graph, frozen for the lifetime of the ConnectivityGraph object. 49 | pub graph: &'a Graph, 50 | /// ID of a vertex's CC, SCC or 2ECC, whichever applies. Range 1 to num_cc. 51 | pub cc: Vec, 52 | /// ID of an edge's 2VCC, where applicable. Ranges from 1 to num_vcc. 53 | pub vcc: Vec, 54 | /// Total number of CCs, SCCs or 2ECCs, whichever applies. 55 | pub num_cc: usize, 56 | /// Total number of 2VCCs, where applicable. 
57 | pub num_vcc: usize, 58 | } 59 | 60 | impl<'a> ConnectivityGraph<'a> { 61 | /// Computes CCs (connected components), SCCs (strongly connected 62 | /// components), 2ECCs (2-edge-connected components), and/or 2VCCs 63 | /// (2-vertex-connected components), depending on the parameter and graph: 64 | /// - is_directed == true on directed graph: SCCs in rev-topological order 65 | /// - is_directed == true on undirected graph: CCs 66 | /// - is_directed == false on undirected graph: 2ECCs and 2VCCs 67 | /// - is_directed == false on directed graph: undefined behavior 68 | pub fn new(graph: &'a Graph, is_directed: bool) -> Self { 69 | let mut connect = Self { 70 | graph, 71 | cc: vec![0; graph.num_v()], 72 | vcc: vec![0; graph.num_e()], 73 | num_cc: 0, 74 | num_vcc: 0, 75 | }; 76 | let mut data = ConnectivityData::new(graph.num_v()); 77 | for u in 0..graph.num_v() { 78 | if data.vis[u] == 0 { 79 | if is_directed { 80 | connect.scc(&mut data, u); 81 | } else { 82 | connect.bcc(&mut data, u, graph.num_e() + 1); 83 | } 84 | } 85 | } 86 | connect 87 | } 88 | 89 | fn scc(&mut self, data: &mut ConnectivityData, u: usize) { 90 | data.visit(u); 91 | for (_, v) in self.graph.adj_list(u) { 92 | if data.vis[v] == 0 { 93 | self.scc(data, v); 94 | } 95 | if self.cc[v] == 0 { 96 | data.lower(u, data.low[v]); 97 | } 98 | } 99 | if data.vis[u] == data.low[u] { 100 | self.num_cc += 1; 101 | while let Some(v) = data.v_stack.pop() { 102 | self.cc[v] = self.num_cc; 103 | if v == u { 104 | break; 105 | } 106 | } 107 | } 108 | } 109 | 110 | /// From the directed implication graph corresponding to a 2-SAT clause, 111 | /// finds a satisfying assignment if it exists or returns None otherwise. 
112 | pub fn two_sat_assign(&self) -> Option> { 113 | (0..self.graph.num_v() / 2) 114 | .map(|i| { 115 | let scc_true = self.cc[2 * i]; 116 | let scc_false = self.cc[2 * i + 1]; 117 | if scc_true == scc_false { 118 | None 119 | } else { 120 | Some(scc_true < scc_false) 121 | } 122 | }) 123 | .collect() 124 | } 125 | 126 | /// Gets the vertices of a graph according to a topological order of the 127 | /// strongly connected components. Most often used on DAGs. 128 | pub fn topological_sort(&self) -> Vec { 129 | let mut vertices = (0..self.graph.num_v()).collect::>(); 130 | vertices.sort_unstable_by_key(|&u| self.num_cc - self.cc[u]); 131 | vertices 132 | } 133 | 134 | fn bcc(&mut self, data: &mut ConnectivityData, u: usize, par: usize) { 135 | data.visit(u); 136 | for (e, v) in self.graph.adj_list(u) { 137 | if data.vis[v] == 0 { 138 | data.e_stack.push(e); 139 | self.bcc(data, v, e); 140 | data.lower(u, data.low[v]); 141 | if data.vis[u] <= data.low[v] { 142 | // u is a cut vertex unless it's a one-child root 143 | self.num_vcc += 1; 144 | while let Some(top_e) = data.e_stack.pop() { 145 | self.vcc[top_e] = self.num_vcc; 146 | self.vcc[top_e ^ 1] = self.num_vcc; 147 | if e ^ top_e <= 1 { 148 | break; 149 | } 150 | } 151 | } 152 | } else if data.vis[v] < data.vis[u] && e ^ par != 1 { 153 | data.lower(u, data.vis[v]); 154 | data.e_stack.push(e); 155 | } else if v == u { 156 | // e is a self-loop 157 | self.num_vcc += 1; 158 | self.vcc[e] = self.num_vcc; 159 | self.vcc[e ^ 1] = self.num_vcc; 160 | } 161 | } 162 | if data.vis[u] == data.low[u] { 163 | // par is a cut edge unless par==-1 164 | self.num_cc += 1; 165 | while let Some(v) = data.v_stack.pop() { 166 | self.cc[v] = self.num_cc; 167 | if v == u { 168 | break; 169 | } 170 | } 171 | } 172 | } 173 | 174 | /// In an undirected graph, determines whether u is an articulation vertex. 
175 | pub fn is_cut_vertex(&self, u: usize) -> bool { 176 | if let Some(first_e) = self.graph.first[u] { 177 | self.graph 178 | .adj_list(u) 179 | .any(|(e, _)| self.vcc[first_e] != self.vcc[e]) 180 | } else { 181 | false 182 | } 183 | } 184 | 185 | /// In an undirected graph, determines whether e is a bridge 186 | pub fn is_cut_edge(&self, e: usize) -> bool { 187 | let u = self.graph.endp[e ^ 1]; 188 | let v = self.graph.endp[e]; 189 | self.cc[u] != self.cc[v] 190 | } 191 | } 192 | 193 | #[cfg(test)] 194 | mod test { 195 | use super::*; 196 | 197 | #[test] 198 | fn test_toposort() { 199 | let mut graph = Graph::new(4, 5); 200 | graph.add_edge(0, 0); 201 | graph.add_edge(0, 2); 202 | graph.add_edge(3, 2); 203 | graph.add_edge(3, 1); 204 | graph.add_edge(1, 0); 205 | 206 | assert_eq!( 207 | ConnectivityGraph::new(&graph, true).topological_sort(), 208 | vec![3, 1, 0, 2] 209 | ); 210 | } 211 | 212 | #[test] 213 | fn test_two_sat() { 214 | let mut graph = Graph::new(6, 8); 215 | let (x, y, z) = (0, 2, 4); 216 | 217 | graph.add_two_sat_clause(x, z); 218 | graph.add_two_sat_clause(y ^ 1, z ^ 1); 219 | graph.add_two_sat_clause(y, y); 220 | assert_eq!( 221 | ConnectivityGraph::new(&graph, true).two_sat_assign(), 222 | Some(vec![true, true, false]) 223 | ); 224 | 225 | graph.add_two_sat_clause(z, z); 226 | assert_eq!(ConnectivityGraph::new(&graph, true).two_sat_assign(), None); 227 | } 228 | 229 | #[test] 230 | fn test_biconnected() { 231 | let mut graph = Graph::new(3, 6); 232 | graph.add_undirected_edge(0, 1); 233 | graph.add_undirected_edge(1, 2); 234 | graph.add_undirected_edge(1, 2); 235 | 236 | let cg = ConnectivityGraph::new(&graph, false); 237 | let bridges = (0..graph.num_e()) 238 | .filter(|&e| cg.is_cut_edge(e)) 239 | .collect::>(); 240 | let articulation_points = (0..graph.num_v()) 241 | .filter(|&u| cg.is_cut_vertex(u)) 242 | .collect::>(); 243 | 244 | assert_eq!(bridges, vec![0, 1]); 245 | assert_eq!(articulation_points, vec![1]); 246 | } 247 | } 248 | 
-------------------------------------------------------------------------------- /src/graph/flow.rs: -------------------------------------------------------------------------------- 1 | //! Maximum flows, matchings, and minimum cuts. 2 | use super::{AdjListIterator, Graph}; 3 | 4 | /// Representation of a network flow problem with (optional) costs. 5 | pub struct FlowGraph { 6 | /// Owned graph, managed by this FlowGraph object. 7 | pub graph: Graph, 8 | /// Edge capacities. 9 | pub cap: Vec, 10 | /// Edge cost per unit flow. 11 | pub cost: Vec, 12 | } 13 | 14 | impl FlowGraph { 15 | /// An upper limit to the flow. 16 | const INF: i64 = i64::MAX; 17 | 18 | /// Initializes a flow network with vmax vertices and no edges. 19 | pub fn new(vmax: usize, emax_hint: usize) -> Self { 20 | Self { 21 | graph: Graph::new(vmax, 2 * emax_hint), 22 | cap: Vec::with_capacity(2 * emax_hint), 23 | cost: Vec::with_capacity(2 * emax_hint), 24 | } 25 | } 26 | 27 | /// Adds an edge with specified directional capacities and cost per unit of 28 | /// flow. If only forward flow is allowed, rcap should be zero. 29 | pub fn add_edge(&mut self, u: usize, v: usize, cap: i64, rcap: i64, cost: i64) { 30 | self.cap.push(cap); 31 | self.cap.push(rcap); 32 | self.cost.push(cost); 33 | self.cost.push(-cost); 34 | self.graph.add_undirected_edge(u, v); 35 | } 36 | 37 | /// Dinic's algorithm to find the maximum flow from s to t where s != t. 38 | /// Generalizes the Hopcroft-Karp maximum bipartite matching algorithm. 39 | /// V^2E in general, min(V^(2/3),sqrt(E))E when all edges are unit capacity, 40 | /// sqrt(V)E when all vertices are unit capacity as in bipartite graphs. 41 | /// 42 | /// # Panics 43 | /// 44 | /// Panics if the maximum flow is 2^63 or larger. 
45 | pub fn dinic(&self, s: usize, t: usize) -> (i64, Vec) { 46 | let mut flow = vec![0; self.graph.num_e()]; 47 | let mut max_flow = 0; 48 | loop { 49 | let dist = self.dinic_search(s, &flow); 50 | if dist[t] == Self::INF { 51 | break; 52 | } 53 | // Keep track of adjacency lists to avoid revisiting blocked edges. 54 | let mut adj_iters = (0..self.graph.num_v()) 55 | .map(|u| self.graph.adj_list(u).peekable()) 56 | .collect::>(); 57 | max_flow += self.dinic_augment(s, t, Self::INF, &dist, &mut adj_iters, &mut flow); 58 | } 59 | (max_flow, flow) 60 | } 61 | 62 | // Compute BFS distances to restrict attention to shortest path edges. 63 | fn dinic_search(&self, s: usize, flow: &[i64]) -> Vec { 64 | let mut dist = vec![Self::INF; self.graph.num_v()]; 65 | let mut q = ::std::collections::VecDeque::new(); 66 | dist[s] = 0; 67 | q.push_back(s); 68 | while let Some(u) = q.pop_front() { 69 | for (e, v) in self.graph.adj_list(u) { 70 | if dist[v] == Self::INF && flow[e] < self.cap[e] { 71 | dist[v] = dist[u] + 1; 72 | q.push_back(v); 73 | } 74 | } 75 | } 76 | dist 77 | } 78 | 79 | // Pushes a blocking flow that increases the residual's s-t distance. 80 | fn dinic_augment( 81 | &self, 82 | u: usize, 83 | t: usize, 84 | f: i64, 85 | dist: &[i64], 86 | adj: &mut [::std::iter::Peekable], 87 | flow: &mut [i64], 88 | ) -> i64 { 89 | if u == t { 90 | return f; 91 | } 92 | let mut df = 0; 93 | 94 | while let Some(&(e, v)) = adj[u].peek() { 95 | let rem_cap = (self.cap[e] - flow[e]).min(f - df); 96 | if rem_cap > 0 && dist[v] == dist[u] + 1 { 97 | let cf = self.dinic_augment(v, t, rem_cap, dist, adj, flow); 98 | flow[e] += cf; 99 | flow[e ^ 1] -= cf; 100 | df += cf; 101 | if df == f { 102 | break; 103 | } 104 | } 105 | // The current edge is either saturated or blocked. 106 | adj[u].next(); 107 | } 108 | df 109 | } 110 | 111 | /// After running maximum flow, use this to recover the dual minimum cut. 
112 | pub fn min_cut(&self, dist: &[i64]) -> Vec { 113 | (0..self.graph.num_e()) 114 | .filter(|&e| { 115 | let u = self.graph.endp[e ^ 1]; 116 | let v = self.graph.endp[e]; 117 | dist[u] < Self::INF && dist[v] == Self::INF 118 | }) 119 | .collect() 120 | } 121 | 122 | /// Among all s-t maximum flows, finds one with minimum cost, assuming 123 | /// s != t and no negative-cost cycles. 124 | /// 125 | /// # Panics 126 | /// 127 | /// Panics if the flow or cost overflow a 64-bit signed integer. 128 | pub fn mcf(&self, s: usize, t: usize) -> (i64, i64, Vec) { 129 | let mut pot = vec![0; self.graph.num_v()]; 130 | 131 | // Bellman-Ford deals with negative-cost edges at initialization. 132 | for _ in 1..self.graph.num_v() { 133 | for e in 0..self.graph.num_e() { 134 | if self.cap[e] > 0 { 135 | let u = self.graph.endp[e ^ 1]; 136 | let v = self.graph.endp[e]; 137 | pot[v] = pot[v].min(pot[u] + self.cost[e]); 138 | } 139 | } 140 | } 141 | 142 | let mut flow = vec![0; self.graph.num_e()]; 143 | let (mut min_cost, mut max_flow) = (0, 0); 144 | loop { 145 | let par = self.mcf_search(s, &flow, &mut pot); 146 | if par[t].is_none() { 147 | break; 148 | } 149 | let (dc, df) = self.mcf_augment(t, &par, &mut flow); 150 | min_cost += dc; 151 | max_flow += df; 152 | } 153 | (min_cost, max_flow, flow) 154 | } 155 | 156 | // Maintains Johnson's potentials to prevent negative-cost residual edges. 157 | // This allows running Dijkstra instead of the slower Bellman-Ford. 
158 | fn mcf_search(&self, s: usize, flow: &[i64], pot: &mut [i64]) -> Vec> { 159 | let mut vis = vec![false; self.graph.num_v()]; 160 | let mut dist = vec![Self::INF; self.graph.num_v()]; 161 | let mut par = vec![None; self.graph.num_v()]; 162 | 163 | dist[s] = 0; 164 | while let Some(u) = (0..self.graph.num_v()) 165 | .filter(|&u| !vis[u] && dist[u] < Self::INF) 166 | .min_by_key(|&u| dist[u] - pot[u]) 167 | { 168 | vis[u] = true; 169 | pot[u] = dist[u]; 170 | for (e, v) in self.graph.adj_list(u) { 171 | if dist[v] > dist[u] + self.cost[e] && flow[e] < self.cap[e] { 172 | dist[v] = dist[u] + self.cost[e]; 173 | par[v] = Some(e); 174 | } 175 | } 176 | } 177 | par 178 | } 179 | 180 | // Pushes flow along an augmenting path of minimum cost. 181 | fn mcf_augment(&self, t: usize, par: &[Option], flow: &mut [i64]) -> (i64, i64) { 182 | let (mut dc, mut df) = (0, Self::INF); 183 | let mut u = t; 184 | while let Some(e) = par[u] { 185 | df = df.min(self.cap[e] - flow[e]); 186 | u = self.graph.endp[e ^ 1]; 187 | } 188 | u = t; 189 | while let Some(e) = par[u] { 190 | flow[e] += df; 191 | flow[e ^ 1] -= df; 192 | dc += df * self.cost[e]; 193 | u = self.graph.endp[e ^ 1]; 194 | } 195 | (dc, df) 196 | } 197 | } 198 | 199 | #[cfg(test)] 200 | mod test { 201 | use super::*; 202 | 203 | #[test] 204 | fn test_basic_flow() { 205 | let mut graph = FlowGraph::new(3, 2); 206 | graph.add_edge(0, 1, 4, 0, 0); 207 | graph.add_edge(1, 2, 3, 0, 0); 208 | 209 | let flow = graph.dinic(0, 2).0; 210 | assert_eq!(flow, 3); 211 | } 212 | 213 | #[test] 214 | fn test_min_cost_flow() { 215 | let mut graph = FlowGraph::new(4, 4); 216 | graph.add_edge(0, 1, 10, 0, -10); 217 | graph.add_edge(1, 2, 7, 0, 8); 218 | graph.add_edge(2, 3, 7, 0, 8); 219 | graph.add_edge(1, 3, 7, 0, 10); 220 | 221 | let (cost, flow, _) = graph.mcf(0, 3); 222 | assert_eq!(cost, 18); 223 | assert_eq!(flow, 10); 224 | } 225 | 226 | #[test] 227 | fn test_max_matching() { 228 | let mut graph = FlowGraph::new(14, 4); 229 | 230 | 
let source = 0; 231 | let sink = 13; 232 | 233 | //Vertex indices of "left hand side" of bipartite graph go from [left_start, right_start) 234 | let left_start = 1; 235 | //Vertex indices of "right hand side" of bipartite graph go from [right_start, sink) 236 | let right_start = 7; 237 | 238 | //Initialize source / sink connections; both left & right have 6 nodes 239 | for lhs_vertex in left_start..left_start + 6 { 240 | graph.add_edge(source, lhs_vertex, 1, 0, 0); 241 | } 242 | 243 | for rhs_vertex in right_start..right_start + 6 { 244 | graph.add_edge(rhs_vertex, sink, 1, 0, 0); 245 | } 246 | 247 | graph.add_edge(left_start + 0, right_start + 1, 1, 0, 0); 248 | graph.add_edge(left_start + 0, right_start + 2, 1, 0, 0); 249 | graph.add_edge(left_start + 2, right_start + 0, 1, 0, 0); 250 | graph.add_edge(left_start + 2, right_start + 3, 1, 0, 0); 251 | graph.add_edge(left_start + 3, right_start + 2, 1, 0, 0); 252 | graph.add_edge(left_start + 4, right_start + 2, 1, 0, 0); 253 | graph.add_edge(left_start + 4, right_start + 3, 1, 0, 0); 254 | graph.add_edge(left_start + 5, right_start + 5, 1, 0, 0); 255 | 256 | let (flow_amt, flow) = graph.dinic(source, sink); 257 | assert_eq!(flow_amt, 5); 258 | 259 | //L->R edges in maximum matching 260 | let left_right_edges = flow 261 | .into_iter() 262 | .enumerate() 263 | .filter(|&(_e, f)| f > 0) 264 | //map to u->v 265 | .map(|(e, _f)| (graph.graph.endp[e ^ 1], graph.graph.endp[e])) 266 | //leave out source and sink nodes 267 | .filter(|&(u, v)| u != source && v != sink) 268 | .collect::>(); 269 | 270 | assert_eq!( 271 | left_right_edges, 272 | vec![(1, 8), (3, 7), (4, 9), (5, 10), (6, 12)] 273 | ); 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /src/graph/mod.rs: -------------------------------------------------------------------------------- 1 | //! Basic graph module without explicit support for deletion. 2 | //! 3 | //! # Panics 4 | //! 5 | //! 
All methods will panic if given an out-of-bounds element index. 6 | pub mod connectivity; 7 | pub mod flow; 8 | pub mod util; 9 | 10 | /// Represents a union of disjoint sets. Each set's elements are arranged in a 11 | /// tree, whose root is the set's representative. 12 | pub struct DisjointSets { 13 | parent: Vec, 14 | } 15 | 16 | impl DisjointSets { 17 | /// Initializes disjoint sets containing one element each. 18 | pub fn new(size: usize) -> Self { 19 | Self { 20 | parent: (0..size).collect(), 21 | } 22 | } 23 | 24 | /// Finds the set's representative. Do path compression along the way to make 25 | /// future queries faster. 26 | pub fn find(&mut self, u: usize) -> usize { 27 | let pu = self.parent[u]; 28 | if pu != u { 29 | self.parent[u] = self.find(pu); 30 | } 31 | self.parent[u] 32 | } 33 | 34 | /// Merges the sets containing u and v into a single set containing their 35 | /// union. Returns true if u and v were previously in different sets. 36 | pub fn merge(&mut self, u: usize, v: usize) -> bool { 37 | let (pu, pv) = (self.find(u), self.find(v)); 38 | self.parent[pu] = pv; 39 | pu != pv 40 | } 41 | } 42 | 43 | /// A compact graph representation. Edges are numbered in order of insertion. 44 | /// Each adjacency list consists of all edges pointing out from a given vertex. 45 | pub struct Graph { 46 | /// Maps a vertex id to the first edge in its adjacency list. 47 | first: Vec>, 48 | /// Maps an edge id to the next edge in the same adjacency list. 49 | next: Vec>, 50 | /// Maps an edge id to the vertex that it points to. 51 | endp: Vec, 52 | } 53 | 54 | impl Graph { 55 | /// Initializes a graph with vmax vertices and no edges. To reduce 56 | /// unnecessary allocations, emax_hint should be close to the number of 57 | /// edges that will be inserted. 
58 | pub fn new(vmax: usize, emax_hint: usize) -> Self { 59 | Self { 60 | first: vec![None; vmax], 61 | next: Vec::with_capacity(emax_hint), 62 | endp: Vec::with_capacity(emax_hint), 63 | } 64 | } 65 | 66 | /// Returns the number of vertices. 67 | pub fn num_v(&self) -> usize { 68 | self.first.len() 69 | } 70 | 71 | /// Returns the number of edges, double-counting undirected edges. 72 | pub fn num_e(&self) -> usize { 73 | self.endp.len() 74 | } 75 | 76 | /// Adds a directed edge from u to v. 77 | pub fn add_edge(&mut self, u: usize, v: usize) { 78 | self.next.push(self.first[u]); 79 | self.first[u] = Some(self.num_e()); 80 | self.endp.push(v); 81 | } 82 | 83 | /// An undirected edge is two directed edges. If edges are added only via 84 | /// this funcion, the reverse of any edge e can be found at e^1. 85 | pub fn add_undirected_edge(&mut self, u: usize, v: usize) { 86 | self.add_edge(u, v); 87 | self.add_edge(v, u); 88 | } 89 | 90 | /// If we think of each even-numbered vertex as a variable, and its 91 | /// odd-numbered successor as its negation, then we can build the 92 | /// implication graph corresponding to any 2-CNF formula. 93 | /// Note that u||v == !u -> v == !v -> u. 94 | pub fn add_two_sat_clause(&mut self, u: usize, v: usize) { 95 | self.add_edge(u ^ 1, v); 96 | self.add_edge(v ^ 1, u); 97 | } 98 | 99 | /// Gets vertex u's adjacency list. 100 | pub fn adj_list(&self, u: usize) -> AdjListIterator { 101 | AdjListIterator { 102 | graph: self, 103 | next_e: self.first[u], 104 | } 105 | } 106 | } 107 | 108 | /// An iterator for convenient adjacency list traversal. 109 | pub struct AdjListIterator<'a> { 110 | graph: &'a Graph, 111 | next_e: Option, 112 | } 113 | 114 | impl Iterator for AdjListIterator<'_> { 115 | type Item = (usize, usize); 116 | 117 | /// Produces an outgoing edge and vertex. 
118 | fn next(&mut self) -> Option { 119 | self.next_e.map(|e| { 120 | let v = self.graph.endp[e]; 121 | self.next_e = self.graph.next[e]; 122 | (e, v) 123 | }) 124 | } 125 | } 126 | 127 | #[cfg(test)] 128 | mod test { 129 | use super::*; 130 | 131 | #[test] 132 | fn test_adj_list() { 133 | let mut graph = Graph::new(5, 6); 134 | graph.add_edge(2, 3); 135 | graph.add_edge(2, 4); 136 | graph.add_edge(4, 1); 137 | graph.add_edge(1, 2); 138 | graph.add_undirected_edge(0, 2); 139 | 140 | let adj = graph.adj_list(2).collect::>(); 141 | 142 | assert_eq!(adj, vec![(5, 0), (1, 4), (0, 3)]); 143 | for (e, v) in adj { 144 | assert_eq!(v, graph.endp[e]); 145 | } 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /src/graph/util.rs: -------------------------------------------------------------------------------- 1 | use super::{DisjointSets, Graph}; 2 | use crate::graph::AdjListIterator; 3 | use std::cmp::Reverse; 4 | 5 | impl Graph { 6 | /// Finds the sequence of edges in an Euler path starting from u, assuming 7 | /// it exists and that the graph is directed. Undefined behavior if this 8 | /// precondition is violated. To extend this to undirected graphs, maintain 9 | /// a visited array to skip the reverse edge. 10 | pub fn euler_path(&self, u: usize) -> Vec { 11 | let mut adj_iters = (0..self.num_v()) 12 | .map(|u| self.adj_list(u)) 13 | .collect::>(); 14 | let mut edges = Vec::with_capacity(self.num_e()); 15 | Self::euler_recurse(u, &mut adj_iters, &mut edges); 16 | edges.reverse(); 17 | edges 18 | } 19 | 20 | // Helper function used by euler_path. Note that we can't use a for-loop 21 | // that would consume the adjacency list as recursive calls may need it. 
22 | fn euler_recurse(u: usize, adj: &mut [AdjListIterator], edges: &mut Vec) { 23 | while let Some((e, v)) = adj[u].next() { 24 | Self::euler_recurse(v, adj, edges); 25 | edges.push(e); 26 | } 27 | } 28 | 29 | /// Kruskal's minimum spanning tree algorithm on an undirected graph. 30 | pub fn min_spanning_tree(&self, weights: &[i64]) -> Vec { 31 | assert_eq!(self.num_e(), 2 * weights.len()); 32 | let mut edges = (0..weights.len()).collect::>(); 33 | edges.sort_unstable_by_key(|&e| weights[e]); 34 | 35 | let mut components = DisjointSets::new(self.num_v()); 36 | edges 37 | .into_iter() 38 | .filter(|&e| components.merge(self.endp[2 * e], self.endp[2 * e + 1])) 39 | .collect() 40 | } 41 | 42 | // Single-source shortest paths on a directed graph with non-negative weights 43 | pub fn dijkstra(&self, weights: &[u64], u: usize) -> Vec { 44 | assert_eq!(self.num_e(), weights.len()); 45 | let mut dist = vec![u64::MAX; weights.len()]; 46 | let mut heap = std::collections::BinaryHeap::new(); 47 | 48 | dist[u] = 0; 49 | heap.push((Reverse(0), 0)); 50 | while let Some((Reverse(dist_u), u)) = heap.pop() { 51 | if dist[u] == dist_u { 52 | for (e, v) in self.adj_list(u) { 53 | let dist_v = dist_u + weights[e]; 54 | if dist[v] > dist_v { 55 | dist[v] = dist_v; 56 | heap.push((Reverse(dist_v), v)); 57 | } 58 | } 59 | } 60 | } 61 | dist 62 | } 63 | 64 | pub fn dfs(&self, root: usize) -> DfsIterator { 65 | let mut visited = vec![false; self.num_v()]; 66 | visited[root] = true; 67 | let adj_iters = (0..self.num_v()) 68 | .map(|u| self.adj_list(u)) 69 | .collect::>(); 70 | 71 | DfsIterator { 72 | visited, 73 | stack: vec![root], 74 | adj_iters, 75 | } 76 | } 77 | } 78 | 79 | pub struct DfsIterator<'a> { 80 | visited: Vec, 81 | stack: Vec, 82 | adj_iters: Vec>, 83 | } 84 | 85 | impl Iterator for DfsIterator<'_> { 86 | type Item = (usize, usize); 87 | 88 | /// Returns next edge and vertex in the depth-first traversal 89 | // Refs: 
https://www.geeksforgeeks.org/iterative-depth-first-traversal/ 90 | // https://en.wikipedia.org/wiki/Depth-first_search 91 | fn next(&mut self) -> Option { 92 | loop { 93 | let &u = self.stack.last()?; 94 | for (e, v) in self.adj_iters[u].by_ref() { 95 | if !self.visited[v] { 96 | self.visited[v] = true; 97 | self.stack.push(v); 98 | return Some((e, v)); 99 | } 100 | } 101 | self.stack.pop(); 102 | } 103 | } 104 | } 105 | 106 | #[cfg(test)] 107 | mod test { 108 | use super::*; 109 | 110 | #[test] 111 | fn test_euler() { 112 | let mut graph = Graph::new(3, 4); 113 | graph.add_edge(0, 1); 114 | graph.add_edge(1, 0); 115 | graph.add_edge(1, 2); 116 | graph.add_edge(2, 1); 117 | 118 | assert_eq!(graph.euler_path(0), vec![0, 2, 3, 1]); 119 | } 120 | 121 | #[test] 122 | fn test_min_spanning_tree() { 123 | let mut graph = Graph::new(3, 6); 124 | graph.add_undirected_edge(0, 1); 125 | graph.add_undirected_edge(1, 2); 126 | graph.add_undirected_edge(2, 0); 127 | let weights = [7, 3, 5]; 128 | 129 | let mst = graph.min_spanning_tree(&weights); 130 | let mst_cost = mst.iter().map(|&e| weights[e]).sum::(); 131 | assert_eq!(mst, vec![1, 2]); 132 | assert_eq!(mst_cost, 8); 133 | } 134 | 135 | #[test] 136 | fn test_dijkstra() { 137 | let mut graph = Graph::new(3, 3); 138 | graph.add_edge(0, 1); 139 | graph.add_edge(1, 2); 140 | graph.add_edge(2, 0); 141 | let weights = [7, 3, 5]; 142 | 143 | let dist = graph.dijkstra(&weights, 0); 144 | assert_eq!(dist, vec![0, 7, 10]); 145 | } 146 | 147 | #[test] 148 | fn test_dfs() { 149 | let mut graph = Graph::new(4, 6); 150 | graph.add_edge(0, 2); 151 | graph.add_edge(2, 0); 152 | graph.add_edge(1, 2); 153 | graph.add_edge(0, 1); 154 | graph.add_edge(3, 3); 155 | graph.add_edge(2, 3); 156 | 157 | let dfs_root = 2; 158 | let dfs_traversal = std::iter::once(dfs_root) 159 | .chain(graph.dfs(dfs_root).map(|(_, v)| v)) 160 | .collect::>(); 161 | 162 | assert_eq!(dfs_traversal, vec![2, 3, 0, 1]); 163 | } 164 | 165 | #[test] 166 | fn test_dfs2() { 
167 | let mut graph = Graph::new(5, 6); 168 | graph.add_edge(0, 2); 169 | graph.add_edge(2, 1); 170 | graph.add_edge(1, 0); 171 | graph.add_edge(0, 3); 172 | graph.add_edge(3, 4); 173 | graph.add_edge(4, 0); 174 | 175 | let dfs_root = 0; 176 | let dfs_traversal = std::iter::once(dfs_root) 177 | .chain(graph.dfs(dfs_root).map(|(_, v)| v)) 178 | .collect::>(); 179 | 180 | assert_eq!(dfs_traversal, vec![0, 3, 4, 2, 1]); 181 | } 182 | 183 | #[test] 184 | fn test_dfs_space_complexity() { 185 | let num_v = 20; 186 | let mut graph = Graph::new(num_v, 0); 187 | for i in 0..num_v { 188 | for j in 0..num_v { 189 | graph.add_undirected_edge(i, j); 190 | } 191 | } 192 | 193 | let dfs_root = 7; 194 | let mut dfs_search = graph.dfs(dfs_root); 195 | let mut dfs_check = vec![dfs_root]; 196 | for _ in 1..num_v { 197 | dfs_check.push(dfs_search.next().unwrap().1); 198 | assert!(dfs_search.stack.len() <= num_v + 1); 199 | } 200 | 201 | dfs_check.sort(); 202 | dfs_check.dedup(); 203 | assert_eq!(0, dfs_check[0]); 204 | assert_eq!(num_v, dfs_check.len()); 205 | assert_eq!(num_v - 1, dfs_check[num_v - 1]); 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /src/li_chao.rs: -------------------------------------------------------------------------------- 1 | /// A structure for answering maximum queries on a set of linear functions. Supports two 2 | /// operations: inserting a linear function and querying for maximum at a given point. 3 | /// The queries can be done in any order, and we can do all the calculations using integers. 4 | /// https://cp-algorithms.com/geometry/convex_hull_trick.html#li-chao-tree 5 | /// Compared to the code in the above link, this implementation further improves the algorithm by 6 | /// reducing the number of nodes to (right - left). This is done by removing the midpoint of a 7 | /// segment from both children. Even better, this allows the index of a node to just be the 8 | /// midpoint of the interval! 
9 | /// 10 | /// Just like normal segment trees, this could be modified to a dynamic tree when the range is 11 | /// huge, or if the queries are known in advance the x-coordinates can be compressed. 12 | /// (it can also be made persistent!). 13 | pub struct LiChaoTree { 14 | left: i64, 15 | right: i64, 16 | lines: Vec<(i64, i64)>, 17 | } 18 | 19 | impl LiChaoTree { 20 | /// Creates a new tree, built to handle queries on the interval [left, right). 21 | pub fn new(left: i64, right: i64) -> Self { 22 | Self { 23 | left, 24 | right, 25 | lines: vec![(0, i64::MIN); (right - left) as usize], 26 | } 27 | } 28 | 29 | /// Every node in the tree has the property that the line that maximizes its midpoint is found 30 | /// either in the node or one of its ancestors. When we visit a node, we compute the winner at 31 | /// the midpoint of the node. The winner is stored in the node. The loser can still possibly 32 | /// beat the winner on some segment, either to the left or to the right of the current 33 | /// midpoint, so we propagate it to that segment. This sequence ensures that the invariant is 34 | /// kept. 35 | fn max_with_impl(&mut self, mut m: i64, mut b: i64, l: i64, r: i64) { 36 | if r <= l { 37 | return; 38 | } 39 | let ix = ((r - self.left + l - self.left) / 2) as usize; 40 | let mid = self.left + (ix as i64); 41 | let (ref mut m_ix, ref mut b_ix) = self.lines[ix]; 42 | if m * mid + b > *m_ix * mid + *b_ix { 43 | std::mem::swap(&mut m, m_ix); 44 | std::mem::swap(&mut b, b_ix); 45 | } 46 | if m < *m_ix { 47 | self.max_with_impl(m, b, l, mid); 48 | } else if m > *m_ix { 49 | self.max_with_impl(m, b, mid + 1, r); 50 | } 51 | } 52 | 53 | /// Adds the line with slope m and intercept b. O(log N) complexity. 
54 | pub fn max_with(&mut self, m: i64, b: i64) { 55 | self.max_with_impl(m, b, self.left, self.right); 56 | } 57 | 58 | /// Because of the invariant established by add_line, we know that the best line for a given 59 | /// point is stored in one of the ancestors of its node. So we accumulate the maximum answer as 60 | /// we go back up the tree. 61 | fn evaluate_impl(&self, x: i64, l: i64, r: i64) -> i64 { 62 | if r == l { 63 | return i64::MIN; 64 | } 65 | let ix = ((r - self.left + l - self.left) / 2) as usize; 66 | let mid = ix as i64 + self.left; 67 | let y = self.lines[ix].0 * x + self.lines[ix].1; 68 | if x == mid { 69 | y 70 | } else if x < mid { 71 | self.evaluate_impl(x, l, mid).max(y) 72 | } else { 73 | self.evaluate_impl(x, mid + 1, r).max(y) 74 | } 75 | } 76 | 77 | /// Finds the maximum mx+b among all lines in the structure. O(log N) complexity. 78 | pub fn evaluate(&self, x: i64) -> i64 { 79 | self.evaluate_impl(x, self.left, self.right) 80 | } 81 | } 82 | 83 | #[cfg(test)] 84 | mod test { 85 | use super::*; 86 | 87 | #[test] 88 | fn test_li_chao_tree() { 89 | let lines = [(0, -3), (-1, 0), (1, -8), (-2, 1), (1, -4)]; 90 | let xs = [0, 1, 2, 3, 4, 5]; 91 | // results[i] consists of the expected y-coordinates after processing 92 | // the first i+1 lines. 
93 | let results = [ 94 | [-3, -3, -3, -3, -3, -3], 95 | [0, -1, -2, -3, -3, -3], 96 | [0, -1, -2, -3, -3, -3], 97 | [1, -1, -2, -3, -3, -3], 98 | [1, -1, -2, -1, 0, 1], 99 | ]; 100 | let mut li_chao = LiChaoTree::new(0, 6); 101 | 102 | assert_eq!(li_chao.evaluate(0), i64::MIN); 103 | for (&(slope, intercept), expected) in lines.iter().zip(results.iter()) { 104 | li_chao.max_with(slope, intercept); 105 | let ys: Vec = xs.iter().map(|&x| li_chao.evaluate(x)).collect(); 106 | assert_eq!(expected, &ys[..]); 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Algorithms Cookbook in Rust. 2 | 3 | pub mod caching; 4 | pub mod graph; 5 | pub mod li_chao; 6 | pub mod math; 7 | pub mod order; 8 | pub mod range_query; 9 | pub mod rng; 10 | pub mod scanner; 11 | pub mod string_proc; 12 | -------------------------------------------------------------------------------- /src/math/fft.rs: -------------------------------------------------------------------------------- 1 | //! The Fast Fourier Transform (FFT) and Number Theoretic Transform (NTT) 2 | use super::num::{CommonField, Complex, PI}; 3 | use std::ops::{Add, Div, Mul, Neg, Sub}; 4 | 5 | // We can delete this struct once f64::reverse_bits() stabilizes. 
6 | struct BitRevIterator { 7 | a: usize, 8 | n: usize, 9 | } 10 | impl BitRevIterator { 11 | fn new(n: usize) -> Self { 12 | assert!(n.is_power_of_two()); 13 | Self { a: 2 * n - 1, n } 14 | } 15 | } 16 | impl Iterator for BitRevIterator { 17 | type Item = usize; 18 | 19 | fn next(&mut self) -> Option { 20 | if self.a == 2 * self.n - 2 { 21 | return None; 22 | } 23 | let mut mask = self.n; 24 | while self.a & mask > 0 { 25 | self.a ^= mask; 26 | mask /= 2; 27 | } 28 | self.a |= mask; 29 | Some(self.a / 2) 30 | } 31 | } 32 | 33 | #[allow(clippy::upper_case_acronyms)] 34 | pub trait FFT: Sized + Copy { 35 | type F: Sized 36 | + Copy 37 | + From 38 | + Neg 39 | + Add 40 | + Div 41 | + Mul 42 | + Sub; 43 | 44 | const ZERO: Self; 45 | 46 | /// A primitive nth root of one raised to the powers 0, 1, 2, ..., n/2 - 1 47 | fn get_roots(n: usize, inverse: bool) -> Vec; 48 | /// 1 for forward transform, 1/n for inverse transform 49 | fn get_factor(n: usize, inverse: bool) -> Self::F; 50 | /// The inverse of Self::F::from() 51 | fn extract(f: Self::F) -> Self; 52 | } 53 | 54 | impl FFT for f64 { 55 | type F = Complex; 56 | 57 | const ZERO: f64 = 0.0; 58 | 59 | fn get_roots(n: usize, inverse: bool) -> Vec { 60 | let step = if inverse { -2.0 } else { 2.0 } * PI / n as f64; 61 | (0..n / 2) 62 | .map(|i| Complex::from_polar(1.0, step * i as f64)) 63 | .collect() 64 | } 65 | 66 | fn get_factor(n: usize, inverse: bool) -> Self::F { 67 | Self::F::from(if inverse { (n as f64).recip() } else { 1.0 }) 68 | } 69 | 70 | fn extract(f: Self::F) -> f64 { 71 | f.real 72 | } 73 | } 74 | 75 | // NTT notes: see problem 30-6 in CLRS for details, keeping in mind that 76 | // 2187 and 410692747 are inverses and 2^26th roots of 1 mod (7<<26)+1 77 | // 15311432 and 469870224 are inverses and 2^23rd roots of 1 mod (119<<23)+1 78 | // 440564289 and 1713844692 are inverses and 2^27th roots of 1 mod (15<<27)+1 79 | // 125 and 2267742733 are inverses and 2^30th roots of 1 mod (3<<30)+1 80 | impl FFT for 
i64 { 81 | type F = CommonField; 82 | 83 | const ZERO: Self = 0; 84 | 85 | fn get_roots(n: usize, inverse: bool) -> Vec { 86 | assert!(n <= 1 << 23); 87 | let mut prim_root = Self::F::from(15_311_432); 88 | if inverse { 89 | prim_root = prim_root.recip(); 90 | } 91 | for _ in (0..).take_while(|&i| n < 1 << (23 - i)) { 92 | prim_root = prim_root * prim_root; 93 | } 94 | 95 | let mut roots = Vec::with_capacity(n / 2); 96 | let mut root = Self::F::from(1); 97 | for _ in 0..roots.capacity() { 98 | roots.push(root); 99 | root = root * prim_root; 100 | } 101 | roots 102 | } 103 | 104 | fn get_factor(n: usize, inverse: bool) -> Self::F { 105 | Self::F::from(if inverse { n as Self } else { 1 }).recip() 106 | } 107 | 108 | fn extract(f: Self::F) -> Self { 109 | f.val 110 | } 111 | } 112 | 113 | /// Computes the discrete fourier transform of v, whose length is a power of 2. 114 | /// Forward transform: polynomial coefficients -> evaluate at roots of unity 115 | /// Inverse transform: values at roots of unity -> interpolated coefficients 116 | pub fn fft(v: &[T::F], inverse: bool) -> Vec { 117 | let n = v.len(); 118 | assert!(n.is_power_of_two()); 119 | 120 | let factor = T::get_factor(n, inverse); 121 | let roots_of_unity = T::get_roots(n, inverse); 122 | let mut dft = BitRevIterator::new(n) 123 | .map(|i| v[i] * factor) 124 | .collect::>(); 125 | 126 | for m in (0..).map(|s| 1 << s).take_while(|&m| m < n) { 127 | for k in (0..n).step_by(2 * m) { 128 | for j in 0..m { 129 | let u = dft[k + j]; 130 | let t = dft[k + j + m] * roots_of_unity[n / 2 / m * j]; 131 | dft[k + j] = u + t; 132 | dft[k + j + m] = u - t; 133 | } 134 | } 135 | } 136 | dft 137 | } 138 | 139 | /// From a slice of reals (f64 or i64), computes DFT of size at least desired_len 140 | pub fn dft_from_reals(v: &[T], desired_len: usize) -> Vec { 141 | assert!(v.len() <= desired_len); 142 | 143 | let complex_v = v 144 | .iter() 145 | .cloned() 146 | .chain(std::iter::repeat(T::ZERO)) 147 | 
.take(desired_len.next_power_of_two()) 148 | .map(T::F::from) 149 | .collect::>(); 150 | fft::(&complex_v, false) 151 | } 152 | 153 | /// The inverse of dft_from_reals() 154 | pub fn idft_to_reals(dft_v: &[T::F], desired_len: usize) -> Vec { 155 | assert!(dft_v.len() >= desired_len); 156 | 157 | let complex_v = fft::(dft_v, true); 158 | complex_v 159 | .into_iter() 160 | .take(desired_len) 161 | .map(T::extract) 162 | .collect() 163 | } 164 | 165 | /// Given two polynomials (vectors) sum_i a[i] x^i and sum_i b[i] x^i, 166 | /// computes their product (convolution) c[k] = sum_(i+j=k) a[i]*b[j]. 167 | /// Uses complex FFT if inputs are f64, or modular NTT if inputs are i64. 168 | pub fn convolution(a: &[T], b: &[T]) -> Vec { 169 | let len_c = a.len() + b.len() - 1; 170 | let dft_a = dft_from_reals(a, len_c).into_iter(); 171 | let dft_b = dft_from_reals(b, len_c).into_iter(); 172 | let dft_c = dft_a.zip(dft_b).map(|(a, b)| a * b).collect::>(); 173 | idft_to_reals(&dft_c, len_c) 174 | } 175 | 176 | #[cfg(test)] 177 | mod test { 178 | use super::*; 179 | 180 | #[test] 181 | fn test_complex_dft() { 182 | let v = vec![7.0, 1.0, 1.0]; 183 | let dft_v = dft_from_reals(&v, v.len()); 184 | let new_v: Vec = idft_to_reals(&dft_v, v.len()); 185 | 186 | let six = Complex::from(6.0); 187 | let seven = Complex::from(7.0); 188 | let nine = Complex::from(9.0); 189 | let i = Complex::new(0.0, 1.0); 190 | 191 | assert_eq!(dft_v, vec![nine, six + i, seven, six - i]); 192 | assert_eq!(new_v, v); 193 | } 194 | 195 | #[test] 196 | fn test_modular_dft() { 197 | let v = vec![7, 1, 1]; 198 | let dft_v = dft_from_reals(&v, v.len()); 199 | let new_v: Vec = idft_to_reals(&dft_v, v.len()); 200 | 201 | let seven = CommonField::from(7); 202 | let one = CommonField::from(1); 203 | let prim = CommonField::from(15_311_432).pow(1 << 21); 204 | let prim2 = prim * prim; 205 | 206 | let eval0 = seven + one + one; 207 | let eval1 = seven + prim + prim2; 208 | let eval2 = seven + prim2 + one; 209 | let 
eval3 = seven + prim.recip() + prim2; 210 | 211 | assert_eq!(dft_v, vec![eval0, eval1, eval2, eval3]); 212 | assert_eq!(new_v, v); 213 | } 214 | 215 | #[test] 216 | fn test_complex_convolution() { 217 | let x = vec![7.0, 1.0, 1.0]; 218 | let y = vec![2.0, 4.0]; 219 | let z = convolution(&x, &y); 220 | let m = convolution(&vec![999.0], &vec![1e6]); 221 | 222 | assert_eq!(z, vec![14.0, 30.0, 6.0, 4.0]); 223 | assert_eq!(m, vec![999e6]); 224 | } 225 | 226 | #[test] 227 | fn test_modular_convolution() { 228 | let x = vec![7, 1, 1]; 229 | let y = vec![2, 4]; 230 | let z = convolution(&x, &y); 231 | let m = convolution(&vec![999], &vec![1_000_000]); 232 | 233 | assert_eq!(z, vec![14, 30, 6, 4]); 234 | assert_eq!(m, vec![999_000_000 - super::super::num::COMMON_PRIME]); 235 | } 236 | } 237 | -------------------------------------------------------------------------------- /src/math/mod.rs: -------------------------------------------------------------------------------- 1 | //! Number-theoretic utilities for contest problems. 2 | pub mod fft; 3 | pub mod num; 4 | 5 | /// Finds (d, coef_a, coef_b) such that d = gcd(a, b) = a * coef_a + b * coef_b. 6 | pub fn extended_gcd(a: i64, b: i64) -> (i64, i64, i64) { 7 | if b == 0 { 8 | (a.abs(), a.signum(), 0) 9 | } else { 10 | let (d, coef_b, coef_a) = extended_gcd(b, a % b); 11 | (d, coef_a, coef_b - coef_a * (a / b)) 12 | } 13 | } 14 | 15 | /// Assuming a != 0, finds smallest coef_b >= 0 such that a * coef_a + b * coef_b = c. 16 | /// 17 | /// # Panics 18 | /// 19 | /// Panics if a == 0. 
20 | pub fn canon_egcd(a: i64, b: i64, c: i64) -> Option<(i64, i64, i64)> { 21 | let (d, _, coef_b_init) = extended_gcd(a, b); 22 | if c % d == 0 { 23 | let a_d = (a / d).abs(); 24 | let coef_b = (coef_b_init * (c / d) % a_d + a_d) % a_d; 25 | let coef_a = (c - b * coef_b) / a; 26 | Some((d, coef_a, coef_b)) 27 | } else { 28 | None 29 | } 30 | } 31 | 32 | // TODO: deduplicate modular arithmetic code with num::Field 33 | fn pos_mod(n: i64, m: i64) -> i64 { 34 | if n < 0 { n + m } else { n } 35 | } 36 | fn mod_mul(a: i64, b: i64, m: i64) -> i64 { 37 | pos_mod((a as i128 * b as i128 % m as i128) as i64, m) 38 | } 39 | fn mod_exp(mut base: i64, mut exp: u64, m: i64) -> i64 { 40 | assert!(m >= 1); 41 | let mut ans = 1 % m; 42 | base %= m; 43 | while exp > 0 { 44 | if exp % 2 == 1 { 45 | ans = mod_mul(ans, base, m); 46 | } 47 | base = mod_mul(base, base, m); 48 | exp /= 2; 49 | } 50 | pos_mod(ans, m) 51 | } 52 | 53 | fn is_strong_probable_prime(n: i64, exp: u64, r: i64, a: i64) -> bool { 54 | let mut x = mod_exp(a, exp, n); 55 | if x == 1 || x == n - 1 { 56 | return true; 57 | } 58 | for _ in 1..r { 59 | x = mod_mul(x, x, n); 60 | if x == n - 1 { 61 | return true; 62 | } 63 | } 64 | false 65 | } 66 | 67 | /// Assuming x >= 0, returns whether x is prime 68 | pub fn is_prime(n: i64) -> bool { 69 | const BASES: [i64; 12] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]; 70 | assert!(n >= 0); 71 | match n { 72 | 0 | 1 => false, 73 | 2 | 3 => true, 74 | _ if n % 2 == 0 => false, 75 | _ => { 76 | let r = (n - 1).trailing_zeros() as i64; 77 | let exp = (n - 1) as u64 >> r; 78 | BASES 79 | .iter() 80 | .all(|&base| base > n - 2 || is_strong_probable_prime(n, exp, r, base)) 81 | } 82 | } 83 | } 84 | 85 | fn pollard_rho(n: i64) -> i64 { 86 | for a in 1..n { 87 | let f = |x| pos_mod(mod_mul(x, x, n) + a, n); 88 | let mut x = 2; 89 | let mut y = 2; 90 | loop { 91 | x = f(x); 92 | y = f(f(y)); 93 | let div = num::fast_gcd(x - y, n); 94 | if div == n { 95 | break; 96 | } else if div > 1 
{ 97 | return div; 98 | } 99 | } 100 | } 101 | panic!("No divisor found!"); 102 | } 103 | 104 | /// Assuming x >= 1, finds the prime factorization of n 105 | /// TODO: pollard_rho needs randomization to ensure correctness in contest settings! 106 | pub fn factorize(n: i64) -> Vec { 107 | assert!(n >= 1); 108 | let r = n.trailing_zeros() as usize; 109 | let mut factors = vec![2; r]; 110 | let mut stack = match n >> r { 111 | 1 => vec![], 112 | x => vec![x], 113 | }; 114 | while let Some(top) = stack.pop() { 115 | if is_prime(top) { 116 | factors.push(top); 117 | } else { 118 | let div = pollard_rho(top); 119 | stack.push(div); 120 | stack.push(top / div); 121 | } 122 | } 123 | factors.sort_unstable(); 124 | factors 125 | } 126 | 127 | #[cfg(test)] 128 | mod test { 129 | use super::*; 130 | 131 | #[test] 132 | fn test_egcd() { 133 | let (a, b) = (14, 35); 134 | 135 | let (d, x, y) = extended_gcd(a, b); 136 | assert_eq!(d, 7); 137 | assert_eq!(a * x + b * y, d); 138 | 139 | assert_eq!(canon_egcd(a, b, d), Some((d, -2, 1))); 140 | assert_eq!(canon_egcd(b, a, d), Some((d, -1, 3))); 141 | } 142 | 143 | #[test] 144 | fn test_modexp() { 145 | let m = 1_000_000_007; 146 | assert_eq!(mod_exp(0, 0, m), 1); 147 | assert_eq!(mod_exp(0, 1, m), 0); 148 | assert_eq!(mod_exp(0, 10, m), 0); 149 | assert_eq!(mod_exp(123, 456, m), 565291922); 150 | } 151 | 152 | #[test] 153 | fn test_miller() { 154 | assert_eq!(is_prime(2), true); 155 | assert_eq!(is_prime(4), false); 156 | assert_eq!(is_prime(6), false); 157 | assert_eq!(is_prime(8), false); 158 | assert_eq!(is_prime(269), true); 159 | assert_eq!(is_prime(1000), false); 160 | assert_eq!(is_prime(1_000_000_007), true); 161 | assert_eq!(is_prime((1 << 61) - 1), true); 162 | assert_eq!(is_prime(7156857700403137441), false); 163 | } 164 | 165 | #[test] 166 | fn test_pollard() { 167 | assert_eq!(factorize(1), vec![]); 168 | assert_eq!(factorize(2), vec![2]); 169 | assert_eq!(factorize(4), vec![2, 2]); 170 | assert_eq!(factorize(12), 
vec![2, 2, 3]); 171 | assert_eq!( 172 | factorize(7156857700403137441), 173 | vec![11, 13, 17, 19, 29, 37, 41, 43, 61, 97, 109, 127] 174 | ); 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/math/num.rs: -------------------------------------------------------------------------------- 1 | //! Rational and Complex numbers, safe modular arithmetic, and linear algebra, 2 | //! implemented minimally for contest use. 3 | //! If you need more features, you might be interested in crates.io/crates/num 4 | pub use std::f64::consts::PI; 5 | use std::ops::{Add, Div, Index, IndexMut, Mul, Neg, Sub}; 6 | 7 | /// Fast iterative version of Euclid's GCD algorithm 8 | pub fn fast_gcd(mut a: i64, mut b: i64) -> i64 { 9 | while b != 0 { 10 | a %= b; 11 | std::mem::swap(&mut a, &mut b); 12 | } 13 | a.abs() 14 | } 15 | 16 | /// Represents a fraction reduced to lowest terms 17 | #[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)] 18 | pub struct Rational { 19 | pub num: i64, 20 | pub den: i64, 21 | } 22 | impl Rational { 23 | pub fn new(num: i64, den: i64) -> Self { 24 | let g = fast_gcd(num, den) * den.signum(); 25 | Self { 26 | num: num / g, 27 | den: den / g, 28 | } 29 | } 30 | pub fn abs(self) -> Self { 31 | Self { 32 | num: self.num.abs(), 33 | den: self.den, 34 | } 35 | } 36 | pub fn recip(self) -> Self { 37 | let g = self.num.signum(); 38 | Self { 39 | num: self.den / g, 40 | den: self.num / g, 41 | } 42 | } 43 | } 44 | impl From for Rational { 45 | fn from(num: i64) -> Self { 46 | Self { num, den: 1 } 47 | } 48 | } 49 | impl Neg for Rational { 50 | type Output = Self; 51 | fn neg(self) -> Self { 52 | Self { 53 | num: -self.num, 54 | den: self.den, 55 | } 56 | } 57 | } 58 | #[allow(clippy::suspicious_arithmetic_impl)] 59 | impl Add for Rational { 60 | type Output = Self; 61 | fn add(self, other: Self) -> Self { 62 | Self::new( 63 | self.num * other.den + self.den * other.num, 64 | self.den * other.den, 65 | ) 66 | } 67 | } 
68 | #[allow(clippy::suspicious_arithmetic_impl)] 69 | impl Sub for Rational { 70 | type Output = Self; 71 | fn sub(self, other: Self) -> Self { 72 | Self::new( 73 | self.num * other.den - self.den * other.num, 74 | self.den * other.den, 75 | ) 76 | } 77 | } 78 | impl Mul for Rational { 79 | type Output = Self; 80 | fn mul(self, other: Self) -> Self { 81 | Self::new(self.num * other.num, self.den * other.den) 82 | } 83 | } 84 | #[allow(clippy::suspicious_arithmetic_impl)] 85 | impl Div for Rational { 86 | type Output = Self; 87 | fn div(self, other: Self) -> Self { 88 | self * other.recip() 89 | } 90 | } 91 | impl Ord for Rational { 92 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 93 | (self.num * other.den).cmp(&(self.den * other.num)) 94 | } 95 | } 96 | impl PartialOrd for Rational { 97 | fn partial_cmp(&self, other: &Self) -> Option { 98 | Some(self.cmp(other)) 99 | } 100 | } 101 | 102 | /// Represents a complex number using floating-point arithmetic 103 | #[derive(Clone, Copy, PartialEq, Debug)] 104 | pub struct Complex { 105 | pub real: f64, 106 | pub imag: f64, 107 | } 108 | impl Complex { 109 | pub fn new(real: f64, imag: f64) -> Self { 110 | Self { real, imag } 111 | } 112 | pub fn from_polar(r: f64, th: f64) -> Self { 113 | Self::new(r * th.cos(), r * th.sin()) 114 | } 115 | pub fn abs_square(self) -> f64 { 116 | self.real * self.real + self.imag * self.imag 117 | } 118 | pub fn argument(self) -> f64 { 119 | self.imag.atan2(self.real) 120 | } 121 | pub fn conjugate(self) -> Self { 122 | Self::new(self.real, -self.imag) 123 | } 124 | pub fn recip(self) -> Self { 125 | let denom = self.abs_square(); 126 | Self::new(self.real / denom, -self.imag / denom) 127 | } 128 | } 129 | impl From for Complex { 130 | fn from(real: f64) -> Self { 131 | Self::new(real, 0.0) 132 | } 133 | } 134 | impl Neg for Complex { 135 | type Output = Self; 136 | fn neg(self) -> Self { 137 | Self::new(-self.real, -self.imag) 138 | } 139 | } 140 | impl Add for Complex { 141 | 
type Output = Self; 142 | fn add(self, other: Self) -> Self { 143 | Self::new(self.real + other.real, self.imag + other.imag) 144 | } 145 | } 146 | impl Sub for Complex { 147 | type Output = Self; 148 | fn sub(self, other: Self) -> Self { 149 | Self::new(self.real - other.real, self.imag - other.imag) 150 | } 151 | } 152 | impl Mul for Complex { 153 | type Output = Self; 154 | fn mul(self, other: Self) -> Self { 155 | let real = self.real * other.real - self.imag * other.imag; 156 | let imag = self.imag * other.real + self.real * other.imag; 157 | Self::new(real, imag) 158 | } 159 | } 160 | #[allow(clippy::suspicious_arithmetic_impl)] 161 | impl Div for Complex { 162 | type Output = Self; 163 | fn div(self, other: Self) -> Self { 164 | self * other.recip() 165 | } 166 | } 167 | 168 | /// Represents an element of the finite (Galois) field of prime order M, where 169 | /// 1 <= M < 2^31.5. If M is not prime, ring operations are still valid 170 | /// but recip() and division are not. Note that the latter operations are also 171 | /// the slowest, so precompute any inverses that you intend to use frequently. 
172 | #[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)] 173 | pub struct Modulo { 174 | pub val: i64, 175 | } 176 | impl Modulo { 177 | /// Computes self^n in O(log n) time 178 | pub fn pow(mut self, mut n: u64) -> Self { 179 | let mut result = Self::from_small(1); 180 | while n > 0 { 181 | if n % 2 == 1 { 182 | result = result * self; 183 | } 184 | self = self * self; 185 | n /= 2; 186 | } 187 | result 188 | } 189 | /// Computes inverses of 1 to n in O(n) time 190 | pub fn vec_of_recips(n: i64) -> Vec { 191 | let mut recips = vec![Self::from(0), Self::from(1)]; 192 | for i in 2..=n { 193 | let (md, dv) = (M % i, M / i); 194 | recips.push(recips[md as usize] * Self::from_small(-dv)); 195 | } 196 | recips 197 | } 198 | /// Computes self^-1 in O(log M) time 199 | pub fn recip(self) -> Self { 200 | self.pow(M as u64 - 2) 201 | } 202 | /// Avoids the % operation but requires -M <= x < M 203 | fn from_small(s: i64) -> Self { 204 | let val = if s < 0 { s + M } else { s }; 205 | Self { val } 206 | } 207 | } 208 | impl From for Modulo { 209 | fn from(val: i64) -> Self { 210 | // Self { val: val.rem_euclid(M) } 211 | Self::from_small(val % M) 212 | } 213 | } 214 | impl Neg for Modulo { 215 | type Output = Self; 216 | fn neg(self) -> Self { 217 | Self::from_small(-self.val) 218 | } 219 | } 220 | impl Add for Modulo { 221 | type Output = Self; 222 | fn add(self, other: Self) -> Self { 223 | Self::from_small(self.val + other.val - M) 224 | } 225 | } 226 | impl Sub for Modulo { 227 | type Output = Self; 228 | fn sub(self, other: Self) -> Self { 229 | Self::from_small(self.val - other.val) 230 | } 231 | } 232 | impl Mul for Modulo { 233 | type Output = Self; 234 | fn mul(self, other: Self) -> Self { 235 | Self::from(self.val * other.val) 236 | } 237 | } 238 | #[allow(clippy::suspicious_arithmetic_impl)] 239 | impl Div for Modulo { 240 | type Output = Self; 241 | fn div(self, other: Self) -> Self { 242 | self * other.recip() 243 | } 244 | } 245 | 246 | /// Prime modulus that's 
commonly used in programming competitions 247 | pub const COMMON_PRIME: i64 = 998_244_353; // 2^23 * 7 * 17 + 1; 248 | pub type CommonField = Modulo; 249 | 250 | #[derive(Clone, PartialEq, Debug)] 251 | pub struct Matrix { 252 | cols: usize, 253 | inner: Box<[f64]>, 254 | } 255 | impl Matrix { 256 | pub fn zero(rows: usize, cols: usize) -> Self { 257 | let inner = vec![0.0; rows * cols].into_boxed_slice(); 258 | Self { cols, inner } 259 | } 260 | pub fn one(cols: usize) -> Self { 261 | let mut matrix = Self::zero(cols, cols); 262 | for i in 0..cols { 263 | matrix[i][i] = 1.0; 264 | } 265 | matrix 266 | } 267 | pub fn vector(vec: &[f64], as_row: bool) -> Self { 268 | let cols = if as_row { vec.len() } else { 1 }; 269 | let inner = vec.to_vec().into_boxed_slice(); 270 | Self { cols, inner } 271 | } 272 | pub fn pow(&self, mut n: u64) -> Self { 273 | let mut base = self.clone(); 274 | let mut result = Self::one(self.cols); 275 | while n > 0 { 276 | if n % 2 == 1 { 277 | result = &result * &base; 278 | } 279 | base = &base * &base; 280 | n /= 2; 281 | } 282 | result 283 | } 284 | pub fn rows(&self) -> usize { 285 | self.inner.len() / self.cols 286 | } 287 | pub fn transpose(&self) -> Self { 288 | let mut matrix = Matrix::zero(self.cols, self.rows()); 289 | for i in 0..self.rows() { 290 | for j in 0..self.cols { 291 | matrix[j][i] = self[i][j]; 292 | } 293 | } 294 | matrix 295 | } 296 | pub fn recip(&self) -> Self { 297 | unimplemented!(); 298 | } 299 | } 300 | impl Index for Matrix { 301 | type Output = [f64]; 302 | fn index(&self, row: usize) -> &Self::Output { 303 | let start = self.cols * row; 304 | &self.inner[start..start + self.cols] 305 | } 306 | } 307 | impl IndexMut for Matrix { 308 | fn index_mut(&mut self, row: usize) -> &mut Self::Output { 309 | let start = self.cols * row; 310 | &mut self.inner[start..start + self.cols] 311 | } 312 | } 313 | impl Neg for &Matrix { 314 | type Output = Matrix; 315 | fn neg(self) -> Matrix { 316 | let inner = 
self.inner.iter().map(|&v| -v).collect(); 317 | Matrix { 318 | cols: self.cols, 319 | inner, 320 | } 321 | } 322 | } 323 | impl Add for &Matrix { 324 | type Output = Matrix; 325 | fn add(self, other: Self) -> Matrix { 326 | let self_iter = self.inner.iter(); 327 | let inner = self_iter 328 | .zip(other.inner.iter()) 329 | .map(|(&u, &v)| u + v) 330 | .collect(); 331 | Matrix { 332 | cols: self.cols, 333 | inner, 334 | } 335 | } 336 | } 337 | impl Sub for &Matrix { 338 | type Output = Matrix; 339 | fn sub(self, other: Self) -> Matrix { 340 | let self_iter = self.inner.iter(); 341 | let inner = self_iter 342 | .zip(other.inner.iter()) 343 | .map(|(&u, &v)| u - v) 344 | .collect(); 345 | Matrix { 346 | cols: self.cols, 347 | inner, 348 | } 349 | } 350 | } 351 | impl Mul for &Matrix { 352 | type Output = Matrix; 353 | fn mul(self, scalar: f64) -> Matrix { 354 | let inner = self.inner.iter().map(|&v| v * scalar).collect(); 355 | Matrix { 356 | cols: self.cols, 357 | inner, 358 | } 359 | } 360 | } 361 | impl Mul for &Matrix { 362 | type Output = Matrix; 363 | fn mul(self, other: Self) -> Matrix { 364 | assert_eq!(self.cols, other.rows()); 365 | let mut matrix = Matrix::zero(self.rows(), other.cols); 366 | for i in 0..self.rows() { 367 | for k in 0..self.cols { 368 | for j in 0..other.cols { 369 | matrix[i][j] += self[i][k] * other[k][j]; 370 | } 371 | } 372 | } 373 | matrix 374 | } 375 | } 376 | 377 | #[cfg(test)] 378 | mod test { 379 | use super::*; 380 | 381 | #[test] 382 | fn test_rational() { 383 | let three = Rational::from(3); 384 | let six = Rational::from(6); 385 | let three_and_half = three + three / six; 386 | 387 | assert_eq!(three_and_half.num, 7); 388 | assert_eq!(three_and_half.den, 2); 389 | assert_eq!(three_and_half, Rational::new(-35, -10)); 390 | assert!(three_and_half > Rational::from(3)); 391 | assert!(three_and_half < Rational::from(4)); 392 | 393 | let minus_three_and_half = six - three_and_half + three / (-three / six); 394 | let zero = 
three_and_half + minus_three_and_half; 395 | 396 | assert_eq!(minus_three_and_half.num, -7); 397 | assert_eq!(minus_three_and_half.den, 2); 398 | assert_eq!(three_and_half, -minus_three_and_half); 399 | assert_eq!(zero.num, 0); 400 | assert_eq!(zero.den, 1); 401 | } 402 | 403 | #[test] 404 | fn test_complex() { 405 | let four = Complex::new(4.0, 0.0); 406 | let two_i = Complex::new(0.0, 2.0); 407 | 408 | assert_eq!(four / two_i, -two_i); 409 | assert_eq!(two_i * -two_i, four); 410 | assert_eq!(two_i - two_i, Complex::from(0.0)); 411 | assert_eq!(four.abs_square(), 16.0); 412 | assert_eq!(two_i.abs_square(), 4.0); 413 | assert_eq!((-four).argument(), -PI); 414 | assert_eq!((-two_i).argument(), -PI / 2.0); 415 | assert_eq!(four.argument(), 0.0); 416 | assert_eq!(two_i.argument(), PI / 2.0); 417 | } 418 | 419 | #[test] 420 | fn test_field() { 421 | let base = CommonField::from(1234); 422 | let zero = base - base; 423 | let one = base.recip() * base; 424 | let two = CommonField::from(2 - 5 * COMMON_PRIME); 425 | 426 | assert_eq!(zero.val, 0); 427 | assert_eq!(one.val, 1); 428 | assert_eq!(one + one, two); 429 | assert_eq!(one / base * (base * base) - base / one, zero); 430 | } 431 | 432 | #[test] 433 | fn test_vec_of_recips() { 434 | let recips = CommonField::vec_of_recips(20); 435 | 436 | assert_eq!(recips.len(), 21); 437 | for i in 1..recips.len() { 438 | assert_eq!(recips[i], CommonField::from(i as i64).recip()); 439 | } 440 | } 441 | 442 | #[test] 443 | fn test_linalg() { 444 | let zero = Matrix::zero(2, 2); 445 | let one = Matrix::one(2); 446 | let rotate_90 = Matrix { 447 | cols: 2, 448 | inner: Box::new([0.0, -1.0, 1.0, 0.0]), 449 | }; 450 | let x_vec = Matrix::vector(&[1.0, 0.0], false); 451 | let y_vec = Matrix::vector(&[0.0, 1.0], false); 452 | let x_dot_x = &x_vec.transpose() * &x_vec; 453 | let x_dot_y = &x_vec.transpose() * &y_vec; 454 | 455 | assert_eq!(x_dot_x, Matrix::one(1)); 456 | assert_eq!(x_dot_x[0][0], 1.0); 457 | assert_eq!(x_dot_y, 
Matrix::zero(1, 1)); 458 | assert_eq!(x_dot_y[0][0], 0.0); 459 | assert_eq!(&one - &one, zero); 460 | assert_eq!(&one * 0.0, zero); 461 | assert_eq!(&rotate_90 * &rotate_90, -&one); 462 | assert_eq!(&rotate_90 * &x_vec, y_vec); 463 | assert_eq!(&rotate_90 * &y_vec, -&x_vec); 464 | assert_eq!(&rotate_90 * &(&x_vec + &y_vec), &y_vec - &x_vec); 465 | } 466 | } 467 | -------------------------------------------------------------------------------- /src/order.rs: -------------------------------------------------------------------------------- 1 | //! Ordering algorithms. 2 | 3 | /// A comparator on partially ordered elements, that panics if they are incomparable 4 | /// 5 | /// # Example 6 | /// 7 | /// ``` 8 | /// use contest_algorithms::order::asserting_cmp; 9 | /// let mut vec = vec![4.5, -1.7, 1.2]; 10 | /// vec.sort_unstable_by(asserting_cmp); 11 | /// assert_eq!(vec, vec![-1.7, 1.2, 4.5]); 12 | /// ``` 13 | pub fn asserting_cmp(a: &T, b: &T) -> std::cmp::Ordering { 14 | a.partial_cmp(b).expect("Comparing incomparable elements") 15 | } 16 | 17 | /// Assuming slice is sorted and totally ordered, returns the minimum i for which 18 | /// slice[i] >= key, or slice.len() if no such i exists 19 | pub fn slice_lower_bound(slice: &[T], key: &T) -> usize { 20 | slice 21 | .binary_search_by(|x| asserting_cmp(x, key).then(std::cmp::Ordering::Greater)) 22 | .unwrap_err() 23 | } 24 | 25 | /// Assuming slice is sorted and totally ordered, returns the minimum i for which 26 | /// slice[i] > key, or slice.len() if no such i exists 27 | pub fn slice_upper_bound(slice: &[T], key: &T) -> usize { 28 | slice 29 | .binary_search_by(|x| asserting_cmp(x, key).then(std::cmp::Ordering::Less)) 30 | .unwrap_err() 31 | } 32 | 33 | /// Stably merges two sorted and totally ordered collections into one 34 | pub fn merge_sorted( 35 | i1: impl IntoIterator, 36 | i2: impl IntoIterator, 37 | ) -> Vec { 38 | let mut i1 = i1.into_iter().peekable(); 39 | let mut i2 = i2.into_iter().peekable(); 40 | let 
mut merged = Vec::with_capacity(i1.size_hint().0 + i2.size_hint().0); 41 | while let (Some(a), Some(b)) = (i1.peek(), i2.peek()) { 42 | merged.push(if a <= b { i1.next() } else { i2.next() }.unwrap()); 43 | } 44 | merged.extend(i1.chain(i2)); 45 | merged 46 | } 47 | 48 | /// A stable sort 49 | pub fn merge_sort(mut v: Vec) -> Vec { 50 | if v.len() < 2 { 51 | v 52 | } else { 53 | let v2 = v.split_off(v.len() / 2); 54 | merge_sorted(merge_sort(v), merge_sort(v2)) 55 | } 56 | } 57 | 58 | /// A simple data structure for coordinate compression 59 | pub struct SparseIndex { 60 | coords: Vec, 61 | } 62 | 63 | impl SparseIndex { 64 | /// Builds an index, given the full set of coordinates to compress. 65 | pub fn new(mut coords: Vec) -> Self { 66 | coords.sort_unstable(); 67 | coords.dedup(); 68 | Self { coords } 69 | } 70 | 71 | /// Returns Ok(i) if the coordinate q appears at index i 72 | /// Returns Err(i) if q appears between indices i-1 and i 73 | pub fn compress(&self, q: i64) -> Result { 74 | self.coords.binary_search(&q) 75 | } 76 | } 77 | 78 | /// Represents a maximum (upper envelope) of a collection of linear functions of one 79 | /// variable, evaluated using an online version of the convex hull trick. 80 | /// It combines the offline algorithm with square root decomposition, resulting in an 81 | /// asymptotically suboptimal but simple algorithm with good amortized performance: 82 | /// N inserts interleaved with Q queries yields O(N sqrt Q + Q log N) time complexity 83 | /// in general, or O((N + Q) log N) if all queries come after all inserts. 84 | // Proof: the Q log N term comes from calls to slice_lower_bound(). As for the N sqrt Q, 85 | // note that between successive times when the hull is rebuilt, O(N) work is done, 86 | // and the running totals of insertions and queries satisfy del_N (del_Q + 1) > N. 87 | // Now, either del_Q >= sqrt Q, or else del_Q <= 2 sqrt Q - 1 88 | // => del_N > N / (2 sqrt Q). 
89 | // Since del(N sqrt Q) >= max(N del(sqrt Q), del_N sqrt Q) 90 | // >= max(N del_Q / (2 sqrt Q), del_N sqrt Q), 91 | // we conclude that del(N sqrt Q) >= N / 2. 92 | #[derive(Default)] 93 | pub struct PiecewiseLinearConvexFn { 94 | recent_lines: Vec<(f64, f64)>, 95 | sorted_lines: Vec<(f64, f64)>, 96 | intersections: Vec, 97 | amortized_work: usize, 98 | } 99 | 100 | impl PiecewiseLinearConvexFn { 101 | /// Replaces the represented function with the maximum of itself and a provided line 102 | pub fn max_with(&mut self, new_m: f64, new_b: f64) { 103 | self.recent_lines.push((new_m, new_b)); 104 | } 105 | 106 | /// Similar to max_with but requires that (new_m, new_b) be the largest pair so far 107 | fn max_with_sorted(&mut self, new_m: f64, new_b: f64) { 108 | while let Some(&(last_m, last_b)) = self.sorted_lines.last() { 109 | // If slopes are equal, get rid of the old line as its intercept is lower 110 | if (new_m - last_m).abs() > 1e-9 { 111 | let intersect = (new_b - last_b) / (last_m - new_m); 112 | if self.intersections.last() < Some(&intersect) { 113 | self.intersections.push(intersect); 114 | break; 115 | } 116 | } 117 | self.intersections.pop(); 118 | self.sorted_lines.pop(); 119 | } 120 | self.sorted_lines.push((new_m, new_b)); 121 | } 122 | 123 | /// Evaluates the function at x 124 | fn eval_unoptimized(&self, x: f64) -> f64 { 125 | let idx = slice_lower_bound(&self.intersections, &x); 126 | self.recent_lines 127 | .iter() 128 | .chain(self.sorted_lines.get(idx)) 129 | .map(|&(m, b)| m * x + b) 130 | .max_by(asserting_cmp) 131 | .unwrap_or(-1e18) 132 | } 133 | 134 | /// Evaluates the function at x with good amortized runtime 135 | pub fn evaluate(&mut self, x: f64) -> f64 { 136 | self.amortized_work += self.recent_lines.len(); 137 | if self.amortized_work > self.sorted_lines.len() { 138 | self.amortized_work = 0; 139 | self.recent_lines.sort_unstable_by(asserting_cmp); 140 | self.intersections.clear(); 141 | let all_lines = 
merge_sorted(self.recent_lines.drain(..), self.sorted_lines.drain(..)); 142 | for (new_m, new_b) in all_lines { 143 | self.max_with_sorted(new_m, new_b); 144 | } 145 | } 146 | self.eval_unoptimized(x) 147 | } 148 | } 149 | 150 | #[cfg(test)] 151 | mod test { 152 | use super::*; 153 | 154 | #[test] 155 | fn test_bounds() { 156 | let mut vals = vec![16, 45, 45, 45, 82]; 157 | 158 | assert_eq!(slice_upper_bound(&vals, &44), 1); 159 | assert_eq!(slice_lower_bound(&vals, &45), 1); 160 | assert_eq!(slice_upper_bound(&vals, &45), 4); 161 | assert_eq!(slice_lower_bound(&vals, &46), 4); 162 | 163 | vals.dedup(); 164 | for (i, q) in vals.iter().enumerate() { 165 | assert_eq!(slice_lower_bound(&vals, q), i); 166 | assert_eq!(slice_upper_bound(&vals, q), i + 1); 167 | } 168 | } 169 | 170 | #[test] 171 | fn test_merge_sorted() { 172 | let vals1 = vec![16, 45, 45, 82]; 173 | let vals2 = vec![-20, 40, 45, 50]; 174 | let vals_merged = vec![-20, 16, 40, 45, 45, 45, 50, 82]; 175 | 176 | assert_eq!(merge_sorted(None, Some(42)), vec![42]); 177 | assert_eq!(merge_sorted(vals1.iter().cloned(), None), vals1); 178 | assert_eq!(merge_sorted(vals1, vals2), vals_merged); 179 | } 180 | 181 | #[test] 182 | fn test_merge_sort() { 183 | let unsorted = vec![8, -5, 1, 4, -3, 4]; 184 | let sorted = vec![-5, -3, 1, 4, 4, 8]; 185 | 186 | assert_eq!(merge_sort(unsorted), sorted); 187 | assert_eq!(merge_sort(sorted.clone()), sorted); 188 | } 189 | 190 | #[test] 191 | fn test_coord_compress() { 192 | let mut coords = vec![16, 99, 45, 18]; 193 | let index = SparseIndex::new(coords.clone()); 194 | 195 | coords.sort_unstable(); 196 | for (i, q) in coords.into_iter().enumerate() { 197 | assert_eq!(index.compress(q - 1), Err(i)); 198 | assert_eq!(index.compress(q), Ok(i)); 199 | assert_eq!(index.compress(q + 1), Err(i + 1)); 200 | } 201 | } 202 | 203 | #[test] 204 | fn test_range_compress() { 205 | let queries = vec![(0, 10), (10, 19), (20, 29)]; 206 | let coords = queries.iter().flat_map(|&(i, j)| vec![i, j 
+ 1]).collect(); 207 | let index = SparseIndex::new(coords); 208 | 209 | assert_eq!(index.coords, vec![0, 10, 11, 20, 30]); 210 | } 211 | 212 | #[test] 213 | fn test_convex_hull_trick() { 214 | let lines = [(0, -3), (-1, 0), (1, -8), (-2, 1), (1, -4)]; 215 | let xs = [0, 1, 2, 3, 4, 5]; 216 | // results[i] consists of the expected y-coordinates after processing 217 | // the first i+1 lines. 218 | let results = [ 219 | [-3, -3, -3, -3, -3, -3], 220 | [0, -1, -2, -3, -3, -3], 221 | [0, -1, -2, -3, -3, -3], 222 | [1, -1, -2, -3, -3, -3], 223 | [1, -1, -2, -1, 0, 1], 224 | ]; 225 | let mut func = PiecewiseLinearConvexFn::default(); 226 | assert_eq!(func.evaluate(0.0), -1e18); 227 | for (&(slope, intercept), expected) in lines.iter().zip(results.iter()) { 228 | func.max_with(slope as f64, intercept as f64); 229 | let ys: Vec = xs.iter().map(|&x| func.evaluate(x as f64) as i64).collect(); 230 | assert_eq!(expected, &ys[..]); 231 | } 232 | } 233 | } 234 | -------------------------------------------------------------------------------- /src/range_query/README.md: -------------------------------------------------------------------------------- 1 | # Associative Range Query (ARQ) and Mo's Algorithm 2 | 3 | For more information on Associative Range Query, you may research "segment trees" in the programming contest literature. My implementation is more general than usual; for more information on it, please see my [blog post on Codeforces](https://codeforces.com/blog/entry/68419). 4 | -------------------------------------------------------------------------------- /src/range_query/dynamic_arq.rs: -------------------------------------------------------------------------------- 1 | //! Associative Range Query Tree with dynamic allocation, supporting sparse 2 | //! 
initialization and persistence 3 | use super::ArqSpec; 4 | 5 | pub struct DynamicArqNode { 6 | val: T::S, 7 | app: Option, 8 | down: (usize, usize), 9 | } 10 | 11 | // TODO: in a future Rust version, this might be replaced by a #[derive(Clone)] 12 | impl Clone for DynamicArqNode { 13 | fn clone(&self) -> Self { 14 | Self { 15 | val: self.val.clone(), 16 | app: self.app.clone(), 17 | down: self.down, 18 | } 19 | } 20 | } 21 | 22 | impl Default for DynamicArqNode { 23 | fn default() -> Self { 24 | Self { 25 | val: T::identity(), 26 | app: None, 27 | down: (usize::MAX, usize::MAX), 28 | } 29 | } 30 | } 31 | 32 | impl DynamicArqNode { 33 | fn apply(&mut self, f: &T::F, size: i64) { 34 | self.val = T::apply(f, &self.val, size); 35 | if size > 1 { 36 | let h = match self.app { 37 | Some(ref g) => T::compose(f, g), 38 | None => f.clone(), 39 | }; 40 | self.app = Some(h); 41 | } 42 | } 43 | } 44 | 45 | pub type ArqView = (usize, i64); 46 | 47 | /// A dynamic, and optionally persistent, associative range query data structure. 48 | pub struct DynamicArq { 49 | nodes: Vec>, 50 | is_persistent: bool, 51 | } 52 | 53 | impl DynamicArq { 54 | /// Initializes the data structure without creating any nodes. 55 | pub fn new(is_persistent: bool) -> Self { 56 | Self { 57 | nodes: vec![], 58 | is_persistent, 59 | } 60 | } 61 | 62 | /// Lazily builds a tree initialized to the identity. 63 | pub fn build_from_identity(&mut self, size: i64) -> ArqView { 64 | self.nodes.push(DynamicArqNode::default()); 65 | (self.nodes.len() - 1, size) 66 | } 67 | 68 | /// Builds a tree whose leaves are set to a given non-empty slice. 
    pub fn build_from_slice(&mut self, init_val: &[T::S]) -> ArqView {
        if init_val.len() == 1 {
            // Leaf: a single node holding the given value, no children.
            let root = DynamicArqNode {
                val: init_val[0].clone(),
                ..Default::default()
            };
            self.nodes.push(root);
            (self.nodes.len() - 1, 1)
        } else {
            // Split in half and recurse; left half gets floor(len / 2) elements,
            // so the two subtrees differ in size by at most one.
            let ls = init_val.len() / 2;
            let (l_init, r_init) = init_val.split_at(ls);
            let l_view = self.build_from_slice(l_init);
            let r_view = self.build_from_slice(r_init);
            self.merge_equal_sized(l_view, r_view)
        }
    }

    /// Merges two balanced subtrees into a single tree with a 0-indexed view.
    pub fn merge_equal_sized(&mut self, (lp, ls): ArqView, (rp, rs): ArqView) -> ArqView {
        // Balance precondition: right subtree is the same size or one larger.
        assert!(ls == rs || ls + 1 == rs);
        let p = self.nodes.len();
        let root = DynamicArqNode {
            down: (lp, rp),
            ..Default::default()
        };
        self.nodes.push(root);
        // Aggregate the children's values into the new root.
        self.pull(p);
        (p, ls + rs)
    }

    // Returns views of p's two children, materializing them on first visit and
    // propagating any pending lazy update (app) down one level.
    pub fn push(&mut self, (p, s): ArqView) -> (ArqView, ArqView) {
        // usize::MAX marks "children not yet allocated" (sparse initialization);
        // both children start as identity nodes.
        if self.nodes[p].down.0 == usize::MAX {
            self.nodes.push(DynamicArqNode::default());
            self.nodes.push(DynamicArqNode::default());
            self.nodes[p].down = (self.nodes.len() - 2, self.nodes.len() - 1)
        };
        let (lp, rp) = self.nodes[p].down;
        let ls = s / 2;
        // take() clears the pending update on p while applying it to each child
        // with that child's subtree size.
        if let Some(ref f) = self.nodes[p].app.take() {
            self.nodes[lp].apply(f, ls);
            self.nodes[rp].apply(f, s - ls);
        }
        ((lp, ls), (rp, s - ls))
    }

    // Recomputes p's aggregate value from its children (inverse of push).
    pub fn pull(&mut self, p: usize) {
        let (lp, rp) = self.nodes[p].down;
        let left_val = &self.nodes[lp].val;
        let right_val = &self.nodes[rp].val;
        self.nodes[p].val = T::op(left_val, right_val);
    }

    // In persistent mode, copies the node so the old version stays intact and
    // returns the copy's index; otherwise mutates in place (returns p_orig).
    fn clone_node(&mut self, p_orig: usize) -> usize {
        if self.is_persistent {
            let node = self.nodes[p_orig].clone();
            self.nodes.push(node);
            self.nodes.len() - 1
        } else {
            p_orig
        }
    }

    /// Applies the endomorphism f to all entries from l to r, inclusive.
    /// If l == r, the updates are eager. Otherwise, they are lazy.
    pub fn update(&mut self, view: ArqView, l: i64, r: i64, f: &T::F) -> ArqView {
        // l and r are relative to this view: the subtree covers indices 0..=s-1.
        let (p_orig, s) = view;
        if r < 0 || s - 1 < l {
            // Disjoint from [l, r]: return the subtree unchanged.
            view
        } else if l <= 0 && s - 1 <= r {
            // Fully covered: record f lazily at this node (eagerly if s == 1).
            let p_clone = self.clone_node(p_orig);
            self.nodes[p_clone].apply(f, s);
            (p_clone, s)
        } else {
            // Partial overlap: push pending updates down, recurse on both halves
            // (shifting coordinates by the left subtree's size), then re-aggregate.
            let (l_view, r_view) = self.push(view);
            let ls = l_view.1;
            let p_clone = self.clone_node(p_orig);
            let lp_clone = self.update(l_view, l, r, f).0;
            let rp_clone = self.update(r_view, l - ls, r - ls, f).0;
            self.nodes[p_clone].down = (lp_clone, rp_clone);
            self.pull(p_clone);
            (p_clone, s)
        }
    }

    /// Returns the aggregate range query on all entries from l to r, inclusive.
    pub fn query(&mut self, view: ArqView, l: i64, r: i64) -> T::S {
        // Mirrors update(): disjoint ranges contribute the identity, fully
        // covered subtrees contribute their cached aggregate.
        let (p, s) = view;
        if r < 0 || s - 1 < l {
            T::identity()
        } else if l <= 0 && s - 1 <= r {
            self.nodes[p].val.clone()
        } else {
            // push() is needed (hence &mut self) so children reflect lazy updates.
            let (l_view, r_view) = self.push(view);
            let ls = l_view.1;
            let l_agg = self.query(l_view, l, r);
            let r_agg = self.query(r_view, l - ls, r - ls);
            T::op(&l_agg, &r_agg)
        }
    }
}

/// An example of binary search to find the first position whose element is negative.
/// The DynamicArq version works on trees of any size, not necessarily a power of two.
172 | pub fn first_negative(arq: &mut DynamicArq, view: ArqView) -> Option { 173 | let (p, s) = view; 174 | if s == 1 { 175 | Some(0).filter(|_| arq.nodes[p].val < 0) 176 | } else { 177 | let (l_view, r_view) = arq.push(view); 178 | let (lp, ls) = l_view; 179 | if arq.nodes[lp].val < 0 { 180 | first_negative(arq, l_view) 181 | } else { 182 | first_negative(arq, r_view).map(|x| ls + x) 183 | } 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /src/range_query/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod dynamic_arq; 2 | pub mod specs; 3 | pub mod sqrt_decomp; 4 | pub mod static_arq; 5 | pub use dynamic_arq::{ArqView, DynamicArq}; 6 | pub use specs::ArqSpec; 7 | pub use static_arq::StaticArq; 8 | 9 | #[cfg(test)] 10 | mod test { 11 | use super::specs::*; 12 | use super::*; 13 | 14 | #[test] 15 | fn test_rmq() { 16 | let mut arq = StaticArq::::new(&[0; 10]); 17 | 18 | assert_eq!(arq.query(0, 9), 0); 19 | 20 | arq.update(2, 4, &-5); 21 | arq.update(5, 7, &-3); 22 | arq.update(1, 6, &1); 23 | 24 | assert_eq!(arq.query(0, 9), -3); 25 | } 26 | 27 | #[test] 28 | fn test_dynamic_rmq() { 29 | let mut arq = DynamicArq::::new(false); 30 | let view = arq.build_from_slice(&[0; 10]); 31 | 32 | assert_eq!(arq.query(view, 0, 9), 0); 33 | 34 | arq.update(view, 2, 4, &-5); 35 | arq.update(view, 5, 7, &-3); 36 | arq.update(view, 1, 6, &1); 37 | 38 | assert_eq!(arq.query(view, 0, 9), -3); 39 | } 40 | 41 | #[test] 42 | fn test_persistent_rmq() { 43 | let mut arq = DynamicArq::::new(true); 44 | let mut view = arq.build_from_slice(&[0; 10]); 45 | 46 | let at_init = view; 47 | view = arq.update(view, 2, 4, &-5); 48 | let snapshot = view; 49 | view = arq.update(view, 5, 7, &-3); 50 | view = arq.update(view, 1, 6, &1); 51 | 52 | assert_eq!(arq.query(at_init, 0, 9), 0); 53 | assert_eq!(arq.query(snapshot, 0, 9), -5); 54 | assert_eq!(arq.query(view, 0, 9), -3); 55 | } 56 | 57 | #[test] 
58 | fn test_huge_rmq() { 59 | let quintillion = 1_000_000_000_000_000_000; 60 | let mut arq = DynamicArq::::new(false); 61 | let view = arq.build_from_identity(9 * quintillion + 1); 62 | 63 | arq.update(view, 2 * quintillion, 4 * quintillion, &-5); 64 | arq.update(view, 5 * quintillion, 7 * quintillion, &-3); 65 | arq.update(view, 1 * quintillion, 6 * quintillion, &1); 66 | 67 | assert_eq!(arq.query(view, 0, 9 * quintillion), -3); 68 | } 69 | 70 | #[test] 71 | fn test_range_sum() { 72 | let mut arq = StaticArq::::new(&[0; 10]); 73 | 74 | assert_eq!(arq.query(0, 9), 0); 75 | 76 | arq.update(1, 3, &10); 77 | arq.update(3, 5, &1); 78 | 79 | assert_eq!(arq.query(0, 9), 23); 80 | assert_eq!(arq.query(10, 4), 0); 81 | } 82 | 83 | #[test] 84 | fn test_dynamic_range_sum() { 85 | let mut arq = DynamicArq::::new(false); 86 | let view = arq.build_from_slice(&[0; 10]); 87 | 88 | assert_eq!(arq.query(view, 0, 9), 0); 89 | 90 | arq.update(view, 1, 3, &10); 91 | arq.update(view, 3, 5, &1); 92 | 93 | assert_eq!(arq.query(view, 0, 9), 23); 94 | assert_eq!(arq.query(view, 10, 4), 0); 95 | } 96 | 97 | #[test] 98 | fn test_supply_demand() { 99 | let mut arq = StaticArq::::new(&[(0, 0, 0); 10]); 100 | 101 | arq.update(1, 1, &(25, 100)); 102 | arq.update(3, 3, &(100, 30)); 103 | arq.update(9, 9, &(0, 20)); 104 | 105 | assert_eq!(arq.query(0, 9), (125, 150, 75)); 106 | } 107 | 108 | #[test] 109 | fn test_dynamic_supply_demand() { 110 | let mut arq = DynamicArq::::new(false); 111 | let view = arq.build_from_identity(10); 112 | 113 | arq.update(view, 1, 1, &(25, 100)); 114 | arq.update(view, 3, 3, &(100, 30)); 115 | arq.update(view, 9, 9, &(0, 20)); 116 | 117 | assert_eq!(arq.query(view, 0, 9), (125, 150, 75)); 118 | } 119 | 120 | #[test] 121 | fn test_binary_search_rmq() { 122 | let vec = vec![2, 1, 0, -1, -2, -3, -4, -5]; 123 | let mut arq = StaticArq::::new(&vec); 124 | let first_neg = static_arq::first_negative(&mut arq); 125 | 126 | arq.update(3, 7, &0); 127 | let first_neg_zeros = 
static_arq::first_negative(&mut arq); 128 | 129 | assert_eq!(first_neg, Some(3)); 130 | assert_eq!(first_neg_zeros, None); 131 | } 132 | 133 | #[test] 134 | fn test_dynamic_binary_search_rmq() { 135 | let vec = vec![2, 1, 0, -1, -2, -3, -4, -5]; 136 | let mut arq = DynamicArq::::new(false); 137 | let view = arq.build_from_slice(&vec); 138 | let first_neg = dynamic_arq::first_negative(&mut arq, view); 139 | 140 | arq.update(view, 3, 7, &0); 141 | let first_neg_zeros = dynamic_arq::first_negative(&mut arq, view); 142 | 143 | assert_eq!(first_neg, Some(3)); 144 | assert_eq!(first_neg_zeros, None); 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /src/range_query/specs.rs: -------------------------------------------------------------------------------- 1 | //! A collection of example ArqSpec implementations 2 | 3 | pub trait ArqSpec { 4 | /// Type of underlying array elements. 5 | type S: Clone; 6 | /// Type of data representing an endomorphism. 7 | // Note that while a Fn(S) -> S may seem like a more natural representation 8 | // for an endomorphism, compositions would then have to delegate to each of 9 | // their parts. This representation is more efficient. 10 | type F: Clone; 11 | 12 | /// Must satisfy the Associative Law: 13 | /// For all a,b,c, op(a, op(b, c)) = op(op(a, b), c) 14 | fn op(a: &Self::S, b: &Self::S) -> Self::S; 15 | /// Must satisfy the Identity Law: 16 | /// For all a, op(a, identity()) = op(identity(), a) = a 17 | fn identity() -> Self::S; 18 | /// Must satisfy the Composition Law: 19 | /// For all f,g,a, apply(compose(f, g), a) = apply(f, apply(g, a)) 20 | fn compose(f: &Self::F, g: &Self::F) -> Self::F; 21 | /// Must satisfy the Distributive Law: 22 | /// For all f,a,b, apply(f, op(a, b), s+t) = op(apply(f, a, s), apply(f, b, t)) 23 | /// The `size` parameter makes this law easier to satisfy in certain cases. 
24 | fn apply(f: &Self::F, a: &Self::S, size: i64) -> Self::S; 25 | 26 | // The following relaxations to the laws may apply. 27 | // If only point updates are made, the Composition and Distributive Laws 28 | // no longer apply. 29 | // - compose() is never called, so it can be left unimplemented!(). 30 | // - apply() is only ever called on leaves, i.e., with size == 1. 31 | // If only point queries are made, the Associative and Distributive Laws 32 | // no longer apply. 33 | // - op()'s result only matters when identity() is an argument. 34 | // - apply()'s result only matters on leaves, i.e., with size == 1. 35 | } 36 | 37 | /// Range Minimum Query (RMQ), a classic application of ARQ. 38 | /// update(l, r, &f) sets all entries a[l..=r] to f. 39 | /// query(l, r) finds the minimum value in a[l..=r]. 40 | // 41 | // Exercises: try augmenting this struct to find the index of a minimum element 42 | // in a range query, as well as the number of elements equal to the minimum. 43 | // Then instead of overwriting values with a constant assignment a[i] = f, 44 | // try supporting addition: a[i] += f. 45 | pub enum AssignMin {} 46 | impl ArqSpec for AssignMin { 47 | type S = i64; 48 | type F = i64; 49 | fn op(&a: &Self::S, &b: &Self::S) -> Self::S { 50 | a.min(b) 51 | } 52 | fn identity() -> Self::S { 53 | i64::MAX 54 | } 55 | fn compose(&f: &Self::F, _: &Self::F) -> Self::F { 56 | f 57 | } 58 | fn apply(&f: &Self::F, _: &Self::S, _: i64) -> Self::S { 59 | f 60 | } 61 | } 62 | 63 | /// Range Sum Query, a slightly trickier classic application of ARQ. 64 | /// update(l, r, &f) sets all entries a[l..=r] to f. 65 | /// query(l, r) sums all the entries a[l..=r]. 66 | /// 67 | /// # Panics 68 | /// 69 | /// Associated functions will panic on overflow. 
70 | // 71 | // Note that while the `size` parameter seems necessary to satisfy the 72 | // Distributive Law, it is merely a convenience: in essence what we've done 73 | // is move to the product monoid of tuples (value, size_of_subtree). 74 | // 75 | // In mathematical jargon, we say that constant assignment f(a) = f is not an 76 | // endomorphism on (i64, +) because f(a+b) = f != 2*f = f(a) + f(b). 77 | // On the other hand, f((a, s)) = (f*s, s) is indeed an endomorphism on pairs 78 | // with vector addition: f((a, s) + (b, t)) = f((a+b, s+t)) = (f*(s+t), s+t) 79 | // = (f*s, s) + (f*t, t) = f((a,s)) + f((b,t)). 80 | pub enum AssignSum {} 81 | impl ArqSpec for AssignSum { 82 | type S = i64; 83 | type F = i64; 84 | fn op(&a: &Self::S, &b: &Self::S) -> Self::S { 85 | a + b 86 | } 87 | fn identity() -> Self::S { 88 | 0 89 | } 90 | fn compose(&f: &Self::F, _: &Self::F) -> Self::F { 91 | f 92 | } 93 | fn apply(&f: &Self::F, _: &Self::S, size: i64) -> Self::S { 94 | f * size 95 | } 96 | } 97 | 98 | /// Supply & Demand, based on https://codeforces.com/gym/102218/problem/F 99 | /// update(i, i, &(p, o)) increases supply by p and demand by o at time i. 100 | /// query(l, r) computes total supply and demand at times l to r, as well as 101 | // how much of the supply is subsequently met by the demand. 102 | // 103 | // Note that the apply() operation is only correct when applied to leaf nodes. 104 | // Therefore, update() must only be used in "eager" mode, i.e., with l == r. 105 | // compose() should be unimplemented!() to prevent accidental "lazy" updates. 
106 | pub enum SupplyDemand {} 107 | impl ArqSpec for SupplyDemand { 108 | type S = (i64, i64, i64); // production, orders, sales 109 | type F = (i64, i64); 110 | fn op((p1, o1, s1): &Self::S, (p2, o2, s2): &Self::S) -> Self::S { 111 | let extra = (p1 - s1).min(o2 - s2); 112 | (p1 + p2, o1 + o2, s1 + s2 + extra) 113 | } 114 | fn identity() -> Self::S { 115 | (0, 0, 0) 116 | } 117 | fn compose(_: &Self::F, _: &Self::F) -> Self::F { 118 | unimplemented!() 119 | } 120 | fn apply(&(p_add, o_add): &Self::F, &(p, o, _): &Self::S, s: i64) -> Self::S { 121 | assert_eq!(s, 1); 122 | let p = p + p_add; 123 | let o = o + o_add; 124 | (p, o, p.min(o)) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/range_query/sqrt_decomp.rs: -------------------------------------------------------------------------------- 1 | /// A generic implementation of Mo's algorithm, aka Query Sqrt Decomposition. 2 | /// It answers q offline queries over intervals in 0..n by shifting the query 3 | /// interval's endpoints by one position at a time. 4 | /// Each endpoint incurs a total cost of at most n * sqrt(q * L_OP * R_OP). 5 | pub trait MoState { 6 | type Q; 7 | type A; 8 | 9 | /// cost ratio L_OP / R_OP between a left endpoint and a right endpoint move 10 | const L_R_RATIO: f64 = 1.0; 11 | 12 | fn query(&self, q: &Self::Q) -> Self::A; 13 | fn insert_left(&mut self, pos: usize); 14 | fn remove_left(&mut self, pos: usize); 15 | 16 | fn insert_right(&mut self, pos: usize) { 17 | self.insert_left(pos); 18 | } 19 | fn remove_right(&mut self, pos: usize) { 20 | self.remove_left(pos); 21 | } 22 | /// After initializing self to a state corresponding to an empty interval, 23 | /// call this function to answer all your queries. 
24 | fn process(&mut self, queries: &[(usize, usize, Self::Q)]) -> Vec { 25 | let q = queries.len(); 26 | let mut q_positions: Vec = (0..q).collect(); 27 | if let Some(max_r) = queries.iter().map(|&(_, r, _)| r).max() { 28 | let q_adjusted = q as f64 * Self::L_R_RATIO; 29 | let bucket_width = 1 + max_r / q_adjusted.sqrt() as usize; 30 | q_positions.sort_unstable_by_key(|&i| { 31 | let (l, mut r) = (queries[i].0, queries[i].1); 32 | let bucket = l / bucket_width; 33 | if bucket % 2 != 0 { 34 | r = max_r - r; 35 | } 36 | (bucket, r) 37 | }); 38 | } 39 | 40 | let (mut cur_l, mut cur_r) = (1, 0); 41 | let mut answers = Vec::with_capacity(queries.len()); 42 | for i in q_positions { 43 | let (l, r, ref q) = queries[i]; 44 | while cur_l > l { 45 | cur_l -= 1; 46 | self.insert_left(cur_l); 47 | } 48 | while cur_r < r { 49 | cur_r += 1; 50 | self.insert_right(cur_r); 51 | } 52 | while cur_l < l { 53 | self.remove_left(cur_l); 54 | cur_l += 1; 55 | } 56 | while cur_r > r { 57 | self.remove_right(cur_r); 58 | cur_r -= 1; 59 | } 60 | answers.push((i, self.query(q))); 61 | } 62 | answers.sort_unstable_by_key(|&(i, _)| i); 63 | answers.into_iter().map(|(_, ans)| ans).collect() 64 | } 65 | } 66 | 67 | pub struct DistinctVals { 68 | vals: Vec, 69 | counts: Vec, 70 | distinct: usize, 71 | } 72 | impl DistinctVals { 73 | pub fn new(vals: Vec) -> Self { 74 | let &max_val = vals.iter().max().unwrap_or(&0); 75 | Self { 76 | vals, 77 | counts: vec![0; max_val + 1], 78 | distinct: 0, 79 | } 80 | } 81 | } 82 | impl MoState for DistinctVals { 83 | type Q = (); 84 | type A = usize; 85 | fn query(&self, _: &Self::Q) -> Self::A { 86 | self.distinct 87 | } 88 | fn insert_left(&mut self, pos: usize) { 89 | let v = self.vals[pos]; 90 | if self.counts[v] == 0 { 91 | self.distinct += 1; 92 | } 93 | self.counts[v] += 1; 94 | } 95 | fn remove_left(&mut self, pos: usize) { 96 | let v = self.vals[pos]; 97 | self.counts[v] -= 1; 98 | if self.counts[v] == 0 { 99 | self.distinct -= 1; 100 | } 101 | } 102 
| } 103 | 104 | #[cfg(test)] 105 | mod test { 106 | use super::*; 107 | 108 | #[test] 109 | fn test_mos_algorithm() { 110 | let queries = vec![(0, 2, ()), (5, 5, ()), (2, 6, ()), (0, 6, ())]; 111 | let arr = vec![4, 8, 4, 7, 1, 9, 8]; 112 | 113 | let answers = DistinctVals::new(arr).process(&queries); 114 | 115 | assert_eq!(answers, vec![2, 1, 5, 5]); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/range_query/static_arq.rs: -------------------------------------------------------------------------------- 1 | //! Associative Range Query Tree 2 | use super::ArqSpec; 3 | 4 | /// Colloquially known as a "segtree" in the sport programming literature, it 5 | /// represents a sequence of elements a_i (0 <= i < size) from a monoid (S, +) 6 | /// on which we want to support fast range operations: 7 | /// 8 | /// - update(l, r, f) replaces a_i (l <= i <= r) by f(a_i) for an endomorphism f 9 | /// - query(l, r) returns the aggregate a_l + a_{l+1} + ... + a_r 10 | /// 11 | /// This compact representation is based on a [blog post by Al.Cash] 12 | /// (http://codeforces.com/blog/entry/18051). All nodes have 0 or 2 children. 13 | /// Hence, trees whose size is not a power of two will have multiple roots. 14 | /// 15 | /// Future work: ArqTree would lend itself naturally to Rust's ownership system. 16 | /// Initially, we should only have access to the root nodes: 17 | /// if size is a power of two, there is a unique root at index 1. 18 | /// arq.push(i) locks i and acquires access to its children. 19 | /// arq.pull(i) is called when the lock on i is released. 20 | pub struct StaticArq { 21 | val: Vec, 22 | app: Vec>, 23 | } 24 | 25 | impl StaticArq { 26 | /// Initializes a static balanced binary tree on top of the given sequence. 
27 | pub fn new(init_val: &[T::S]) -> Self { 28 | let size = init_val.len(); 29 | let mut val = vec![T::identity(); size]; 30 | val.extend_from_slice(init_val); 31 | let app = vec![None; size]; 32 | 33 | let mut arq = Self { val, app }; 34 | for p in (0..size).rev() { 35 | arq.pull(p); 36 | } 37 | arq 38 | } 39 | 40 | fn apply(&mut self, p: usize, f: &T::F, s: i64) { 41 | self.val[p] = T::apply(f, &self.val[p], s); 42 | if let Some(lazy) = self.app.get_mut(p) { 43 | let h = match *lazy { 44 | Some(ref g) => T::compose(f, g), 45 | None => f.clone(), 46 | }; 47 | *lazy = Some(h); 48 | } 49 | } 50 | 51 | fn push(&mut self, p: usize) { 52 | if let Some(ref f) = self.app[p].take() { 53 | let s = (self.app.len().div_ceil(p) / 2).next_power_of_two() as i64; 54 | self.apply(p << 1, f, s); 55 | self.apply((p << 1) | 1, f, s); 56 | } 57 | } 58 | 59 | fn pull(&mut self, p: usize) { 60 | self.val[p] = T::op(&self.val[p << 1], &self.val[(p << 1) | 1]); 61 | } 62 | 63 | fn push_to(&mut self, p: usize) { 64 | let one_plus_floor_log_p = (p + 1).next_power_of_two().trailing_zeros(); 65 | for i in (1..one_plus_floor_log_p).rev() { 66 | self.push(p >> i); 67 | } 68 | } 69 | 70 | fn pull_from(&mut self, mut p: usize) { 71 | while p > 1 { 72 | p >>= 1; 73 | self.pull(p); 74 | } 75 | } 76 | 77 | /// Applies the endomorphism f to all entries from l to r, inclusive. 78 | /// If l == r, the updates are eager. Otherwise, they are lazy. 79 | /// 80 | /// # Panics 81 | /// 82 | /// Panics if r >= size. Note that l > r is valid, meaning an empty range. 
83 | pub fn update(&mut self, mut l: usize, mut r: usize, f: &T::F) { 84 | l += self.app.len(); 85 | r += self.app.len(); 86 | if l < r { 87 | self.push_to(l); 88 | } 89 | self.push_to(r); 90 | let (mut l0, mut r0, mut s) = (1, 1, 1); 91 | while l <= r { 92 | if l & 1 == 1 { 93 | self.apply(l, f, s); 94 | l0 = l0.max(l); 95 | l += 1; 96 | } 97 | if r & 1 == 0 { 98 | self.apply(r, f, s); 99 | r0 = r0.max(r); 100 | r -= 1; 101 | } 102 | l >>= 1; 103 | r >>= 1; 104 | s <<= 1; 105 | } 106 | self.pull_from(l0); 107 | self.pull_from(r0); 108 | } 109 | 110 | /// Returns the aggregate range query on all entries from l to r, inclusive. 111 | /// 112 | /// # Panics 113 | /// 114 | /// Panics if r >= size. Note that l > r is valid, meaning an empty range. 115 | pub fn query(&mut self, mut l: usize, mut r: usize) -> T::S { 116 | l += self.app.len(); 117 | r += self.app.len(); 118 | if l < r { 119 | self.push_to(l); 120 | } 121 | self.push_to(r); 122 | let (mut l_agg, mut r_agg) = (T::identity(), T::identity()); 123 | while l <= r { 124 | if l & 1 == 1 { 125 | l_agg = T::op(&l_agg, &self.val[l]); 126 | l += 1; 127 | } 128 | if r & 1 == 0 { 129 | r_agg = T::op(&self.val[r], &r_agg); 130 | r -= 1; 131 | } 132 | l >>= 1; 133 | r >>= 1; 134 | } 135 | T::op(&l_agg, &r_agg) 136 | } 137 | } 138 | 139 | /// An example of binary search to find the first position whose element is negative. 140 | /// In this case, we use RMQ to locate the leftmost negative element. 141 | /// To ensure the existence of a valid root note (i == 1) from which to descend, 142 | /// the tree's size must be a power of two. 
143 | pub fn first_negative(arq: &mut StaticArq) -> Option { 144 | assert!(arq.app.len().is_power_of_two()); 145 | let mut p = 1; 146 | if arq.val[p] >= 0 { 147 | None 148 | } else { 149 | while p < arq.app.len() { 150 | arq.push(p); 151 | p <<= 1; 152 | if arq.val[p] >= 0 { 153 | p |= 1; 154 | } 155 | } 156 | Some(p - arq.app.len()) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /src/rng.rs: -------------------------------------------------------------------------------- 1 | //! Pseudorandom number generators (PRNGs). 2 | 3 | /// A simple and efficient random number generator. 4 | pub type SmallRng = Xoshiro256PlusPlus; 5 | 6 | /// A xoshiro256++ random number generator. 7 | /// 8 | /// This is a simplified version of the `SmallRng` implementation from the 9 | /// excellent `rand` crate, keeping only essential features. 10 | /// 11 | /// The xoshiro256++ algorithm is not suitable for cryptographic purposes, but 12 | /// is very fast and has excellent statistical properties. 13 | /// 14 | /// * Source: [Docs.rs](https://docs.rs/rand/0.8.4/src/rand/rngs/xoshiro256plusplus.rs.html) 15 | /// * Theory: [Xorshift - Wikipedia](https://en.wikipedia.org/wiki/Xorshift) 16 | #[derive(Debug, Clone, PartialEq, Eq)] 17 | pub struct Xoshiro256PlusPlus { 18 | s: [u64; 4], 19 | } 20 | 21 | impl Xoshiro256PlusPlus { 22 | /// Construct a new RNG from a 64-bit seed. 23 | pub fn new(mut state: u64) -> Self { 24 | const PHI: u64 = 0x9e3779b97f4a7c15; 25 | let mut seed = <[u64; 4]>::default(); 26 | for chunk in &mut seed { 27 | state = state.wrapping_add(PHI); 28 | let mut z = state; 29 | z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9); 30 | z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb); 31 | z = z ^ (z >> 31); 32 | *chunk = z; 33 | } 34 | Self { s: seed } 35 | } 36 | 37 | /// Generate a random `u32`. 
38 | #[inline] 39 | pub fn next_u32(&mut self) -> u32 { 40 | (self.next_u64() >> 32) as u32 41 | } 42 | 43 | /// Generate a random `u64`. 44 | #[inline] 45 | pub fn next_u64(&mut self) -> u64 { 46 | let result_plusplus = self.s[0] 47 | .wrapping_add(self.s[3]) 48 | .rotate_left(23) 49 | .wrapping_add(self.s[0]); 50 | 51 | let t = self.s[1] << 17; 52 | 53 | self.s[2] ^= self.s[0]; 54 | self.s[3] ^= self.s[1]; 55 | self.s[1] ^= self.s[2]; 56 | self.s[0] ^= self.s[3]; 57 | 58 | self.s[2] ^= t; 59 | 60 | self.s[3] = self.s[3].rotate_left(45); 61 | 62 | result_plusplus 63 | } 64 | } 65 | 66 | #[cfg(test)] 67 | mod tests { 68 | use super::*; 69 | 70 | #[test] 71 | fn test_xoshiro256plusplus() { 72 | let mut rng = Xoshiro256PlusPlus::new(42); 73 | assert_eq!(rng.next_u64(), 15021278609987233951); 74 | assert_eq!(rng.next_u64(), 5881210131331364753); 75 | assert_eq!(rng.next_u64(), 18149643915985481100); 76 | assert_eq!(rng.next_u64(), 12933668939759105464); 77 | assert_eq!(rng.next_u64(), 14637574242682825331); 78 | assert_eq!(rng.next_u64(), 10848501901068131965); 79 | assert_eq!(rng.next_u64(), 2312344417745909078); 80 | assert_eq!(rng.next_u64(), 11162538943635311430); 81 | } 82 | 83 | #[test] 84 | fn reference() { 85 | let mut rng = Xoshiro256PlusPlus { s: [1, 2, 3, 4] }; 86 | // These values were produced with the reference implementation: 87 | // http://xoshiro.di.unimi.it/xoshiro256plusplus.c 88 | let expected = [ 89 | 41943041, 90 | 58720359, 91 | 3588806011781223, 92 | 3591011842654386, 93 | 9228616714210784205, 94 | 9973669472204895162, 95 | 14011001112246962877, 96 | 12406186145184390807, 97 | 15849039046786891736, 98 | 10450023813501588000, 99 | ]; 100 | for &e in &expected { 101 | assert_eq!(rng.next_u64(), e); 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/scanner.rs: -------------------------------------------------------------------------------- 1 | //! 
Generic utility for reading data from standard input, based on [voxl's 2 | //! stdin wrapper](http://codeforces.com/contest/702/submission/19589375). 3 | use std::io; 4 | use std::str; 5 | 6 | /// Reads white-space separated tokens one at a time. 7 | pub struct Scanner { 8 | reader: R, 9 | buffer: Vec, 10 | } 11 | 12 | impl Scanner { 13 | pub fn new(reader: R) -> Self { 14 | Self { 15 | reader, 16 | buffer: vec![], 17 | } 18 | } 19 | 20 | /// Use "turbofish" syntax token::() to select data type of next token. 21 | /// 22 | /// # Panics 23 | /// 24 | /// Panics if there's an I/O error or if the token cannot be parsed as T. 25 | pub fn token(&mut self) -> T { 26 | loop { 27 | if let Some(token) = self.buffer.pop() { 28 | return token.parse().ok().expect("Failed parse"); 29 | } 30 | let mut input = String::new(); 31 | self.reader.read_line(&mut input).expect("Failed read"); 32 | self.buffer = input.split_whitespace().rev().map(String::from).collect(); 33 | } 34 | } 35 | } 36 | 37 | /// Same API as Scanner but nearly twice as fast, using horribly unsafe dark arts 38 | pub struct UnsafeScanner { 39 | reader: R, 40 | buf_str: Vec, 41 | buf_iter: str::SplitAsciiWhitespace<'static>, 42 | } 43 | 44 | impl UnsafeScanner { 45 | pub fn new(reader: R) -> Self { 46 | Self { 47 | reader, 48 | buf_str: vec![], 49 | buf_iter: "".split_ascii_whitespace(), 50 | } 51 | } 52 | 53 | /// This function should be marked unsafe, but noone has time for that in a 54 | /// programming contest. Use at your own risk! 
55 | pub fn token(&mut self) -> T { 56 | loop { 57 | if let Some(token) = self.buf_iter.next() { 58 | return token.parse().ok().expect("Failed parse"); 59 | } 60 | self.buf_str.clear(); 61 | self.reader 62 | .read_until(b'\n', &mut self.buf_str) 63 | .expect("Failed read"); 64 | self.buf_iter = unsafe { 65 | let slice = str::from_utf8_unchecked(&self.buf_str); 66 | std::mem::transmute(slice.split_ascii_whitespace()) 67 | } 68 | } 69 | } 70 | } 71 | 72 | pub fn scanner_from_file(filename: &str) -> Scanner> { 73 | let file = std::fs::File::open(filename).expect("Input file not found"); 74 | Scanner::new(io::BufReader::new(file)) 75 | } 76 | 77 | pub fn writer_to_file(filename: &str) -> io::BufWriter { 78 | let file = std::fs::File::create(filename).expect("Output file not found"); 79 | io::BufWriter::new(file) 80 | } 81 | 82 | #[cfg(test)] 83 | mod test { 84 | use super::*; 85 | 86 | fn solve(scan: &mut Scanner, out: &mut W) { 87 | let x = scan.token::(); 88 | let y = scan.token::(); 89 | writeln!(out, "{} - {} = {}", x, y, x - y).ok(); 90 | } 91 | 92 | fn unsafe_solve(scan: &mut UnsafeScanner, out: &mut W) { 93 | let x = scan.token::(); 94 | let y = scan.token::(); 95 | writeln!(out, "{} - {} = {}", x, y, x - y).ok(); 96 | } 97 | 98 | #[test] 99 | fn test_in_memory_io() { 100 | let input: &[u8] = b"50 8"; 101 | let mut scan = Scanner::new(input); 102 | let mut out = vec![]; 103 | 104 | solve(&mut scan, &mut out); 105 | assert_eq!(out, b"50 - 8 = 42\n"); 106 | } 107 | 108 | #[test] 109 | fn test_in_memory_unsafe() { 110 | let input: &[u8] = b"50 8"; 111 | let mut scan = UnsafeScanner::new(input); 112 | let mut out = vec![]; 113 | 114 | unsafe_solve(&mut scan, &mut out); 115 | assert_eq!(out, b"50 - 8 = 42\n"); 116 | } 117 | 118 | #[test] 119 | fn test_compile_stdio() { 120 | let mut scan = Scanner::new(io::stdin().lock()); 121 | let mut out = io::BufWriter::new(io::stdout().lock()); 122 | 123 | if false { 124 | solve(&mut scan, &mut out); 125 | } 126 | } 127 | 128 | 
#[test] 129 | #[should_panic(expected = "Input file not found")] 130 | fn test_panic_file() { 131 | let mut scan = scanner_from_file("input_file.txt"); 132 | let mut out = writer_to_file("output_file.txt"); 133 | 134 | solve(&mut scan, &mut out); 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /src/string_proc.rs: -------------------------------------------------------------------------------- 1 | //! String processing algorithms. 2 | use std::cmp::{max, min}; 3 | use std::collections::{HashMap, VecDeque, hash_map::Entry}; 4 | 5 | /// Prefix trie, easily augmentable by adding more fields and/or methods 6 | pub struct Trie { 7 | links: Vec>, 8 | } 9 | 10 | impl Default for Trie { 11 | /// Creates an empty trie with a root node. 12 | fn default() -> Self { 13 | Self { 14 | links: vec![HashMap::new()], 15 | } 16 | } 17 | } 18 | 19 | impl Trie { 20 | /// Inserts a word into the trie, and returns the index of its node. 21 | pub fn insert(&mut self, word: impl IntoIterator) -> usize { 22 | let mut node = 0; 23 | 24 | for ch in word { 25 | let len = self.links.len(); 26 | node = match self.links[node].entry(ch) { 27 | Entry::Occupied(entry) => *entry.get(), 28 | Entry::Vacant(entry) => { 29 | entry.insert(len); 30 | self.links.push(HashMap::new()); 31 | len 32 | } 33 | } 34 | } 35 | node 36 | } 37 | 38 | /// Finds a word in the trie, and returns the index of its node. 39 | pub fn get(&self, word: impl IntoIterator) -> Option { 40 | let mut node = 0; 41 | for ch in word { 42 | node = *self.links[node].get(&ch)?; 43 | } 44 | Some(node) 45 | } 46 | } 47 | 48 | /// Single-pattern matching with the Knuth-Morris-Pratt algorithm 49 | pub struct Matcher<'a, C: Eq> { 50 | /// The string pattern to search for. 51 | pub pattern: &'a [C], 52 | /// KMP match failure automaton: fail[i] is the length of the longest 53 | /// string that's both a proper prefix and a proper suffix of pattern[0..=i]. 
54 | pub fail: Vec, 55 | } 56 | 57 | impl<'a, C: Eq> Matcher<'a, C> { 58 | /// Precomputes the automaton that allows linear-time string matching. 59 | /// 60 | /// # Example 61 | /// 62 | /// ``` 63 | /// use contest_algorithms::string_proc::Matcher; 64 | /// let byte_string: &[u8] = b"hello"; 65 | /// let utf8_string: &str = "hello"; 66 | /// let vec_char: Vec = utf8_string.chars().collect(); 67 | /// 68 | /// let match_from_byte_literal = Matcher::new(byte_string); 69 | /// let match_from_utf8 = Matcher::new(utf8_string.as_bytes()); 70 | /// let match_from_chars = Matcher::new(&vec_char); 71 | /// 72 | /// let vec_int = vec![4, -3, 1]; 73 | /// let match_from_ints = Matcher::new(&vec_int); 74 | /// ``` 75 | /// 76 | /// # Panics 77 | /// 78 | /// Panics if pattern is empty. 79 | pub fn new(pattern: &'a [C]) -> Self { 80 | let mut fail = Vec::with_capacity(pattern.len()); 81 | fail.push(0); 82 | let mut len = 0; 83 | for ch in &pattern[1..] { 84 | while len > 0 && pattern[len] != *ch { 85 | len = fail[len - 1]; 86 | } 87 | if pattern[len] == *ch { 88 | len += 1; 89 | } 90 | fail.push(len); 91 | } 92 | Self { pattern, fail } 93 | } 94 | 95 | /// KMP algorithm, sets @return[i] = length of longest prefix of pattern 96 | /// matching a suffix of text[0..=i]. 97 | pub fn kmp_match(&self, text: impl IntoIterator) -> Vec { 98 | let mut len = 0; 99 | text.into_iter() 100 | .map(|ch| { 101 | if len == self.pattern.len() { 102 | len = self.fail[len - 1]; 103 | } 104 | while len > 0 && self.pattern[len] != ch { 105 | len = self.fail[len - 1]; 106 | } 107 | if self.pattern[len] == ch { 108 | len += 1; 109 | } 110 | len 111 | }) 112 | .collect() 113 | } 114 | } 115 | 116 | /// Multi-pattern matching with the Aho-Corasick algorithm 117 | pub struct MultiMatcher { 118 | /// A prefix trie storing the string patterns to search for. 119 | pub trie: Trie, 120 | /// Stores which completed pattern string each node corresponds to. 
121 | pub pat_id: Vec>, 122 | /// Aho-Corasick failure automaton. fail[i] is the node corresponding to the 123 | /// longest prefix-suffix of the node corresponding to i. 124 | pub fail: Vec, 125 | /// Shortcut to the next match along the failure chain, or to the root. 126 | pub fast: Vec, 127 | } 128 | 129 | impl MultiMatcher { 130 | fn next(trie: &Trie, fail: &[usize], mut node: usize, ch: &C) -> usize { 131 | loop { 132 | if let Some(&child) = trie.links[node].get(ch) { 133 | return child; 134 | } else if node == 0 { 135 | return 0; 136 | } 137 | node = fail[node]; 138 | } 139 | } 140 | 141 | /// Precomputes the automaton that allows linear-time string matching. 142 | /// If there are duplicate patterns, all but one copy will be ignored. 143 | pub fn new(patterns: impl IntoIterator>) -> Self { 144 | let mut trie = Trie::default(); 145 | #[allow(clippy::needless_collect)] // It's not needless: it affects trie.links.len() 146 | let pat_nodes: Vec = patterns.into_iter().map(|pat| trie.insert(pat)).collect(); 147 | 148 | let mut pat_id = vec![None; trie.links.len()]; 149 | for (i, node) in pat_nodes.into_iter().enumerate() { 150 | pat_id[node] = Some(i); 151 | } 152 | 153 | let mut fail = vec![0; trie.links.len()]; 154 | let mut fast = vec![0; trie.links.len()]; 155 | let mut q: VecDeque = trie.links[0].values().cloned().collect(); 156 | 157 | while let Some(node) = q.pop_front() { 158 | for (ch, &child) in &trie.links[node] { 159 | let nx = Self::next(&trie, &fail, fail[node], ch); 160 | fail[child] = nx; 161 | fast[child] = if pat_id[nx].is_some() { nx } else { fast[nx] }; 162 | q.push_back(child); 163 | } 164 | } 165 | 166 | Self { 167 | trie, 168 | pat_id, 169 | fail, 170 | fast, 171 | } 172 | } 173 | 174 | /// Aho-Corasick algorithm, sets @return[i] = node corresponding to 175 | /// longest prefix of some pattern matching a suffix of text[0..=i]. 
176 | pub fn ac_match(&self, text: impl IntoIterator) -> Vec { 177 | let mut node = 0; 178 | text.into_iter() 179 | .map(|ch| { 180 | node = Self::next(&self.trie, &self.fail, node, &ch); 181 | node 182 | }) 183 | .collect() 184 | } 185 | 186 | /// For each non-empty match, returns where in the text it ends, and the index 187 | /// of the corresponding pattern. 188 | pub fn get_end_pos_and_pat_id(&self, match_nodes: &[usize]) -> Vec<(usize, usize)> { 189 | let mut res = vec![]; 190 | for (text_pos, &(mut node)) in match_nodes.iter().enumerate() { 191 | while node != 0 { 192 | if let Some(id) = self.pat_id[node] { 193 | res.push((text_pos + 1, id)); 194 | } 195 | node = self.fast[node]; 196 | } 197 | } 198 | res 199 | } 200 | } 201 | 202 | /// Suffix array data structure, useful for a variety of string queries. 203 | pub struct SuffixArray { 204 | /// The suffix array itself, holding suffix indices in sorted order. 205 | pub sfx: Vec, 206 | /// rank[i][j] = rank of the j'th suffix, considering only 2^i chars. 207 | /// In other words, rank[i] is a ranking of the substrings text[j..j+2^i]. 208 | pub rank: Vec>, 209 | } 210 | 211 | impl SuffixArray { 212 | /// O(n + max_key) stable sort on the items generated by vals. 213 | /// Items v in vals are sorted according to val_to_key[v]. 214 | fn counting_sort( 215 | vals: impl Iterator + Clone, 216 | val_to_key: &[usize], 217 | max_key: usize, 218 | ) -> Vec { 219 | let mut counts = vec![0; max_key]; 220 | for v in vals.clone() { 221 | counts[val_to_key[v]] += 1; 222 | } 223 | let mut total = 0; 224 | for c in counts.iter_mut() { 225 | total += *c; 226 | *c = total - *c; 227 | } 228 | let mut result = vec![0; total]; 229 | for v in vals { 230 | let c = &mut counts[val_to_key[v]]; 231 | result[*c] = v; 232 | *c += 1; 233 | } 234 | result 235 | } 236 | 237 | /// Suffix array construction in O(n log n) time. 
238 | pub fn new(text: impl IntoIterator) -> Self { 239 | let init_rank = text.into_iter().map(|ch| ch as usize).collect::>(); 240 | let n = init_rank.len(); 241 | let mut sfx = Self::counting_sort(0..n, &init_rank, 256); 242 | let mut rank = vec![init_rank]; 243 | // Invariant at the start of every loop iteration: 244 | // suffixes are sorted according to the first skip characters. 245 | for skip in (0..).map(|i| 1 << i).take_while(|&skip| skip < n) { 246 | let prev_rank = rank.last().unwrap(); 247 | let mut cur_rank = prev_rank.clone(); 248 | 249 | let pos = (n - skip..n).chain(sfx.into_iter().filter_map(|p| p.checked_sub(skip))); 250 | sfx = Self::counting_sort(pos, prev_rank, max(n, 256)); 251 | 252 | let mut prev = sfx[0]; 253 | cur_rank[prev] = 0; 254 | for &cur in sfx.iter().skip(1) { 255 | if max(prev, cur) + skip < n 256 | && prev_rank[prev] == prev_rank[cur] 257 | && prev_rank[prev + skip] == prev_rank[cur + skip] 258 | { 259 | cur_rank[cur] = cur_rank[prev]; 260 | } else { 261 | cur_rank[cur] = cur_rank[prev] + 1; 262 | } 263 | prev = cur; 264 | } 265 | rank.push(cur_rank); 266 | } 267 | Self { sfx, rank } 268 | } 269 | 270 | /// Computes the length of longest common prefix of text[i..] and text[j..]. 271 | pub fn longest_common_prefix(&self, mut i: usize, mut j: usize) -> usize { 272 | let mut len = 0; 273 | for (k, rank) in self.rank.iter().enumerate().rev() { 274 | if rank[i] == rank[j] { 275 | i += 1 << k; 276 | j += 1 << k; 277 | len += 1 << k; 278 | if max(i, j) >= self.sfx.len() { 279 | break; 280 | } 281 | } 282 | } 283 | len 284 | } 285 | } 286 | 287 | /// Manacher's algorithm for computing palindrome substrings in linear time. 288 | /// pal[2*i] = odd length of palindrome centred at text[i]. 289 | /// pal[2*i+1] = even length of palindrome centred at text[i+0.5]. 290 | /// 291 | /// # Panics 292 | /// 293 | /// Panics if text is empty. 
294 | pub fn palindromes(text: &[impl Eq]) -> Vec { 295 | let mut pal = Vec::with_capacity(2 * text.len() - 1); 296 | pal.push(1); 297 | while pal.len() < pal.capacity() { 298 | let i = pal.len() - 1; 299 | let max_len = min(i + 1, pal.capacity() - i); 300 | while pal[i] < max_len && text[(i - pal[i] - 1) / 2] == text[(i + pal[i] + 1) / 2] { 301 | pal[i] += 2; 302 | } 303 | if let Some(a) = 1usize.checked_sub(pal[i]) { 304 | pal.push(a); 305 | } else { 306 | for d in 1.. { 307 | let (a, b) = (pal[i - d], pal[i] - d); 308 | if a < b { 309 | pal.push(a); 310 | } else { 311 | pal.push(b); 312 | break; 313 | } 314 | } 315 | } 316 | } 317 | pal 318 | } 319 | 320 | /// Z algorithm: computes the array Z[..], where Z[i] is the length of the 321 | /// longest text prefix of text[i..] that is **also a prefix** of text. 322 | /// 323 | /// It runs in O(n) time, maintaining the invariant that l <= i and 324 | /// text[0..r-l] == text[l..r]. It can be embedded in a larger algorithm, 325 | /// or used for string searching as an alternative to KMP. 
326 | /// 327 | /// # Example 328 | /// 329 | /// ``` 330 | /// use contest_algorithms::string_proc::z_algorithm; 331 | /// let z = z_algorithm(b"ababbababbabababbabababbababbaba"); 332 | /// assert_eq!( 333 | /// z, 334 | /// vec![ 335 | /// 32, 0, 2, 0, 0, 9, 0, 2, 0, 0, 4, 0, 9, 0, 2, 0, 0, 4, 0, 13, 0, 2, 336 | /// 0, 0, 8, 0, 2, 0, 0, 3, 0, 1, 337 | /// ], 338 | /// ); 339 | /// ``` 340 | pub fn z_algorithm(text: &[impl Eq]) -> Vec { 341 | let n = text.len(); 342 | let (mut l, mut r) = (1, 1); 343 | let mut z = Vec::with_capacity(n); 344 | z.push(n); 345 | for i in 1..n { 346 | if r > i + z[i - l] { 347 | z.push(z[i - l]); 348 | } else { 349 | l = i; 350 | while r < i || (r < n && text[r - i] == text[r]) { 351 | r += 1; 352 | } 353 | z.push(r - i); 354 | } 355 | } 356 | z 357 | } 358 | 359 | #[cfg(test)] 360 | mod test { 361 | use super::*; 362 | 363 | #[test] 364 | fn test_trie() { 365 | let dict = vec!["banana", "benefit", "banapple", "ban"]; 366 | 367 | let trie = dict.into_iter().fold(Trie::default(), |mut trie, word| { 368 | trie.insert(word.bytes()); 369 | trie 370 | }); 371 | 372 | assert_eq!(trie.get("".bytes()), Some(0)); 373 | assert_eq!(trie.get("b".bytes()), Some(1)); 374 | assert_eq!(trie.get("banana".bytes()), Some(6)); 375 | assert_eq!(trie.get("be".bytes()), Some(7)); 376 | assert_eq!(trie.get("bane".bytes()), None); 377 | } 378 | 379 | #[test] 380 | fn test_kmp_matching() { 381 | let pattern = "ana"; 382 | let text = "banana"; 383 | 384 | let matches = Matcher::new(pattern.as_bytes()).kmp_match(text.bytes()); 385 | 386 | assert_eq!(matches, vec![0, 1, 2, 3, 2, 3]); 387 | } 388 | 389 | #[test] 390 | fn test_ac_matching() { 391 | let dict = vec!["banana", "benefit", "banapple", "ban", "fit"]; 392 | let text = "banana bans, apple benefits."; 393 | 394 | let matcher = MultiMatcher::new(dict.iter().map(|s| s.bytes())); 395 | let match_nodes = matcher.ac_match(text.bytes()); 396 | let end_pos_and_id = matcher.get_end_pos_and_pat_id(&match_nodes); 
397 | 398 | assert_eq!( 399 | end_pos_and_id, 400 | vec![(3, 3), (6, 0), (10, 3), (26, 1), (26, 4)] 401 | ); 402 | } 403 | 404 | #[test] 405 | fn test_suffix_array() { 406 | let text1 = "bobocel"; 407 | let text2 = "banana"; 408 | 409 | let sfx1 = SuffixArray::new(text1.bytes()); 410 | let sfx2 = SuffixArray::new(text2.bytes()); 411 | 412 | assert_eq!(sfx1.sfx, vec![0, 2, 4, 5, 6, 1, 3]); 413 | assert_eq!(sfx2.sfx, vec![5, 3, 1, 0, 4, 2]); 414 | 415 | assert_eq!(sfx1.longest_common_prefix(0, 2), 2); 416 | assert_eq!(sfx2.longest_common_prefix(1, 3), 3); 417 | 418 | // Check that sfx and rank.last() are essentially inverses of each other. 419 | for (p, &r) in sfx1.rank.last().unwrap().iter().enumerate() { 420 | assert_eq!(sfx1.sfx[r], p); 421 | } 422 | for (p, &r) in sfx2.rank.last().unwrap().iter().enumerate() { 423 | assert_eq!(sfx2.sfx[r], p); 424 | } 425 | } 426 | 427 | #[test] 428 | fn test_palindrome() { 429 | let text = "banana"; 430 | 431 | let pal_len = palindromes(text.as_bytes()); 432 | 433 | assert_eq!(pal_len, vec![1, 0, 1, 0, 3, 0, 5, 0, 3, 0, 1]); 434 | } 435 | } 436 | -------------------------------------------------------------------------------- /tests/codeforces343d.rs: -------------------------------------------------------------------------------- 1 | //! Solves [Water Tree](http://codeforces.com/contest/343/problem/D). 2 | //! To make a self-contained file for contest submission, dump each desired 3 | //! module's contents directly here instead of the use statements. 4 | //! Also, use the commented code in main() to employ standard I/O. 
5 | extern crate contest_algorithms; 6 | use contest_algorithms::graph::Graph; 7 | use contest_algorithms::range_query::{StaticArq, specs::AssignSum}; 8 | use contest_algorithms::scanner::Scanner; 9 | use std::io; 10 | 11 | const SAMPLE_INPUT: &[u8] = b"\ 12 | 5 13 | 1 2 14 | 5 1 15 | 2 3 16 | 4 2 17 | 12 18 | 1 1 19 | 2 3 20 | 3 1 21 | 3 2 22 | 3 3 23 | 3 4 24 | 1 2 25 | 2 4 26 | 3 1 27 | 3 3 28 | 3 4 29 | 3 5 30 | "; 31 | const SAMPLE_OUTPUT: &[u8] = b"\ 32 | 0 33 | 0 34 | 0 35 | 1 36 | 0 37 | 1 38 | 0 39 | 1 40 | "; 41 | 42 | fn dfs( 43 | graph: &Graph, 44 | u: usize, 45 | l: &mut [usize], 46 | r: &mut [usize], 47 | p: &mut [usize], 48 | time: &mut usize, 49 | ) { 50 | *time += 1; 51 | l[u] = *time; 52 | 53 | for (_, v) in graph.adj_list(u) { 54 | if l[v] == 0 { 55 | p[v] = l[u]; 56 | dfs(graph, v, l, r, p, time); 57 | } 58 | } 59 | 60 | r[u] = *time; 61 | } 62 | 63 | fn solve(scan: &mut Scanner, out: &mut W) { 64 | let n = scan.token::(); 65 | let mut tree = Graph::new(n, 2 * (n - 1)); 66 | for _ in 1..n { 67 | let u = scan.token::() - 1; 68 | let v = scan.token::() - 1; 69 | tree.add_undirected_edge(u, v); 70 | } 71 | 72 | let mut l = vec![0; n]; 73 | let mut r = vec![0; n]; 74 | let mut p = vec![0; n]; 75 | dfs(&tree, 0, &mut l, &mut r, &mut p, &mut 0); 76 | 77 | let mut arq = StaticArq::::new(&vec![0; n + 1]); 78 | let q = scan.token::(); 79 | for _ in 0..q { 80 | let c = scan.token::(); 81 | let v = scan.token::() - 1; 82 | let len = (r[v] - l[v] + 1) as i64; 83 | let sum = arq.query(l[v], r[v]); 84 | if c == 1 { 85 | if sum != len { 86 | arq.update(p[v], p[v], &0); 87 | arq.update(l[v], r[v], &1); 88 | } 89 | } else if c == 2 { 90 | arq.update(l[v], l[v], &0); 91 | } else { 92 | let ans = if sum == len { 1 } else { 0 }; 93 | writeln!(out, "{}", ans).ok(); 94 | } 95 | } 96 | } 97 | 98 | #[test] 99 | fn main() { 100 | let mut scan = Scanner::new(SAMPLE_INPUT); 101 | let mut out = vec![]; 102 | solve(&mut scan, &mut out); 103 | 104 | assert_eq!(out, 
SAMPLE_OUTPUT); 105 | } 106 | --------------------------------------------------------------------------------