├── .github └── workflows │ └── rust.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md └── ll ├── Cargo.toml └── src ├── data.rs ├── level.rs ├── lib.rs ├── main.rs ├── reporters ├── level.rs ├── mod.rs ├── term_status.rs ├── text.rs └── utils.rs ├── task.rs ├── task_tree.rs ├── tests ├── basic_test.rs └── mod.rs ├── uniq_id.rs └── utils.rs /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust CI 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | test: 14 | runs-on: ${{ matrix.os }} 15 | 16 | strategy: 17 | matrix: 18 | os: [ubuntu-latest, macOS-latest] 19 | rust: 20 | - stable 21 | - beta 22 | - nightly 23 | 24 | steps: 25 | - uses: actions/checkout@v2 26 | 27 | - uses: actions-rs/toolchain@v1 28 | with: 29 | profile: minimal 30 | toolchain: ${{ matrix.rust }} 31 | override: true 32 | components: rustfmt, clippy 33 | 34 | - uses: actions-rs/cargo@v1 35 | - name: Run tests 36 | run: cargo test --verbose 37 | 38 | - uses: actions-rs/cargo@v1 39 | with: 40 | command: fmt 41 | args: --all -- --check 42 | 43 | - uses: actions-rs/cargo@v1 44 | with: 45 | command: clippy 46 | args: -- -D warnings 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | 13 | #Added by cargo 14 | # 15 | #already existing elements were commented out 16 | 17 | /target 18 | #Cargo.lock 19 | .vscode/ 20 | 21 | .DS_Store -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "ll", 5 | ] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Aaron Abramov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LL - Rust Logging Library 2 | 3 | [![Crates.io][crates-badge]][crates-url] 4 | [![Docs.rs][docs-badge]][docs-url] 5 | ![Rust CI](https://github.com/aaronabramov/ll/workflows/Rust%20CI/badge.svg) 6 | 7 | [crates-badge]: https://img.shields.io/crates/v/ll.svg 8 | [crates-url]: https://crates.io/crates/ll/ 9 | [docs-badge]: https://docs.rs/ll/badge.svg 10 | [docs-url]: https://docs.rs/ll 11 | 12 | ![ll_header](https://user-images.githubusercontent.com/940133/88867783-18595100-d1d4-11ea-8894-5be572bab2a9.png) 13 | -------------------------------------------------------------------------------- /ll/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ll" 3 | version = "7.1.0" 4 | edition = "2018" 5 | authors = ["Aaron Abramov "] 6 | description = "rust logging library" 7 | license = "MIT" 8 | repository = "https://github.com/aaronabramov/ll" 9 | 10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 11 | 12 | [dependencies] 13 | anyhow = "1" 14 | async-trait = "0.1" 15 | chrono = "0.4" 16 | colored = "1.9" 17 | crossterm = "0.28" 18 | lazy_static = "1" 19 | strip-ansi-escapes = "0.1" 20 | term_size = "0.3" 21 | tokio = { version = "1", features = ["full"] } 22 | 23 | [dev-dependencies] 24 | k9 = "0.11" 25 | -------------------------------------------------------------------------------- /ll/src/data.rs: -------------------------------------------------------------------------------- 1 | use crate::level::Level; 2 | use std::collections::{BTreeMap, BTreeSet}; 3 | 4 | #[derive(Debug, Clone, Default)] 5 | pub struct Data { 6 | pub map: BTreeMap<String, DataEntry>, 7 | } 8 | 9 | impl Data { 10 | pub fn empty() -> Self { 11 | Self { 12 | map: BTreeMap::new(), 13 | } 14 | } 15 | } 16 | 17 | impl Data { 18 | pub fn add<S: Into<String>, V: Into<DataValue>>(&mut self, key: S, value: V) { 19 | let (key, tags) = crate::utils::extract_tags(key.into()); 20 | let data_entry = DataEntry(value.into(), tags); 21 | self.map.insert(key, data_entry); 22 | } 23 | 24 | pub fn merge(&mut self, other: &Data) { 25 | for (k, v) in &other.map { 26 | self.map.insert(k.clone(), v.clone()); 27 | } 28 | } 29 | 30 | pub fn is_empty(&self) -> bool { 31 | self.map.is_empty() 32 | } 33 | 34 | // Filter out data entries that are not supposed to be logged for 35 | // the set log level, based on event tags. 36 | // e.g. if the event is `some_event#trace` and current level is Info, 37 | // we would not want to log it.
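// A minimal illustrative sketch of the intended behavior (not part of the
// original file), assuming `Level::Info` is the active level:
//
//     let mut data = Data::empty();
//     data.add("query", "SELECT 1");        // untagged entry, always kept
//     data.add("backtrace #trace", "...");  // entry tagged as trace-only
//     data.filter_for_level(Level::Info);   // drops the "backtrace" entry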
38 | pub fn filter_for_level(&mut self, level: Level) { 39 | let mut to_remove = vec![]; 40 | for (key, entry) in &self.map { 41 | let entry_log_level = crate::utils::extract_log_level_from_tags(&entry.1); 42 | 43 | if let Some(entry_log_level) = entry_log_level { 44 | if entry_log_level > level { 45 | to_remove.push(key.clone()); 46 | } 47 | } 48 | } 49 | 50 | // as of right now BTreeMap doesn't implement `.retain()`, so we'll 51 | // have to do it the old way 52 | for key_to_remove in &to_remove { 53 | self.map.remove(key_to_remove); 54 | } 55 | } 56 | } 57 | 58 | #[derive(Debug, Clone)] 59 | pub enum DataValue { 60 | String(String), 61 | Int(i64), 62 | Float(f64), 63 | None, 64 | } 65 | 66 | impl std::fmt::Display for Data { 67 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 68 | let mut result = String::new(); 69 | for (k, v) in &self.map { 70 | result.push_str(&format!(" {}: {}\n", k, v.0)); 71 | } 72 | write!(f, "{}", result) 73 | } 74 | } 75 | 76 | impl std::fmt::Display for DataValue { 77 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 78 | let result = match self { 79 | DataValue::String(string) => string.to_owned(), 80 | DataValue::Int(i) => format!("{}", i), 81 | DataValue::Float(f) => format!("{}", f), 82 | DataValue::None => String::new(), 83 | }; 84 | write!(f, "{}", result) 85 | } 86 | } 87 | 88 | #[derive(Debug, Clone)] 89 | pub struct DataEntry(pub DataValue, pub BTreeSet); 90 | 91 | impl<'a> From<&'a str> for DataValue { 92 | fn from(v: &'a str) -> Self { 93 | DataValue::String(v.to_owned()) 94 | } 95 | } 96 | 97 | impl<'a> From<&'a String> for DataValue { 98 | fn from(v: &'a String) -> Self { 99 | DataValue::String(v.clone()) 100 | } 101 | } 102 | 103 | impl From for DataValue { 104 | fn from(v: String) -> Self { 105 | DataValue::String(v) 106 | } 107 | } 108 | 109 | impl From for DataValue { 110 | fn from(v: bool) -> Self { 111 | DataValue::String(format!("{}", v)) 112 | } 113 | } 114 | 115 | impl From for DataValue { 116 | fn from(value: f64) -> Self { 117 | DataValue::Float(value) 118 | } 119 | } 120 | 121 | impl From> for DataValue { 122 | fn from(maybe_value: Option) -> Self { 123 | match maybe_value { 124 | Some(v) => v.into(), 125 | None => DataValue::None, 126 | } 127 | } 128 | } 129 | 130 | impl From<&Option> for DataValue { 131 | fn from(maybe_value: &Option) -> Self { 132 | match maybe_value { 133 | Some(v) => v.into(), 134 | None => DataValue::None, 135 | } 136 | } 137 | } 138 | 139 | macro_rules! 
from_int_types { 140 | ( $( $t:ty ),* ) => { 141 | $( 142 | impl From<$t> for DataValue { 143 | fn from(value: $t) -> Self { 144 | DataValue::Int(value as i64) 145 | } 146 | } 147 | )* 148 | }; 149 | } 150 | 151 | from_int_types!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize); 152 | -------------------------------------------------------------------------------- /ll/src/level.rs: -------------------------------------------------------------------------------- 1 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] 2 | pub enum Level { 3 | Info = 1, 4 | Debug = 2, 5 | Trace = 3, 6 | } 7 | 8 | #[cfg(test)] 9 | mod test { 10 | use super::*; 11 | 12 | #[test] 13 | fn ordering() { 14 | let mut levels = vec![ 15 | Level::Trace, 16 | Level::Debug, 17 | Level::Info, 18 | Level::Debug, 19 | Level::Trace, 20 | ]; 21 | levels.sort(); 22 | 23 | assert_eq!( 24 | levels, 25 | vec![ 26 | Level::Info, 27 | Level::Debug, 28 | Level::Debug, 29 | Level::Trace, 30 | Level::Trace 31 | ] 32 | ); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /ll/src/lib.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | # ll - Logging Library 3 | 4 | **ll** is a lightweight logging library. Its main focus is to provide the ability 5 | to manually instrument portions of code to track and log its execution. 6 | 7 | Instrumentation of the code is done by wrapping parts of code into `Tasks`. 8 | Tasks emit a `start` event when the task is started and `end` event when it's finished. 9 | 10 | These events are consumed by `Reporters`. Multiple reporters can be used at the same time 11 | and they will each receive task events. Different reporters can report/log task events to 12 | different systems/sources, e.g. print them to STDOUT, write to a database, file or 13 | third-party system. 14 | 15 | Tasks are organized in a task tree. Each task can spawn multiple subtasks and there's always 16 | parent-child relationship between them. 17 | TaskTree is the main struct that holds configuration for how to spawn/log/report tasks. 
18 | 19 | Example 20 | 21 | ``` 22 | use ll::Task; 23 | 24 | async fn do_something() { 25 | ll::reporters::term_status::show(); 26 | 27 | let root_task = Task::create_new("root_task"); 28 | root_task.spawn("subtask_1", |task| async move { 29 | task.spawn_sync("subtask_2", |task| { 30 | // do other stuff 31 | Ok(()) 32 | })?; 33 | Ok(()) 34 | }).await.unwrap(); 35 | } 36 | ``` 37 | 38 | */ 39 | #![allow(clippy::new_without_default)] 40 | 41 | pub mod data; 42 | pub mod level; 43 | pub mod task; 44 | pub mod task_tree; 45 | pub mod uniq_id; 46 | pub mod utils; 47 | 48 | pub use task::Task; 49 | 50 | pub mod reporters; 51 | pub use task_tree::add_reporter; 52 | 53 | #[cfg(test)] 54 | mod tests; 55 | 56 | pub use data::{Data, DataEntry, DataValue}; 57 | pub use reporters::term_status::TermStatus; 58 | pub use reporters::text::StdioReporter; 59 | pub use reporters::text::StringReporter; 60 | pub use task_tree::ErrorFormatter; 61 | pub use task_tree::TaskInternal; 62 | pub use task_tree::TaskTree; 63 | -------------------------------------------------------------------------------- /ll/src/main.rs: -------------------------------------------------------------------------------- 1 | use ll::reporters::Level; 2 | use ll::Task; 3 | use std::sync::Arc; 4 | 5 | const FAIL_SOME: bool = false; 6 | 7 | #[tokio::main] 8 | async fn main() { 9 | let mut reporter = ll::reporters::StdioReporter::new(); 10 | reporter.log_task_start = true; 11 | // reporter.timestamp_format = Some(ll::reporters::text::TimestampFormat::Local); 12 | reporter.max_log_level = Level::L1; 13 | ll::add_reporter(Arc::new(reporter)); 14 | let root_task = Task::create_new("root #nostatus #l0"); 15 | ll::reporters::term_status::show(); 16 | ll::task_tree::TASK_TREE.set_force_flush(true); 17 | 18 | root_task 19 | .spawn("will_finish_fast #l3", |task| async move { 20 | tokio::spawn(async move { 21 | tokio::time::sleep(tokio::time::Duration::from_millis(6000)).await; 22 | task.spawn("will_spawn_after_parent_is_done", |_| async move { 23 | tokio::time::sleep(tokio::time::Duration::from_millis(10000)).await; 24 | 25 | Ok(()) 26 | }) 27 | .await 28 | .ok(); 29 | }); 30 | Ok(()) 31 | }) 32 | .await 33 | .ok(); 34 | 35 | Task::create_new("root2"); 36 | root_task 37 | .spawn("task_1 #randomtag", |task| async move { 38 | tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; 39 | let t_clone = task.clone(); 40 | 41 | tokio::spawn(async move { 42 | t_clone 43 | .spawn("detached_async_task", |task| async move { 44 | for i in 0..=1000 { 45 | task.progress(i, 1000); 46 | tokio::time::sleep(tokio::time::Duration::from_millis(8)).await; 47 | } 48 | Ok(()) 49 | }) 50 | .await 51 | .ok(); 52 | }); 53 | 54 | let (a, b) = tokio::join!( 55 | task.spawn("task_2 #l3", |task| async move { 56 | task.data("hey", 1); 57 | task.data("yo", "sup"); 58 | task.data("dontprint #dontprint", 4); 59 | 60 | task.create("task_2.5"); 61 | task.create("won't be printed #dontprint"); 62 | 63 | tokio::time::sleep(tokio::time::Duration::from_millis(11000)).await; 64 | Ok(()) 65 | }), 66 | task.spawn("task_3", |task| async move { 67 | for i in 10..=99 { 68 | let mut result = vec![]; 69 | let s = format!("{} {} <<<>>>", i, "-".repeat(i % 10)); 70 | result.push(s); 71 | // println!("{}", result.join("\n")); 72 | } 73 | 74 | tokio::time::sleep(tokio::time::Duration::from_millis(2750)).await; 75 | task.data_transitive("transitive", 555); 76 | 77 | task.spawn("task_4", |task| async move { 78 | task.spawn("will_error", |task| async move { 79 | task.spawn_sync("hello", 
|_task| Ok(()))?; 80 | tokio::spawn(async move { 81 | task.spawn("will run longer that parent", |_task| async move { 82 | tokio::time::sleep(tokio::time::Duration::from_millis(12000)) 83 | .await; 84 | Ok(()) 85 | }) 86 | .await 87 | }); 88 | 89 | if FAIL_SOME { 90 | anyhow::bail!("omg no i failed"); 91 | } 92 | #[allow(unreachable_code)] 93 | Ok(()) 94 | }) 95 | .await?; 96 | 97 | tokio::time::sleep(tokio::time::Duration::from_millis(3200)).await; 98 | Ok(()) 99 | }) 100 | .await?; 101 | tokio::time::sleep(tokio::time::Duration::from_millis(2750)).await; 102 | Ok(()) 103 | }), 104 | ); 105 | 106 | a.ok(); 107 | b?; 108 | tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; 109 | Ok(()) 110 | }) 111 | .await 112 | .map_err(|e| println!("{:?}", e)) 113 | .ok(); 114 | 115 | drop(root_task); 116 | tokio::time::sleep(tokio::time::Duration::from_millis(10000)).await; 117 | } 118 | -------------------------------------------------------------------------------- /ll/src/reporters/level.rs: -------------------------------------------------------------------------------- 1 | use std::default::Default; 2 | 3 | /// Logging levers, by default all tasks log as L1, but can be changed to 4 | /// l0, l2, l3 by using #l0 #l2 #l3 tags in the task name. 5 | /// Reporters can be set to ignore anything up from a certain level. 6 | #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)] 7 | pub enum Level { 8 | L0, 9 | L1, 10 | L2, 11 | L3, 12 | } 13 | 14 | impl Default for Level { 15 | fn default() -> Self { 16 | Self::L1 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /ll/src/reporters/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod level; 2 | pub mod term_status; 3 | pub mod text; 4 | pub mod utils; 5 | 6 | pub use level::Level; 7 | pub use term_status::TermStatus; 8 | pub use text::StdioReporter; 9 | pub use text::StringReporter; 10 | 11 | pub const DONTPRINT_TAG: &str = "dontprint"; 12 | 13 | use crate::task_tree::TaskInternal; 14 | use std::sync::Arc; 15 | 16 | pub trait Reporter: Send + Sync { 17 | fn task_start(&self, _task: Arc) {} 18 | fn task_end(&self, _task: Arc) {} 19 | fn task_progress(&self, _task: Arc) {} 20 | } 21 | -------------------------------------------------------------------------------- /ll/src/reporters/term_status.rs: -------------------------------------------------------------------------------- 1 | use super::Level; 2 | use crate::task_tree::{TaskInternal, TaskResult, TaskStatus, TaskTree, TASK_TREE}; 3 | use crate::uniq_id::UniqID; 4 | use anyhow::{Context, Result}; 5 | use colored::Colorize; 6 | use crossterm::{cursor, style, terminal}; 7 | use std::io::Write; 8 | use std::sync::Arc; 9 | use std::sync::RwLock; 10 | 11 | const NOSTATUS_TAG: &str = "nostatus"; 12 | 13 | lazy_static::lazy_static! { 14 | pub static ref TERM_STATUS: TermStatus = TermStatus::new(TASK_TREE.clone()); 15 | } 16 | 17 | pub fn show() { 18 | // Only enable it if it's a TTY terminal, otherwise output 19 | // can get super messy. 
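// (The check is done on stderr because that is where the status tree is drawn.)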
20 | if crossterm::tty::IsTty::is_tty(&std::io::stderr()) { 21 | TERM_STATUS.show(); 22 | } 23 | } 24 | 25 | pub fn hide() { 26 | TERM_STATUS.hide(); 27 | } 28 | 29 | #[derive(Clone)] 30 | pub struct TermStatus(Arc<RwLock<TermStatusInternal>>); 31 | 32 | impl TermStatus { 33 | fn new(task_tree: Arc<TaskTree>) -> Self { 34 | Self(Arc::new(RwLock::new(TermStatusInternal::new(task_tree)))) 35 | } 36 | 37 | pub fn show(&self) { 38 | let mut lock = self.0.write().unwrap(); 39 | if lock.enabled { 40 | return; 41 | } else { 42 | lock.enabled = true; 43 | } 44 | drop(lock); 45 | 46 | let t = self.clone(); 47 | std::thread::spawn(move || { 48 | loop { 49 | // This is dumb, but it gives regular `println!` macros and such 50 | // time to acquire a global mutex to print whatever they want to 51 | // print. Without it this function will release and acquire the 52 | // lock right away without letting anything print at all. 53 | std::thread::sleep(std::time::Duration::from_millis(1)); 54 | let stdout = std::io::stdout(); 55 | let stderr = std::io::stderr(); 56 | 57 | // Get both locks for stdout and stderr so nothing can print to 58 | // them while the status tree is displayed. If something prints 59 | // while the tree is there everything will get messed up, output 60 | // will be lost and parts of the tree will end up as random noise. 61 | let stdout_lock = stdout.lock(); 62 | let mut stderr_lock = stderr.lock(); 63 | 64 | let mut internal = t.0.write().unwrap(); 65 | if internal.enabled { 66 | internal.print(&mut stderr_lock).ok(); 67 | } else { 68 | break; 69 | } 70 | // STDIO is locked the whole time. 71 | // WARN: If there is heavy IO 72 | // happening this will obviously slow things down quite a bit. 73 | std::thread::sleep(std::time::Duration::from_millis(50)); 74 | 75 | if internal.enabled { 76 | internal.clear(&mut stderr_lock).ok(); 77 | } else { 78 | break; 79 | } 80 | 81 | drop(stdout_lock); 82 | drop(stderr_lock); 83 | } 84 | }); 85 | } 86 | 87 | pub fn hide(&self) { 88 | self.0.write().unwrap().enabled = false; 89 | } 90 | } 91 | 92 | /* 93 | Vec of indentations. Bool represents whether a vertical line needs to be 94 | at every point of the indentation, e.g.
95 | 96 | [▶] Root Task 97 | │ 98 | ├ [✓] Task 1 99 | │ ╰ [▶] Task 3 <-- vec[true, true] has line 100 | ╰ [✓] Task 1 101 | ╰ [⨯] Failed task <-- vec[false, true] no line 102 | */ 103 | type Depth = Vec; 104 | 105 | #[derive(Clone)] 106 | pub struct TermStatusInternal { 107 | current_height: usize, 108 | task_tree: Arc, 109 | pub max_log_level: Level, 110 | enabled: bool, 111 | } 112 | 113 | impl TermStatusInternal { 114 | fn new(task_tree: Arc) -> Self { 115 | Self { 116 | current_height: 0, 117 | task_tree, 118 | max_log_level: Level::default(), 119 | enabled: false, 120 | } 121 | } 122 | 123 | fn print(&mut self, stdio: &mut impl Write) -> Result<()> { 124 | let rows = self.make_status_rows()?; 125 | 126 | let height = rows.len(); 127 | 128 | if let (0, 0) = (height, self.current_height) { 129 | return Ok(()); 130 | } 131 | 132 | self.current_height = height; 133 | 134 | crossterm::execute!(stdio, style::Print("\n")).ok(); 135 | crossterm::execute!(stdio, style::Print(rows.join("\n"))).ok(); 136 | crossterm::execute!(stdio, style::Print("\n")).ok(); 137 | 138 | Ok(()) 139 | } 140 | 141 | fn make_status_rows(&self) -> Result> { 142 | let tree = self.task_tree.tree_internal.read().unwrap(); 143 | let child_to_parents = tree.child_to_parents(); 144 | let parent_to_children = tree.parent_to_children(); 145 | 146 | let mut stack: Vec<(UniqID, Depth)> = tree 147 | .root_tasks() 148 | .iter() 149 | .filter(|id| !child_to_parents.contains_key(id)) 150 | .map(|id| (*id, vec![])) 151 | .collect(); 152 | 153 | let mut rows = vec![]; 154 | while let Some((id, depth)) = stack.pop() { 155 | let task = tree.get_task(id).context("must be present")?; 156 | 157 | let dontprint = !self.should_print(task); 158 | 159 | let children_iter = parent_to_children.get(&id).into_iter().flatten().peekable(); 160 | let mut append_to_stack = vec![]; 161 | 162 | let last_visible_child = children_iter 163 | .clone() 164 | .filter(|id| tree.get_task(**id).map_or(false, |t| self.should_print(t))) 165 | .last(); 166 | 167 | // we still need to DFS the ones that we don't print to make sure 168 | // we're not skipping their children 169 | for subtask_id in children_iter { 170 | let mut new_depth = depth.clone(); 171 | // If we're not printing it, we're not adding the indent either 172 | // so this tasks children will become children of the parent task 173 | if !dontprint { 174 | new_depth.push(Some(subtask_id) != last_visible_child); 175 | } 176 | append_to_stack.push((*subtask_id, new_depth)); 177 | } 178 | 179 | // Since we're popping, we'll be going through children in reverse order, 180 | // so we need to counter that. 
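// e.g. children collected as [c1, c2, c3] are appended as [c3, c2, c1],
// so that `stack.pop()` visits c1 first.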
181 | append_to_stack.reverse(); 182 | stack.append(&mut append_to_stack); 183 | 184 | if !dontprint { 185 | rows.push(self.task_row(task, depth)?); 186 | } 187 | } 188 | 189 | let (_, term_height) = crossterm::terminal::size().unwrap_or((50, 50)); 190 | let max_height = term_height as usize - 2; 191 | 192 | if rows.len() > max_height { 193 | let trimmed = rows.len() - max_height; 194 | rows = rows.into_iter().take(max_height).collect(); 195 | rows.push(format!(".......{} more tasks.......", trimmed)) 196 | } 197 | 198 | Ok(rows) 199 | } 200 | 201 | fn should_print(&self, task: &TaskInternal) -> bool { 202 | let level = super::utils::parse_level(task); 203 | !task.tags.contains(NOSTATUS_TAG) && (level <= self.max_log_level) 204 | } 205 | 206 | fn task_row(&self, task_internal: &TaskInternal, mut depth: Depth) -> Result { 207 | /* 208 | 209 | [▶] Root Task 210 | │ 211 | ├ [✓] Task 1 212 | │ ╰ [▶] Task 3 213 | ├ [✓] Task 1 214 | ╰ [⨯] Failed task 215 | */ 216 | 217 | let indent = if let Some(last_indent) = depth.pop() { 218 | // Worst case utf8 symbol pre level is 4 bytes 219 | let mut indent = String::with_capacity(4 * depth.len()); 220 | for has_vertical_line in depth.into_iter() { 221 | if has_vertical_line { 222 | indent.push_str("│ "); 223 | } else { 224 | indent.push_str(" "); 225 | } 226 | } 227 | 228 | if last_indent { 229 | indent.push_str("├ "); 230 | } else { 231 | indent.push_str("╰ "); 232 | } 233 | 234 | indent 235 | } else { 236 | String::new() 237 | }; 238 | 239 | let status = match task_internal.status { 240 | TaskStatus::Running => " ▶ ".black().on_yellow(), 241 | TaskStatus::Finished(TaskResult::Success, _) => " ✓ ".black().on_green(), 242 | TaskStatus::Finished(TaskResult::Failure(_), _) => " x ".white().on_red(), 243 | }; 244 | 245 | let progress = make_progress(task_internal); 246 | 247 | let duration = match task_internal.status { 248 | TaskStatus::Finished(_, finished_at) => { 249 | finished_at.duration_since(task_internal.started_at) 250 | } 251 | _ => task_internal.started_at.elapsed(), 252 | }?; 253 | 254 | let secs = duration.as_secs(); 255 | let millis = (duration.as_millis() % 1000) / 100; 256 | let ts = format!(" [{}.{}s] ", secs, millis).dimmed(); 257 | 258 | Ok(format!( 259 | "{}{}{}{}{}", 260 | indent, status, ts, progress, task_internal.name 261 | )) 262 | } 263 | 264 | fn clear(&self, stdio: &mut impl Write) -> Result<()> { 265 | if self.current_height != 0 { 266 | for _ in 0..(self.current_height + 1) { 267 | crossterm::execute!(stdio, terminal::Clear(terminal::ClearType::CurrentLine)).ok(); 268 | crossterm::execute!(stdio, cursor::MoveUp(1)).ok(); 269 | } 270 | } 271 | 272 | Ok(()) 273 | } 274 | } 275 | 276 | fn make_progress(task: &TaskInternal) -> String { 277 | const PROGRESS_BAR_LEN: i64 = 30; 278 | 279 | if let Some((done, total)) = &task.progress { 280 | if *total == 0 { 281 | // otherwise we'll divide by 0 and it'll panic 282 | return String::new(); 283 | } 284 | let pct_done = (done * 100) / total; 285 | let done_blocks_len = std::cmp::min((PROGRESS_BAR_LEN * pct_done) / 100, PROGRESS_BAR_LEN); 286 | let todo_blocks_len = PROGRESS_BAR_LEN - done_blocks_len; 287 | let done_blocks = " ".repeat(done_blocks_len as usize).on_bright_green(); 288 | let todo_blocks = ".".repeat(todo_blocks_len as usize).on_black(); 289 | format!(" [{}{}] {}/{} ", done_blocks, todo_blocks, done, total) 290 | } else { 291 | String::new() 292 | } 293 | } 294 | -------------------------------------------------------------------------------- /ll/src/reporters/text.rs: 
-------------------------------------------------------------------------------- 1 | use super::Level; 2 | use super::DONTPRINT_TAG; 3 | use crate::task_tree::{TaskInternal, TaskResult, TaskStatus}; 4 | use chrono::prelude::*; 5 | use chrono::{DateTime, Local, Utc}; 6 | use colored::*; 7 | use std::sync::{Arc, Mutex, RwLock}; 8 | 9 | use super::Reporter; 10 | 11 | /// Simple drain that logs everything into STDOUT 12 | pub struct StdioReporter { 13 | pub timestamp_format: Option, 14 | /// By default this reporter writes to STDERR, 15 | /// this flag will make it write to STDOUT instead 16 | pub use_stdout: bool, 17 | /// Report every time a new task is started as well, not only when tasks are 18 | /// finished 19 | pub log_task_start: bool, 20 | pub max_log_level: Level, 21 | } 22 | 23 | // Similar to STDOUT drain, but instead logs everything into a string 24 | // that it owns that can later be inspected/dumped. 25 | #[derive(Clone)] 26 | pub struct StringReporter { 27 | pub output: Arc>, 28 | timestamp_format: Arc>, 29 | duration_format: Arc>, 30 | strip_ansi: bool, 31 | } 32 | 33 | #[derive(Clone, Copy)] 34 | pub enum TaskReportType { 35 | Start, 36 | End, 37 | } 38 | 39 | impl StdioReporter { 40 | pub fn new() -> Self { 41 | Self { 42 | timestamp_format: None, 43 | use_stdout: false, 44 | log_task_start: false, 45 | max_log_level: Level::default(), 46 | } 47 | } 48 | 49 | fn report(&self, task_internal: Arc, report_type: TaskReportType) { 50 | let level = super::utils::parse_level(&task_internal); 51 | 52 | if level <= self.max_log_level { 53 | if task_internal.tags.contains(DONTPRINT_TAG) { 54 | return; 55 | } 56 | 57 | let timestamp_format = self.timestamp_format.unwrap_or(TimestampFormat::UTC); 58 | let result = make_string( 59 | &task_internal, 60 | timestamp_format, 61 | DurationFormat::Milliseconds, 62 | report_type, 63 | ); 64 | 65 | if self.use_stdout { 66 | println!("{}", result); 67 | } else { 68 | eprintln!("{}", result); 69 | } 70 | } 71 | } 72 | } 73 | 74 | #[derive(Clone, Copy)] 75 | #[allow(clippy::upper_case_acronyms)] 76 | pub enum TimestampFormat { 77 | UTC, 78 | Local, 79 | None, 80 | Redacted, 81 | } 82 | 83 | #[derive(Clone, Copy)] 84 | pub enum DurationFormat { 85 | Milliseconds, 86 | None, 87 | } 88 | 89 | impl Reporter for StdioReporter { 90 | fn task_start(&self, task_internal: Arc) { 91 | if self.log_task_start { 92 | self.report(task_internal, TaskReportType::Start) 93 | } 94 | } 95 | 96 | fn task_end(&self, task_internal: Arc) { 97 | self.report(task_internal, TaskReportType::End) 98 | } 99 | } 100 | 101 | pub fn strip_ansi(s: &str) -> String { 102 | String::from_utf8( 103 | strip_ansi_escapes::strip(s).expect("Cant strip ANSI escape characters from a string"), 104 | ) 105 | .expect("not a utf8 string") 106 | } 107 | 108 | impl StringReporter { 109 | pub fn new() -> Self { 110 | Self { 111 | output: Arc::new(Mutex::new(String::new())), 112 | timestamp_format: Arc::new(RwLock::new(TimestampFormat::Redacted)), 113 | duration_format: Arc::new(RwLock::new(DurationFormat::None)), 114 | strip_ansi: true, 115 | } 116 | } 117 | 118 | fn report(&self, task_internal: Arc, report_type: TaskReportType) { 119 | if task_internal.tags.contains(DONTPRINT_TAG) { 120 | return; 121 | } 122 | let timestamp_format = *self.timestamp_format.read().unwrap(); 123 | let duration_format = *self.duration_format.read().unwrap(); 124 | let mut result = make_string( 125 | &task_internal, 126 | timestamp_format, 127 | duration_format, 128 | report_type, 129 | ); 130 | if self.strip_ansi { 
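// strip color codes so the accumulated string (used for snapshot assertions in tests) stays plain text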
131 | result = strip_ansi(&result); 132 | } 133 | let mut output = self.output.lock().expect("poisoned lock"); 134 | output.push_str(&result); 135 | output.push('\n'); 136 | } 137 | 138 | pub fn set_timestamp_format(&self, format: TimestampFormat) { 139 | *self.timestamp_format.write().unwrap() = format; 140 | } 141 | 142 | pub fn log_duration(&self, enabled: bool) { 143 | *self.duration_format.write().unwrap() = if enabled { 144 | DurationFormat::Milliseconds 145 | } else { 146 | DurationFormat::None 147 | }; 148 | } 149 | } 150 | 151 | impl Reporter for StringReporter { 152 | fn task_start(&self, task_internal: Arc) { 153 | self.report(task_internal, TaskReportType::Start); 154 | } 155 | 156 | fn task_end(&self, task_internal: Arc) { 157 | self.report(task_internal, TaskReportType::End); 158 | } 159 | } 160 | 161 | impl std::fmt::Display for StringReporter { 162 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 163 | let s = self.output.lock().expect("poisoned lock"); 164 | write!(f, "{}", &s) 165 | } 166 | } 167 | 168 | pub fn make_string( 169 | task_internal: &TaskInternal, 170 | timestamp_format: TimestampFormat, 171 | duration_format: DurationFormat, 172 | report_type: TaskReportType, 173 | ) -> String { 174 | let timestamp = format_timestamp(timestamp_format, task_internal, report_type); 175 | let status = format_status(task_internal, duration_format, report_type); 176 | let name = format_name(task_internal, report_type); 177 | let (mut data, error) = if let TaskReportType::End = report_type { 178 | (format_data(task_internal), format_error(task_internal)) 179 | } else { 180 | (String::new(), String::new()) 181 | }; 182 | 183 | if !data.is_empty() && task_internal.hide_errors.is_some() { 184 | data = format!("{}\n", data); 185 | } 186 | 187 | let result = format!("{}{}{}{}{}", timestamp, status, name, data, error); 188 | 189 | result 190 | } 191 | 192 | fn format_timestamp( 193 | timestamp_format: TimestampFormat, 194 | task_internal: &TaskInternal, 195 | report_type: TaskReportType, 196 | ) -> String { 197 | let datetime: Option> = match report_type { 198 | TaskReportType::Start => Some(task_internal.started_at.into()), 199 | TaskReportType::End => { 200 | if let TaskStatus::Finished(_, at) = task_internal.status { 201 | Some(at.into()) 202 | } else { 203 | None 204 | } 205 | } 206 | }; 207 | 208 | match timestamp_format { 209 | TimestampFormat::None => String::new(), 210 | TimestampFormat::Redacted => "[ ] ".to_string(), // for testing 211 | TimestampFormat::Local => { 212 | if let Some(datetime) = datetime { 213 | let datetime: DateTime = datetime.into(); 214 | let rounded = datetime.round_subsecs(0); 215 | let formatted = rounded.format("%I:%M:%S%p"); 216 | format!("[{}] ", formatted).dimmed().to_string() 217 | } else { 218 | "[ ]".to_string() 219 | } 220 | } 221 | TimestampFormat::UTC => { 222 | if let Some(datetime) = datetime { 223 | let rounded = datetime.round_subsecs(0); 224 | format!("[{:?}] ", rounded).dimmed().to_string() 225 | } else { 226 | "[ ]".to_string() 227 | } 228 | } 229 | } 230 | } 231 | 232 | fn format_name(task_internal: &TaskInternal, report_type: TaskReportType) -> ColoredString { 233 | match (&task_internal.status, report_type) { 234 | (TaskStatus::Finished(TaskResult::Failure(_), _), _) => { 235 | format!("[ERR] {}", task_internal.full_name()).red() 236 | } 237 | (_, TaskReportType::Start) => task_internal.full_name().yellow(), 238 | (_, TaskReportType::End) => task_internal.full_name().green(), 239 | } 240 | } 241 | 242 | fn 
format_status( 243 | task_internal: &TaskInternal, 244 | format: DurationFormat, 245 | report_type: TaskReportType, 246 | ) -> String { 247 | match report_type { 248 | TaskReportType::Start => format!("| {} | ", "STARTING".yellow()), 249 | // If it's the end of the task, we'll print a timestamp 250 | TaskReportType::End => { 251 | if let TaskStatus::Finished(_, finished_at) = task_internal.status { 252 | let d = finished_at.duration_since(task_internal.started_at).ok(); 253 | match (d, format) { 254 | (Some(d), DurationFormat::Milliseconds) => { 255 | format!("| {:>6}ms | ", d.as_millis()) 256 | .bold() 257 | .dimmed() 258 | .to_string() 259 | } 260 | (Some(_), DurationFormat::None) => String::new(), 261 | (None, _) => String::new(), 262 | } 263 | } else { 264 | String::new() 265 | } 266 | } 267 | } 268 | } 269 | 270 | fn format_data(task_internal: &TaskInternal) -> String { 271 | let mut result = String::new(); 272 | let mut data = vec![]; 273 | for (k, entry) in task_internal.all_data() { 274 | if entry.1.contains(DONTPRINT_TAG) { 275 | continue; 276 | } 277 | 278 | data.push(format!(" | {}: {}", k, entry.0).dimmed().to_string()); 279 | } 280 | 281 | if !data.is_empty() { 282 | result.push('\n'); 283 | result.push_str(&data.join("\n")); 284 | } 285 | result 286 | } 287 | 288 | fn format_error(task_internal: &TaskInternal) -> String { 289 | let mut result = String::new(); 290 | if let TaskStatus::Finished(TaskResult::Failure(error_msg), _) = &task_internal.status { 291 | if let Some(msg) = &task_internal.hide_errors { 292 | return msg.dimmed().red().to_string(); 293 | } 294 | result.push_str("\n |\n"); 295 | let error_log = error_msg 296 | .split('\n') 297 | .map(|line| format!(" | {}", line)) 298 | .collect::>() 299 | .join("\n"); 300 | result.push_str(&error_log); 301 | } 302 | result 303 | } 304 | -------------------------------------------------------------------------------- /ll/src/reporters/utils.rs: -------------------------------------------------------------------------------- 1 | use super::Level; 2 | use crate::TaskInternal; 3 | 4 | pub fn parse_level(task_internal: &TaskInternal) -> Level { 5 | let mut all_level_tags = vec![]; 6 | for tag in &task_internal.tags { 7 | match tag.as_str() { 8 | "l0" => all_level_tags.push(Level::L0), 9 | "l1" => all_level_tags.push(Level::L1), 10 | "l2" => all_level_tags.push(Level::L2), 11 | "l3" => all_level_tags.push(Level::L3), 12 | _ => (), 13 | } 14 | } 15 | 16 | all_level_tags.into_iter().min().unwrap_or(Level::L1) 17 | } 18 | -------------------------------------------------------------------------------- /ll/src/task.rs: -------------------------------------------------------------------------------- 1 | use crate::data::DataValue; 2 | use crate::task_tree::{TaskTree, TASK_TREE}; 3 | use crate::uniq_id::UniqID; 4 | use anyhow::Result; 5 | use std::future::Future; 6 | use std::sync::Arc; 7 | 8 | pub type MarkDoneOnDrop = bool; 9 | 10 | #[derive(Clone)] 11 | pub struct Task(pub(crate) Arc); 12 | 13 | pub(crate) struct TaskData { 14 | pub(crate) id: UniqID, 15 | pub(crate) task_tree: Arc, 16 | pub(crate) mark_done_on_drop: MarkDoneOnDrop, 17 | } 18 | 19 | impl Task { 20 | pub fn create_new(name: &str) -> Self { 21 | let id = TASK_TREE.create_task_internal(name, None); 22 | Self(Arc::new(TaskData { 23 | id, 24 | task_tree: TASK_TREE.clone(), 25 | mark_done_on_drop: true, 26 | })) 27 | } 28 | 29 | pub fn create(&self, name: &str) -> Self { 30 | let id = self.0.task_tree.create_task_internal(name, Some(self.0.id)); 31 | Self(Arc::new(TaskData 
{ 32 | id, 33 | task_tree: self.0.task_tree.clone(), 34 | mark_done_on_drop: true, 35 | })) 36 | } 37 | 38 | /// Spawn a new top level task, with no parent. 39 | /// This should usually be done in the very beginning of 40 | /// the process/application. 41 | pub async fn spawn_new(name: &str, f: F) -> Result 42 | where 43 | F: FnOnce(Task) -> FT, 44 | FT: Future> + Send, 45 | T: Send, 46 | { 47 | TASK_TREE.spawn(name.into(), f, None).await 48 | } 49 | 50 | pub async fn spawn>(&self, name: S, f: F) -> Result 51 | where 52 | F: FnOnce(Task) -> FT, 53 | FT: Future> + Send, 54 | T: Send, 55 | { 56 | self.0 57 | .task_tree 58 | .spawn(name.into(), f, Some(self.0.id)) 59 | .await 60 | } 61 | 62 | pub fn spawn_sync>(&self, name: S, f: F) -> Result 63 | where 64 | F: FnOnce(Task) -> Result, 65 | T: Send, 66 | { 67 | self.0.task_tree.spawn_sync(name.into(), f, Some(self.0.id)) 68 | } 69 | 70 | pub fn data>(&self, name: &str, data: D) { 71 | self.0.task_tree.add_data(self.0.id, name, data); 72 | } 73 | 74 | /// Get a piece of previously set data or transitive data. This can be 75 | /// useful if session/request tracking IDs need to be past to other loggers, 76 | /// e.g. when shelling out to another process that needs to set the same 77 | /// `session_id` inside so we can group the events together. 78 | pub fn get_data(&self, name: &str) -> Option { 79 | self.0.task_tree.get_data(self.0.id, name) 80 | } 81 | 82 | pub fn data_transitive>(&self, name: &str, data: D) { 83 | self.0 84 | .task_tree 85 | .add_data_transitive_for_task(self.0.id, name, data); 86 | } 87 | 88 | pub fn progress(&self, done: i64, total: i64) { 89 | self.0.task_tree.task_progress(self.0.id, done, total); 90 | } 91 | 92 | /// Reporters can use this flag to choose to not report errors. 93 | /// This is useful for cases where there's a large task chain and every 94 | /// single task reports a partial errors (that gets built up with each task) 95 | /// It would make sense to report it only once at the top level (thrift 96 | /// request, cli call, etc) and only mark other tasks. 97 | /// If set to Some, the message inside is what would be reported by default 98 | /// instead of reporting errors to avoid confusion (e.g. "error was hidden, 99 | /// see ...") 100 | /// see [hide_errors_default_msg()](crate::task_tree::TaskTree::hide_errors_default_msg) 101 | pub fn hide_error_msg(&self, msg: Option) { 102 | let msg = msg.map(Arc::new); 103 | self.0.task_tree.hide_error_msg_for_task(self.0.id, msg); 104 | } 105 | 106 | /// When errors occur, we attach task data to it in the description. 107 | /// If set to false, only task direct data will be attached and not 108 | /// transitive data. This is useful sometimes to remove the noise of 109 | /// transitive data appearing in every error in the chain (e.g. 
hostname) 110 | /// see [attach_transitive_data_to_errors_default()](crate::task_tree::TaskTree::attach_transitive_data_to_errors_default) 111 | pub fn attach_transitive_data_to_errors(&self, val: bool) { 112 | self.0 113 | .task_tree 114 | .attach_transitive_data_to_errors_for_task(self.0.id, val); 115 | } 116 | } 117 | 118 | impl Drop for TaskData { 119 | fn drop(&mut self) { 120 | if self.mark_done_on_drop { 121 | self.task_tree.mark_done(self.id, None); 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /ll/src/task_tree.rs: -------------------------------------------------------------------------------- 1 | use crate::data::{Data, DataEntry, DataValue}; 2 | use crate::reporters::Reporter; 3 | use crate::task::{Task, TaskData}; 4 | use crate::uniq_id::UniqID; 5 | use anyhow::{Context, Result}; 6 | use std::collections::{BTreeMap, BTreeSet, HashMap}; 7 | use std::future::Future; 8 | use std::sync::atomic::{AtomicBool, Ordering}; 9 | use std::sync::Arc; 10 | use std::sync::RwLock; 11 | use std::thread; 12 | use std::time::Duration; 13 | use std::time::SystemTime; 14 | 15 | lazy_static::lazy_static! { 16 | pub static ref TASK_TREE: Arc = TaskTree::new(); 17 | } 18 | 19 | pub fn add_reporter(reporter: Arc) { 20 | TASK_TREE.add_reporter(reporter); 21 | } 22 | 23 | pub trait ErrorFormatter: Send + Sync { 24 | fn format_error(&self, err: &anyhow::Error) -> String; 25 | } 26 | 27 | pub struct TaskTree { 28 | pub(crate) tree_internal: RwLock, 29 | /// If true, it will block the current thread until all task events are 30 | /// reported (e.g. written to STDOUT) 31 | force_flush: AtomicBool, 32 | } 33 | 34 | pub(crate) struct TaskTreeInternal { 35 | tasks_internal: BTreeMap, 36 | parent_to_children: BTreeMap>, 37 | child_to_parents: BTreeMap>, 38 | root_tasks: BTreeSet, 39 | reporters: Vec>, 40 | tasks_marked_for_deletion: HashMap, 41 | report_start: Vec, 42 | report_end: Vec, 43 | data_transitive: Data, 44 | remove_task_after_done_ms: u64, 45 | hide_errors_default_msg: Option>, 46 | attach_transitive_data_to_errors_default: bool, 47 | error_formatter: Option>, 48 | } 49 | 50 | #[derive(Clone)] 51 | pub struct TaskInternal { 52 | pub id: UniqID, 53 | pub name: String, 54 | pub parent_names: Vec, 55 | pub started_at: SystemTime, 56 | pub status: TaskStatus, 57 | pub data: Data, 58 | pub data_transitive: Data, 59 | pub tags: BTreeSet, 60 | /// optional tuple containing values indicating task progress, where 61 | /// first value is how many items finished and the second value is how many 62 | /// items there are total. E.g. if it's a task processing 10 pieces of work, 63 | /// (1, 10) would mean that 1 out of ten pieces is done. 
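/// Updated via `Task::progress`; e.g. this crate's `main.rs` calls `task.progress(i, 1000)`
/// inside a loop over 1000 items.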
64 | pub progress: Option<(i64, i64)>, 65 | pub hide_errors: Option>, 66 | pub attach_transitive_data_to_errors: bool, 67 | } 68 | 69 | #[derive(Clone)] 70 | pub enum TaskStatus { 71 | Running, 72 | Finished(TaskResult, SystemTime), 73 | } 74 | 75 | #[derive(Clone)] 76 | pub enum TaskResult { 77 | Success, 78 | Failure(String), 79 | } 80 | 81 | impl TaskTree { 82 | pub fn new() -> Arc { 83 | let s = Arc::new(Self { 84 | tree_internal: RwLock::new(TaskTreeInternal { 85 | tasks_internal: BTreeMap::new(), 86 | parent_to_children: BTreeMap::new(), 87 | child_to_parents: BTreeMap::new(), 88 | root_tasks: BTreeSet::new(), 89 | reporters: vec![], 90 | tasks_marked_for_deletion: HashMap::new(), 91 | report_start: vec![], 92 | report_end: vec![], 93 | data_transitive: Data::empty(), 94 | remove_task_after_done_ms: 0, 95 | hide_errors_default_msg: None, 96 | attach_transitive_data_to_errors_default: true, 97 | error_formatter: None, 98 | }), 99 | force_flush: AtomicBool::new(false), 100 | }); 101 | let clone = s.clone(); 102 | tokio::spawn(async move { 103 | loop { 104 | tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; 105 | let mut tree = clone.tree_internal.write().unwrap(); 106 | tree.garbage_collect(); 107 | } 108 | }); 109 | let clone = s.clone(); 110 | thread::spawn(move || loop { 111 | thread::sleep(std::time::Duration::from_millis(10)); 112 | clone.report_all(); 113 | }); 114 | 115 | s 116 | } 117 | 118 | pub fn set_force_flush(&self, enabled: bool) { 119 | self.force_flush.store(enabled, Ordering::SeqCst) 120 | } 121 | 122 | pub fn force_flush_enabled(&self) -> bool { 123 | self.force_flush.load(Ordering::SeqCst) 124 | } 125 | 126 | pub fn create_task(self: &Arc, name: &str) -> Task { 127 | let id = self.create_task_internal(name, None); 128 | Task(Arc::new(TaskData { 129 | id, 130 | task_tree: self.clone(), 131 | mark_done_on_drop: true, 132 | })) 133 | } 134 | 135 | pub fn add_reporter(&self, reporter: Arc) { 136 | self.tree_internal.write().unwrap().reporters.push(reporter); 137 | } 138 | 139 | fn pre_spawn(self: &Arc, name: String, parent: Option) -> Task { 140 | let task = Task(Arc::new(TaskData { 141 | id: self.create_task_internal(&name, parent), 142 | task_tree: self.clone(), 143 | mark_done_on_drop: false, 144 | })); 145 | self.maybe_force_flush(); 146 | task 147 | } 148 | 149 | fn post_spawn(self: &Arc, id: UniqID, result: Result) -> Result { 150 | let result = result.with_context(|| { 151 | let mut desc = String::from("[Task]"); 152 | if let Some(task_internal) = self.get_cloned_task(id) { 153 | desc.push_str(&format!(" {}", task_internal.name)); 154 | if task_internal.attach_transitive_data_to_errors { 155 | for (k, v) in task_internal.all_data() { 156 | desc.push_str(&format!("\n {}: {}", k, v.0)); 157 | } 158 | } else { 159 | for (k, v) in &task_internal.data.map { 160 | desc.push_str(&format!("\n {}: {}", k, v.0)); 161 | } 162 | }; 163 | if !desc.is_empty() { 164 | desc.push('\n'); 165 | } 166 | } 167 | desc 168 | }); 169 | let error_msg = if let Err(err) = &result { 170 | let formatter = { 171 | let formatter = self.tree_internal.read().unwrap().error_formatter.clone(); 172 | formatter 173 | }; 174 | if let Some(formatter) = formatter { 175 | Some(formatter.format_error(err)) 176 | } else { 177 | Some(format!("{:?}", err)) 178 | } 179 | } else { 180 | None 181 | }; 182 | self.mark_done(id, error_msg); 183 | self.maybe_force_flush(); 184 | result 185 | } 186 | 187 | pub fn spawn_sync( 188 | self: &Arc, 189 | name: String, 190 | f: F, 191 | parent: Option, 
192 | ) -> Result 193 | where 194 | F: FnOnce(Task) -> Result, 195 | T: Send, 196 | { 197 | let task = self.pre_spawn(name, parent); 198 | let id = task.0.id; 199 | let result = f(task); 200 | self.post_spawn(id, result) 201 | } 202 | 203 | pub(crate) async fn spawn( 204 | self: &Arc, 205 | name: String, 206 | f: F, 207 | parent: Option, 208 | ) -> Result 209 | where 210 | F: FnOnce(Task) -> FT, 211 | FT: Future> + Send, 212 | T: Send, 213 | { 214 | let task = self.pre_spawn(name, parent); 215 | let id = task.0.id; 216 | let result = f(task).await; 217 | self.post_spawn(id, result) 218 | } 219 | 220 | pub fn create_task_internal>( 221 | self: &Arc, 222 | name: S, 223 | parent: Option, 224 | ) -> UniqID { 225 | let mut tree = self.tree_internal.write().unwrap(); 226 | 227 | let mut parent_names = vec![]; 228 | let mut data_transitive = tree.data_transitive.clone(); 229 | let (name, tags) = crate::utils::extract_tags(name.into()); 230 | let id = UniqID::new(); 231 | if let Some(parent_task) = parent.and_then(|pid| tree.tasks_internal.get(&pid)) { 232 | parent_names = parent_task.parent_names.clone(); 233 | parent_names.push(parent_task.name.clone()); 234 | data_transitive.merge(&parent_task.data_transitive); 235 | let parent_id = parent_task.id; 236 | 237 | tree.parent_to_children 238 | .entry(parent_id) 239 | .or_insert_with(BTreeSet::new) 240 | .insert(id); 241 | tree.child_to_parents 242 | .entry(id) 243 | .or_insert_with(BTreeSet::new) 244 | .insert(parent_id); 245 | } else { 246 | tree.root_tasks.insert(id); 247 | } 248 | 249 | let task_internal = TaskInternal { 250 | status: TaskStatus::Running, 251 | name, 252 | parent_names, 253 | id, 254 | started_at: SystemTime::now(), 255 | data: Data::empty(), 256 | data_transitive, 257 | tags, 258 | progress: None, 259 | hide_errors: tree.hide_errors_default_msg.clone(), 260 | attach_transitive_data_to_errors: tree.attach_transitive_data_to_errors_default, 261 | }; 262 | 263 | tree.tasks_internal.insert(id, task_internal); 264 | tree.report_start.push(id); 265 | 266 | id 267 | } 268 | 269 | pub fn mark_done(&self, id: UniqID, error_message: Option) { 270 | let mut tree = self.tree_internal.write().unwrap(); 271 | if let Some(task_internal) = tree.tasks_internal.get_mut(&id) { 272 | task_internal.mark_done(error_message); 273 | tree.mark_for_gc(id); 274 | tree.report_end.push(id); 275 | } 276 | } 277 | 278 | pub fn add_data, D: Into>(&self, id: UniqID, key: S, value: D) { 279 | let mut tree = self.tree_internal.write().unwrap(); 280 | if let Some(task_internal) = tree.tasks_internal.get_mut(&id) { 281 | task_internal.data.add(key, value); 282 | } 283 | } 284 | 285 | pub fn get_data>(&self, id: UniqID, key: S) -> Option { 286 | let mut tree = self.tree_internal.write().unwrap(); 287 | if let Some(task_internal) = tree.tasks_internal.get_mut(&id) { 288 | let all_data: BTreeMap<_, _> = task_internal.all_data().collect(); 289 | return all_data.get(&key.into()).map(|de| de.0.clone()); 290 | } 291 | None 292 | } 293 | 294 | pub(crate) fn add_data_transitive_for_task, D: Into>( 295 | &self, 296 | id: UniqID, 297 | key: S, 298 | value: D, 299 | ) { 300 | let mut tree = self.tree_internal.write().unwrap(); 301 | if let Some(task_internal) = tree.tasks_internal.get_mut(&id) { 302 | task_internal.data_transitive.add(key, value); 303 | } 304 | } 305 | /// Reporters can use this flag to choose to not report errors. 
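/// (The built-in text reporters honor it: when `TaskInternal::hide_errors` is set they
/// print the replacement message instead of the full error chain.)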
306 | /// This is useful for cases where there's a large task chain and every 307 | /// single task reports a partial errors (that gets built up with each task) 308 | /// It would make sense to report it only once at the top level (thrift 309 | /// request, cli call, etc) and only mark other tasks. 310 | /// If set to Some, the message inside is what would be reported by default 311 | /// instead of reporting errors to avoid confusion (e.g. "error was hidden, 312 | /// see ...") 313 | pub fn hide_errors_default_msg>(&self, msg: Option) { 314 | let mut tree = self.tree_internal.write().unwrap(); 315 | let msg = msg.map(|msg| Arc::new(msg.into())); 316 | tree.hide_errors_default_msg = msg; 317 | } 318 | 319 | pub(crate) fn hide_error_msg_for_task(&self, id: UniqID, msg: Option>) { 320 | let mut tree = self.tree_internal.write().unwrap(); 321 | if let Some(task_internal) = tree.tasks_internal.get_mut(&id) { 322 | task_internal.hide_errors = msg; 323 | } 324 | } 325 | 326 | /// When errors occur, we attach task data to it in the description. 327 | /// If set to false, only task direct data will be attached and not 328 | /// transitive data. This is useful sometimes to remove the noise of 329 | /// transitive data appearing in every error in the chain (e.g. hostname) 330 | pub fn attach_transitive_data_to_errors_default(&self, val: bool) { 331 | let mut tree = self.tree_internal.write().unwrap(); 332 | tree.attach_transitive_data_to_errors_default = val; 333 | } 334 | 335 | pub(crate) fn attach_transitive_data_to_errors_for_task(&self, id: UniqID, val: bool) { 336 | let mut tree = self.tree_internal.write().unwrap(); 337 | if let Some(task_internal) = tree.tasks_internal.get_mut(&id) { 338 | task_internal.attach_transitive_data_to_errors = val; 339 | } 340 | } 341 | 342 | /// Add a custom error formatter to change how error messages look in 343 | /// reporters. 344 | /// Unfortunately it is not configurable per reporter, because errors 345 | /// normally don't implement `Clone` and it will be almost impossible to add 346 | /// reference counters to all errors in all chains 347 | pub fn set_error_formatter(&self, error_formatter: Option>) { 348 | let mut tree = self.tree_internal.write().unwrap(); 349 | tree.error_formatter = error_formatter; 350 | } 351 | 352 | /// Add transitive data to the task tree. This transitive data will be 353 | /// added to every task created in this task tree 354 | pub fn add_data_transitive, D: Into>(&self, key: S, value: D) { 355 | let mut tree = self.tree_internal.write().unwrap(); 356 | tree.data_transitive.add(key, value); 357 | } 358 | 359 | pub fn task_progress(&self, id: UniqID, done: i64, total: i64) { 360 | let mut tree = self.tree_internal.write().unwrap(); 361 | if let Some(task_internal) = tree.tasks_internal.get_mut(&id) { 362 | task_internal.progress = Some((done, total)); 363 | } 364 | } 365 | 366 | fn get_cloned_task(&self, id: UniqID) -> Option { 367 | let tree = self.tree_internal.read().unwrap(); 368 | tree.get_task(id).ok().cloned() 369 | } 370 | 371 | /// If force_flush set to true, this function will block the thread until everything 372 | /// is reported. Useful for cases when the process exits before all async events 373 | /// are reported and stuff is missing from stdout. 
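/// Force flushing is enabled via `TaskTree::set_force_flush`; this crate's own `main.rs`,
/// for example, calls `TASK_TREE.set_force_flush(true)` during startup.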
374 | pub fn maybe_force_flush(&self) { 375 | if self.force_flush.load(Ordering::SeqCst) { 376 | self.report_all(); 377 | } 378 | } 379 | 380 | pub fn report_all(&self) { 381 | let mut tree = self.tree_internal.write().unwrap(); 382 | let (start_tasks, end_tasks, reporters) = tree.get_tasks_and_reporters(); 383 | drop(tree); 384 | for reporter in reporters { 385 | for task in &start_tasks { 386 | reporter.task_start(task.clone()); 387 | } 388 | for task in &end_tasks { 389 | reporter.task_end(task.clone()); 390 | } 391 | } 392 | } 393 | } 394 | 395 | impl TaskTreeInternal { 396 | pub fn get_task(&self, id: UniqID) -> Result<&TaskInternal> { 397 | self.tasks_internal.get(&id).context("task must be present") 398 | } 399 | 400 | pub fn root_tasks(&self) -> &BTreeSet { 401 | &self.root_tasks 402 | } 403 | 404 | pub fn child_to_parents(&self) -> &BTreeMap> { 405 | &self.child_to_parents 406 | } 407 | 408 | pub fn parent_to_children(&self) -> &BTreeMap> { 409 | &self.parent_to_children 410 | } 411 | 412 | fn mark_for_gc(&mut self, id: UniqID) { 413 | let mut stack = vec![id]; 414 | 415 | let mut tasks_to_finished_status = BTreeMap::new(); 416 | 417 | while let Some(id) = stack.pop() { 418 | if let Some(task_internal) = self.tasks_internal.get(&id) { 419 | tasks_to_finished_status 420 | .insert(id, matches!(task_internal.status, TaskStatus::Finished(..))); 421 | } 422 | 423 | for child_id in self.parent_to_children.get(&id).into_iter().flatten() { 424 | stack.push(*child_id); 425 | } 426 | } 427 | 428 | if tasks_to_finished_status 429 | .iter() 430 | .all(|(_, finished)| *finished) 431 | { 432 | for id in tasks_to_finished_status.keys().copied() { 433 | self.tasks_marked_for_deletion 434 | .entry(id) 435 | .or_insert_with(SystemTime::now); 436 | } 437 | 438 | // This sub branch might have been holding other parent branches that 439 | // weren't able to be garbage collected because of this subtree. we'll go 440 | // level up and perform the same logic. 
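// i.e. look up this task's parents in `child_to_parents` and re-run the same check for each of them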
441 | let parents = self.child_to_parents.get(&id).cloned().unwrap_or_default(); 442 | for parent_id in parents { 443 | self.mark_for_gc(parent_id); 444 | } 445 | } 446 | } 447 | 448 | fn garbage_collect(&mut self) { 449 | let mut will_delete = vec![]; 450 | for (id, time) in &self.tasks_marked_for_deletion { 451 | if let Ok(elapsed) = time.elapsed() { 452 | if elapsed > Duration::from_millis(self.remove_task_after_done_ms) { 453 | will_delete.push(*id); 454 | } 455 | } 456 | } 457 | 458 | for id in will_delete { 459 | self.tasks_internal.remove(&id); 460 | self.parent_to_children.remove(&id); 461 | self.root_tasks.remove(&id); 462 | if let Some(parents) = self.child_to_parents.remove(&id) { 463 | for parent in parents { 464 | if let Some(children) = self.parent_to_children.get_mut(&parent) { 465 | children.remove(&id); 466 | } 467 | } 468 | } 469 | self.tasks_marked_for_deletion.remove(&id); 470 | } 471 | } 472 | 473 | #[allow(clippy::type_complexity)] 474 | fn get_tasks_and_reporters( 475 | &mut self, 476 | ) -> ( 477 | Vec>, 478 | Vec>, 479 | Vec>, 480 | ) { 481 | let mut start_ids = vec![]; 482 | std::mem::swap(&mut start_ids, &mut self.report_start); 483 | let mut end_ids = vec![]; 484 | std::mem::swap(&mut end_ids, &mut self.report_end); 485 | 486 | let mut start_tasks = vec![]; 487 | let mut end_tasks = vec![]; 488 | 489 | for id in start_ids { 490 | if let Ok(task_internal) = self.get_task(id) { 491 | start_tasks.push(Arc::new(task_internal.clone())); 492 | } 493 | } 494 | for id in end_ids { 495 | if let Ok(task_internal) = self.get_task(id) { 496 | end_tasks.push(Arc::new(task_internal.clone())); 497 | } 498 | } 499 | 500 | let reporters = self.reporters.clone(); 501 | 502 | (start_tasks, end_tasks, reporters) 503 | } 504 | } 505 | 506 | impl TaskInternal { 507 | pub(crate) fn mark_done(&mut self, error_message: Option) { 508 | let task_status = match error_message { 509 | None => TaskResult::Success, 510 | Some(msg) => TaskResult::Failure(msg), 511 | }; 512 | self.status = TaskStatus::Finished(task_status, SystemTime::now()); 513 | } 514 | 515 | pub fn full_name(&self) -> String { 516 | let mut full_name = String::new(); 517 | for parent_name in &self.parent_names { 518 | full_name.push_str(parent_name); 519 | full_name.push(':'); 520 | } 521 | full_name.push_str(&self.name); 522 | full_name 523 | } 524 | 525 | pub fn all_data( 526 | &self, 527 | ) -> std::iter::Chain< 528 | std::collections::btree_map::Iter, 529 | std::collections::btree_map::Iter, 530 | > { 531 | self.data.map.iter().chain(self.data_transitive.map.iter()) 532 | } 533 | } 534 | -------------------------------------------------------------------------------- /ll/src/tests/basic_test.rs: -------------------------------------------------------------------------------- 1 | use crate::{task_tree::TaskTree, ErrorFormatter, StringReporter}; 2 | use anyhow::Result; 3 | use k9::*; 4 | use std::{sync::Arc, time::Duration}; 5 | 6 | async fn sleep() { 7 | // just enough to drain the reporter tokio tasks 8 | tokio::time::sleep(Duration::from_millis(100)).await; 9 | } 10 | 11 | fn setup() -> (Arc, StringReporter) { 12 | let string_reporter = StringReporter::new(); 13 | let tt = TaskTree::new(); 14 | tt.add_reporter(Arc::new(string_reporter.clone())); 15 | (tt, string_reporter) 16 | } 17 | 18 | #[tokio::test] 19 | async fn basic_events_test() -> Result<()> { 20 | let (tt, s) = setup(); 21 | 22 | let root = tt.create_task("root"); 23 | 24 | root.spawn_sync("test", |_| { 25 | let _r = 1 + 1; 26 | Ok(()) 27 | })?; 28 | 29 | 
root.spawn_sync("test_with_data", |t| -> Result<()> { 30 | t.data("hello", "hi"); 31 | t.data("int", 5); 32 | t.data("float", 5.98); 33 | anyhow::bail!("here is error msg"); 34 | }) 35 | .ok(); 36 | 37 | root.spawn_sync("test_3", |_e| Ok(()))?; 38 | 39 | sleep().await; 40 | snapshot!( 41 | s.to_string(), 42 | " 43 | [ ] | STARTING | root 44 | [ ] | STARTING | root:test 45 | [ ] | STARTING | [ERR] root:test_with_data 46 | [ ] | STARTING | root:test_3 47 | [ ] root:test 48 | [ ] [ERR] root:test_with_data 49 | | float: 5.98 50 | | hello: hi 51 | | int: 5 52 | | 53 | | [Task] test_with_data 54 | | float: 5.98 55 | | hello: hi 56 | | int: 5 57 | | 58 | | 59 | | Caused by: 60 | | here is error msg 61 | [ ] root:test_3 62 | 63 | " 64 | ); 65 | 66 | Ok(()) 67 | } 68 | 69 | #[tokio::test] 70 | async fn error_chain_test() -> Result<()> { 71 | let (tt, s) = setup(); 72 | 73 | let root = tt.create_task("root"); 74 | let result = root.spawn_sync("top_level", |t| { 75 | t.data("top_level_data", 5); 76 | 77 | t.spawn_sync("1_level", |t| { 78 | t.data("1_level_data", 9); 79 | t.spawn_sync("2_level", |_| { 80 | anyhow::ensure!(false, "oh noes, this fails"); 81 | Ok(()) 82 | }) 83 | })?; 84 | Ok(()) 85 | }); 86 | 87 | sleep().await; 88 | snapshot!( 89 | format!("{:?}", result.unwrap_err()), 90 | " 91 | [Task] top_level 92 | top_level_data: 5 93 | 94 | 95 | Caused by: 96 | 0: [Task] 1_level 97 | 1_level_data: 9 98 | 99 | 1: [Task] 2_level 100 | 101 | 2: oh noes, this fails 102 | " 103 | ); 104 | 105 | snapshot!( 106 | s.to_string(), 107 | " 108 | [ ] | STARTING | root 109 | [ ] | STARTING | [ERR] root:top_level 110 | [ ] | STARTING | [ERR] root:top_level:1_level 111 | [ ] | STARTING | [ERR] root:top_level:1_level:2_level 112 | [ ] [ERR] root:top_level:1_level:2_level 113 | | 114 | | [Task] 2_level 115 | | 116 | | 117 | | Caused by: 118 | | oh noes, this fails 119 | [ ] [ERR] root:top_level:1_level 120 | | 1_level_data: 9 121 | | 122 | | [Task] 1_level 123 | | 1_level_data: 9 124 | | 125 | | 126 | | Caused by: 127 | | 0: [Task] 2_level 128 | | 129 | | 1: oh noes, this fails 130 | [ ] [ERR] root:top_level 131 | | top_level_data: 5 132 | | 133 | | [Task] top_level 134 | | top_level_data: 5 135 | | 136 | | 137 | | Caused by: 138 | | 0: [Task] 1_level 139 | | 1_level_data: 9 140 | | 141 | | 1: [Task] 2_level 142 | | 143 | | 2: oh noes, this fails 144 | 145 | " 146 | ); 147 | Ok(()) 148 | } 149 | 150 | #[tokio::test] 151 | async fn error_chain_test_no_transitive() -> Result<()> { 152 | let (tt, s) = setup(); 153 | 154 | tt.attach_transitive_data_to_errors_default(false); 155 | tt.add_data_transitive("transitive_data", "transitive_value"); 156 | let root = tt.create_task("root"); 157 | let result = root.spawn_sync("top_level", |t| { 158 | t.data("top_level_data", 5); 159 | 160 | t.spawn_sync("1_level", |t| { 161 | t.data("1_level_data", 9); 162 | t.attach_transitive_data_to_errors(true); 163 | t.spawn_sync("2_level", |_| { 164 | anyhow::ensure!(false, "oh noes, this fails"); 165 | Ok(()) 166 | }) 167 | })?; 168 | Ok(()) 169 | }); 170 | 171 | sleep().await; 172 | snapshot!( 173 | format!("{:?}", result.unwrap_err()), 174 | " 175 | [Task] top_level 176 | top_level_data: 5 177 | 178 | 179 | Caused by: 180 | 0: [Task] 1_level 181 | 1_level_data: 9 182 | transitive_data: transitive_value 183 | 184 | 1: [Task] 2_level 185 | 186 | 2: oh noes, this fails 187 | " 188 | ); 189 | 190 | snapshot!( 191 | s.to_string(), 192 | " 193 | [ ] | STARTING | root 194 | [ ] | STARTING | [ERR] root:top_level 195 | [ ] | STARTING | [ERR] 
root:top_level:1_level 196 | [ ] | STARTING | [ERR] root:top_level:1_level:2_level 197 | [ ] [ERR] root:top_level:1_level:2_level 198 | | transitive_data: transitive_value 199 | | 200 | | [Task] 2_level 201 | | 202 | | 203 | | Caused by: 204 | | oh noes, this fails 205 | [ ] [ERR] root:top_level:1_level 206 | | 1_level_data: 9 207 | | transitive_data: transitive_value 208 | | 209 | | [Task] 1_level 210 | | 1_level_data: 9 211 | | transitive_data: transitive_value 212 | | 213 | | 214 | | Caused by: 215 | | 0: [Task] 2_level 216 | | 217 | | 1: oh noes, this fails 218 | [ ] [ERR] root:top_level 219 | | top_level_data: 5 220 | | transitive_data: transitive_value 221 | | 222 | | [Task] top_level 223 | | top_level_data: 5 224 | | 225 | | 226 | | Caused by: 227 | | 0: [Task] 1_level 228 | | 1_level_data: 9 229 | | transitive_data: transitive_value 230 | | 231 | | 1: [Task] 2_level 232 | | 233 | | 2: oh noes, this fails 234 | 235 | " 236 | ); 237 | Ok(()) 238 | } 239 | 240 | #[tokio::test] 241 | async fn error_chain_test_hide_errors() -> Result<()> { 242 | let (tt, s) = setup(); 243 | 244 | tt.hide_errors_default_msg(Some(" ")); 245 | 246 | let root = tt.create_task("root"); 247 | let result = root.spawn_sync("top_level", |t| { 248 | t.hide_error_msg(None); 249 | t.data("top_level_data", 5); 250 | 251 | t.spawn_sync("1_level", |t| { 252 | t.data("1_level_data", 9); 253 | t.spawn_sync("2_level", |_| { 254 | anyhow::ensure!(false, "oh noes, this fails"); 255 | Ok(()) 256 | }) 257 | })?; 258 | Ok(()) 259 | }); 260 | 261 | sleep().await; 262 | snapshot!( 263 | format!("{:?}", result.unwrap_err()), 264 | " 265 | [Task] top_level 266 | top_level_data: 5 267 | 268 | 269 | Caused by: 270 | 0: [Task] 1_level 271 | 1_level_data: 9 272 | 273 | 1: [Task] 2_level 274 | 275 | 2: oh noes, this fails 276 | " 277 | ); 278 | 279 | snapshot!( 280 | s.to_string(), 281 | " 282 | [ ] | STARTING | root 283 | [ ] | STARTING | [ERR] root:top_level 284 | [ ] | STARTING | [ERR] root:top_level:1_level 285 | [ ] | STARTING | [ERR] root:top_level:1_level:2_level 286 | [ ] [ERR] root:top_level:1_level:2_level 287 | [ ] [ERR] root:top_level:1_level 288 | | 1_level_data: 9 289 | 290 | [ ] [ERR] root:top_level 291 | | top_level_data: 5 292 | | 293 | | [Task] top_level 294 | | top_level_data: 5 295 | | 296 | | 297 | | Caused by: 298 | | 0: [Task] 1_level 299 | | 1_level_data: 9 300 | | 301 | | 1: [Task] 2_level 302 | | 303 | | 2: oh noes, this fails 304 | 305 | " 306 | ); 307 | Ok(()) 308 | } 309 | 310 | #[tokio::test] 311 | async fn error_chain_test_error_formatter() -> Result<()> { 312 | let (tt, s) = setup(); 313 | 314 | tt.hide_errors_default_msg(Some(" ")); 315 | 316 | struct CustomFormatter {} 317 | 318 | impl ErrorFormatter for CustomFormatter { 319 | fn format_error(&self, err: &anyhow::Error) -> String { 320 | err.chain() 321 | .into_iter() 322 | .rev() 323 | .enumerate() 324 | .map(|(i, e)| format!("{} --> {}", i, e.to_string().trim())) 325 | .collect::<Vec<_>>() 326 | .join("\n") 327 | } 328 | } 329 | 330 | tt.set_error_formatter(Some(Arc::new(CustomFormatter {}))); 331 | let root = tt.create_task("root"); 332 | let result = root.spawn_sync("top_level", |t| { 333 | t.hide_error_msg(None); 334 | t.spawn_sync("random_stuff", |_| Ok(()))?; 335 | t.data("top_level_data", 5); 336 | 337 | t.spawn_sync("1_level", |t| { 338 | t.data("1_level_data", 9); 339 | t.spawn_sync("2_level", |_| { 340 | anyhow::ensure!(false, "oh noes, this fails"); 341 | Ok(()) 342 | }) 343 | })?; 344 | Ok(()) 345 | }); 346 | 347 | sleep().await; 348 | 
snapshot!( 349 | format!("{:?}", result.unwrap_err()), 350 | " 351 | [Task] top_level 352 | top_level_data: 5 353 | 354 | 355 | Caused by: 356 | 0: [Task] 1_level 357 | 1_level_data: 9 358 | 359 | 1: [Task] 2_level 360 | 361 | 2: oh noes, this fails 362 | " 363 | ); 364 | 365 | snapshot!( 366 | s.to_string(), 367 | " 368 | [ ] | STARTING | root 369 | [ ] | STARTING | [ERR] root:top_level 370 | [ ] | STARTING | root:top_level:random_stuff 371 | [ ] | STARTING | [ERR] root:top_level:1_level 372 | [ ] | STARTING | [ERR] root:top_level:1_level:2_level 373 | [ ] root:top_level:random_stuff 374 | [ ] [ERR] root:top_level:1_level:2_level 375 | [ ] [ERR] root:top_level:1_level 376 | | 1_level_data: 9 377 | 378 | [ ] [ERR] root:top_level 379 | | top_level_data: 5 380 | | 381 | | 0 --> oh noes, this fails 382 | | 1 --> [Task] 2_level 383 | | 2 --> [Task] 1_level 384 | | 1_level_data: 9 385 | | 3 --> [Task] top_level 386 | | top_level_data: 5 387 | 388 | " 389 | ); 390 | Ok(()) 391 | } 392 | 393 | #[tokio::test] 394 | async fn logger_data_test() -> Result<()> { 395 | let (tt, s) = setup(); 396 | tt.add_data_transitive("tree_transitive_data", 5); 397 | 398 | let root = tt.create_task("root"); 399 | 400 | let t1 = root.create("t1"); 401 | t1.data_transitive("process_id", 123); 402 | 403 | t1.spawn_sync("has_process_id", |_| Ok(()))?; 404 | 405 | let t2 = t1.create("t2"); 406 | t2.data_transitive("request_id", 234); 407 | t2.spawn_sync("has_process_and_request_id", |_| Ok(()))?; 408 | 409 | let t3 = t2.create("t3"); 410 | t3.data_transitive("request_id #dontprint", 592); 411 | t3.spawn_sync("wont_print_request_id", |_| Ok(()))?; 412 | 413 | let t4 = t3.create("t4"); 414 | t4.spawn_sync("wont_print_request_id", |task| { 415 | task.data("hello", "meow"); 416 | snapshot!( 417 | format!("{:?}", task.get_data("tree_transitive_data")), 418 | "Some(Int(5))" 419 | ); 420 | snapshot!( 421 | format!("{:?}", task.get_data("hello")), 422 | r#"Some(String("meow"))"# 423 | ); 424 | snapshot!( 425 | format!("{:?}", task.get_data("this_data_doesnt_exist")), 426 | "None" 427 | ); 428 | snapshot!( 429 | task.get_data("tree_transitive_data").unwrap().to_string(), 430 | "5" 431 | ); 432 | Ok(()) 433 | })?; 434 | 435 | sleep().await; 436 | snapshot!( 437 | s.to_string(), 438 | " 439 | [ ] | STARTING | root 440 | [ ] | STARTING | root:t1 441 | [ ] | STARTING | root:t1:has_process_id 442 | [ ] | STARTING | root:t1:t2 443 | [ ] | STARTING | root:t1:t2:has_process_and_request_id 444 | [ ] | STARTING | root:t1:t2:t3 445 | [ ] | STARTING | root:t1:t2:t3:wont_print_request_id 446 | [ ] | STARTING | root:t1:t2:t3:t4 447 | [ ] | STARTING | root:t1:t2:t3:t4:wont_print_request_id 448 | [ ] root:t1:has_process_id 449 | | process_id: 123 450 | | tree_transitive_data: 5 451 | [ ] root:t1:t2:has_process_and_request_id 452 | | process_id: 123 453 | | request_id: 234 454 | | tree_transitive_data: 5 455 | [ ] root:t1:t2:t3:wont_print_request_id 456 | | process_id: 123 457 | | tree_transitive_data: 5 458 | [ ] root:t1:t2:t3:t4:wont_print_request_id 459 | | hello: meow 460 | | process_id: 123 461 | | tree_transitive_data: 5 462 | 463 | " 464 | ); 465 | Ok(()) 466 | } 467 | 468 | #[tokio::test] 469 | async fn async_test() -> Result<()> { 470 | let (tt, s) = setup(); 471 | let root = tt.create_task("root"); 472 | 473 | root.spawn("async_event", |e| async move { 474 | e.data("async_data", 5); 475 | let block = async {}; 476 | block.await; 477 | Ok(()) 478 | }) 479 | .await?; 480 | 481 | sleep().await; 482 | snapshot!( 483 | s.to_string(), 484 | " 
485 | [ ] | STARTING | root 486 | [ ] | STARTING | root:async_event 487 | [ ] root:async_event 488 | | async_data: 5 489 | 490 | " 491 | ); 492 | Ok(()) 493 | } 494 | 495 | // #[test] 496 | // fn custom_drain_test() { 497 | // let s = Arc::new(Mutex::new(String::new())); 498 | // struct AnalyticsDBDrain(Arc<Mutex<String>>); 499 | 500 | // impl ll::Drain for AnalyticsDBDrain { 501 | // fn log_event(&self, e: &ll::Event) { 502 | // let mut s = self.0.lock().unwrap(); 503 | // s.push_str(&e.name); 504 | // s.push(' '); 505 | // for (k, entry) in &e.data.map { 506 | // let v = &entry.0; 507 | // let tags = &entry.1; 508 | // s.push_str(&format!("{:?}", tags)); 509 | // match v { 510 | // ll::DataValue::Int(i) => s.push_str(&format!("{}: int: {}", k, i)), 511 | // _ => s.push_str(&format!("{}: {:?}", k, v)), 512 | // } 513 | // } 514 | // } 515 | // } 516 | 517 | // let mut l = ll::Logger::stdout(); 518 | // let drain = Arc::new(AnalyticsDBDrain(s.clone())); 519 | // l.add_drain(drain); 520 | 521 | // l.event("some_event #some_tag", |_| Ok(())).unwrap(); 522 | 523 | // l.event("other_event", |e| { 524 | // e.add_data("data #dontprint", 1); 525 | // Ok(()) 526 | // }) 527 | // .unwrap(); 528 | 529 | // snapshot!( 530 | // s.lock().unwrap().clone(), 531 | // "some_event other_event {\"dontprint\"}data: int: 1" 532 | // ); 533 | // } 534 | 535 | // #[test] 536 | // fn nested_loggers_test() -> Result<()> { 537 | // let (mut l, test_drain) = setup(); 538 | 539 | // l.add_data("process_id", 123); 540 | // l.event("has_process_id", |_| Ok(()))?; 541 | 542 | // let l2 = l.nest("my_app"); 543 | // l2.event("some_app_event", |_| Ok(()))?; 544 | 545 | // let mut l3 = l2.nest("db"); 546 | // l3.add_data("db_connection_id", 234); 547 | // l3.event("some_db_event", |_| Ok(()))?; 548 | 549 | // l2.event("another_app_event", |_| Ok(()))?; 550 | 551 | // snapshot!( 552 | // test_drain.to_string(), 553 | // " 554 | 555 | // [ ] has_process_id 556 | // | process_id: 123 557 | // [ ] my_app:some_app_event 558 | // | process_id: 123 559 | // [ ] my_app:db:some_db_event 560 | // | db_connection_id: 234 561 | // | process_id: 123 562 | // [ ] my_app:another_app_event 563 | // | process_id: 123 564 | 565 | // " 566 | // ); 567 | // Ok(()) 568 | // } 569 | 570 | // #[tokio::test] 571 | // async fn global_log_functions() -> Result<()> { 572 | // let (mut l, test_drain) = setup(); 573 | 574 | // l.add_data("process_id", 123); 575 | // ll::event(&l, "some_event", |_| Ok(()))?; 576 | 577 | // let l2 = l.nest("hello"); 578 | 579 | // ll::async_event(&l2, "async_event", |e| async move { 580 | // e.add_data("async_data", true); 581 | // Ok(()) 582 | // }) 583 | // .await?; 584 | 585 | // snapshot!( 586 | // test_drain.to_string(), 587 | // " 588 | 589 | // [ ] some_event 590 | // | process_id: 123 591 | // [ ] hello:async_event 592 | // | async_data: true 593 | // | process_id: 123 594 | 595 | // " 596 | // ); 597 | // Ok(()) 598 | // } 599 | 600 | // #[tokio::test] 601 | // async fn nested_events_test() -> Result<()> { 602 | // let (mut l, test_drain) = setup(); 603 | 604 | // l.add_data("process_id", 123); 605 | // ll::event(&l, "some_event", |e| { 606 | // e.event("some_nested_event", |e| { 607 | // e.add_data("nested_data", true); 608 | // Ok(()) 609 | // })?; 610 | // Ok(()) 611 | // })?; 612 | 613 | // l.async_event("async_event", |e| async move { 614 | // e.add_data("async_data", true); 615 | // e.async_event("nested_async_event", |e| async move { 616 | // e.add_data("nested_async_data", false); 617 | // Ok(()) 618 | // }) 619 | // 
.await?; 620 | // Ok(()) 621 | // }) 622 | // .await?; 623 | 624 | // snapshot!( 625 | // test_drain.to_string(), 626 | // " 627 | 628 | // [ ] some_nested_event 629 | // | nested_data: true 630 | // | process_id: 123 631 | // [ ] some_event 632 | // | process_id: 123 633 | // [ ] nested_async_event 634 | // | nested_async_data: false 635 | // | process_id: 123 636 | // [ ] async_event 637 | // | async_data: true 638 | // | process_id: 123 639 | 640 | // " 641 | // ); 642 | // Ok(()) 643 | // } 644 | -------------------------------------------------------------------------------- /ll/src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod basic_test; 2 | -------------------------------------------------------------------------------- /ll/src/uniq_id.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::sync::atomic::{AtomicU64, Ordering}; 3 | 4 | lazy_static::lazy_static! { 5 | static ref INCREMENTAL_UNIQ_ID: AtomicU64 = AtomicU64::new(0); 6 | } 7 | #[derive(Clone, Copy, Hash, PartialOrd, PartialEq, Ord, Eq, Debug)] 8 | pub struct UniqID(u64); 9 | 10 | impl UniqID { 11 | pub fn new() -> Self { 12 | UniqID(INCREMENTAL_UNIQ_ID.fetch_add(1, Ordering::SeqCst)) 13 | } 14 | } 15 | 16 | impl fmt::Display for UniqID { 17 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 18 | write!(f, "{}", self.0) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /ll/src/utils.rs: -------------------------------------------------------------------------------- 1 | use crate::level::Level; 2 | use std::collections::{BTreeSet, LinkedList}; 3 | 4 | // Extract tags encoded in the key. we do it by using hashtags. 5 | // e.g. 
we can add them to the event like this: 6 | // `some.event#dont_print#trace` => will result in the event called 7 | // `some.event` and a set of tags ['dont_print', 'trace'] 8 | pub(crate) fn extract_tags(string_with_tags: String) -> (String, BTreeSet<String>) { 9 | let key_and_tags = string_with_tags 10 | .split('#') 11 | .map(|s| s.trim()) 12 | .filter(|s| !s.is_empty()) 13 | .collect::<Vec<_>>(); 14 | 15 | if key_and_tags.len() < 2 { 16 | return (string_with_tags, BTreeSet::new()); 17 | } 18 | 19 | let mut key_and_tags = key_and_tags 20 | .iter() 21 | .map(|&s| s.to_string()) 22 | .collect::<LinkedList<_>>(); 23 | 24 | if let Some(key) = key_and_tags.pop_front() { 25 | // If that was the only element, treat it as the key with no tags 26 | if key_and_tags.is_empty() { 27 | return (key, BTreeSet::new()); 28 | } 29 | 30 | return (key, key_and_tags.into_iter().collect()); 31 | } 32 | 33 | // If something went wrong and we had an empty list after the split, we'll 34 | // just return the key as it was given to us 35 | (string_with_tags, BTreeSet::new()) 36 | } 37 | 38 | pub(crate) fn extract_log_level_from_tags(tags: &BTreeSet<String>) -> Option<Level> { 39 | let mut result_level = None; 40 | 41 | for tag in tags { 42 | let found_level = match tag.as_ref() { 43 | "info" => Some(Level::Info), 44 | "trace" => Some(Level::Trace), 45 | "debug" => Some(Level::Debug), 46 | _ => None, 47 | }; 48 | 49 | if let Some(found_level) = found_level { 50 | if let Some(entry_level_inner) = result_level { 51 | // If more than one tag is present, we take the lowest 52 | result_level = Some(std::cmp::min(found_level, entry_level_inner)) 53 | } else { 54 | result_level = Some(found_level) 55 | } 56 | } 57 | } 58 | 59 | result_level 60 | } 61 | 62 | #[cfg(test)] 63 | mod tests { 64 | use super::*; 65 | use k9::*; 66 | 67 | #[test] 68 | fn test_tags_extraction() { 69 | let mut result = String::new(); 70 | 71 | let events = vec![ 72 | "some.event#dont_print", 73 | "#dont_print", 74 | "another.event#dont_print#trace#dont_save", 75 | "#blessed#goals", 76 | "", 77 | "event #hey #another tag #hi", 78 | "fancy.event#debug", 79 | "fancy.event#info", 80 | "many.levels #info #debug #trace", 81 | "many.levels # #debug #trace", 82 | ]; 83 | 84 | for event in events { 85 | let (key, tags) = extract_tags(event.to_owned()); 86 | let level = extract_log_level_from_tags(&tags); 87 | result.push_str(&format!( 88 | "{:.<45} {:.<15} => {:.<35} | {:?}\n", 89 | event, 90 | key, 91 | tags.into_iter().collect::<Vec<_>>().join(", "), 92 | level 93 | )) 94 | } 95 | 96 | snapshot!(result, " 97 | some.event#dont_print........................ some.event..... => dont_print......................... | None 98 | #dont_print.................................. #dont_print.... => ................................... | None 99 | another.event#dont_print#trace#dont_save..... another.event.. => dont_print, dont_save, trace....... | Some(Trace) 100 | #blessed#goals............................... blessed........ => goals.............................. | None 101 | ............................................. ............... => ................................... | None 102 | event #hey #another tag #hi............... event.......... => another tag, hey, hi............... | None 103 | fancy.event#debug............................ fancy.event.... => debug.............................. | Some(Debug) 104 | fancy.event#info............................. fancy.event.... => info............................... | Some(Info) 105 | many.levels #info #debug #trace.............. many.levels.... => debug, info, trace................. 
| Some(Info) 106 | many.levels # #debug #trace.................. many.levels.... => debug, trace....................... | Some(Debug) 107 | 108 | "); 109 | } 110 | } 111 | --------------------------------------------------------------------------------
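For orientation, below is a minimal usage sketch of the task-tree API exercised by ll/src/tests/basic_test.rs above. It is not a file in this repository: the `ll::task_tree::TaskTree` and `ll::StringReporter` paths are assumptions inferred from the test import `use crate::{task_tree::TaskTree, ErrorFormatter, StringReporter};`, the binary wiring relies on the tokio and anyhow dependencies declared in ll/Cargo.toml, and the task names and data keys are made up for illustration.

use std::sync::Arc;
use std::time::Duration;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Assumed paths; the tests reach these types via `crate::...`.
    let reporter = ll::StringReporter::new();
    let tt = ll::task_tree::TaskTree::new();
    tt.add_reporter(Arc::new(reporter.clone()));

    let root = tt.create_task("root");

    // Synchronous child task with attached data (hypothetical key/value).
    root.spawn_sync("load_config", |t| {
        t.data("attempts", 1);
        Ok(())
    })?;

    // Async child task, mirroring `async_test` above.
    root.spawn("fetch", |t| async move {
        t.data("bytes", 1024);
        Ok(())
    })
    .await?;

    // Give the reporter's background tasks a moment to drain, as the tests do.
    tokio::time::sleep(Duration::from_millis(100)).await;
    let output = reporter.to_string();
    println!("{}", output);
    Ok(())
}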