├── .editorconfig ├── .github └── workflows │ └── rust.yml ├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── benches └── benchmarks.rs ├── dev └── docker-compose.yml ├── examples ├── shared │ └── mod.rs ├── simple_tcp.rs └── simple_udp.rs └── src ├── backends ├── mod.rs ├── null.rs ├── tcp.rs └── udp.rs ├── errors.rs ├── level.rs ├── lib.rs ├── logger.rs ├── message ├── chunked_message.rs ├── compression.rs ├── mod.rs └── wire_message.rs └── util.rs /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*.rs] 4 | end_of_line = lf 5 | charset = utf-8 6 | trim_trailing_whitespace = true 7 | indent_style = space 8 | indent_size = 4 -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - uses: actions/checkout@v1 12 | - name: Build 13 | run: cargo build --verbose 14 | - name: Run tests 15 | run: cargo test --verbose 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 7 | Cargo.lock 8 | 9 | 10 | # IDE specific 11 | .idea -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - stable 4 | - beta 5 | - nightly 6 | matrix: 7 | allow_failures: 8 | - rust: nightly 9 | 
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "gelf" 3 | version = "0.5.0" 4 | authors = ["Benjamin Zikarsky "] 5 | description = "A library for logging GELF messages to a Graylog compatible server" 6 | repository = "https://github.com/bzikarsky/gelf-rust" 7 | homepage = "https://github.com/bzikarsky/gelf-rust" 8 | documentation = "https://docs.rs/gelf/" 9 | keywords = ["gelf", "log", "graylog", "logging"] 10 | license = "MIT" 11 | edition = "2018" 12 | readme = "README.md" 13 | 14 | [dependencies] 15 | log = { version = "0.4", features = ["std"] } 16 | chrono = { version = "^0.4.0", features = ["serde"] } 17 | failure = "0.1" 18 | hostname = "^0.1.3" 19 | libc = "^0.2.20" 20 | serde_json = "^1.0.0" 21 | serde = { version = "^1.0.0", features = ["derive"] } 22 | rand = "^0.7.2" 23 | libdeflater = "0.2.0" 24 | bytes = "0.4.12" 25 | serde_with = { version = "1.3.1" } 26 | 27 | [profile.release] 28 | opt-level = 3 29 | lto = true 30 | 31 | [dev-dependencies] 32 | criterion = "0.3.0" 33 | jemallocator = "0.3.2" 34 | loom = "0.2.14" 35 | 36 | [[bench]] 37 | name = "benchmark" 38 | path = "benches/benchmarks.rs" 39 | harness = false 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Benjamin Zikarsky 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 
11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GELF for Rust (`gelf`) 2 | 3 | [![Crates.io](https://img.shields.io/crates/d/gelf.svg?style=flat-square)](https://crates.io/crates/gelf) 4 | [![Build Status](https://img.shields.io/travis/bzikarsky/gelf-rust.svg?style=flat-square)](https://travis-ci.org/bzikarsky/gelf-rust) 5 | 6 | A GELF implementation for Rust ([Documentation](https://docs.rs/gelf)) 7 | 8 | *TODO: What's GELF?* 9 | - *Link Graylog* 10 | - *Link GELF spec* 11 | 12 | *TODO: What's this library?* 13 | 14 | *TODO: GELF example usecases* 15 | 16 | ## Features 17 | 18 | *TODO: ::log-integration, list of backends, conversion of error levels* 19 | 20 | ## Install 21 | 22 | To make use of GELF for Rust, simply add it as a dependency in your `Cargo.toml`. 
Check for the latest 23 | version at [crates.io](https://crates.io/crates/gelf): 24 | 25 | ```toml 26 | [dependencies] 27 | gelf = "a.b.c" 28 | ``` 29 | 30 | If you installed [`cargo-edit`](https://github.com/killercup/cargo-edit) you can easily add the latest 31 | version by running: 32 | 33 | ``` 34 | cargo add gelf 35 | ``` 36 | 37 | Finally add the crate to your application: 38 | 39 | ```rust 40 | extern crate gelf; 41 | ``` 42 | 43 | ## Examples & use 44 | Two introductory examples (for both standalone and `log`-integrated uses) can be found 45 | [in the crate's documentation](https://docs.rs/gelf/). 46 | 47 | Additional examples covering different backends and other advanced uses can be found in [`/examples`](examples). 48 | Every one of those can be run with `cargo`, e.g.: 49 | 50 | ``` 51 | cargo run --example simple_udp 52 | ``` 53 | 54 | ## Documentation 55 | 56 | The documentation is available at https://docs.rs/gelf and will get built automatically for every crate version. 57 | 58 | ## License 59 | 60 | GELF for rust (`gelf`) is licensed under the [MIT-License](https://github.com/bzikarsky/gelf-rust/blob/master/LICENSE). 61 | 62 | ## Contact & Contributing 63 | 64 | Contributions are very welcome. I will lay out a guide for contributions in a `CONTRIBUTING.md`. Until then 65 | you are invited to PR/issue as you like :-) 66 | 67 | If you have any questions, feel free to contact me by [mail](mailto:benjamin@zikarsky.de), 68 | [Twitter](https://twitter.com/bzikarsky) or on IRC. I'll usually idle as `bzikarsky` on 69 | [freenode](https://freenode.net) in #graylog. 
70 | 71 | *TODO: CONTRIBUTING.md* 72 | 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /benches/benchmarks.rs: -------------------------------------------------------------------------------- 1 | extern crate gelf; 2 | extern crate rand; 3 | extern crate criterion; 4 | 5 | use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput, BenchmarkGroup, BenchmarkId}; 6 | 7 | use rand::{thread_rng, Rng, RngCore}; 8 | use rand::distributions::Alphanumeric; 9 | use gelf::{Message, UdpBackend, MessageCompression, Logger}; 10 | use std::iter; 11 | use criterion::measurement::WallTime; 12 | 13 | #[global_allocator] 14 | static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; 15 | 16 | fn messages_iterator<'a>(characters: usize) -> impl std::iter::Iterator> { 17 | let short_message: String = thread_rng() 18 | .sample_iter(&Alphanumeric) 19 | .take(characters) 20 | .collect(); 21 | 22 | iter::repeat_with(move || Message::new(short_message.clone())) 23 | } 24 | 25 | fn random_messages_iterator<'a>() -> impl std::iter::Iterator> { 26 | 27 | iter::repeat_with(|| { 28 | let characters = thread_rng().next_u32(); 29 | 30 | let short_message: String = thread_rng() 31 | .sample_iter(&Alphanumeric) 32 | .take(characters as usize) 33 | .collect(); 34 | 35 | Message::new(short_message) 36 | }) 37 | } 38 | 39 | fn generate_messages<'a>(size: usize, characters: usize) -> Vec> { 40 | messages_iterator(characters).take(size).collect() 41 | } 42 | 43 | fn log_message_characters_benchmark(logger: Logger, group: &mut BenchmarkGroup) { 44 | for size in [100, 200, 500, 1000].iter() { 45 | let mut iterator = messages_iterator(*size); 46 | 47 | group.throughput(Throughput::Bytes(*size as u64)) 48 | .bench_function(BenchmarkId::new("Log message", format!("{} Bytes", size)), |b| { 49 | b.iter(|| { 50 | let next = iterator.next().expect("New item"); 51 | 52 | black_box(logger.log_message(black_box(next))) 53 | }) 54 
| }); 55 | } 56 | } 57 | 58 | fn log_compression_none_benchmark(c: &mut Criterion) { 59 | let compressor = MessageCompression::None; 60 | 61 | let mut backed = UdpBackend::new("127.0.0.1:5659").unwrap(); 62 | 63 | backed.set_compression(compressor); 64 | 65 | let logger = Logger::new(Box::new(backed)).expect("Should create with success"); 66 | 67 | let mut group = c.benchmark_group("Log message without compression"); 68 | 69 | log_message_characters_benchmark(logger, &mut group) 70 | } 71 | 72 | fn log_compression_benchmark(compression: MessageCompression, group: &mut BenchmarkGroup) { 73 | let mut backend = UdpBackend::new("127.0.0.1:5659").unwrap(); 74 | backend.set_compression(compression); 75 | 76 | let logger = Logger::new(Box::new(backend)).expect("Should create with success"); 77 | 78 | log_message_characters_benchmark(logger, group) 79 | } 80 | 81 | fn log_compression_gzip_benchmark(c: &mut Criterion) { 82 | for level in 1..=12 { 83 | let compression = MessageCompression::Gzip { level }; 84 | 85 | let mut group = c.benchmark_group(format!("Log message with compression using Gzip level {}", level)); 86 | 87 | log_compression_benchmark(compression, &mut group) 88 | } 89 | 90 | } 91 | 92 | fn log_compression_zlib_benchmark(c: &mut Criterion) { 93 | for level in 1..=12 { 94 | let compression = MessageCompression::Zlib { level }; 95 | 96 | let mut group = c.benchmark_group(format!("Log message with compression using Zlib level {}", level)); 97 | 98 | log_compression_benchmark(compression, &mut group) 99 | } 100 | } 101 | 102 | criterion_group!(benches, log_compression_none_benchmark, log_compression_zlib_benchmark, log_compression_gzip_benchmark); 103 | criterion_main!(benches); -------------------------------------------------------------------------------- /dev/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | mongo: 4 | image: "mongo:3" 5 | elasticsearch: 6 | image: 
"elasticsearch:2" 7 | command: "elasticsearch -Des.cluster.name='graylog'" 8 | graylog: 9 | image: "graylog2/server:2.2.1-1" 10 | environment: 11 | GRAYLOG_PASSWORD_SECRET: somepasswordpepper 12 | GRAYLOG_ROOT_PASSWORD_SHA2: 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918 13 | GRAYLOG_WEB_ENDPOINT_URI: http://127.0.0.1:9000/api 14 | depends_on: 15 | - mongo 16 | - elasticsearch 17 | ports: 18 | - "9000:9000" 19 | - "12201:12201" 20 | - "12201:12201/udp" -------------------------------------------------------------------------------- /examples/shared/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module just contains some dirty code to make the examples work 2 | //! Just ignore it :-) 3 | 4 | #![allow(dead_code)] 5 | 6 | use std::env::Args; 7 | use std::io::Read; 8 | 9 | pub fn run_debug_server_udp(host: String, timeout_in_s: u64) { 10 | let socket = ::std::net::UdpSocket::bind(host.as_str()) 11 | .expect("Failed to create debug server UDP socket"); 12 | 13 | socket 14 | .set_read_timeout(Some(::std::time::Duration::new(timeout_in_s, 0))) 15 | .expect("Failed to set read timeout on UDP socket"); 16 | 17 | loop { 18 | let mut buf: [u8; 10000] = [0; 10000 /* should fit almost any message */]; 19 | let num_bytes = match socket.recv_from(&mut buf) { 20 | Ok((num_bytes, _)) => num_bytes, 21 | Err(_) => return, 22 | }; 23 | 24 | if buf[0] == 0x1e && buf[1] == 0x0f { 25 | let pos = buf[10]; 26 | let total = buf[11]; 27 | let id: u64 = buf[2..10].iter().fold(0, |x, &i| x << 8 | i as u64); 28 | 29 | println!( 30 | "Received message chunk ({}/{}) for message '{:?}': {}", 31 | pos, 32 | total, 33 | id, 34 | String::from_utf8_lossy(&buf[12..num_bytes]) 35 | ); 36 | } else { 37 | println!( 38 | "Received message: {}", 39 | String::from_utf8_lossy(&buf[0..num_bytes]) 40 | ); 41 | } 42 | } 43 | } 44 | 45 | pub fn run_debug_server_tcp(host: String, num_messages: u8) { 46 | let socket = 
::std::net::TcpListener::bind(host.as_str()) 47 | .expect("Failed to create debug server TCP socket"); 48 | 49 | let (mut conn, _) = socket.accept().expect("Failed to accept connection"); 50 | 51 | let mut buf: [u8; 20] = [0; 20]; 52 | let mut msg_counter = 0; 53 | let mut keep: Vec = Vec::new(); 54 | 55 | loop { 56 | let num_bytes = conn.read(&mut buf).expect("Failed to read on connection"); 57 | let mut counter = 0; 58 | let mut last_msg = 0; 59 | for byte in buf[0..num_bytes].iter() { 60 | counter += 1; 61 | if *byte == 0x00 { 62 | println!( 63 | "Received message: {}{}", 64 | String::from_utf8_lossy(keep.as_slice()), 65 | String::from_utf8_lossy(&buf[last_msg..(last_msg + counter)]) 66 | ); 67 | keep.clear(); 68 | last_msg = counter; 69 | counter = 0; 70 | msg_counter += 1; 71 | } 72 | } 73 | 74 | if last_msg != num_bytes { 75 | keep.extend(buf[last_msg..num_bytes].iter()); 76 | } 77 | 78 | if msg_counter == num_messages { 79 | break; 80 | } 81 | } 82 | } 83 | 84 | pub struct Options { 85 | pub gelf_host: String, 86 | pub run_debug_server: bool, 87 | } 88 | 89 | impl Options { 90 | pub fn populate(&mut self, args: Args) { 91 | let args = args.collect::>(); 92 | for i in 0..args.len() { 93 | if args[i] == "--no-server" { 94 | self.run_debug_server = false; 95 | continue; 96 | } 97 | 98 | if args[i] == "--gelf-host" { 99 | self.gelf_host = args[i + 1].clone(); 100 | continue; 101 | } 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /examples/simple_tcp.rs: -------------------------------------------------------------------------------- 1 | //! A simple TCP logging example 2 | //! 3 | //! By default the example tries to start a debug GELF server which logs the 4 | //! GELF JSON messages to STDOUT. If you want to run your own debug output (or 5 | //! log to a Graylog instance) you can disable the debug server by passing the 6 | //! "--no-server" argument to the example run. E.g.: 7 | //! 8 | //! 
`cargo run --example simple_tcp -- --no-server` 9 | //! 10 | //! It's also possible to specify a remote GELF host location with the flag 11 | //! `--gelf-host `. This allows you to log over IPv6 for example: 12 | //! 13 | //! `cargo run --example simple_tcp -- --no-server --gelf-host [::1]:12201` 14 | 15 | #[macro_use] 16 | extern crate log; 17 | extern crate gelf; 18 | 19 | mod shared; 20 | 21 | use gelf::*; 22 | use log::LevelFilter as LogLevelFilter; 23 | use shared::*; 24 | 25 | /// Set a filter for log-messages. Messages below the defined level will be ignored 26 | const LOG_FILTER: LogLevelFilter = LogLevelFilter::Trace; 27 | 28 | /// Set the hostname which should be used in the GELF messages 29 | static HOSTNAME: &'static str = "test.local"; 30 | 31 | fn main() { 32 | // Default options: 33 | // - gelf_host: The UDP destination string (e.g. 127.0.0.1:12201 or [::1]:12201) 34 | // - run_debug_server: Whether the example should run its own debug server 35 | let mut options = Options { 36 | gelf_host: String::from("127.0.0.1:12201"), 37 | run_debug_server: true, 38 | }; 39 | 40 | // Read command line options 41 | options.populate(::std::env::args()); 42 | 43 | // Run debug graylog server if required 44 | let thread = if options.run_debug_server { 45 | let host = options.gelf_host.clone(); 46 | let handle = Some(::std::thread::spawn(|| { 47 | run_debug_server_tcp(host, 5); 48 | })); 49 | 50 | // Wait for the server to start 51 | ::std::thread::sleep(::std::time::Duration::new(1, 0)); 52 | handle 53 | } else { 54 | None 55 | }; 56 | 57 | // Create a UDP backend for given host and chunk_size 58 | let backend = 59 | TcpBackend::new(options.gelf_host.as_str()).expect("Failed to create a TCP backend"); 60 | 61 | // Create the logger with the given backend 62 | let mut logger = Logger::new(Box::new(backend)).expect("Failed to create the logger"); 63 | 64 | // Configure hostname (can be ommitted, defaults to a local hostname lookup) 65 | 
logger.set_hostname(String::from(HOSTNAME)); 66 | 67 | // Add an example metadata field which is added to every message which does not contain 68 | // the key already 69 | logger.set_default_metadata( 70 | String::from("facility"), 71 | String::from( 72 | ::std::env::current_exe() 73 | .unwrap() 74 | .as_path() 75 | .to_string_lossy(), 76 | ), 77 | ); 78 | 79 | // Install the logger as a system logger 80 | logger 81 | .install(LOG_FILTER) 82 | .expect("Failed to install the logger"); 83 | 84 | // Log! Go! 85 | trace!("trace"); 86 | debug!("debug"); 87 | info!("info"); 88 | warn!("warn"); 89 | error!("error"); 90 | 91 | // Wait for a possible debug log server to shutdown 92 | if let Some(handle) = thread { 93 | handle 94 | .join() 95 | .expect("Failed to shutdown debug graylog server"); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /examples/simple_udp.rs: -------------------------------------------------------------------------------- 1 | //! A simple UDP logging example 2 | //! 3 | //! By default the example tries to start a debug GELF server which logs the 4 | //! GELF JSON messages and chunks to STDOUT. If you want to run your own debug 5 | //! output (or log to a Graylog instance) you can disable the debug server by 6 | //! passing the "--no-server" argument to the example run. E.g.: 7 | //! 8 | //! `cargo run --example simple_udp -- --no-server` 9 | //! 10 | //! It's also possible to specify a remote GELF host location with the flag 11 | //! `--gelf-host `. This allows you to log over IPv6 for example: 12 | //! 13 | //! `cargo run --example simple_udp -- --no-server --gelf-host [::1]:12201` 14 | 15 | #[macro_use] 16 | extern crate log; 17 | extern crate gelf; 18 | 19 | mod shared; 20 | 21 | use gelf::*; 22 | use log::LevelFilter as LogLevelFilter; 23 | use shared::*; 24 | 25 | /// Set a filter for log-messages. 
Messages below the defined level will be ignored 26 | const LOG_FILTER: LogLevelFilter = LogLevelFilter::Trace; 27 | 28 | /// Set the compression used for the messages. Compression is 29 | /// disabled for readable examples 30 | const MESSAGE_COMPRESSION: MessageCompression = MessageCompression::None; 31 | 32 | /// Set the maximum size of a UDP packet 33 | /// If you want to see chunking, set it to something small like ChunkSize::Custom(50) 34 | const CHUNK_SIZE: ChunkSize = ChunkSize::LAN; 35 | 36 | /// Set the hostname which should be used in the GELF messages 37 | static HOSTNAME: &'static str = "test.local"; 38 | 39 | fn main() { 40 | // Default options: 41 | // - gelf_host: The UDP destination string (e.g. 127.0.0.1:12201 or [::1]:12201) 42 | // - run_debug_server: Whether the example should run its own debug server 43 | let mut options = Options { 44 | gelf_host: String::from("127.0.0.1:12201"), 45 | run_debug_server: true, 46 | }; 47 | 48 | // Read command line options 49 | options.populate(::std::env::args()); 50 | 51 | // Create a UDP backend for given host and chunk_size 52 | let mut backend = UdpBackend::new_with_chunksize(options.gelf_host.clone(), CHUNK_SIZE) 53 | .expect("Failed to create a UDP backend"); 54 | 55 | // Configure compression (can be ommited, defaults to Gzip) 56 | backend.set_compression(MESSAGE_COMPRESSION); 57 | 58 | // Create the logger with the given backend 59 | let mut logger = Logger::new(Box::new(backend)).expect("Failed to create the logger"); 60 | 61 | // Configure hostname (can be ommitted, defaults to a local hostname lookup) 62 | logger.set_hostname(String::from(HOSTNAME)); 63 | 64 | // Add an example metadata field which is added to every message which does not contain 65 | // the key already 66 | logger.set_default_metadata( 67 | String::from("facility"), 68 | String::from( 69 | ::std::env::current_exe() 70 | .unwrap() 71 | .as_path() 72 | .to_string_lossy(), 73 | ), 74 | ); 75 | 76 | // Install the logger as a system 
logger 77 | logger 78 | .install(LOG_FILTER) 79 | .expect("Failed to install the logger"); 80 | 81 | // Run debug graylog server if required 82 | let thread = if options.run_debug_server { 83 | let host = options.gelf_host.clone(); 84 | Some(::std::thread::spawn(|| { 85 | run_debug_server_udp(host, 1); 86 | })) 87 | } else { 88 | None 89 | }; 90 | 91 | // Log! Go! 92 | trace!("trace"); 93 | debug!("debug"); 94 | info!("info"); 95 | warn!("warn"); 96 | error!("error"); 97 | 98 | // Wait for a possible debug log server to shutdown 99 | if let Some(handle) = thread { 100 | handle 101 | .join() 102 | .expect("Failed to shutdown debug graylog server"); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/backends/mod.rs: -------------------------------------------------------------------------------- 1 | mod null; 2 | mod tcp; 3 | mod udp; 4 | 5 | pub use self::null::NullBackend; 6 | pub use self::tcp::TcpBackend; 7 | pub use self::udp::UdpBackend; 8 | 9 | use crate::{WireMessage, Result}; 10 | 11 | /// A trait for a GELF backend 12 | /// 13 | /// A backend is responsible for transporting a `WireMessage` to a 14 | /// Gelf host. It is responsible for creating required sockets and chosing 15 | /// proper serialization and encoding options (e.g. chunking with 16 | /// `ChunkedMessage` or compression with `MessageCompression`) 17 | pub trait Backend: Sync + Send { 18 | /// Log a message. 
19 | fn log_message(&self, msg: WireMessage) -> Result<()>; 20 | } 21 | -------------------------------------------------------------------------------- /src/backends/null.rs: -------------------------------------------------------------------------------- 1 | use crate::{Backend, WireMessage, Result}; 2 | 3 | /// The `NullBackend` is a utility backend which discards all messages 4 | pub struct NullBackend; 5 | 6 | impl NullBackend { 7 | /// Construct a new NullBackend 8 | pub fn new() -> NullBackend { 9 | NullBackend {} 10 | } 11 | } 12 | 13 | impl Backend for NullBackend { 14 | /// Log a message. 15 | /// 16 | /// Logging a message with NullBackend is a noop and will never fail. 17 | fn log_message(&self, _: WireMessage) -> Result<()> { 18 | Ok(()) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/backends/tcp.rs: -------------------------------------------------------------------------------- 1 | use failure; 2 | use failure::Fail; 3 | use std::io::Write; 4 | use std::net; 5 | use std::sync; 6 | 7 | use crate::{Result, Error, Backend, WireMessage}; 8 | 9 | /// TcpBackend is a simple GELF over TCP backend. 10 | /// 11 | /// WireMessages are simply serialized and optionally compressed and pushed to 12 | /// a Gelf host over TCP. TCP's stream-based nature requires no chunking. 13 | /// GELF over TCP does not support any type of compression, due to the use of 14 | /// the null byte as a frame delimiter. 15 | pub struct TcpBackend { 16 | socket: sync::Arc>, 17 | } 18 | 19 | impl TcpBackend { 20 | /// Construct a new TcpBackend. 
21 | pub fn new(destination: T) -> Result { 22 | let socket = net::TcpStream::connect(destination).map_err(|e| { 23 | failure::Error::from(e) 24 | .context("Failed to establish TCP connection") 25 | .context(Error::BackendCreationFailed) 26 | })?; 27 | 28 | socket.set_nonblocking(true).map_err(|e| { 29 | e.context("Failed to set TcpStream to non-blocking mode") 30 | .context(Error::BackendCreationFailed) 31 | })?; 32 | 33 | Ok(TcpBackend { 34 | socket: sync::Arc::new(sync::Mutex::new(socket)), 35 | }) 36 | } 37 | } 38 | 39 | impl Backend for TcpBackend { 40 | /// Log a message over TCP. 41 | fn log_message(&self, msg: WireMessage) -> Result<()> { 42 | let mut msg: Vec = msg.to_gelf()?.into(); 43 | 44 | // raw messages need to be terminated with a 0-byte 45 | msg.push(0x00); 46 | 47 | let mut socket = self.socket.lock().unwrap(); 48 | 49 | socket 50 | .write_all(&msg) 51 | .map_err(|e| e.context(Error::LogTransmitFailed))?; 52 | 53 | Ok(()) 54 | } 55 | } 56 | 57 | impl Drop for TcpBackend { 58 | /// Try to close the connection gracefully when TcpBackend goes out of scope 59 | fn drop(&mut self) { 60 | // When drop() is called unwrap() should never fail 61 | let mut socket = self.socket.lock().unwrap(); 62 | 63 | socket 64 | .flush() 65 | .and_then(|_| socket.shutdown(net::Shutdown::Both)) 66 | .unwrap_or_else(|_| warn!("Failed to flush and shutdown tcp socket cleanly")); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/backends/udp.rs: -------------------------------------------------------------------------------- 1 | use failure; 2 | use failure::Fail; 3 | use std::net; 4 | 5 | use crate::{ChunkSize, MessageCompression, Result, Error, WireMessage, Backend}; 6 | 7 | /// UdpBackend is the default and standard GELF backend 8 | /// 9 | /// It pushes messages to a GELF host via UDP. Messages are cut into chunks 10 | /// of a certain chunk-size. 
This size is important since the chunk-size + 11 | /// a stable overhead of 12 bytes needs to fit the transport layer's mtu. 12 | /// 13 | /// If the message fits into a single chunk, no chunking is applied. 14 | pub struct UdpBackend { 15 | socket: net::UdpSocket, 16 | destination: T, 17 | chunk_size: ChunkSize, 18 | compression: MessageCompression, 19 | } 20 | 21 | impl UdpBackend { 22 | /// Construct a new UdpBackend with default chunk-size (ChunkSize::LAN) 23 | pub fn new(destination: T) -> Result> { 24 | Self::new_with_chunksize(destination, ChunkSize::LAN) 25 | } 26 | 27 | /// Construct an new UdpBackend with the given chunk-size 28 | pub fn new_with_chunksize( 29 | destination: T, 30 | chunk_size: ChunkSize, 31 | ) -> Result> { 32 | // Get a single net::SocketAddr form the destination-address type 33 | let destination_addr = destination 34 | .to_socket_addrs() 35 | .map_err(|e| { 36 | failure::Error::from(e) 37 | .context("Failed to parse a destination address") 38 | .context(Error::BackendCreationFailed) 39 | })? 
40 | .nth(0) 41 | .ok_or_else(|| format_err!("Invalid destination server address",) 42 | .context(Error::BackendCreationFailed))?; 43 | 44 | // Create an appropiate local socket for the given destination 45 | let local = match destination_addr { 46 | net::SocketAddr::V4(_) => "0.0.0.0:0", 47 | net::SocketAddr::V6(_) => "[::]:0", 48 | }; 49 | 50 | let socket = net::UdpSocket::bind(local).map_err(|e| { 51 | e.context("Failed to bind local socket") 52 | .context(Error::BackendCreationFailed) 53 | })?; 54 | 55 | socket.set_nonblocking(true).map_err(|e| { 56 | e.context("Failed to set UdpSocket to non-blocking mode") 57 | .context(Error::BackendCreationFailed) 58 | })?; 59 | 60 | Ok(UdpBackend { 61 | socket: socket, 62 | destination: destination, 63 | chunk_size: chunk_size, 64 | compression: MessageCompression::default(), 65 | }) 66 | } 67 | 68 | /// Return the current set compression algorithm 69 | pub fn compression(&self) -> MessageCompression { 70 | self.compression 71 | } 72 | 73 | /// Set the compression algorithm 74 | pub fn set_compression(&mut self, compression: MessageCompression) -> &mut Self { 75 | self.compression = compression; 76 | self 77 | } 78 | } 79 | 80 | impl Backend for UdpBackend { 81 | /// Log a message via UDP. 
82 | fn log_message(&self, msg: WireMessage) -> Result<()> { 83 | let chunked_msg = msg.to_chunked_message(self.chunk_size, self.compression)?; 84 | let chunked_msg_size = chunked_msg.len(); 85 | let sent_bytes = chunked_msg 86 | .iter() 87 | .map( 88 | |chunk| match self.socket.send_to(&chunk, self.destination.clone()) { 89 | Err(_) => 0, 90 | Ok(size) => size, 91 | }, 92 | ) 93 | .fold(0_u64, |carry, size| carry + size as u64); 94 | 95 | if sent_bytes != chunked_msg_size { 96 | bail!(Error::LogTransmitFailed); 97 | } 98 | 99 | Ok(()) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | use failure; 2 | use std; 3 | use libdeflater::CompressionError as CompressedError; 4 | 5 | #[derive(Clone, Debug, Fail)] 6 | pub enum Error { 7 | #[fail(display = "Failed to create the GELF backend")] 8 | BackendCreationFailed, 9 | #[fail(display = "'{}' is not a legal name for an additional GELF field", name)] 10 | IllegalNameForAdditional { name: String }, 11 | #[fail(display = "Failed to create the GELF logger")] 12 | LoggerCreateFailed, 13 | #[fail(display = "Failed to transmit the GELF log message")] 14 | LogTransmitFailed, 15 | #[fail(display = "Failed to compress the message with '{}'", compression_method)] 16 | CompressMessageFailed { 17 | compression_error: CompressionError, 18 | compression_method: &'static str 19 | }, 20 | #[fail(display = "Failed to serialize the message to GELF json")] 21 | SerializeMessageFailed, 22 | #[fail(display = "Failed to chunk the message")] 23 | ChunkMessageFailed, 24 | #[fail(display = "Illegal chunk size: {}", size)] 25 | IllegalChunkSize { size: u16 }, 26 | #[fail(display = "Invalid compression level: {}", level)] 27 | InvalidCompressionLevel { level: i32 }, 28 | } 29 | 30 | #[derive(Clone, Debug)] 31 | pub enum CompressionError { 32 | InsufficientSpace 33 | } 34 | 35 | impl From for 
CompressionError { 36 | fn from(err: CompressedError) -> Self { 37 | match err { 38 | CompressedError::InsufficientSpace => CompressionError::InsufficientSpace 39 | } 40 | } 41 | } 42 | 43 | pub type Result = std::result::Result; 44 | -------------------------------------------------------------------------------- /src/level.rs: -------------------------------------------------------------------------------- 1 | use log::{Level as LogLevel, LevelFilter as LogLevelFilter}; 2 | use serde::{Deserialize, Deserializer, Serialize, Serializer}; 3 | 4 | /// GELF's representation of an error level 5 | /// 6 | /// GELF's error levels are equivalent to syslog's severity 7 | /// information (specified in [RFC 5424](https://tools.ietf.org/html/rfc5424)) 8 | /// 9 | /// The levels dont match `log`s levels, but (lossy) conversion methods 10 | /// are provided. These methods follow this conversion table: 11 | /// 12 | /// | GELF / Syslog | Rust | 13 | /// |-------------------|-----------| 14 | /// | Emergency (0) | Error (1) | 15 | /// | Alert (1) | Error (1) | 16 | /// | Critical (2) | Error (1) | 17 | /// | Error (3) | Error (1) | 18 | /// | Warning (4) | Warn (2) | 19 | /// | Notice (5) | Info (3) | 20 | /// | Informational (6) | Info (3) | 21 | /// | Debug (7) | Debug (4) | 22 | /// | Debug (7) | Trace (5) | 23 | #[derive(Debug, Clone, Copy, PartialEq)] 24 | pub enum Level { 25 | Emergency = 0, 26 | Alert = 1, 27 | Critical = 2, 28 | Error = 3, 29 | Warning = 4, 30 | Notice = 5, 31 | Informational = 6, 32 | Debug = 7, 33 | } 34 | 35 | impl Level { 36 | /// Get the GELF error level from given Rust error level 37 | pub fn from_rust(level: LogLevel) -> Level { 38 | match level { 39 | LogLevel::Error => Level::Error, 40 | LogLevel::Warn => Level::Warning, 41 | LogLevel::Info => Level::Informational, 42 | LogLevel::Debug | LogLevel::Trace => Level::Debug, 43 | } 44 | } 45 | 46 | /// Get the Rust error level from this GELF error level 47 | pub fn to_rust(self) -> LogLevel { 48 | 
match self { 49 | Level::Emergency | Level::Alert | Level::Critical | Level::Error => LogLevel::Error, 50 | Level::Warning => LogLevel::Warn, 51 | Level::Notice | Level::Informational => LogLevel::Info, 52 | Level::Debug => LogLevel::Debug, 53 | } 54 | } 55 | } 56 | 57 | impl<'de> Deserialize<'de> for Level { 58 | fn deserialize(deserializer: D) -> Result>::Error> where 59 | D: Deserializer<'de> { 60 | serde_json::Value::deserialize(deserializer)? 61 | .as_f64() 62 | .map(Level::from) 63 | .ok_or_else(|| serde::de::Error::custom("Expected i64 for Log Level")) 64 | } 65 | } 66 | 67 | impl Serialize for Level { 68 | fn serialize(&self, serializer: S) -> Result 69 | where S: Serializer { 70 | serializer.serialize_i8(self.into()) 71 | } 72 | } 73 | 74 | impl Into for Level { 75 | /// Allow for Into conversion to Rust's LogLevel 76 | fn into(self) -> LogLevel { 77 | self.to_rust() 78 | } 79 | } 80 | 81 | impl From for Level { 82 | /// Allow for Into conversion from Rust's LogLevel 83 | fn from(level: LogLevel) -> Level { 84 | Level::from_rust(level) 85 | } 86 | } 87 | 88 | impl Into for Level { 89 | /// Allow for Into conversion from Rust's LogLevelFilter 90 | fn into(self) -> LogLevelFilter { 91 | self.to_rust().to_level_filter() 92 | } 93 | } 94 | 95 | impl Into for &Level { 96 | fn into(self) -> i8 { 97 | *self as i8 98 | } 99 | } 100 | 101 | impl From for Level { 102 | fn from(value: i64) -> Self { 103 | match value { 104 | 0 => Level::Emergency, 105 | 1 => Level::Alert, 106 | 2 => Level::Critical, 107 | 3 => Level::Error, 108 | 4 => Level::Warning, 109 | 5 => Level::Notice, 110 | 6 => Level::Informational, 111 | 7 => Level::Debug, 112 | _ => Level::Informational 113 | } 114 | } 115 | } 116 | 117 | impl From for Level { 118 | fn from(value: f64) -> Self { 119 | Level::from(value as i64) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/lib.rs: 
-------------------------------------------------------------------------------- 1 | //! A GELF library for Rust. 2 | //! 3 | //! The library can be used either as a standalone logging library 4 | //! or it can be used as a logging-subsystem with the 5 | //! [`log`-crate](https://doc.rust-lang.org/log/log/index.html) 6 | //! 7 | //! # Use 8 | //! 9 | //! In general you should only use this crate in applications. Libraries should 10 | //! only develop against [`log`](https://doc.rust-lang.org/log/log/index.html). 11 | //! This then allows applications to use a custom logging-framework (like `gelf`) 12 | //! to do the actual logging. 13 | //! 14 | //! ## Standalone 15 | //! 16 | //! Standalone usage is recommended if the lightweight `log`-crate's features 17 | //! are not sufficient. In this case you need to inject/use an instance of 18 | //! `gelf::Logger` directly. This allows for sending custom built `gelf::Message`-objects. 19 | //! These messages can use all bells and whistles of GELF. 20 | //! 21 | //! ### Example 22 | //! 23 | //! ``` 24 | //! extern crate gelf; 25 | //! 26 | //! # use gelf::NullBackend; 27 | //! use gelf::{Logger, UdpBackend, Message, Level}; 28 | //! 29 | //! pub fn main() { 30 | //! // Set up logging 31 | //! let backend = UdpBackend::new("127.0.0.1:12201") 32 | //! .expect("Failed to create UDP backend"); 33 | //! # let backend = NullBackend::new(); 34 | //! let mut logger = Logger::new(Box::new(backend)) 35 | //! .expect("Failed to determine hostname"); 36 | //! logger.set_default_metadata(String::from("facility"), 37 | //! String::from("example-rust-app")); 38 | //! 39 | //! // Create a (complex) message 40 | //! let mut message = Message::new(String::from("Custom message!")); 41 | //! message 42 | //! .set_full_message(String::from("The full message text is more descriptive")) 43 | //! .set_metadata("foo", String::from("bar")).unwrap() 44 | //! .set_metadata("baz", String::from("bat")).unwrap(); 45 | //! 46 | //! // Log it 47 | //! 
logger.log_message(message); 48 | //! } 49 | //! ``` 50 | //! 51 | //! ## With `log` 52 | //! 53 | //! Usage with `log` allows to log easily with the help of its macros. There is no need to 54 | //! inject or access the logger object anywhere in your application. 55 | //! 56 | //! All the context information (line, file, etc.) the `log`-crate provides is added as metadata 57 | //! to the logged GELF message. 58 | //! 59 | //! ``` 60 | //! #[macro_use] 61 | //! extern crate log; 62 | //! 63 | //! extern crate gelf; 64 | //! 65 | //! # use gelf::NullBackend; 66 | //! use gelf::{Logger, UdpBackend, Message, Level}; 67 | //! use log::LevelFilter; 68 | //! 69 | //! pub fn main() { 70 | //! let backend = UdpBackend::new("127.0.0.1:12201") 71 | //! .expect("Failed to create UDP backend"); 72 | //! # let backend = NullBackend::new(); 73 | //! 74 | //! // Init logging system 75 | //! let logger = Logger::new(Box::new(backend)) 76 | //! .expect("Failed to determine hostname"); 77 | //! logger.install(LevelFilter::Trace) 78 | //! .expect("Failed to install logger"); 79 | //! 80 | //! info!("Descend into our program!"); 81 | //! somewhere() 82 | //! } 83 | //! 84 | //! pub fn somewhere() { 85 | //! trace!("Trace something here!"); 86 | //! over::the_rainbow(); 87 | //! } 88 | //! 89 | //! mod over { 90 | //! pub fn the_rainbow() { 91 | //! error!("Oh well..."); 92 | //! } 93 | //! } 94 | //! 
``` 95 | #![crate_type = "lib"] 96 | 97 | extern crate chrono; 98 | extern crate hostname; 99 | extern crate libc; 100 | extern crate libdeflater; 101 | extern crate rand; 102 | 103 | #[macro_use] 104 | extern crate serde; 105 | 106 | #[cfg_attr(test, macro_use)] 107 | extern crate serde_json; 108 | 109 | #[macro_use] 110 | extern crate log; 111 | 112 | #[macro_use] 113 | extern crate failure; 114 | extern crate bytes; 115 | 116 | mod backends; 117 | mod errors; 118 | mod level; 119 | mod logger; 120 | mod message; 121 | mod util; 122 | 123 | pub use backends::{Backend, NullBackend, TcpBackend, UdpBackend}; 124 | pub use errors::{Error, Result}; 125 | pub use level::Level; 126 | pub use logger::Logger; 127 | pub use message::{ChunkSize, Message, MessageCompression, WireMessage}; 128 | -------------------------------------------------------------------------------- /src/logger.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use hostname; 4 | use log; 5 | use log::set_boxed_logger; 6 | use crate::{Backend, Error, Message, WireMessage}; 7 | use crate::errors::Result; 8 | 9 | /// Logger for sending log-messages 10 | /// 11 | /// A `Logger` instance can be either used as a standalone object to log directly 12 | /// to a log-server or it can be installed as a `log`-crate log-handler (with `Logger::install`). 13 | /// 14 | /// By default all encountered errors will be silently ignored. If you want the logger 15 | /// to panic when an error occurs, you can change the behaviour with `Logger::enable_panic_on_error`. 16 | pub struct Logger { 17 | hostname: String, 18 | backend: Box, 19 | default_metadata: HashMap, 20 | panic_on_error: bool, 21 | } 22 | 23 | impl Logger { 24 | /// Construct a new `Logger` instance 25 | /// 26 | /// The backend needs to be boxed for usage as a logger with the `log`-crate. 
27 | /// This constructor tries to determine the local hostname (required by GELF) 28 | /// with the help of the `hostname`-crate. If you want to set a custom hostname 29 | /// check out the `Logger::new_with_hostname` constructor. 30 | pub fn new(backend: Box) -> Result { 31 | hostname::get_hostname() 32 | .map(|hostname| Logger::new_with_hostname(backend, &hostname)) 33 | .ok_or_else(|| format_err!("Failed to determine local hostname") 34 | .context(Error::LoggerCreateFailed) 35 | .into()) 36 | } 37 | 38 | /// Construct a new `Logger` instance with predetermined hostname 39 | /// 40 | /// The backend needs to be boxed for usage as a logger with the `log`-crate. It 41 | /// uses the passed hostname for the GELF `host` field 42 | pub fn new_with_hostname(backend: Box, hostname: &str) -> Logger { 43 | Logger { 44 | hostname: String::from(hostname), 45 | backend: backend, 46 | default_metadata: HashMap::new(), 47 | panic_on_error: false, 48 | } 49 | } 50 | 51 | /// Install a logger instance as a `log`-Logger 52 | /// 53 | /// This method wraps `log::set_logger` as a convenience function as required 54 | /// by the `log`-crate. `log_level` defines the maximum log-level the logger 55 | /// should log. 56 | /// 57 | /// Note that installing the logger consumes it. To uninstall you need to call 58 | /// `log::shutdown_logger` which returns the boxed, original `Logger` instance. 59 | pub fn install>(self, log_level: T) -> Result<()> { 60 | set_boxed_logger(Box::new(self))?; 61 | log::set_max_level(log_level.into()); 62 | 63 | Ok(()) 64 | } 65 | 66 | /// Log a message via the logger's transport to a GELF server. 67 | /// 68 | /// The logger will automatically add `default_metadata` fields to the message 69 | /// if missing in the passed `Message`. 
70 | pub fn log_message(&self, msg: Message) { 71 | let result = self.backend.log_message(WireMessage::new(msg, &self)); 72 | 73 | if result.is_err() && self.panic_on_error { 74 | panic!(result.unwrap_err()); 75 | } 76 | } 77 | 78 | /// Return the hostname used for GELF's `host`-field 79 | pub fn hostname(&self) -> &String { 80 | &self.hostname 81 | } 82 | 83 | /// Set the hostname used for GELF's `host`-field 84 | pub fn set_hostname>(&mut self, hostname: S) -> &mut Self { 85 | self.hostname = hostname.into(); 86 | self 87 | } 88 | 89 | /// Return all default metadata 90 | pub fn default_metadata(&self) -> &HashMap { 91 | &self.default_metadata 92 | } 93 | 94 | /// Set a default metadata field 95 | /// 96 | /// Every logged `Message` is checked for every default_metadata field. 97 | /// If it contains an entry with the key, the default is ignored. But if 98 | /// there is no additional information present, the default is added to the message. 99 | /// 100 | /// This can be used for example to add a `facility` to every message: 101 | /// 102 | /// ``` 103 | /// # use gelf::{Logger, NullBackend, Message}; 104 | /// # let backend = NullBackend::new(); 105 | /// # let mut logger = Logger::new(Box::new(backend)).unwrap(); 106 | /// logger.set_default_metadata(String::from("facility"), String::from("my_awesome_rust_service")); 107 | /// 108 | /// logger.log_message(Message::new(String::from("This is important information"))); 109 | /// // -> The message will contain an additional field "_facility" with the value "my_awesome_rust_service" 110 | /// ``` 111 | pub fn set_default_metadata( 112 | &mut self, 113 | key: S, 114 | value: T 115 | ) -> &mut Self 116 | where 117 | S: Into, 118 | T: Into 119 | { 120 | self.default_metadata.insert(key.into(), value.into()); 121 | self 122 | } 123 | 124 | /// Return a flag whether the logger panics when it encounters an error 125 | pub fn panic_on_error(&self) -> bool { 126 | self.panic_on_error 127 | } 128 | 129 | /// Force the 
logger to panic when it encounters an error 130 | pub fn enable_panic_on_error(&mut self) -> &mut Self { 131 | self.panic_on_error = true; 132 | self 133 | } 134 | 135 | /// Force the logger to ignore an encountered error silently 136 | pub fn disable_panic_on_error(&mut self) -> &mut Self { 137 | self.panic_on_error = false; 138 | self 139 | } 140 | } 141 | 142 | impl log::Log for Logger { 143 | /// Determines if a log message with the specified metadata would be logged. 144 | /// 145 | /// See [docs](https://doc.rust-lang.org/log/log/trait.Log.html#tymethod.enabled) 146 | /// for more details 147 | fn enabled(&self, _: &log::Metadata) -> bool { 148 | // The logger does not dicard any log-level by itself, therefore it is 149 | // always enabled 150 | true 151 | } 152 | 153 | /// Logs the `LogRecord`. 154 | /// See [docs](https://doc.rust-lang.org/log/log/trait.Log.html#tymethod.log) 155 | /// for more details 156 | fn log(&self, record: &log::Record) { 157 | if !self.enabled(record.metadata()) { 158 | () 159 | } 160 | 161 | self.log_message(From::from(record)) 162 | } 163 | 164 | fn flush(&self) {} 165 | } 166 | -------------------------------------------------------------------------------- /src/message/chunked_message.rs: -------------------------------------------------------------------------------- 1 | use std::cmp; 2 | 3 | use rand; 4 | use crate::Error; 5 | use crate::Result; 6 | 7 | /// Overhead per chunk is 12 bytes: magic(2) + id(8) + pos(1) + total (1) 8 | const CHUNK_OVERHEAD: u8 = 12; 9 | 10 | /// Chunk-size for LANs 11 | const CHUNK_SIZE_LAN: u16 = 8154; 12 | 13 | /// Chunk-size for WANs 14 | const CHUNK_SIZE_WAN: u16 = 1420; 15 | 16 | /// Magic bytes identifying a GELF message chunk 17 | static MAGIC_BYTES: &'static [u8; 2] = b"\x1e\x0f"; 18 | 19 | /// ChunkSize is a value type representing the size of a message-chunk 20 | /// 21 | /// It provides default sizes for WANs and LANs 22 | #[derive(Clone, Copy, Debug)] 23 | pub enum ChunkSize { 24 | LAN, 
25 | WAN, 26 | Custom(u16), 27 | } 28 | 29 | impl ChunkSize { 30 | /// Return the size associated with the chunk-size 31 | pub fn size(self) -> u16 { 32 | match self { 33 | ChunkSize::LAN => CHUNK_SIZE_LAN, 34 | ChunkSize::WAN => CHUNK_SIZE_WAN, 35 | ChunkSize::Custom(size) => size, 36 | } 37 | } 38 | } 39 | 40 | /// ChunkedMessage is an internal type for chunking an already serialized `WireMessage` 41 | pub struct ChunkedMessage { 42 | chunk_size: ChunkSize, 43 | payload: Vec, 44 | num_chunks: u8, 45 | id: ChunkedMessageId, 46 | } 47 | 48 | impl ChunkedMessage { 49 | /// Construct a new ChunkedMessage 50 | /// 51 | /// Several sanity checks are performed on construction: 52 | /// - chunk_size must be greater than 0 53 | /// - GELF allows for a maximum of 128 chunks per message 54 | pub fn new(chunk_size: ChunkSize, message: Vec) -> Result { 55 | if chunk_size.size() == 0 { 56 | return Err(Error::IllegalChunkSize { 57 | size: chunk_size.size(), 58 | }.into()); 59 | } 60 | 61 | // Ceiled integer division with (a + b - 1) / b 62 | // Calculate with 64bit integers to avoid overflow - maybe replace with checked_*? 63 | let size = chunk_size.size() as u64; 64 | let num_chunks = (message.len() as u64 + size as u64 - 1) / size; 65 | 66 | if num_chunks > 128 { 67 | return Err(format_err!("Number of chunks exceeds 128, which the the maximum number of chunks in GELF. 
Check your chunk_size").context(Error::ChunkMessageFailed).into()); 68 | } 69 | 70 | Ok(ChunkedMessage { 71 | chunk_size: chunk_size, 72 | payload: message, 73 | id: ChunkedMessageId::random(), 74 | num_chunks: num_chunks as u8, 75 | }) 76 | } 77 | 78 | /// Return the byte-length of the chunked message including all overhead 79 | pub fn len(&self) -> u64 { 80 | if self.num_chunks > 1 { 81 | self.payload.len() as u64 + self.num_chunks as u64 * CHUNK_OVERHEAD as u64 82 | } else { 83 | self.payload.len() as u64 84 | } 85 | } 86 | 87 | /// Return an iterator over all chunks of the message 88 | pub fn iter(&self) -> ChunkedMessageIterator { 89 | ChunkedMessageIterator::new(self) 90 | } 91 | } 92 | 93 | /// An iterator over all a chunked message's chunks 94 | /// 95 | /// This always be constructed by `ChunkedMessage` 96 | pub struct ChunkedMessageIterator<'a> { 97 | chunk_num: u8, 98 | message: &'a ChunkedMessage, 99 | } 100 | 101 | impl<'a> ChunkedMessageIterator<'a> { 102 | /// Create a new ChunkedMessageIterator 103 | fn new(msg: &'a ChunkedMessage) -> ChunkedMessageIterator { 104 | ChunkedMessageIterator { 105 | message: msg, 106 | chunk_num: 0, 107 | } 108 | } 109 | } 110 | 111 | impl<'a> Iterator for ChunkedMessageIterator<'a> { 112 | type Item = Vec; 113 | 114 | /// Returns the next chunk (if existant) 115 | fn next(&mut self) -> Option> { 116 | if self.chunk_num >= self.message.num_chunks { 117 | return None; 118 | } 119 | 120 | let mut chunk = Vec::new(); 121 | 122 | // Set the chunks boundaries 123 | let chunk_size = self.message.chunk_size.size(); 124 | let slice_start = (self.chunk_num as u32 * chunk_size as u32) as usize; 125 | let slice_end = cmp::min( 126 | slice_start + chunk_size as usize, 127 | self.message.payload.len(), 128 | ); 129 | 130 | // The chunk header is only required when the message size exceeds one chunk 131 | if self.message.num_chunks > 1 { 132 | // Chunk binary layout: 133 | // 2 bytes (magic bytes) 134 | // 8 bytes (message id) 135 | 
// 1 byte (chunk number) 136 | // 1 byte (total amount of chunks in this message) 137 | // n bytes (chunk payload) 138 | chunk.extend(MAGIC_BYTES.iter()); 139 | chunk.extend(self.message.id.as_bytes()); 140 | chunk.push(self.chunk_num); 141 | chunk.push(self.message.num_chunks); 142 | } 143 | 144 | chunk.extend(self.message.payload[slice_start..slice_end].iter()); 145 | 146 | self.chunk_num += 1; 147 | 148 | Some(chunk) 149 | } 150 | } 151 | 152 | /// The representation of a chunked message id 153 | /// 154 | /// Every chunked message requires an ID which consists of 8 bytes. This is the same 155 | /// as an 64bit integer. This struct provides some convenience functions on this type. 156 | struct ChunkedMessageId([u8; 8]); 157 | 158 | #[allow(dead_code)] 159 | impl<'a> ChunkedMessageId { 160 | /// Create a new, random ChunkedMessageId. 161 | fn random() -> ChunkedMessageId { 162 | let mut bytes = [0; 8]; 163 | 164 | for b in 0..8 { 165 | bytes[b] = rand::random(); 166 | } 167 | 168 | return ChunkedMessageId::from_bytes(bytes); 169 | } 170 | 171 | /// Create a new ChunkedMessageId from a 64 int. 172 | fn from_int(mut id: u64) -> ChunkedMessageId { 173 | let mut bytes = [0; 8]; 174 | for i in 0..8 { 175 | bytes[7 - i] = (id & 0xff) as u8; 176 | id >>= 8; 177 | } 178 | 179 | ChunkedMessageId(bytes) 180 | } 181 | 182 | /// Create a new ChunkedMessageId from a byte-array. 183 | fn from_bytes(bytes: [u8; 8]) -> ChunkedMessageId { 184 | ChunkedMessageId(bytes) 185 | } 186 | 187 | /// Return the message id as a byte-slice. 188 | fn as_bytes(&self) -> &[u8; 8] { 189 | &self.0 190 | } 191 | 192 | /// Return the message id as an 64bit uint. 
193 | fn to_int(&self) -> u64 { 194 | self.0.iter().fold(0_u64, |id, &i| id << 8 | i as u64) 195 | } 196 | } 197 | 198 | #[cfg(test)] 199 | mod tests { 200 | use super::*; 201 | 202 | #[test] 203 | fn chunked_message_id_from_and_to_bytes() { 204 | let raw_ids = vec![ 205 | b"\xff\xff\xff\xff\xff\xff\xff\xff", 206 | b"\x00\x00\x00\x00\x00\x00\x00\x00", 207 | b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", 208 | b"\x55\x55\x55\x55\x55\x55\x55\x55", 209 | b"\x00\x01\x02\x03\x04\x05\x06\x07", 210 | b"\x07\x06\x05\x04\x03\x02\x01\x00", 211 | b"\x00\x10\x20\x30\x40\x50\x60\x70", 212 | b"\x70\x60\x50\x40\x30\x20\x10\x00", 213 | ]; 214 | 215 | for raw_id in raw_ids { 216 | let id = ChunkedMessageId::from_bytes(raw_id.clone()); 217 | assert_eq!(id.as_bytes(), raw_id); 218 | } 219 | } 220 | 221 | #[test] 222 | fn chunked_message_id_from_and_to_int() { 223 | let raw_ids = vec![ 224 | 0xffffffffffffffff, 225 | 0x0000000000000000, 226 | 0xaaaaaaaaaaaaaaaa, 227 | 0x5555555555555555, 228 | 0x0001020304050607, 229 | 0x0706050403020100, 230 | 0x0010203040506070, 231 | 0x7060504030201000, 232 | ]; 233 | 234 | for raw_id in raw_ids { 235 | let id = ChunkedMessageId::from_int(raw_id); 236 | assert_eq!(id.to_int(), raw_id); 237 | } 238 | } 239 | 240 | #[test] 241 | #[should_panic(expected = "Number of chunks")] 242 | fn fail_too_many_chunks() { 243 | ChunkedMessage::new(ChunkSize::Custom(1), get_data(129)).unwrap(); 244 | } 245 | 246 | #[test] 247 | fn chunk_message_len() { 248 | let msg_1_chunk = ChunkedMessage::new(ChunkSize::Custom(1), get_data(1)).unwrap(); 249 | let msg_2_chunks = ChunkedMessage::new(ChunkSize::Custom(1), get_data(2)).unwrap(); 250 | let msg_128_chunks = ChunkedMessage::new(ChunkSize::Custom(1), get_data(128)).unwrap(); 251 | 252 | assert_eq!(msg_1_chunk.len(), 1); 253 | assert_eq!(msg_2_chunks.len() as u32, 2 + 2 * CHUNK_OVERHEAD as u32); 254 | assert_eq!( 255 | msg_128_chunks.len() as u64, 256 | 128 + 128 * (CHUNK_OVERHEAD as u64) 257 | ); 258 | } 259 | 260 | #[test] 261 | 
fn chunk_message_id_random() { 262 | let msg1 = ChunkedMessage::new(ChunkSize::Custom(1), get_data(1)).unwrap(); 263 | let msg2 = ChunkedMessage::new(ChunkSize::Custom(1), get_data(1)).unwrap(); 264 | let msg3 = ChunkedMessage::new(ChunkSize::Custom(1), get_data(1)).unwrap(); 265 | 266 | assert!(msg1.id.to_int() != msg2.id.to_int()); 267 | assert!(msg3.id.to_int() != msg2.id.to_int()); 268 | assert!(msg1.id.to_int() != msg3.id.to_int()); 269 | } 270 | 271 | #[test] 272 | fn chunk_message_correct_math() { 273 | let msg1 = ChunkedMessage::new(ChunkSize::Custom(3), get_data(1)).unwrap(); 274 | let msg2 = ChunkedMessage::new(ChunkSize::Custom(3), get_data(2)).unwrap(); 275 | let msg3 = ChunkedMessage::new(ChunkSize::Custom(3), get_data(3)).unwrap(); 276 | let msg4 = ChunkedMessage::new(ChunkSize::Custom(3), get_data(4)).unwrap(); 277 | let msg5 = ChunkedMessage::new(ChunkSize::Custom(3), get_data(5)).unwrap(); 278 | let msg6 = ChunkedMessage::new(ChunkSize::Custom(3), get_data(6)).unwrap(); 279 | let msg7 = ChunkedMessage::new(ChunkSize::Custom(3), get_data(7)).unwrap(); 280 | 281 | assert_eq!(msg1.num_chunks, 1); 282 | assert_eq!(msg2.num_chunks, 1); 283 | assert_eq!(msg3.num_chunks, 1); 284 | assert_eq!(msg4.num_chunks, 2); 285 | assert_eq!(msg5.num_chunks, 2); 286 | assert_eq!(msg6.num_chunks, 2); 287 | assert_eq!(msg7.num_chunks, 3); 288 | } 289 | 290 | #[test] 291 | fn chunk_message_chunking() { 292 | // 10 exact chunks 293 | check_chunks(10, 100, 10); 294 | 295 | // 4 inexact chunks 296 | check_chunks(33, 100, 4); 297 | 298 | // test no chunks 299 | let msg = ChunkedMessage::new(ChunkSize::Custom(100), get_data(100)).unwrap(); 300 | let mut iter = msg.iter(); 301 | let chunk = iter.next().unwrap(); 302 | assert_eq!(iter.next(), None); 303 | assert_eq!(chunk.len(), 100); 304 | assert_eq!(chunk[0], 0); 305 | assert_eq!(chunk[99], 99); 306 | } 307 | 308 | #[test] 309 | fn chunk_large_message_chunking() { 310 | // 100k of msg 311 | chunking(CHUNK_SIZE_WAN, 100000); 
312 | } 313 | 314 | fn chunking(chunk_size: u16, msg_size: u32) { 315 | check_chunks( 316 | chunk_size as u16, 317 | msg_size, 318 | (msg_size / chunk_size as u32) as u8 + 1, 319 | ); 320 | } 321 | 322 | #[test] 323 | #[should_panic] 324 | fn test_illegal_chunk_size() { 325 | ChunkedMessage::new(ChunkSize::Custom(0), get_data(1)).unwrap(); 326 | } 327 | 328 | fn get_data(len: usize) -> Vec { 329 | let mut data = Vec::with_capacity(len); 330 | for i in 0..len { 331 | data.push(i as u8); 332 | } 333 | 334 | data 335 | } 336 | 337 | fn check_chunks(chunk_size: u16, msg_size: u32, expected_chunk_count: u8) { 338 | let msg_data = get_data(msg_size as usize); 339 | let msg_data_clone = msg_data.clone(); 340 | let msg = ChunkedMessage::new(ChunkSize::Custom(chunk_size as u16), msg_data).unwrap(); 341 | let mut counter: u8 = 0; 342 | for chunk in msg.iter() { 343 | println!("{:?}", chunk); 344 | 345 | // length is in budget 346 | 347 | assert!(chunk.len() as u16 <= chunk_size + 12); 348 | // magic bytes 349 | assert_eq!(chunk[0], MAGIC_BYTES[0]); 350 | assert_eq!(chunk[1], MAGIC_BYTES[1]); 351 | 352 | // pos/counter section 353 | assert_eq!(chunk[10], counter); 354 | assert_eq!(chunk[11], expected_chunk_count); 355 | 356 | // first and last byte 357 | let first_index = (counter as u32 * chunk_size as u32) as usize; 358 | let last_index = 359 | (::std::cmp::min((counter as u32 + 1) * chunk_size as u32, msg_size) - 1) as usize; 360 | assert_eq!(chunk[12], msg_data_clone[first_index]); 361 | assert_eq!(*chunk.last().unwrap(), msg_data_clone[last_index]); 362 | 363 | counter += 1; 364 | } 365 | 366 | assert_eq!(counter, expected_chunk_count); 367 | } 368 | 369 | } 370 | -------------------------------------------------------------------------------- /src/message/compression.rs: -------------------------------------------------------------------------------- 1 | use libdeflater::{CompressionLvl, Compressor}; 2 | use std::iter; 3 | use std::collections::HashMap; 4 | use 
std::cell::RefCell; 5 | 6 | use crate::{WireMessage, Error}; 7 | use crate::errors::Result; 8 | 9 | thread_local!(static COMPRESSORS: RefCell = RefCell::new(DeflaterCompressor::new())); 10 | 11 | /// MessageCompression represents all possible compression algorithms in GELF. 12 | #[derive(PartialEq, Clone, Copy)] 13 | pub enum MessageCompression { 14 | None, 15 | Gzip { 16 | level: i32 17 | }, 18 | Zlib { 19 | level: i32 20 | }, 21 | } 22 | 23 | impl MessageCompression { 24 | /// Return the default compression algorithm. 25 | pub fn default() -> MessageCompression { 26 | MessageCompression::Gzip {level: 1} 27 | } 28 | 29 | /// Compress a serialized message with the defined algorithm. 30 | pub fn compress(self, message: &WireMessage) -> Result> { 31 | let json = message.to_gelf()?; 32 | 33 | Ok(match self { 34 | MessageCompression::None => json.into_bytes(), 35 | MessageCompression::Gzip {level} => { 36 | COMPRESSORS.with(|compressor| { 37 | compressor.borrow_mut().with(level, |compressor| { 38 | let bound = compressor.gzip_compress_bound(json.as_bytes().len()); 39 | 40 | let mut buffer: Vec = iter::repeat(0 ).take(bound).collect(); 41 | 42 | compressor.gzip_compress(json.as_bytes(), buffer.as_mut_slice()) 43 | .map_err(|err| { 44 | Error::CompressMessageFailed { 45 | compression_method: "gzip", 46 | compression_error: err.into() 47 | } 48 | }) 49 | .map(|size|buffer.truncate(size)) 50 | .map(move |_| buffer) 51 | }) 52 | })? 
53 | } 54 | 55 | MessageCompression::Zlib {level} => { 56 | COMPRESSORS.with(|compressor| { 57 | compressor.borrow_mut().with(level, |compressor| { 58 | let bound = compressor.zlib_compress_bound(json.as_bytes().len()); 59 | 60 | let mut buffer: Vec = iter::repeat(0 ).take(bound).collect(); 61 | 62 | compressor.zlib_compress(json.as_bytes(), buffer.as_mut_slice()) 63 | .map_err(|err| { 64 | Error::CompressMessageFailed { 65 | compression_method: "zlib", 66 | compression_error: err.into() 67 | } 68 | }) 69 | .map(|size|buffer.truncate(size)) 70 | .map(move |_| buffer) 71 | }) 72 | })? 73 | } 74 | }) 75 | } 76 | } 77 | 78 | #[derive(Default)] 79 | struct DeflaterCompressor { 80 | compressors: HashMap 81 | } 82 | 83 | impl DeflaterCompressor { 84 | pub fn new() -> Self { 85 | Self::default() 86 | } 87 | 88 | fn with(&mut self, level: i32, fa: F) -> R 89 | where F: Fn(&mut Compressor) -> R { 90 | 91 | let compressor = self.compressors.get_mut(&level); 92 | 93 | match compressor { 94 | None => { 95 | self.compressors.insert(level, Self::create_compression(level)); 96 | 97 | fa(self.compressors.get_mut(&level).expect("Should be present")) 98 | }, 99 | Some(c) => fa(c), 100 | } 101 | } 102 | 103 | fn create_compression(level: i32) -> Compressor { 104 | let compression_lvl = CompressionLvl::new(level).expect("Should be a valid level"); 105 | 106 | Compressor::new(compression_lvl) 107 | } 108 | } 109 | 110 | #[cfg(test)] 111 | mod test { 112 | use super::*; 113 | use serde_json::Value; 114 | use crate::{Logger, NullBackend, Message}; 115 | 116 | #[test] 117 | fn test_compression_none() { 118 | let logger = Logger::new(Box::new(NullBackend::new())).expect("Should not be an error"); 119 | 120 | let message = WireMessage::new(Message::new("Testing"), &logger); 121 | 122 | let compressor = MessageCompression::None; 123 | 124 | let actual = compressor.compress(&message).expect("Should success"); 125 | 126 | let actual: Value = 
serde_json::from_slice(actual.as_slice()).expect("Should success to parse"); 127 | let expected = serde_json::to_value(message).expect("Should success to encode"); 128 | 129 | assert_eq!(actual, expected, "Should not compress any data and be equal"); 130 | } 131 | 132 | #[test] 133 | fn test_compression_gzip() { 134 | let logger = Logger::new(Box::new(NullBackend::new())).expect("Should not be an error"); 135 | 136 | let message = WireMessage::new(Message::new("Testing"), &logger); 137 | 138 | for level in 1..=12 { 139 | 140 | let compressor = MessageCompression::Gzip {level}; 141 | 142 | let mut decompressor = libdeflater::Decompressor::new(); 143 | 144 | let actual = compressor.compress(&message).expect("Should success"); 145 | 146 | let mut buffer: Vec = iter::repeat(0).take(serde_json::to_vec(&message).unwrap().len()).collect(); 147 | 148 | let decoded = decompressor.gzip_decompress(actual.as_slice(), buffer.as_mut_slice()).expect("Should not throw an error"); 149 | 150 | buffer.truncate(decoded); 151 | 152 | let actual = buffer; 153 | 154 | let actual: Value = serde_json::from_slice(actual.as_slice()).expect("Should success to parse"); 155 | 156 | let expected = serde_json::to_value(&message).expect("Should success to encode"); 157 | 158 | assert_eq!(actual, expected, "Decoded data should be equal input"); 159 | } 160 | } 161 | 162 | #[test] 163 | fn test_compression_zlib() { 164 | let logger = Logger::new(Box::new(NullBackend::new())).expect("Should not be an error"); 165 | 166 | let message = WireMessage::new(Message::new("Testing"), &logger); 167 | 168 | for level in 1..=12 { 169 | 170 | let compressor = MessageCompression::Zlib {level}; 171 | 172 | let mut decompressor = libdeflater::Decompressor::new(); 173 | 174 | let actual = compressor.compress(&message).expect("Should success"); 175 | 176 | let mut buffer: Vec = iter::repeat(0).take(serde_json::to_vec(&message).unwrap().len()).collect(); 177 | 178 | let decoded = 
decompressor.zlib_decompress(actual.as_slice(), buffer.as_mut_slice()).expect("Should not throw an error"); 179 | 180 | buffer.truncate(decoded); 181 | 182 | let actual = buffer; 183 | 184 | let actual: Value = serde_json::from_slice(actual.as_slice()).expect("Should success to parse"); 185 | 186 | let expected = serde_json::to_value(&message).expect("Should success to encode"); 187 | 188 | assert_eq!(actual, expected, "Decoded data should be equal input"); 189 | } 190 | } 191 | 192 | #[test] 193 | fn test_concurrency_zlib() { 194 | 195 | for level in 1..=12 { 196 | 197 | let compressor = MessageCompression::Zlib {level}; 198 | 199 | loom::model(move || { 200 | let logger = loom::sync::Arc::new(Logger::new(Box::new(NullBackend::new())).expect("Should not be an error")); 201 | 202 | for _ in 0..3 { 203 | let logger = logger.clone(); 204 | 205 | loom::thread::spawn( move || { 206 | let message = WireMessage::new(Message::new("Testing"), &logger); 207 | 208 | let mut decompressor = libdeflater::Decompressor::new(); 209 | 210 | let actual = compressor.clone().compress(&message).expect("Should success"); 211 | 212 | let mut buffer: Vec = iter::repeat(0).take(serde_json::to_vec(&message).unwrap().len()).collect(); 213 | 214 | let decoded = decompressor.zlib_decompress(actual.as_slice(), buffer.as_mut_slice()).expect("Should not throw an error"); 215 | 216 | buffer.truncate(decoded); 217 | 218 | let actual = buffer; 219 | 220 | let actual: Value = serde_json::from_slice(actual.as_slice()).expect("Should success to parse"); 221 | 222 | let expected = serde_json::to_value(&message).expect("Should success to encode"); 223 | 224 | assert_eq!(actual, expected, "Decoded data should be equal input"); 225 | }); 226 | } 227 | }) 228 | 229 | } 230 | } 231 | 232 | #[test] 233 | fn test_concurrency_gzip() { 234 | 235 | for level in 1..=12 { 236 | let compressor = MessageCompression::Gzip { level }; 237 | 238 | loom::model(move || { 239 | let logger = 
loom::sync::Arc::new(Logger::new(Box::new(NullBackend::new())).expect("Should not be an error")); 240 | 241 | for _ in 0..3 { 242 | let logger = logger.clone(); 243 | 244 | loom::thread::spawn( move || { 245 | let message = WireMessage::new(Message::new("Testing"), &logger); 246 | 247 | let mut decompressor = libdeflater::Decompressor::new(); 248 | 249 | let actual = compressor.clone().compress(&message).expect("Should success"); 250 | 251 | let mut buffer: Vec = iter::repeat(0).take(serde_json::to_vec(&message).unwrap().len()).collect(); 252 | 253 | let decoded = decompressor.gzip_decompress(actual.as_slice(), buffer.as_mut_slice()).expect("Should not throw an error"); 254 | 255 | buffer.truncate(decoded); 256 | 257 | let actual = buffer; 258 | 259 | let actual: Value = serde_json::from_slice(actual.as_slice()).expect("Should success to parse"); 260 | 261 | let expected = serde_json::to_value(&message).expect("Should success to encode"); 262 | 263 | assert_eq!(actual, expected, "Decoded data should be equal input"); 264 | }); 265 | } 266 | }) 267 | 268 | } 269 | } 270 | 271 | } -------------------------------------------------------------------------------- /src/message/mod.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::borrow::Cow; 3 | use chrono::{DateTime, Utc, NaiveDateTime}; 4 | use log; 5 | 6 | pub use self::chunked_message::{ChunkSize, ChunkedMessage}; 7 | pub use self::compression::MessageCompression; 8 | pub use self::wire_message::WireMessage; 9 | 10 | use crate::{Level, util, Error}; 11 | use crate::errors::Result; 12 | use serde::de; 13 | use serde::de::Deserialize; 14 | use serde_with::with_prefix; 15 | 16 | mod chunked_message; 17 | mod compression; 18 | mod wire_message; 19 | 20 | /// Message is thre representation of a GELF message. 21 | /// 22 | /// `Message` provides a fluid setter and getter interface to all of GELF's 23 | /// features. 
Only the `host`-field is not available. It is managed by the 24 | /// `Logger`. 25 | /// 26 | /// A `Message` can also be constructed from a `log::Record`.
57 | pub fn new_with_level( 58 | short_message: S, 59 | level: Level, 60 | ) -> Self 61 | where 62 | S: Into> + AsRef 63 | { 64 | Message { 65 | short_message: short_message.into(), 66 | level, 67 | full_message: None, 68 | timestamp: None, 69 | metadata: HashMap::new(), 70 | } 71 | } 72 | 73 | /// Return the `short_message` 74 | pub fn short_message(&self) -> &Cow<'a, str> { 75 | &self.short_message 76 | } 77 | 78 | /// Set the `short_message` 79 | pub fn set_short_message( 80 | &mut self, 81 | msg: S 82 | ) -> &mut Self 83 | where 84 | S: Into> + AsRef 85 | { 86 | self.short_message = msg.into(); 87 | self 88 | } 89 | 90 | /// Return the `full_message` 91 | pub fn full_message(&self) -> &Option> { 92 | &self.full_message 93 | } 94 | 95 | /// Set the `full_message` 96 | pub fn set_full_message( 97 | &mut self, 98 | msg: S 99 | ) -> &mut Self 100 | where 101 | S: Into> + AsRef 102 | { 103 | self.full_message = Some(msg.into()); 104 | self 105 | } 106 | 107 | // Clear the `full_message` 108 | pub fn clear_full_message(&mut self) -> &mut Self { 109 | self.full_message = None; 110 | self 111 | } 112 | 113 | /// Return the `timestamp` 114 | pub fn timestamp(&self) -> &Option> { 115 | &self.timestamp 116 | } 117 | 118 | /// Set the `timestamp` 119 | pub fn set_timestamp(&mut self, ts: DateTime) -> &mut Self { 120 | self.timestamp = Some(ts); 121 | self 122 | } 123 | 124 | /// Clear the `timestamp` 125 | pub fn clear_timestamp(&mut self) -> &mut Self { 126 | self.timestamp = None; 127 | self 128 | } 129 | 130 | /// Return the `level` 131 | pub fn level(&self) -> Level { 132 | self.level 133 | } 134 | 135 | /// Set the `level` 136 | pub fn set_level(&mut self, level: Level) -> &mut Self { 137 | self.level = level; 138 | self 139 | } 140 | 141 | /// Return a metadata field with given key 142 | pub fn metadata(&self, key: &'a str) -> Option<&Cow<'a, str>> { 143 | self.metadata.get(key) 144 | } 145 | 146 | /// Return all metadata 147 | pub fn all_metadata(&self) -> &HashMap, 
Cow<'a, str>> { 148 | &self.metadata 149 | } 150 | 151 | /// Set a metadata field with given key to value 152 | pub fn set_metadata( 153 | &mut self, 154 | key: S, 155 | value: T, 156 | ) -> Result<&mut Self> 157 | where 158 | S: Into> + AsRef, 159 | T: Into> + AsRef, 160 | { 161 | let key = key.into(); 162 | 163 | if key == "id" { 164 | return Err(Error::IllegalNameForAdditional { name: key.into() }.into()); 165 | } 166 | 167 | self.metadata.insert(key, value.into()); 168 | 169 | Ok(self) 170 | } 171 | } 172 | 173 | impl<'a> From<&'a log::Record<'a>> for Message<'a> { 174 | /// Create a `Message` from given `log::LogRecord` including all metadata 175 | fn from(record: &'a log::Record) -> Message<'a> { 176 | // Create message with given text and level 177 | let short_message = format!("{}", record.args()); 178 | 179 | let mut msg = Message::new_with_level( 180 | short_message, 181 | record.level().into(), 182 | ); 183 | 184 | msg.set_timestamp(Utc::now()); 185 | 186 | // Add default metadata, and ignore the results (`let _ = ...`) as all keys are valid 187 | // and set_metadata only fails on invalid keys 188 | let _ = msg.set_metadata("file", record.file().unwrap_or("(none)").to_string()); 189 | let _ = msg.set_metadata("line", record.line().map(|v| v.to_string()).unwrap_or_else(|| "(none)".into())); 190 | let _ = msg.set_metadata("module_path", record.module_path().unwrap_or("(none)").to_string()); 191 | let _ = msg.set_metadata("process_id", util::pid().to_string()); 192 | 193 | msg 194 | } 195 | } 196 | 197 | with_prefix!(prefix_metadata "_"); 198 | 199 | fn parse_unix_seconds<'de, D>(d: D) -> std::result::Result>, D::Error> 200 | where D: de::Deserializer<'de> 201 | { 202 | let value: Option = Deserialize::deserialize(d)?; 203 | 204 | let value = match value { 205 | Some(v) => v, 206 | None => return Ok(None) 207 | }; 208 | 209 | let seconds = value.trunc() as i64; 210 | let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32; 211 | let ndt = 
NaiveDateTime::from_timestamp_opt(seconds, nsecs); 212 | if let Some(ndt) = ndt { 213 | Ok(Some(DateTime::::from_utc(ndt, Utc))) 214 | } else { 215 | Err(de::Error::custom(format!( 216 | "Invalid or out of range value '{}' for DateTime", 217 | value 218 | ))) 219 | } 220 | } 221 | 222 | #[cfg(test)] 223 | mod test { 224 | use super::*; 225 | use rand::{thread_rng, Rng}; 226 | use rand::distributions::{Alphanumeric, Uniform}; 227 | use serde_json::de::SliceRead; 228 | use serde_json::StreamDeserializer; 229 | use chrono::Timelike; 230 | 231 | fn random_message() -> Message<'static> { 232 | let short_message: String = thread_rng() 233 | .sample_iter(&Alphanumeric) 234 | .take(100) 235 | .collect(); 236 | 237 | let full_message: String = thread_rng() 238 | .sample_iter(&Alphanumeric) 239 | .take(200) 240 | .collect(); 241 | 242 | let mut rng = thread_rng(); 243 | 244 | let int = rng.sample::(Uniform::new_inclusive(0, 7)); 245 | 246 | let mut message = Message::new(short_message); 247 | 248 | message.set_full_message(full_message); 249 | message.set_level(Level::from(int)); 250 | 251 | random_metadata().into_iter().for_each(|pair| { 252 | message.set_metadata(pair.0, pair.1).unwrap(); 253 | }); 254 | 255 | message 256 | } 257 | 258 | fn random_metadata() -> HashMap { 259 | let mut rng = thread_rng(); 260 | 261 | let int = rng.sample::(Uniform::new_inclusive(5, 30)); 262 | 263 | std::iter::repeat_with(|| { 264 | let value: String = thread_rng() 265 | .sample_iter(&Alphanumeric) 266 | .take(200) 267 | .collect(); 268 | 269 | let key: String = thread_rng() 270 | .sample_iter(&Alphanumeric) 271 | .take(10) 272 | .collect(); 273 | 274 | (key, value) 275 | }).take(int) 276 | .fold(HashMap::new(), |mut acc, m| { 277 | acc.insert(m.0, m.1); 278 | 279 | acc 280 | }) 281 | } 282 | 283 | fn random_messages(amount: usize) -> impl Iterator> { 284 | std::iter::repeat_with(random_message).take(amount) 285 | } 286 | 287 | #[test] 288 | fn test_deserialize_valid_json() { 289 | let 
message = random_message(); 290 | 291 | let input = serde_json::to_string(&message).unwrap(); 292 | 293 | let actual_message: Message = serde_json::from_str(input.as_str()).expect("No erro parsing"); 294 | 295 | assert_eq!(actual_message.short_message, message.short_message); 296 | assert_eq!(actual_message.full_message, message.full_message); 297 | assert_eq!(actual_message.timestamp, message.timestamp); 298 | assert_eq!(actual_message.metadata, message.metadata); 299 | assert_eq!(actual_message.level, message.level); 300 | } 301 | 302 | #[test] 303 | fn test_deserialize_multiple_valid_jsons() { 304 | let messages = random_messages(10).collect::>(); 305 | 306 | let input = messages.clone().into_iter() 307 | .map(|m| serde_json::to_string(&m).unwrap()) 308 | .fold(String::new(), |mut acc, v| { 309 | acc.push_str(v.as_str()); 310 | 311 | acc 312 | }); 313 | 314 | let read = SliceRead::new(input.as_bytes()); 315 | 316 | let mut stream: StreamDeserializer = serde_json::StreamDeserializer::new(read); 317 | 318 | let mut actual_parsed: Vec = vec![]; 319 | 320 | while let Some(m) = stream.next() { 321 | actual_parsed.push(m.unwrap()); 322 | } 323 | 324 | assert_eq!(actual_parsed, messages); 325 | assert_eq!(stream.byte_offset(), input.len()); 326 | } 327 | 328 | #[test] 329 | fn test_parse_timestamp_json() { 330 | let raw_message = r#" 331 | {"version": "1.1", 332 | "short_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel", 333 | "full_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n", 334 | "timestamp": 1578669969.108120000, 335 | "level": 6, 336 | "_thread_name": "Thread-11", 337 | "_logger_name": "org.springframework.integration.endpoint.EventDrivenConsumer"} 338 | "#; 339 | 340 | let actual_message: Message = serde_json::from_str(raw_message).expect("Parse with success"); 341 | 342 | let 
actual_timestamp = actual_message.timestamp().as_ref().expect("Timestamp"); 343 | assert_eq!(actual_timestamp.timestamp(), 1_578_669_969); 344 | assert!(actual_timestamp.nanosecond() < 108_120_000); 345 | 346 | assert_eq!(actual_message.full_message().as_ref().expect("Full Message"), "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n"); 347 | 348 | assert_eq!(actual_message.level(), Level::Informational); 349 | assert_eq!(actual_message.metadata("thread_name").expect("thread name"), "Thread-11"); 350 | assert_eq!(actual_message.metadata("logger_name").expect("logger name"), "org.springframework.integration.endpoint.EventDrivenConsumer"); 351 | } 352 | 353 | } 354 | -------------------------------------------------------------------------------- /src/message/wire_message.rs: -------------------------------------------------------------------------------- 1 | use failure; 2 | use serde; 3 | use serde::ser::SerializeMap; 4 | use serde_json; 5 | use std::collections::HashMap; 6 | use crate::{Message, Logger, MessageCompression, ChunkSize}; 7 | use crate::errors::Result; 8 | use crate::errors::Error; 9 | use crate::message::ChunkedMessage; 10 | 11 | /// WireMessage is the representation of a fully assembled GELF message 12 | /// 13 | /// A fully assembled requires information only present in the `Logger`: 14 | /// Both the local hostname and possible missing metadata fields need to be 15 | /// added to the message. 16 | /// 17 | /// A WireMessage can be serialized to GELF/JSON (with and without compression) 18 | /// and is the abstraction passed to the transportation backends. 19 | pub struct WireMessage<'a> { 20 | host: &'a str, 21 | message: Message<'a>, 22 | } 23 | 24 | impl<'a> WireMessage<'a> { 25 | /// Construct a new wire message 26 | /// 27 | /// The logger is required for populating the `host`-field and metadata 28 | /// fields which were not added to the message. 
29 | pub fn new(mut msg: Message<'a>, logger: &'a Logger) -> Self { 30 | // Filter all fields missing from the message 31 | let additionals_from_default: HashMap<&String, &String> = logger 32 | .default_metadata() 33 | .iter() 34 | .filter(|&(key, _)| !msg.metadata.contains_key(key.as_str())) 35 | .collect(); 36 | 37 | // add the missing metadata 38 | for (key, value) in additionals_from_default { 39 | msg.set_metadata(key.as_str(), value.as_str()).ok(); 40 | } 41 | 42 | WireMessage { 43 | host: logger.hostname(), 44 | message: msg, 45 | } 46 | } 47 | 48 | /// Return a GELF/JSON string of this message 49 | pub fn to_gelf(&self) -> Result { 50 | serde_json::to_string(self).map_err(|e| { 51 | failure::Error::from(e) 52 | .context(Error::SerializeMessageFailed) 53 | .into() 54 | }) 55 | } 56 | 57 | /// Return a compressed GELF/JSON string of this message 58 | pub fn to_compressed_gelf(&self, compression: MessageCompression) -> Result> { 59 | compression.compress(&self) 60 | } 61 | 62 | /// Serialize the messages and prepare it for chunking 63 | pub fn to_chunked_message( 64 | &self, 65 | chunk_size: ChunkSize, 66 | compression: MessageCompression, 67 | ) -> Result { 68 | ChunkedMessage::new(chunk_size, self.to_compressed_gelf(compression)?) 
69 | } 70 | } 71 | 72 | impl<'a> serde::Serialize for WireMessage<'a> { 73 | /// Serialize the message to a GELF/JSON string 74 | fn serialize(&self, serializer: S) -> ::std::result::Result 75 | where 76 | S: serde::Serializer, 77 | { 78 | let mut map = serializer.serialize_map(None)?; 79 | 80 | map.serialize_key("version")?; 81 | map.serialize_value("1.1")?; 82 | 83 | map.serialize_key("host")?; 84 | map.serialize_value(self.host)?; 85 | 86 | map.serialize_key("short_message")?; 87 | map.serialize_value(&self.message.short_message())?; 88 | 89 | map.serialize_key("level")?; 90 | let level = self.message.level as u8; 91 | map.serialize_value(&level)?; 92 | 93 | if self.message.full_message().is_some() { 94 | map.serialize_key("full_message")?; 95 | map.serialize_value(&self.message.full_message())?; 96 | } 97 | 98 | if let Some(datetime) = self.message.timestamp() { 99 | let value = datetime.timestamp_millis() as f64 / 1000.0; 100 | 101 | map.serialize_key("timestamp")?; 102 | map.serialize_value(&value)?; 103 | } 104 | 105 | for (key, value) in self.message.all_metadata().iter() { 106 | let key = "_".to_string() + key; 107 | map.serialize_key(&key)?; 108 | map.serialize_value(value)?; 109 | } 110 | 111 | map.end() 112 | } 113 | } 114 | 115 | #[cfg(test)] 116 | mod tests { 117 | use super::*; 118 | 119 | use chrono::{TimeZone, Utc}; 120 | use crate::Level; 121 | 122 | #[test] 123 | fn wire_message_serialization() { 124 | let mut message = Message::new_with_level("short", Level::Alert); 125 | message.set_full_message("full"); 126 | 127 | let datetime = Utc.ymd(2000, 1, 1).and_hms_micro(1, 2, 3, 12_345); 128 | message.set_timestamp(datetime); 129 | 130 | message.set_metadata("key1", "value1").unwrap(); 131 | message.set_metadata("key2", "value2").unwrap(); 132 | 133 | let wire_msg = WireMessage { 134 | host: "host_value", 135 | message, 136 | }; 137 | 138 | let json = serde_json::to_value(wire_msg).expect("Failed to serialize WireMessage"); 139 | 140 | 
assert_eq!(Some(json!("1.1")), json.get("version").cloned()); 141 | assert_eq!(Some(json!("host_value")), json.get("host").cloned()); 142 | assert_eq!(Some(json!("short")), json.get("short_message").cloned()); 143 | assert_eq!(Some(json!("full")), json.get("full_message").cloned()); 144 | assert_eq!(Some(json!(1)), json.get("level").cloned()); // Level::Alert = 1 145 | 146 | let timestamp_secs = datetime.timestamp(); 147 | let expected_timestamp = timestamp_secs as f64 + 0.012; 148 | assert_eq!( 149 | Some(json!(&expected_timestamp)), 150 | json.get("timestamp").cloned() 151 | ); 152 | 153 | assert_eq!(Some(json!("value1")), json.get("_key1").cloned()); 154 | assert_eq!(Some(json!("value2")), json.get("_key2").cloned()); 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | use libc; 2 | 3 | /// Return the process-id (pid) of the current process 4 | pub fn pid() -> i32 { 5 | unsafe { libc::getpid() } 6 | } 7 | --------------------------------------------------------------------------------