├── .cargo └── config ├── .gitignore ├── .gitmodules ├── CONFIG.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE.md ├── README.md ├── byztime-sys ├── Cargo.toml ├── README.md ├── build.rs └── src │ └── lib.rs ├── byztime ├── Cargo.toml ├── README.md └── src │ └── lib.rs ├── byztimed ├── .cargo │ └── config ├── Cargo.toml ├── README.md ├── benches │ └── main.rs ├── build.rs ├── src │ ├── aead.rs │ ├── config.rs │ ├── cookie.rs │ ├── core.rs │ ├── lib.rs │ ├── logging.rs │ ├── main.rs │ ├── ntske.rs │ ├── peer_name.rs │ ├── store.rs │ ├── time_client.rs │ ├── time_server.rs │ ├── time_test.rs │ └── wire.proto └── tests │ ├── 1node.rs │ ├── 4node_local.rs │ ├── common │ └── mod.rs │ └── test_certs │ ├── .gitignore │ ├── alice.crt │ ├── alice.csr │ ├── alice.key │ ├── bob.crt │ ├── bob.csr │ ├── bob.key │ ├── charlie.crt │ ├── charlie.csr │ ├── charlie.key │ ├── dave.crt │ ├── dave.csr │ ├── dave.key │ ├── gen-x509.sh │ ├── gorgias.crt │ ├── gorgias.csr │ ├── gorgias.key │ ├── openssl.cnf │ ├── trent.crt │ ├── trent.csr │ ├── trent.key │ └── trent.srl └── shell.nix /.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = ["-Ctarget-feature=+aes"] 3 | rustdocflags = ["-Ctarget-feature=+aes"] 4 | 5 | [target.x86_64-unknown-linux-gnu] 6 | rustflags = ["-Ctarget-cpu=sandybridge", "-Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3"] 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | *~ 3 | \#*\# 4 | .#* 5 | result 6 | byztime-sys/Cargo.lock 7 | byztimed/.cargo/registry -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "byztime-sys/libbyztime"] 2 | path = byztime-sys/libbyztime 3 | url = https://github.com/akamai-contrib/libbyztime 4 | -------------------------------------------------------------------------------- /CONFIG.md: -------------------------------------------------------------------------------- 1 | # Configuring byztimed 2 | 3 | This file documents the configuration file used by 4 | byztimed. Byztimed's configuration file is JSON. Here is an example 5 | that uses every available option: 6 | 7 | ``` 8 | { 9 | "timedata": "/path/to/my/timedata", 10 | "secret_store": "/path/to/my/secret_store/", 11 | "ro_mode": false, 12 | "bind_host": "198.51.100.4", 13 | "bind_port": 1021, 14 | "key": "/path/to/alice.example.com.key", 15 | "cert": "/path/to/alice.example.com.crt", 16 | "authorities": "/path/to/trent.pem", 17 | "poll_interval": 64, 18 | "drift_ppb": 100000, 19 | "logging": { 20 | "STDERR": "error", 21 | "/var/log/byztimed.log": "info" 22 | }, 23 | "log_format": "{d} {l} {t} - {m}{n}", 24 | "peers": { 25 | "bob": { 26 | "host": "192.0.2.1" 27 | "port": 1021, 28 | "cert_name": "bob.example.com", 29 | "dist": 300000, 30 | }, 31 | "charlie": { 32 | "host": "charlie.example.com", 33 | "port": 1021, 34 | "dist": 250000, 35 | "authorities": "/path/to/a_different_ca.pem", 36 | }, 37 | "dave": { 38 | "host": "dave.example.com", 39 | "port": 1021, 40 | "dist": 750000, 41 | } 42 | } 43 | } 44 | ``` 45 | 46 | ## Global section 47 | 48 | The following options can appear at the top level of the file. 49 | 50 | ### `timedata` 51 | 52 | **string, required**. Path to the timedata file. 
This file will be 53 | mapped into shared memory and used for communication between byztimed 54 | and its clients. If the file not exist, it will be created, and if it 55 | appears to be corrupt it will be rewritten. The file must be writable 56 | by byztimed and readable by its clients. 57 | 58 | ### `secret_store` 59 | 60 | **string, required**. Path to the secret store. Must be a directory 61 | and must exist; byztimed will populate it on first startup but the 62 | user is responsible for setting appropriate permissions on the 63 | directory. 64 | 65 | The secret store is just a cache and its contents can always be 66 | repopulated if lost, though if you have a very large number of nodes 67 | this may be time-consuming. Paranoid users may wish to place it on a 68 | tmpfs to prevent its contents from touching disk. 69 | 70 | ### `ro_mode` 71 | 72 | **boolean, optional**; defaults to `false`. When true, the server 73 | operates in read-only mode, polling its peers but not participating in 74 | consensus. 75 | 76 | ### `bind_host` 77 | 78 | **string, optional**; defaults to wildcard interface. Must be a 79 | well-formed IPv4 or IPv6 address. Tells byztimed what interface to 80 | bind to for its server. 81 | 82 | ### `bind_port` 83 | 84 | **integer, required** unless running in read-only mode. Tells byztimed 85 | what port to bind to for its server. The byztime protocol uses both 86 | TCP (for key establishment) and UDP (for time packets). The same port 87 | number is used for both. 88 | 89 | ### `key` 90 | 91 | **string, required** unless running in read-only mode. Path to a file 92 | containing your server's private key, in PKCS#8 PEM format. 93 | 94 | *Note*: well-formed PKCS#8 PEM keys begin with `-----BEGIN PRIVATE 95 | KEY-----`. If your key has some other heading, like `-----BEGIN RSA 96 | PRIVATE KEY-----` or `-----BEGIN EC PARAMETERS-----` it's in the 97 | wrong format and won't work. Pipe it through `openssl pkcs8 -topk8 98 | -nocrypt` to convert it. (See ) 99 | 100 | ### `cert` 101 | 102 | **string, required** unless running in read-only mode. Path to a file 103 | containing your server's X.509 certificate and any intermediate CAs, in 104 | PEM format. 105 | 106 | ### `authorities` 107 | 108 | **string, required** unless specified individually 109 | for every peer. Path to a file containing a list of trusted 110 | certificate authorities in PEM format. 111 | 112 | ### `poll_interval` 113 | 114 | **float, optional**, defaults to *8.0*. How often to poll each peer, 115 | in seconds. 116 | 117 | ### `drift_ppb` 118 | 119 | **integer, optional**, defaults to 250000. Upper bound on how quickly 120 | our system clock drifts, in parts per billion. Used in computing error 121 | bounds. 122 | 123 | ### `logging` 124 | 125 | **map, optional**, defaults to `{"STDERR": "info"}`. Each key is 126 | either the path to a log file or the special, case-sensitive string 127 | `"STDOUT"` or `"STDERR"`. Each value is one of `"error"`, `"warn"`, 128 | `"info"`, `"debug"`, or `"trace"`, specifying the minimum severity of 129 | log messages to output. Only debug builds will ever emit debug or 130 | trace messages. 131 | 132 | ### `log_format` 133 | 134 | **string, optional**, defaults to `"{d} {l} {t} - {m}{n}"`. A 135 | [log4rs format string](https://docs.rs/log4rs/0.12.0/log4rs/encode/pattern/index.html#formatters) 136 | controlling the format of log files. 137 | 138 | ### `peers` 139 | 140 | **map**; see next section for its format. 
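Before moving on to the per-peer options, here is a small illustrative sketch of how the global options above could be modeled and sanity-checked with serde (byztimed itself parses its JSON configuration with serde/serde_json). The struct, the field defaults, and the `/etc/byztimed.json` path are assumptions made for this example only; they are not byztimed's actual `Config` type from `src/config.rs`.

```
// Illustrative only: a serde model of the global options documented above.
// Field names follow this document; the defaults mirror the stated ones.
use serde::Deserialize;
use std::collections::HashMap;
use std::path::PathBuf;

#[derive(Deserialize, Debug)]
struct GlobalConfig {
    timedata: PathBuf,
    secret_store: PathBuf,
    #[serde(default)]
    ro_mode: bool,
    bind_host: Option<String>,
    bind_port: Option<u16>,
    key: Option<PathBuf>,
    cert: Option<PathBuf>,
    authorities: Option<PathBuf>,
    #[serde(default = "default_poll_interval")]
    poll_interval: f64,
    #[serde(default = "default_drift_ppb")]
    drift_ppb: u64,
    #[serde(default = "default_logging")]
    logging: HashMap<String, String>,
    log_format: Option<String>,
    peers: HashMap<String, serde_json::Value>, // per-peer format: see next section
}

fn default_poll_interval() -> f64 {
    8.0
}

fn default_drift_ppb() -> u64 {
    250_000
}

fn default_logging() -> HashMap<String, String> {
    HashMap::from([("STDERR".to_string(), "info".to_string())])
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical path; point this at your own configuration file.
    let raw = std::fs::read_to_string("/etc/byztimed.json")?;
    let cfg: GlobalConfig = serde_json::from_str(&raw)?;
    // Options that are "required unless running in read-only mode" can be
    // cross-checked here before handing the file to byztimed.
    if !cfg.ro_mode && (cfg.bind_port.is_none() || cfg.key.is_none() || cfg.cert.is_none()) {
        eprintln!("bind_port, key, and cert are required unless ro_mode is true");
    }
    println!("{:#?}", cfg);
    Ok(())
}
```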
141 | 142 | ## Peers section 143 | 144 | Each key in the `"peers"` map is an arbitrary string identifying the 145 | peer; it will be used in log messages. Each value is another map 146 | containing the following entries. 147 | 148 | ### `host` 149 | 150 | **string, required**. IP address or hostname where the peer is listening. 151 | 152 | ### `port` 153 | 154 | **integer, required**. Port number where the peer is listening. 155 | 156 | ### `dist` 157 | 158 | **integer, optional**, defaults to 0. A lower bound on this peer's 159 | physical distance from us, given in meters. Setting this option will 160 | allow tighter error bounds to be achieved. (Again, set this to a 161 | *lower bound* to accomodate any uncertainty. The higher it is set, 162 | the tighter the error bounds that will be reported. If it is set 163 | too high, the error bounds may then become invalid). 164 | 165 | ### `cert_name` 166 | 167 | **string, optional**, defaults to being the same as `host`. The DNS 168 | name to expect when validating the peer's certificate. (Must be a DNS 169 | name; validation of certificates issued to IP addresses is not 170 | currently supported). 171 | 172 | ### `authorities` 173 | 174 | **string, optional**, defaults to the setting from the global section. 175 | Path to a file containing a list of certificate authorities, in PEM 176 | format, trusted to identify this peer. -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ "byztime-sys", "byztimed", "byztime" ] -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Apache License 2 | ============== 3 | 4 | _Version 2.0, January 2004_ 5 | _<>_ 6 | 7 | ### Terms and Conditions for use, reproduction, and distribution 8 | 9 | #### 1. Definitions 10 | 11 | “License” shall mean the terms and conditions for use, reproduction, and 12 | distribution as defined by Sections 1 through 9 of this document. 13 | 14 | “Licensor” shall mean the copyright owner or entity authorized by the copyright 15 | owner that is granting the License. 16 | 17 | “Legal Entity” shall mean the union of the acting entity and all other entities 18 | that control, are controlled by, or are under common control with that entity. 19 | For the purposes of this definition, “control” means **(i)** the power, direct or 20 | indirect, to cause the direction or management of such entity, whether by 21 | contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the 22 | outstanding shares, or **(iii)** beneficial ownership of such entity. 23 | 24 | “You” (or “Your”) shall mean an individual or Legal Entity exercising 25 | permissions granted by this License. 26 | 27 | “Source” form shall mean the preferred form for making modifications, including 28 | but not limited to software source code, documentation source, and configuration 29 | files. 30 | 31 | “Object” form shall mean any form resulting from mechanical transformation or 32 | translation of a Source form, including but not limited to compiled object code, 33 | generated documentation, and conversions to other media types. 
34 | 35 | “Work” shall mean the work of authorship, whether in Source or Object form, made 36 | available under the License, as indicated by a copyright notice that is included 37 | in or attached to the work (an example is provided in the Appendix below). 38 | 39 | “Derivative Works” shall mean any work, whether in Source or Object form, that 40 | is based on (or derived from) the Work and for which the editorial revisions, 41 | annotations, elaborations, or other modifications represent, as a whole, an 42 | original work of authorship. For the purposes of this License, Derivative Works 43 | shall not include works that remain separable from, or merely link (or bind by 44 | name) to the interfaces of, the Work and Derivative Works thereof. 45 | 46 | “Contribution” shall mean any work of authorship, including the original version 47 | of the Work and any modifications or additions to that Work or Derivative Works 48 | thereof, that is intentionally submitted to Licensor for inclusion in the Work 49 | by the copyright owner or by an individual or Legal Entity authorized to submit 50 | on behalf of the copyright owner. For the purposes of this definition, 51 | “submitted” means any form of electronic, verbal, or written communication sent 52 | to the Licensor or its representatives, including but not limited to 53 | communication on electronic mailing lists, source code control systems, and 54 | issue tracking systems that are managed by, or on behalf of, the Licensor for 55 | the purpose of discussing and improving the Work, but excluding communication 56 | that is conspicuously marked or otherwise designated in writing by the copyright 57 | owner as “Not a Contribution.” 58 | 59 | “Contributor” shall mean Licensor and any individual or Legal Entity on behalf 60 | of whom a Contribution has been received by Licensor and subsequently 61 | incorporated within the Work. 62 | 63 | #### 2. Grant of Copyright License 64 | 65 | Subject to the terms and conditions of this License, each Contributor hereby 66 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, 67 | irrevocable copyright license to reproduce, prepare Derivative Works of, 68 | publicly display, publicly perform, sublicense, and distribute the Work and such 69 | Derivative Works in Source or Object form. 70 | 71 | #### 3. Grant of Patent License 72 | 73 | Subject to the terms and conditions of this License, each Contributor hereby 74 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, 75 | irrevocable (except as stated in this section) patent license to make, have 76 | made, use, offer to sell, sell, import, and otherwise transfer the Work, where 77 | such license applies only to those patent claims licensable by such Contributor 78 | that are necessarily infringed by their Contribution(s) alone or by combination 79 | of their Contribution(s) with the Work to which such Contribution(s) was 80 | submitted. If You institute patent litigation against any entity (including a 81 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a 82 | Contribution incorporated within the Work constitutes direct or contributory 83 | patent infringement, then any patent licenses granted to You under this License 84 | for that Work shall terminate as of the date such litigation is filed. 85 | 86 | #### 4. 
Redistribution 87 | 88 | You may reproduce and distribute copies of the Work or Derivative Works thereof 89 | in any medium, with or without modifications, and in Source or Object form, 90 | provided that You meet the following conditions: 91 | 92 | * **(a)** You must give any other recipients of the Work or Derivative Works a copy of 93 | this License; and 94 | * **(b)** You must cause any modified files to carry prominent notices stating that You 95 | changed the files; and 96 | * **(c)** You must retain, in the Source form of any Derivative Works that You distribute, 97 | all copyright, patent, trademark, and attribution notices from the Source form 98 | of the Work, excluding those notices that do not pertain to any part of the 99 | Derivative Works; and 100 | * **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any 101 | Derivative Works that You distribute must include a readable copy of the 102 | attribution notices contained within such NOTICE file, excluding those notices 103 | that do not pertain to any part of the Derivative Works, in at least one of the 104 | following places: within a NOTICE text file distributed as part of the 105 | Derivative Works; within the Source form or documentation, if provided along 106 | with the Derivative Works; or, within a display generated by the Derivative 107 | Works, if and wherever such third-party notices normally appear. The contents of 108 | the NOTICE file are for informational purposes only and do not modify the 109 | License. You may add Your own attribution notices within Derivative Works that 110 | You distribute, alongside or as an addendum to the NOTICE text from the Work, 111 | provided that such additional attribution notices cannot be construed as 112 | modifying the License. 113 | 114 | You may add Your own copyright statement to Your modifications and may provide 115 | additional or different license terms and conditions for use, reproduction, or 116 | distribution of Your modifications, or for any such Derivative Works as a whole, 117 | provided Your use, reproduction, and distribution of the Work otherwise complies 118 | with the conditions stated in this License. 119 | 120 | #### 5. Submission of Contributions 121 | 122 | Unless You explicitly state otherwise, any Contribution intentionally submitted 123 | for inclusion in the Work by You to the Licensor shall be under the terms and 124 | conditions of this License, without any additional terms or conditions. 125 | Notwithstanding the above, nothing herein shall supersede or modify the terms of 126 | any separate license agreement you may have executed with Licensor regarding 127 | such Contributions. 128 | 129 | #### 6. Trademarks 130 | 131 | This License does not grant permission to use the trade names, trademarks, 132 | service marks, or product names of the Licensor, except as required for 133 | reasonable and customary use in describing the origin of the Work and 134 | reproducing the content of the NOTICE file. 135 | 136 | #### 7. Disclaimer of Warranty 137 | 138 | Unless required by applicable law or agreed to in writing, Licensor provides the 139 | Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, 140 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, 141 | including, without limitation, any warranties or conditions of TITLE, 142 | NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are 143 | solely responsible for determining the appropriateness of using or 144 | redistributing the Work and assume any risks associated with Your exercise of 145 | permissions under this License. 146 | 147 | #### 8. Limitation of Liability 148 | 149 | In no event and under no legal theory, whether in tort (including negligence), 150 | contract, or otherwise, unless required by applicable law (such as deliberate 151 | and grossly negligent acts) or agreed to in writing, shall any Contributor be 152 | liable to You for damages, including any direct, indirect, special, incidental, 153 | or consequential damages of any character arising as a result of this License or 154 | out of the use or inability to use the Work (including but not limited to 155 | damages for loss of goodwill, work stoppage, computer failure or malfunction, or 156 | any and all other commercial damages or losses), even if such Contributor has 157 | been advised of the possibility of such damages. 158 | 159 | #### 9. Accepting Warranty or Additional Liability 160 | 161 | While redistributing the Work or Derivative Works thereof, You may choose to 162 | offer, and charge a fee for, acceptance of support, warranty, indemnity, or 163 | other liability obligations and/or rights consistent with this License. However, 164 | in accepting such obligations, You may act only on Your own behalf and on Your 165 | sole responsibility, not on behalf of any other Contributor, and only if You 166 | agree to indemnify, defend, and hold each Contributor harmless for any liability 167 | incurred by, or claims asserted against, such Contributor by reason of your 168 | accepting any such warranty or additional liability. 169 | 170 | _END OF TERMS AND CONDITIONS_ 171 | 172 | ### APPENDIX: How to apply the Apache License to your work 173 | 174 | To apply the Apache License to your work, attach the following boilerplate 175 | notice, with the fields enclosed by brackets `[]` replaced with your own 176 | identifying information. (Don't include the brackets!) The text should be 177 | enclosed in the appropriate comment syntax for the file format. We also 178 | recommend that a file or class name and description of purpose be included on 179 | the same “printed page” as the copyright notice for easier identification within 180 | third-party archives. 181 | 182 | Copyright [yyyy] [name of copyright owner] 183 | 184 | Licensed under the Apache License, Version 2.0 (the "License"); 185 | you may not use this file except in compliance with the License. 186 | You may obtain a copy of the License at 187 | 188 | http://www.apache.org/licenses/LICENSE-2.0 189 | 190 | Unless required by applicable law or agreed to in writing, software 191 | distributed under the License is distributed on an "AS IS" BASIS, 192 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 193 | See the License for the specific language governing permissions and 194 | limitations under the License. 195 | 196 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Byztime 2 | 3 | Byztime is a 4 | [Byzantine-fault-tolerant](https://en.wikipedia.org/wiki/Byzantine_fault) 5 | protocol for synchronizing time among a group of peers, without 6 | reliance on any external time authority. 
The time kept by Byztime is 7 | simply a counter that advances at a rate of something very close to 8 | one unit per second, such that all nodes are in close agreement as 9 | to its current value. Byztime timestamps have no well-defined epoch. 10 | If all nodes have correctly-set system clocks when first 11 | initialized, then Byztime will initially match POSIX time, but will 12 | eventually drift away from it since 1. there is no external source 13 | keeping it in sync, and 2. Byztime's timescale lacks leap seconds. 14 | 15 | Byztime's algorithm is focused on keeping its *worst-case* error --- 16 | the absolute distance between any two correct nodes' estimate of the 17 | current time --- as small as possible. It achieves this somewhat at 18 | the expense of *typical-case* error, using only the single 19 | highest-quality time sample from each peer rather than combining many 20 | samples to smooth out network jitter. In the worst case, the 21 | difference between two correct nodes' clocks will asymptotically 22 | converge toward 4δ + 4ερ, where δ is the one-way network latency 23 | between the two farthest-spaced peers, ε is the (dimensionless) drift 24 | rate of correct nodes' hardware clocks, and ρ is the polling 25 | interval. If all nodes behave honestly, the bound improves to 2δ + 2ερ 26 | and will be reached after a single round of the protocol rather than 27 | converging asymptotically. 28 | 29 | Byztimed runs completely independently of NTP, and a bad NTP time 30 | source will not disrupt Byztime. This comes with a minor caveat: just 31 | before the daemon shuts down it records the the current offset between 32 | Byztime time and system time, and uses this offset to re-initialize 33 | its estimate following a reboot. The only time this particularly 34 | matters is if many nodes reboot simultaneously and the network loses 35 | quorum. What happens in this case depends somewhat on NTP and what 36 | order things start up in at boot time. If Byztime starts before NTP 37 | starts and shuts down only after NTP shuts down, then the continuity 38 | of the Byztime timescale will be as good as the RTC and the CMOS 39 | battery of the restarting nodes, but no better. On the other hand if 40 | NTP is allowed to stabilize the system clock before Byztime starts up, 41 | then the continuity of the Byztime scales will be as good as its NTP 42 | sources — which is probably a lot better than your RTC, but could be 43 | arbitrarily bad if the NTP source is faulty. Again, this only becomes 44 | an issue if Byztime loses quorum, meaning ⅓ or more of the network 45 | reboots at once. 46 | 47 | Byztime also currently relies on the system time for determining 48 | whether an X.509 certificate is expired. Once 49 | [Roughtime](https://tools.ietf.org/html/draft-ietf-ntp-roughtime) 50 | matures a bit we may consider integrating a Roughtime client into 51 | byztimed for certificate validation purposes. 52 | 53 | ## Build 54 | 55 | Byztime is built like any standard [Rust](https://rust-lang.org) 56 | crate. 
The easiest way to install byztimed is to get it from 57 | [crates.io](https://crates.io) via 58 | [`cargo`](https://doc.rust-lang.org/cargo/getting-started/installation.html): 59 | 60 | cargo install byztimed 61 | 62 | If you prefer to check out and build this repo, note that `byztimed` includes 63 | [`libbyztime`](https://github.com/akamai-contrib/libbyztime) 64 | as a submodule, so be sure to clone with `git clone 65 | --recurse-submodules`, or run `git submodule update --init 66 | --recursive` if you have already cloned without the 67 | `--recurse-submodules` option. 68 | 69 | Byztime is tested against Rust's stable channel, but compilers 70 | significantly older than the current stable will probably work. The 71 | most recent version known not to work is 1.38 (because we rely on 72 | async/await, which stabilized in 1.39). 73 | 74 | Byztimed currently runs only on Linux, and is well-tested only on 75 | AMD64. Other CPU architectures *should* work; please file a bug 76 | ticket if you encounter any issues. We hope to eventually support 77 | more operating systems, but this will be an uphill battle because 78 | Byztime depends on timekeeping facilities that currently only 79 | Linux provides. The effort to improve portability will likely require 80 | contributing some new functionality to other OS kernels. 81 | 82 | ## Usage 83 | 84 | Run byztimed as `byztimed `. See 85 | [CONFIG.md](CONFIG.md) for configuration file syntax. 86 | 87 | [`libbyztime`](https://github.com/akamai-contrib/libbyztime) 88 | is the C client library for consuming time from `byztimed`. See its 89 | `byztime.h` file for API documentation. The `byztime` crate within 90 | this repo provides idiomatic Rust bindings to libbyztime and its 91 | documentation can be read on [docs.rs](https://docs.rs/byztime). 92 | 93 | ## Protocol overview 94 | 95 | Although it is fundamentally a peer-to-peer protocol, Byztime uses a 96 | client-server communication pattern, with each node acting as both a 97 | client and a server to each other node. A client-only operation mode, 98 | wherein a node synchronizes itself to the consensus but does not vote 99 | in it, is also supported. 100 | 101 | Byztime uses [Network Time 102 | Security](https://www.rfc-editor.org/rfc/rfc8915.html) (NTS) for 103 | cryptographic link protection. Communication from each client to 104 | each server begins by the client initiating a TLS handshake and then 105 | using NTS-KE to negotiate shared keys and obtain NTS cookies. After 106 | NTS-KE is complete, the TLS connection closes and the remainder of 107 | the protocol runs over UDP. NTS provides message-level authenticated 108 | encryption. It provides replay protection for the client, but not 109 | for the server. The server never updates any state in response to a 110 | message from a client, so processing replays is harmless. For the 111 | remainder of this overview, we'll take the abstraction of 112 | authenticated encryption for granted and omit NTS-related machinery 113 | from our descriptions. 114 | 115 | Each node is assumed to be equipped with a *local clock* which 116 | counts the time elapsed since some arbitrary epoch such as when the 117 | system last booted. One node's local clock has no *a priori* known 118 | relationship to another's. Rather, this relationship is discovered 119 | through the execution of the protocol. The shared time that nodes 120 | seek to synchronize to is called the *global clock*. 
Nodes maintain 121 | an estimate of their *global offset*, which is the difference 122 | between the global clock and their local clock. The local clock never 123 | receives any adjustments; only the global offset does. 124 | 125 | The protocol proceeds by each node periodically sending a query to 126 | each of its peers, and the peer sending a response which includes a 127 | snapshot of its local clock and its current estimate of its global 128 | offset. Each query/response volley is called a *measurement*. 129 | 130 | The protocol uses the following global parameters: 131 | 132 | 1. `N`: the number of nodes participating in consensus. 133 | 134 | 2. `f`: the number of faulty nodes that can be tolerated. 135 | `f = floor((N-1)/3)`. 136 | 137 | 3. `drift`: a dimensionless number giving an upper bound on how fast 138 | or slow a correct node's local clock might be. For example if `drift` 139 | is 50e-6 then the clock might drift by up to 50µs per second. 140 | 141 | Each node keeps the following state: 142 | 143 | 1. The `era` of its local clock. This is a randomly-generated 144 | identifier which changes if the local clock loses its state, 145 | *e.g.* after a reboot. 146 | 147 | 2. `global_offset`: The node's estimate of the offset between the 148 | global clock and its local clock: `local_clock() + global_offset 149 | == estimate of global clock`. 150 | 151 | 3. `error`: The maximum absolute difference between the above 152 | estimate of the global clock and its true value. 153 | 154 | 4. `last_update`: The local clock time at which `global_offset` and 155 | `error` were last updated. 156 | 157 | And then for each of its peers: 158 | 159 | 5. `peer.era`: The peer's clock era as of the last time it communicated. 160 | 161 | 6. `peer.local_offset`: The node's estimate of the offset between 162 | its local clock and the peer's local clock: `local_clock() + 163 | peer.local_offset == estimate of peer's local clock`. 164 | 165 | 7. `peer.global_offset`: The peer's estimate of the offset between 166 | *its own* local clock and the global clock, as of the last time 167 | it communicated. 168 | 169 | 8. `peer.rtt`: The round trip time of the current "best" measurement 170 | of the peer's clock. This measurement is the one on which 171 | `peer.local_offset` is based. 172 | 173 | 9. `peer.origin_time`: The local clock time at which the query which 174 | led to the current best measurement was sent. 175 | 176 | 10. `peer.inflight_id`: The random unique identifier associated with 177 | a query, if any, that is currently awaiting a response. 178 | 179 | 11. `peer.inflight_origin_time`: The local clock time at which the 180 | current in-flight query (if any) was sent. 181 | 182 | There is some additional state related to NTS — a cache of cookies 183 | and shared keys — which works basically the same way as it does for 184 | NTP and we'll disregard it for the purposes of this explanation. 185 | 186 | At first run, nodes initialize `global_offset` such that the global 187 | clock matches their real-time clock. They periodically check the 188 | offset between the two and persist this offset to disk. This 189 | persisted value is used to recover a rough value with which to 190 | reinitialize `global_offset` after a reboot, but the error bounds 191 | on offsets recovered in this manner are considered infinite. 192 | 193 | Once per configured polling interval, clients send a query message to 194 | each of their peers, containing just a randomly-generated unique 195 | identifier. 
The sender updates `peer.inflight_id` and 196 | `peer.inflight_origin_time` to reflect the content of the packet and 197 | the time at which it was sent. If there was already another query in 198 | flight, the old query is assumed to have been dropped by the network 199 | and the new `inflight_id` and `inflight_origin_time` values 200 | overwrite the old ones. 201 | 202 | Servers respond immediately to any query they receive. The response 203 | contains: 204 | 205 | 1. `response_id`: A copy of the query's unique identifier. 206 | 207 | 2. `response_local`: A snapshot of the server's local clock. 208 | 209 | 3. `response_era`: The server's `era`. 210 | 211 | 4. `response_global_offset`: The server's `global_offset`. 212 | 213 | When the client receives the response, it processes it as follows: 214 | 215 | 1. Set `now` to a snapshot of the local clock at the moment the 216 | response was received. 217 | 218 | 2. Verify that `peer.inflight_id` is non-null and matches 219 | `response_id`. If not, discard the response. Otherwise, set 220 | `peer.inflight_id` to null and continue. 221 | 222 | 3. Set `peer.global_offset` to `response_global_offset`. 223 | 224 | 4. Compute `rtt` as `now - peer.inflight_origin_time`. 225 | 226 | 5. If this is the first response seen so far from this peer, or if 227 | `peer.era` does not match the era contained in the response, 228 | skip to step 8. 229 | 230 | 6. Compute the following lower-is-better quality metric for the 231 | current best measurement we have from this peer: 232 | `Q = peer.rtt/2 + 2 * drift * (now - peer.origin_time)`. This 233 | represents the worst-case error in estimating the offset between 234 | this node's local clock and the peer's local clock, taking into account 235 | network asymmetry and clock drift. Drift is multiplied by 2 236 | because the two clocks could each be drifting in opposite 237 | directions. 238 | 239 | 7. Compute this quality metric for the new measurement: 240 | `Q' = rtt/2 + 2 * drift * (now - peer.inflight_origin_time)`. If `Q' > Q`, 241 | then the old measurement is better than the new one, so keep 242 | it and return without further processing. 243 | 244 | 8. Set `peer.rtt` to `rtt`, `peer.origin_time` to 245 | `peer.inflight_origin_time`, and `peer.era` to `response_era`. 246 | 247 | 9. Set `peer.local_offset` to `response_clock + rtt/2 - now`. 248 | 249 | Now with the newly updated clock values from the peer, recompute 250 | `global_offset` and `error`: 251 | 252 | 10. For each peer `p`, compute an estimate `est = p.local_offset + 253 | p.global_offset` and error `err = p.rtt/2 + 2 * drift * (now - p.origin_time)`, 254 | giving an interval `(est - err, est + err)`. Create lists of all resulting 255 | minima and maxima. 256 | 257 | 11. If we ourselves are a participant in consensus, insert 258 | `global_offset` into the list of minima and the list of maxima. 259 | 260 | 12. Sort both lists. Discard the `f` lowest minima and the `f` 261 | highest maxima. Let `min'` equal the lowest remaining minimum 262 | and `max'` equal the highest remaining maximum. Let 263 | `global_offset' = (max' + min')/2`. Let `error' = (max' - min')/2`. 264 | This averaging method — discarding the `f` highest and 265 | lowest and taking the midpoint of the remaining range — is due 266 | to ["A New Fault-Tolerant Algorithm for Clock Synchronization" 267 | (Welch and Lynch 268 | 1988)](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.462.89&rep=rep1&type=pdf) 269 | and is crucial for achieving Byzantine fault tolerance. 
270 | 271 | 13. Determine whether the new global offset and error are consistent 272 | with the old one. Let `age = now - last_update`. Let `min = 273 | global_offset - error` and let `max = global_offset + error`. 274 | Let `drift_limit = 2 * age * drift`. Now check that 275 | `min' > min - drift_limit` and `max' < max + drift_limit`. If this 276 | check fails, return without further processing. (This step is not 277 | necessary for ensuring synchronization, but without it, a MitM 278 | adversary could cause time throughout the network to advance 279 | too quickly or too slowly, by delaying query messages but 280 | not response messages or vice versa.) 281 | 282 | 14. Set `last_update` to `now`, `global_offset` to `global_offset'`, 283 | and `error` to `error'`. 284 | 285 | This completes our description of the protocol. Applications 286 | consuming time from Byztime query the current values of 287 | `global_offset`, `error`, and `last_update`. The global time is 288 | `local_time + global_offset`, with error bounds of 289 | `±(error + 2*drift*(local_time - last_update))`. 290 | 291 | Estimates of global time are not frequency-stable: they jump 292 | discontinuously with each update and can move backward. It's up to 293 | the application how to deal with this. `libbyztime` includes support 294 | for clamping the results of successive calls to `get_global_time()` 295 | to make them consistent with each other. 296 | 297 | ## Caveats 298 | 299 | Akamai has been using Byztime in business-critical applications 300 | since early 2020 and it has been very stable for us. However, until 301 | two specific issues are resolved, this software should be considered 302 | beta: 303 | 304 | 1. We are likely to make some backward-incompatible changes to 305 | Byztime's wire protocol. Byztime currently uses [NTS-KE 306 | codepoints](https://www.iana.org/assignments/nts/nts.xhtml) in 307 | the Experimental Use range; we plan to obtain and use permanent 308 | allocations from IANA. We also will likely change the format of 309 | Byztime's time packets, currently based on Protobufs, to a 310 | bespoke fixed-field format, in order to make parsing more 311 | predictable and make it easier to ensure to that request size 312 | matches response size. We plan to have a single flag-day release 313 | that makes all these changes at once, and then commit to 314 | backward-compatibility thereafter. 315 | 316 | 2. Some of Byztime's statistics-and-health reporting capabilities 317 | have have been removed for this open-source release because they 318 | depend on Akamai-internal infrastructure to function. We plan 319 | to redesign and reimplement this functionality around open 320 | standards. 
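
As a postscript to the protocol overview above: the fault-tolerant averaging in steps 10–12 is the heart of the algorithm, so a small self-contained sketch of it is given below. Everything here — the names, the use of `f64` seconds, and the sample values — is invented for illustration and is not byztimed's actual implementation, which operates on its own timestamp types inside `src/core.rs`.

```
// Illustrative sketch of steps 10-12: the Welch-Lynch fault-tolerant
// midpoint. Not byztimed's real code; offsets are plain f64 seconds.

/// One peer's contribution: estimated offset and its error bound, in seconds.
struct PeerEstimate {
    est: f64,
    err: f64,
}

/// Combine peer estimates (plus, optionally, our own offset) into a new
/// (global_offset', error') pair, tolerating up to `f` faulty inputs.
/// Returns None if there are too few inputs to discard `f` from each side.
fn welch_lynch_midpoint(
    peers: &[PeerEstimate],
    own_offset: Option<f64>,
    f: usize,
) -> Option<(f64, f64)> {
    // Step 10: each estimate becomes an interval (est - err, est + err).
    let mut minima: Vec<f64> = peers.iter().map(|p| p.est - p.err).collect();
    let mut maxima: Vec<f64> = peers.iter().map(|p| p.est + p.err).collect();

    // Step 11: a consensus participant also counts its own offset
    // (a zero-width interval).
    if let Some(own) = own_offset {
        minima.push(own);
        maxima.push(own);
    }

    if minima.len() <= f {
        return None;
    }

    // Step 12: sort, discard the f lowest minima and the f highest maxima,
    // then take the midpoint and half-width of what remains.
    minima.sort_by(|a, b| a.partial_cmp(b).unwrap());
    maxima.sort_by(|a, b| a.partial_cmp(b).unwrap());
    let min_prime = minima[f];
    let max_prime = maxima[maxima.len() - 1 - f];
    Some(((max_prime + min_prime) / 2.0, (max_prime - min_prime) / 2.0))
}

fn main() {
    // Three peers, one of them wildly wrong; N = 4 counting ourselves,
    // so f = floor((4 - 1) / 3) = 1.
    let peers = vec![
        PeerEstimate { est: 0.010, err: 0.002 },
        PeerEstimate { est: 0.012, err: 0.001 },
        PeerEstimate { est: 0.900, err: 0.001 }, // faulty or malicious
    ];
    if let Some((offset, error)) = welch_lynch_midpoint(&peers, Some(0.011), 1) {
        // Prints roughly offset = 0.012 s, error = 0.001 s: the outlier
        // is discarded rather than averaged in.
        println!("global_offset' = {:.4} s, error' = {:.4} s", offset, error);
    }
}
```

Note that, as in step 13 of the overview, a real implementation would additionally check the result against the drift-limited previous interval before accepting it.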
321 | -------------------------------------------------------------------------------- /byztime-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "byztime-sys" 3 | version = "2.0.0" 4 | authors = ["Daniel Fox Franke "] 5 | description = "Raw FFI bindings for libbyztime" 6 | edition = "2018" 7 | links = "byztime" 8 | repository = "https://github.com/akamai-contrib/byztimed" 9 | license = "Apache-2.0" 10 | keywords = ["byzantine", "time", "byztime"] 11 | categories = ["external-ffi-bindings", "date-and-time"] 12 | 13 | 14 | [dependencies] 15 | libc = "0.2" 16 | 17 | [build-dependencies] 18 | cc = "1.0" -------------------------------------------------------------------------------- /byztime-sys/README.md: -------------------------------------------------------------------------------- 1 | Byztime is a Byzantine-fault-tolerant protocol for synchronizing 2 | time among a group of peers, without reliance on any external 3 | authority. This crate provides raw bindings to the C library 4 | [libbyztime](https://github.com/akamai-contrib/libbyztime) which 5 | handles communication between 6 | [byztimed](https://crates.io/crates/byztimed) and applications which 7 | consume time from it. -------------------------------------------------------------------------------- /byztime-sys/build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::path::Path; 3 | use std::process::Command; 4 | 5 | fn main() { 6 | if let Some(lib_dir) = env::var_os("BYZTIME_LIB_DIR") { 7 | let lib_dir = Path::new(&lib_dir); 8 | if lib_dir.join("libbyztime.a").exists() { 9 | println!("cargo:rustc-link-search=native={}", lib_dir.display()); 10 | println!("cargo:rustc-link-lib=static=byztime"); 11 | return; 12 | } else { 13 | println!("cargo:warning={}/libbyztime.a not found, attempting to fall back to in-tree build of libbyztime", lib_dir.display()); 14 | } 15 | } 16 | 17 | let out_dir = env::var("OUT_DIR").unwrap(); 18 | let out_dir_arg = format!("outdir={}", out_dir); 19 | Command::new("make") 20 | .args(&[out_dir_arg]) 21 | .current_dir(&Path::new("./libbyztime")) 22 | .status() 23 | .unwrap(); 24 | println!("cargo:rustc-link-search=native={}", out_dir); 25 | println!("cargo:rustc-link-lib=static=byztime"); 26 | } 27 | -------------------------------------------------------------------------------- /byztime-sys/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021, Akamai Technologies, Inc. 
2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use std::os::raw; 5 | 6 | #[repr(C)] 7 | pub struct byztime_ctx { 8 | _unused: [u8; 0], 9 | } 10 | 11 | #[derive(Debug, Copy, Clone)] 12 | #[repr(C)] 13 | pub struct byztime_stamp { 14 | pub seconds: i64, 15 | pub nanoseconds: i64, 16 | } 17 | 18 | extern "C" { 19 | pub fn byztime_stamp_normalize(stamp: *mut byztime_stamp) -> raw::c_int; 20 | pub fn byztime_stamp_add( 21 | sum: *mut byztime_stamp, 22 | stamp1: *const byztime_stamp, 23 | stamp2: *const byztime_stamp, 24 | ) -> raw::c_int; 25 | pub fn byztime_stamp_sub( 26 | diff: *mut byztime_stamp, 27 | stamp1: *const byztime_stamp, 28 | stamp2: *const byztime_stamp, 29 | ) -> raw::c_int; 30 | pub fn byztime_stamp_scale( 31 | prod: *mut byztime_stamp, 32 | stamp: *const byztime_stamp, 33 | ppb: i64, 34 | ) -> raw::c_int; 35 | pub fn byztime_stamp_halve(prod: *mut byztime_stamp, stamp: *const byztime_stamp); 36 | pub fn byztime_stamp_cmp( 37 | stamp1: *const byztime_stamp, 38 | stamp2: *const byztime_stamp, 39 | ) -> raw::c_int; 40 | pub fn byztime_open_ro(pathname: *const raw::c_char) -> *mut byztime_ctx; 41 | pub fn byztime_get_offset( 42 | ctx: *mut byztime_ctx, 43 | min: *mut byztime_stamp, 44 | est: *mut byztime_stamp, 45 | max: *mut byztime_stamp, 46 | ) -> raw::c_int; 47 | pub fn byztime_get_global_time( 48 | ctx: *mut byztime_ctx, 49 | min: *mut byztime_stamp, 50 | est: *mut byztime_stamp, 51 | max: *mut byztime_stamp, 52 | ) -> raw::c_int; 53 | pub fn byztime_set_drift(ctx: *mut byztime_ctx, drift_ppb: i64); 54 | pub fn byztime_get_drift(ctx: *const byztime_ctx) -> i64; 55 | pub fn byztime_slew( 56 | ctx: *mut byztime_ctx, 57 | min_rate_ppb: i64, 58 | max_rate_ppb: i64, 59 | maxerror: *const byztime_stamp, 60 | ) -> raw::c_int; 61 | pub fn byztime_step(ctx: *mut byztime_ctx) -> raw::c_int; 62 | pub fn byztime_open_rw(pathname: *const raw::c_char) -> *mut byztime_ctx; 63 | pub fn byztime_set_offset( 64 | ctx: *mut byztime_ctx, 65 | offset: *const byztime_stamp, 66 | error: *const byztime_stamp, 67 | as_of: *const byztime_stamp, 68 | ) -> raw::c_int; 69 | pub fn byztime_get_offset_quick(ctx: *const byztime_ctx, offset: *mut byztime_stamp); 70 | pub fn byztime_get_offset_raw( 71 | ctx: *const byztime_ctx, 72 | offset: *mut byztime_stamp, 73 | error: *mut byztime_stamp, 74 | as_of: *mut byztime_stamp, 75 | ); 76 | pub fn byztime_update_real_offset(ctx: *mut byztime_ctx) -> raw::c_int; 77 | pub fn byztime_get_clock_era(era: *mut raw::c_uchar) -> raw::c_int; 78 | pub fn byztime_get_local_time(local_time: *mut byztime_stamp) -> raw::c_int; 79 | pub fn byztime_get_real_time(real_time: *mut byztime_stamp) -> raw::c_int; 80 | pub fn byztime_close(ctx: *mut byztime_ctx) -> raw::c_int; 81 | pub fn byztime_install_sigbus_handler(oact: *mut libc::sigaction) -> raw::c_int; 82 | pub fn byztime_handle_sigbus( 83 | signo: raw::c_int, 84 | info: *mut libc::siginfo_t, 85 | context: *mut raw::c_void, 86 | ); 87 | } 88 | -------------------------------------------------------------------------------- /byztime/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "byztime" 3 | version = "2.0.0" 4 | authors = ["Daniel Franke "] 5 | edition = "2018" 6 | description = "Idiomatic Rust bindings for libbbyztime" 7 | repository = "https://github.com/akamai-contrib/byztimed" 8 | license = "Apache-2.0" 9 | keywords = ["byzantine", "time", "byztime"] 10 | categories = ["api-bindings", "date-and-time"] 11 | 12 | [features] 13 | with_quickcheck = [ 
"quickcheck", "rand" ] 14 | 15 | [dependencies] 16 | byztime-sys = { version = "2.0.0", path = "../byztime-sys" } 17 | errno = "0.2" 18 | libc = "0.2" 19 | quickcheck = { version = "0.9", optional = true } 20 | rand = { version = "0.7", optional = true } 21 | 22 | [dev-dependencies] 23 | rand = "0.7" 24 | quickcheck = "0.9" 25 | quickcheck_derive = "0.3" 26 | quickcheck_macros = "0.9" -------------------------------------------------------------------------------- /byztime/README.md: -------------------------------------------------------------------------------- 1 | Byztime is a Byzantine-fault-tolerant protocol for synchronizing 2 | time among a group of peers, without reliance on any external 3 | authority. This crate wraps 4 | [byztime_sys](https://crates.io/crates/byztime-sys) (which in turn 5 | wraps the C library 6 | [libbyztime](https://github.com/akamai-contrib/libbyztime)) to 7 | provide an idiomatic Rust API for communication from 8 | [byztimed](https://crates.io/crates/byztimed) to applications which 9 | consume time from it. -------------------------------------------------------------------------------- /byztimed/.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = ["-Ctarget-feature=+aes"] 3 | rustdocflags = ["-Ctarget-feature=+aes"] 4 | 5 | [target.x86_64-unknown-linux-gnu] 6 | rustflags = ["-Ctarget-cpu=sandybridge", "-Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3"] 7 | -------------------------------------------------------------------------------- /byztimed/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "byztimed" 3 | version = "2.0.1" 4 | authors = ["Daniel Franke "] 5 | description = "Byzantine fault-tolerant time synchronization" 6 | edition = "2018" 7 | repository = "https://github.com/akamai-contrib/byztimed" 8 | license = "Apache-2.0" 9 | keywords = ["byzantine", "time", "byztime"] 10 | categories = ["date-and-time"] 11 | 12 | 13 | [[bench]] 14 | name = "main" 15 | harness = false 16 | 17 | [dependencies] 18 | aead = "0.3" 19 | aes-siv = "0.5" 20 | bincode = "1.3" 21 | bytes = "1" 22 | byztime = { version = "2.0.0", path = "../byztime" } 23 | clap = "2.33" 24 | errno = "0.2" 25 | libc = "0.2" 26 | lmdb-rkv = "0.14" 27 | log = { version = "0.4", features = ["release_max_level_info"] } 28 | log4rs = "1.0" 29 | prost = "0.7" 30 | rand = "0.7" 31 | rkv = "0.17" 32 | serde = { version = "1.0", features = ["derive"] } 33 | serde_json = "1.0" 34 | tempfile = "3.1" 35 | trust-dns-resolver = "0.20" 36 | tokio = { version = "1", features = ["full"] } 37 | tokio-rustls = "0.22" 38 | 39 | [dev-dependencies] 40 | bencher = "0.1" 41 | byztime = { version = "2.0.0", path = "../byztime", features = ["with_quickcheck"] } 42 | lazy_static = "1.4" 43 | nix = "0.20" 44 | quickcheck = "0.9" 45 | quickcheck_derive = "0.3" 46 | quickcheck_macros = "0.9" 47 | 48 | [build-dependencies] 49 | prost-build = "0.7" 50 | -------------------------------------------------------------------------------- /byztimed/README.md: -------------------------------------------------------------------------------- 1 | # Byztime 2 | 3 | Byztime is a 4 | [Byzantine-fault-tolerant](https://en.wikipedia.org/wiki/Byzantine_fault) 5 | protocol for synchronizing time among a group of peers, without 6 | reliance on any external time authority. 
The time kept by Byztime is 7 | simply a counter that advances at a rate of something very close to 8 | one unit per second, such that all nodes are in close agreement as 9 | to its current value. Byztime timestamps have no well-defined epoch. 10 | If all nodes have correctly-set system clocks when first 11 | initialized, then Byztime will initially match POSIX time, but will 12 | eventually drift away from it since 1. there is no external source 13 | keeping it in sync, and 2. Byztime's timescale lacks leap seconds. 14 | 15 | Byztime's algorithm is focused on keeping its *worst-case* error --- 16 | the absolute distance between any two correct nodes' estimate of the 17 | current time --- as small as possible. It achieves this somewhat at 18 | the expense of *typical-case* error, using only the single 19 | highest-quality time sample from each peer rather than combining many 20 | samples to smooth out network jitter. In the worst case, the 21 | difference between two correct nodes' clocks will asymptotically 22 | converge toward 4δ + 4ερ, where δ is the one-way network latency 23 | between the two farthest-spaced peers, ε is the (dimensionless) drift 24 | rate of correct nodes' hardware clocks, and ρ is the polling 25 | interval. If all nodes behave honestly, the bound improves to 2δ + 2ερ 26 | and will be reached after a single round of the protocol rather than 27 | converging asymptotically. 28 | 29 | Byztimed runs completely independently of NTP, and a bad NTP time 30 | source will not disrupt Byztime. This comes with a minor caveat: just 31 | before the daemon shuts down it records the the current offset between 32 | Byztime time and system time, and uses this offset to re-initialize 33 | its estimate following a reboot. The only time this particularly 34 | matters is if many nodes reboot simultaneously and the network loses 35 | quorum. What happens in this case depends somewhat on NTP and what 36 | order things start up in at boot time. If Byztime starts before NTP 37 | starts and shuts down only after NTP shuts down, then the continuity 38 | of the Byztime timescale will be as good as the RTC and the CMOS 39 | battery of the restarting nodes, but no better. On the other hand if 40 | NTP is allowed to stabilize the system clock before Byztime starts up, 41 | then the continuity of the Byztime scales will be as good as its NTP 42 | sources — which is probably a lot better than your RTC, but could be 43 | arbitrarily bad if the NTP source is faulty. Again, this only becomes 44 | an issue if Byztime loses quorum, meaning ⅓ or more of the network 45 | reboots at once. 46 | 47 | Byztime also currently relies on the system time for determining 48 | whether an X.509 certificate is expired. Once 49 | [Roughtime](https://tools.ietf.org/html/draft-ietf-ntp-roughtime) 50 | matures a bit we may consider integrating a Roughtime client into 51 | byztimed for certificate validation purposes. 52 | 53 | ## Build 54 | 55 | Byztime is built like any standard [Rust](https://rust-lang.org) 56 | crate. 
The easiest way to install byztimed is to get it from 57 | [crates.io](https://crates.io) via 58 | [`cargo`](https://doc.rust-lang.org/cargo/getting-started/installation.html): 59 | 60 | cargo install byztimed 61 | 62 | If you prefer to check out and build this repo, note that `byztimed` includes 63 | [`libbyztime`](https://github.com/akamai-contrib/libbyztime) 64 | as a submodule, so be sure to clone with `git clone 65 | --recurse-submodules`, or run `git submodule update --init 66 | --recursive` if you have already cloned without the 67 | `--recurse-submodules` option. 68 | 69 | Byztime is tested against Rust's stable channel, but compilers 70 | significantly older than the current stable will probably work. The 71 | most recent version known not to work is 1.38 (because we rely on 72 | async/await, which stabilized in 1.39). 73 | 74 | Byztimed currently runs only on Linux, and is well-tested only on 75 | AMD64. Other CPU architectures *should* work; please file a bug 76 | ticket if you encounter any issues. We hope to eventually support 77 | more operating systems, but this will be an uphill battle because 78 | Byztime depends on timekeeping facilities that currently only 79 | Linux provides. The effort to improve portability will likely require 80 | contributing some new functionality to other OS kernels. 81 | 82 | ## Usage 83 | 84 | Run byztimed as `byztimed `. See 85 | [CONFIG.md](https://github.com/akamai-contrib/byztimed/CONFIG.md) 86 | for configuration file syntax. 87 | 88 | [`libbyztime`](https://github.com/akamai-contrib/libbyztime) 89 | is the C client library for consuming time from `byztimed`. See its 90 | `byztime.h` file for API documentation. The `byztime` crate within 91 | this repo provides idiomatic Rust bindings to libbyztime and its 92 | documentation can be read on [docs.rs](https://docs.rs/byztime). 93 | 94 | ## Protocol overview 95 | 96 | Although it is fundamentally a peer-to-peer protocol, Byztime uses a 97 | client-server communication pattern, with each node acting as both a 98 | client and a server to each other node. A client-only operation mode, 99 | wherein a node synchronizes itself to the consensus but does not vote 100 | in it, is also supported. 101 | 102 | Byztime uses [Network Time 103 | Security](https://www.rfc-editor.org/rfc/rfc8915.html) (NTS) for 104 | cryptographic link protection. Communication from each client to 105 | each server begins by the client initiating a TLS handshake and then 106 | using NTS-KE to negotiate shared keys and obtain NTS cookies. After 107 | NTS-KE is complete, the TLS connection closes and the remainder of 108 | the protocol runs over UDP. NTS provides message-level authenticated 109 | encryption. It provides replay protection for the client, but not 110 | for the server. The server never updates any state in response to a 111 | message from a client, so processing replays is harmless. For the 112 | remainder of this overview, we'll take the abstraction of 113 | authenticated encryption for granted and omit NTS-related machinery 114 | from our descriptions. 115 | 116 | Each node is assumed to be equipped with a *local clock* which 117 | counts the time elapsed since some arbitrary epoch such as when the 118 | system last booted. One node's local clock has no *a priori* known 119 | relationship to another's. Rather, this relationship is discovered 120 | through the execution of the protocol. The shared time that nodes 121 | seek to synchronize to is called the *global clock*. 
Nodes maintain 122 | an estimate of their *global offset*, which is the difference 123 | between the global clock and their local clock. The local clock never 124 | receives any adjustments; only the global offset does. 125 | 126 | The protocol proceeds by each node periodically sending a query to 127 | each of its peers, and the peer sending a response which includes a 128 | snapshot of its local clock and its current estimate of its global 129 | offset. Each query/response volley is called a *measurement*. 130 | 131 | The protocol uses the following global parameters: 132 | 133 | 1. `N`: the number of nodes participating in consensus. 134 | 135 | 2. `f`: the number of faulty nodes that can be tolerated. 136 | `f = floor((N-1)/3)`. 137 | 138 | 3. `drift`: a dimensionless number giving an upper bound on how fast 139 | or slow a correct node's local clock might be. For example if `drift` 140 | is 50e-6 then the clock might drift by up to 50µs per second. 141 | 142 | Each node keeps the following state: 143 | 144 | 1. The `era` of its local clock. This is a randomly-generated 145 | identifier which changes if the local clock loses its state, 146 | *e.g.* after a reboot. 147 | 148 | 2. `global_offset`: The node's estimate of the offset between the 149 | global clock and its local clock: `local_clock() + global_offset 150 | == estimate of global clock`. 151 | 152 | 3. `error`: The maximum absolute difference between the above 153 | estimate of the global clock and its true value. 154 | 155 | 4. `last_update`: The local clock time at which `global_offset` and 156 | `error` were last updated. 157 | 158 | And then for each of its peers: 159 | 160 | 5. `peer.era`: The peer's clock era as of the last time it communicated. 161 | 162 | 6. `peer.local_offset`: The node's estimate of the offset between 163 | its local clock and the peer's local clock: `local_clock() + 164 | peer.local_offset == estimate of peer's local clock`. 165 | 166 | 7. `peer.global_offset`: The peer's estimate of the offset between 167 | *its own* local clock and the global clock, as of the last time 168 | it communicated. 169 | 170 | 8. `peer.rtt`: The round trip time of the current "best" measurement 171 | of the peer's clock. This measurement is the one on which 172 | `peer.local_offset` is based. 173 | 174 | 9. `peer.origin_time`: The local clock time at which the query which 175 | led to the current best measurement was sent. 176 | 177 | 10. `peer.inflight_id`: The random unique identifier associated with 178 | a query, if any, that is currently awaiting a response. 179 | 180 | 11. `peer.inflight_origin_time`: The local clock time at which the 181 | current in-flight query (if any) was sent. 182 | 183 | There is some additional state related to NTS — a cache of cookies 184 | and shared keys — which works basically the same way as it does for 185 | NTP and we'll disregard it for the purposes of this explanation. 186 | 187 | At first run, nodes initialize `global_offset` such that the global 188 | clock matches their real-time clock. They periodically check the 189 | offset between the two and persist this offset to disk. This 190 | persisted value is used to recover a rough value with which to 191 | reinitialize `global_offset` after a reboot, but the error bounds 192 | on offsets recovered in this manner are considered infinite. 193 | 194 | Once per configured polling interval, clients send a query message to 195 | each of their peers, containing just a randomly-generated unique 196 | identifier. 
The sender updates `peer.inflight_id` and 197 | `peer.inflight_origin_time` to reflect the content of the packet and 198 | the time at which it was sent. If there was already another query in 199 | flight, the old query is assumed to have been dropped by the network 200 | and the new `inflight_id` and `inflight_origin_time` values 201 | overwrite the old ones. 202 | 203 | Servers respond immediately to any query they receive. The response 204 | contains: 205 | 206 | 1. `response_id`: A copy of the query's unique identifier. 207 | 208 | 2. `response_local`: A snapshot of the server's local clock. 209 | 210 | 3. `response_era`: The server's `era`. 211 | 212 | 4. `response_global_offset`: The server's `global_offset`. 213 | 214 | When the client receives the response, it processes it as follows: 215 | 216 | 1. Set `now` to a snapshot of the local clock at the moment the 217 | response was received. 218 | 219 | 2. Verify that `peer.inflight_id` is non-null and matches 220 | `response_id`. If not, discard the response. Otherwise, set 221 | `peer.inflight_id` to null and continue. 222 | 223 | 3. Set `peer.global_offset` to `response_global_offset`. 224 | 225 | 4. Compute `rtt` as `now - peer.inflight_origin_time`. 226 | 227 | 5. If this is the first response seen so far from this peer, or if 228 | `peer.era` does not match the era contained in the response, 229 | skip to step 8. 230 | 231 | 6. Compute the following lower-is-better quality metric for the 232 | current best measurement we have from this peer: 233 | `Q = peer.rtt/2 + 2 * drift * (now - peer.origin_time)`. This 234 | represents the worst-case error in estimating the offset between 235 | this node's local clock and the peer's local clock, taking into account 236 | network asymmetry and clock drift. Drift is multiplied by 2 237 | because the two clocks could each be drifting in opposite 238 | directions. 239 | 240 | 7. Compute this quality metric for the new measurement: 241 | `Q' = rtt/2 + 2 * drift * (now - peer.inflight_origin_time)`. If `Q' > Q`, 242 | then the old measurement is better than the new one, so keep 243 | it and return without further processing. 244 | 245 | 8. Set `peer.rtt` to `rtt`, `peer.origin_time` to 246 | `peer.inflight_origin_time`, and `peer.era` to `response_era`. 247 | 248 | 9. Set `peer.local_offset` to `response_clock + rtt/2 - now`. 249 | 250 | Now with the newly updated clock values from the peer, recompute 251 | `global_offset` and `error`: 252 | 253 | 10. For each peer `p`, compute an estimate `est = p.local_offset + 254 | p.global_offset` and error `err = p.rtt/2 + 2 * drift * (now - p.origin_time)`, 255 | giving an interval `(est - err, est + err)`. Create lists of all resulting 256 | minima and maxima. 257 | 258 | 11. If we ourselves are a participant in consensus, insert 259 | `global_offset` into the list of minima and the list of maxima. 260 | 261 | 12. Sort both lists. Discard the `f` lowest minima and the `f` 262 | highest maxima. Let `min'` equal the lowest remaining minimum 263 | and `max'` equal the highest remaining maximum. Let 264 | `global_offset' = (max' + min')/2`. Let `error' = (max' - min')/2`. 265 | This averaging method — discarding the `f` highest and 266 | lowest and taking the midpoint of the remaining range — is due 267 | to ["A New Fault-Tolerant Algorithm for Clock Synchronization" 268 | (Welch and Lynch 269 | 1988)](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.462.89&rep=rep1&type=pdf) 270 | and is crucial for achieving Byzantine fault tolerance. 
271 | 
272 | 13. Determine whether the new global offset and error are consistent
273 | with the old one. Let `age = now - last_update`. Let `min =
274 | global_offset - error` and let `max = global_offset + error`.
275 | Let `drift_limit = 2 * age * drift`. Now check that
276 | `min' > min - drift_limit` and `max' < max + drift_limit`. If this
277 | check fails, return without further processing. (This step is not
278 | necessary for ensuring synchronization, but without it, a MitM
279 | adversary could cause time throughout the network to advance
280 | too quickly or too slowly, by delaying query messages but
281 | not response messages or vice versa.)
282 | 
283 | 14. Set `last_update` to `now`, `global_offset` to `global_offset'`,
284 | and `error` to `error'`.
285 | 
286 | This completes our description of the protocol. Applications
287 | consuming time from Byztime query the current values of
288 | `global_offset`, `error`, and `last_update`. The global time is
289 | `local_time + global_offset`, with error bounds of
290 | `±(error + 2*drift*(local_time - last_update))`.
291 | 
292 | Estimates of global time are not frequency-stable: they jump
293 | discontinuously with each update and can move backward. It's up to
294 | the application how to deal with this. `libbyztime` includes support
295 | for clamping the results of successive calls to `get_global_time()`
296 | to make them consistent with each other.
297 | 
298 | ## Caveats
299 | 
300 | Akamai has been using Byztime in business-critical applications
301 | since early 2020 and it has been very stable for us. However, until
302 | two specific issues are resolved, this software should be considered
303 | beta:
304 | 
305 | 1. We are likely to make some backward-incompatible changes to
306 | Byztime's wire protocol. Byztime currently uses [NTS-KE
307 | codepoints](https://www.iana.org/assignments/nts/nts.xhtml) in
308 | the Experimental Use range; we plan to obtain and use permanent
309 | allocations from IANA. We will also likely change the format of
310 | Byztime's time packets, currently based on Protobufs, to a
311 | bespoke fixed-field format, in order to make parsing more
312 | predictable and make it easier to ensure that request size
313 | matches response size. We plan to have a single flag-day release
314 | that makes all these changes at once, and then commit to
315 | backward-compatibility thereafter.
316 | 
317 | 2. Some of Byztime's statistics-and-health reporting capabilities
318 | have been removed for this open-source release because they
319 | depend on Akamai-internal infrastructure to function. We plan
320 | to redesign and reimplement this functionality around open
321 | standards.
322 | --------------------------------------------------------------------------------
/byztimed/benches/main.rs:
--------------------------------------------------------------------------------
1 | //Copyright 2020, Akamai Technologies, Inc.
2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | #[macro_use] 5 | extern crate bencher; 6 | extern crate rand; 7 | extern crate tempfile; 8 | 9 | use bencher::Bencher; 10 | use std::path; 11 | use tokio_rustls::rustls; 12 | 13 | fn bench_respond_to_time_request(bench: &mut Bencher) { 14 | let timedata_path = tempfile::NamedTempFile::new().unwrap(); 15 | let store_path = tempfile::tempdir().unwrap(); 16 | let config = byztimed::config::Config { 17 | timedata: path::PathBuf::from(timedata_path.path()), 18 | secret_store: path::PathBuf::from(store_path.path()), 19 | logging: vec![], 20 | log_format: None, 21 | ro_mode: false, 22 | bind_host: std::net::IpAddr::V6(std::net::Ipv6Addr::UNSPECIFIED), 23 | bind_port: 0, 24 | poll_interval: 8.0, 25 | drift_ppb: 250_000, 26 | tls_acceptor: tokio_rustls::TlsAcceptor::from(std::sync::Arc::new( 27 | rustls::ServerConfig::new(rustls::NoClientAuth::new()), 28 | )), 29 | peers: std::collections::HashMap::new(), 30 | }; 31 | 32 | let core_state = byztimed::core::CoreState::initialize(&config).unwrap(); 33 | let secret_store = byztimed::store::SecretStore::new(&config.secret_store).unwrap(); 34 | 35 | let mut request_buf = Vec::new(); 36 | let mut response_buf = Vec::with_capacity(65535); 37 | let mut rng = rand::thread_rng(); 38 | let unique_id = byztimed::core::UniqueId::default(); 39 | let c2s = byztimed::aead::keygen(&mut rng); 40 | let s2c = byztimed::aead::keygen(&mut rng); 41 | let (master_key_id, master_key) = secret_store.get_cached_current_master_key(); 42 | let cookie = byztimed::cookie::seal_cookie( 43 | &byztimed::cookie::CookieData { c2s, s2c }, 44 | &master_key, 45 | master_key_id, 46 | &mut rng, 47 | ); 48 | byztimed::time_client::serialize_time_request(&mut request_buf, &unique_id, &c2s, cookie, 1); 49 | 50 | let core_state_mutex = std::sync::RwLock::new(core_state); 51 | bench.iter(|| { 52 | byztimed::time_server::respond_to_time_request( 53 | request_buf.as_ref(), 54 | &mut response_buf, 55 | &core_state_mutex, 56 | &secret_store, 57 | ) 58 | }) 59 | } 60 | 61 | benchmark_group!(time_server_benches, bench_respond_to_time_request); 62 | benchmark_main!(time_server_benches); 63 | -------------------------------------------------------------------------------- /byztimed/build.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2020, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | extern crate prost_build; 5 | 6 | fn main() { 7 | let mut prost_build = prost_build::Config::new(); 8 | prost_build.type_attribute( 9 | ".", 10 | "#[cfg_attr(test, derive(quickcheck_derive::Arbitrary))]", 11 | ); 12 | prost_build 13 | .compile_protos(&["src/wire.proto"], &["src/"]) 14 | .unwrap(); 15 | } 16 | -------------------------------------------------------------------------------- /byztimed/src/aead.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 
2 | //SPDX-License-Identifier: Apache-2.0
3 | 
4 | //!Convenience wrapper around the `aead` and `aes_siv` crates
5 | 
6 | pub use aead::{Aead, NewAead, Payload};
7 | pub use aes_siv::{Aes128SivAead, Aes256SivAead};
8 | 
9 | use aead::generic_array::{ArrayLength, GenericArray};
10 | use rand::{CryptoRng, RngCore};
11 | use std::panic;
12 | 
13 | pub type Aes128SivNonce = GenericArray<u8, <Aes128SivAead as Aead>::NonceSize>;
14 | pub type Aes256SivNonce = GenericArray<u8, <Aes256SivAead as Aead>::NonceSize>;
15 | pub type Aes128SivKey = GenericArray<u8, <Aes128SivAead as NewAead>::KeySize>;
16 | pub type Aes256SivKey = GenericArray<u8, <Aes256SivAead as NewAead>::KeySize>;
17 | 
18 | ///Error returned by `GenericArrayExt` methods if the slice passed in is the wrong length.
19 | #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
20 | pub struct LengthMismatchError;
21 | 
22 | ///Annoyingly, GenericArray's `from_slice` constructors panic if the slice is the wrong length.
23 | ///This extension trait adds methods that catch the panic and return an error result instead.
24 | pub trait GenericArrayExt<T, N>
25 | where
26 |     N: ArrayLength<T>,
27 | {
28 |     fn try_from_slice(slice: &[T]) -> Result<&GenericArray<T, N>, LengthMismatchError>
29 |     where
30 |         T: panic::RefUnwindSafe,
31 |     {
32 |         panic::catch_unwind(move || GenericArray::from_slice(slice))
33 |             .map_err(|_| LengthMismatchError {})
34 |     }
35 | 
36 |     fn try_clone_from_slice(slice: &[T]) -> Result<GenericArray<T, N>, LengthMismatchError>
37 |     where
38 |         T: panic::RefUnwindSafe + Clone,
39 |     {
40 |         panic::catch_unwind(move || GenericArray::clone_from_slice(slice))
41 |             .map_err(|_| LengthMismatchError {})
42 |     }
43 | }
44 | 
45 | impl<T, N> GenericArrayExt<T, N> for GenericArray<T, N> where N: ArrayLength<T> {}
46 | 
47 | pub fn keygen<R: RngCore + CryptoRng>(rand: &mut R) -> Aes128SivKey {
48 |     let mut key = Aes128SivKey::default();
49 |     rand.fill_bytes(key.as_mut_slice());
50 |     key
51 | }
52 | 
53 | #[cfg(test)]
54 | pub fn keygen_test<R: RngCore>(rand: &mut R) -> Aes128SivKey {
55 |     let mut key = Aes128SivKey::default();
56 |     rand.fill_bytes(key.as_mut_slice());
57 |     key
58 | }
59 | --------------------------------------------------------------------------------
/byztimed/src/config.rs:
--------------------------------------------------------------------------------
1 | //Copyright 2021, Akamai Technologies, Inc.
2 | //SPDX-License-Identifier: Apache-2.0
3 | 
4 | //! Configuration representation and configuration file parsing
5 | 
6 | use crate::logging::LogConfig;
7 | use crate::ntske;
8 | use crate::peer_name::PeerName;
9 | use serde::Deserialize;
10 | use serde_json as cfgformat;
11 | use std::collections::HashMap;
12 | use std::fmt;
13 | use std::fs;
14 | use std::net;
15 | use std::path::{Path, PathBuf};
16 | use std::sync::Arc;
17 | use std::vec::Vec;
18 | use tokio_rustls::rustls;
19 | use tokio_rustls::webpki::{DNSName, DNSNameRef};
20 | 
21 | ///Default port number. Set to 0 until we have an IANA allocation,
22 | /// which is treated as making it mandatory to configure.
23 | const DEFAULT_PORT: u16 = 0;
24 | 
25 | ///Default polling interval in seconds
26 | const DEFAULT_POLL_INTERVAL: f64 = 8.0;
27 | 
28 | ///Default upper bound on clock drift rate, in parts per billion
29 | const DEFAULT_DRIFT_PPB: i64 = 250_000;
30 | 
31 | ///Contents of a peer entry in the config file, rawly deserialized
32 | /// from serde.
33 | #[derive(Debug, Clone, Deserialize)]
34 | struct RawPeerConfig {
35 |     host: String,
36 |     port: Option<u16>,
37 |     dist: Option<i64>,
38 |     cert_name: Option<String>,
39 |     authorities: Option<String>,
40 | }
41 | 
42 | ///Enumeration of log levels that can appear as values in the
Isomorphic to `log::LevelFilter`, but we need our 44 | /// own version so we can derive `Deserialize` for it. 45 | #[derive(Debug, Clone, Deserialize)] 46 | #[serde(rename_all = "lowercase")] 47 | enum RawLevelFilter { 48 | Error, 49 | Warn, 50 | Info, 51 | Debug, 52 | Trace, 53 | } 54 | 55 | ///Top-level contents of the config file, rawly deserialized from 56 | /// serde. 57 | #[derive(Debug, Clone, Deserialize)] 58 | struct RawConfig { 59 | timedata: String, 60 | secret_store: String, 61 | logging: Option>, 62 | log_format: Option, 63 | ro_mode: Option, 64 | bind_host: Option, 65 | bind_port: Option, 66 | poll_interval: Option, 67 | drift_ppb: Option, 68 | key: Option, 69 | cert: Option, 70 | authorities: Option, 71 | peers: HashMap, 72 | } 73 | 74 | impl Into for RawLevelFilter { 75 | fn into(self) -> log::LevelFilter { 76 | match self { 77 | RawLevelFilter::Error => log::LevelFilter::Error, 78 | RawLevelFilter::Warn => log::LevelFilter::Warn, 79 | RawLevelFilter::Info => log::LevelFilter::Info, 80 | RawLevelFilter::Debug => log::LevelFilter::Debug, 81 | RawLevelFilter::Trace => log::LevelFilter::Trace, 82 | } 83 | } 84 | } 85 | 86 | ///"Cooked" version of a peer entry, semantically validated and with 87 | /// defaults filled in 88 | #[derive(Clone)] 89 | pub struct PeerConfig { 90 | ///Hostname at which to contact this peer 91 | pub host: String, 92 | ///Port on which to contact this peer 93 | pub port: u16, 94 | ///Lower bound on peer's physical distance in meters 95 | pub dist: i64, 96 | ///Subject DNS name to expect when validating the peer's X.509 97 | /// certificate 98 | pub cert_name: DNSName, 99 | ///Connector for NTS-KE client sessions 100 | pub tls_connector: tokio_rustls::TlsConnector, 101 | } 102 | 103 | ///"Cooked" representation of a configuration, semantically validated and with 104 | /// defaults filled in 105 | #[derive(Clone)] 106 | pub struct Config { 107 | ///Path to the timedata file 108 | pub timedata: PathBuf, 109 | ///Path to the secret store 110 | pub secret_store: PathBuf, 111 | ///Vector of logging targets 112 | pub logging: Vec, 113 | ///Log format string 114 | pub log_format: Option, 115 | ///Whether we're running in read-only mode 116 | pub ro_mode: bool, 117 | ///Host address to bind the server to 118 | pub bind_host: net::IpAddr, 119 | ///Port to bind the server to 120 | pub bind_port: u16, 121 | ///Polling interval in seconds 122 | pub poll_interval: f64, 123 | ///Upper bound on clock drift rate in parts per billion 124 | pub drift_ppb: i64, 125 | ///Acceptor for NTS-KE server sessions 126 | pub tls_acceptor: tokio_rustls::TlsAcceptor, 127 | ///Map of peers to their configuraitons 128 | pub peers: HashMap>, 129 | } 130 | 131 | ///A semantic error in the configuration file 132 | #[derive(Clone, Debug)] 133 | pub struct SemanticError { 134 | ///Name of the peer entry to which this error pertains, or `None` for the global section 135 | pub section: Option, 136 | ///Text of the error message 137 | pub text: &'static str, 138 | } 139 | 140 | ///An error in the configuration file 141 | #[derive(Debug)] 142 | pub enum ConfigError { 143 | Syntactic(cfgformat::Error), 144 | Semantic(Vec), 145 | } 146 | 147 | impl fmt::Display for ConfigError { 148 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 149 | use ConfigError::*; 150 | match self { 151 | Syntactic(e) => write!(f, "Syntax error in configuration file: {}", e), 152 | Semantic(evec) => { 153 | for e in evec { 154 | match &e.section { 155 | Some(section) => { 156 | write!(f, "In configuration of 
peer '{}': {}", section, e.text)? 157 | } 158 | None => write!(f, "In global section of configuration file: {}", e.text)?, 159 | } 160 | } 161 | Ok(()) 162 | } 163 | } 164 | } 165 | } 166 | 167 | impl std::error::Error for ConfigError {} 168 | 169 | fn make_tls_client_config>( 170 | authorities_path: &Authorities, 171 | section: Option<&PeerName>, 172 | errors: &mut Vec, 173 | ) -> rustls::ClientConfig { 174 | let mut store = rustls::RootCertStore::empty(); 175 | 176 | match fs::File::open(Path::new(authorities_path.as_ref())) { 177 | Ok(f) => { 178 | let mut bufreader = std::io::BufReader::new(f); 179 | match store.add_pem_file(&mut bufreader) { 180 | Ok((valid, invalid)) => { 181 | if valid == 0 { 182 | if invalid == 0 { 183 | errors.push(SemanticError { 184 | section: section.cloned(), 185 | text: "`authorities` file contains no certificate authorities", 186 | }) 187 | } else { 188 | errors.push(SemanticError { 189 | section: section.cloned(), 190 | text: "`authorities` file contains only invalid certificate authorities" 191 | }) 192 | } 193 | } 194 | } 195 | Err(_) => errors.push(SemanticError { 196 | section: section.cloned(), 197 | text: "`authorities` file is not a valid PEM file", 198 | }), 199 | } 200 | } 201 | Err(_) => errors.push(SemanticError { 202 | section: section.cloned(), 203 | text: "`authorities` file could not be opened", 204 | }), 205 | } 206 | 207 | let mut tls_config = rustls::ClientConfig::new(); 208 | tls_config.root_store = store; 209 | tls_config.alpn_protocols = vec![ntske::NTSKE_ALPN.to_vec()]; 210 | tls_config 211 | } 212 | 213 | ///Semantically validate a parsed configuration and fill in defaults 214 | fn cook_config(raw: RawConfig) -> Result> { 215 | let mut errors: Vec = Vec::new(); 216 | 217 | let bind_host = match raw.bind_host { 218 | Some(ip) => ip.parse().unwrap_or_else(|_| { 219 | errors.push(SemanticError { 220 | section: None, 221 | text: "`bind_host` must be an IP address", 222 | }); 223 | net::IpAddr::V6(net::Ipv6Addr::UNSPECIFIED) 224 | }), 225 | None => net::IpAddr::V6(net::Ipv6Addr::UNSPECIFIED), 226 | }; 227 | let bind_port = raw.bind_port.unwrap_or(DEFAULT_PORT); 228 | let poll_interval = raw.poll_interval.unwrap_or(DEFAULT_POLL_INTERVAL); 229 | let drift_ppb = raw.drift_ppb.unwrap_or(DEFAULT_DRIFT_PPB); 230 | let ro_mode = raw.ro_mode.unwrap_or(false); 231 | 232 | let rawlogging = raw.logging.unwrap_or_else(|| { 233 | let mut h = HashMap::new(); 234 | h.insert(String::from("STDOUT"), RawLevelFilter::Info); 235 | h 236 | }); 237 | 238 | let mut logging = Vec::with_capacity(rawlogging.len()); 239 | logging.extend(rawlogging.into_iter().map(|(k, rawlevel)| { 240 | let level = rawlevel.into(); 241 | if k.as_str() == "STDOUT" { 242 | LogConfig::ConsoleLog(log4rs::append::console::Target::Stdout, level) 243 | } else if k.as_str() == "STDERR" { 244 | LogConfig::ConsoleLog(log4rs::append::console::Target::Stderr, level) 245 | } else { 246 | LogConfig::FileLog(PathBuf::from(k), level) 247 | } 248 | })); 249 | 250 | if !ro_mode && bind_port == 0 { 251 | errors.push(SemanticError { 252 | section: None, 253 | text: "`bind_port` must be provided when not in ro_mode", 254 | }); 255 | } 256 | 257 | let global_client_tls_config = raw 258 | .authorities 259 | .map(|authorities| Arc::new(make_tls_client_config(&authorities, None, &mut errors))); 260 | 261 | if !ro_mode && (raw.key.is_none() || raw.cert.is_none()) { 262 | errors.push(SemanticError { 263 | section: None, 264 | text: "`cert` and `key` must be provided when not in ro_mode", 265 | }); 266 | } 267 
| 268 | let mut server_tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new()); 269 | server_tls_config.alpn_protocols = vec![ntske::NTSKE_ALPN.to_vec()]; 270 | 271 | if let (Some(cert_path), Some(key_path)) = (raw.cert, raw.key) { 272 | let maybe_certs = fs::File::open(Path::new(&cert_path)) 273 | .map_err(|_| { 274 | errors.push(SemanticError { 275 | section: None, 276 | text: "`cert` file could not be opened", 277 | }) 278 | }) 279 | .and_then(|f| { 280 | let mut bufreader = std::io::BufReader::new(f); 281 | rustls::internal::pemfile::certs(&mut bufreader).map_err(|()| { 282 | errors.push(SemanticError { 283 | section: None, 284 | text: "`cert` file does not contain valid PEM", 285 | }) 286 | }) 287 | }); 288 | 289 | let maybe_keys = fs::File::open(Path::new(&key_path)) 290 | .map_err(|_| { 291 | errors.push(SemanticError { 292 | section: None, 293 | text: "`key` file could not be opened", 294 | }) 295 | }) 296 | .and_then(|f| { 297 | let mut bufreader = std::io::BufReader::new(f); 298 | rustls::internal::pemfile::pkcs8_private_keys(&mut bufreader).map_err(|()| { 299 | errors.push(SemanticError { 300 | section: None, 301 | text: "`key` file does not contain valid PEM", 302 | }) 303 | }) 304 | }); 305 | 306 | if let (Ok(certs), Ok(mut keys)) = (maybe_certs, maybe_keys) { 307 | if let Some(key) = keys.pop() { 308 | if server_tls_config.set_single_cert(certs, key).is_err() { 309 | errors.push(SemanticError { 310 | section: None, 311 | text: "`key` file is not valid PKCS#8", 312 | }) 313 | } 314 | } else { 315 | errors.push(SemanticError { 316 | section: None, 317 | text: "`key` file does not contain any PKCS#8-encoded keys (your key may be in the wrong format; see manual for more info)" 318 | }) 319 | } 320 | } 321 | } else if !ro_mode { 322 | errors.push(SemanticError { 323 | section: None, 324 | text: "`cert` and `key` must be provided when not in ro_mode", 325 | }); 326 | } 327 | 328 | let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(server_tls_config)); 329 | 330 | let mut peers = HashMap::with_capacity(raw.peers.len()); 331 | 332 | for (peer_name_string, rawpeer) in raw.peers { 333 | let peer_name = PeerName::new(peer_name_string); 334 | let host = rawpeer.host; 335 | let port = rawpeer.port.unwrap_or_else(|| { 336 | if DEFAULT_PORT == 0 { 337 | errors.push(SemanticError { 338 | section: Some(peer_name.clone()), 339 | text: "A `port` must be specified for each peer", 340 | }); 341 | }; 342 | DEFAULT_PORT 343 | }); 344 | 345 | let dist = rawpeer.dist.unwrap_or(0); 346 | let cert_name_string = rawpeer.cert_name.unwrap_or_else(|| host.clone()); 347 | let cert_name = DNSNameRef::try_from_ascii_str(cert_name_string.as_str()) 348 | .unwrap_or_else(|_| { 349 | errors.push(SemanticError { 350 | section: Some(peer_name.clone()), 351 | text: "`cert_name` is not a syntactically-valid DNS name", 352 | }); 353 | DNSNameRef::try_from_ascii_str("bogus.invalid").unwrap() 354 | }) 355 | .to_owned(); 356 | 357 | let client_tls_config = match rawpeer.authorities { 358 | Some(authorities) => Arc::new(make_tls_client_config( 359 | &authorities, 360 | Some(&peer_name), 361 | &mut errors, 362 | )), 363 | None => match &global_client_tls_config { 364 | Some(client_tls_config) => client_tls_config.clone(), 365 | None => { 366 | errors.push(SemanticError { 367 | section: Some(peer_name.clone()), 368 | text: "No `authorities` was specified for this peer and no global default was given", 369 | }); 370 | Arc::new(rustls::ClientConfig::new()) 371 | } 372 | }, 373 | }; 374 | 375 | let 
tls_connector = tokio_rustls::TlsConnector::from(client_tls_config); 376 | 377 | peers.insert( 378 | peer_name, 379 | Arc::new(PeerConfig { 380 | host, 381 | port, 382 | dist, 383 | cert_name, 384 | tls_connector, 385 | }), 386 | ); 387 | } 388 | 389 | if errors.is_empty() { 390 | Ok(Config { 391 | timedata: PathBuf::from(raw.timedata), 392 | secret_store: PathBuf::from(raw.secret_store), 393 | logging, 394 | log_format: raw.log_format, 395 | ro_mode, 396 | bind_host, 397 | bind_port, 398 | poll_interval, 399 | drift_ppb, 400 | tls_acceptor, 401 | peers, 402 | }) 403 | } else { 404 | Err(errors) 405 | } 406 | } 407 | 408 | impl Config { 409 | ///Parse and validate a configuration and return the parsed result 410 | pub fn parse>(config: S) -> Result { 411 | cook_config(cfgformat::from_str(config.as_ref()).map_err(ConfigError::Syntactic)?) 412 | .map_err(ConfigError::Semantic) 413 | } 414 | } 415 | -------------------------------------------------------------------------------- /byztimed/src/cookie.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | //! NTS cookie handling 5 | 6 | use crate::aead::*; 7 | use crate::ntske::AEAD_ALGORITHM_AES_SIV_CMAC_256; 8 | use crate::wire::{Cookie, UnwrappedCookie}; 9 | use prost::Message; 10 | use rand::{CryptoRng, RngCore}; 11 | 12 | ///Plaintext contents of a cookie 13 | #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] 14 | pub struct CookieData { 15 | ///The client-to-server key 16 | pub c2s: Aes128SivKey, 17 | ///The server-to-client key 18 | pub s2c: Aes128SivKey, 19 | } 20 | 21 | ///Decrypt a cookie, using the given callback to look up the master key by its ID. 
22 | pub fn open_cookie, F: FnOnce(u32) -> Option>( 23 | cookie: C, 24 | get_master_key: F, 25 | ) -> Option { 26 | let cookie_msg = Cookie::decode(cookie.as_ref()).ok()?; 27 | let master_key = get_master_key(cookie_msg.key_id)?; 28 | 29 | let aead = Aes128SivAead::new(&master_key); 30 | let plaintext = aead 31 | .decrypt( 32 | Aes128SivNonce::try_from_slice(&cookie_msg.nonce).ok()?, 33 | cookie_msg.ciphertext.as_slice(), 34 | ) 35 | .ok()?; 36 | 37 | let unwrapped_cookie = UnwrappedCookie::decode(plaintext.as_ref()).ok()?; 38 | 39 | if unwrapped_cookie.alg_id != AEAD_ALGORITHM_AES_SIV_CMAC_256.0 as u32 { 40 | return None; 41 | } 42 | 43 | Some(CookieData { 44 | c2s: Aes128SivKey::try_clone_from_slice(unwrapped_cookie.c2s.as_slice()).ok()?, 45 | s2c: Aes128SivKey::try_clone_from_slice(unwrapped_cookie.s2c.as_slice()).ok()?, 46 | }) 47 | } 48 | 49 | ///Encrypt a cookie using the given master key 50 | pub fn seal_cookie( 51 | cookie_data: &CookieData, 52 | master_key: &Aes128SivKey, 53 | master_key_id: u32, 54 | rand: &mut R, 55 | ) -> Vec { 56 | let aead = Aes128SivAead::new(master_key); 57 | 58 | let unwrapped_cookie = UnwrappedCookie { 59 | alg_id: AEAD_ALGORITHM_AES_SIV_CMAC_256.0 as u32, 60 | c2s: Vec::from(cookie_data.c2s.as_slice()), 61 | s2c: Vec::from(cookie_data.s2c.as_slice()), 62 | }; 63 | 64 | let mut plaintext = Vec::with_capacity(unwrapped_cookie.encoded_len()); 65 | unwrapped_cookie 66 | .encode(&mut plaintext) 67 | .expect("Failed to serialize cookie plaintext"); 68 | 69 | let mut nonce = Aes128SivNonce::default(); 70 | rand.fill_bytes(nonce.as_mut_slice()); 71 | 72 | let ciphertext = aead 73 | .encrypt(&nonce, plaintext.as_slice()) 74 | .expect("Failed to encrypt cookie"); 75 | 76 | let cookie = Cookie { 77 | nonce: nonce.to_vec(), 78 | key_id: master_key_id, 79 | ciphertext, 80 | }; 81 | 82 | let mut out = Vec::with_capacity(cookie.encoded_len()); 83 | cookie 84 | .encode(&mut out) 85 | .expect("Failed to serialize cookie ciphertext"); 86 | out 87 | } 88 | 89 | #[cfg(test)] 90 | mod tests { 91 | use super::*; 92 | use quickcheck::*; 93 | use quickcheck_macros::quickcheck; 94 | 95 | #[derive(Debug, Clone)] 96 | struct ArbitraryKey(Aes128SivKey); 97 | 98 | impl Arbitrary for ArbitraryKey { 99 | fn arbitrary(g: &mut G) -> ArbitraryKey { 100 | let mut key = Aes128SivKey::default(); 101 | g.fill_bytes(&mut key); 102 | ArbitraryKey(key) 103 | } 104 | } 105 | 106 | impl Arbitrary for CookieData { 107 | fn arbitrary(g: &mut G) -> CookieData { 108 | CookieData { 109 | c2s: ArbitraryKey::arbitrary(g).0, 110 | s2c: ArbitraryKey::arbitrary(g).0, 111 | } 112 | } 113 | } 114 | 115 | #[quickcheck] 116 | fn round_trip(unwrapped: CookieData, master_key: ArbitraryKey, key_id: u32) -> bool { 117 | let mut rng = rand::thread_rng(); 118 | let get_key = |id| { 119 | if id == key_id { 120 | Some(master_key.0) 121 | } else { 122 | None 123 | } 124 | }; 125 | 126 | open_cookie( 127 | &seal_cookie(&unwrapped, &master_key.0, key_id, &mut rng), 128 | get_key, 129 | ) == Some(unwrapped) 130 | } 131 | 132 | #[quickcheck] 133 | fn bad_cookie(cookie: Vec, master_key: ArbitraryKey) -> bool { 134 | let get_key = |_| Some(master_key.0); 135 | 136 | open_cookie(&cookie.as_slice(), get_key) == None 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /byztimed/src/core.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | //! 
Core state machine 5 | //! 6 | //! This module implements the core logic of Byztime, basically what's 7 | //! described in in the Byztime paper. The types it passes in and out 8 | //! are just abstract representations of Byztime messages; it doesn't 9 | //! do any network IO or know anything about wire formats or 10 | //! cryptography. It is, however, responsible for updating the timedata 11 | //! file, so state updates will be visible to clients. 12 | 13 | use crate::config::*; 14 | use crate::peer_name::PeerName; 15 | use byztime::*; 16 | use std::collections::*; 17 | use std::io; 18 | use std::iter::FromIterator; 19 | 20 | #[cfg(test)] 21 | use quickcheck::{Arbitrary, Gen}; 22 | 23 | ///Unique identifier for a time request 24 | pub type UniqueId = [u8; 16]; 25 | 26 | ///The time that it takes for light to travel 10**9 meters through a vacuum 27 | fn light_gigameter() -> Timestamp { 28 | Timestamp::new(3, 335_640_952) 29 | } 30 | 31 | ///Everything we know about the state of a particular peer's clock 32 | #[derive(Debug, Clone, Eq, Ord, PartialEq, PartialOrd)] 33 | struct PeerClock { 34 | ///Era of peer's clock 35 | era: Era, 36 | ///Estimate of peer's local clock minus our local clock 37 | local_offset: Timestamp, 38 | ///Peer's estimate of global clock minus peer's local clock 39 | global_offset: Timestamp, 40 | ///Round-trip time of best clock sample 41 | rtt: Timestamp, 42 | ///Time (according to local clock) when best clock sample was acquired 43 | origin_time: Timestamp, 44 | } 45 | 46 | impl PeerClock { 47 | ///Returns an estimate of (global clock - our local clock) based on information from this peer 48 | fn offset(&self) -> Timestamp { 49 | self.local_offset + self.global_offset 50 | } 51 | 52 | ///Returns the maximum absolute estimation error of local_offset. 
53 | /// 54 | /// * `drift_ppb`: Upper bound on rate of clock drift, in parts per billion 55 | /// * `dist`: Lower bound on our physical distance from the peer, in meters 56 | /// * `as_of`: Local time as of which to compute the error bound 57 | fn error(&self, drift_ppb: i64, dist: i64, as_of: &Timestamp) -> Timestamp { 58 | let age = *as_of - self.origin_time; 59 | let light_time = light_gigameter().scale(2 * dist); 60 | self.rtt.halve() - light_time.halve() + age.scale(2 * drift_ppb) 61 | } 62 | } 63 | 64 | ///Everything we know about a particular peer 65 | #[derive(Debug, Clone, Eq, Ord, PartialEq, PartialOrd)] 66 | struct PeerState { 67 | ///Physical distance in meters 68 | dist: i64, 69 | ///Unique-id of any in-flight query to this peer 70 | inflight: Option, 71 | ///Origin timestamp of any in-flight query to this peer 72 | origin_time: Option, 73 | ///State of peer's clock, if known 74 | clock: Option, 75 | } 76 | 77 | ///Semantic representation of a Byztime query packet 78 | #[derive(Debug, Copy, Clone, Eq, Ord, PartialEq, PartialOrd)] 79 | pub struct Query { 80 | ///An arbitrary bytestring uniquely identifying this query 81 | pub unique_id: UniqueId, 82 | } 83 | 84 | #[cfg(test)] 85 | impl Arbitrary for Query { 86 | fn arbitrary(g: &mut G) -> Query { 87 | let mut unique_id = [0; 16]; 88 | for byte in &mut unique_id { 89 | *byte = u8::arbitrary(g); 90 | } 91 | Query { unique_id } 92 | } 93 | } 94 | 95 | ///Semantic representation of a Byztime response packet 96 | #[derive(Debug, Clone, Eq, Ord, PartialEq, PartialOrd)] 97 | pub struct Response { 98 | ///Unique identifer that was passed in the corresponding query 99 | pub unique_id: UniqueId, 100 | ///Era of our local clock 101 | pub era: Era, 102 | ///Our local clock value 103 | pub local_clock: Timestamp, 104 | ///Our estimate of (global clock - our local clock) 105 | pub global_offset: Timestamp, 106 | } 107 | 108 | #[cfg(test)] 109 | impl Arbitrary for Response { 110 | fn arbitrary(g: &mut G) -> Response { 111 | let mut unique_id = [0; 16]; 112 | g.fill_bytes(&mut unique_id); 113 | Response { 114 | unique_id, 115 | era: Era::arbitrary(g), 116 | local_clock: Timestamp::arbitrary(g), 117 | global_offset: Timestamp::arbitrary(g), 118 | } 119 | } 120 | } 121 | 122 | ///The Byztime state machine 123 | pub struct CoreState { 124 | ///Read-write handle to the timedata file 125 | ctx: ProviderContext, 126 | ///Whether we're in read-only mode (just querying consensus, not participating) 127 | ro_mode: bool, 128 | ///Upper bound on clock drift rate in parts per billion 129 | drift_ppb: i64, 130 | ///Our clock era 131 | era: Era, 132 | //Map of UniqueIds to the peer they are in flight for 133 | inflight: HashMap, 134 | ///State of each peer 135 | peers: HashMap, 136 | } 137 | 138 | ///Health statistics 139 | #[derive(Debug, Clone)] 140 | pub struct HealthStats { 141 | pub real_time: Timestamp, 142 | pub global_time: Timestamp, 143 | pub max_error: Timestamp, 144 | pub est_error: f64, 145 | } 146 | 147 | impl CoreState { 148 | ///Initialize ourselves from the configuration file 149 | pub fn initialize(config: &Config) -> io::Result { 150 | Ok(CoreState { 151 | ctx: ProviderContext::open(&config.timedata)?, 152 | ro_mode: config.ro_mode, 153 | drift_ppb: config.drift_ppb, 154 | era: Era::get()?, 155 | inflight: HashMap::new(), 156 | peers: HashMap::from_iter(config.peers.iter().map(|(peer_name, peerconfig)| { 157 | ( 158 | peer_name.clone(), 159 | PeerState { 160 | dist: peerconfig.dist, 161 | inflight: None, 162 | origin_time: None, 163 | 
clock: None, 164 | }, 165 | ) 166 | })), 167 | }) 168 | } 169 | 170 | ///Called upon receiving a query, and returns the corresponding 171 | ///response. The caller is responsible for promptly transmitting 172 | ///the response to its proper destination. 173 | pub fn on_query(&self, query: &Query) -> io::Result { 174 | Ok(Response { 175 | unique_id: query.unique_id, 176 | local_clock: Timestamp::local_time()?, 177 | era: self.era, 178 | global_offset: self.ctx.offset_quick(), 179 | }) 180 | } 181 | 182 | ///Called at each polling interval for each peer, and returns a 183 | ///query to send to that peer. The caller is responsible for 184 | ///prompty transmitting the query to its proper destination. 185 | pub fn on_tick( 186 | &mut self, 187 | peer_name: &PeerName, 188 | rng: &mut R, 189 | ) -> io::Result { 190 | let peerstate = self.peers.get_mut(peer_name).expect("unknown peer"); 191 | let mut unique_id = [0; 16]; 192 | rng.fill_bytes(&mut unique_id); 193 | 194 | let result = Query { unique_id }; 195 | if let Some(old_inflight) = peerstate.inflight { 196 | self.inflight.remove(&old_inflight); 197 | } 198 | 199 | self.inflight.insert(unique_id, peer_name.clone()); 200 | peerstate.inflight = Some(unique_id); 201 | peerstate.origin_time = Some(Timestamp::local_time()?); 202 | Ok(result) 203 | } 204 | 205 | ///Called periodically in single-node configurations to keep the timedata 206 | ///file's error bounds updated 207 | pub fn on_single_node_tick(&mut self) -> io::Result<()> { 208 | let now = Timestamp::local_time()?; 209 | self.update_offset(now) 210 | } 211 | 212 | ///Called just before actually sending a query over the network, 213 | ///to give us a more accurate origin timestamp. 214 | pub fn on_departure(&mut self, peer_name: &PeerName) -> io::Result<()> { 215 | let peerstate = self.peers.get_mut(peer_name).expect("unknown peer"); 216 | peerstate.origin_time = Some(Timestamp::local_time()?); 217 | Ok(()) 218 | } 219 | 220 | pub fn lookup_peer(&self, unique_id: &UniqueId) -> Option { 221 | self.inflight.get(unique_id).cloned() 222 | } 223 | 224 | ///Called upon receiving a response. Updates state, including 225 | ///updating the timedata file. 
226 | pub fn on_response(&mut self, response: &Response, dest_time: Timestamp) -> io::Result<()> { 227 | if let Some(peer) = self.inflight.remove(&response.unique_id) { 228 | self.update_peer_state(response, &peer, dest_time)?; 229 | self.update_offset(dest_time) 230 | } else { 231 | Ok(()) 232 | } 233 | } 234 | 235 | pub fn get_health_stats(&self) -> io::Result { 236 | let real_time = Timestamp::real_time()?; 237 | let local_time = Timestamp::local_time()?; 238 | let (min, offset, max) = self.ctx.offset()?; 239 | let global_time = local_time + offset; 240 | 241 | let max_error = max.halve() - min.halve(); 242 | 243 | let n_peers = self.peers.len(); 244 | let n = if self.ro_mode { n_peers } else { n_peers + 1 }; 245 | let f = (n + 1) / 3; 246 | let mut peer_squared_offsets = Vec::from_iter(self.peers.values().map(|peer| { 247 | peer.clock.as_ref().map_or(std::f64::INFINITY, |clock| { 248 | let offset = clock.offset() - self.ctx.offset_quick(); 249 | let float_offset = 250 | offset.seconds() as f64 + offset.nanoseconds() as f64 / 1_000_000_000 as f64; 251 | float_offset * float_offset 252 | }) 253 | })); 254 | peer_squared_offsets.sort_by(|a, b| a.partial_cmp(b).unwrap()); 255 | peer_squared_offsets.truncate(n - f); 256 | let est_error = 257 | peer_squared_offsets.iter().sum::().sqrt() / (peer_squared_offsets.len() as f64); 258 | 259 | Ok(HealthStats { 260 | real_time, 261 | global_time, 262 | max_error, 263 | est_error, 264 | }) 265 | } 266 | 267 | ///Update the offset between the global clock and the system clock. 268 | /// 269 | ///The only thing this is used for is persistence across reboots; 270 | /// so that if we have just rebooted and not yet re-contacted any 271 | /// of our peers, we can use system time to give a sane estimate 272 | /// of global time, albeit one with infinite error bounds. 273 | pub fn update_real_offset(&mut self) -> io::Result<()> { 274 | self.ctx.update_real_offset() 275 | } 276 | 277 | ///Update our `PeerClock` based on a response from that peer. Caller is responsible 278 | ///for checking that the response corresponds to an in-flight request. 279 | fn update_peer_state( 280 | &mut self, 281 | response: &Response, 282 | peer_name: &PeerName, 283 | dest_time: Timestamp, 284 | ) -> io::Result<()> { 285 | let peerstate = self.peers.get_mut(peer_name).expect("unknown peer"); 286 | 287 | //Check when the request corresponding to this response was sent 288 | let origin_time = peerstate 289 | .origin_time 290 | .expect("Called update_peer_state with nothing in flight"); 291 | peerstate.inflight = None; 292 | peerstate.origin_time = None; 293 | 294 | let rtt = dest_time - origin_time; //Round trip time 295 | let xmit_time = origin_time.halve() + dest_time.halve(); //Estimate of when the response left the peer 296 | let new_quality = rtt.halve() + rtt.scale(2 * self.drift_ppb); //Quality metric of this sample (lower is better) 297 | 298 | peerstate.clock = Some(match &peerstate.clock { 299 | //If we have no sample other than this one, accept it. 300 | None => PeerClock { 301 | era: response.era, 302 | local_offset: response.local_clock.saturating_sub(xmit_time), 303 | global_offset: response.global_offset.saturating_normalize(), 304 | rtt, 305 | origin_time, 306 | }, 307 | 308 | //Otherwise, only accept it if it's of better quality than what we already have. 
309 | Some(peer_clock) => { 310 | let old_age = origin_time - peer_clock.origin_time; 311 | let old_quality = peer_clock.rtt.halve() + old_age.scale(2 * self.drift_ppb); 312 | if new_quality < old_quality || response.era != peer_clock.era { 313 | //Either it's of better quality (remember, lower 314 | //is better), or the peer's clock era has changed 315 | //in which case whatever we had before is now 316 | //worthless. Accept the sample. 317 | PeerClock { 318 | era: response.era, 319 | local_offset: response.local_clock.saturating_sub(xmit_time), 320 | global_offset: response.global_offset.saturating_normalize(), 321 | rtt, 322 | origin_time, 323 | } 324 | } else { 325 | //Otherwise, just update the peer's `global_offset` and 326 | //leave everything else the same. 327 | PeerClock { 328 | era: peer_clock.era, 329 | local_offset: peer_clock.local_offset, 330 | global_offset: response.global_offset.saturating_normalize(), 331 | rtt: peer_clock.rtt, 332 | origin_time: peer_clock.origin_time, 333 | } 334 | } 335 | } 336 | }); 337 | 338 | Ok(()) 339 | } 340 | 341 | ///Recompute our global offset estimate from newly-updated peer clocks. 342 | fn update_offset(&mut self, as_of: Timestamp) -> io::Result<()> { 343 | let (my_min, my_est, my_max) = self.ctx.offset()?; 344 | 345 | //From each peer, we obtain an estimate of 346 | // (peer's estimate of (peer global clock - our local clock)), 347 | // which is computed as 348 | // (our estimate of (peer's local clock - our local clock)) + 349 | // (peer's estimate of (global clock - peer's local clock)). 350 | // Note the construction of this: we're estimating an estimate. 351 | // The error bounds on *our* estimate are determined by network 352 | // latency between ourself and the peer. 353 | 354 | //Enumerate lower bounds on the value described above. 355 | let mut minima = { 356 | let peer_min_iter = self 357 | .peers 358 | .values() 359 | .map(|ref peer_state| match &peer_state.clock { 360 | None => { 361 | //For any peer we haven't contacted since the 362 | // last time the daemon restarted, we can 363 | // safely substitute the aggregate lower bound 364 | // computed from prior runs. If this the daemon's 365 | // first startup since the last reboot, libbyztime 366 | // will have inserted an INT_MIN-like lower bound 367 | // for us. 368 | my_min 369 | } 370 | Some(clock) => clock.offset().saturating_sub(clock.error( 371 | self.drift_ppb, 372 | peer_state.dist, 373 | &as_of, 374 | )), 375 | }); 376 | 377 | //Include our own clock in the list only if we're a participant in consensus, 378 | //i.e., we're not running in read-only mode. 379 | if self.ro_mode { 380 | Vec::from_iter(peer_min_iter) 381 | } else { 382 | //We can estimate our *own* estimate perfectly, so the lower bound on my_est is just my_est. 
383 | Vec::from_iter(peer_min_iter.chain(std::iter::once(my_est))) 384 | } 385 | }; 386 | 387 | //Now do the same for upper bound 388 | let mut maxima = { 389 | let peer_max_iter = self 390 | .peers 391 | .values() 392 | .map(|ref peer_state| match &peer_state.clock { 393 | None => my_max, 394 | Some(clock) => clock.offset().saturating_add(clock.error( 395 | self.drift_ppb, 396 | peer_state.dist, 397 | &as_of, 398 | )), 399 | }); 400 | 401 | if self.ro_mode { 402 | Vec::from_iter(peer_max_iter) 403 | } else { 404 | Vec::from_iter(peer_max_iter.chain(std::iter::once(my_est))) 405 | } 406 | }; 407 | 408 | //Sort these bounds 409 | minima.sort_unstable(); 410 | maxima.sort_unstable(); 411 | 412 | //Compute f, the number of faulty peers we can tolerate 413 | let n_peers = self.peers.len(); 414 | let n = if self.ro_mode { n_peers } else { n_peers + 1 }; 415 | let f = (n - 1) / 3; 416 | 417 | //Now by discarding the f lowest lower bounds and the f highest upper bounds, 418 | // we can directly estimate (global clock - our local clock), rather than 419 | // estimating estimates like we have so far. 420 | let lo = minima[f]; //The f+1'th lowest lower bound 421 | let hi = maxima[n - 1 - f]; //The f+1'th highest upper bound 422 | let offset = lo.halve() + hi.halve(); //Midpoint of lo and hi 423 | let error = hi.halve() - lo.halve(); //Maximum absolute estimation error 424 | 425 | //Verify that the newly-computed estimate is consistent with 426 | //the old one to within the drift rate. If so, record it. 427 | let (old_offset, old_error, old_as_of) = self.ctx.offset_raw(); 428 | let old_min = old_offset.saturating_sub(old_error); 429 | let old_max = old_offset.saturating_add(old_error); 430 | let new_min = lo; 431 | let new_max = hi; 432 | let age = as_of.saturating_sub(old_as_of); 433 | let max_drift = age.scale(2 * self.drift_ppb); 434 | 435 | if new_min >= old_min - max_drift && new_max <= old_max + max_drift { 436 | self.ctx.set_offset(offset, error, as_of) 437 | } else { 438 | Ok(()) 439 | } 440 | } 441 | } 442 | -------------------------------------------------------------------------------- /byztimed/src/lib.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | //! THIS CRATE IS NOT A LIBRARY. 5 | //! 6 | //! The API exposed by this crate is intended only for internal use 7 | //! by the `byztimed` binary and carries no stability guarantees 8 | //! whatsoever. See for 9 | //! user documentation. This crate is semantically versioned on the 10 | //! format of its configuration file and wire protocol. That is, it 11 | //! will interoperate with peers running compatible versions, and 12 | //! minor version bumps should not require the user to make any 13 | //! changes to the configuration file. 
14 | 15 | pub mod aead; 16 | pub mod config; 17 | pub mod core; 18 | pub mod logging; 19 | ///Generated code for serializing and deserializing Byztime's protobuf-based wire format 20 | pub mod wire { 21 | include!(concat!(env!("OUT_DIR"), "/byztimed.wire.rs")); 22 | ///The amount of padding, in addition to space needed for extra 23 | /// cookies, that has to be added to a request to make it equal in 24 | /// length to the anticpated response 25 | pub const EXTRA_PADDING: usize = 39; 26 | } 27 | pub mod cookie; 28 | pub mod ntske; 29 | pub mod peer_name; 30 | pub mod store; 31 | pub mod time_client; 32 | pub mod time_server; 33 | 34 | #[cfg(test)] 35 | mod time_test; 36 | -------------------------------------------------------------------------------- /byztimed/src/logging.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | //! Configuration for log4rs 5 | //! 6 | //! log4rs is a very flexible logging framework – much more flexible than we need it to be. 7 | //! This module translates the much simpler configuration language we expose through the 8 | //! Byztime config file into a log4rs configuration and initializes log4rs accordingly. 9 | 10 | use log4rs::append::console::Target; 11 | use log4rs::config::*; 12 | use log4rs::encode::pattern::PatternEncoder; 13 | use std::boxed::Box; 14 | use std::fmt; 15 | use std::io; 16 | use std::path; 17 | 18 | pub type LogHandle = log4rs::Handle; 19 | 20 | ///A logging target — either a file or STDOUT or STDERR — coupled with 21 | /// a minimum severity level 22 | pub enum LogConfig { 23 | ConsoleLog(Target, log::LevelFilter), 24 | FileLog(path::PathBuf, log::LevelFilter), 25 | } 26 | 27 | //log4rs is missing derived traits for Target, so we have to manually reimplement this. 
28 | fn clone_target(target: &Target) -> Target { 29 | match target { 30 | Target::Stdout => Target::Stdout, 31 | Target::Stderr => Target::Stderr, 32 | } 33 | } 34 | 35 | impl LogConfig { 36 | fn name(&self) -> &str { 37 | match self { 38 | LogConfig::ConsoleLog(target, _) => match target { 39 | Target::Stdout => "STDOUT", 40 | Target::Stderr => "STDERR", 41 | }, 42 | LogConfig::FileLog(path, _) => path.to_str().expect("malformed UTF-8 in path name"), 43 | } 44 | } 45 | 46 | fn level(&self) -> log::LevelFilter { 47 | match self { 48 | LogConfig::ConsoleLog(_, level) => *level, 49 | LogConfig::FileLog(_, level) => *level, 50 | } 51 | } 52 | 53 | fn filter(&self) -> Box { 54 | Box::new(log4rs::filter::threshold::ThresholdFilter::new( 55 | self.level(), 56 | )) 57 | } 58 | 59 | fn append(&self, pattern: Option<&str>) -> io::Result> { 60 | match self { 61 | LogConfig::ConsoleLog(target, _) => Ok(Box::new( 62 | log4rs::append::console::ConsoleAppender::builder() 63 | .target(clone_target(target)) 64 | .encoder(Box::new(PatternEncoder::new( 65 | pattern.unwrap_or("{d} {l} {t} - {m}{n}"), 66 | ))) 67 | .build(), 68 | )), 69 | LogConfig::FileLog(path, _) => Ok(Box::new( 70 | log4rs::append::file::FileAppender::builder() 71 | .encoder(Box::new(PatternEncoder::new( 72 | pattern.unwrap_or("{d} {l} {t} - {m}{n}"), 73 | ))) 74 | .build(path)?, 75 | )), 76 | } 77 | } 78 | 79 | fn appender(&self, pattern: Option<&str>) -> io::Result { 80 | Ok(Appender::builder() 81 | .filter(self.filter()) 82 | .build(self.name(), self.append(pattern)?)) 83 | } 84 | } 85 | 86 | //`Target` is missing a derived `Debug` trait so for `LogConfig` we have to implement 87 | // `Debug` manually rather than deriving it. 88 | impl fmt::Debug for LogConfig { 89 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 90 | match self { 91 | LogConfig::ConsoleLog(Target::Stdout, level) => { 92 | write!(f, "ConsoleLog(Stdout, {:?})", level) 93 | } 94 | LogConfig::ConsoleLog(Target::Stderr, level) => { 95 | write!(f, "ConsoleLog(Stderr, {:?})", level) 96 | } 97 | LogConfig::FileLog(path, level) => write!(f, "FileLog({:?}, {:?})", path, level), 98 | } 99 | } 100 | } 101 | 102 | //Ditto for `Clone`. 
103 | impl Clone for LogConfig { 104 | fn clone(&self) -> LogConfig { 105 | match self { 106 | LogConfig::ConsoleLog(target, filter) => { 107 | LogConfig::ConsoleLog(clone_target(target), *filter) 108 | } 109 | LogConfig::FileLog(path, filter) => LogConfig::FileLog(path.clone(), *filter), 110 | } 111 | } 112 | } 113 | 114 | ///Build a log4rs config from a sequence of `LogConfig`s returned by the iterator 115 | fn build_config<'a, I: IntoIterator>( 116 | cfgs: I, 117 | pattern: Option<&str>, 118 | ) -> io::Result { 119 | let mut config_builder = Config::builder(); 120 | let mut root_builder = Root::builder(); 121 | for cfg in cfgs.into_iter() { 122 | config_builder = config_builder.appender(cfg.appender(pattern)?); 123 | root_builder = root_builder.appender(cfg.name()); 124 | } 125 | let root = root_builder.build(log::LevelFilter::Trace); 126 | Ok(config_builder 127 | .build(root) 128 | .expect("While building log config")) 129 | } 130 | 131 | ///Initialize logging 132 | pub fn init_logging<'a, I: IntoIterator>( 133 | cfgs: I, 134 | pattern: Option<&str>, 135 | ) -> io::Result { 136 | Ok(log4rs::init_config(build_config(cfgs, pattern)?).expect("While initializing logging")) 137 | } 138 | 139 | ///Reinitialize logging (useful for reopening log files after they've been rotated) 140 | pub fn reinit_logging<'a, I: IntoIterator>( 141 | cfgs: I, 142 | pattern: Option<&str>, 143 | handle: &LogHandle, 144 | ) -> io::Result<()> { 145 | handle.set_config(build_config(cfgs, pattern)?); 146 | Ok(()) 147 | } 148 | 149 | #[cfg(test)] 150 | mod tests { 151 | use super::*; 152 | use log::info; 153 | use std::fs; 154 | 155 | //This test is broken. Its intent is to check that log rotation 156 | // works, but it panics on init_logging() because cargo's unit 157 | // test framework has already initialized logging before our unit 158 | // test ever gets invoked. There's currently no way to fix this, 159 | // but we can keep the code around in case there ever is. For now 160 | // there's no #[test] attribute on this function, so it won't be 161 | // run. 162 | #[allow(dead_code)] 163 | fn log_rotation() { 164 | let tempdir = tempfile::TempDir::new().unwrap(); 165 | let mut logpath = path::PathBuf::from(tempdir.path()); 166 | let mut logpath_rotated = logpath.clone(); 167 | logpath.push("logfile"); 168 | logpath_rotated.push("logfile.old"); 169 | let logconfig = vec![LogConfig::FileLog(logpath.clone(), log::LevelFilter::Info)]; 170 | let handle = init_logging(&logconfig, None).unwrap(); 171 | info!("PRE-ROTATION"); 172 | fs::rename(&logpath, &logpath_rotated).unwrap(); 173 | info!("MID-ROTATION"); 174 | reinit_logging(&logconfig, None, &handle).unwrap(); 175 | info!("POST-ROTATION"); 176 | 177 | //Here we would check for the strings "PRE-ROTATION" and 178 | // "MID-ROTATION" in logfile.old, and "POST-ROTATION" in 179 | // logfile. 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /byztimed/src/main.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 
2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | use byztimed::config::{Config, ConfigError}; 5 | use byztimed::core; 6 | use byztimed::logging::{init_logging, reinit_logging, LogHandle}; 7 | use byztimed::ntske; 8 | use byztimed::peer_name::PeerName; 9 | use byztimed::store::{SecretStore, StoreError}; 10 | use byztimed::time_client; 11 | use byztimed::time_server; 12 | use std::fmt; 13 | use std::fs; 14 | use std::future::Future; 15 | use std::net; 16 | use std::path; 17 | use std::process; 18 | use std::sync::*; 19 | use tokio::io; 20 | use tokio::net::{TcpListener, UdpSocket}; 21 | use tokio::signal::unix::{signal, SignalKind}; 22 | use tokio::sync::mpsc; 23 | use tokio::time; 24 | 25 | #[macro_use] 26 | extern crate log; 27 | 28 | struct GlobalState { 29 | cfg: Config, 30 | core_state: RwLock, 31 | secret_store: SecretStore, 32 | shutdown_sender: mpsc::UnboundedSender>, 33 | } 34 | 35 | #[derive(Debug)] 36 | ///Enumeration of errors that will make us terminate the program 37 | enum FatalError { 38 | ArgumentError(clap::Error), 39 | ConfigReadError(io::Error), 40 | ConfigDecodeError(std::string::FromUtf8Error), 41 | ConfigErrors(ConfigError), 42 | LogInitError(io::Error), 43 | TimedataError(io::Error), 44 | StoreError(StoreError), 45 | ResolverError(trust_dns_resolver::error::ResolveError), 46 | TcpBindError(io::Error), 47 | UdpServerBindError(io::Error), 48 | UdpClientBindError(io::Error), 49 | ChildTaskError(io::Error), 50 | ChildTaskJoinError(tokio::task::JoinError), 51 | } 52 | 53 | impl fmt::Display for FatalError { 54 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 55 | use FatalError::*; 56 | match self { 57 | ArgumentError(e) => e.fmt(f), 58 | ConfigReadError(e) => write!(f, "Reading configuration file: {}", e), 59 | ConfigDecodeError(e) => write!(f, "UTF-8 decoding configuration file: {}", e), 60 | ConfigErrors(e) => e.fmt(f), 61 | LogInitError(e) => write!(f, "Initializing logging: {}", e), 62 | TimedataError(e) => write!(f, "Opening timedata file: {}", e), 63 | StoreError(e) => write!(f, "Opening secret store: {}", e), 64 | ResolverError(e) => write!(f, "Initializing DNS resolver: {}", e), 65 | TcpBindError(e) => write!(f, "Binding NTS-KE server socket: {}", e), 66 | UdpServerBindError(e) => write!(f, "Binding server UDP socket: {}", e), 67 | UdpClientBindError(e) => write!(f, "Binding client UDP socket: {}", e), 68 | ChildTaskError(e) => write!(f, "IO error in child task: {}", e), 69 | ChildTaskJoinError(e) => write!(f, "Child task join error: {}", e), 70 | } 71 | } 72 | } 73 | 74 | impl std::error::Error for FatalError { 75 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 76 | use FatalError::*; 77 | match self { 78 | ArgumentError(e) => Some(e), 79 | ConfigReadError(e) => Some(e), 80 | ConfigDecodeError(e) => Some(e), 81 | ConfigErrors(e) => Some(e), 82 | LogInitError(e) => Some(e), 83 | TimedataError(e) => Some(e), 84 | StoreError(_) => None, //StoreError doesn't implement std::error::Error 85 | ResolverError(_) => None, 86 | TcpBindError(e) => Some(e), 87 | UdpServerBindError(e) => Some(e), 88 | UdpClientBindError(e) => Some(e), 89 | ChildTaskError(e) => Some(e), 90 | ChildTaskJoinError(e) => Some(e), 91 | } 92 | } 93 | } 94 | 95 | fn main() { 96 | let runtime = tokio::runtime::Runtime::new().unwrap(); 97 | 98 | if let Err(e) = runtime.block_on(async_main()) { 99 | eprintln!("{}", e); 100 | process::exit(1) 101 | } 102 | } 103 | 104 | async fn async_main() -> Result<(), FatalError> { 105 | /* Parse the command line */ 106 | let matches = 
clap::App::new(env!("CARGO_PKG_NAME")) 107 | .version(env!("CARGO_PKG_VERSION")) 108 | .author(env!("CARGO_PKG_AUTHORS")) 109 | .about(env!("CARGO_PKG_DESCRIPTION")) 110 | .arg( 111 | clap::Arg::with_name("cfg_file") 112 | .help("Path to configuration file") 113 | .required(true) 114 | .index(1), 115 | ) 116 | .get_matches_safe() 117 | .map_err(FatalError::ArgumentError)?; 118 | 119 | /* Parse the configuration file */ 120 | let cfg_path = path::Path::new(matches.value_of_os("cfg_file").unwrap()); 121 | let cfg_bytestring = fs::read(cfg_path).map_err(FatalError::ConfigReadError)?; 122 | let cfg_contents = String::from_utf8(cfg_bytestring).map_err(FatalError::ConfigDecodeError)?; 123 | let cfg = Config::parse(&cfg_contents).map_err(FatalError::ConfigErrors)?; 124 | 125 | /* Initialize logging */ 126 | let log_handle = init_logging(&cfg.logging, cfg.log_format.as_ref().map(|s| s.as_str())) 127 | .map_err(FatalError::LogInitError)?; 128 | 129 | /* Open the timedata file and secret store and initialize various bits of 130 | global state. */ 131 | 132 | let (shutdown_sender, mut shutdown_receiver) = mpsc::unbounded_channel(); 133 | 134 | let state = Arc::new(GlobalState { 135 | core_state: RwLock::new( 136 | core::CoreState::initialize(&cfg).map_err(FatalError::TimedataError)?, 137 | ), 138 | secret_store: SecretStore::new(&cfg.secret_store.as_path()) 139 | .map_err(FatalError::StoreError)?, 140 | shutdown_sender, 141 | cfg, 142 | }); 143 | 144 | /* Initialize the DNS resolver */ 145 | let resolver = Arc::new( 146 | trust_dns_resolver::AsyncResolver::tokio_from_system_conf() 147 | .map_err(FatalError::ResolverError)?, 148 | ); 149 | 150 | /* Spawn signal handlers */ 151 | watch( 152 | state.clone(), 153 | shutdown_signal_task(state.clone(), SignalKind::interrupt(), "SIGINT"), 154 | "SIGINT handler", 155 | ) 156 | .await; 157 | watch( 158 | state.clone(), 159 | shutdown_signal_task(state.clone(), SignalKind::terminate(), "SIGTERM"), 160 | "SIGTERM handler", 161 | ) 162 | .await; 163 | watch( 164 | state.clone(), 165 | logrotate_signal_task(log_handle, state.clone(), SignalKind::hangup(), "SIGHUP"), 166 | "SIGHUP handler", 167 | ) 168 | .await; 169 | 170 | /* Spawn a task for periodically updating our real-time offset */ 171 | watch( 172 | state.clone(), 173 | update_real_offset_task(state.clone()), 174 | "real offset updater", 175 | ) 176 | .await; 177 | 178 | /* Bind server sockets and spawn associated tasks */ 179 | if !state.cfg.ro_mode { 180 | let server_addr = net::SocketAddr::new(state.cfg.bind_host, state.cfg.bind_port); 181 | let ntske_server_socket = TcpListener::bind(&server_addr) 182 | .await 183 | .map_err(FatalError::TcpBindError)?; 184 | let time_server_socket = UdpSocket::bind(&server_addr) 185 | .await 186 | .map_err(FatalError::UdpServerBindError)?; 187 | 188 | watch( 189 | state.clone(), 190 | ntske_server_task(state.clone(), ntske_server_socket), 191 | "NTS-KE server", 192 | ) 193 | .await; 194 | watch( 195 | state.clone(), 196 | time_server_task(state.clone(), time_server_socket), 197 | "time server", 198 | ) 199 | .await; 200 | } 201 | 202 | /* Bind client socket and spawn assoicated tasks */ 203 | if !state.cfg.peers.is_empty() { 204 | let time_client_socket = Arc::new( 205 | UdpSocket::bind(&net::SocketAddr::new( 206 | net::IpAddr::V6(net::Ipv6Addr::UNSPECIFIED), 207 | 0, 208 | )) 209 | .await 210 | .map_err(FatalError::UdpClientBindError)?, 211 | ); 212 | 213 | watch( 214 | state.clone(), 215 | tick_task(state.clone(), resolver, time_client_socket.clone()), 216 | "tick 
handler", 217 | ) 218 | .await; 219 | watch( 220 | state.clone(), 221 | time_response_task(state.clone(), time_client_socket), 222 | "time response handler", 223 | ) 224 | .await; 225 | } else { 226 | watch( 227 | state.clone(), 228 | single_node_tick_task(state.clone()), 229 | "single-node tick handler", 230 | ) 231 | .await; 232 | } 233 | 234 | info!("Started"); 235 | 236 | /* Wait for a shutdown signal and then exit */ 237 | shutdown_receiver.recv().await.unwrap() 238 | } 239 | 240 | ///Spawn the provided task, then spawn another task that supervises its join handle 241 | /// and signals for shutdown if it errored or panicked 242 | async fn watch> + Send + 'static>( 243 | state: Arc, 244 | f: F, 245 | task_desc: &'static str, 246 | ) { 247 | let join_handle = tokio::spawn(f); 248 | tokio::spawn(async move { 249 | match join_handle.await { 250 | Ok(Ok(())) => (), 251 | Ok(Err(e)) => { 252 | error!( 253 | "Bailing out due to IO error in child task '{}': {}", 254 | task_desc, e 255 | ); 256 | state 257 | .shutdown_sender 258 | .send(Err(FatalError::ChildTaskError(e))) 259 | .unwrap_or(()); 260 | } 261 | Err(e) => { 262 | //Cancellations are benign, it just means the runtime 263 | // is shutting down. Usually the watcher task will 264 | // get shut down as well before it ever gets far 265 | // enough to observe the JoinError, but according to 266 | // https://github.com/tokio-rs/tokio/issues/2077#issuecomment-572671950 267 | // this isn't guaranteed. Scraping the error's 268 | // to_string() to determine whether it's a 269 | // cancellation or something else is super ugly, but 270 | // right now it's all that's available. I've 271 | // submitted 272 | // https://github.com/tokio-rs/tokio/pull/2051 in 273 | // order to have something cleaner, but as of this 274 | // comment it hasn't yet been merged. 275 | if e.to_string() != "cancelled" { 276 | error!( 277 | "Bailing out due to join error in child task '{}': {}", 278 | task_desc, e 279 | ); 280 | state 281 | .shutdown_sender 282 | .send(Err(FatalError::ChildTaskJoinError(e))) 283 | .unwrap_or(()); 284 | } 285 | } 286 | } 287 | }); 288 | } 289 | 290 | ///Task for serving NTS-KE requests 291 | async fn ntske_server_task(state: Arc, listener: TcpListener) -> io::Result<()> { 292 | loop { 293 | match listener.accept().await { 294 | Ok((tcp_stream, peer_addr)) => { 295 | let child_state = state.clone(); 296 | tokio::spawn(time::timeout(time::Duration::new(5, 0), async move { 297 | let (master_key_id, master_key) = 298 | child_state.secret_store.get_cached_current_master_key(); 299 | match child_state 300 | .cfg 301 | .tls_acceptor 302 | .clone() 303 | .accept(tcp_stream) 304 | .await 305 | { 306 | Ok(mut tls_stream) => { 307 | match ntske::serve_ntske(master_key, master_key_id, &mut tls_stream) 308 | .await 309 | { 310 | Ok(()) => debug!("Successful NTS-KE session with {}", peer_addr), 311 | Err(e) => debug!("In NTS-KE session with {}: {}", peer_addr, e), 312 | } 313 | } 314 | Err(e) => debug!("In NTS-KE handshake with {}: {}", peer_addr, e), 315 | } 316 | })); 317 | } 318 | //Yes, this is non-fatal. 
accept(2) can return errors for 319 | // a lot of silly, transient reasons like EHOSTUNREACH 320 | Err(e) => debug!("Accepting a TCP connection: {}", e), 321 | } 322 | } 323 | } 324 | 325 | ///Task for serving response to time requests 326 | async fn time_server_task(state: Arc, mut socket: UdpSocket) -> io::Result<()> { 327 | time_server::serve_time(&mut socket, &state.core_state, &state.secret_store).await 328 | } 329 | 330 | ///Task for sending out time requests every polling interval 331 | async fn tick_task( 332 | state: Arc, 333 | resolver: Arc, 334 | socket: Arc, 335 | ) -> io::Result<()> { 336 | if state.cfg.peers.is_empty() { 337 | return Ok(()); 338 | } 339 | let tick_period = 340 | time::Duration::from_secs_f64(state.cfg.poll_interval / state.cfg.peers.len() as f64); 341 | let mut interval = time::interval(tick_period); 342 | 343 | let peers: Vec = state.cfg.peers.keys().cloned().collect(); 344 | let mut next_peer = 0; 345 | loop { 346 | interval.tick().await; 347 | debug!("Tick!"); 348 | 349 | /* Round-robin cycle through list of peers */ 350 | let peer_name = &peers[next_peer]; 351 | if next_peer == peers.len() - 1 { 352 | next_peer = 0; 353 | } else { 354 | next_peer += 1; 355 | } 356 | 357 | let peer_config = state.cfg.peers.get(&peer_name).unwrap().clone(); 358 | let my_state = state.clone(); 359 | let my_peer_name = peer_name.clone(); 360 | let my_resolver = resolver.clone(); 361 | let my_socket = socket.clone(); 362 | 363 | tokio::spawn(async move { 364 | time_client::send_time_request( 365 | &my_resolver, 366 | my_socket.as_ref(), 367 | &my_peer_name, 368 | &peer_config, 369 | &my_state.core_state, 370 | &my_state.secret_store, 371 | ) 372 | .await 373 | .unwrap_or_else(|e| { 374 | log!(e.level(), "On tick for peer '{}': {}", my_peer_name, e); 375 | }) 376 | }); 377 | } 378 | } 379 | 380 | async fn single_node_tick_task(state: Arc) -> io::Result<()> { 381 | let tick_period = time::Duration::from_secs(1); 382 | let mut interval = time::interval(tick_period); 383 | 384 | loop { 385 | interval.tick().await; 386 | state.core_state.write().unwrap().on_single_node_tick()?; 387 | } 388 | } 389 | 390 | ///Task for handling incoming responses to our time queries 391 | async fn time_response_task( 392 | state: Arc, 393 | receiver: Arc, 394 | ) -> io::Result<()> { 395 | time_client::time_response_listener(receiver.as_ref(), &state.core_state, &state.secret_store) 396 | .await 397 | } 398 | 399 | ///Task for periodically updating our real-time offset 400 | async fn update_real_offset_task(state: Arc) -> io::Result<()> { 401 | let tick_period = time::Duration::new(64, 0); 402 | let mut interval = time::interval(tick_period); 403 | loop { 404 | interval.tick().await; 405 | debug!("Updating real offset"); 406 | state 407 | .core_state 408 | .write() 409 | .unwrap() 410 | .update_real_offset() 411 | .map_err(io::Error::from)?; 412 | } 413 | } 414 | 415 | ///SIGINT/SIGTERM handler 416 | async fn shutdown_signal_task( 417 | state: Arc, 418 | sig_kind: SignalKind, 419 | signame: &'static str, 420 | ) -> io::Result<()> { 421 | let mut signal_stream = signal(sig_kind)?; 422 | 423 | signal_stream.recv().await; 424 | info!("Received {}, shutting down", signame); 425 | state.shutdown_sender.send(Ok(())).unwrap_or(()); 426 | Ok(()) 427 | } 428 | 429 | ///SIGHUP handler 430 | async fn logrotate_signal_task( 431 | log_handle: LogHandle, 432 | state: Arc, 433 | sig_kind: SignalKind, 434 | signame: &'static str, 435 | ) -> io::Result<()> { 436 | let mut signal_stream = signal(sig_kind)?; 437 | 438 | 
loop { 439 | signal_stream.recv().await; 440 | info!("{} received, reinitializing logging", signame); 441 | reinit_logging( 442 | &state.cfg.logging, 443 | state.cfg.log_format.as_ref().map(|s| s.as_str()), 444 | &log_handle, 445 | )?; 446 | info!("Logging reinitialized"); 447 | } 448 | } 449 | -------------------------------------------------------------------------------- /byztimed/src/peer_name.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | //! Interned strings identifying peers 5 | 6 | use std::cmp::Ordering; 7 | use std::convert::{AsRef, From}; 8 | use std::fmt; 9 | use std::hash::{Hash, Hasher}; 10 | use std::sync::Arc; 11 | 12 | /// Interned string identifying a peer 13 | // 14 | /// A `PeerName` wraps a reference-counted string. `PeerName`'s `Eq` 15 | /// and `Ord` instances are defined using pointer comparison. A 16 | /// `PeerName` will compare equal to another `PeerName` that it was 17 | /// `clone()`d from, but not to one that was constructed from a 18 | /// different call to `PeerName::new`, even if the two are equal 19 | /// as strings. 20 | #[derive(Debug, Clone)] 21 | pub struct PeerName(Arc<String>); 22 | 23 | impl PeerName { 24 | pub fn new(name: String) -> PeerName { 25 | PeerName(Arc::new(name)) 26 | } 27 | } 28 | 29 | impl From<Arc<String>> for PeerName { 30 | fn from(other: Arc<String>) -> PeerName { 31 | PeerName(other) 32 | } 33 | } 34 | 35 | impl From<PeerName> for Arc<String> { 36 | fn from(other: PeerName) -> Arc<String> { 37 | other.0 38 | } 39 | } 40 | 41 | impl AsRef<String> for PeerName { 42 | fn as_ref(&self) -> &String { 43 | self.0.as_ref() 44 | } 45 | } 46 | 47 | impl AsRef<str> for PeerName { 48 | fn as_ref(&self) -> &str { 49 | self.0.as_ref().as_ref() 50 | } 51 | } 52 | 53 | impl AsRef<[u8]> for PeerName { 54 | fn as_ref(&self) -> &[u8] { 55 | self.0.as_ref().as_ref() 56 | } 57 | } 58 | 59 | impl PartialEq for PeerName { 60 | fn eq(&self, other: &PeerName) -> bool { 61 | Arc::ptr_eq(&self.0, &other.0) 62 | } 63 | } 64 | 65 | impl Eq for PeerName {} 66 | 67 | impl Ord for PeerName { 68 | fn cmp(&self, other: &PeerName) -> Ordering { 69 | (self.0.as_ref() as *const String).cmp(&(other.0.as_ref() as *const String)) 70 | } 71 | } 72 | 73 | impl PartialOrd for PeerName { 74 | fn partial_cmp(&self, other: &PeerName) -> Option<Ordering> { 75 | Some(self.cmp(other)) 76 | } 77 | } 78 | 79 | impl Hash for PeerName { 80 | fn hash<H: Hasher>(&self, state: &mut H) { 81 | (self.0.as_ref() as *const String).hash(state) 82 | } 83 | } 84 | 85 | impl fmt::Display for PeerName { 86 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 87 | self.0.as_ref().fmt(f) 88 | } 89 | } 90 | 91 | #[cfg(test)] 92 | mod tests { 93 | use super::*; 94 | #[test] 95 | fn cloned_eq() { 96 | let peer1 = PeerName::new("test".into()); 97 | let peer2 = peer1.clone(); 98 | assert_eq!(peer1, peer2); 99 | } 100 | 101 | #[test] 102 | fn new_neq() { 103 | let peer1 = PeerName::new("test".into()); 104 | let peer2 = PeerName::new("test".into()); 105 | assert_ne!(peer1, peer2); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /byztimed/src/time_client.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | //! 
Send time requests and process responses 5 | 6 | use crate::aead::*; 7 | use crate::config::PeerConfig; 8 | use crate::core; 9 | use crate::ntske; 10 | use crate::peer_name::PeerName; 11 | use crate::store::{SecretStore, StoreError}; 12 | use crate::wire; 13 | use bytes::Buf; 14 | use byztime::{Era, Timestamp}; 15 | use log::{debug, log, trace}; 16 | use prost::Message; 17 | use rand::RngCore; 18 | use std::convert::TryFrom; 19 | use std::fmt; 20 | use std::net::SocketAddr; 21 | use std::sync::RwLock; 22 | use tokio::io; 23 | use tokio::net; 24 | 25 | ///Enumeration of errors that can occur when sending a request 26 | #[derive(Debug)] 27 | pub enum RequestError { 28 | ResolveError(trust_dns_resolver::error::ResolveError), 29 | CookieLookupError(StoreError), 30 | C2SLookupError(StoreError), 31 | TcpError(io::Error), 32 | TlsHandshakeError(io::Error), 33 | TlsSessionError(io::Error), 34 | NtskeProblem(ntske::NtskeProblem), 35 | NtskeNoCookies, 36 | CredentialSaveError(StoreError), 37 | CoreTickError(io::Error), 38 | CoreDepartureError(io::Error), 39 | UdpSocketError(io::Error), 40 | } 41 | 42 | impl RequestError { 43 | ///Level that this error should be logged at 44 | pub fn level(&self) -> log::Level { 45 | use log::Level::*; 46 | use RequestError::*; 47 | match self { 48 | ResolveError(_) => Warn, 49 | CookieLookupError(_) => Error, 50 | C2SLookupError(_) => Error, 51 | TcpError(_) => Warn, 52 | TlsHandshakeError(_) => Warn, 53 | TlsSessionError(_) => Warn, 54 | NtskeProblem(_) => Warn, 55 | NtskeNoCookies => Warn, 56 | CredentialSaveError(_) => Error, 57 | CoreTickError(_) => Error, 58 | CoreDepartureError(_) => Error, 59 | UdpSocketError(_) => Error, 60 | } 61 | } 62 | } 63 | 64 | impl fmt::Display for RequestError { 65 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 66 | use RequestError::*; 67 | match self { 68 | ResolveError(e) => write!(f, "Resolving DNS: {}", e), 69 | CookieLookupError(e) => write!(f, "Looking up cookie from store: {}", e), 70 | C2SLookupError(e) => write!(f, "Looking up C2S key from store: {}", e), 71 | TcpError(e) => write!(f, "Establishing TCP connection for NTS-KE: {}", e), 72 | TlsHandshakeError(e) => write!(f, "During TLS handshake: {}", e), 73 | TlsSessionError(e) => write!(f, "In TLS session: {}", e), 74 | NtskeProblem(e) => write!(f, "In NTS-KE response: {:?}", e), 75 | NtskeNoCookies => write!(f, "NTS-KE succeeded but no cookies were returned"), 76 | CredentialSaveError(e) => write!(f, "Saving credentials to store: {}", e), 77 | CoreTickError(e) => write!(f, "Handling tick in core: {}", e), 78 | CoreDepartureError(e) => write!(f, "Updating origin timestamp: {}", e), 79 | UdpSocketError(e) => write!(f, "Sending on UDP socket: {}", e), 80 | } 81 | } 82 | } 83 | 84 | impl std::error::Error for RequestError {} 85 | 86 | pub fn serialize_time_request( 87 | out: &mut Vec, 88 | unique_id: &core::UniqueId, 89 | c2s: &Aes128SivKey, 90 | cookie: Vec, 91 | cookies_requested: usize, 92 | ) { 93 | let plaintext = wire::Request { 94 | num_cookies: cookies_requested as u32, 95 | }; 96 | 97 | trace!( 98 | "Encoding plaintext for time request {:x?}: {:?}", 99 | unique_id, 100 | plaintext 101 | ); 102 | 103 | let mut plaintext_serialized = Vec::with_capacity(plaintext.encoded_len()); 104 | plaintext 105 | .encode(&mut plaintext_serialized) 106 | .expect("Error encoding plaintext for time request"); 107 | 108 | let cookie_len = cookie.len(); 109 | 110 | let mut nonce = Aes128SivNonce::default(); 111 | rand::thread_rng().fill_bytes(nonce.as_mut_slice()); 112 | 
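// The rest of this function assembles the protected request: the serialized
// RequestAd (the request's unique ID plus one cookie) rides along as AEAD
// associated data, the protobuf Request built above is the AES-128-SIV
// plaintext under the C2S key, and the packet carries zero padding of
// cookie_len * (cookies_requested - 1) + EXTRA_PADDING bytes, which matches
// the minimum the server checks for before it will answer. A rough
// illustration of the padding arithmetic (the numbers are hypothetical, not
// taken from a real cookie):
//
//     let cookie_len = 100usize;      // length of the cookie sent with this request
//     let cookies_requested = 8usize; // client is down to its last cookie
//     let padding_len =
//         cookie_len * cookies_requested.saturating_sub(1) + wire::EXTRA_PADDING;
//     // i.e. 700 zero bytes plus the fixed EXTRA_PADDING margin
//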
113 | let ad = wire::RequestAd { 114 | unique_id: unique_id.to_vec(), 115 | cookie, 116 | }; 117 | 118 | trace!( 119 | "Encoding associated data for time request {:x?}: {:?}", 120 | unique_id, 121 | ad 122 | ); 123 | 124 | let mut ad_serialized = Vec::with_capacity(ad.encoded_len()); 125 | ad.encode(&mut ad_serialized) 126 | .expect("Error encoding associated data for time request"); 127 | 128 | let aead_c2s = Aes128SivAead::new(c2s); 129 | 130 | let ciphertext = aead_c2s 131 | .encrypt( 132 | &nonce, 133 | Payload { 134 | aad: &ad_serialized, 135 | msg: &plaintext_serialized, 136 | }, 137 | ) 138 | .expect("Error encrypting time request"); 139 | 140 | let padding = vec![0; cookie_len * cookies_requested.saturating_sub(1) + wire::EXTRA_PADDING]; 141 | 142 | let packet = wire::Packet { 143 | msg: Some(wire::packet::Msg::Request(wire::RequestEnvelope { 144 | nonce: nonce.to_vec(), 145 | ad: ad_serialized, 146 | ciphertext, 147 | padding, 148 | })), 149 | }; 150 | 151 | trace!( 152 | "Encoding packet for time request {:x?}: {:?}", 153 | unique_id, 154 | packet 155 | ); 156 | 157 | out.reserve(packet.encoded_len()); 158 | packet 159 | .encode(out) 160 | .expect("Error encoding packet for time request"); 161 | } 162 | 163 | ///Send a time request 164 | /// 165 | ///Resolve `peer_config.host` using `resolver`. Take keys and cookies 166 | /// from `secret_store`. If they aren't there, run NTS-KE to obtain 167 | /// them. Send a time request over `socket_mutex` and record in 168 | /// `core_state` that it's in flight. 169 | pub async fn send_time_request( 170 | resolver: &trust_dns_resolver::TokioAsyncResolver, 171 | socket: &tokio::net::UdpSocket, 172 | peer_name: &PeerName, 173 | peer_config: &PeerConfig, 174 | core_state: &RwLock, 175 | secret_store: &SecretStore, 176 | ) -> Result<(), RequestError> { 177 | let ip_addr = resolver 178 | .lookup_ip(peer_config.host.as_str()) 179 | .await 180 | .map_err(RequestError::ResolveError)? 181 | .into_iter() 182 | .next() 183 | .expect("Got empty iterator from DNS lookup"); 184 | 185 | debug!( 186 | "Resolved DNS for peer '{}': {} -> {}", 187 | peer_name, peer_config.host, ip_addr 188 | ); 189 | 190 | let peer_addr = SocketAddr::new(ip_addr, peer_config.port); 191 | 192 | //These two secret_store calls each use separate transactions, so 193 | // it's possible to get a cookie that doesn't correspond to to the 194 | // c2s key if the results of an NTS-KE exchange get committed in 195 | // between the two calls. This can be elicited in testing by 196 | // setting an extremely short polling interval. Preventing this 197 | // would be easy — just add a method to SecretStore that fetches 198 | // both the C2S key and the cookie in a single transaction — but 199 | // it wouldn't actually improve anything because the new S2C key 200 | // will still get committed right afterward and we won't be able 201 | // to decrypt the server's response. The problem is harmless in 202 | // any case because we'll just recover on the next tick. Worst 203 | // that happens is that NTS-KE gets run twice rather than just 204 | // once. 
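//
// In outline, the match below is a cache-or-bootstrap step: pull a C2S key
// and an unused cookie out of the secret store, and if either is missing,
// fall back to a full NTS-KE exchange: open a TCP connection, run the TLS
// handshake against the peer's configured certificate name, request fresh
// keys and cookies, persist them with set_credentials, and spend one cookie
// immediately. Roughly (a sketch of the control flow, not compilable code):
//
//     match (c2s_key_from_store, cookie_from_store) {
//         (Some(c2s), (Some(cookie), cookies_left)) => /* use cached credentials */,
//         _ => /* run NTS-KE over TLS, store the results, then use them */,
//     }
//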
205 | let (c2s, cookie, cookies_left) = match ( 206 | secret_store 207 | .get_c2s_key(peer_name) 208 | .map_err(RequestError::C2SLookupError)?, 209 | secret_store 210 | .take_cookie(peer_name) 211 | .map_err(RequestError::CookieLookupError)?, 212 | ) { 213 | (Some(c2s), (Some(cookie), cookies_left)) => (c2s, cookie, cookies_left), 214 | _ => { 215 | let tcp_stream = net::TcpStream::connect(&peer_addr) 216 | .await 217 | .map_err(RequestError::TcpError)?; 218 | debug!( 219 | "TCP connection established for NTS-KE with peer '{}'", 220 | peer_name 221 | ); 222 | let mut tls_stream = peer_config 223 | .tls_connector 224 | .connect(peer_config.cert_name.as_ref(), tcp_stream) 225 | .await 226 | .map_err(RequestError::TlsHandshakeError)?; 227 | debug!("TLS handshake completed with peer '{}'", peer_name); 228 | let mut ntske_output = ntske::request_ntske(&mut tls_stream) 229 | .await 230 | .map_err(RequestError::TlsSessionError)? 231 | .map_err(RequestError::NtskeProblem)?; 232 | debug!("Successful NTS-KE with peer '{}'", peer_name); 233 | let my_cookie = ntske_output 234 | .cookies 235 | .pop() 236 | .ok_or(RequestError::NtskeNoCookies)?; 237 | let cookies_left = ntske_output.cookies.len(); 238 | secret_store 239 | .set_credentials( 240 | peer_name, 241 | &ntske_output.c2s, 242 | &ntske_output.s2c, 243 | ntske_output.cookies.as_slice(), 244 | ) 245 | .map_err(RequestError::CredentialSaveError)?; 246 | debug!( 247 | "Stored session keys and {} cookies for peer '{}'", 248 | cookies_left, peer_name 249 | ); 250 | (ntske_output.c2s, my_cookie, cookies_left) 251 | } 252 | }; 253 | 254 | let query = core_state 255 | .write() 256 | .unwrap() 257 | .on_tick(peer_name, &mut rand::thread_rng()) 258 | .map_err(RequestError::CoreTickError)?; 259 | let cookies_requested = if cookies_left > 7 { 260 | 1 261 | } else { 262 | 8 - cookies_left 263 | }; 264 | 265 | let mut send_buf = Vec::new(); 266 | serialize_time_request( 267 | &mut send_buf, 268 | &query.unique_id, 269 | &c2s, 270 | cookie, 271 | cookies_requested, 272 | ); 273 | 274 | core_state 275 | .write() 276 | .unwrap() 277 | .on_departure(peer_name) 278 | .map_err(RequestError::CoreDepartureError)?; 279 | 280 | debug!("Sending time request to peer '{}'", peer_name); 281 | 282 | socket 283 | .send_to(send_buf.as_slice(), &peer_addr) 284 | .await 285 | .map_err(RequestError::UdpSocketError)?; 286 | Ok(()) 287 | } 288 | 289 | ///Enumeration of errors that can occur when processing a time response 290 | #[derive(Debug)] 291 | pub enum ResponseError { 292 | DestTimeError(io::Error), 293 | PacketDecodingError(prost::DecodeError), 294 | NotAResponse, 295 | AdDecodingError(prost::DecodeError), 296 | WrongNonceLength, 297 | WrongUniqueIdLength, 298 | UnrecognizedErrorResponse, 299 | NonMatchingUniqueId, 300 | S2CLookupError(PeerName, StoreError), 301 | S2CNotFound(PeerName), 302 | DecryptionFailure(PeerName), 303 | PlaintextDecodingError(PeerName, prost::DecodeError), 304 | WrongEraLength(PeerName), 305 | NoLocalClock(PeerName), 306 | NoGlobalOffset(PeerName), 307 | CoreError(PeerName, io::Error), 308 | StoreCookiesError(PeerName, StoreError), 309 | StoreClearError(PeerName, StoreError), 310 | } 311 | 312 | impl ResponseError { 313 | fn level(&self) -> log::Level { 314 | use log::Level::*; 315 | use ResponseError::*; 316 | match self { 317 | DestTimeError(_) => Error, 318 | PacketDecodingError(_) => Debug, 319 | NotAResponse => Debug, 320 | AdDecodingError(_) => Debug, 321 | WrongNonceLength => Debug, 322 | WrongUniqueIdLength => Debug, 323 | 
UnrecognizedErrorResponse => Debug, 324 | NonMatchingUniqueId => Debug, 325 | S2CLookupError(_, _) => Error, 326 | S2CNotFound(_) => Warn, 327 | DecryptionFailure(_) => Warn, 328 | PlaintextDecodingError(_, _) => Warn, 329 | WrongEraLength(_) => Warn, 330 | NoLocalClock(_) => Warn, 331 | NoGlobalOffset(_) => Warn, 332 | CoreError(_, _) => Error, 333 | StoreCookiesError(_, _) => Error, 334 | StoreClearError(_, _) => Error, 335 | } 336 | } 337 | } 338 | 339 | impl fmt::Display for ResponseError { 340 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 341 | use ResponseError::*; 342 | match self { 343 | DestTimeError(e) => write!(f, "Getting destination timestamp: {}", e), 344 | PacketDecodingError(e) => write!(f, "Decoding packet: {}", e), 345 | NotAResponse => write!(f, "Not a response packet"), 346 | AdDecodingError(e) => write!(f, "Decoding associated data: {}", e), 347 | WrongNonceLength => write!(f, "Wrong nonce length"), 348 | WrongUniqueIdLength => write!(f, "Wrong unique-ID length"), 349 | UnrecognizedErrorResponse => write!(f, "Unrecognized error response"), 350 | NonMatchingUniqueId => { 351 | write!(f, "Unique-ID does not correspond to any in-flight request") 352 | } 353 | S2CLookupError(peer, e) => write!(f, "Looking up S2C for peer '{}': {}", peer, e), 354 | S2CNotFound(peer) => write!(f, "S2C key not found for peer '{}'", peer), 355 | DecryptionFailure(peer) => write!(f, "Failed to decrypt response from peer '{}'", peer), 356 | PlaintextDecodingError(peer, e) => { 357 | write!(f, "Decoding plaintext send by peer '{}': {}", peer, e) 358 | } 359 | WrongEraLength(peer) => write!( 360 | f, 361 | "Response from peer '{}' has an era of the wrong length", 362 | peer 363 | ), 364 | NoLocalClock(peer) => write!( 365 | f, 366 | "Response from peer '{}' is missing its local-clock field", 367 | peer 368 | ), 369 | NoGlobalOffset(peer) => write!( 370 | f, 371 | "Response from peer '{}' is missing its global-offset field", 372 | peer 373 | ), 374 | CoreError(peer, e) => write!( 375 | f, 376 | "Updating core state for response from peer '{}': {}", 377 | peer, e 378 | ), 379 | StoreCookiesError(peer, e) => write!( 380 | f, 381 | "Writing new cookies from peer '{}' to secret store: {}", 382 | peer, e 383 | ), 384 | StoreClearError(peer, e) => write!( 385 | f, 386 | "Clearing secret store in response to crypto-NAK from peer '{}': {}", 387 | peer, e 388 | ), 389 | } 390 | } 391 | } 392 | 393 | impl std::error::Error for ResponseError {} 394 | 395 | ///Data extracted from a [wire::ResponseEnvelope](../wire/struct.ResponseEnvelope.html) 396 | pub struct ResponseEnvelopeData { 397 | unique_id: core::UniqueId, 398 | nonce: Aes128SivNonce, 399 | ad: Vec, 400 | ciphertext: Vec, 401 | } 402 | 403 | ///Data extracted from a crypto-NAK response 404 | pub struct CryptoNakData { 405 | unique_id: core::UniqueId, 406 | } 407 | 408 | ///Deserialize a time response as far as the envelope, but don't try to decrypt it 409 | pub fn deserialize_response_envelope( 410 | response: Response, 411 | ) -> Result, ResponseError> { 412 | let packet = wire::Packet::decode(response).map_err(ResponseError::PacketDecodingError)?; 413 | trace!("Deserialized time response packet: {:?}", packet); 414 | 415 | match packet.msg { 416 | Some(wire::packet::Msg::Response(envelope)) => { 417 | let ad = wire::ResponseAd::decode(envelope.ad.as_ref()) 418 | .map_err(ResponseError::AdDecodingError)?; 419 | let nonce = Aes128SivNonce::try_clone_from_slice(envelope.nonce.as_slice()) 420 | .map_err(|_| 
ResponseError::WrongNonceLength)?; 421 | let unique_id = core::UniqueId::try_from(ad.unique_id.as_slice()) 422 | .map_err(|_| ResponseError::WrongUniqueIdLength)?; 423 | Ok(Ok(ResponseEnvelopeData { 424 | unique_id, 425 | nonce, 426 | ad: envelope.ad, 427 | ciphertext: envelope.ciphertext, 428 | })) 429 | } 430 | Some(wire::packet::Msg::Error(error)) => { 431 | let unique_id = core::UniqueId::try_from(error.unique_id.as_slice()) 432 | .map_err(|_| ResponseError::WrongUniqueIdLength)?; 433 | match error.error { 434 | Some(wire::error::Error::CryptoNak(_)) => Ok(Err(CryptoNakData { unique_id })), 435 | _ => Err(ResponseError::UnrecognizedErrorResponse), 436 | } 437 | } 438 | _ => Err(ResponseError::NotAResponse), 439 | } 440 | } 441 | 442 | ///Deserialize the plaintext of a time response, returning cookies and 443 | /// a [`core::Response`](../core/struct.Response.html). 444 | pub fn deserialize_response_plaintext( 445 | peer_name: &PeerName, 446 | unique_id: &core::UniqueId, 447 | plaintext: Plaintext, 448 | ) -> Result<(Vec>, core::Response), ResponseError> { 449 | let response = wire::Response::decode(plaintext) 450 | .map_err(|e| ResponseError::PlaintextDecodingError(peer_name.clone(), e))?; 451 | trace!("Deserialized time response plaintext: {:?}", response); 452 | let era = Era(<[u8; 16]>::try_from(response.era.as_slice()) 453 | .map_err(|_| ResponseError::WrongEraLength(peer_name.clone()))?); 454 | 455 | let global_offset = response 456 | .offset 457 | .ok_or_else(|| ResponseError::NoGlobalOffset(peer_name.clone()))?; 458 | let local_clock = response 459 | .local_clock 460 | .ok_or_else(|| ResponseError::NoLocalClock(peer_name.clone()))?; 461 | Ok(( 462 | response.cookies, 463 | core::Response { 464 | era, 465 | unique_id: *unique_id, 466 | global_offset: Timestamp::new( 467 | global_offset.seconds as i64, 468 | global_offset.nanoseconds as i64, 469 | ), 470 | local_clock: Timestamp::new(local_clock.seconds as i64, local_clock.nanoseconds as i64), 471 | }, 472 | )) 473 | } 474 | 475 | ///Process a time response 476 | /// 477 | ///Deserialize and decrypt the `response` using `secret_store` to look up keys. 478 | /// Pass the response to `core_state`. Add any returned cookies to the store. 479 | pub fn handle_time_response( 480 | response: Response, 481 | core_state: &RwLock, 482 | secret_store: &SecretStore, 483 | ) -> Result<(), ResponseError> { 484 | let dest_time = Timestamp::local_time().map_err(ResponseError::DestTimeError)?; 485 | 486 | match deserialize_response_envelope(response)? { 487 | Ok(envelope) => { 488 | let peer_name = core_state 489 | .read() 490 | .unwrap() 491 | .lookup_peer(&envelope.unique_id) 492 | .ok_or(ResponseError::NonMatchingUniqueId)?; 493 | //It's possible for S2CNotFound to happen when request B 494 | // crosses request A on the wire, and response B is a 495 | // crypto-NAK which causes us to clear our 496 | // credentials. This can readily be elicited in testing 497 | // setting an extremely short polling interval, but should 498 | // never normally happen in production, barring 499 | // adversarial behavior by the network or the peer. If it 500 | // does, it's harmless; we'll log it at WARN level and 501 | // recover on the next tick. 502 | let s2c = secret_store 503 | .get_s2c_key(&peer_name) 504 | .map_err(|e| ResponseError::S2CLookupError(peer_name.clone(), e))? 
505 | .ok_or_else(|| ResponseError::S2CNotFound(peer_name.clone()))?; 506 | let aead_s2c = Aes128SivAead::new(&s2c); 507 | let plaintext = aead_s2c 508 | .decrypt( 509 | &envelope.nonce, 510 | Payload { 511 | aad: &envelope.ad, 512 | msg: &envelope.ciphertext, 513 | }, 514 | ) 515 | .map_err(|_| ResponseError::DecryptionFailure(peer_name.clone()))?; 516 | let (cookies, response) = deserialize_response_plaintext( 517 | &peer_name, 518 | &envelope.unique_id, 519 | plaintext.as_ref(), 520 | )?; 521 | core_state 522 | .write() 523 | .unwrap() 524 | .on_response(&response, dest_time) 525 | .map_err(|e| ResponseError::CoreError(peer_name.clone(), e))?; 526 | 527 | secret_store 528 | .give_cookies(&peer_name, cookies) 529 | .map_err(|e| ResponseError::StoreCookiesError(peer_name.clone(), e))?; 530 | debug!( 531 | "Successfully handled time response from peer '{}'", 532 | peer_name 533 | ); 534 | Ok(()) 535 | } 536 | Err(crypto_nak) => { 537 | let peer_name = core_state 538 | .read() 539 | .unwrap() 540 | .lookup_peer(&crypto_nak.unique_id) 541 | .ok_or(ResponseError::NonMatchingUniqueId)?; 542 | debug!("Received crypto-NAK from peer '{}'", peer_name); 543 | secret_store 544 | .clear_peer(&peer_name) 545 | .map_err(|e| ResponseError::StoreClearError(peer_name.clone(), e))?; 546 | Ok(()) 547 | } 548 | } 549 | } 550 | 551 | ///Listen for time response and process them 552 | /// 553 | ///Listen forever on `socket`. Process any responses that come in. If any 554 | /// errors occur, log them and continue. 555 | pub async fn time_response_listener( 556 | socket: &tokio::net::UdpSocket, 557 | core_state: &RwLock, 558 | secret_store: &SecretStore, 559 | ) -> io::Result<()> { 560 | let mut recv_buf = [0; 65535]; 561 | loop { 562 | let (recv_size, peer_addr) = socket.recv_from(&mut recv_buf).await?; 563 | if let Err(e) = handle_time_response(&recv_buf[0..recv_size], core_state, secret_store) { 564 | log!( 565 | e.level(), 566 | "Handling time response from {}: {}", 567 | peer_addr, 568 | e 569 | ); 570 | } 571 | } 572 | } 573 | -------------------------------------------------------------------------------- /byztimed/src/time_server.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | //! 
Process time requests and send responses 5 | 6 | use crate::aead::*; 7 | use crate::cookie; 8 | use crate::core; 9 | use crate::store::SecretStore; 10 | use crate::wire; 11 | use byztime::{Era, Timestamp}; 12 | use log::{debug, error, trace}; 13 | use prost::Message; 14 | use rand::RngCore; 15 | use std::cmp; 16 | use std::convert::TryFrom; 17 | use std::fmt; 18 | use std::sync::RwLock; 19 | use tokio::io; 20 | use tokio::net; 21 | 22 | ///Information extracted from a deserialized and decrypted time request 23 | #[derive(Debug, Clone)] 24 | pub struct RequestData { 25 | ///The unique-id sent with the request 26 | pub unique_id: core::UniqueId, 27 | ///The number of cookies requested 28 | pub num_cookies: usize, 29 | ///Decrypted contents of the cookie sent with the request 30 | pub cookie_data: cookie::CookieData, 31 | } 32 | 33 | ///Information necessary to form a response to a time request 34 | #[derive(Debug, Clone)] 35 | pub struct ResponseData { 36 | ///The unique-id sent with the request 37 | pub unique_id: core::UniqueId, 38 | ///The number of cookies that were requested 39 | pub num_cookies: usize, 40 | ///Decrypted contents of the cookie sent with the request 41 | pub cookie_data: cookie::CookieData, 42 | ///Our clock era 43 | pub era: Era, 44 | ///Value of our local clock 45 | pub local_clock: Timestamp, 46 | ///Our current estimate of (global clock - local clock) 47 | pub global_offset: Timestamp, 48 | ///Master key for encrypting new cookies 49 | pub master_key: Aes128SivKey, 50 | ///Master key ID 51 | pub master_key_id: u32, 52 | } 53 | 54 | ///Enumeration of anything that can be wrong with a request 55 | #[derive(Debug, Clone)] 56 | pub enum RequestError { 57 | PacketDecodingError(prost::DecodeError), 58 | NotARequest, 59 | AdDecodingError(prost::DecodeError), 60 | WrongUniqueIdLength, 61 | NonDecryptableCookie(core::UniqueId), 62 | WrongNonceLength, 63 | NonDecryptableCiphertext(core::UniqueId), 64 | PlaintextDecodingError(prost::DecodeError), 65 | NotEnoughPadding, 66 | } 67 | 68 | impl fmt::Display for RequestError { 69 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 70 | use RequestError::*; 71 | 72 | match self { 73 | PacketDecodingError(e) => write!(f, "Packet decoding error: {}", e), 74 | NotARequest => write!(f, "Not a request"), 75 | AdDecodingError(e) => write!(f, "Associated data decoding error: {}", e), 76 | WrongUniqueIdLength => write!(f, "Wrong unique-ID length"), 77 | NonDecryptableCookie(_) => write!(f, "Non-decryptable cookie"), 78 | WrongNonceLength => write!(f, "Wrong nonce length"), 79 | NonDecryptableCiphertext(_) => write!(f, "Non-decryptable ciphertext"), 80 | PlaintextDecodingError(e) => write!(f, "Plaintext decoding error: {}", e), 81 | NotEnoughPadding => write!(f, "Not enough padding"), 82 | } 83 | } 84 | } 85 | 86 | impl std::error::Error for RequestError {} 87 | 88 | ///Deserialize a time request 89 | /// 90 | /// Fully deserialize and decrypt `request`, using `get_master_key` to 91 | /// look up master keys by ID 92 | pub fn deserialize_request< 93 | Request: bytes::Buf, 94 | GetMasterKey: FnOnce(u32) -> Option<Aes128SivKey>, 95 | >( 96 | request: Request, 97 | get_master_key: GetMasterKey, 98 | ) -> Result<RequestData, RequestError> { 99 | let packet = wire::Packet::decode(request).map_err(RequestError::PacketDecodingError)?; 100 | trace!("Deserialized time request packet: {:?}", packet); 101 | 102 | let envelope = match packet.msg { 103 | Some(wire::packet::Msg::Request(envelope)) => Ok(envelope), 104 | _ => Err(RequestError::NotARequest), 105 | }?; 106 | 107 | let 
padding_len = envelope.padding.len(); 108 | 109 | let ad = 110 | wire::RequestAd::decode(envelope.ad.as_ref()).map_err(RequestError::AdDecodingError)?; 111 | 112 | trace!("Deserialized time request associated data: {:?}", ad); 113 | 114 | let unique_id = core::UniqueId::try_from(ad.unique_id.as_slice()) 115 | .map_err(|_| RequestError::WrongUniqueIdLength)?; 116 | 117 | let cookie_len = ad.cookie.len(); 118 | 119 | let cookie_data = cookie::open_cookie(&ad.cookie, get_master_key) 120 | .ok_or(RequestError::NonDecryptableCookie(unique_id))?; 121 | 122 | let aead_c2s = Aes128SivAead::new(&cookie_data.c2s); 123 | 124 | let nonce = Aes128SivNonce::try_from_slice(&envelope.nonce) 125 | .map_err(|_| RequestError::WrongNonceLength)?; 126 | 127 | let plaintext_serialized = aead_c2s 128 | .decrypt( 129 | nonce, 130 | Payload { 131 | aad: &envelope.ad, 132 | msg: &envelope.ciphertext, 133 | }, 134 | ) 135 | .map_err(|_| RequestError::NonDecryptableCiphertext(unique_id))?; 136 | 137 | let plaintext = wire::Request::decode(plaintext_serialized.as_ref()) 138 | .map_err(RequestError::PlaintextDecodingError)?; 139 | 140 | trace!("Deserialized time request plaintext: {:?}", plaintext); 141 | 142 | let num_cookies = cmp::min(plaintext.num_cookies, 8) as usize; 143 | 144 | if padding_len < cookie_len.saturating_mul(num_cookies.saturating_sub(1)) + wire::EXTRA_PADDING 145 | { 146 | return Err(RequestError::NotEnoughPadding); 147 | } 148 | 149 | Ok(RequestData { 150 | unique_id, 151 | num_cookies, 152 | cookie_data, 153 | }) 154 | } 155 | 156 | ///Construct a crypto-NAK error response 157 | pub fn serialize_crypto_nak(out: &mut Vec, unique_id: &core::UniqueId) { 158 | let packet = wire::Packet { 159 | msg: Some(wire::packet::Msg::Error(wire::Error { 160 | unique_id: unique_id.to_vec(), 161 | error: Some(wire::error::Error::CryptoNak(wire::CryptoNak {})), 162 | })), 163 | }; 164 | out.reserve(packet.encoded_len()); 165 | packet.encode(out).expect("Error encoding crypto-NAK"); 166 | } 167 | 168 | ///Construct a time response 169 | pub fn serialize_response(out: &mut Vec, response_data: &ResponseData) { 170 | let mut cookies = Vec::with_capacity(response_data.num_cookies); 171 | for _ in 0..response_data.num_cookies { 172 | let cookie = cookie::seal_cookie( 173 | &response_data.cookie_data, 174 | &response_data.master_key, 175 | response_data.master_key_id, 176 | &mut rand::thread_rng(), 177 | ); 178 | cookies.push(cookie); 179 | } 180 | 181 | let plaintext = wire::Response { 182 | era: response_data.era.0.to_vec(), 183 | local_clock: Some(wire::Timestamp { 184 | seconds: response_data.local_clock.seconds(), 185 | nanoseconds: response_data.local_clock.nanoseconds() as u32, 186 | }), 187 | offset: Some(wire::Timestamp { 188 | seconds: response_data.global_offset.seconds(), 189 | nanoseconds: response_data.global_offset.nanoseconds() as u32, 190 | }), 191 | cookies, 192 | }; 193 | 194 | trace!("Serializing time response plaintext: {:?}", plaintext); 195 | 196 | let mut plaintext_serialized = Vec::with_capacity(plaintext.encoded_len()); 197 | plaintext 198 | .encode(&mut plaintext_serialized) 199 | .expect("Error encoding plaintext in time response"); 200 | let ad = wire::ResponseAd { 201 | unique_id: response_data.unique_id.to_vec(), 202 | }; 203 | 204 | let mut nonce = Aes128SivNonce::default(); 205 | rand::thread_rng().fill_bytes(nonce.as_mut_slice()); 206 | 207 | trace!("Serializing time response associated data: {:?}", ad); 208 | 209 | let mut ad_serialized = Vec::with_capacity(ad.encoded_len()); 210 | 
ad.encode(&mut ad_serialized) 211 | .expect("Error encoding associated data in time response"); 212 | 213 | let aead_s2c = Aes128SivAead::new(&response_data.cookie_data.s2c); 214 | let ciphertext = aead_s2c 215 | .encrypt( 216 | &nonce, 217 | Payload { 218 | aad: &ad_serialized, 219 | msg: &plaintext_serialized, 220 | }, 221 | ) 222 | .expect("Failed to encrypt time response"); 223 | 224 | let packet = wire::Packet { 225 | msg: Some(wire::packet::Msg::Response(wire::ResponseEnvelope { 226 | ad: ad_serialized, 227 | nonce: nonce.to_vec(), 228 | ciphertext, 229 | })), 230 | }; 231 | 232 | trace!("Serializing time response packet: {:?}", packet); 233 | 234 | out.reserve(packet.encoded_len()); 235 | packet 236 | .encode(out) 237 | .expect("Error encoding packet in time response"); 238 | } 239 | 240 | ///Construct a response to a time request 241 | /// 242 | ///Parses the request in `recv_buf` and places an appropriate response in `send_buf`. 243 | /// Returns as follows: 244 | /// * `Ok(Ok(()))`: We're replying normally 245 | /// * `Ok(Err((e, true)))`: There was a problem with the request and we should send back an error response 246 | /// * `Ok(Err((e, false)))`: There was a problem with the request and it's too malformed to reply to 247 | /// * `Err(e)`: We hit an internal error querying core_state 248 | pub fn respond_to_time_request<Request: bytes::Buf>( 249 | recv_buf: Request, 250 | send_buf: &mut Vec<u8>, 251 | core_state: &RwLock<core::CoreState>, 252 | secret_store: &SecretStore, 253 | ) -> io::Result<Result<(), (RequestError, bool)>> { 254 | let get_master_key = |key_id| secret_store.get_cached_master_key(key_id); 255 | 256 | match deserialize_request(recv_buf, get_master_key) { 257 | Ok(request_data) => { 258 | let (master_key_id, master_key) = secret_store.get_cached_current_master_key(); 259 | let core_response = core_state 260 | .read() 261 | .unwrap() 262 | .on_query(&core::Query { 263 | unique_id: request_data.unique_id, 264 | }) 265 | .map_err(io::Error::from)?; 266 | let response_data = ResponseData { 267 | unique_id: core_response.unique_id, 268 | num_cookies: request_data.num_cookies, 269 | cookie_data: request_data.cookie_data, 270 | era: core_response.era, 271 | local_clock: core_response.local_clock, 272 | global_offset: core_response.global_offset, 273 | master_key, 274 | master_key_id, 275 | }; 276 | send_buf.clear(); 277 | serialize_response(send_buf, &response_data); 278 | Ok(Ok(())) 279 | } 280 | Err(request_problem) => { 281 | let maybe_unique_id = match request_problem { 282 | RequestError::NonDecryptableCookie(unique_id) => Some(unique_id), 283 | RequestError::NonDecryptableCiphertext(unique_id) => Some(unique_id), 284 | _ => None, 285 | }; 286 | 287 | if let Some(ref unique_id) = maybe_unique_id { 288 | send_buf.clear(); 289 | serialize_crypto_nak(send_buf, unique_id); 290 | Ok(Err((request_problem, true))) 291 | } else { 292 | Ok(Err((request_problem, false))) 293 | } 294 | } 295 | } 296 | } 297 | 298 | ///Serve time 299 | /// 300 | /// Listen forever on `socket` and reply to any time requests received. 
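///
/// A minimal sketch of how a caller drives this, mirroring `time_server_task`
/// in `main.rs` (the bind address is a placeholder and error handling is
/// elided, so the snippet is not compiled as a doctest):
///
/// ```ignore
/// let mut socket = tokio::net::UdpSocket::bind("[::]:1021").await?;
/// serve_time(&mut socket, &state.core_state, &state.secret_store).await?;
/// ```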
301 | pub async fn serve_time( 302 | socket: &mut net::UdpSocket, 303 | core_state: &RwLock, 304 | secret_store: &SecretStore, 305 | ) -> io::Result<()> { 306 | let mut recv_buf = [0; 65535]; 307 | let mut send_buf = Vec::with_capacity(65535); 308 | 309 | loop { 310 | let (recv_size, peer_addr) = socket.recv_from(&mut recv_buf).await?; 311 | debug!( 312 | "Time server got packet of length {} from {}", 313 | recv_size, peer_addr 314 | ); 315 | 316 | match respond_to_time_request( 317 | &recv_buf[0..recv_size], 318 | &mut send_buf, 319 | core_state, 320 | secret_store, 321 | ) { 322 | Ok(Ok(())) => { 323 | debug!( 324 | "Sending time response of length {} to {}", 325 | send_buf.len(), 326 | peer_addr 327 | ); 328 | socket.send_to(send_buf.as_slice(), peer_addr).await?; 329 | } 330 | 331 | Ok(Err((problem, should_reply))) => { 332 | debug!("Error in time request from {}: {}", peer_addr, problem); 333 | if should_reply { 334 | socket.send_to(send_buf.as_slice(), peer_addr).await?; 335 | } 336 | } 337 | 338 | Err(e) => error!( 339 | "From CoreState::on_query(), responding to time request from {}: {}", 340 | peer_addr, e 341 | ), 342 | } 343 | } 344 | } 345 | -------------------------------------------------------------------------------- /byztimed/src/time_test.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | use crate::aead::*; 5 | use crate::config; 6 | use crate::cookie; 7 | use crate::core; 8 | use crate::peer_name::PeerName; 9 | use crate::store; 10 | use crate::time_client; 11 | use crate::time_server; 12 | 13 | use std::collections; 14 | use std::net; 15 | use std::path; 16 | use std::sync::{Arc, RwLock}; 17 | use tokio_rustls::rustls; 18 | use tokio_rustls::webpki::DNSNameRef; 19 | 20 | #[test] 21 | fn round_trip() { 22 | /* Set up a fake server */ 23 | let server_timedata_path = tempfile::NamedTempFile::new().unwrap(); 24 | let server_store_path = tempfile::tempdir().unwrap(); 25 | let server_config = config::Config { 26 | timedata: path::PathBuf::from(server_timedata_path.path()), 27 | secret_store: path::PathBuf::from(server_store_path.path()), 28 | logging: vec![], 29 | ro_mode: false, 30 | bind_host: net::IpAddr::V6(net::Ipv6Addr::UNSPECIFIED), 31 | bind_port: 0, 32 | poll_interval: 8.0, 33 | drift_ppb: 250_000, 34 | tls_acceptor: tokio_rustls::TlsAcceptor::from(Arc::new(rustls::ServerConfig::new( 35 | rustls::NoClientAuth::new(), 36 | ))), 37 | log_format: None, 38 | peers: collections::HashMap::new(), 39 | }; 40 | let server_core_state_lock = RwLock::new(core::CoreState::initialize(&server_config).unwrap()); 41 | let server_secret_store = store::SecretStore::new(&server_config.secret_store).unwrap(); 42 | 43 | /* ...and a fake client */ 44 | let client_timedata_path = tempfile::NamedTempFile::new().unwrap(); 45 | let client_store_path = tempfile::tempdir().unwrap(); 46 | let peer_name = PeerName::new("server".into()); 47 | let mut client_peers = collections::HashMap::new(); 48 | client_peers.insert( 49 | peer_name.clone(), 50 | Arc::new(config::PeerConfig { 51 | host: "".into(), 52 | port: 0, 53 | dist: 0, 54 | cert_name: DNSNameRef::try_from_ascii_str("bogus.invalid") 55 | .unwrap() 56 | .to_owned(), 57 | tls_connector: tokio_rustls::TlsConnector::from(Arc::new(rustls::ClientConfig::new())), 58 | }), 59 | ); 60 | 61 | let client_config = config::Config { 62 | timedata: path::PathBuf::from(client_timedata_path.path()), 63 | secret_store: 
path::PathBuf::from(client_store_path.path()), 64 | logging: vec![], 65 | ro_mode: true, 66 | bind_host: net::IpAddr::V6(net::Ipv6Addr::UNSPECIFIED), 67 | bind_port: 0, 68 | poll_interval: 8.0, 69 | drift_ppb: 250_000, 70 | tls_acceptor: tokio_rustls::TlsAcceptor::from(Arc::new(rustls::ServerConfig::new( 71 | rustls::NoClientAuth::new(), 72 | ))), 73 | log_format: None, 74 | peers: client_peers, 75 | }; 76 | let client_core_state_lock = RwLock::new(core::CoreState::initialize(&client_config).unwrap()); 77 | let client_secret_store = store::SecretStore::new(&client_config.secret_store).unwrap(); 78 | 79 | /* Create some credentials and populate the client's secret store */ 80 | let (master_key_id, master_key) = server_secret_store.get_cached_current_master_key(); 81 | let mut rng = rand::thread_rng(); 82 | let c2s = keygen(&mut rng); 83 | let s2c = keygen(&mut rng); 84 | let cookie = cookie::seal_cookie( 85 | &cookie::CookieData { c2s, s2c }, 86 | &master_key, 87 | master_key_id, 88 | &mut rng, 89 | ); 90 | client_secret_store 91 | .set_credentials(&peer_name, &c2s, &s2c, &[cookie.clone()]) 92 | .unwrap(); 93 | 94 | /* Make a time request */ 95 | let mut request_buf = Vec::new(); 96 | let query = client_core_state_lock 97 | .write() 98 | .unwrap() 99 | .on_tick(&peer_name, &mut rng) 100 | .unwrap(); 101 | time_client::serialize_time_request(&mut request_buf, &query.unique_id, &c2s, cookie, 1); 102 | 103 | /* Serve a response */ 104 | let mut response_buf = Vec::with_capacity(65535); 105 | time_server::respond_to_time_request( 106 | request_buf.as_ref(), 107 | &mut response_buf, 108 | &server_core_state_lock, 109 | &server_secret_store, 110 | ) 111 | .unwrap() 112 | .unwrap(); 113 | 114 | /* Interpret the response */ 115 | time_client::handle_time_response( 116 | response_buf.as_ref(), 117 | &client_core_state_lock, 118 | &client_secret_store, 119 | ) 120 | .unwrap(); 121 | } 122 | -------------------------------------------------------------------------------- /byztimed/src/wire.proto: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | syntax = "proto3"; 5 | 6 | package byztimed.wire; 7 | 8 | /* Top-level message used for all time packets */ 9 | message Packet { 10 | /* Enumeration of message types */ 11 | oneof msg { 12 | RequestEnvelope request = 1; /* A request packet */ 13 | ResponseEnvelope response = 2; /* A non-error response packet */ 14 | Error error = 3; /* An error response packet */ 15 | }; 16 | }; 17 | 18 | /* Encrypted envelope for a time request */ 19 | message RequestEnvelope { 20 | bytes ad = 1; /* Serialized RequestAD */ 21 | bytes nonce = 2; 22 | bytes ciphertext = 3; /* Encrypted serialized Request */ 23 | bytes padding = 4; 24 | }; 25 | 26 | /* Associated data for a time query */ 27 | message RequestAD { 28 | bytes unique_id = 1; /* 16-byte string uniquely identifying this request */ 29 | bytes cookie = 2; /* An NTS cookie. This a serialized Cookie message, but the client should consider it opaque. 
*/ 30 | }; 31 | 32 | /* Plaintext of a time request */ 33 | message Request { 34 | uint32 num_cookies = 1; /* Number of cookies the client is asking the server to send back */ 35 | }; 36 | 37 | /* Encrypted envelope for a response to a time query */ 38 | message ResponseEnvelope { 39 | bytes ad = 1; /* Serialized ResponseAD */ 40 | bytes nonce = 2; /* Nonce for AEAD */ 41 | bytes ciphertext = 3; /* Encrypted serialized Response */ 42 | }; 43 | 44 | /* Associated data for a response to a time query */ 45 | message ResponseAD { 46 | bytes unique_id = 1; /* 16-byte unique identifier echoed from the request we're responding to */ 47 | }; 48 | 49 | /* Plaintext of a response to a time query */ 50 | message Response { 51 | bytes era = 1; /* This server's current era */ 52 | Timestamp local_clock = 2; /* This server's local clock */ 53 | Timestamp offset = 3; /* Estimate of (global clock - local clock) */ 54 | repeated bytes cookies = 4; /* New NTS cookies for the receiver to send with future queries */ 55 | }; 56 | 57 | /* A count of seconds and nanoseconds */ 58 | message Timestamp { 59 | int64 seconds = 1; 60 | fixed32 nanoseconds = 2; 61 | }; 62 | 63 | /* An error response */ 64 | message Error { 65 | bytes unique_id = 1; /* 16-byte unique identifier echoed from the erroneous request */ 66 | /* Enumeration of error types */ 67 | oneof error { 68 | CryptoNak crypto_nak = 2; /* We couldn't decrypt the sender's request */ 69 | }; 70 | }; 71 | 72 | /* Empty message which could carry details of a crypto-NAK error. */ 73 | message CryptoNak { 74 | }; 75 | 76 | /* This message gets serialized to form a cookie */ 77 | message Cookie { 78 | fixed32 key_id = 1; /* ID of the master key used to encrypt this cookie */ 79 | bytes nonce = 2; /* Nonce used to encrypt this cookie */ 80 | bytes ciphertext = 3; /* Encrypted serialized UnwrappedCookie */ 81 | }; 82 | 83 | /* Plaintext contents of a cookie */ 84 | message UnwrappedCookie { 85 | uint32 alg_id = 1; /* The negotiated AEAD algorithm for this session */ 86 | bytes c2s = 2; /* The C2S key for this session */ 87 | bytes s2c = 3; /* The S2C key for this session */ 88 | }; -------------------------------------------------------------------------------- /byztimed/tests/1node.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 
2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | extern crate byztimed; 5 | extern crate nix; 6 | extern crate tempfile; 7 | 8 | #[macro_use] 9 | extern crate lazy_static; 10 | 11 | use std::env; 12 | use std::fs; 13 | use std::io; 14 | use std::path; 15 | use std::process; 16 | use std::thread; 17 | use std::time; 18 | 19 | use byztime::Context; 20 | 21 | mod common; 22 | use common::*; 23 | 24 | const GORGIAS_CONFIG: &'static str = r#"{ 25 | "timedata": "@tempdir@/gorgias.timedata", 26 | "secret_store": "@tempdir@/gorgias.store", 27 | "bind_port": @gorgias_port@, 28 | "key": "@certdir@/gorgias.key", 29 | "cert": "@certdir@/gorgias.crt", 30 | "authorities": "@certdir@/trent.crt", 31 | "logging": { "@tempdir@/gorgias.log": "debug" }, 32 | "peers": {} 33 | }"#; 34 | 35 | ///Greatest allowed (max - min) in any peer for a successful test, in nanoseconds 36 | const SPAN_LIMIT: i64 = 25_000_000; 37 | 38 | #[test] 39 | fn one_node() { 40 | match std::env::var_os("BYZTIMED_SAVE_INTEGRATION_TEST_OUTPUT") { 41 | None => { 42 | let temp_dir = tempfile::tempdir().unwrap(); 43 | run_one_node(temp_dir.path()) 44 | } 45 | Some(dir) => run_one_node(dir.as_ref()), 46 | } 47 | } 48 | 49 | fn run_one_node(temp_dir: &path::Path) { 50 | let testbin_path = env::current_exe().unwrap(); 51 | let testbin_dir = testbin_path.parent().unwrap(); 52 | let bin_dir = testbin_dir.parent().unwrap(); 53 | let mut byztime_bin_path = bin_dir.to_owned(); 54 | byztime_bin_path.push("byztimed"); 55 | byztime_bin_path.set_extension(env::consts::EXE_EXTENSION); 56 | assert!(byztime_bin_path.exists()); 57 | 58 | /* Find the certificate directory */ 59 | let mut cert_dir = path::PathBuf::new(); 60 | cert_dir.push(env!("CARGO_MANIFEST_DIR")); 61 | cert_dir.push("tests"); 62 | cert_dir.push("test_certs"); 63 | assert!(cert_dir.exists()); 64 | 65 | let our_port = find_ports(1); 66 | 67 | let config_contents = GORGIAS_CONFIG 68 | .replace("/", &path::MAIN_SEPARATOR.to_string()) 69 | .replace("@gorgias_port@", &our_port[0].to_string()) 70 | .replace("@tempdir@", temp_dir.to_str().unwrap()) 71 | .replace("@certdir@", cert_dir.to_str().unwrap()); 72 | 73 | //Write configuration file 74 | let mut config_path = path::PathBuf::new(); 75 | config_path.push(temp_dir); 76 | config_path.push("gorgias.json"); 77 | fs::write(&config_path, &config_contents).unwrap(); 78 | 79 | //Remove any existing timedata file 80 | let mut timedata_path = path::PathBuf::new(); 81 | timedata_path.push(temp_dir); 82 | timedata_path.push("gorgias.timedata"); 83 | if let Err(e) = fs::remove_file(&timedata_path) { 84 | assert!(e.kind() == io::ErrorKind::NotFound); 85 | } 86 | 87 | //Create the store path 88 | let mut store_path = path::PathBuf::new(); 89 | store_path.push(temp_dir); 90 | store_path.push("gorgias.store"); 91 | fs::create_dir(&store_path).unwrap(); 92 | 93 | //Capture stderr 94 | let mut stderr_path = path::PathBuf::new(); 95 | stderr_path.push(temp_dir); 96 | stderr_path.push("gorgias.stderr"); 97 | let stderr = fs::File::create(stderr_path).unwrap(); 98 | 99 | let mut child: ChildWrapper = process::Command::new(&byztime_bin_path) 100 | .arg(&config_path) 101 | .stderr(stderr) 102 | .spawn() 103 | .unwrap() 104 | .into(); 105 | 106 | thread::sleep(time::Duration::from_secs(2)); 107 | 108 | nix::sys::signal::kill( 109 | nix::unistd::Pid::from_raw(child.id() as i32), 110 | nix::sys::signal::Signal::SIGTERM, 111 | ) 112 | .unwrap(); 113 | 114 | thread::sleep(time::Duration::from_secs(1)); 115 | let _ = child.kill(); 116 | 117 | let output = 
child.wait_with_output().unwrap(); 118 | assert!(output.stderr.is_empty()); 119 | if !output.status.success() { 120 | panic!("Output status: {}", output.status) 121 | } 122 | 123 | let mut log_path = path::PathBuf::new(); 124 | log_path.push(temp_dir); 125 | log_path.push("gorgias.log"); 126 | let log_contents = String::from_utf8(fs::read(&log_path).unwrap()).unwrap(); 127 | assert!(log_contents.find("ERROR").is_none()); 128 | 129 | let ctx = byztime::ConsumerContext::open(timedata_path.as_ref()).unwrap(); 130 | let (min, _, max) = ctx.offset().unwrap(); 131 | ctx.close().unwrap(); 132 | 133 | assert!(max - min < byztime::Timestamp::new(0, SPAN_LIMIT)); 134 | } 135 | -------------------------------------------------------------------------------- /byztimed/tests/4node_local.rs: -------------------------------------------------------------------------------- 1 | //Copyright 2021, Akamai Technologies, Inc. 2 | //SPDX-License-Identifier: Apache-2.0 3 | 4 | extern crate byztimed; 5 | extern crate nix; 6 | extern crate tempfile; 7 | 8 | #[macro_use] 9 | extern crate lazy_static; 10 | 11 | use byztime::Context; 12 | use std::env; 13 | use std::fs; 14 | use std::io; 15 | use std::io::Write; 16 | use std::path; 17 | use std::process; 18 | use std::thread; 19 | use std::time; 20 | 21 | mod common; 22 | use common::*; 23 | 24 | const ALICE_CONFIG: &'static str = r#"{ 25 | "timedata": "@tempdir@/alice.timedata", 26 | "secret_store": "@tempdir@/alice.store", 27 | "bind_port": @alice_port@, 28 | "key": "@certdir@/alice.key", 29 | "cert": "@certdir@/alice.crt", 30 | "authorities": "@certdir@/trent.crt", 31 | "logging": { "@tempdir@/alice.log": "debug" }, 32 | "poll_interval": 0.5, 33 | "peers": { 34 | "bob": { 35 | "host": "127.0.0.1", 36 | "port": @bob_port@, 37 | "cert_name": "bob.test" 38 | }, 39 | "charlie": { 40 | "host": "127.0.0.1", 41 | "port": @charlie_port@, 42 | "cert_name": "charlie.test" 43 | }, 44 | "dave": { 45 | "host": "127.0.0.1", 46 | "port": @dave_port@, 47 | "cert_name": "dave.test" 48 | } 49 | } 50 | }"#; 51 | 52 | const BOB_CONFIG: &'static str = r#"{ 53 | "timedata": "@tempdir@/bob.timedata", 54 | "secret_store": "@tempdir@/bob.store", 55 | "bind_port": @bob_port@, 56 | "key": "@certdir@/bob.key", 57 | "cert": "@certdir@/bob.crt", 58 | "authorities": "@certdir@/trent.crt", 59 | "logging": { "@tempdir@/bob.log": "debug" }, 60 | "poll_interval": 0.5, 61 | "peers": { 62 | "alice": { 63 | "host": "127.0.0.1", 64 | "port": @alice_port@, 65 | "cert_name": "alice.test" 66 | }, 67 | "charlie": { 68 | "host": "127.0.0.1", 69 | "port": @charlie_port@, 70 | "cert_name": "charlie.test" 71 | }, 72 | "dave": { 73 | "host": "127.0.0.1", 74 | "port": @dave_port@, 75 | "cert_name": "dave.test" 76 | } 77 | } 78 | }"#; 79 | 80 | const CHARLIE_CONFIG: &'static str = r#"{ 81 | "timedata": "@tempdir@/charlie.timedata", 82 | "secret_store": "@tempdir@/charlie.store", 83 | "bind_port": @charlie_port@, 84 | "key": "@certdir@/charlie.key", 85 | "cert": "@certdir@/charlie.crt", 86 | "authorities": "@certdir@/trent.crt", 87 | "logging": { "@tempdir@/charlie.log": "debug" }, 88 | "poll_interval": 0.5, 89 | "peers": { 90 | "alice": { 91 | "host": "127.0.0.1", 92 | "port": @alice_port@, 93 | "cert_name": "alice.test" 94 | }, 95 | "bob": { 96 | "host": "127.0.0.1", 97 | "port": @bob_port@, 98 | "cert_name": "bob.test" 99 | }, 100 | "dave": { 101 | "host": "127.0.0.1", 102 | "port": @dave_port@, 103 | "cert_name": "dave.test" 104 | } 105 | } 106 | }"#; 107 | 108 | const DAVE_CONFIG: &'static str = r#"{ 109 | 
"timedata": "@tempdir@/dave.timedata", 110 | "secret_store": "@tempdir@/dave.store", 111 | "bind_port": @dave_port@, 112 | "key": "@certdir@/dave.key", 113 | "cert": "@certdir@/dave.crt", 114 | "authorities": "@certdir@/trent.crt", 115 | "logging": { "@tempdir@/dave.log": "debug" }, 116 | "poll_interval": 0.25, 117 | "peers": { 118 | "alice": { 119 | "host": "127.0.0.1", 120 | "port": @alice_port@, 121 | "cert_name": "alice.test" 122 | }, 123 | "bob": { 124 | "host": "127.0.0.1", 125 | "port": @bob_port@, 126 | "cert_name": "bob.test" 127 | }, 128 | "charlie": { 129 | "host": "127.0.0.1", 130 | "port": @charlie_port@, 131 | "cert_name": "charlie.test" 132 | } 133 | } 134 | }"#; 135 | 136 | ///Greatest allowed (max - min) in any peer for a successful test, in nanoseconds 137 | const SPAN_LIMIT: i64 = 25_000_000; 138 | 139 | ///Greatest allowed range in estimates for a successful test, in nanoseconds 140 | const DISPERSION_LIMIT: i64 = 1_000_000; 141 | 142 | #[test] 143 | fn four_node_local() { 144 | match std::env::var_os("BYZTIMED_SAVE_INTEGRATION_TEST_OUTPUT") { 145 | None => { 146 | let temp_dir = tempfile::tempdir().unwrap(); 147 | run_four_node_local(temp_dir.path()) 148 | } 149 | Some(dir) => run_four_node_local(dir.as_ref()), 150 | } 151 | } 152 | 153 | fn run_four_node_local(temp_dir: &path::Path) { 154 | /* Find the byztimed binary */ 155 | let testbin_path = env::current_exe().unwrap(); 156 | let testbin_dir = testbin_path.parent().unwrap(); 157 | let bin_dir = testbin_dir.parent().unwrap(); 158 | let mut byztime_bin_path = bin_dir.to_owned(); 159 | byztime_bin_path.push("byztimed"); 160 | byztime_bin_path.set_extension(env::consts::EXE_EXTENSION); 161 | assert!(byztime_bin_path.exists()); 162 | 163 | /* Find the certificate directory */ 164 | let mut cert_dir = path::PathBuf::new(); 165 | cert_dir.push(env!("CARGO_MANIFEST_DIR")); 166 | cert_dir.push("tests"); 167 | cert_dir.push("test_certs"); 168 | assert!(cert_dir.exists()); 169 | 170 | let mut children = Vec::::new(); 171 | 172 | let our_ports = find_ports(4); 173 | 174 | for (config_filename, timedata_filename, store_dirname, stderr_filename, config_template) in &[ 175 | ( 176 | "alice.json", 177 | "alice.timedata", 178 | "alice.store", 179 | "alice.stderr", 180 | ALICE_CONFIG, 181 | ), 182 | ( 183 | "bob.json", 184 | "bob.timedata", 185 | "bob.store", 186 | "bob.stderr", 187 | BOB_CONFIG, 188 | ), 189 | ( 190 | "charlie.json", 191 | "charlie.timedata", 192 | "charlie.store", 193 | "charlie.stderr", 194 | CHARLIE_CONFIG, 195 | ), 196 | ( 197 | "dave.json", 198 | "dave.timedata", 199 | "dave.store", 200 | "dave.stderr", 201 | DAVE_CONFIG, 202 | ), 203 | ] { 204 | //Generate configuration file 205 | let config_contents = config_template 206 | .replace("/", &path::MAIN_SEPARATOR.to_string()) 207 | .replace("@alice_port@", &our_ports[0].to_string()) 208 | .replace("@bob_port@", &our_ports[1].to_string()) 209 | .replace("@charlie_port@", &our_ports[2].to_string()) 210 | .replace("@dave_port@", &our_ports[3].to_string()) 211 | .replace("@tempdir@", temp_dir.to_str().unwrap()) 212 | .replace("@certdir@", cert_dir.to_str().unwrap()); 213 | 214 | //Write configuration file 215 | let mut config_path = path::PathBuf::new(); 216 | config_path.push(temp_dir); 217 | config_path.push(config_filename); 218 | fs::write(&config_path, &config_contents).unwrap(); 219 | 220 | //Remove any existing timedata file 221 | let mut timedata_path = path::PathBuf::new(); 222 | timedata_path.push(temp_dir); 223 | timedata_path.push(timedata_filename); 224 
| if let Err(e) = fs::remove_file(&timedata_path) { 225 | assert!(e.kind() == io::ErrorKind::NotFound); 226 | } 227 | 228 | //Create the store path 229 | let mut store_path = path::PathBuf::new(); 230 | store_path.push(temp_dir); 231 | store_path.push(store_dirname); 232 | fs::create_dir(&store_path).unwrap(); 233 | 234 | //Capture stderr 235 | let mut stderr_path = path::PathBuf::new(); 236 | stderr_path.push(temp_dir); 237 | stderr_path.push(stderr_filename); 238 | let stderr = fs::File::create(stderr_path).unwrap(); 239 | 240 | children.push( 241 | process::Command::new(&byztime_bin_path) 242 | .arg(&config_path) 243 | .stderr(stderr) 244 | .spawn() 245 | .unwrap() 246 | .into(), 247 | ); 248 | thread::sleep(time::Duration::from_nanos(200_000_000)); 249 | } 250 | 251 | //Let everyone run for 3 seconds then send SIGTERM 252 | thread::sleep(time::Duration::from_secs(3)); 253 | for child in &children { 254 | nix::sys::signal::kill( 255 | nix::unistd::Pid::from_raw(child.id() as i32), 256 | nix::sys::signal::Signal::SIGTERM, 257 | ) 258 | .unwrap(); 259 | } 260 | 261 | //Give a second to handle the SIGTERM, then send SIGKILL 262 | thread::sleep(time::Duration::from_secs(1)); 263 | for child in &mut children { 264 | let _ = child.kill(); 265 | } 266 | 267 | //Collect exit status and assert success 268 | for child in children { 269 | let output = child.wait_with_output().unwrap(); 270 | assert!(output.stderr.is_empty()); 271 | if !output.status.success() { 272 | panic!("Output status: {}", output.status) 273 | } 274 | } 275 | 276 | //Assert absence of errors in the log files 277 | for log_file in &["alice.log", "bob.log", "charlie.log", "dave.log"] { 278 | let mut log_path = path::PathBuf::new(); 279 | log_path.push(temp_dir); 280 | log_path.push(log_file); 281 | let log_contents = String::from_utf8(fs::read(&log_path).unwrap()).unwrap(); 282 | assert!(log_contents.find("ERROR").is_none()); 283 | } 284 | 285 | let mut mins = Vec::new(); 286 | let mut ests = Vec::new(); 287 | let mut maxs = Vec::new(); 288 | let mut spans = Vec::new(); 289 | 290 | let mut summary_path = path::PathBuf::new(); 291 | summary_path.push(temp_dir); 292 | summary_path.push("summary"); 293 | let mut summary_file = fs::File::create(&summary_path).unwrap(); 294 | 295 | //Read the timedata files left behind 296 | for timedata_file in &[ 297 | "alice.timedata", 298 | "bob.timedata", 299 | "charlie.timedata", 300 | "dave.timedata", 301 | ] { 302 | let mut timedata_path = path::PathBuf::new(); 303 | timedata_path.push(temp_dir); 304 | timedata_path.push(timedata_file); 305 | 306 | let ctx = byztime::ConsumerContext::open(timedata_path.as_ref()).unwrap(); 307 | let (min, est, max) = ctx.offset().unwrap(); 308 | mins.push(min); 309 | ests.push(est); 310 | maxs.push(max); 311 | spans.push(max - min); 312 | 313 | writeln!( 314 | summary_file, 315 | "{}: min = {}; est = {}; max = {}; span = {}", 316 | timedata_file, 317 | min, 318 | est, 319 | max, 320 | max - min 321 | ) 322 | .unwrap(); 323 | } 324 | 325 | mins.sort(); 326 | ests.sort(); 327 | maxs.sort(); 328 | spans.sort(); 329 | 330 | writeln!(summary_file, "").unwrap(); 331 | writeln!(summary_file, "Worst span: {}", spans[3]).unwrap(); 332 | writeln!(summary_file, "Dispersion: {}", ests[3] - ests[0]).unwrap(); 333 | 334 | //Assert that all error bounds are reasonbaly small 335 | for span in spans { 336 | assert!(span > byztime::Timestamp::new(0, 0)); 337 | assert!(span < byztime::Timestamp::new(0, SPAN_LIMIT)); 338 | } 339 | 340 | //Assert that all error ranges overlap 341 
342 | 
343 |     //Assert that all estimates agree reasonably
344 |     assert!(ests[3] - ests[0] < byztime::Timestamp::new(0, DISPERSION_LIMIT));
345 | }
346 | 
--------------------------------------------------------------------------------
/byztimed/tests/common/mod.rs:
--------------------------------------------------------------------------------
1 | //Copyright 2021, Akamai Technologies, Inc.
2 | //SPDX-License-Identifier: Apache-2.0
3 | 
4 | use std::io;
5 | use std::mem;
6 | use std::net::{IpAddr, Ipv6Addr, SocketAddr, TcpListener, UdpSocket};
7 | use std::ops;
8 | use std::process;
9 | use std::sync::Mutex;
10 | 
11 | lazy_static! {
12 |     static ref CUR_PORT: Mutex<u16> = Mutex::new(49151);
13 | }
14 | 
15 | ///Find available ports to use for byztimed tests
16 | ///
17 | /// Returns a vector of `num_ports` port numbers that are available
18 | /// for both UDP and TCP. Every invocation will return a set not used
19 | /// by any prior invocation, so that if this function is used by
20 | /// multiple concurrent tests they won't race with each other. There
21 | /// is inevitably still a race with other unrelated processes on the
22 | /// system which might grab the ports between when we call this
23 | /// function and when byztimed starts up.
24 | pub fn find_ports(num_ports: usize) -> Vec<u16> {
25 |     let mut ports = Vec::with_capacity(num_ports);
26 |     let mut cur_port = CUR_PORT.lock().unwrap();
27 | 
28 |     while ports.len() < num_ports {
29 |         if *cur_port == 65535 {
30 |             panic!("Couldn't find enough available ports");
31 |         }
32 |         *cur_port += 1;
33 |         let addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), *cur_port);
34 |         let udp_result = UdpSocket::bind(&addr);
35 |         let tcp_result = TcpListener::bind(&addr);
36 |         if udp_result.is_ok() && tcp_result.is_ok() {
37 |             ports.push(*cur_port);
38 |         }
39 |     }
40 | 
41 |     ports
42 | }
43 | 
44 | ///Wrapper around a Child that kills it and collects its exit status
45 | /// when dropped
46 | pub struct ChildWrapper {
47 |     child: Option<process::Child>,
48 | }
49 | 
50 | impl ChildWrapper {
51 |     pub fn wait_with_output(mut self) -> io::Result<process::Output> {
52 |         mem::replace(&mut self.child, None)
53 |             .unwrap()
54 |             .wait_with_output()
55 |     }
56 | }
57 | 
58 | impl Drop for ChildWrapper {
59 |     fn drop(&mut self) {
60 |         if let Some(ref mut child) = self.child {
61 |             if child.try_wait().unwrap().is_none() {
62 |                 let _ = child.kill();
63 |                 child.wait().unwrap();
64 |             }
65 |         }
66 |     }
67 | }
68 | 
69 | impl ops::Deref for ChildWrapper {
70 |     type Target = process::Child;
71 |     fn deref(&self) -> &process::Child {
72 |         self.child.as_ref().unwrap()
73 |     }
74 | }
75 | 
76 | impl ops::DerefMut for ChildWrapper {
77 |     fn deref_mut(&mut self) -> &mut process::Child {
78 |         self.child.as_mut().unwrap()
79 |     }
80 | }
81 | 
82 | impl From<process::Child> for ChildWrapper {
83 |     fn from(child: process::Child) -> ChildWrapper {
84 |         ChildWrapper { child: Some(child) }
85 |     }
86 | }
87 | 
--------------------------------------------------------------------------------
/byztimed/tests/test_certs/.gitignore:
--------------------------------------------------------------------------------
1 | *.timedata
2 | *.timedata.lock
--------------------------------------------------------------------------------
/byztimed/tests/test_certs/alice.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIBOjCB4aADAgECAhQui5r9uS3gfiSYYIccZJFKguA06zAKBggqhkjOPQQDAjAQ
3 | MQ4wDAYDVQQDDAV0cmVudDAeFw0xOTEyMTgyMjU1MTVaFw0yOTEyMTUyMjU1MTVa
4 | 
MBAxDjAMBgNVBAMMBWFsaWNlMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEciG+ 5 | QPsR/fCpzwiNNx9PYzvXvPEo+JWUwdS3G5a9W/zs87NxlfcqsGontRUYwkOsONgQ 6 | cku1rwfs8GaL8BaOXKMZMBcwFQYDVR0RBA4wDIIKYWxpY2UudGVzdDAKBggqhkjO 7 | PQQDAgNIADBFAiBd04e/7jF2WIsUKI0k8cJ9qeA7XARtE2yWkA6vSe0GvQIhAIx4 8 | R1R4le7Ovkiw07uktJbH8+6+Sp35UgIi6GQ9wTmV 9 | -----END CERTIFICATE----- 10 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/alice.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIHJMHICAQAwEDEOMAwGA1UEAwwFYWxpY2UwWTATBgcqhkjOPQIBBggqhkjOPQMB 3 | BwNCAARyIb5A+xH98KnPCI03H09jO9e88Sj4lZTB1Lcblr1b/Ozzs3GV9yqwaie1 4 | FRjCQ6w42BByS7WvB+zwZovwFo5coAAwCgYIKoZIzj0EAwIDRwAwRAIgChEBAHOq 5 | vYSO1QpeEtSmfAeUc0xTKgNjZlgxJkQAKy8CIEFlIJz2wfL+ozgXyjrrrQ/KiKTH 6 | wyWkNYSQlwgEzU9A 7 | -----END CERTIFICATE REQUEST----- 8 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/alice.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgeiIbcHQXYicOfkml 3 | OkZwYCPklo+ogTZ8gaTIhCj7jrKhRANCAARyIb5A+xH98KnPCI03H09jO9e88Sj4 4 | lZTB1Lcblr1b/Ozzs3GV9yqwaie1FRjCQ6w42BByS7WvB+zwZovwFo5c 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/bob.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBNjCB3aADAgECAhQui5r9uS3gfiSYYIccZJFKguA07DAKBggqhkjOPQQDAjAQ 3 | MQ4wDAYDVQQDDAV0cmVudDAeFw0xOTEyMTgyMjU1MTVaFw0yOTEyMTUyMjU1MTVa 4 | MA4xDDAKBgNVBAMMA2JvYjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDg54LFt 5 | YnIyqjqWRyAJKTZurIIER4yAu8qDAAhLEJ3jdv6y8c3g2onBUnrbyVbkqrwTyvpq 6 | kuFIwiOX+ujcBeWjFzAVMBMGA1UdEQQMMAqCCGJvYi50ZXN0MAoGCCqGSM49BAMC 7 | A0gAMEUCIDnoeyrIHYCdGDIvfPNLJCF5NmqBuk8oWOtQH6MOJkKFAiEAjZ+LaQg7 8 | Hkesa2eQJ0NA75FB0YMdx6No9un5CsCrZDQ= 9 | -----END CERTIFICATE----- 10 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/bob.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIHIMHACAQAwDjEMMAoGA1UEAwwDYm9iMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcD 3 | QgAEODngsW1icjKqOpZHIAkpNm6sggRHjIC7yoMACEsQneN2/rLxzeDaicFSetvJ 4 | VuSqvBPK+mqS4UjCI5f66NwF5aAAMAoGCCqGSM49BAMCA0gAMEUCIFyQc72wUkvg 5 | GvAA7ChcDW7pQkHJzC3thLjIzw1WMVGLAiEA5l/6gNtN4y8oyeF1aVBWH0bY4ofQ 6 | eeYyLcr1rb9k8qI= 7 | -----END CERTIFICATE REQUEST----- 8 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/bob.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgAbAqjG0jtwz7bzpc 3 | LnT/boAcLZgFsDBVPmtCGmPjaqahRANCAAQ4OeCxbWJyMqo6lkcgCSk2bqyCBEeM 4 | gLvKgwAISxCd43b+svHN4NqJwVJ628lW5Kq8E8r6apLhSMIjl/ro3AXl 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/charlie.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBPjCB5aADAgECAhQui5r9uS3gfiSYYIccZJFKguA07TAKBggqhkjOPQQDAjAQ 3 | MQ4wDAYDVQQDDAV0cmVudDAeFw0xOTEyMTgyMjU1MTVaFw0yOTEyMTUyMjU1MTVa 4 | 
MBIxEDAOBgNVBAMMB2NoYXJsaWUwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARI 5 | E46U4fCUZwo+ErxEwXGYLpLPtTrysLXvUFHmzITnaLd7kF2YGQrLA72xVLO641YC 6 | CJpwMwy7qsJuLvNZafTToxswGTAXBgNVHREEEDAOggxjaGFybGllLnRlc3QwCgYI 7 | KoZIzj0EAwIDSAAwRQIhAJ+WvPH+69DDUnwzRf6+6AWV+VGpfaayqWRLcWYX9Ine 8 | AiARlrStbJmYjGfRYFIf4e8qBKZCnfEvsGSbWkFJ3rYdZQ== 9 | -----END CERTIFICATE----- 10 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/charlie.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIHNMHQCAQAwEjEQMA4GA1UEAwwHY2hhcmxpZTBZMBMGByqGSM49AgEGCCqGSM49 3 | AwEHA0IABEgTjpTh8JRnCj4SvETBcZguks+1OvKwte9QUebMhOdot3uQXZgZCssD 4 | vbFUs7rjVgIImnAzDLuqwm4u81lp9NOgADAKBggqhkjOPQQDAgNJADBGAiEAmrDj 5 | HygQeGl/nWUYgupOckTol8Hm6YiezRcRI8zLmcwCIQDJoxfQktD/4r1Dsg4OGmGT 6 | p7UAmPU0WLipXHycPmaGQw== 7 | -----END CERTIFICATE REQUEST----- 8 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/charlie.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgBNKIWikzd87RbKNh 3 | BGqFTmBOZlJDFazk+mJHQ1uE7luhRANCAARIE46U4fCUZwo+ErxEwXGYLpLPtTry 4 | sLXvUFHmzITnaLd7kF2YGQrLA72xVLO641YCCJpwMwy7qsJuLvNZafTT 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/dave.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBOTCB36ADAgECAhQui5r9uS3gfiSYYIccZJFKguA07jAKBggqhkjOPQQDAjAQ 3 | MQ4wDAYDVQQDDAV0cmVudDAeFw0xOTEyMTgyMjU1MTVaFw0yOTEyMTUyMjU1MTVa 4 | MA8xDTALBgNVBAMMBGRhdmUwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARPXXGS 5 | Vk8Wg7N59zio9+w7sz20zH10BRdrnfeOZRSKmJb1tV6W5acsRoOqCVVqWqYzBbvx 6 | MfLkvmYZ++25EyvdoxgwFjAUBgNVHREEDTALgglkYXZlLnRlc3QwCgYIKoZIzj0E 7 | AwIDSQAwRgIhAMd87t3PYnGHhvZ+mEAMXKv1VZ1Bh7PLlzsGbkdeHed0AiEAoOFE 8 | AwSh7fxQNvInViKF8TLX/VEqIKQGrfDn24Wvyu4= 9 | -----END CERTIFICATE----- 10 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/dave.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIHKMHECAQAwDzENMAsGA1UEAwwEZGF2ZTBZMBMGByqGSM49AgEGCCqGSM49AwEH 3 | A0IABE9dcZJWTxaDs3n3OKj37DuzPbTMfXQFF2ud945lFIqYlvW1XpblpyxGg6oJ 4 | VWpapjMFu/Ex8uS+Zhn77bkTK92gADAKBggqhkjOPQQDAgNJADBGAiEApryKCjAK 5 | 3EfjNadl0o1a06J6sSjQi/7wkPoZg2CEhUYCIQCdUh+XLm/nUsofxXtt8A/GbjRy 6 | jobvFMwvur5TDDkiJg== 7 | -----END CERTIFICATE REQUEST----- 8 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/dave.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgkZGeMTEdypk6UUVZ 3 | FvXtsmrgnc2hUtW8WW1ZCGIZOwyhRANCAARPXXGSVk8Wg7N59zio9+w7sz20zH10 4 | BRdrnfeOZRSKmJb1tV6W5acsRoOqCVVqWqYzBbvxMfLkvmYZ++25Eyvd 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/gen-x509.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #Copyright 2020, Akamai Technologies, Inc. 
3 | #SPDX-License-Identifier: Apache-2.0 4 | 5 | for node in alice bob charlie dave gorgias trent; do 6 | openssl ecparam -name prime256v1 -genkey -noout | openssl pkcs8 -topk8 -nocrypt -out ${node}.key 7 | done 8 | 9 | openssl req -new -subj "/CN=trent" -key trent.key -out trent.csr 10 | openssl x509 -req -in trent.csr -signkey trent.key -out trent.crt -days 3650 -extfile openssl.cnf -extensions v3_ca 11 | 12 | for node in alice bob charlie dave gorgias; do 13 | openssl req -new -subj "/CN=${node}" -key ${node}.key -out ${node}.csr 14 | openssl x509 -req -in ${node}.csr -CA trent.crt -CAkey trent.key \ 15 | -CAcreateserial -out ${node}.crt -days 3650 \ 16 | -extfile <(cat ./openssl.cnf <(printf "[SAN]\nsubjectAltName=DNS:${node}.test"))\ 17 | -extensions SAN 18 | done 19 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/gorgias.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIBPTCB5aADAgECAhQui5r9uS3gfiSYYIccZJFKguA07zAKBggqhkjOPQQDAjAQ 3 | MQ4wDAYDVQQDDAV0cmVudDAeFw0yMDAyMDUyMDI1MzNaFw0zMDAyMDIyMDI1MzNa 4 | MBIxEDAOBgNVBAMMB2dvcmdpYXMwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQQ 5 | Ocy1DCH9nPmlXoGip4yTlUsTgpcFP8a0N6rIwfu8PFiV87DeoqvD8m0ajztj/3yW 6 | s7bEkpEzHpBdFF48jPaYoxswGTAXBgNVHREEEDAOggxnb3JnaWFzLnRlc3QwCgYI 7 | KoZIzj0EAwIDRwAwRAIgQm5xezoMwtgIH3BCIhTEFpVKtXccNsC+MEuPWNKq0JIC 8 | IBqyrUUyhK5xU65dAOu2JdppFYcpEXs3j2G9dWZlU/YO 9 | -----END CERTIFICATE----- 10 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/gorgias.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIHLMHQCAQAwEjEQMA4GA1UEAwwHZ29yZ2lhczBZMBMGByqGSM49AgEGCCqGSM49 3 | AwEHA0IABBA5zLUMIf2c+aVegaKnjJOVSxOClwU/xrQ3qsjB+7w8WJXzsN6iq8Py 4 | bRqPO2P/fJaztsSSkTMekF0UXjyM9pigADAKBggqhkjOPQQDAgNHADBEAiBZuLQ/ 5 | 5ICxwkMDssG4GuXC9QTpBNN1CsyFZ6fTYo2CswIgSja4A9N/gDlIWBULJGtnYX8c 6 | L9emLYwHHBqqNpuUF7k= 7 | -----END CERTIFICATE REQUEST----- 8 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/gorgias.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgHE7rRo2XLzgMNBN8 3 | Mi7FXTUWYYQD/9Tbyxh4tp0xFNShRANCAAQQOcy1DCH9nPmlXoGip4yTlUsTgpcF 4 | P8a0N6rIwfu8PFiV87DeoqvD8m0ajztj/3yWs7bEkpEzHpBdFF48jPaY 5 | -----END PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /byztimed/tests/test_certs/openssl.cnf: -------------------------------------------------------------------------------- 1 | # 2 | # OpenSSL example configuration file. 3 | # This is mostly being used for generation of certificate requests. 4 | # 5 | 6 | # Note that you can include other files from the main configuration 7 | # file using the .include directive. 8 | #.include filename 9 | 10 | # This definition stops the following lines choking if HOME isn't 11 | # defined. 12 | HOME = . 
13 | 14 | # Extra OBJECT IDENTIFIER info: 15 | #oid_file = $ENV::HOME/.oid 16 | oid_section = new_oids 17 | 18 | # To use this configuration file with the "-extfile" option of the 19 | # "openssl x509" utility, name here the section containing the 20 | # X.509v3 extensions to use: 21 | # extensions = 22 | # (Alternatively, use a configuration file that has only 23 | # X.509v3 extensions in its main [= default] section.) 24 | 25 | [ new_oids ] 26 | 27 | # We can add new OIDs in here for use by 'ca', 'req' and 'ts'. 28 | # Add a simple OID like this: 29 | # testoid1=1.2.3.4 30 | # Or use config file substitution like this: 31 | # testoid2=${testoid1}.5.6 32 | 33 | # Policies used by the TSA examples. 34 | tsa_policy1 = 1.2.3.4.1 35 | tsa_policy2 = 1.2.3.4.5.6 36 | tsa_policy3 = 1.2.3.4.5.7 37 | 38 | #################################################################### 39 | [ ca ] 40 | default_ca = CA_default # The default ca section 41 | 42 | #################################################################### 43 | [ CA_default ] 44 | 45 | dir = ./demoCA # Where everything is kept 46 | certs = $dir/certs # Where the issued certs are kept 47 | crl_dir = $dir/crl # Where the issued crl are kept 48 | database = $dir/index.txt # database index file. 49 | #unique_subject = no # Set to 'no' to allow creation of 50 | # several certs with same subject. 51 | new_certs_dir = $dir/newcerts # default place for new certs. 52 | 53 | certificate = $dir/cacert.pem # The CA certificate 54 | serial = $dir/serial # The current serial number 55 | crlnumber = $dir/crlnumber # the current crl number 56 | # must be commented out to leave a V1 CRL 57 | crl = $dir/crl.pem # The current CRL 58 | private_key = $dir/private/cakey.pem# The private key 59 | 60 | x509_extensions = usr_cert # The extensions to add to the cert 61 | 62 | # Comment out the following two lines for the "traditional" 63 | # (and highly broken) format. 64 | name_opt = ca_default # Subject Name options 65 | cert_opt = ca_default # Certificate field options 66 | 67 | # Extension copying option: use with caution. 68 | # copy_extensions = copy 69 | 70 | # Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs 71 | # so this is commented out by default to leave a V1 CRL. 72 | # crlnumber must also be commented out to leave a V1 CRL. 73 | # crl_extensions = crl_ext 74 | 75 | default_days = 365 # how long to certify for 76 | default_crl_days= 30 # how long before next CRL 77 | default_md = default # use public key default MD 78 | preserve = no # keep passed DN ordering 79 | 80 | # A few difference way of specifying how similar the request should look 81 | # For type CA, the listed attributes must be the same, and the optional 82 | # and supplied fields are just that :-) 83 | policy = policy_match 84 | 85 | # For the CA policy 86 | [ policy_match ] 87 | countryName = match 88 | stateOrProvinceName = match 89 | organizationName = match 90 | organizationalUnitName = optional 91 | commonName = supplied 92 | emailAddress = optional 93 | 94 | # For the 'anything' policy 95 | # At this point in time, you must list all acceptable 'object' 96 | # types. 
97 | [ policy_anything ] 98 | countryName = optional 99 | stateOrProvinceName = optional 100 | localityName = optional 101 | organizationName = optional 102 | organizationalUnitName = optional 103 | commonName = supplied 104 | emailAddress = optional 105 | 106 | #################################################################### 107 | [ req ] 108 | default_bits = 2048 109 | default_keyfile = privkey.pem 110 | distinguished_name = req_distinguished_name 111 | attributes = req_attributes 112 | x509_extensions = v3_ca # The extensions to add to the self signed cert 113 | 114 | # Passwords for private keys if not present they will be prompted for 115 | # input_password = secret 116 | # output_password = secret 117 | 118 | # This sets a mask for permitted string types. There are several options. 119 | # default: PrintableString, T61String, BMPString. 120 | # pkix : PrintableString, BMPString (PKIX recommendation before 2004) 121 | # utf8only: only UTF8Strings (PKIX recommendation after 2004). 122 | # nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings). 123 | # MASK:XXXX a literal mask value. 124 | # WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings. 125 | string_mask = utf8only 126 | 127 | # req_extensions = v3_req # The extensions to add to a certificate request 128 | 129 | [ req_distinguished_name ] 130 | countryName = Country Name (2 letter code) 131 | countryName_default = AU 132 | countryName_min = 2 133 | countryName_max = 2 134 | 135 | stateOrProvinceName = State or Province Name (full name) 136 | stateOrProvinceName_default = Some-State 137 | 138 | localityName = Locality Name (eg, city) 139 | 140 | 0.organizationName = Organization Name (eg, company) 141 | 0.organizationName_default = Internet Widgits Pty Ltd 142 | 143 | # we can do this but it is not needed normally :-) 144 | #1.organizationName = Second Organization Name (eg, company) 145 | #1.organizationName_default = World Wide Web Pty Ltd 146 | 147 | organizationalUnitName = Organizational Unit Name (eg, section) 148 | #organizationalUnitName_default = 149 | 150 | commonName = Common Name (e.g. server FQDN or YOUR name) 151 | commonName_max = 64 152 | 153 | emailAddress = Email Address 154 | emailAddress_max = 64 155 | 156 | # SET-ex3 = SET extension number 3 157 | 158 | [ req_attributes ] 159 | challengePassword = A challenge password 160 | challengePassword_min = 4 161 | challengePassword_max = 20 162 | 163 | unstructuredName = An optional company name 164 | 165 | [ usr_cert ] 166 | 167 | # These extensions are added when 'ca' signs a request. 168 | 169 | # This goes against PKIX guidelines but some CAs do it and some software 170 | # requires this to avoid interpreting an end user certificate as a CA. 171 | 172 | basicConstraints=CA:FALSE 173 | 174 | # Here are some examples of the usage of nsCertType. If it is omitted 175 | # the certificate can be used for anything *except* object signing. 176 | 177 | # This is OK for an SSL server. 178 | # nsCertType = server 179 | 180 | # For an object signing certificate this would be used. 181 | # nsCertType = objsign 182 | 183 | # For normal client use this is typical 184 | # nsCertType = client, email 185 | 186 | # and for everything including object signing: 187 | # nsCertType = client, email, objsign 188 | 189 | # This is typical in keyUsage for a client certificate. 190 | # keyUsage = nonRepudiation, digitalSignature, keyEncipherment 191 | 192 | # This will be displayed in Netscape's comment listbox. 
193 | nsComment = "OpenSSL Generated Certificate" 194 | 195 | # PKIX recommendations harmless if included in all certificates. 196 | subjectKeyIdentifier=hash 197 | authorityKeyIdentifier=keyid,issuer 198 | 199 | # This stuff is for subjectAltName and issuerAltname. 200 | # Import the email address. 201 | # subjectAltName=email:copy 202 | # An alternative to produce certificates that aren't 203 | # deprecated according to PKIX. 204 | # subjectAltName=email:move 205 | 206 | # Copy subject details 207 | # issuerAltName=issuer:copy 208 | 209 | #nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem 210 | #nsBaseUrl 211 | #nsRevocationUrl 212 | #nsRenewalUrl 213 | #nsCaPolicyUrl 214 | #nsSslServerName 215 | 216 | # This is required for TSA certificates. 217 | # extendedKeyUsage = critical,timeStamping 218 | 219 | [ v3_req ] 220 | 221 | # Extensions to add to a certificate request 222 | 223 | basicConstraints = CA:FALSE 224 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 225 | 226 | [ v3_ca ] 227 | 228 | 229 | # Extensions for a typical CA 230 | 231 | 232 | # PKIX recommendation. 233 | 234 | subjectKeyIdentifier=hash 235 | 236 | authorityKeyIdentifier=keyid:always,issuer 237 | 238 | basicConstraints = critical,CA:true 239 | 240 | # Key usage: this is typical for a CA certificate. However since it will 241 | # prevent it being used as an test self-signed certificate it is best 242 | # left out by default. 243 | # keyUsage = cRLSign, keyCertSign 244 | 245 | # Some might want this also 246 | # nsCertType = sslCA, emailCA 247 | 248 | # Include email address in subject alt name: another PKIX recommendation 249 | # subjectAltName=email:copy 250 | # Copy issuer details 251 | # issuerAltName=issuer:copy 252 | 253 | # DER hex encoding of an extension: beware experts only! 254 | # obj=DER:02:03 255 | # Where 'obj' is a standard or added object 256 | # You can even override a supported extension: 257 | # basicConstraints= critical, DER:30:03:01:01:FF 258 | 259 | [ crl_ext ] 260 | 261 | # CRL extensions. 262 | # Only issuerAltName and authorityKeyIdentifier make any sense in a CRL. 263 | 264 | # issuerAltName=issuer:copy 265 | authorityKeyIdentifier=keyid:always 266 | 267 | [ proxy_cert_ext ] 268 | # These extensions should be added when creating a proxy certificate 269 | 270 | # This goes against PKIX guidelines but some CAs do it and some software 271 | # requires this to avoid interpreting an end user certificate as a CA. 272 | 273 | basicConstraints=CA:FALSE 274 | 275 | # Here are some examples of the usage of nsCertType. If it is omitted 276 | # the certificate can be used for anything *except* object signing. 277 | 278 | # This is OK for an SSL server. 279 | # nsCertType = server 280 | 281 | # For an object signing certificate this would be used. 282 | # nsCertType = objsign 283 | 284 | # For normal client use this is typical 285 | # nsCertType = client, email 286 | 287 | # and for everything including object signing: 288 | # nsCertType = client, email, objsign 289 | 290 | # This is typical in keyUsage for a client certificate. 291 | # keyUsage = nonRepudiation, digitalSignature, keyEncipherment 292 | 293 | # This will be displayed in Netscape's comment listbox. 294 | nsComment = "OpenSSL Generated Certificate" 295 | 296 | # PKIX recommendations harmless if included in all certificates. 297 | subjectKeyIdentifier=hash 298 | authorityKeyIdentifier=keyid,issuer 299 | 300 | # This stuff is for subjectAltName and issuerAltname. 301 | # Import the email address. 
302 | # subjectAltName=email:copy 303 | # An alternative to produce certificates that aren't 304 | # deprecated according to PKIX. 305 | # subjectAltName=email:move 306 | 307 | # Copy subject details 308 | # issuerAltName=issuer:copy 309 | 310 | #nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem 311 | #nsBaseUrl 312 | #nsRevocationUrl 313 | #nsRenewalUrl 314 | #nsCaPolicyUrl 315 | #nsSslServerName 316 | 317 | # This really needs to be in place for it to be a proxy certificate. 318 | proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo 319 | 320 | #################################################################### 321 | [ tsa ] 322 | 323 | default_tsa = tsa_config1 # the default TSA section 324 | 325 | [ tsa_config1 ] 326 | 327 | # These are used by the TSA reply generation only. 328 | dir = ./demoCA # TSA root directory 329 | serial = $dir/tsaserial # The current serial number (mandatory) 330 | crypto_device = builtin # OpenSSL engine to use for signing 331 | signer_cert = $dir/tsacert.pem # The TSA signing certificate 332 | # (optional) 333 | certs = $dir/cacert.pem # Certificate chain to include in reply 334 | # (optional) 335 | signer_key = $dir/private/tsakey.pem # The TSA private key (optional) 336 | signer_digest = sha256 # Signing digest to use. (Optional) 337 | default_policy = tsa_policy1 # Policy if request did not specify it 338 | # (optional) 339 | other_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional) 340 | digests = sha1, sha256, sha384, sha512 # Acceptable message digests (mandatory) 341 | accuracy = secs:1, millisecs:500, microsecs:100 # (optional) 342 | clock_precision_digits = 0 # number of digits after dot. (optional) 343 | ordering = yes # Is ordering defined for timestamps? 344 | # (optional, default: no) 345 | tsa_name = yes # Must the TSA name be included in the reply? 346 | # (optional, default: no) 347 | ess_cert_id_chain = no # Must the ESS cert id chain be included? 
| # (optional, default: no)
349 | ess_cert_id_alg = sha1 # algorithm to compute certificate
350 | # identifier (optional, default: sha1)
351 | 
--------------------------------------------------------------------------------
/byztimed/tests/test_certs/trent.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIBdjCCARugAwIBAgIUPnfUkfWc6SSRQ+0cV+gImeGVJG4wCgYIKoZIzj0EAwIw
3 | EDEOMAwGA1UEAwwFdHJlbnQwHhcNMTkxMjE4MjI1NTE1WhcNMjkxMjE1MjI1NTE1
4 | WjAQMQ4wDAYDVQQDDAV0cmVudDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABEgR
5 | mRryDZ3uZNTdIEVK2gZEQZkLIygvqh8HIJbpuwM55/FIEeM8u7H2snsg0YW1rtrO
6 | Kzmx73aypDC3siSCZ4KjUzBRMB0GA1UdDgQWBBSqdOjt98kc/s90LMaylHEIIdvf
7 | vzAfBgNVHSMEGDAWgBSqdOjt98kc/s90LMaylHEIIdvfvzAPBgNVHRMBAf8EBTAD
8 | AQH/MAoGCCqGSM49BAMCA0kAMEYCIQCDQ1ZTEdjMty90GwH1z0AyxWhstWzCtFq6
9 | wl33MH+JYQIhAIqJlLbJOYaCpbASBLy7SrFMwDs+/QPafiOoT7YP4wWU
10 | -----END CERTIFICATE-----
11 | 
--------------------------------------------------------------------------------
/byztimed/tests/test_certs/trent.csr:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE REQUEST-----
2 | MIHKMHICAQAwEDEOMAwGA1UEAwwFdHJlbnQwWTATBgcqhkjOPQIBBggqhkjOPQMB
3 | BwNCAARIEZka8g2d7mTU3SBFStoGREGZCyMoL6ofByCW6bsDOefxSBHjPLux9rJ7
4 | INGFta7azis5se92sqQwt7IkgmeCoAAwCgYIKoZIzj0EAwIDSAAwRQIhAJDxKcNp
5 | Z3Z4hCuxjpfrv/5DXIjxZYsSiz2xhyqVtcpJAiAnxJDaIdpDbi0vOiauG6hqpwuE
6 | HQElrhfrpLXZdqGccQ==
7 | -----END CERTIFICATE REQUEST-----
8 | 
--------------------------------------------------------------------------------
/byztimed/tests/test_certs/trent.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgcloVrvVNPoIzOVU9
3 | anEPZq2+syvHxPXIU6obuj3he9qhRANCAARIEZka8g2d7mTU3SBFStoGREGZCyMo
4 | L6ofByCW6bsDOefxSBHjPLux9rJ7INGFta7azis5se92sqQwt7IkgmeC
5 | -----END PRIVATE KEY-----
6 | 
--------------------------------------------------------------------------------
/byztimed/tests/test_certs/trent.srl:
--------------------------------------------------------------------------------
1 | 2E8B9AFDB92DE07E249860871C64914A82E034EF
2 | 
--------------------------------------------------------------------------------
/shell.nix:
--------------------------------------------------------------------------------
1 | 
2 | let moz_overlay = import (builtins.fetchTarball https://github.com/mozilla/nixpkgs-mozilla/archive/master.tar.gz);
3 | nixpkgs = import <nixpkgs> { overlays = [ moz_overlay ]; };
4 | rustChannel = nixpkgs.latest.rustChannels.stable; in
5 | with nixpkgs;
6 | stdenv.mkDerivation {
7 | name = "byztimed";
8 | nativeBuildInputs = [ doxygen pkgconfig rustChannel.rust protobuf openssl ];
9 | 
10 | PROTOC = "${protobuf}/bin/protoc";
11 | PROTOC_INCLUDE = "${protobuf}/include";
12 | }
13 | 
--------------------------------------------------------------------------------