├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── azure-pipelines.yml ├── examples ├── print_folder_mapping.rs ├── print_live_folder_mapping.rs └── test_iter_records.rs ├── src ├── bin │ ├── listen_usn.rs │ └── rusty_usn.rs ├── error.rs ├── flags.rs ├── lib.rs ├── liveusn │ ├── error.rs │ ├── listener.rs │ ├── live.rs │ ├── mod.rs │ ├── ntfs.rs │ └── winfuncs.rs ├── mapping.rs ├── record.rs ├── usn.rs ├── usn_err.rs └── utils.rs └── tests ├── error_tests.rs ├── live_ntfs_tests.rs ├── record_tests.rs ├── reference_tests.rs └── win_tests.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## [1.5.1] - 2020-01-07 8 | ### Changed 9 | - updated to mft 0.5 10 | 11 | ## [1.5.0] - 2019-12-01 12 | ### Added 13 | - Major improvements for file path enumeration for listen_usn (now v1.1.0) 14 | - Minor code cleanup 15 | 16 | ## [1.4.0] - 2019-07-25 17 | ### Added 18 | - Version 3 records for live monitoring and full path resolution for monitor. 19 | 20 | ## [1.3.0] - 2019-07-25 21 | ### Added 22 | - added print_folder_mapping example tool 23 | - added path enumeration to rusty_usn tool (now v1.2.0) 24 | 25 | ### Changed 26 | - file attributes are now flags and not an integer 27 | 28 | ## [1.2.0] - 2019-06-14 29 | ### Added 30 | - listen_usn tool (0.1.0) 31 | 32 | ### Changed 33 | - changed Windows build scripts for azure pipeline 34 | 35 | ## [1.1.0] - 2019-06-01 36 | ### Added 37 | - `_source` to output 38 | - directory processing 39 | 40 | ## [1.0.0] - 2019-05-27 41 | ### Changed 42 | - Rewrite and removal of features -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rusty_usn" 3 | description = "A fast and cross platform USN Parser written in Rust that outputs to JSONL" 4 | version = "1.5.1" 5 | authors = ["Matthew Seyer"] 6 | edition = "2018" 7 | homepage = "https://github.com/forensicmatt/RustyUsn" 8 | repository = "https://github.com/forensicmatt/RustyUsn" 9 | license = "Apache-2.0" 10 | readme = "README.md" 11 | 12 | [dependencies] 13 | clap = "2" 14 | log = "0.4" 15 | hex = "0.3" 16 | fern = "0.5" 17 | time = "0.1" 18 | chrono = "0.4" 19 | regex = "1" 20 | lazy_static = "1.3.0" 21 | bitflags = "1.0" 22 | encoding = "0.2" 23 | serde = "1.0" 24 | serde_json = "1.0" 25 | byteorder = "1.3.1" 26 | winstructs = "0.3.0" 27 | lru = "0.1.17" 28 | rayon = {version = "1.0.3", optional = true} 29 | 30 | [dependencies.mft] 31 | version = "0.5" 32 | 33 | [dependencies.winapi] 34 | version = "0.3" 35 | features = [ 36 | "winioctl", 37 | "ioapiset" 38 | ] 39 | optional = true 40 | 41 | [features] 42 | default = ["multithreading"] 43 | multithreading = ["rayon"] 44 | windows = ["winapi"] 45 | 46 | [[bin]] 47 | name = "rusty_usn" 48 | 49 | [[bin]] 50 | name = "listen_usn" 51 | required-features = ["windows"] 52 | 53 | [[example]] 54 | name = "print_live_folder_mapping" 55 | required-features = ["windows"]
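The `[features]` table above keeps the Windows-only binary and example (`listen_usn`, `print_live_folder_mapping`) behind the `windows` feature, while the core parser builds everywhere. As a rough sketch of what driving that parser from another crate could look like — the journal path here is hypothetical, and the calls simply mirror the ones made in `src/bin/rusty_usn.rs` further down in this repository:

```rust
use rusty_usn::usn::{UsnParser, UsnParserSettings};

fn main() {
    // Hypothetical path to an extracted $UsnJrnl:$J file.
    let source = "/cases/extracted/UsnJrnl_J.bin";

    // A thread count of 0 means "use the number of CPU cores",
    // matching the default of the rusty_usn binary.
    let config = UsnParserSettings::new().thread_count(0);

    let mut parser = match UsnParser::from_path(source) {
        Ok(parser) => parser.with_configuration(config),
        Err(error) => {
            eprintln!("Error creating parser for {}: {}", source, error);
            return;
        }
    };

    // Each record serializes to a JSON value; emit one JSON object per line.
    for record in parser.records() {
        let json_str = serde_json::to_string(&record.to_json_value().unwrap()).unwrap();
        println!("{}", json_str);
    }
}
```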
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2017 Matthew Seyer 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://dev.azure.com/matthewseyer/dfir/_apis/build/status/forensicmatt.RustyUsn?branchName=master)](https://dev.azure.com/matthewseyer/dfir/_build/latest?definitionId=1&branchName=master) 2 | # RustyUsn 3 | A fast and cross platform USN Parser written in Rust. Output is [JSONL](http://jsonlines.org/). 4 | 5 | # Tools 6 | There are currently two tools associated with this package: rusty_usn and listen_usn. USN record version 4 records, and version 3 records that use 128-bit file references, are not currently implemented. 7 | (Version 3 records that still use 64-bit references are supported.) 8 | 9 | ## rusty_usn 10 | 11 | ``` 12 | rusty_usn 1.2.0 13 | Matthew Seyer 14 | USN Parser written in Rust. Output is JSONL. 15 | 16 | USAGE: 17 | rusty_usn.exe [OPTIONS] 18 | 19 | FLAGS: 20 | -h, --help Prints help information 21 | -V, --version Prints version information 22 | 23 | OPTIONS: 24 | -d, --debug Debug level to use. [possible values: Off, Error, Warn, Info, Debug, Trace] 25 | -m, --mft The MFT to use for creating folder mapping. 26 | -s, --source The source to parse. If the source is a directory, the directory will be recursed looking 27 | for any files that end with '$J'. (Do not use a directory if using an MFT file.) 28 | -t, --threads Sets the number of worker threads, defaults to number of CPU cores. If the --mft option 29 | is used, the tool can only run single threaded. [default: 0] 30 | ``` 31 | 32 | ### Output 33 | Records are written to stdout as jsonl.
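Because each line is a self-contained JSON object, the output can be post-processed line by line with any JSON-aware tooling. A minimal, hypothetical sketch of reading the stream back in Rust with `serde_json` (the field names used here are taken from the sample records that follow; none of this code is part of the crate itself):

```rust
use std::io::{self, BufRead};
use serde_json::Value;

fn main() -> io::Result<()> {
    // Consume rusty_usn output from stdin, e.g.:
    //   rusty_usn -s $J | jsonl_reader
    let stdin = io::stdin();
    for line in stdin.lock().lines() {
        let record: Value = serde_json::from_str(&line?).expect("each line is valid JSON");
        // "timestamp", "file_name" and "reason" appear in every record,
        // as shown in the sample output below.
        println!(
            "{} {} {}",
            record["timestamp"], record["file_name"], record["reason"]
        );
    }
    Ok(())
}
```

For reference, two records as emitted by rusty_usn: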
34 | 35 | ``` 36 | {"_offset":40018936,"_source":"C:\\Test\\$UsnJrnl.J","file_attributes":"FILE_ATTRIBUTE_ARCHIVE | FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM","file_name":"lastalive0.dat","file_name_length":28,"file_name_offset":60,"file_reference":{"entry":61346,"sequence":10},"full_name":"[root]/Windows/ServiceProfiles/LocalService/AppData/Local/lastalive0.dat","major_version":2,"minor_version":0,"parent_reference":{"entry":83529,"sequence":2},"reason":"USN_REASON_CLOSE | USN_REASON_DATA_EXTEND | USN_REASON_DATA_TRUNCATION","record_length":88,"security_id":0,"source_info":"(empty)","timestamp":"2019-03-20T21:35:52.322741Z","usn":558015480} 37 | {"_offset":40018848,"_source":"C:\\Test\\$UsnJrnl.J","file_attributes":"FILE_ATTRIBUTE_ARCHIVE | FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM","file_name":"lastalive0.dat","file_name_length":28,"file_name_offset":60,"file_reference":{"entry":61346,"sequence":10},"full_name":"[root]/Windows/ServiceProfiles/LocalService/AppData/Local/lastalive0.dat","major_version":2,"minor_version":0,"parent_reference":{"entry":83529,"sequence":2},"reason":"USN_REASON_DATA_EXTEND | USN_REASON_DATA_TRUNCATION","record_length":88,"security_id":0,"source_info":"(empty)","timestamp":"2019-03-20T21:35:52.322741Z","usn":558015392} 38 | ``` 39 | 40 | ## listen_usn 41 | A tool that uses the Windows API to listen to USN changes for a given volume in real time. Output is JSONL. Note 42 | that this tool requires the "windows" feature to be enabled at build time; the feature is off by default so that the build 43 | process can complete on non-Windows platforms. (See the **Build** section of this README.) 44 | 45 | Also note that the _offset field in the output is currently relative to the buffer returned by the Windows API rather than to the journal itself, so don't be surprised to see the same offsets repeated in this tool's output. 46 | 47 | ``` 48 | listen_usn 0.1.0 49 | Matthew Seyer 50 | USN listener written in Rust. Output is JSONL. 51 | 52 | USAGE: 53 | listen_usn.exe [FLAGS] [OPTIONS] 54 | 55 | FLAGS: 56 | -h, --help Prints help information 57 | -p, --historical List historical records along with listening to new changes. 58 | -V, --version Prints version information 59 | 60 | OPTIONS: 61 | -d, --debug Debug level to use. [possible values: Off, Error, Warn, Info, Debug, Trace] 62 | -s, --source The source volume to listen to. (example: '\\.\C:') 63 | ``` 64 | 65 | 66 | # Carve USN from Unallocated 67 | To extract unallocated space from an image, use the Sleuth Kit's `blkls` with the `-A` option and redirect the output to a file. Pass that file into rusty_usn.exe. 68 | 69 | 1. Use TSK to extract the unallocated data. 70 | ``` 71 | D:\Tools\sleuthkit-4.6.6-win32\bin>mmls D:\Images\CTF_DEFCON_2018\Image3-Desktop\Desktop-Disk0.e01 72 | DOS Partition Table 73 | Offset Sector: 0 74 | Units are in 512-byte sectors 75 | 76 | Slot Start End Length Description 77 | 000: Meta 0000000000 0000000000 0000000001 Primary Table (#0) 78 | 001: ------- 0000000000 0001126399 0001126400 Unallocated 79 | 002: 000:000 0001126400 0103904587 0102778188 NTFS / exFAT (0x07) 80 | 003: ------- 0103904588 0103905279 0000000692 Unallocated 81 | 004: 000:001 0103905280 0104855551 0000950272 Unknown Type (0x27) 82 | 005: ------- 0104855552 0104857599 0000002048 Unallocated 83 | 84 | D:\Tools\sleuthkit-4.6.6-win32\bin>blkls -A -o 1126400 D:\Images\CTF_DEFCON_2018\Image3-Desktop\Desktop-Disk0.e01 > D:\Images\CTF_DEFCON_2018\Image3-Desktop\Desktop-Disk0.unallocated 85 | ``` 86 | 87 | 2. Parse the extracted unallocated file with rusty_usn.exe.
88 | ``` 89 | D:\Tools\RustyTools>rusty_usn.exe -s D:\Images\CTF_DEFCON_2018\Image3-Desktop\Desktop-Disk0.unallocated > D:\Testing\unallocated-usn.jsonl 90 | ``` 91 | 92 | 3. Count records recovered. 93 | ``` 94 | D:\Tools\RustyTools>rg -U -c "" D:\Testing\unallocated-usn.jsonl 95 | 1558102 96 | ``` 97 | 98 | ## Build 99 | If you are building on windows and want `listen_usn.exe` you will need to build with the `windows` feature as it is not on by default. Use: `cargo build --all-features --release` for compiling with Rust in Windows. Use `cargo build --release` for non-Windows systems. 100 | 101 | Currently using Rust 1.36.0 Nightly. 102 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | branches: 3 | include: ['*'] 4 | tags: 5 | include: ['*'] 6 | 7 | strategy: 8 | matrix: 9 | windows-stable: 10 | imageName: 'vs2017-win2016' 11 | target: 'x86_64-pc-windows-msvc' 12 | rustup_toolchain: stable 13 | mac-stable: 14 | imageName: 'macos-10.13' 15 | target: 'x86_64-apple-darwin' 16 | rustup_toolchain: stable 17 | linux-stable: 18 | imageName: 'ubuntu-16.04' 19 | target: 'x86_64-unknown-linux-gnu' 20 | rustup_toolchain: stable 21 | 22 | pool: 23 | vmImage: $(imageName) 24 | 25 | steps: 26 | - script: | 27 | curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $RUSTUP_TOOLCHAIN 28 | echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin" 29 | displayName: Install rust 30 | condition: ne( variables['Agent.OS'], 'Windows_NT' ) 31 | - script: | 32 | curl -sSf -o rustup-init.exe https://win.rustup.rs 33 | rustup-init.exe -y --default-toolchain %RUSTUP_TOOLCHAIN% 34 | echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin" 35 | displayName: Windows install rust 36 | condition: eq( variables['Agent.OS'], 'Windows_NT' ) 37 | 38 | - script: cargo build --all-features --release 39 | displayName: Cargo build Windows_NT 40 | condition: eq( variables['Agent.OS'], 'Windows_NT' ) 41 | - script: cargo build --release 42 | displayName: Cargo build NOT Windows_NT 43 | condition: ne( variables['Agent.OS'], 'Windows_NT' ) 44 | 45 | - script: cargo test --all-features 46 | displayName: Cargo test Windows_NT 47 | condition: eq( variables['Agent.OS'], 'Windows_NT' ) 48 | - script: cargo test 49 | displayName: Cargo test NOT Windows_NT 50 | condition: ne( variables['Agent.OS'], 'Windows_NT' ) 51 | 52 | - bash: | 53 | MY_TAG="$(Build.SourceBranch)" 54 | MY_TAG=${MY_TAG#refs/tags/} 55 | echo $MY_TAG 56 | echo "##vso[task.setvariable variable=build.my_tag]$MY_TAG" 57 | displayName: "Create tag variable" 58 | - bash: | 59 | DATE="$(date +%Y-%m-%d)" 60 | echo "##vso[task.setvariable variable=build.date]$DATE" 61 | displayName: "Create date variable" 62 | 63 | - bash: | 64 | echo "##vso[task.setvariable variable=build.binary_name]rusty_usn.exe" 65 | displayName: "Create rusty_usn variable (Windows_NT)" 66 | condition: eq( variables['Agent.OS'], 'Windows_NT' ) 67 | - bash: | 68 | echo "##vso[task.setvariable variable=build.binary_name]rusty_usn" 69 | displayName: "Create rusty_usn variable (NOT Windows_NT)" 70 | condition: ne( variables['Agent.OS'], 'Windows_NT' ) 71 | 72 | - bash: | 73 | echo "##vso[task.setvariable variable=build.binary_name2]listen_usn.exe" 74 | displayName: "Create listen_usn variable (Windows_NT)" 75 | condition: eq( variables['Agent.OS'], 'Windows_NT' ) 76 | 77 | - task: CopyFiles@2 78 | displayName: Copy 
rusty_usn 79 | inputs: 80 | sourceFolder: '$(Build.SourcesDirectory)/target/release' 81 | contents: | 82 | $(build.binary_name) 83 | targetFolder: '$(Build.BinariesDirectory)' 84 | 85 | - task: CopyFiles@2 86 | displayName: Copy listen_usn 87 | inputs: 88 | sourceFolder: '$(Build.SourcesDirectory)/target/release' 89 | contents: | 90 | $(build.binary_name2) 91 | targetFolder: '$(Build.BinariesDirectory)' 92 | condition: eq( variables['Agent.OS'], 'Windows_NT' ) 93 | 94 | - task: ArchiveFiles@2 95 | displayName: Gather assets (Non-Windows) 96 | inputs: 97 | rootFolderOrFile: '$(Build.BinariesDirectory)' 98 | archiveType: 'tar' 99 | tarCompression: 'gz' 100 | archiveFile: '$(Build.ArtifactStagingDirectory)/rusty_usn-$(build.my_tag)-$(TARGET).tar.gz' 101 | condition: ne( variables['Agent.OS'], 'Windows_NT' ) 102 | - task: ArchiveFiles@2 103 | displayName: Gather assets (Windows) 104 | inputs: 105 | rootFolderOrFile: '$(Build.BinariesDirectory)' 106 | archiveType: 'zip' 107 | archiveFile: '$(Build.ArtifactStagingDirectory)/rusty_usn-$(build.my_tag)-$(TARGET).zip' 108 | condition: eq( variables['Agent.OS'], 'Windows_NT' ) 109 | 110 | - task: GithubRelease@0 111 | displayName: Add GithubRelease (Non-Windows) 112 | condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/'), ne( variables['Agent.OS'], 'Windows_NT' )) 113 | inputs: 114 | gitHubConnection: 'Github' 115 | repositoryName: 'forensicmatt/RustyUsn' 116 | action: 'edit' 117 | target: '$(build.sourceVersion)' 118 | tagSource: 'manual' 119 | tag: '$(build.my_tag)' 120 | assets: '$(Build.ArtifactStagingDirectory)/rusty_usn-$(build.my_tag)-$(TARGET).tar.gz' 121 | title: '$(build.my_tag) - $(build.date)' 122 | assetUploadMode: 'replace' 123 | addChangeLog: false 124 | - task: GithubRelease@0 125 | displayName: Add GithubRelease (Windows) 126 | condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags/'), eq( variables['Agent.OS'], 'Windows_NT' )) 127 | inputs: 128 | gitHubConnection: 'Github' 129 | repositoryName: 'forensicmatt/RustyUsn' 130 | action: 'edit' 131 | target: '$(build.sourceVersion)' 132 | tagSource: 'manual' 133 | tag: '$(build.my_tag)' 134 | assets: '$(Build.ArtifactStagingDirectory)/rusty_usn-$(build.my_tag)-$(TARGET).zip' 135 | title: '$(build.my_tag) - $(build.date)' 136 | assetUploadMode: 'replace' 137 | addChangeLog: false -------------------------------------------------------------------------------- /examples/print_folder_mapping.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate clap; 4 | use log::LevelFilter; 5 | use std::process::exit; 6 | use clap::{App, Arg, ArgMatches}; 7 | use rusty_usn::mapping::FolderMapping; 8 | 9 | static VERSION: &'static str = "0.0.1"; 10 | 11 | 12 | fn make_app<'a, 'b>() -> App<'a, 'b> { 13 | let source_arg = Arg::with_name("source") 14 | .short("s") 15 | .long("source") 16 | .value_name("PATH") 17 | .help("The mft file.") 18 | .takes_value(true); 19 | 20 | let entry_arg = Arg::with_name("entry") 21 | .long("entry") 22 | .value_name("ENTRY") 23 | .help("The entry to lookup.") 24 | .requires("sequence") 25 | .takes_value(true); 26 | 27 | let sequence_arg = Arg::with_name("sequence") 28 | .long("sequence") 29 | .value_name("SEQUENCE") 30 | .help("The sequence to lookup.") 31 | .takes_value(true); 32 | 33 | let verbose = Arg::with_name("debug") 34 | .short("-d") 35 | .long("debug") 36 | .value_name("DEBUG") 37 | .takes_value(true) 38 | 
.possible_values(&["Off", "Error", "Warn", "Info", "Debug", "Trace"]) 39 | .help("Debug level to use."); 40 | 41 | App::new("print_folder_mapping") 42 | .version(VERSION) 43 | .author("Matthew Seyer ") 44 | .about("Print folder mapping from mft.") 45 | .arg(source_arg) 46 | .arg(entry_arg) 47 | .arg(sequence_arg) 48 | .arg(verbose) 49 | } 50 | 51 | 52 | fn set_debug_level(matches: &ArgMatches){ 53 | // Get the possible logging level supplied by the user 54 | let message_level = match matches.is_present("debug") { 55 | true => { 56 | match matches.value_of("debug") { 57 | Some("Off") => LevelFilter::Off, 58 | Some("Error") => LevelFilter::Error, 59 | Some("Warn") => LevelFilter::Warn, 60 | Some("Info") => LevelFilter::Info, 61 | Some("Debug") => LevelFilter::Debug, 62 | Some("Trace") => LevelFilter::Trace, 63 | Some(unknown) => { 64 | eprintln!("Unknown debug level [{}]", unknown); 65 | exit(-1); 66 | }, 67 | None => { 68 | LevelFilter::Off 69 | } 70 | } 71 | }, 72 | false => LevelFilter::Off 73 | }; 74 | 75 | // Create logging with debug level that prints to stderr 76 | let result = fern::Dispatch::new() 77 | .format(|out, message, record| { 78 | out.finish(format_args!( 79 | "{}[{}][{}] {}", 80 | chrono::Local::now().format("[%Y-%m-%d %H:%M:%S]"), 81 | record.target(), 82 | record.level(), 83 | message 84 | )) 85 | }) 86 | .level(message_level) 87 | .chain(std::io::stderr()) 88 | .apply(); 89 | 90 | // Ensure that logger was dispatched 91 | match result { 92 | Ok(_) => trace!("Logging as been initialized!"), 93 | Err(error) => { 94 | eprintln!("Error initializing fern logging: {}", error); 95 | exit(-1); 96 | } 97 | } 98 | } 99 | 100 | 101 | fn main() { 102 | let app = make_app(); 103 | let options = app.get_matches(); 104 | 105 | set_debug_level(&options); 106 | 107 | let source_mft = match options.is_present("source") { 108 | true => { 109 | match options.value_of("source") { 110 | Some(path_location) => { 111 | path_location 112 | }, 113 | None => { 114 | eprintln!("print_folder_mapping requires a source file."); 115 | exit(-1); 116 | } 117 | } 118 | }, 119 | false => { 120 | eprintln!("print_folder_mapping requires a source file."); 121 | exit(-1); 122 | } 123 | }; 124 | 125 | let mut mapping = match FolderMapping::from_mft_path( 126 | source_mft 127 | ) { 128 | Ok(mapping) => mapping, 129 | Err(error) => { 130 | eprintln!("error creating mapping: {}", error); 131 | exit(-1); 132 | } 133 | }; 134 | 135 | if options.is_present("entry") { 136 | let entry = options.value_of("entry").unwrap().parse::().unwrap(); 137 | let sequence = options.value_of("sequence").unwrap().parse::().unwrap(); 138 | 139 | let full_path = match mapping.enumerate_path(entry, sequence) { 140 | Some(path) => path, 141 | None => { 142 | eprintln!("No mapping found for {}-{}", entry, sequence); 143 | exit(-1); 144 | } 145 | }; 146 | println!("Full path for {}-{}: {}", entry, sequence, full_path); 147 | } else { 148 | println!("{:?}", mapping); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /examples/print_live_folder_mapping.rs: -------------------------------------------------------------------------------- 1 | use rusty_usn::liveusn::live::WindowsLiveNtfs; 2 | use std::process::exit; 3 | use serde_json; 4 | 5 | fn main() { 6 | let live_ntfs = match WindowsLiveNtfs::from_volume_path(r"\\.\C:") { 7 | Ok(ntfs) => ntfs, 8 | Err(error) => { 9 | eprintln!("Error creating WindowsLiveNtfs: {:?}", error); 10 | exit(-1); 11 | } 12 | }; 13 | 14 | eprintln!("creating 
live folder mapping..."); 15 | let folder_mapping = live_ntfs.get_folder_mapping(); 16 | let json_str = serde_json::to_string( 17 | &folder_mapping 18 | ).unwrap(); 19 | 20 | println!("{}", json_str); 21 | } -------------------------------------------------------------------------------- /examples/test_iter_records.rs: -------------------------------------------------------------------------------- 1 | use rusty_usn::usn; 2 | 3 | fn main() { 4 | let raw_buffer: &[u8] = &[ 5 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 6 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00, 7 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 8 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0x20,0x0A,0x80,0xBC,0x04,0x00,0x00,0x00, 9 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x80,0x00,0x00,0x00,0x00, 10 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00, 11 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 12 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00, 13 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 14 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0x80,0x0A,0x80,0xBC,0x04,0x00,0x00,0x00, 15 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 16 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00, 17 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 18 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00, 19 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 20 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0xE0,0x0A,0x80,0xBC,0x04,0x00,0x00,0x00, 21 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x80,0x00,0x00,0x00,0x00, 22 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00, 23 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 24 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00, 25 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 26 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0x40,0x0B,0x80,0xBC,0x04,0x00,0x00,0x00, 27 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 28 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00, 29 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 30 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00, 31 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 32 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0xA0,0x0B,0x80,0xBC,0x04,0x00,0x00,0x00, 33 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x80,0x00,0x00,0x00,0x00, 34 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00, 35 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 36 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00, 37 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 38 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0x00,0x0C,0x80,0xBC,0x04,0x00,0x00,0x00, 39 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 40 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00 41 | ]; 42 | 43 | let iter 
= usn::IterRecords::new( 44 | String::from("Test source"), 45 | raw_buffer.iter().cloned().collect(), 46 | 0, 47 | raw_buffer.len() 48 | ); 49 | 50 | for this in iter { 51 | println!("{:?}", this); 52 | } 53 | } -------------------------------------------------------------------------------- /src/bin/listen_usn.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate clap; 4 | extern crate chrono; 5 | use std::thread; 6 | use std::sync::mpsc; 7 | use log::LevelFilter; 8 | use std::process::exit; 9 | use serde_json::value::Value; 10 | use clap::{App, Arg, ArgMatches}; 11 | use std::sync::mpsc::{Sender, Receiver}; 12 | use rusty_usn::liveusn::listener::UsnVolumeListener; 13 | 14 | static VERSION: &'static str = "1.1.0"; 15 | 16 | 17 | fn make_app<'a, 'b>() -> App<'a, 'b> { 18 | let source_arg = Arg::with_name("source") 19 | .short("s") 20 | .long("source") 21 | .value_name("PATH") 22 | .help("The source volume to listen to. (example: '\\\\.\\C:')") 23 | .takes_value(true); 24 | 25 | let historical_arg = Arg::with_name("historical") 26 | .short("p") 27 | .long("historical") 28 | .help("List historical records along with listening to new changes."); 29 | 30 | let verbose = Arg::with_name("debug") 31 | .short("-d") 32 | .long("debug") 33 | .value_name("DEBUG") 34 | .takes_value(true) 35 | .possible_values(&["Off", "Error", "Warn", "Info", "Debug", "Trace"]) 36 | .help("Debug level to use."); 37 | 38 | App::new("listen_usn") 39 | .version(VERSION) 40 | .author("Matthew Seyer ") 41 | .about("USN listener written in Rust. Output is JSONL.") 42 | .arg(source_arg) 43 | .arg(historical_arg) 44 | .arg(verbose) 45 | } 46 | 47 | 48 | fn set_debug_level(matches: &ArgMatches){ 49 | // Get the possible logging level supplied by the user 50 | let message_level = match matches.is_present("debug") { 51 | true => { 52 | match matches.value_of("debug") { 53 | Some("Off") => LevelFilter::Off, 54 | Some("Error") => LevelFilter::Error, 55 | Some("Warn") => LevelFilter::Warn, 56 | Some("Info") => LevelFilter::Info, 57 | Some("Debug") => LevelFilter::Debug, 58 | Some("Trace") => LevelFilter::Trace, 59 | Some(unknown) => { 60 | eprintln!("Unknown debug level [{}]", unknown); 61 | exit(-1); 62 | }, 63 | None => { 64 | LevelFilter::Off 65 | } 66 | } 67 | }, 68 | false => LevelFilter::Off 69 | }; 70 | 71 | // Create logging with debug level that prints to stderr 72 | let result = fern::Dispatch::new() 73 | .format(|out, message, record| { 74 | out.finish(format_args!( 75 | "{}[{}][{}] {}", 76 | chrono::Local::now().format("[%Y-%m-%d %H:%M:%S]"), 77 | record.target(), 78 | record.level(), 79 | message 80 | )) 81 | }) 82 | .level(message_level) 83 | .chain(std::io::stderr()) 84 | .apply(); 85 | 86 | // Ensure that logger was dispatched 87 | match result { 88 | Ok(_) => trace!("Logging as been initialized!"), 89 | Err(error) => { 90 | eprintln!("Error initializing fern logging: {}", error); 91 | exit(-1); 92 | } 93 | } 94 | } 95 | 96 | 97 | fn process_volume(volume_str: &str, options: &ArgMatches) { 98 | info!("listening on {}", volume_str); 99 | let historical_flag = options.is_present("historical"); 100 | 101 | let (tx, rx): (Sender, Receiver) = mpsc::channel(); 102 | 103 | let volume_listener = UsnVolumeListener::new( 104 | volume_str.to_string(), 105 | historical_flag, 106 | tx.clone() 107 | ); 108 | 109 | let _thread = thread::spawn(move || { 110 | volume_listener.listen_to_volume() 111 | }); 112 | 113 | loop{ 114 | match rx.recv() { 
115 | Ok(entry) => { 116 | let json_str = serde_json::to_string( 117 | &entry 118 | ).unwrap(); 119 | println!("{}", json_str); 120 | }, 121 | Err(_) => panic!("Worker threads disconnected before the solution was found!"), 122 | } 123 | } 124 | } 125 | 126 | 127 | fn main() { 128 | let app = make_app(); 129 | let options = app.get_matches(); 130 | 131 | set_debug_level(&options); 132 | 133 | let source_volume = match options.is_present("source") { 134 | true => { 135 | match options.value_of("source") { 136 | Some(path_location) => { 137 | path_location 138 | }, 139 | None => { 140 | eprintln!("listen_usn requires a source volume."); 141 | exit(-1); 142 | } 143 | } 144 | }, 145 | false => { 146 | eprintln!("listen_usn requires a source volume."); 147 | exit(-1); 148 | } 149 | }; 150 | 151 | process_volume(source_volume, &options); 152 | } 153 | -------------------------------------------------------------------------------- /src/bin/rusty_usn.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate log; 3 | extern crate clap; 4 | extern crate chrono; 5 | use std::fs; 6 | use std::path::Path; 7 | use log::LevelFilter; 8 | use std::process::exit; 9 | use serde_json::value::Value; 10 | use clap::{App, Arg, ArgMatches}; 11 | use rusty_usn::mapping::FolderMapping; 12 | use rusty_usn::usn::{UsnParserSettings, UsnParser}; 13 | use rusty_usn::record::UsnEntry; 14 | use rusty_usn::flags; 15 | 16 | static VERSION: &'static str = "1.2.0"; 17 | 18 | 19 | fn is_a_non_negative_number(value: String) -> Result<(), String> { 20 | match value.parse::() { 21 | Ok(_) => Ok(()), 22 | Err(_) => Err("Expected value to be a positive number.".to_owned()), 23 | } 24 | } 25 | 26 | 27 | fn make_app<'a, 'b>() -> App<'a, 'b> { 28 | let source_arg = Arg::with_name("source") 29 | .short("s") 30 | .long("source") 31 | .value_name("PATH") 32 | .help("The source to parse. If the source is a directory, the directoy will \ 33 | be recursed looking for any files that end with '$J'. (Do not use a directory \ 34 | if using an MFT file.)") 35 | .takes_value(true); 36 | 37 | let usn_arg = Arg::with_name("mft") 38 | .short("m") 39 | .long("mft") 40 | .value_name("MFT") 41 | .help("The MFT to use for creating folder mapping.") 42 | .takes_value(true); 43 | 44 | let thread_count = Arg::with_name("threads") 45 | .short("-t") 46 | .long("--threads") 47 | .default_value("0") 48 | .validator(is_a_non_negative_number) 49 | .help("Sets the number of worker threads, defaults to number of CPU cores. \ 50 | If the --mft option is used, the tool can only run single threaded."); 51 | 52 | let verbose = Arg::with_name("debug") 53 | .short("-d") 54 | .long("debug") 55 | .value_name("DEBUG") 56 | .takes_value(true) 57 | .possible_values(&["Off", "Error", "Warn", "Info", "Debug", "Trace"]) 58 | .help("Debug level to use."); 59 | 60 | App::new("rusty_usn") 61 | .version(VERSION) 62 | .author("Matthew Seyer ") 63 | .about("USN Parser written in Rust. 
Output is JSONL.") 64 | .arg(source_arg) 65 | .arg(usn_arg) 66 | .arg(thread_count) 67 | .arg(verbose) 68 | } 69 | 70 | 71 | fn set_debug_level(matches: &ArgMatches){ 72 | // Get the possible logging level supplied by the user 73 | let message_level = match matches.is_present("debug") { 74 | true => { 75 | match matches.value_of("debug") { 76 | Some("Off") => LevelFilter::Off, 77 | Some("Error") => LevelFilter::Error, 78 | Some("Warn") => LevelFilter::Warn, 79 | Some("Info") => LevelFilter::Info, 80 | Some("Debug") => LevelFilter::Debug, 81 | Some("Trace") => LevelFilter::Trace, 82 | Some(unknown) => { 83 | eprintln!("Unknown debug level [{}]", unknown); 84 | exit(-1); 85 | }, 86 | None => { 87 | LevelFilter::Off 88 | } 89 | } 90 | }, 91 | false => LevelFilter::Off 92 | }; 93 | 94 | // Create logging with debug level that prints to stderr 95 | let result = fern::Dispatch::new() 96 | .format(|out, message, record| { 97 | out.finish(format_args!( 98 | "{}[{}][{}] {}", 99 | chrono::Local::now().format("[%Y-%m-%d %H:%M:%S]"), 100 | record.target(), 101 | record.level(), 102 | message 103 | )) 104 | }) 105 | .level(message_level) 106 | .chain(std::io::stderr()) 107 | .apply(); 108 | 109 | // Ensure that logger was dispatched 110 | match result { 111 | Ok(_) => trace!("Logging as been initialized!"), 112 | Err(error) => { 113 | eprintln!("Error initializing fern logging: {}", error); 114 | exit(-1); 115 | } 116 | } 117 | } 118 | 119 | 120 | fn is_directory(source: &str)->bool{ 121 | // Check if a source is a directory 122 | let metadata = match fs::metadata(source) { 123 | Ok(meta) => meta, 124 | Err(error) => { 125 | eprintln!("{} does not exists. {}", source, error); 126 | exit(-1); 127 | } 128 | }; 129 | 130 | let file_type = metadata.file_type(); 131 | file_type.is_dir() 132 | } 133 | 134 | 135 | fn process_directory(directory: &str, options: &ArgMatches) { 136 | for dir_reader in fs::read_dir(directory) { 137 | for entry_result in dir_reader { 138 | match entry_result { 139 | Ok(entry) => { 140 | let path = entry.path(); 141 | if path.is_file() { 142 | let path_string = path.into_os_string().into_string().unwrap(); 143 | if path_string.to_lowercase().ends_with("$j"){ 144 | process_file( 145 | &path_string, &options 146 | ); 147 | } 148 | } else if path.is_dir(){ 149 | let path_string = path.into_os_string().into_string().unwrap(); 150 | process_directory( 151 | &path_string, &options 152 | ); 153 | } 154 | }, 155 | Err(error) => { 156 | eprintln!("Error reading {} [{:?}]", directory, error); 157 | } 158 | } 159 | } 160 | } 161 | } 162 | 163 | 164 | fn process_file(file_location: &str, options: &ArgMatches) { 165 | info!("processing {}", file_location); 166 | 167 | let thread_option = options 168 | .value_of("threads") 169 | .and_then(|value| Some(value.parse::().expect("used validator"))); 170 | 171 | let mut threads = match (cfg!(feature = "multithreading"), thread_option) { 172 | (true, Some(number)) => number, 173 | (true, None) => 0, 174 | (false, _) => { 175 | eprintln!("turned on threads, but library was compiled without `multithreading` feature!"); 176 | 1 177 | } 178 | }; 179 | 180 | let mut folder_mapping: Option = None; 181 | 182 | if options.is_present("mft") { 183 | if threads != 1 { 184 | threads = 1; 185 | eprintln!("When using MFT to create folder map, threads can only be 1."); 186 | } 187 | 188 | let mft_path = options.value_of("mft").unwrap(); 189 | folder_mapping = match FolderMapping::from_mft_path(mft_path){ 190 | Ok(mapping) => Some(mapping), 191 | Err(err) => { 192 | 
eprintln!("Error creating folder mapping. {}", err); 193 | exit(-1); 194 | } 195 | }; 196 | } 197 | 198 | let config = UsnParserSettings::new().thread_count(threads); 199 | 200 | let mut parser = match UsnParser::from_path(file_location) { 201 | Ok(parser) => parser.with_configuration(config), 202 | Err(error) => { 203 | eprintln!("Error creating parser for {}: {}", file_location, error); 204 | return; 205 | } 206 | }; 207 | 208 | if folder_mapping.is_some(){ 209 | // Because we are going to enumerate folder names, we must 210 | // iterate records from the newest to oldest inorder to correctly 211 | // enumerate the paths. This means we must store all the records 212 | // because they are parsed from oldest to newest. Unfortunately, 213 | // this does take up more memory. 214 | let mut mapping = folder_mapping.unwrap(); 215 | let mut entry_list: Vec:: = Vec::new(); 216 | for record in parser.records(){ 217 | entry_list.push(record); 218 | } 219 | entry_list.reverse(); 220 | 221 | for entry in entry_list { 222 | let mut entry_json_value = entry.to_json_value().unwrap(); 223 | let json_map = entry_json_value.as_object_mut().unwrap(); 224 | 225 | let record = entry.record; 226 | 227 | let reason = record.get_reason_code(); 228 | let file_attributes = record.get_file_attributes(); 229 | let file_reference = record.get_file_reference(); 230 | let parent_reference = record.get_parent_reference(); 231 | let file_name = record.get_file_name(); 232 | 233 | if file_attributes.contains(flags::FileAttributes::FILE_ATTRIBUTE_DIRECTORY){ 234 | // Add mapping on a delete or rename old 235 | if reason.contains(flags::Reason::USN_REASON_FILE_DELETE) || 236 | reason.contains(flags::Reason::USN_REASON_RENAME_OLD_NAME) { 237 | mapping.add_mapping( 238 | file_reference, 239 | file_name.clone(), 240 | parent_reference 241 | ); 242 | } 243 | } 244 | 245 | // Enumerate the path of this record from the FolderMapping 246 | let full_path = match mapping.enumerate_path( 247 | parent_reference.entry, 248 | parent_reference.sequence 249 | ){ 250 | Some(path) => path, 251 | None => "[Unknown]".to_string() 252 | }; 253 | 254 | // Create teh fullname string 255 | let full_name = format!("{}/{}", full_path, file_name); 256 | 257 | // Add the fullname string to the json record 258 | let fn_value = Value::String(full_name); 259 | json_map.insert("full_name".to_string(), fn_value); 260 | 261 | // Create a json string to print 262 | let json_str = serde_json::to_string(&json_map).unwrap(); 263 | println!("{}", json_str); 264 | } 265 | } else{ 266 | for record in parser.records(){ 267 | let json_str = serde_json::to_string( 268 | &record.to_json_value().unwrap() 269 | ).unwrap(); 270 | 271 | println!("{}", json_str); 272 | } 273 | } 274 | } 275 | 276 | 277 | fn main() { 278 | let app = make_app(); 279 | let options = app.get_matches(); 280 | 281 | set_debug_level(&options); 282 | 283 | let source_location = match options.is_present("source") { 284 | true => { 285 | match options.value_of("source") { 286 | Some(path_location) => { 287 | // Verify that the supplied path exists 288 | if !Path::new(path_location).exists() { 289 | eprintln!("{} does not exist.", path_location); 290 | exit(-1); 291 | } 292 | 293 | path_location 294 | }, 295 | None => { 296 | eprintln!("usn_dump requires a source to parse."); 297 | exit(-1); 298 | } 299 | } 300 | }, 301 | false => { 302 | eprintln!("usn_dump requires a source to parse."); 303 | exit(-1); 304 | } 305 | }; 306 | 307 | if is_directory(source_location) { 308 | 
process_directory(source_location, &options); 309 | } else { 310 | process_file(source_location, &options); 311 | } 312 | } 313 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::fmt::Display; 3 | use std::io; 4 | use serde_json::error::Error as SjError; 5 | use winstructs::err::Error as WinstructError; 6 | 7 | #[derive(Debug)] 8 | pub enum ErrorKind { 9 | InvalidUsnRecord, 10 | InvalidUsnV2Record, 11 | InvalidUsnV3Record, 12 | UnsupportedVersion, 13 | WinstructError, 14 | Utf16DecodeError, 15 | IoError, 16 | SerdeJsonError, 17 | ValueError, 18 | } 19 | 20 | /// USN Record Parsing Error 21 | #[derive(Debug)] 22 | pub struct UsnError { 23 | pub message: String, 24 | pub kind: ErrorKind, 25 | } 26 | 27 | impl UsnError{ 28 | #[allow(dead_code)] 29 | pub fn json_value_error(msg: String) -> Self { 30 | UsnError { 31 | message: msg, 32 | kind: ErrorKind::ValueError, 33 | } 34 | } 35 | 36 | #[allow(dead_code)] 37 | pub fn utf16_decode_error(msg: String) -> Self { 38 | UsnError { 39 | message: msg, 40 | kind: ErrorKind::Utf16DecodeError, 41 | } 42 | } 43 | 44 | #[allow(dead_code)] 45 | pub fn unsupported_usn_version(msg: String) -> Self { 46 | UsnError { 47 | message: msg, 48 | kind: ErrorKind::UnsupportedVersion, 49 | } 50 | } 51 | 52 | #[allow(dead_code)] 53 | pub fn invalid_record(msg: String) -> Self { 54 | UsnError { 55 | message: msg, 56 | kind: ErrorKind::InvalidUsnRecord, 57 | } 58 | } 59 | 60 | #[allow(dead_code)] 61 | pub fn invalid_v2_record(msg: String) -> Self { 62 | UsnError { 63 | message: msg, 64 | kind: ErrorKind::InvalidUsnV2Record, 65 | } 66 | } 67 | 68 | #[allow(dead_code)] 69 | pub fn invalid_usn_record_length(msg: String) -> Self { 70 | UsnError { 71 | message: msg, 72 | kind: ErrorKind::InvalidUsnRecord, 73 | } 74 | } 75 | } 76 | 77 | impl From<io::Error> for UsnError { 78 | fn from(err: io::Error) -> Self { 79 | UsnError { 80 | message: format!("{}", err), 81 | kind: ErrorKind::IoError, 82 | } 83 | } 84 | } 85 | 86 | impl From<SjError> for UsnError { 87 | fn from(err: SjError) -> Self { 88 | UsnError { 89 | message: format!("{}", err), 90 | kind: ErrorKind::SerdeJsonError, 91 | } 92 | } 93 | } 94 | 95 | impl From<WinstructError> for UsnError { 96 | fn from(err: WinstructError) -> Self { 97 | UsnError { 98 | message: format!("{}", err), 99 | kind: ErrorKind::IoError, 100 | } 101 | } 102 | } 103 | 104 | impl Display for UsnError { 105 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 106 | writeln!(f, "{}", self.message) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/flags.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use serde::ser; 3 | 4 | 5 | bitflags!
{ 6 | pub struct FileAttributes: u32 { 7 | const FILE_ATTRIBUTE_ARCHIVE = 0x0000_0020; 8 | const FILE_ATTRIBUTE_COMPRESSED = 0x0000_0800; 9 | const FILE_ATTRIBUTE_DEVICE = 0x0000_0040; 10 | const FILE_ATTRIBUTE_DIRECTORY = 0x0000_0010; 11 | const FILE_ATTRIBUTE_ENCRYPTED = 0x0000_4000; 12 | const FILE_ATTRIBUTE_HIDDEN = 0x0000_0002; 13 | const FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x0000_8000; 14 | const FILE_ATTRIBUTE_NORMAL = 0x0000_0080; 15 | const FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x0000_2000; 16 | const FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x0002_0000; 17 | const FILE_ATTRIBUTE_OFFLINE = 0x0000_1000; 18 | const FILE_ATTRIBUTE_READONLY = 0x0000_0001; 19 | const FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x0040_0000; 20 | const FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x0004_0000; 21 | const FILE_ATTRIBUTE_REPARSE_POINT = 0x0000_0400; 22 | const FILE_ATTRIBUTE_SPARSE_FILE = 0x0000_0200; 23 | const FILE_ATTRIBUTE_SYSTEM = 0x0000_0004; 24 | const FILE_ATTRIBUTE_TEMPORARY = 0x0000_0100; 25 | const FILE_ATTRIBUTE_VIRTUAL = 0x0001_0000; 26 | } 27 | } 28 | bitflags! { 29 | pub struct Reason: u32 { 30 | const USN_REASON_BASIC_INFO_CHANGE = 0x0000_8000; 31 | const USN_REASON_CLOSE = 0x8000_0000; 32 | const USN_REASON_COMPRESSION_CHANGE = 0x0002_0000; 33 | const USN_REASON_DATA_EXTEND = 0x0000_0002; 34 | const USN_REASON_DATA_OVERWRITE = 0x0000_0001; 35 | const USN_REASON_DATA_TRUNCATION = 0x0000_0004; 36 | const USN_REASON_EA_CHANGE = 0x0000_0400; 37 | const USN_REASON_ENCRYPTION_CHANGE = 0x0004_0000; 38 | const USN_REASON_FILE_CREATE = 0x0000_0100; 39 | const USN_REASON_FILE_DELETE = 0x0000_0200; 40 | const USN_REASON_HARD_LINK_CHANGE = 0x0001_0000; 41 | const USN_REASON_INDEXABLE_CHANGE = 0x0000_4000; 42 | const USN_REASON_INTEGRITY_CHANGE = 0x0080_0000; 43 | const USN_REASON_NAMED_DATA_EXTEND = 0x0000_0020; 44 | const USN_REASON_NAMED_DATA_OVERWRITE = 0x0000_0010; 45 | const USN_REASON_NAMED_DATA_TRUNCATION = 0x0000_0040; 46 | const USN_REASON_OBJECT_ID_CHANGE = 0x0008_0000; 47 | const USN_REASON_RENAME_NEW_NAME = 0x0000_2000; 48 | const USN_REASON_RENAME_OLD_NAME = 0x0000_1000; 49 | const USN_REASON_REPARSE_POINT_CHANGE = 0x0010_0000; 50 | const USN_REASON_SECURITY_CHANGE = 0x0000_0800; 51 | const USN_REASON_STREAM_CHANGE = 0x0020_0000; 52 | const USN_REASON_TRANSACTED_CHANGE = 0x0040_0000; 53 | } 54 | } 55 | bitflags!
{ 56 | pub struct SourceInfo: u32 { 57 | const USN_SOURCE_AUXILIARY_DATA = 0x0000_0002; 58 | const USN_SOURCE_DATA_MANAGEMENT = 0x0000_0001; 59 | const USN_SOURCE_REPLICATION_MANAGEMENT = 0x0000_0004; 60 | const USN_SOURCE_CLIENT_REPLICATION_MANAGEMENT = 0x0000_0008; 61 | } 62 | } 63 | 64 | impl fmt::Display for FileAttributes { 65 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 66 | write!(f,"{}",self.bits()) 67 | } 68 | } 69 | 70 | impl ser::Serialize for FileAttributes { 71 | fn serialize(&self, serializer: S) -> Result 72 | where S: ser::Serializer 73 | { 74 | serializer.serialize_str(&format!("{:?}", self)) 75 | } 76 | } 77 | 78 | impl fmt::Display for Reason { 79 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 80 | write!(f,"{}",self.bits()) 81 | } 82 | } 83 | 84 | impl ser::Serialize for Reason { 85 | fn serialize(&self, serializer: S) -> Result 86 | where S: ser::Serializer 87 | { 88 | serializer.serialize_str(&format!("{:?}", self)) 89 | } 90 | } 91 | 92 | impl fmt::Display for SourceInfo { 93 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 94 | write!(f,"{}",self.bits()) 95 | } 96 | } 97 | 98 | impl ser::Serialize for SourceInfo { 99 | fn serialize(&self, serializer: S) -> Result 100 | where S: ser::Serializer 101 | { 102 | serializer.serialize_str(&format!("{:?}", self)) 103 | } 104 | } -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] extern crate log; 2 | #[macro_use] extern crate bitflags; 3 | #[macro_use] extern crate lazy_static; 4 | 5 | // Our modules 6 | pub mod usn; 7 | pub mod record; 8 | pub mod error; 9 | pub mod utils; 10 | pub mod flags; 11 | pub mod liveusn; 12 | pub mod mapping; 13 | 14 | 15 | use std::io; 16 | use std::io::{Read, Seek, SeekFrom}; 17 | 18 | pub trait ReadSeek: Read + Seek { 19 | fn tell(&mut self) -> io::Result { 20 | self.seek(SeekFrom::Current(0)) 21 | } 22 | } 23 | 24 | impl ReadSeek for T {} 25 | -------------------------------------------------------------------------------- /src/liveusn/error.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::ptr; 3 | use mft::err::Error as MftError; 4 | use winapi::shared::ntdef::WCHAR; 5 | use winapi::um::winbase::{ 6 | FormatMessageW, 7 | FORMAT_MESSAGE_FROM_SYSTEM, 8 | FORMAT_MESSAGE_IGNORE_INSERTS, 9 | }; 10 | use winapi::um::errhandlingapi::GetLastError; 11 | 12 | 13 | #[derive(Debug)] 14 | pub enum ErrorKind { 15 | IoError, 16 | MftError, 17 | InvalidUsnJournalData, 18 | MftAttributeError, 19 | WindowsError 20 | } 21 | 22 | #[derive(Debug)] 23 | pub struct UsnLiveError { 24 | pub message: String, 25 | pub kind: ErrorKind, 26 | } 27 | 28 | impl UsnLiveError { 29 | #[allow(dead_code)] 30 | pub fn unable_to_get_name_attr(message: &str) -> Self{ 31 | UsnLiveError { 32 | message: message.to_owned(), 33 | kind: ErrorKind::MftAttributeError 34 | } 35 | } 36 | 37 | #[allow(dead_code)] 38 | pub fn from_windows_error_code(err_code: u32) -> Self{ 39 | let err_str = format_win_error( 40 | Some(err_code) 41 | ); 42 | 43 | UsnLiveError { 44 | message: err_str, 45 | kind: ErrorKind::WindowsError 46 | } 47 | } 48 | 49 | #[allow(dead_code)] 50 | pub fn from_windows_last_error() -> Self{ 51 | let err_str = format_win_error(None); 52 | UsnLiveError { 53 | message: err_str, 54 | kind: ErrorKind::WindowsError 55 | } 56 | } 57 | 58 | #[allow(dead_code)] 59 | pub fn 
invalid_usn_journal_data(size: usize)->Self{ 60 | let err_str = format!("Unknown size for UsnJournalData structure: {}", size); 61 | 62 | UsnLiveError { 63 | message: err_str, 64 | kind: ErrorKind::InvalidUsnJournalData 65 | } 66 | } 67 | 68 | #[allow(dead_code)] 69 | pub fn invalid_thing(message: &str)->Self{ 70 | UsnLiveError { 71 | message: message.to_owned(), 72 | kind: ErrorKind::WindowsError 73 | } 74 | } 75 | } 76 | 77 | impl From for UsnLiveError { 78 | fn from(err: MftError) -> Self { 79 | UsnLiveError { 80 | message: format!("{}", err), 81 | kind: ErrorKind::MftError 82 | } 83 | } 84 | } 85 | 86 | impl From for UsnLiveError { 87 | fn from(err: io::Error) -> Self { 88 | UsnLiveError { 89 | message: format!("{}", err), 90 | kind: ErrorKind::IoError, 91 | } 92 | } 93 | } 94 | 95 | 96 | pub fn format_win_error(error_code: Option) -> String { 97 | let mut message_buffer = [0 as WCHAR; 2048]; 98 | let error_num: u32 = match error_code { 99 | Some(code) => code, 100 | None => unsafe { GetLastError() } 101 | }; 102 | 103 | let message_size = unsafe { 104 | FormatMessageW( 105 | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 106 | ptr::null_mut(), 107 | error_num, 108 | 0, 109 | message_buffer.as_mut_ptr(), 110 | message_buffer.len() as u32, 111 | ptr::null_mut(), 112 | ) 113 | }; 114 | 115 | if message_size == 0 { 116 | return format_win_error(None); 117 | } else { 118 | let err_msg = String::from_utf16( 119 | &message_buffer[..message_size as usize] 120 | ).unwrap(); 121 | return err_msg; 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/liveusn/listener.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | use std::fs::File; 3 | use std::process::exit; 4 | use std::time::Duration; 5 | use std::sync::mpsc::Sender; 6 | use serde_json::value::Value; 7 | use byteorder::{ByteOrder, LittleEndian}; 8 | use crate::flags; 9 | use crate::record::EntryMeta; 10 | use crate::liveusn::winfuncs::{ 11 | query_usn_journal, 12 | read_usn_journal, 13 | }; 14 | use crate::usn::IterRecordsByIndex; 15 | use crate::liveusn::error::UsnLiveError; 16 | use crate::liveusn::live::WindowsLiveNtfs; 17 | use crate::liveusn::ntfs::ReadUsnJournalData; 18 | 19 | 20 | pub struct UsnVolumeListener { 21 | source: String, 22 | sleep_ms: u64, 23 | historical_flag: bool, 24 | sender: Sender 25 | } 26 | 27 | impl UsnVolumeListener { 28 | pub fn new(source: String, historical_flag: bool, sender: Sender) -> Self { 29 | let sleep_ms = 100; 30 | 31 | UsnVolumeListener { 32 | source, 33 | sleep_ms, 34 | historical_flag, 35 | sender 36 | } 37 | } 38 | 39 | pub fn listen_to_volume(self) -> Result<(), UsnLiveError> { 40 | let live_volume = WindowsLiveNtfs::from_volume_path( 41 | &self.source 42 | )?; 43 | 44 | let mut mapping = live_volume.get_folder_mapping(); 45 | 46 | let file_handle = match File::open(self.source.clone()) { 47 | Ok(handle) => handle, 48 | Err(error) => { 49 | eprintln!("{}", error); 50 | exit(-1); 51 | } 52 | }; 53 | 54 | let usn_journal_data = match query_usn_journal(&file_handle) { 55 | Ok(journal_info) => { 56 | debug!("{:#?}", journal_info); 57 | journal_info 58 | }, 59 | Err(error) => { 60 | eprintln!("{:?}", error); 61 | exit(-1); 62 | } 63 | }; 64 | 65 | let mut next_start_usn: u64 = usn_journal_data.get_next_usn(); 66 | let catch_up_usn = next_start_usn; 67 | 68 | if self.historical_flag { 69 | next_start_usn = 0; 70 | } 71 | 72 | loop { 73 | let mut buffer = vec![0u8; 4096]; 74 | 
75 | let read_data = ReadUsnJournalData::from_usn_journal_data( 76 | usn_journal_data.clone() 77 | ).with_start_usn(next_start_usn); 78 | 79 | let count: u64 = match read_usn_journal(&file_handle, read_data, &mut buffer) { 80 | Ok(buffer) => { 81 | // The first 8 bytes are the usn of the next record NOT in the buffer, 82 | // use this value as the next_start_usn 83 | next_start_usn = LittleEndian::read_u64( 84 | &buffer[0..8] 85 | ); 86 | 87 | let entry_meta = EntryMeta::new( 88 | &self.source, 0 89 | ); 90 | 91 | let record_iterator = IterRecordsByIndex::new( 92 | entry_meta, 93 | buffer[8..].to_vec() 94 | ); 95 | 96 | let mut record_count: u64 = 0; 97 | for usn_entry in record_iterator { 98 | let entry_usn = usn_entry.record.get_usn(); 99 | let file_name = usn_entry.record.get_file_name(); 100 | let file_ref = usn_entry.record.get_file_reference(); 101 | let reason_code = usn_entry.record.get_reason_code(); 102 | let parent_ref = usn_entry.record.get_parent_reference(); 103 | let file_attributes = usn_entry.record.get_file_attributes(); 104 | 105 | if file_attributes.contains(flags::FileAttributes::FILE_ATTRIBUTE_DIRECTORY){ 106 | if reason_code.contains(flags::Reason::USN_REASON_RENAME_OLD_NAME) { 107 | // We can remove old names from the mapping because we no longer need these. 108 | // On new names, we add the name to the mapping. 109 | mapping.remove_mapping( 110 | file_ref 111 | ); 112 | } 113 | else if reason_code.contains(flags::Reason::USN_REASON_FILE_DELETE) { 114 | // If we are starting from historical entries, we need to add deleted 115 | // entries to the map until we catch up to the current system, then we can 116 | // start removing deleted entries. This is because our mapping cannot 117 | // get unallocated entries from the MFT via the Windows API. 
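                            // catch_up_usn was captured from the journal's next USN before
                            // next_start_usn was rewound to 0 for historical processing.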
118 | if self.historical_flag && entry_usn < catch_up_usn { 119 | mapping.add_mapping( 120 | file_ref, 121 | file_name.clone(), 122 | parent_ref 123 | ) 124 | } else { 125 | mapping.remove_mapping( 126 | file_ref 127 | ); 128 | } 129 | } else if reason_code.contains(flags::Reason::USN_REASON_RENAME_NEW_NAME) || 130 | reason_code.contains(flags::Reason::USN_REASON_FILE_CREATE) { 131 | // If its a new name or creation, we need to updated the mapping 132 | mapping.add_mapping( 133 | file_ref, 134 | file_name.clone(), 135 | parent_ref 136 | ) 137 | } 138 | } 139 | 140 | // Enumerate the path of this record from the FolderMapping 141 | let full_path = match mapping.enumerate_path( 142 | parent_ref.entry, 143 | parent_ref.sequence 144 | ){ 145 | Some(path) => path, 146 | None => "[]".to_string() 147 | }; 148 | 149 | let mut entry_value = match usn_entry.to_json_value(){ 150 | Ok(value) => value, 151 | Err(e) => { 152 | eprintln!("Error serializing entry to json value {:?}: {:?}", usn_entry, e); 153 | continue; 154 | } 155 | }; 156 | 157 | let full_file_name = format!("{}/{}", &full_path, &file_name); 158 | 159 | let map = entry_value.as_object_mut().unwrap(); 160 | map.insert( 161 | "full_path".to_string(), 162 | Value::String(full_file_name) 163 | ); 164 | 165 | match self.sender.send(entry_value) { 166 | Ok(_) => { 167 | record_count += 1; 168 | }, 169 | Err(error) => { 170 | eprintln!("error sending usn entry: {:?}", error); 171 | } 172 | } 173 | } 174 | 175 | record_count 176 | }, 177 | Err(error) => { 178 | println!("{:#?}", error); 179 | break 180 | } 181 | }; 182 | 183 | // need to sleep to minimize resources 184 | if count == 0 { 185 | thread::sleep( 186 | Duration::from_millis( 187 | self.sleep_ms 188 | ) 189 | ); 190 | } 191 | } 192 | 193 | Ok(()) 194 | } 195 | } -------------------------------------------------------------------------------- /src/liveusn/live.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io::Read; 3 | use mft::MftEntry; 4 | use byteorder::{ReadBytesExt, LittleEndian}; 5 | use crate::mapping::FolderMapping; 6 | use crate::liveusn::winfuncs; 7 | use crate::liveusn::error::UsnLiveError; 8 | use crate::liveusn::ntfs::NtfsVolumeData; 9 | use winstructs::ntfs::mft_reference::MftReference; 10 | 11 | 12 | #[derive(Debug)] 13 | pub struct MftOutputBuffer { 14 | file_reference_number: u64, 15 | file_record_length: u32, 16 | file_record_buffer: Vec 17 | } 18 | 19 | impl MftOutputBuffer { 20 | pub fn from_buffer(mut raw_buffer: T) -> Result { 21 | let file_reference_number = raw_buffer.read_u64::()?; 22 | let file_record_length = raw_buffer.read_u32::()?; 23 | let mut file_record_buffer = vec![0; file_record_length as usize]; 24 | 25 | raw_buffer.read_exact(&mut file_record_buffer)?; 26 | 27 | Ok( 28 | MftOutputBuffer { 29 | file_reference_number, 30 | file_record_length, 31 | file_record_buffer 32 | } 33 | ) 34 | } 35 | 36 | pub fn buffer_as_hex(&self) -> String { 37 | hex::encode(&self.file_record_buffer) 38 | } 39 | 40 | pub fn as_entry(&self) -> Result { 41 | Ok(MftEntry::from_buffer_skip_fixup( 42 | self.file_record_buffer.clone(), 43 | self.file_reference_number 44 | )?) 
45 | } 46 | } 47 | 48 | 49 | /// Struct for interacting with a live NTFS volume via Windows API 50 | /// 51 | #[derive(Debug)] 52 | pub struct WindowsLiveNtfs { 53 | volume_path: String, 54 | volume_handle: File, 55 | ntfs_volume_data: NtfsVolumeData 56 | } 57 | impl WindowsLiveNtfs { 58 | pub fn from_volume_path(volume_path: &str) -> Result { 59 | let file_handle = File::open(&volume_path)?; 60 | let ntfs_volume_data = winfuncs::get_ntfs_volume_data( 61 | &file_handle 62 | )?; 63 | 64 | Ok( 65 | WindowsLiveNtfs { 66 | volume_path: volume_path.to_string(), 67 | volume_handle: file_handle, 68 | ntfs_volume_data: ntfs_volume_data 69 | } 70 | ) 71 | } 72 | 73 | pub fn get_folder_mapping(self) -> FolderMapping { 74 | // Create the folder mapping 75 | let mut folder_mapping = FolderMapping::new(); 76 | 77 | // Iterate over live MFT entries 78 | let entry_iter = self.get_entry_iterator(); 79 | for entry_result in entry_iter { 80 | match entry_result { 81 | Ok(entry) => { 82 | // We only want directories 83 | if !entry.is_dir() { 84 | continue; 85 | } 86 | 87 | let mut l_entry = entry.header.record_number; 88 | let mut l_sequence = entry.header.sequence; 89 | 90 | // if entry is child, set entry and sequence to parent 91 | if entry.header.base_reference.entry != 0 { 92 | l_entry = entry.header.base_reference.entry; 93 | l_sequence = entry.header.base_reference.sequence; 94 | } 95 | 96 | // Get the best name attribute or 97 | let fn_attr = match entry.find_best_name_attribute() { 98 | Some(fn_attr) => fn_attr, 99 | None => continue 100 | }; 101 | 102 | // Entry reference for our key 103 | let entry_reference = MftReference::new( 104 | l_entry, 105 | l_sequence 106 | ); 107 | 108 | // Add this entry to the folder mapping 109 | folder_mapping.add_mapping( 110 | entry_reference, 111 | fn_attr.name, 112 | fn_attr.parent 113 | ); 114 | }, 115 | Err(error) => { 116 | eprintln!("{:?}", error); 117 | } 118 | } 119 | } 120 | 121 | folder_mapping 122 | } 123 | 124 | fn get_entry_buffer(&mut self, entry: i64) -> Result { 125 | let raw_buffer = winfuncs::query_file_record( 126 | &self.volume_handle, 127 | entry, 128 | self.ntfs_volume_data.bytes_per_file_record_segment 129 | )?; 130 | 131 | MftOutputBuffer::from_buffer( 132 | &raw_buffer[..] 133 | ) 134 | } 135 | 136 | pub fn get_entry(&mut self, entry: i64) -> Result { 137 | let mft_buffer = self.get_entry_buffer(entry)?; 138 | mft_buffer.as_entry() 139 | } 140 | 141 | pub fn get_max_entry(&self) -> u64 { 142 | self.ntfs_volume_data.get_max_entry() 143 | } 144 | 145 | pub fn get_entry_iterator(self) -> LiveMftEntryIterator { 146 | let last_entry = self.get_max_entry(); 147 | 148 | LiveMftEntryIterator { 149 | live_ntfs: self, 150 | current_entry: last_entry as i64 - 1 151 | } 152 | } 153 | } 154 | 155 | 156 | /// Iterator to iterate mft entries on a live NTFS volume. The iterator 157 | /// returns entries in reverse order (highest to lowest) which maximizes 158 | /// performance due to Windows API because FSCTL_GET_NTFS_FILE_RECORD 159 | /// retrieves the first file record that is in use and is of a lesser than or equal 160 | /// ordinal value to the requested file reference number. 
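/// In practice, querying an unallocated entry returns the nearest in-use entry at or
/// below it, so iterating downward skips runs of unallocated records in a single call.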
161 | /// The current entry must start at the highest to lowest and be one less than 162 | /// the max entry 163 | /// 164 | pub struct LiveMftEntryIterator { 165 | live_ntfs: WindowsLiveNtfs, 166 | current_entry: i64 167 | } 168 | impl Iterator for LiveMftEntryIterator { 169 | type Item = Result; 170 | 171 | // It is fastest to iterate file entries from highest to lowest becuase 172 | // the Windows API fetches the lowest allocated entry if an entry is queried 173 | // that is unallocated. This prevents us from having to iterate through blocks 174 | // of unallocated entries (in which case the same entry will be returned till the 175 | // next allocated) until we find the next allocated. 176 | fn next(&mut self) -> Option> { 177 | while self.current_entry >= 0 { 178 | // Get MFT entry for current entry 179 | let mft_entry = match self.live_ntfs.get_entry( 180 | self.current_entry as i64 181 | ) { 182 | Ok(entry) => entry, 183 | Err(error) => { 184 | self.current_entry -= 1; 185 | return Some(Err(error)) 186 | } 187 | }; 188 | 189 | // Deincrement the entry by 1 190 | self.current_entry = mft_entry.header.record_number as i64 - 1; 191 | 192 | return Some(Ok(mft_entry)); 193 | } 194 | 195 | None 196 | } 197 | } -------------------------------------------------------------------------------- /src/liveusn/mod.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "windows")] 2 | pub mod ntfs; 3 | pub mod error; 4 | pub mod live; 5 | pub mod listener; 6 | pub mod winfuncs; -------------------------------------------------------------------------------- /src/liveusn/ntfs.rs: -------------------------------------------------------------------------------- 1 | use serde::Serialize; 2 | use byteorder::{ByteOrder, LittleEndian}; 3 | use crate::liveusn::error::UsnLiveError; 4 | 5 | 6 | /// This structure represents a NTFS_VOLUME_DATA_BUFFER structure 7 | /// https://msdn.microsoft.com/en-us/windows/desktop/aa365256 8 | /// 96 Bytes 9 | #[derive(Serialize, Debug)] 10 | pub struct NtfsVolumeData { 11 | pub volume_serial_number: i64, 12 | pub number_sectors: i64, 13 | pub total_clusters: i64, 14 | pub free_clusters: i64, 15 | pub total_reserved: i64, 16 | pub bytes_per_sector: u32, 17 | pub bytes_per_cluster: u32, 18 | pub bytes_per_file_record_segment: u32, 19 | pub clusters_per_file_record_segment: u32, 20 | pub mft_valid_data_length: i64, 21 | pub mft_start_lcn: i64, 22 | pub mft_2_start_lcn: i64, 23 | pub mft_zone_start: i64, 24 | pub mft_zone_end: i64, 25 | pub ntfs_extended_volume_data: Option, 26 | } 27 | impl NtfsVolumeData { 28 | pub fn from_buffer(buffer: &[u8]) -> Self { 29 | let buffer_size = buffer.len(); 30 | let volume_serial_number = LittleEndian::read_i64(&buffer[0..8]); 31 | let number_sectors = LittleEndian::read_i64(&buffer[8..16]); 32 | let total_clusters = LittleEndian::read_i64(&buffer[16..24]); 33 | let free_clusters = LittleEndian::read_i64(&buffer[24..32]); 34 | let total_reserved = LittleEndian::read_i64(&buffer[32..40]); 35 | let bytes_per_sector = LittleEndian::read_u32(&buffer[40..44]); 36 | let bytes_per_cluster = LittleEndian::read_u32(&buffer[44..48]); 37 | let bytes_per_file_record_segment = LittleEndian::read_u32(&buffer[48..52]); 38 | let clusters_per_file_record_segment = LittleEndian::read_u32(&buffer[52..56]); 39 | let mft_valid_data_length = LittleEndian::read_i64(&buffer[56..64]); 40 | let mft_start_lcn = LittleEndian::read_i64(&buffer[64..72]); 41 | let mft_2_start_lcn = LittleEndian::read_i64(&buffer[72..80]); 
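        // Offsets follow the fixed 96-byte NTFS_VOLUME_DATA_BUFFER layout; any bytes
        // past offset 96 belong to the extended volume data parsed below.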
42 | let mft_zone_start = LittleEndian::read_i64(&buffer[80..88]); 43 | let mft_zone_end = LittleEndian::read_i64(&buffer[88..96]); 44 | 45 | let mut ntfs_extended_volume_data = None; 46 | if buffer_size >= 128 as usize { 47 | ntfs_extended_volume_data = Some( 48 | NtfsExtendedVolumeData::from_buffer( 49 | &buffer[96..] 50 | ) 51 | ) 52 | } 53 | 54 | NtfsVolumeData { 55 | volume_serial_number, 56 | number_sectors, 57 | total_clusters, 58 | free_clusters, 59 | total_reserved, 60 | bytes_per_sector, 61 | bytes_per_cluster, 62 | bytes_per_file_record_segment, 63 | clusters_per_file_record_segment, 64 | mft_valid_data_length, 65 | mft_start_lcn, 66 | mft_2_start_lcn, 67 | mft_zone_start, 68 | mft_zone_end, 69 | ntfs_extended_volume_data 70 | } 71 | } 72 | 73 | pub fn get_max_entry(&self) -> u64 { 74 | self.mft_valid_data_length as u64 / self.bytes_per_file_record_segment as u64 75 | } 76 | } 77 | 78 | 79 | /// This structure represents a NTFS_EXTENDED_VOLUME_DATA structure 80 | /// https://docs.microsoft.com/en-us/windows/win32/api/winioctl/ns-winioctl-ntfs_extended_volume_data 81 | /// 32 Bytes 82 | #[derive(Serialize, Debug)] 83 | pub struct NtfsExtendedVolumeData { 84 | pub byte_count: u32, 85 | pub major_version: u16, 86 | pub minor_version: u16, 87 | pub bytes_per_physical_sector: u32, 88 | pub lfs_major_version: u16, 89 | pub lfs_minor_version: u16, 90 | pub max_device_trim_extent_count: u32, 91 | pub max_device_trim_byte_count: u32, 92 | pub max_volume_trim_extent_count: u32, 93 | pub max_volume_trim_byte_count: u32, 94 | } 95 | impl NtfsExtendedVolumeData { 96 | pub fn from_buffer(buffer: &[u8]) -> Self { 97 | let byte_count = LittleEndian::read_u32(&buffer[0..4]); 98 | let major_version = LittleEndian::read_u16(&buffer[4..6]); 99 | let minor_version = LittleEndian::read_u16(&buffer[6..8]); 100 | let bytes_per_physical_sector = LittleEndian::read_u32(&buffer[8..12]); 101 | let lfs_major_version = LittleEndian::read_u16(&buffer[12..14]); 102 | let lfs_minor_version = LittleEndian::read_u16(&buffer[14..16]); 103 | let max_device_trim_extent_count = LittleEndian::read_u32(&buffer[16..20]); 104 | let max_device_trim_byte_count = LittleEndian::read_u32(&buffer[20..24]); 105 | let max_volume_trim_extent_count = LittleEndian::read_u32(&buffer[24..28]); 106 | let max_volume_trim_byte_count = LittleEndian::read_u32(&buffer[28..32]); 107 | 108 | NtfsExtendedVolumeData { 109 | byte_count, 110 | major_version, 111 | minor_version, 112 | bytes_per_physical_sector, 113 | lfs_major_version, 114 | lfs_minor_version, 115 | max_device_trim_extent_count, 116 | max_device_trim_byte_count, 117 | max_volume_trim_extent_count, 118 | max_volume_trim_byte_count, 119 | } 120 | } 121 | } 122 | 123 | 124 | /// Wrapper for the different USN_JOURNAL_DATA versions. 
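/// The concrete version is inferred from the size of the buffer returned by
/// FSCTL_QUERY_USN_JOURNAL: 56 bytes is V0, 60 is V1, and 80 is V2.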
125 | #[derive(Debug, Clone)] 126 | pub enum UsnJournalData { 127 | V0(UsnJournalDataV0), 128 | V1(UsnJournalDataV1), 129 | V2(UsnJournalDataV2) 130 | } 131 | impl UsnJournalData { 132 | pub fn new(buffer: &[u8]) -> Result { 133 | match buffer.len() { 134 | 56 => { 135 | return Ok( 136 | UsnJournalData::V0( 137 | UsnJournalDataV0::new(&buffer) 138 | ) 139 | ); 140 | }, 141 | 60 => { 142 | return Ok( 143 | UsnJournalData::V1( 144 | UsnJournalDataV1::new(&buffer) 145 | ) 146 | ); 147 | }, 148 | 80 => { 149 | return Ok( 150 | UsnJournalData::V2( 151 | UsnJournalDataV2::new(&buffer) 152 | ) 153 | ); 154 | }, 155 | other => { 156 | return Err( 157 | UsnLiveError::invalid_usn_journal_data(other) 158 | ); 159 | } 160 | } 161 | } 162 | 163 | pub fn get_next_usn(&self) -> u64 { 164 | match self { 165 | UsnJournalData::V0(jd) => jd.next_usn, 166 | UsnJournalData::V1(jd) => jd.next_usn, 167 | UsnJournalData::V2(jd) => jd.next_usn, 168 | } 169 | } 170 | } 171 | 172 | 173 | /// Represents a USN_JOURNAL_DATA_V0 structure 174 | /// https://docs.microsoft.com/en-us/windows/win32/api/winioctl/ns-winioctl-usn_journal_data_v0 175 | /// Size 56 176 | #[derive(Debug, Clone)] 177 | pub struct UsnJournalDataV0 { 178 | usn_jounral_id: u64, 179 | first_usn: u64, 180 | next_usn: u64, 181 | lowest_valid_usn: u64, 182 | max_usn: u64, 183 | maximum_size: u64, 184 | allocation_delta: u64, 185 | } 186 | impl UsnJournalDataV0 { 187 | fn new(buffer: &[u8]) -> UsnJournalDataV0 { 188 | let usn_jounral_id = LittleEndian::read_u64(&buffer[0..8]); 189 | let first_usn = LittleEndian::read_u64(&buffer[8..16]); 190 | let next_usn = LittleEndian::read_u64(&buffer[16..24]); 191 | let lowest_valid_usn = LittleEndian::read_u64(&buffer[24..32]); 192 | let max_usn = LittleEndian::read_u64(&buffer[32..40]); 193 | let maximum_size = LittleEndian::read_u64(&buffer[40..48]); 194 | let allocation_delta = LittleEndian::read_u64(&buffer[48..56]); 195 | 196 | return UsnJournalDataV0 { 197 | usn_jounral_id, 198 | first_usn, 199 | next_usn, 200 | lowest_valid_usn, 201 | max_usn, 202 | maximum_size, 203 | allocation_delta, 204 | } 205 | } 206 | } 207 | 208 | 209 | /// Represents a USN_JOURNAL_DATA_V1 structure 210 | /// https://docs.microsoft.com/en-us/windows/desktop/api/winioctl/ns-winioctl-usn_journal_data_v1 211 | /// Size 60 212 | #[derive(Debug, Clone)] 213 | pub struct UsnJournalDataV1 { 214 | usn_jounral_id: u64, 215 | first_usn: u64, 216 | next_usn: u64, 217 | lowest_valid_usn: u64, 218 | max_usn: u64, 219 | maximum_size: u64, 220 | allocation_delta: u64, 221 | min_major_version: u16, 222 | max_major_version: u16, 223 | } 224 | impl UsnJournalDataV1 { 225 | fn new(buffer: &[u8]) -> UsnJournalDataV1 { 226 | let usn_jounral_id = LittleEndian::read_u64(&buffer[0..8]); 227 | let first_usn = LittleEndian::read_u64(&buffer[8..16]); 228 | let next_usn = LittleEndian::read_u64(&buffer[16..24]); 229 | let lowest_valid_usn = LittleEndian::read_u64(&buffer[24..32]); 230 | let max_usn = LittleEndian::read_u64(&buffer[32..40]); 231 | let maximum_size = LittleEndian::read_u64(&buffer[40..48]); 232 | let allocation_delta = LittleEndian::read_u64(&buffer[48..56]); 233 | let min_major_version = LittleEndian::read_u16(&buffer[56..58]); 234 | let max_major_version = LittleEndian::read_u16(&buffer[58..60]); 235 | 236 | return UsnJournalDataV1 { 237 | usn_jounral_id, 238 | first_usn, 239 | next_usn, 240 | lowest_valid_usn, 241 | max_usn, 242 | maximum_size, 243 | allocation_delta, 244 | min_major_version, 245 | max_major_version, 246 | } 247 | } 248 | } 249 
| 250 | 251 | /// Represents a USN_JOURNAL_DATA_V2 structure 252 | /// https://docs.microsoft.com/en-us/windows/desktop/api/winioctl/ns-winioctl-usn_journal_data_v2 253 | /// Size 80 254 | #[derive(Debug, Clone)] 255 | pub struct UsnJournalDataV2 { 256 | usn_jounral_id: u64, 257 | first_usn: u64, 258 | next_usn: u64, 259 | lowest_valid_usn: u64, 260 | max_usn: u64, 261 | maximum_size: u64, 262 | allocation_delta: u64, 263 | min_major_version: u16, 264 | max_major_version: u16, 265 | flags: u32, 266 | range_track_chunk_size: u64, 267 | range_track_file_size_threshold: i64, 268 | } 269 | impl UsnJournalDataV2 { 270 | fn new(buffer: &[u8]) -> UsnJournalDataV2 { 271 | let usn_jounral_id = LittleEndian::read_u64(&buffer[0..8]); 272 | let first_usn = LittleEndian::read_u64(&buffer[8..16]); 273 | let next_usn = LittleEndian::read_u64(&buffer[16..24]); 274 | let lowest_valid_usn = LittleEndian::read_u64(&buffer[24..32]); 275 | let max_usn = LittleEndian::read_u64(&buffer[32..40]); 276 | let maximum_size = LittleEndian::read_u64(&buffer[40..48]); 277 | let allocation_delta = LittleEndian::read_u64(&buffer[48..56]); 278 | let min_major_version = LittleEndian::read_u16(&buffer[56..58]); 279 | let max_major_version = LittleEndian::read_u16(&buffer[58..60]); 280 | let flags = LittleEndian::read_u32(&buffer[60..64]); 281 | let range_track_chunk_size = LittleEndian::read_u64(&buffer[64..72]); 282 | let range_track_file_size_threshold = LittleEndian::read_i64(&buffer[72..80]); 283 | 284 | return UsnJournalDataV2 { 285 | usn_jounral_id, 286 | first_usn, 287 | next_usn, 288 | lowest_valid_usn, 289 | max_usn, 290 | maximum_size, 291 | allocation_delta, 292 | min_major_version, 293 | max_major_version, 294 | flags, 295 | range_track_chunk_size, 296 | range_track_file_size_threshold, 297 | } 298 | } 299 | } 300 | 301 | 302 | /// Wrapper for the different READ_USN_JOURNAL_DATA versions. 
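/// Only V0 and V1 request layouts exist; V2 journal data is mapped onto a V1 read
/// request in `from_usn_journal_data` below.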
303 | #[derive(Debug, Clone)] 304 | pub enum ReadUsnJournalData { 305 | V0(ReadUsnJournalDataV0), 306 | V1(ReadUsnJournalDataV1), 307 | } 308 | impl ReadUsnJournalData { 309 | pub fn from_usn_journal_data(journal_data: UsnJournalData) -> ReadUsnJournalData { 310 | match journal_data { 311 | UsnJournalData::V0(journal_data_v0) => { 312 | return ReadUsnJournalData::V0( 313 | ReadUsnJournalDataV0::new( 314 | journal_data_v0.first_usn, 315 | journal_data_v0.usn_jounral_id 316 | ) 317 | ); 318 | }, 319 | UsnJournalData::V1(journal_data_v1) => { 320 | return ReadUsnJournalData::V1( 321 | ReadUsnJournalDataV1::new( 322 | journal_data_v1.first_usn, 323 | journal_data_v1.usn_jounral_id, 324 | journal_data_v1.min_major_version, 325 | journal_data_v1.max_major_version 326 | ) 327 | ); 328 | }, 329 | UsnJournalData::V2(journal_data_v2) => { 330 | return ReadUsnJournalData::V1( 331 | ReadUsnJournalDataV1::new( 332 | journal_data_v2.first_usn, 333 | journal_data_v2.usn_jounral_id, 334 | journal_data_v2.min_major_version, 335 | journal_data_v2.max_major_version 336 | ) 337 | ); 338 | } 339 | } 340 | } 341 | 342 | pub fn with_reason_mask(mut self, reason_mask: u32) -> Self { 343 | match self { 344 | ReadUsnJournalData::V0(ref mut read_data_v0) => { 345 | read_data_v0.reason_mask = reason_mask 346 | }, 347 | ReadUsnJournalData::V1(ref mut read_data_v1) => { 348 | read_data_v1.reason_mask = reason_mask 349 | } 350 | } 351 | 352 | self 353 | } 354 | 355 | pub fn with_start_usn(mut self, start_usn: u64) -> Self { 356 | match self { 357 | ReadUsnJournalData::V0(ref mut read_data_v0) => { 358 | read_data_v0.start_usn = start_usn 359 | }, 360 | ReadUsnJournalData::V1(ref mut read_data_v1) => { 361 | read_data_v1.start_usn = start_usn 362 | } 363 | } 364 | 365 | self 366 | } 367 | } 368 | 369 | 370 | /// Represents a READ_USN_JOURNAL_DATA_V0 structure 371 | /// https://docs.microsoft.com/en-us/windows/desktop/api/winioctl/ns-winioctl-read_usn_journal_data_v0 372 | /// Size 40 373 | #[derive(Debug, Clone)] 374 | #[repr(C)] 375 | pub struct ReadUsnJournalDataV0 { 376 | start_usn: u64, 377 | reason_mask: u32, 378 | return_only_on_close: u32, 379 | timeout: u64, 380 | bytes_to_wait_for: u64, 381 | usn_journal_id: u64, 382 | } 383 | impl ReadUsnJournalDataV0 { 384 | fn new(start_usn: u64, usn_journal_id: u64) -> ReadUsnJournalDataV0 { 385 | let reason_mask = 0xffffffff; 386 | let return_only_on_close = 0; 387 | let timeout = 0; 388 | let bytes_to_wait_for = 0; 389 | 390 | return ReadUsnJournalDataV0 { 391 | start_usn, 392 | reason_mask, 393 | return_only_on_close, 394 | timeout, 395 | bytes_to_wait_for, 396 | usn_journal_id, 397 | } 398 | } 399 | } 400 | 401 | 402 | /// Represents a READ_USN_JOURNAL_DATA_V1 structure 403 | /// https://docs.microsoft.com/en-us/windows/desktop/api/winioctl/ns-winioctl-read_usn_journal_data_v1 404 | /// Size 44 405 | #[derive(Debug, Clone)] 406 | #[repr(C)] 407 | pub struct ReadUsnJournalDataV1 { 408 | start_usn: u64, 409 | reason_mask: u32, 410 | return_only_on_close: u32, 411 | timeout: u64, 412 | bytes_to_wait_for: u64, 413 | usn_journal_id: u64, 414 | min_major_version: u16, 415 | max_major_version: u16, 416 | } 417 | impl ReadUsnJournalDataV1 { 418 | fn new( 419 | start_usn: u64, usn_journal_id: u64, 420 | min_major_version: u16, max_major_version: u16 421 | ) -> ReadUsnJournalDataV1 { 422 | let reason_mask = 0xffffffff; 423 | let return_only_on_close = 0; 424 | let timeout = 0; 425 | let bytes_to_wait_for = 0; 426 | 427 | return ReadUsnJournalDataV1 { 428 | start_usn, 429 | 
reason_mask, 430 | return_only_on_close, 431 | timeout, 432 | bytes_to_wait_for, 433 | usn_journal_id, 434 | min_major_version, 435 | max_major_version, 436 | } 437 | } 438 | } 439 | -------------------------------------------------------------------------------- /src/liveusn/winfuncs.rs: -------------------------------------------------------------------------------- 1 | use std::ptr; 2 | use std::mem; 3 | use std::fs::File; 4 | use winapi::um::winioctl::{ 5 | FSCTL_QUERY_USN_JOURNAL, 6 | FSCTL_READ_USN_JOURNAL, 7 | FSCTL_GET_NTFS_FILE_RECORD, 8 | FSCTL_GET_NTFS_VOLUME_DATA, 9 | NTFS_FILE_RECORD_INPUT_BUFFER 10 | }; 11 | use winapi::ctypes::c_void; 12 | use winapi::um::winnt::LARGE_INTEGER; 13 | use std::os::windows::io::AsRawHandle; 14 | use winapi::um::ioapiset::DeviceIoControl; 15 | use crate::liveusn::error::UsnLiveError; 16 | use crate::liveusn::ntfs; 17 | 18 | 19 | /// Query FSCTL_GET_NTFS_VOLUME_DATA to get the NTFS volume data. 20 | /// https://docs.microsoft.com/en-us/windows/win32/api/winioctl/ni-winioctl-fsctl_get_ntfs_volume_data 21 | /// 22 | pub fn get_ntfs_volume_data(volume_handle: &File) -> Result { 23 | let mut bytes_read = 0; 24 | let mut output_buffer = vec![0u8; 128]; 25 | 26 | let result = unsafe { 27 | DeviceIoControl( 28 | volume_handle.as_raw_handle() as *mut c_void, 29 | FSCTL_GET_NTFS_VOLUME_DATA, 30 | ptr::null_mut(), 31 | 0, 32 | output_buffer.as_mut_ptr() as *mut _, 33 | output_buffer.len() as u32, 34 | &mut bytes_read, 35 | ptr::null_mut(), 36 | ) 37 | }; 38 | 39 | if result == 0 { 40 | return Err( 41 | UsnLiveError::from_windows_last_error() 42 | ); 43 | } 44 | 45 | debug!("[output_buffer] DeviceIoControl->FSCTL_GET_NTFS_VOLUME_DATA: {}", hex::encode(&output_buffer)); 46 | 47 | Ok( 48 | ntfs::NtfsVolumeData::from_buffer( 49 | &output_buffer[..] 50 | ) 51 | ) 52 | } 53 | 54 | 55 | /// Query FSCTL_GET_NTFS_FILE_RECORD to get an entries' NTFS_FILE_RECORD_OUTPUT_BUFFER 56 | /// https://docs.microsoft.com/en-us/windows/win32/api/winioctl/ni-winioctl-fsctl_get_ntfs_file_record 57 | /// 58 | pub fn query_file_record(volume_handle: &File, entry: i64, entry_size: u32) -> Result, UsnLiveError> { 59 | let mut bytes_read = 0; 60 | let buffer_size = (entry_size + 12) as usize; 61 | let mut output_buffer = vec![0u8; buffer_size]; 62 | 63 | let result = unsafe { 64 | let mut entry_reference = mem::zeroed::(); 65 | *entry_reference.QuadPart_mut() = entry; 66 | 67 | // Input buffer 68 | let mut input_buffer = NTFS_FILE_RECORD_INPUT_BUFFER { 69 | FileReferenceNumber: entry_reference 70 | }; 71 | 72 | DeviceIoControl( 73 | volume_handle.as_raw_handle() as *mut c_void, 74 | FSCTL_GET_NTFS_FILE_RECORD, 75 | &mut input_buffer as *mut _ as *mut c_void, 76 | mem::size_of::() as u32, 77 | output_buffer.as_mut_ptr() as *mut _, 78 | output_buffer.len() as u32, 79 | &mut bytes_read, 80 | ptr::null_mut() 81 | ) 82 | }; 83 | 84 | if result == 0 { 85 | return Err( 86 | UsnLiveError::from_windows_last_error() 87 | ); 88 | } else { 89 | output_buffer.truncate( 90 | bytes_read as usize 91 | ); 92 | } 93 | 94 | Ok(output_buffer) 95 | } 96 | 97 | 98 | /// Query FSCTL_QUERY_USN_JOURNAL to get UsnJournalData which is an enum for 99 | /// READ_USN_JOURNAL_DATA_V0, READ_USN_JOURNAL_DATA_V1, READ_USN_JOURNAL_DATA_V2 structures. 
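/// (Strictly, this FSCTL returns USN_JOURNAL_DATA_V0/_V1/_V2 buffers; the
/// READ_USN_JOURNAL_DATA request structures are built from them separately.)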
100 | /// https://docs.microsoft.com/en-us/windows/desktop/api/winioctl/ni-winioctl-fsctl_query_usn_journal 101 | /// 102 | pub fn query_usn_journal(volume_handle: &File) -> Result { 103 | let mut output_buffer = [0u8; 80]; 104 | let mut bytes_read = 0; 105 | 106 | let result = unsafe { 107 | DeviceIoControl( 108 | volume_handle.as_raw_handle(), 109 | FSCTL_QUERY_USN_JOURNAL, 110 | ptr::null_mut(), 111 | 0, 112 | output_buffer.as_mut_ptr() as *mut _, 113 | output_buffer.len() as u32, 114 | &mut bytes_read, 115 | ptr::null_mut() 116 | ) 117 | }; 118 | 119 | if result == 0 { 120 | return Err( 121 | UsnLiveError::from_windows_last_error() 122 | ); 123 | } else { 124 | return ntfs::UsnJournalData::new( 125 | &output_buffer[..bytes_read as usize] 126 | ); 127 | } 128 | } 129 | 130 | 131 | /// Query FSCTL_READ_USN_JOURNAL 132 | /// https://docs.microsoft.com/en-us/windows/win32/api/winioctl/ni-winioctl-fsctl_read_usn_journal 133 | /// 134 | pub fn read_usn_journal<'a> ( 135 | volume_handle: &File, 136 | read_jounral_data: ntfs::ReadUsnJournalData, 137 | record_buffer: &'a mut [u8] 138 | ) -> Result<&'a [u8], UsnLiveError> { 139 | let mut bytes_read: u32 = 0; 140 | 141 | let result = match read_jounral_data { 142 | ntfs::ReadUsnJournalData::V0(mut read_data_v0) => { 143 | unsafe { 144 | DeviceIoControl( 145 | volume_handle.as_raw_handle(), 146 | FSCTL_READ_USN_JOURNAL, 147 | &mut read_data_v0 as *mut _ as *mut c_void, 148 | mem::size_of::() as u32, 149 | record_buffer.as_mut_ptr() as *mut _, 150 | record_buffer.len() as u32, 151 | &mut bytes_read, 152 | ptr::null_mut() 153 | ) 154 | } 155 | }, 156 | ntfs::ReadUsnJournalData::V1(mut read_data_v1) => { 157 | unsafe { 158 | DeviceIoControl( 159 | volume_handle.as_raw_handle(), 160 | FSCTL_READ_USN_JOURNAL, 161 | &mut read_data_v1 as *mut _ as *mut c_void, 162 | mem::size_of::() as u32, 163 | record_buffer.as_mut_ptr() as *mut _, 164 | record_buffer.len() as u32, 165 | &mut bytes_read, 166 | ptr::null_mut() 167 | ) 168 | } 169 | }, 170 | }; 171 | 172 | if result == 0 { 173 | return Err( 174 | UsnLiveError::from_windows_last_error() 175 | ); 176 | } else { 177 | return Ok( 178 | &record_buffer[..bytes_read as usize] 179 | ) 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/mapping.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::fmt; 3 | use mft::MftParser; 4 | use crate::ReadSeek; 5 | use serde::Serialize; 6 | use lru::LruCache; 7 | use std::collections::HashMap; 8 | use winstructs::ntfs::mft_reference::MftReference; 9 | use serde::ser::{Serializer, SerializeMap}; 10 | 11 | 12 | #[derive(Serialize, Debug)] 13 | pub struct EntryMapping { 14 | pub name: String, 15 | pub parent: MftReference, 16 | } 17 | 18 | 19 | pub struct FolderMapping { 20 | pub mapping: HashMap, 21 | pub cache: LruCache 22 | } 23 | 24 | impl fmt::Debug for FolderMapping { 25 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 26 | write!(f, "FolderMapping {{ mapping: {:?}, cache: LruCache }}", self.mapping) 27 | } 28 | } 29 | 30 | impl FolderMapping { 31 | pub fn new() -> Self { 32 | let mapping: HashMap = HashMap::new(); 33 | let cache: LruCache = LruCache::new(100); 34 | 35 | FolderMapping { 36 | mapping, 37 | cache 38 | } 39 | } 40 | 41 | pub fn contains_reference(&self, entry_reference: &MftReference) -> bool { 42 | self.mapping.contains_key( 43 | entry_reference 44 | ) 45 | } 46 | 47 | pub fn from_mft_path(filename: &str) -> Result { 48 | let 
mapping: HashMap = HashMap::new(); 49 | let mut parser = MftParser::from_path(filename).unwrap(); 50 | let cache: LruCache = LruCache::new(100); 51 | let mut folder_mapping = FolderMapping { 52 | mapping, 53 | cache 54 | }; 55 | 56 | folder_mapping.build_folder_mapping( 57 | &mut parser 58 | ); 59 | 60 | Ok(folder_mapping) 61 | } 62 | 63 | pub fn build_folder_mapping(&mut self, mft_parser: &mut MftParser) { 64 | for entry in mft_parser.iter_entries() { 65 | match entry { 66 | Ok(e) => { 67 | if e.is_dir() { 68 | let mut l_entry = e.header.record_number; 69 | let mut l_sequence = e.header.sequence; 70 | 71 | if !e.is_allocated() { 72 | l_sequence -= 1; 73 | } 74 | 75 | // if entry is child, set entry and sequence to parent 76 | if e.header.base_reference.entry != 0 { 77 | l_entry = e.header.base_reference.entry; 78 | l_sequence = e.header.base_reference.sequence; 79 | } 80 | 81 | let file_name_attr = match e.find_best_name_attribute() { 82 | Some(fn_attr) => fn_attr, 83 | None => continue 84 | }; 85 | 86 | let entry_map = EntryMapping{ 87 | name: file_name_attr.name, 88 | parent: file_name_attr.parent 89 | }; 90 | 91 | let entry_reference = MftReference::new( 92 | l_entry, 93 | l_sequence 94 | ); 95 | 96 | self.mapping.insert( 97 | entry_reference, 98 | entry_map 99 | ); 100 | } 101 | else { 102 | continue 103 | } 104 | } 105 | Err(err) => { 106 | eprintln!("{}", err); 107 | } 108 | } 109 | } 110 | } 111 | 112 | pub fn remove_mapping(&mut self, entry_reference: MftReference) { 113 | self.mapping.remove( 114 | &entry_reference 115 | ); 116 | } 117 | 118 | pub fn add_mapping(&mut self, entry_reference: MftReference, name: String, parent: MftReference) { 119 | let entry_map = EntryMapping { 120 | name: name, 121 | parent: parent 122 | }; 123 | 124 | // If there is a cached entry for this reference, we need to remove it 125 | // so that it can be recreated with the new mapping. 
126 | self.cache.pop( 127 | &entry_reference 128 | ); 129 | 130 | self.mapping.insert( 131 | entry_reference, 132 | entry_map 133 | ); 134 | } 135 | 136 | fn enumerate_path_queue(&self, lookup_ref: &MftReference, path_queue: &mut Vec) { 137 | if lookup_ref.entry != 5 { 138 | match self.mapping.get(&lookup_ref) { 139 | Some(folder_map) => { 140 | path_queue.push(folder_map.name.clone()); 141 | 142 | self.enumerate_path_queue( 143 | &folder_map.parent, 144 | path_queue 145 | ); 146 | }, 147 | None => { 148 | path_queue.push("[]".to_string()); 149 | } 150 | } 151 | } else { 152 | path_queue.push("[root]".to_string()); 153 | } 154 | } 155 | 156 | pub fn enumerate_path(&mut self, entry: u64, sequence: u16) -> Option { 157 | let lookup_ref = MftReference { 158 | entry, sequence 159 | }; 160 | 161 | match self.cache.get_mut(&lookup_ref) { 162 | Some(full_path) => { 163 | return Some(full_path.clone()); 164 | }, 165 | None => { 166 | let mut path_queue: Vec = Vec::new(); 167 | 168 | self.enumerate_path_queue( 169 | &lookup_ref, 170 | &mut path_queue 171 | ); 172 | 173 | path_queue.reverse(); 174 | let full_path = path_queue.join("/"); 175 | 176 | self.cache.put( 177 | lookup_ref, 178 | full_path.clone() 179 | ); 180 | 181 | return Some(full_path); 182 | } 183 | } 184 | } 185 | } 186 | 187 | impl Serialize for FolderMapping { 188 | fn serialize(&self, serializer: S) -> Result 189 | where 190 | S: Serializer, 191 | { 192 | let mut map = serializer.serialize_map(Some(self.mapping.len()))?; 193 | for (k, v) in &self.mapping { 194 | map.serialize_entry( 195 | &k.entry, &v 196 | )?; 197 | } 198 | map.end() 199 | } 200 | } -------------------------------------------------------------------------------- /src/record.rs: -------------------------------------------------------------------------------- 1 | use std::io::Read; 2 | use chrono::{DateTime, Utc}; 3 | use encoding::all::UTF_16LE; 4 | use encoding::{DecoderTrap, Encoding}; 5 | use winstructs::ntfs::mft_reference::MftReference; 6 | use byteorder::{ByteOrder, ReadBytesExt, LittleEndian}; 7 | use serde::ser::{SerializeStruct}; 8 | use serde::ser; 9 | use serde::Serialize; 10 | use serde_json::{Value}; 11 | use crate::flags; 12 | use crate::error::UsnError; 13 | use crate::utils::u64_to_datetime; 14 | 15 | 16 | #[derive(Debug)] 17 | pub struct UsnEntry { 18 | pub meta: EntryMeta, 19 | pub record: UsnRecord, 20 | } 21 | impl UsnEntry { 22 | pub fn new(meta: EntryMeta, version: u16, mut reader: R)-> Result{ 23 | let record = UsnRecord::new( 24 | version, 25 | &mut reader 26 | )?; 27 | 28 | Ok(UsnEntry { 29 | meta: meta, 30 | record: record, 31 | }) 32 | } 33 | 34 | pub fn to_json_value(&self) -> Result { 35 | self.record.to_json_value( 36 | Some( 37 | self.meta.to_json_value()? 38 | ) 39 | ) 40 | } 41 | } 42 | 43 | 44 | /// EntryMeta is addon info describing where the UsnRecord was found. 45 | /// 46 | #[derive(Serialize, Debug, Clone)] 47 | pub struct EntryMeta { 48 | #[serde(rename(serialize = "meta__source"))] 49 | pub source: String, 50 | #[serde(rename(serialize = "meta__offset"))] 51 | pub offset: u64, 52 | } 53 | impl EntryMeta { 54 | pub fn new(source: &str, offset: u64) -> Self { 55 | EntryMeta { 56 | source: source.to_string(), 57 | offset: offset, 58 | } 59 | } 60 | 61 | pub fn to_json_value(&self) -> Result { 62 | Ok(serde_json::to_value(&self)?) 
63 | } 64 | } 65 | 66 | 67 | /// UsnRecord represents the multiple possible versions of the UsnRecord 68 | #[derive(Serialize, Debug)] 69 | #[serde(untagged)] 70 | pub enum UsnRecord { 71 | V2(UsnRecordV2), 72 | V3(UsnRecordV3) 73 | } 74 | impl UsnRecord { 75 | pub fn new(version: u16, mut reader: R)-> Result { 76 | if version == 2 { 77 | let usn_record_v2 = UsnRecordV2::new( 78 | &mut reader 79 | )?; 80 | Ok(UsnRecord::V2(usn_record_v2)) 81 | } 82 | else if version == 3 { 83 | let usn_record_v3 = UsnRecordV3::new( 84 | &mut reader 85 | )?; 86 | Ok(UsnRecord::V3(usn_record_v3)) 87 | } 88 | else { 89 | Err(UsnError::unsupported_usn_version( 90 | format!("Unsupported USN version {}", version) 91 | )) 92 | } 93 | } 94 | 95 | pub fn get_usn(&self) -> u64 { 96 | match self { 97 | UsnRecord::V2(ref record) => record.usn.clone(), 98 | UsnRecord::V3(ref record) => record.usn.clone(), 99 | } 100 | } 101 | 102 | pub fn get_file_name(&self) -> String { 103 | match self { 104 | UsnRecord::V2(ref record) => record.file_name.clone(), 105 | UsnRecord::V3(ref record) => record.file_name.clone(), 106 | } 107 | } 108 | 109 | pub fn get_file_attributes(&self) -> flags::FileAttributes { 110 | match self { 111 | UsnRecord::V2(record) => record.file_attributes, 112 | UsnRecord::V3(record) => record.file_attributes, 113 | } 114 | } 115 | 116 | pub fn get_reason_code(&self) -> flags::Reason { 117 | match self { 118 | UsnRecord::V2(record) => record.reason, 119 | UsnRecord::V3(record) => record.reason, 120 | } 121 | } 122 | 123 | pub fn get_file_reference(&self) -> MftReference { 124 | match self { 125 | UsnRecord::V2(record) => record.file_reference, 126 | UsnRecord::V3(record) => record.file_reference.as_mft_reference(), 127 | } 128 | } 129 | 130 | pub fn get_parent_reference(&self) -> MftReference { 131 | match self { 132 | UsnRecord::V2(record) => record.parent_reference, 133 | UsnRecord::V3(record) => record.parent_reference.as_mft_reference(), 134 | } 135 | } 136 | 137 | pub fn to_json_value(&self, additional: Option) -> Result { 138 | let mut this_value = serde_json::to_value(&self)?; 139 | 140 | match additional { 141 | Some(additional_value) => { 142 | let value_map = match this_value.as_object_mut() { 143 | Some(map) => map, 144 | None => return Err( 145 | UsnError::json_value_error( 146 | format!("Record json value's object is none. {:?}", self) 147 | ) 148 | ) 149 | }; 150 | 151 | let additional_map = match additional_value.as_object() { 152 | Some(map) => map.to_owned(), 153 | None => return Err( 154 | UsnError::json_value_error( 155 | format!("additional value's object is none. 
{:?}", additional_value) 156 | ) 157 | ) 158 | }; 159 | 160 | value_map.extend(additional_map); 161 | }, 162 | None => {} 163 | } 164 | 165 | Ok(this_value) 166 | } 167 | } 168 | 169 | 170 | /// Represents a USN_RECORD_V2 structure 171 | /// https://docs.microsoft.com/en-us/windows/win32/api/winioctl/ns-winioctl-usn_record_v2 172 | /// 173 | #[derive(Serialize, Debug)] 174 | pub struct UsnRecordV2 { 175 | pub record_length: u32, 176 | pub major_version: u16, 177 | pub minor_version: u16, 178 | pub file_reference: MftReference, 179 | pub parent_reference: MftReference, 180 | pub usn: u64, 181 | pub timestamp: DateTime, 182 | pub reason: flags::Reason, 183 | pub source_info: flags::SourceInfo, 184 | pub security_id: u32, 185 | pub file_attributes: flags::FileAttributes, 186 | pub file_name_length: u16, 187 | pub file_name_offset: u16, 188 | pub file_name: String 189 | } 190 | 191 | impl UsnRecordV2 { 192 | pub fn new(mut buffer: T) -> Result { 193 | let record_length = buffer.read_u32::()?; 194 | 195 | // Do some length checks 196 | if record_length == 0 { 197 | return Err( 198 | UsnError::invalid_v2_record( 199 | "Record length is 0.".to_string() 200 | ) 201 | ); 202 | } 203 | if record_length > 1024 { 204 | return Err( 205 | UsnError::invalid_v2_record( 206 | "Record length is over 1024.".to_string() 207 | ) 208 | ); 209 | } 210 | 211 | let major_version = buffer.read_u16::()?; 212 | if major_version != 2 { 213 | return Err( 214 | UsnError::invalid_v2_record( 215 | "Major version is not 2".to_string() 216 | ) 217 | ); 218 | } 219 | 220 | let minor_version = buffer.read_u16::()?; 221 | if minor_version != 0 { 222 | return Err( 223 | UsnError::invalid_v2_record( 224 | "Minor version is not 0".to_string() 225 | ) 226 | ); 227 | } 228 | 229 | let file_reference = MftReference::from_reader(&mut buffer)?; 230 | let parent_reference = MftReference::from_reader(&mut buffer)?; 231 | let usn = buffer.read_u64::()?; 232 | let timestamp = u64_to_datetime( 233 | buffer.read_u64::()? 
234 | ); 235 | let reason = flags::Reason::from_bits_truncate(buffer.read_u32::()?); 236 | let source_info = flags::SourceInfo::from_bits_truncate(buffer.read_u32::()?); 237 | let security_id = buffer.read_u32::()?; 238 | let file_attributes = flags::FileAttributes::from_bits_truncate(buffer.read_u32::()?); 239 | let file_name_length = buffer.read_u16::()?; 240 | let file_name_offset = buffer.read_u16::()?; 241 | 242 | let mut name_buffer = vec![0; file_name_length as usize]; 243 | buffer.read_exact(&mut name_buffer)?; 244 | 245 | let file_name = match UTF_16LE.decode(&name_buffer, DecoderTrap::Ignore) { 246 | Ok(file_name) => file_name, 247 | Err(error) => { 248 | return Err(UsnError::utf16_decode_error( 249 | format!( 250 | "Error Decoding Name [hex buffer: {}]: {:?}", 251 | hex::encode(&name_buffer), 252 | error 253 | ) 254 | )); 255 | }, 256 | }; 257 | 258 | Ok( 259 | UsnRecordV2 { 260 | record_length, 261 | major_version, 262 | minor_version, 263 | file_reference, 264 | parent_reference, 265 | usn, 266 | timestamp, 267 | reason, 268 | source_info, 269 | security_id, 270 | file_attributes, 271 | file_name_length, 272 | file_name_offset, 273 | file_name 274 | } 275 | ) 276 | } 277 | } 278 | 279 | 280 | /// Represents a 128 bit file reference 281 | /// 282 | #[derive(Debug)] 283 | pub struct Ntfs128Reference(pub u128); 284 | 285 | impl Ntfs128Reference { 286 | pub fn as_u128(&self) -> u128 { 287 | self.0 288 | } 289 | 290 | pub fn as_mft_reference(&self) -> MftReference { 291 | MftReference::from( 292 | LittleEndian::read_u64( 293 | &self.0.to_le_bytes()[0..8] 294 | ) 295 | ) 296 | } 297 | } 298 | 299 | impl ser::Serialize for Ntfs128Reference { 300 | fn serialize(&self, serializer: S) -> Result 301 | where 302 | S: ser::Serializer, 303 | { 304 | let mut state = serializer.serialize_struct("Ntfs128Reference", 3)?; 305 | state.serialize_field("u128", &self.as_u128().to_string())?; 306 | let mft_reference = self.as_mft_reference(); 307 | state.serialize_field("entry", &mft_reference.entry)?; 308 | state.serialize_field("sequence", &mft_reference.sequence)?; 309 | state.end() 310 | } 311 | } 312 | 313 | /// Represents a USN_RECORD_V3 structure 314 | /// https://docs.microsoft.com/en-us/windows/win32/api/winioctl/ns-winioctl-usn_record_v3 315 | /// 316 | #[derive(Serialize, Debug)] 317 | pub struct UsnRecordV3 { 318 | pub record_length: u32, 319 | pub major_version: u16, 320 | pub minor_version: u16, 321 | pub file_reference: Ntfs128Reference, 322 | pub parent_reference: Ntfs128Reference, 323 | pub usn: u64, 324 | pub timestamp: DateTime, 325 | pub reason: flags::Reason, 326 | pub source_info: flags::SourceInfo, 327 | pub security_id: u32, 328 | pub file_attributes: flags::FileAttributes, 329 | pub file_name_length: u16, 330 | pub file_name_offset: u16, 331 | pub file_name: String 332 | } 333 | impl UsnRecordV3 { 334 | pub fn new(mut buffer: T) -> Result { 335 | let record_length = buffer.read_u32::()?; 336 | 337 | // Do some length checks 338 | if record_length == 0 { 339 | return Err( 340 | UsnError::invalid_record( 341 | "Record length is 0.".to_string() 342 | ) 343 | ); 344 | } 345 | if record_length > 1024 { 346 | return Err( 347 | UsnError::invalid_record( 348 | "Record length is over 1024.".to_string() 349 | ) 350 | ); 351 | } 352 | 353 | let major_version = buffer.read_u16::()?; 354 | if major_version != 3 { 355 | return Err( 356 | UsnError::invalid_record( 357 | "Major version is not 3".to_string() 358 | ) 359 | ); 360 | } 361 | 362 | let minor_version = buffer.read_u16::()?; 363 | 
if minor_version != 0 { 364 | return Err( 365 | UsnError::invalid_record( 366 | "Minor version is not 0".to_string() 367 | ) 368 | ); 369 | } 370 | 371 | let file_reference = Ntfs128Reference( 372 | buffer.read_u128::()? 373 | ); 374 | let parent_reference = Ntfs128Reference( 375 | buffer.read_u128::()? 376 | ); 377 | 378 | let usn = buffer.read_u64::()?; 379 | let timestamp = u64_to_datetime( 380 | buffer.read_u64::()? 381 | ); 382 | let reason = flags::Reason::from_bits_truncate(buffer.read_u32::()?); 383 | let source_info = flags::SourceInfo::from_bits_truncate(buffer.read_u32::()?); 384 | let security_id = buffer.read_u32::()?; 385 | let file_attributes = flags::FileAttributes::from_bits_truncate(buffer.read_u32::()?); 386 | let file_name_length = buffer.read_u16::()?; 387 | let file_name_offset = buffer.read_u16::()?; 388 | 389 | let mut name_buffer = vec![0; file_name_length as usize]; 390 | buffer.read_exact(&mut name_buffer)?; 391 | 392 | let file_name = match UTF_16LE.decode(&name_buffer, DecoderTrap::Ignore) { 393 | Ok(file_name) => file_name, 394 | Err(error) => { 395 | return Err(UsnError::utf16_decode_error( 396 | format!( 397 | "Error Decoding Name [hex buffer: {}]: {:?}", 398 | hex::encode(&name_buffer), 399 | error 400 | ) 401 | )); 402 | }, 403 | }; 404 | 405 | Ok( 406 | UsnRecordV3 { 407 | record_length, 408 | major_version, 409 | minor_version, 410 | file_reference, 411 | parent_reference, 412 | usn, 413 | timestamp, 414 | reason, 415 | source_info, 416 | security_id, 417 | file_attributes, 418 | file_name_length, 419 | file_name_offset, 420 | file_name 421 | } 422 | ) 423 | } 424 | } -------------------------------------------------------------------------------- /src/usn.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "multithreading")] 2 | use rayon; 3 | use std::io; 4 | use regex::bytes; 5 | use std::cmp::max; 6 | use std::fs::File; 7 | use std::io::SeekFrom; 8 | #[cfg(feature = "multithreading")] 9 | use rayon::prelude::*; 10 | use std::collections::VecDeque; 11 | use byteorder::{ByteOrder, LittleEndian}; 12 | use crate::ReadSeek; 13 | use crate::record::{EntryMeta, UsnEntry}; 14 | 15 | 16 | // This is the size of data chunks 17 | const SIZE_CHUNK: usize = 17408; 18 | // This is the size of the search within a chunk 19 | // It is smaller than the chunk size to garentee that the last record found is complete 20 | // It has been noticed that generally usn records are paged in 4096 byte pages. I have not 21 | // observed usn records overlaping the 4096 offset and are zero padded to the 4096 mark. 22 | const SIZE_SEARCH: usize = 16384; 23 | 24 | lazy_static! { 25 | static ref RE_USN: bytes::Regex = bytes::Regex::new( 26 | "(?-u)..\x00\x00(\x02|\x03)\x00\x00\x00" 27 | ).expect("Regex Error"); 28 | } 29 | 30 | 31 | pub struct UsnParserSettings{ 32 | thread_count: usize 33 | } 34 | 35 | impl Default for UsnParserSettings { 36 | fn default() -> Self { 37 | UsnParserSettings { 38 | thread_count: 0 39 | } 40 | } 41 | } 42 | 43 | impl UsnParserSettings { 44 | pub fn new() -> UsnParserSettings { 45 | UsnParserSettings::default() 46 | } 47 | 48 | /// Sets the number of worker threads. 49 | /// `0` will let rayon decide. 
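/// Illustrative usage (the path is a placeholder for an extracted `$J` file):
/// `UsnParser::from_path("extracted/$J")?.with_configuration(UsnParserSettings::new().thread_count(4))`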
50 | /// 51 | #[cfg(feature = "multithreading")] 52 | pub fn thread_count(mut self, thread_count: usize) -> Self { 53 | self.thread_count = if thread_count == 0 { 54 | rayon::current_num_threads() 55 | } else { 56 | thread_count 57 | }; 58 | self 59 | } 60 | 61 | /// Does nothing and emits a warning when complied without multithreading. 62 | #[cfg(not(feature = "multithreading"))] 63 | pub fn thread_count(mut self, _thread_count: usize) -> Self { 64 | warn!("Setting num_threads has no effect when compiling without multithreading support."); 65 | self.thread_count = 1; 66 | self 67 | } 68 | } 69 | 70 | 71 | pub struct UsnParser { 72 | inner_handle: T, 73 | source: String, 74 | handle_size: u64, 75 | settings: UsnParserSettings 76 | } 77 | 78 | impl UsnParser { 79 | pub fn from_path(filename: &str) -> Result { 80 | let file_handle = File::open(filename)?; 81 | 82 | Self::from_read_seek( 83 | filename.to_string(), 84 | file_handle 85 | ) 86 | } 87 | } 88 | 89 | impl UsnParser { 90 | pub fn from_read_seek(source: String, mut inner_handle: T) -> Result { 91 | // We need to get the end offset to determine the size 92 | let end_offset = inner_handle.seek(SeekFrom::End(0))?; 93 | 94 | // Seek back to the beginning 95 | inner_handle.seek(SeekFrom::Start(0))?; 96 | 97 | Ok( Self { 98 | inner_handle: inner_handle, 99 | source: source, 100 | handle_size: end_offset, 101 | settings: UsnParserSettings::default() 102 | }) 103 | } 104 | 105 | pub fn with_configuration(mut self, configuration: UsnParserSettings) -> Self { 106 | self.settings = configuration; 107 | self 108 | } 109 | 110 | pub fn get_chunk_iterator(&mut self) -> IterFileChunks { 111 | IterFileChunks{ 112 | parser: self, 113 | chunk_size: SIZE_CHUNK, 114 | search_size: SIZE_SEARCH, 115 | chunk_start_offset: 0, 116 | } 117 | } 118 | 119 | pub fn into_chunk_iterator(self) -> IntoIterFileChunks { 120 | IntoIterFileChunks { 121 | parser: self, 122 | chunk_size: SIZE_CHUNK, 123 | search_size: SIZE_SEARCH, 124 | chunk_start_offset: 0, 125 | } 126 | } 127 | 128 | pub fn records(&mut self) -> impl Iterator + '_ { 129 | let num_threads = max(self.settings.thread_count, 1); 130 | 131 | let mut chunks = self.get_chunk_iterator(); 132 | 133 | let records_per_chunk = std::iter::from_fn(move || 134 | { 135 | // Allocate some chunks in advance, so they can be parsed in parallel. 136 | let mut list_of_chunks = Vec::with_capacity(num_threads); 137 | 138 | for _ in 0..num_threads { 139 | if let Some(chunk) = chunks.next() { 140 | list_of_chunks.push(chunk); 141 | }; 142 | } 143 | 144 | // We only stop once no chunks can be allocated. 145 | if list_of_chunks.is_empty() { 146 | None 147 | } else { 148 | #[cfg(feature = "multithreading")] 149 | let chunk_iter = list_of_chunks.into_par_iter(); 150 | 151 | #[cfg(not(feature = "multithreading"))] 152 | let chunk_iter = list_of_chunks.into_iter(); 153 | 154 | // Serialize the records in each chunk. 
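                    // (Each chunk is scanned for records here, in parallel when the
                    // `multithreading` feature supplies a rayon parallel iterator above.)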
155 | let iterators: Vec> = chunk_iter 156 | .map(|data_chunk| data_chunk.get_records() 157 | ) 158 | .collect(); 159 | 160 | Some(iterators.into_iter().flatten()) 161 | } 162 | } 163 | ); 164 | 165 | records_per_chunk.flatten() 166 | } 167 | } 168 | 169 | pub struct IterFileChunks<'c, T: ReadSeek> { 170 | parser: &'c mut UsnParser, 171 | // The chunk size is larger than the parse size to ensure complete end record 172 | chunk_size: usize, 173 | // The parse size is smaller than the chunk size to ensure there is overlap for complete end record 174 | search_size: usize, 175 | // This is the relative offset of this chunk 176 | chunk_start_offset: u64, 177 | } 178 | 179 | impl <'c, T: ReadSeek> Iterator for IterFileChunks <'c, T> { 180 | type Item = DataChunk; 181 | 182 | fn next(&mut self) -> Option<::Item> { 183 | while self.chunk_start_offset < self.parser.handle_size { 184 | // Create buffer for our data chunk 185 | let mut buffer = vec![0u8; self.chunk_size]; 186 | 187 | // Get the current offset 188 | let current_offset = self.chunk_start_offset; 189 | 190 | // Seek to where we start our chunk 191 | match self.parser.inner_handle.seek( 192 | SeekFrom::Start(current_offset) 193 | ) { 194 | Ok(_) => {}, 195 | Err(error) => { 196 | error!("{}", error); 197 | break; 198 | } 199 | } 200 | 201 | // Read into buffer 202 | let _bytes_read = match self.parser.inner_handle.read( 203 | buffer.as_mut_slice() 204 | ){ 205 | Ok(bytes_read) => bytes_read, 206 | Err(error) => { 207 | error!("{}", error); 208 | return None 209 | } 210 | }; 211 | 212 | // Set the next chunk's offset 213 | // Increment by search size and not chunk size 214 | self.chunk_start_offset += self.search_size as u64; 215 | 216 | // Return data chunk 217 | return Some( 218 | DataChunk{ 219 | source: self.parser.source.to_owned(), 220 | offset: current_offset, 221 | search_size: self.search_size, 222 | data: buffer 223 | } 224 | ); 225 | } 226 | 227 | None 228 | } 229 | } 230 | 231 | 232 | pub struct IntoIterFileChunks { 233 | parser: UsnParser, 234 | chunk_size: usize, 235 | search_size: usize, 236 | chunk_start_offset: u64, 237 | } 238 | 239 | impl Iterator for IntoIterFileChunks { 240 | type Item = DataChunk; 241 | 242 | fn next(&mut self) -> Option<::Item> { 243 | while self.chunk_start_offset < self.parser.handle_size { 244 | // Create buffer for our data chunk 245 | let mut buffer = vec![0u8; self.chunk_size]; 246 | 247 | // Get the current offset 248 | let current_offset = self.chunk_start_offset; 249 | 250 | // Seek to where we start our chunk 251 | match self.parser.inner_handle.seek( 252 | SeekFrom::Start(current_offset) 253 | ) { 254 | Ok(_) => {}, 255 | Err(error) => { 256 | error!("{}", error); 257 | break; 258 | } 259 | } 260 | 261 | // Read into buffer 262 | let _bytes_read = match self.parser.inner_handle.read( 263 | buffer.as_mut_slice() 264 | ){ 265 | Ok(bytes_read) => bytes_read, 266 | Err(error) => { 267 | error!("{}", error); 268 | return None 269 | } 270 | }; 271 | 272 | // Set the next chunk's offset 273 | // Increment by search size and not chunk size 274 | self.chunk_start_offset += self.search_size as u64; 275 | 276 | // Return data chunk 277 | return Some( 278 | DataChunk{ 279 | source: self.parser.source.to_owned(), 280 | offset: current_offset, 281 | search_size: self.search_size, 282 | data: buffer 283 | } 284 | ); 285 | } 286 | 287 | None 288 | } 289 | } 290 | 291 | 292 | #[derive(Debug)] 293 | pub struct DataChunk { 294 | source: String, 295 | offset: u64, 296 | search_size: usize, 297 | data: Vec 
298 | } 299 | 300 | impl DataChunk { 301 | pub fn get_records(self) -> Vec { 302 | trace!("Getting records for ChunkData at offset: {}", self.offset); 303 | 304 | let record_iterator = self.get_record_iterator(); 305 | 306 | let records: Vec = record_iterator.collect(); 307 | 308 | return records; 309 | } 310 | 311 | pub fn get_record_iterator(self) -> IterRecords { 312 | IterRecords::new( 313 | self.source, 314 | self.data, 315 | self.offset, 316 | self.search_size 317 | ) 318 | } 319 | } 320 | 321 | #[derive(Debug)] 322 | pub struct IterRecords { 323 | source: String, 324 | block: Vec, 325 | start_offset: u64, 326 | match_offsets: VecDeque, 327 | } 328 | 329 | impl IterRecords { 330 | pub fn new(source: String, block: Vec, start_offset: u64, search_size: usize) -> IterRecords { 331 | let match_offsets: VecDeque = RE_USN.find_iter(&block[0..search_size]) 332 | .map(|m| m.start() as u64) 333 | .collect(); 334 | 335 | IterRecords { 336 | source, 337 | block, 338 | start_offset, 339 | match_offsets 340 | } 341 | } 342 | } 343 | 344 | impl Iterator for IterRecords { 345 | type Item = UsnEntry; 346 | 347 | fn next(&mut self) -> Option { 348 | loop { 349 | // start of hit 350 | let start_of_hit = match self.match_offsets.pop_front(){ 351 | Some(offset) => offset, 352 | None => break 353 | }; 354 | 355 | // index starts at start of hit offset 356 | let i = start_of_hit as usize; 357 | 358 | // the entries' absolute offset 359 | let entry_offset = self.start_offset + start_of_hit; 360 | 361 | // validate record length is 8 byte aligned 362 | let record_length = LittleEndian::read_u32(&self.block[i..i+4]); 363 | if record_length % 8 != 0 { 364 | debug!("not 8 byte aligned at offset {}", entry_offset); 365 | continue; 366 | } 367 | 368 | // Check versions 369 | let major = LittleEndian::read_u16(&self.block[i+4..i+6]); 370 | 371 | let usn_entry = match major { 372 | 2 => { 373 | let minor = LittleEndian::read_u16(&self.block[i+6..i+8]); 374 | 375 | // validate minor version 376 | if minor != 0 { 377 | debug!("minor version does not match major at offset {}", entry_offset); 378 | continue; 379 | } 380 | 381 | // validate name offset 382 | let name_offset = LittleEndian::read_u16(&self.block[i+58..i+60]); 383 | if name_offset != 60 { 384 | debug!("name offset does not match 60 at offset {}", entry_offset); 385 | continue; 386 | } 387 | 388 | // Create Entry Meta 389 | let entry_meta = EntryMeta::new( 390 | &self.source, 391 | entry_offset 392 | ); 393 | 394 | // Parse entry 395 | let entry = match UsnEntry::new( 396 | entry_meta, 397 | 2, 398 | &self.block[start_of_hit as usize ..] 399 | ) { 400 | Ok(entry) => entry, 401 | Err(error) => { 402 | debug!("error at offset {}: {}", entry_offset, error); 403 | continue; 404 | } 405 | }; 406 | 407 | entry 408 | }, 409 | other => { 410 | debug!("Version not handled: {}; offset: {}", other, entry_offset); 411 | continue; 412 | } 413 | }; 414 | 415 | return Some(usn_entry); 416 | } 417 | 418 | None 419 | } 420 | } 421 | 422 | 423 | /// This iterator iterates records from a buffer by index. 
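/// Unlike `IterRecords`, which regex-scans a file chunk for record signatures, this
/// iterator walks a buffer sequentially using each record's length field; the live
/// listener uses it on buffers returned by FSCTL_READ_USN_JOURNAL.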
424 | /// 425 | #[derive(Debug)] 426 | pub struct IterRecordsByIndex { 427 | meta: EntryMeta, 428 | block: Vec, 429 | index: usize, 430 | } 431 | 432 | impl IterRecordsByIndex { 433 | pub fn new(meta: EntryMeta, block: Vec) -> Self { 434 | IterRecordsByIndex { 435 | meta: meta, 436 | block: block, 437 | index: 0 438 | } 439 | } 440 | } 441 | 442 | impl Iterator for IterRecordsByIndex { 443 | type Item = UsnEntry; 444 | 445 | fn next(&mut self) -> Option { 446 | while self.index < self.block.len() { 447 | self.meta.offset += self.index as u64; 448 | 449 | let record_length = LittleEndian::read_u32( 450 | &self.block[self.index..self.index+4] 451 | ); 452 | if record_length % 8 != 0 { 453 | debug!("not 8 byte aligned at offset {}", self.index); 454 | self.index += 8; 455 | continue; 456 | } 457 | 458 | // Check versions 459 | let major = LittleEndian::read_u16( 460 | &self.block[self.index+4..self.index+6] 461 | ); 462 | let minor = LittleEndian::read_u16( 463 | &self.block[self.index+6..self.index+8] 464 | ); 465 | 466 | let usn_entry = match major { 467 | 2 => { 468 | // validate minor version 469 | if minor != 0 { 470 | debug!("minor version does not match major at offset {}", self.index); 471 | self.index += 8; 472 | continue; 473 | } 474 | 475 | // validate name offset 476 | let name_offset = LittleEndian::read_u16( 477 | &self.block[self.index+58..self.index+60] 478 | ); 479 | if name_offset != 60 { 480 | debug!("name offset does not match 60 at offset {}", self.index); 481 | self.index += 8; 482 | continue; 483 | } 484 | 485 | // Parse entry 486 | let entry = match UsnEntry::new( 487 | self.meta.clone(), 488 | 2, 489 | &self.block[self.index as usize ..] 490 | ) { 491 | Ok(entry) => entry, 492 | Err(error) => { 493 | debug!("error at offset {}: {}", self.index, error); 494 | self.index += 8; 495 | continue; 496 | } 497 | }; 498 | 499 | self.index += record_length as usize; 500 | 501 | entry 502 | }, 503 | 3 => { 504 | debug!("entry: {}", hex::encode(&self.block[self.index as usize .. self.index as usize + record_length as usize])); 505 | // validate minor version 506 | if minor != 0 { 507 | debug!("minor version does not match major at offset {}", self.index); 508 | self.index += 8; 509 | continue; 510 | } 511 | 512 | // validate name offset 513 | let name_offset = LittleEndian::read_u16( 514 | &self.block[self.index+74..self.index+76] 515 | ); 516 | if name_offset != 76 { 517 | debug!("name offset [{}] does not match 76 at offset {}", name_offset, self.index); 518 | self.index += 8; 519 | continue; 520 | } 521 | 522 | // Parse entry 523 | let entry = match UsnEntry::new( 524 | self.meta.clone(), 525 | 3, 526 | &self.block[self.index as usize ..] 
527 |                     ) {
528 |                         Ok(entry) => entry,
529 |                         Err(error) => {
530 |                             debug!("error at offset {}: {}", self.index, error);
531 |                             self.index += 8;
532 |                             continue;
533 |                         }
534 |                     };
535 |
536 |                     self.index += record_length as usize;
537 |
538 |                     entry
539 |                 }
540 |                 other => {
541 |                     debug!("Version not handled: {}; offset: {}", other, self.index);
542 |                     self.index += 8;
543 |                     continue;
544 |                 }
545 |             };
546 |
547 |             return Some(usn_entry);
548 |         }
549 |
550 |         None
551 |     }
552 | }
--------------------------------------------------------------------------------
/src/usn_err.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 | use std::fmt::Display;
3 | use std::result::Result as StdResult;
4 | use std::io;
5 | use winstructs::err::Error as WinstructError;
6 |
7 | pub type Result<T> = StdResult<T, UsnError>;
8 |
9 | #[derive(Debug)]
10 | pub enum ErrorKind {
11 |     InvalidRecord,
12 |     InvalidUsnV2Record,
13 |     UnsupportedVersion,
14 |     WinstructError,
15 |     IoError,
16 | }
17 |
18 | /// USN Record Parsing Error
19 | #[derive(Debug)]
20 | pub struct UsnError {
21 |     /// Formatted error message
22 |     pub message: String,
23 |     /// The type of error
24 |     pub kind: ErrorKind,
25 |     /// Any additional information passed along, such as the argument name that caused the error
26 |     pub info: Option<Vec<String>>,
27 | }
28 |
29 | impl UsnError {
30 |     #[allow(dead_code)]
31 |     pub fn invalid_length(err: String) -> Self {
32 |         UsnError {
33 |             message: err,
34 |             kind: ErrorKind::InvalidRecord,
35 |             info: Some(vec![]),
36 |         }
37 |     }
38 |
39 |     #[allow(dead_code)]
40 |     pub fn invalid_v2_record(err: String) -> Self {
41 |         UsnError {
42 |             message: err,
43 |             kind: ErrorKind::InvalidUsnV2Record,
44 |             info: Some(vec![]),
45 |         }
46 |     }
47 |     #[allow(dead_code)]
48 |     pub fn unsupported_version(err: String) -> Self {
49 |         UsnError {
50 |             message: err,
51 |             kind: ErrorKind::UnsupportedVersion,
52 |             info: Some(vec![]),
53 |         }
54 |     }
55 |     #[allow(dead_code)]
56 |     pub fn io_error(err: String) -> Self {
57 |         UsnError {
58 |             message: err,
59 |             kind: ErrorKind::IoError,
60 |             info: Some(vec![]),
61 |         }
62 |     }
63 | }
64 |
65 | impl From<io::Error> for UsnError {
66 |     fn from(err: io::Error) -> Self {
67 |         UsnError {
68 |             message: format!("{}", err),
69 |             kind: ErrorKind::IoError,
70 |             info: Some(vec![]),
71 |         }
72 |     }
73 | }
74 |
75 | impl From<WinstructError> for UsnError {
76 |     fn from(err: WinstructError) -> Self {
77 |         UsnError {
78 |             message: format!("{}", err),
79 |             kind: ErrorKind::WinstructError,
80 |             info: Some(vec![]),
81 |         }
82 |     }
83 | }
84 |
85 | impl Display for UsnError {
86 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "{}", self.message) }
87 | }
88 |
--------------------------------------------------------------------------------
/src/utils.rs:
--------------------------------------------------------------------------------
1 | use time::Duration;
2 | use chrono::{DateTime, NaiveDate, Utc};
3 |
4 |
5 | /// Convert a u64 Windows 100 nanosecond timestamp to a chrono DateTime
6 | ///
7 | pub fn u64_to_datetime(timestamp_u64: u64) -> DateTime<Utc> {
8 |     DateTime::from_utc(
9 |         NaiveDate::from_ymd(1601, 1, 1)
10 |             .and_hms_nano(0, 0, 0, 0)
11 |             + Duration::microseconds(
12 |                 (timestamp_u64 / 10) as i64
13 |             ),
14 |         Utc,
15 |     )
16 | }
--------------------------------------------------------------------------------
/tests/error_tests.rs:
--------------------------------------------------------------------------------
1 | extern crate rusty_usn;
2 |
3 | #[cfg(feature =
"windows")] 4 | #[test] 5 | fn win_error_code_test() { 6 | use rusty_usn::liveusn::error::format_win_error; 7 | 8 | let error_str = format_win_error( 9 | Some(2) 10 | ); 11 | 12 | assert_eq!(error_str, "The system cannot find the file specified.\r\n"); 13 | } -------------------------------------------------------------------------------- /tests/live_ntfs_tests.rs: -------------------------------------------------------------------------------- 1 | extern crate rusty_usn; 2 | 3 | 4 | #[cfg(feature = "windows")] 5 | #[test] 6 | fn live_get_entry_test() { 7 | use rusty_usn::liveusn::live; 8 | 9 | // Must be admin for this to work 10 | let mut live_ntfs = match live::WindowsLiveNtfs::from_volume_path( 11 | r"\\.\C:" 12 | ) { 13 | Ok(l) => l, 14 | Err(e) => { 15 | if e.message == "Access is denied. (os error 5)" { 16 | eprintln!("{:?}", e); 17 | return; 18 | } 19 | 20 | panic!(e); 21 | } 22 | }; 23 | 24 | let mft_entry = live_ntfs.get_entry(0).unwrap(); 25 | let mft_name = match mft_entry.find_best_name_attribute() { 26 | Some(attr) => attr.name, 27 | None => { 28 | eprintln!("Error getting mft entry name for record 0. {:?}", mft_entry); 29 | panic!("Error getting mft entry name for record 0. {:?}", mft_entry); 30 | } 31 | }; 32 | assert_eq!(mft_name, r"$MFT"); 33 | } 34 | 35 | #[cfg(feature = "windows")] 36 | #[test] 37 | fn live_volume_info_test() { 38 | use std::fs::File; 39 | use rusty_usn::liveusn::winfuncs; 40 | 41 | // Must be admin for this to work 42 | let file_handle = match File::open(r"\\.\C:") { 43 | Ok(f) => f, 44 | Err(e) => { 45 | eprintln!("{:?}", e); 46 | return; 47 | } 48 | }; 49 | 50 | let _volume_data = winfuncs::get_ntfs_volume_data( 51 | &file_handle 52 | ).unwrap(); 53 | } 54 | 55 | #[cfg(feature = "windows")] 56 | #[test] 57 | fn parse_live_volume_data_test() { 58 | use rusty_usn::liveusn::ntfs; 59 | 60 | let volume_buffer: &[u8] = &[ 61 | 0x23,0x6E,0x46,0x0A,0xA3,0x46,0x0A,0xA8,0xFF,0x77,0x7F,0x3B,0x00,0x00,0x00,0x00, 62 | 0xFF,0xEE,0x6F,0x07,0x00,0x00,0x00,0x00,0xA3,0x64,0xA1,0x00,0x00,0x00,0x00,0x00, 63 | 0x70,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x10,0x00,0x00, 64 | 0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xCC,0x2A,0x00,0x00,0x00,0x00, 65 | 0x00,0x00,0x0C,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 66 | 0xE0,0x03,0x10,0x06,0x00,0x00,0x00,0x00,0x60,0x54,0x10,0x06,0x00,0x00,0x00,0x00, 67 | 0x20,0x00,0x00,0x00,0x03,0x00,0x01,0x00,0x00,0x02,0x00,0x00,0x02,0x00,0x00,0x00, 68 | 0x00,0x01,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0x3E,0x00,0x00,0x00,0x00,0x00,0x00,0x40, 69 | ]; 70 | 71 | let volume_data = ntfs::NtfsVolumeData::from_buffer(volume_buffer); 72 | 73 | let volume_data_json_str = serde_json::to_string( 74 | &volume_data 75 | ).unwrap(); 76 | 77 | assert_eq!(&volume_data_json_str, r#"{"volume_serial_number":-6338175859504550365,"number_sectors":998209535,"total_clusters":124776191,"free_clusters":10577059,"total_reserved":5232,"bytes_per_sector":512,"bytes_per_cluster":4096,"bytes_per_file_record_segment":1024,"clusters_per_file_record_segment":0,"mft_valid_data_length":718012416,"mft_start_lcn":786432,"mft_2_start_lcn":2,"mft_zone_start":101712864,"mft_zone_end":101733472,"ntfs_extended_volume_data":{"byte_count":32,"major_version":3,"minor_version":1,"bytes_per_physical_sector":512,"lfs_major_version":2,"lfs_minor_version":0,"max_device_trim_extent_count":256,"max_device_trim_byte_count":4294967295,"max_volume_trim_extent_count":62,"max_volume_trim_byte_count":1073741824}}"#); 78 | 79 | 
assert_eq!(volume_data.volume_serial_number, -6338175859504550365); 80 | assert_eq!(volume_data.number_sectors, 998209535); 81 | assert_eq!(volume_data.total_clusters, 124776191); 82 | assert_eq!(volume_data.free_clusters, 10577059); 83 | assert_eq!(volume_data.total_reserved, 5232); 84 | assert_eq!(volume_data.bytes_per_sector, 512); 85 | assert_eq!(volume_data.bytes_per_cluster, 4096); 86 | assert_eq!(volume_data.bytes_per_file_record_segment, 1024); 87 | assert_eq!(volume_data.clusters_per_file_record_segment, 0); 88 | assert_eq!(volume_data.mft_valid_data_length, 718012416); 89 | assert_eq!(volume_data.mft_start_lcn, 786432); 90 | assert_eq!(volume_data.mft_2_start_lcn, 2); 91 | assert_eq!(volume_data.mft_zone_start, 101712864); 92 | assert_eq!(volume_data.mft_zone_end, 101733472); 93 | } 94 | -------------------------------------------------------------------------------- /tests/record_tests.rs: -------------------------------------------------------------------------------- 1 | extern crate rusty_usn; 2 | extern crate serde_json; 3 | use std::io::Cursor; 4 | use rusty_usn::record; 5 | 6 | 7 | #[test] 8 | fn usn_record_json_value_test() { 9 | let v3_record_buffer: &[u8] = &[ 10 | 0x70,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0xB9,0x8A,0x00,0x00,0x00,0x00,0x02,0x00, 11 | 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC8,0x07,0x00,0x00,0x00,0x00,0x02,0x00, 12 | 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x78,0xA2,0x9A,0x01,0x00,0x00,0x00, 13 | 0xE9,0xB6,0x4E,0x4D,0xE0,0x65,0xD5,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 14 | 0x00,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x20,0x00,0x4C,0x00,0x43,0x00,0x49,0x00, 15 | 0x44,0x00,0x6F,0x00,0x77,0x00,0x6E,0x00,0x6C,0x00,0x6F,0x00,0x61,0x00,0x64,0x00, 16 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00 17 | ]; 18 | 19 | let v3_record = match record::UsnRecord::new(3, v3_record_buffer) { 20 | Ok(record) => record, 21 | Err(error) => { 22 | eprintln!("{:?}", error); 23 | panic!(error); 24 | } 25 | }; 26 | 27 | let v3_json_value = v3_record.to_json_value(None).unwrap(); 28 | assert_eq!(&v3_json_value.to_string(), r#"{"file_attributes":"FILE_ATTRIBUTE_ARCHIVE","file_name":"CIDownloader.log","file_name_length":32,"file_name_offset":76,"file_reference":{"entry":35513,"sequence":2,"u128":"562949953456825"},"major_version":3,"minor_version":0,"parent_reference":{"entry":1992,"sequence":2,"u128":"562949953423304"},"reason":"USN_REASON_DATA_EXTEND","record_length":112,"security_id":0,"source_info":"(empty)","timestamp":"2019-09-08T00:56:52.138160Z","usn":6889306208}"#); 29 | 30 | let record_meta = record::EntryMeta::new( 31 | "Test Buffer", 32 | 0 33 | ); 34 | 35 | let v3_json_value_additional = v3_record.to_json_value( 36 | Some(record_meta.to_json_value().unwrap()) 37 | ).unwrap(); 38 | 39 | assert_eq!(&v3_json_value_additional.to_string(), r#"{"file_attributes":"FILE_ATTRIBUTE_ARCHIVE","file_name":"CIDownloader.log","file_name_length":32,"file_name_offset":76,"file_reference":{"entry":35513,"sequence":2,"u128":"562949953456825"},"major_version":3,"meta__offset":0,"meta__source":"Test Buffer","minor_version":0,"parent_reference":{"entry":1992,"sequence":2,"u128":"562949953423304"},"reason":"USN_REASON_DATA_EXTEND","record_length":112,"security_id":0,"source_info":"(empty)","timestamp":"2019-09-08T00:56:52.138160Z","usn":6889306208}"#); 40 | } 41 | 42 | #[test] 43 | fn usn_records_json_test() { 44 | let v3_record_buffer: &[u8] = &[ 45 | 0x70,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0xB9,0x8A,0x00,0x00,0x00,0x00,0x02,0x00, 46 | 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC8,0x07,0x00,0x00,0x00,0x00,0x02,0x00, 47 | 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x78,0xA2,0x9A,0x01,0x00,0x00,0x00, 48 | 0xE9,0xB6,0x4E,0x4D,0xE0,0x65,0xD5,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 49 | 0x00,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x20,0x00,0x4C,0x00,0x43,0x00,0x49,0x00, 50 | 0x44,0x00,0x6F,0x00,0x77,0x00,0x6E,0x00,0x6C,0x00,0x6F,0x00,0x61,0x00,0x64,0x00, 51 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00 52 | ]; 53 | 54 | let v3_record = match record::UsnRecord::new(3, v3_record_buffer) { 55 | Ok(record) => record, 56 | Err(error) => panic!(error) 57 | }; 58 | 59 | let v3_json_str = serde_json::to_string(&v3_record).unwrap(); 60 | assert_eq!(v3_json_str, r#"{"record_length":112,"major_version":3,"minor_version":0,"file_reference":{"u128":"562949953456825","entry":35513,"sequence":2},"parent_reference":{"u128":"562949953423304","entry":1992,"sequence":2},"usn":6889306208,"timestamp":"2019-09-08T00:56:52.138160Z","reason":"USN_REASON_DATA_EXTEND","source_info":"(empty)","security_id":0,"file_attributes":"FILE_ATTRIBUTE_ARCHIVE","file_name_length":32,"file_name_offset":76,"file_name":"CIDownloader.log"}"#); 61 | 62 | 63 | // Record v2 Test 64 | let v2_record_buffer: &[u8] = &[ 65 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 66 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x80,0xBC,0x04,0x00,0x00,0x00, 67 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 68 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00, 69 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 70 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00 71 | ]; 72 | 73 | let v2_record = match record::UsnRecord::new(2, v2_record_buffer) { 74 | Ok(record) => record, 75 | Err(error) => panic!(error) 76 | }; 77 | let v2_json_str = serde_json::to_string( 78 | &v2_record 79 | ).unwrap(); 80 | 81 | assert_eq!(v2_json_str, r#"{"record_length":96,"major_version":2,"minor_version":0,"file_reference":{"entry":115,"sequence":37224},"parent_reference":{"entry":141883,"sequence":7},"usn":20342374400,"timestamp":"2013-10-19T12:16:53.276040Z","reason":"USN_REASON_DATA_EXTEND","source_info":"(empty)","security_id":0,"file_attributes":"FILE_ATTRIBUTE_ARCHIVE | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED","file_name_length":32,"file_name_offset":60,"file_name":"BTDevManager.log"}"#); 82 | } 83 | 84 | #[test] 85 | fn usn_record_v3_test() { 86 | let record_buffer: &[u8] = &[ 87 | 0x70,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0xB9,0x8A,0x00,0x00,0x00,0x00,0x02,0x00, 88 | 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC8,0x07,0x00,0x00,0x00,0x00,0x02,0x00, 89 | 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x78,0xA2,0x9A,0x01,0x00,0x00,0x00, 90 | 0xE9,0xB6,0x4E,0x4D,0xE0,0x65,0xD5,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 91 | 0x00,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x20,0x00,0x4C,0x00,0x43,0x00,0x49,0x00, 92 | 0x44,0x00,0x6F,0x00,0x77,0x00,0x6E,0x00,0x6C,0x00,0x6F,0x00,0x61,0x00,0x64,0x00, 93 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00 94 | ]; 95 | 96 | let record = match record::UsnRecordV3::new(&mut Cursor::new(record_buffer)) { 97 | Ok(record) => record, 98 | Err(error) => panic!(error) 99 | }; 100 | 101 | assert_eq!(record.record_length, 112); 102 | assert_eq!(record.major_version, 3); 103 | assert_eq!(record.minor_version, 0); 104 | 
assert_eq!(record.file_reference.0, 562949953456825); 105 | 106 | let file_ref = record.file_reference.as_mft_reference(); 107 | assert_eq!(file_ref.entry, 35513); 108 | assert_eq!(file_ref.sequence, 2); 109 | 110 | assert_eq!(record.parent_reference.0, 562949953423304); 111 | let parent_ref = record.parent_reference.as_mft_reference(); 112 | assert_eq!(parent_ref.entry, 1992); 113 | assert_eq!(parent_ref.sequence, 2); 114 | 115 | 116 | assert_eq!(record.usn, 6889306208); 117 | assert_eq!(format!("{}", record.timestamp), "2019-09-08 00:56:52.138160 UTC"); 118 | assert_eq!(record.reason.bits(), 2); 119 | assert_eq!(record.source_info.bits(), 0); 120 | assert_eq!(record.security_id, 0); 121 | assert_eq!(record.file_attributes.bits(), 32); 122 | assert_eq!(record.file_name_length, 32); 123 | assert_eq!(record.file_name_offset, 76); 124 | assert_eq!(record.file_name, "CIDownloader.log"); 125 | } 126 | 127 | #[test] 128 | fn usn_record_v3_json_test() { 129 | let record_buffer: &[u8] = &[ 130 | 0x70,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0xB9,0x8A,0x00,0x00,0x00,0x00,0x02,0x00, 131 | 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC8,0x07,0x00,0x00,0x00,0x00,0x02,0x00, 132 | 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x78,0xA2,0x9A,0x01,0x00,0x00,0x00, 133 | 0xE9,0xB6,0x4E,0x4D,0xE0,0x65,0xD5,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 134 | 0x00,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x20,0x00,0x4C,0x00,0x43,0x00,0x49,0x00, 135 | 0x44,0x00,0x6F,0x00,0x77,0x00,0x6E,0x00,0x6C,0x00,0x6F,0x00,0x61,0x00,0x64,0x00, 136 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00 137 | ]; 138 | 139 | let record = match record::UsnRecordV3::new(&mut Cursor::new(record_buffer)) { 140 | Ok(record) => record, 141 | Err(error) => panic!(error) 142 | }; 143 | 144 | let json_str = serde_json::to_string(&record).unwrap(); 145 | assert_eq!(json_str, r#"{"record_length":112,"major_version":3,"minor_version":0,"file_reference":{"u128":"562949953456825","entry":35513,"sequence":2},"parent_reference":{"u128":"562949953423304","entry":1992,"sequence":2},"usn":6889306208,"timestamp":"2019-09-08T00:56:52.138160Z","reason":"USN_REASON_DATA_EXTEND","source_info":"(empty)","security_id":0,"file_attributes":"FILE_ATTRIBUTE_ARCHIVE","file_name_length":32,"file_name_offset":76,"file_name":"CIDownloader.log"}"#); 146 | } 147 | 148 | #[test] 149 | fn usn_record_v2_test() { 150 | let record_buffer: &[u8] = &[ 151 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 152 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x80,0xBC,0x04,0x00,0x00,0x00, 153 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 154 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00, 155 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 156 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00 157 | ]; 158 | 159 | let record = match record::UsnRecordV2::new(&mut Cursor::new(record_buffer)) { 160 | Ok(record) => record, 161 | Err(error) => panic!(error) 162 | }; 163 | 164 | assert_eq!(record.record_length, 96); 165 | assert_eq!(record.major_version, 2); 166 | assert_eq!(record.minor_version, 0); 167 | assert_eq!(record.file_reference.entry, 115); 168 | assert_eq!(record.file_reference.sequence, 37224); 169 | assert_eq!(record.parent_reference.entry, 141883); 170 | assert_eq!(record.parent_reference.sequence, 7); 171 | assert_eq!(record.usn, 20342374400); 172 | assert_eq!(format!("{}", 
record.timestamp), "2013-10-19 12:16:53.276040 UTC"); 173 | assert_eq!(record.reason.bits(), 2); 174 | assert_eq!(record.source_info.bits(), 0); 175 | assert_eq!(record.security_id, 0); 176 | assert_eq!(record.file_attributes.bits(), 8224); 177 | assert_eq!(record.file_name_length, 32); 178 | assert_eq!(record.file_name_offset, 60); 179 | assert_eq!(record.file_name, "BTDevManager.log"); 180 | } 181 | 182 | #[test] 183 | fn usn_record_v2_json_test() { 184 | let record_buffer: &[u8] = &[ 185 | 0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91, 186 | 0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x80,0xBC,0x04,0x00,0x00,0x00, 187 | 0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 188 | 0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00, 189 | 0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00, 190 | 0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00 191 | ]; 192 | 193 | let record = match record::UsnRecordV2::new(&mut Cursor::new(record_buffer)) { 194 | Ok(record) => record, 195 | Err(error) => panic!(error) 196 | }; 197 | let json_str = serde_json::to_string( 198 | &record 199 | ).unwrap(); 200 | 201 | assert_eq!(json_str, r#"{"record_length":96,"major_version":2,"minor_version":0,"file_reference":{"entry":115,"sequence":37224},"parent_reference":{"entry":141883,"sequence":7},"usn":20342374400,"timestamp":"2013-10-19T12:16:53.276040Z","reason":"USN_REASON_DATA_EXTEND","source_info":"(empty)","security_id":0,"file_attributes":"FILE_ATTRIBUTE_ARCHIVE | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED","file_name_length":32,"file_name_offset":60,"file_name":"BTDevManager.log"}"#); 202 | } 203 | -------------------------------------------------------------------------------- /tests/reference_tests.rs: -------------------------------------------------------------------------------- 1 | extern crate rusty_usn; 2 | extern crate serde_json; 3 | use rusty_usn::record; 4 | use byteorder::{ByteOrder, LittleEndian}; 5 | 6 | 7 | #[test] 8 | fn reference_128() { 9 | let ref_buffer: &[u8] = &[ 10 | 0xC8,0x07,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 11 | ]; 12 | 13 | let ref128 = record::Ntfs128Reference( 14 | LittleEndian::read_u128(&ref_buffer[0..16]) 15 | ); 16 | 17 | assert_eq!(ref128.0, 562949953423304); 18 | let file_ref = ref128.as_mft_reference(); 19 | assert_eq!(file_ref.entry, 1992); 20 | assert_eq!(file_ref.sequence, 2); 21 | 22 | let json_str = serde_json::to_string( 23 | &ref128 24 | ).unwrap(); 25 | assert_eq!(json_str, r#"{"u128":"562949953423304","entry":1992,"sequence":2}"#); 26 | } -------------------------------------------------------------------------------- /tests/win_tests.rs: -------------------------------------------------------------------------------- 1 | extern crate rusty_usn; 2 | 3 | #[cfg(feature = "windows")] 4 | #[test] 5 | fn query_test() { 6 | use std::fs::File; 7 | 8 | use rusty_usn::liveusn::winfuncs::{ 9 | query_usn_journal 10 | }; 11 | 12 | let file_handle = match File::open("\\\\.\\C:") { 13 | Ok(handle) => handle, 14 | Err(error) => { 15 | eprintln!("{:?}", error); 16 | return; 17 | } 18 | }; 19 | 20 | match query_usn_journal(&file_handle) { 21 | Ok(journal_info) => { 22 | println!("{:#?}", journal_info); 23 | }, 24 | Err(error) => panic!(error) 25 | } 26 | } --------------------------------------------------------------------------------
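
The test files above exercise the crate's record-level API directly. The snippet below is an illustrative sketch only (it is not a file in this repository) showing that same API being used to turn a single raw USN_RECORD_V2 buffer into a line of JSONL; the byte buffer is copied from usn_record_v2_test, and the "extracted_$J.bin" source label is a made-up placeholder.

extern crate rusty_usn;
extern crate serde_json;

use rusty_usn::record;

fn main() {
    // 96-byte USN_RECORD_V2 buffer (same bytes as usn_record_v2_test above).
    let v2_record_buffer: &[u8] = &[
        0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x73,0x00,0x00,0x00,0x00,0x00,0x68,0x91,
        0x3B,0x2A,0x02,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x80,0xBC,0x04,0x00,0x00,0x00,
        0x53,0xC7,0x8B,0x18,0xC5,0xCC,0xCE,0x01,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x20,0x20,0x00,0x00,0x20,0x00,0x3C,0x00,0x42,0x00,0x54,0x00,
        0x44,0x00,0x65,0x00,0x76,0x00,0x4D,0x00,0x61,0x00,0x6E,0x00,0x61,0x00,0x67,0x00,
        0x65,0x00,0x72,0x00,0x2E,0x00,0x6C,0x00,0x6F,0x00,0x67,0x00,0x00,0x00,0x00,0x00
    ];

    // Parse the buffer as a major-version 2 record.
    let record = record::UsnRecord::new(2, v2_record_buffer)
        .expect("buffer should parse as a valid v2 record");

    // Attach source/offset metadata ("extracted_$J.bin" is a placeholder name)
    // and print one JSON object per record, i.e. JSONL output.
    let meta = record::EntryMeta::new("extracted_$J.bin", 0);
    let json_value = record
        .to_json_value(Some(meta.to_json_value().expect("meta serializes")))
        .expect("record serializes");
    println!("{}", json_value.to_string());
}

The same pattern applies to version 3 buffers by passing 3 as the version argument, as usn_record_json_value_test demonstrates.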