├── .clippy.toml ├── .dockerignore ├── .github ├── README.md ├── actions │ └── proof-params-download │ │ └── action.yml └── workflows │ ├── ci.yml │ └── proof-params-generate.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── COPYRIGHT ├── Cargo.toml ├── Dockerfile-ci ├── Dockerfile-profile ├── FUNDING.json ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── RELEASE.md ├── SECURITY.md ├── audits ├── Sigma-Prime-Protocol-Labs-Filecoin-Proofs-Security-Review-v2.1.pdf ├── protocolai-audit-20200728.pdf ├── protocollabs-report-20211210.pdf └── synporepsecurityauditreport-05-2023.pdf ├── build.rs ├── fil-proofs-param ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── parameters.json ├── scripts │ ├── pin-params.sh │ └── verify-parameters-json.sh ├── src │ ├── bin │ │ ├── fakeipfsadd.rs │ │ ├── paramcache.rs │ │ ├── paramfetch.rs │ │ ├── parampublish.rs │ │ └── srspublish.rs │ └── lib.rs └── tests │ ├── paramfetch │ ├── mod.rs │ ├── session.rs │ └── support │ │ └── mod.rs │ ├── parampublish │ ├── mod.rs │ ├── prompts_to_publish.rs │ ├── read_metadata_files.rs │ ├── support │ │ ├── mod.rs │ │ └── session.rs │ └── write_json_manifest.rs │ ├── suite.rs │ └── support │ └── mod.rs ├── fil-proofs-tooling ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── build.rs ├── scripts │ ├── aggregate-benchmarks.sh │ ├── benchy.sh │ ├── micro.sh │ ├── retry.sh │ ├── run-remote.sh │ ├── with-dots.sh │ └── with-lock.sh └── src │ ├── bin │ ├── benchy │ │ ├── hash_fns.rs │ │ ├── main.rs │ │ ├── merkleproofs.rs │ │ ├── porep.rs │ │ ├── window_post.rs │ │ ├── window_post_fake.rs │ │ └── winning_post.rs │ ├── check_parameters │ │ └── main.rs │ ├── circuitinfo │ │ └── main.rs │ ├── fdlimit │ │ └── main.rs │ ├── gen_graph_cache │ │ └── main.rs │ ├── gpu-cpu-test │ │ ├── README.md │ │ └── main.rs │ ├── micro.rs │ ├── settings │ │ └── main.rs │ └── update_tree_r_cache │ │ └── main.rs │ ├── lib.rs │ ├── measure.rs │ ├── metadata.rs │ └── shared.rs ├── filecoin-hashers ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md └── src │ ├── blake2s.rs │ ├── lib.rs │ ├── poseidon.rs │ ├── poseidon_types.rs │ ├── sha256.rs │ └── types.rs ├── filecoin-proofs ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches │ ├── aggregation.rs │ └── preprocessing.rs ├── parameters.json ├── src │ ├── api │ │ ├── fake_seal.rs │ │ ├── mod.rs │ │ ├── post_util.rs │ │ ├── seal.rs │ │ ├── update.rs │ │ ├── util.rs │ │ ├── window_post.rs │ │ └── winning_post.rs │ ├── caches.rs │ ├── chunk_iter.rs │ ├── commitment_reader.rs │ ├── constants.rs │ ├── lib.rs │ ├── param.rs │ ├── parameters.rs │ ├── pieces.rs │ └── types │ │ ├── bytes_amount.rs │ │ ├── mod.rs │ │ ├── piece_info.rs │ │ ├── porep_config.rs │ │ ├── porep_proof_partitions.rs │ │ ├── post_config.rs │ │ ├── post_proof_partitions.rs │ │ ├── private_replica_info.rs │ │ ├── public_replica_info.rs │ │ ├── sector_class.rs │ │ ├── sector_size.rs │ │ ├── sector_update_config.rs │ │ └── update_proof_partitions.rs └── tests │ ├── aggregate_proof_bytes │ ├── api.rs │ ├── constants.rs │ ├── mod.rs │ ├── pieces.rs │ ├── regression.rs │ ├── seal_regression_records-v16.json │ └── seal_regression_records-v18.json ├── fr32 ├── Cargo.toml ├── benches │ └── fr.rs └── src │ ├── convert.rs │ ├── lib.rs │ ├── padding.rs │ └── reader.rs ├── issue_template.md ├── parameters.json ├── proptest-regressions └── crypto │ └── sloth.txt ├── release.toml ├── releases ├── Cargo.lock.v11.0.0 ├── Cargo.lock.v11.0.1 ├── 
Cargo.lock.v11.0.2 ├── Cargo.lock.v13.0.0 ├── Cargo.lock.v14.0.0 ├── Cargo.lock.v15.0.0 ├── Cargo.lock.v16.0.0 ├── Cargo.lock.v16.1.0 ├── Cargo.lock.v17.0.0 ├── Cargo.lock.v18.0.0 ├── Cargo.lock.v18.1.0 └── Cargo.lock.v19.0.0 ├── rust-fil-proofs.config.toml.sample ├── rust-toolchain ├── scripts ├── bench-parser.sh ├── package-release.sh └── publish-release.sh ├── sha2raw ├── Cargo.toml ├── README.md └── src │ ├── consts.rs │ ├── lib.rs │ ├── platform.rs │ ├── sha256.rs │ ├── sha256_intrinsics.rs │ └── sha256_utils.rs ├── srs-inner-product.json ├── storage-proofs-core ├── Cargo.toml ├── README.md ├── benches │ ├── blake2s.rs │ ├── drgraph.rs │ ├── merkle.rs │ ├── misc.rs │ ├── sha256.rs │ └── xor.rs ├── parameters.json ├── src │ ├── api_version.rs │ ├── cache_key.rs │ ├── compound_proof.rs │ ├── crypto │ │ ├── aes.rs │ │ ├── feistel.rs │ │ ├── mod.rs │ │ ├── sloth.rs │ │ └── xor.rs │ ├── data.rs │ ├── drgraph.rs │ ├── error.rs │ ├── gadgets │ │ ├── constraint.rs │ │ ├── encode.rs │ │ ├── insertion.rs │ │ ├── mod.rs │ │ ├── multipack.rs │ │ ├── por.rs │ │ ├── uint64.rs │ │ ├── variables.rs │ │ └── xor.rs │ ├── lib.rs │ ├── measurements.rs │ ├── merkle │ │ ├── builders.rs │ │ ├── mod.rs │ │ ├── proof.rs │ │ └── tree.rs │ ├── multi_proof.rs │ ├── parameter_cache.rs │ ├── partitions.rs │ ├── pieces.rs │ ├── por.rs │ ├── proof.rs │ ├── sector.rs │ ├── settings.rs │ ├── test_helper.rs │ └── util.rs ├── srs-inner-product.json └── tests │ ├── por_circuit.rs │ ├── por_compound.rs │ └── por_vanilla.rs ├── storage-proofs-porep ├── Cargo.toml ├── README.md ├── benches │ ├── encode.rs │ └── parents.rs ├── build.rs ├── parent_cache.json ├── src │ ├── encode.rs │ ├── lib.rs │ └── stacked │ │ ├── circuit │ │ ├── column.rs │ │ ├── column_proof.rs │ │ ├── create_label.rs │ │ ├── hash.rs │ │ ├── mod.rs │ │ ├── params.rs │ │ └── proof.rs │ │ ├── mod.rs │ │ └── vanilla │ │ ├── cache.rs │ │ ├── challenges.rs │ │ ├── clear_files.rs │ │ ├── column.rs │ │ ├── column_proof.rs │ │ ├── cores.rs │ │ ├── create_label │ │ ├── mod.rs │ │ ├── multi.rs │ │ └── single.rs │ │ ├── encoding_proof.rs │ │ ├── graph.rs │ │ ├── hash.rs │ │ ├── labeling_proof.rs │ │ ├── macros.rs │ │ ├── memory_handling.rs │ │ ├── mod.rs │ │ ├── params.rs │ │ ├── proof.rs │ │ ├── proof_scheme.rs │ │ └── utils.rs └── tests │ ├── common.rs │ ├── stacked_circuit.rs │ ├── stacked_compound.rs │ └── stacked_vanilla.rs ├── storage-proofs-post ├── Cargo.toml ├── README.md ├── src │ ├── fallback │ │ ├── circuit.rs │ │ ├── compound.rs │ │ ├── mod.rs │ │ ├── utils.rs │ │ └── vanilla.rs │ └── lib.rs └── tests │ ├── fallback_circuit.rs │ ├── fallback_compound.rs │ └── fallback_vanilla.rs └── storage-proofs-update ├── Cargo.toml ├── README.md ├── src ├── challenges.rs ├── circuit.rs ├── compound.rs ├── constants.rs ├── gadgets.rs ├── lib.rs ├── poseidon │ ├── circuit.rs │ ├── compound.rs │ ├── mod.rs │ └── vanilla.rs └── vanilla.rs └── tests ├── circuit.rs ├── circuit_poseidon.rs ├── common └── mod.rs └── compound.rs /.clippy.toml: -------------------------------------------------------------------------------- 1 | type-complexity-threshold = 400 -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | /target/* 3 | -------------------------------------------------------------------------------- /.github/README.md: -------------------------------------------------------------------------------- 1 | GitHub Actions setup 2 | 
==================== 3 | 4 | The main CI happens in [`ci.yml`]; this is where the tests are run. 5 | 6 | There is a separate workflow, [`proof-params-generate.yml`], which pre-generates the Groth16 parameter files needed for testing. Those parameters are stored as GitHub Artifacts, which are then downloaded by any workflow that needs them. Those artifacts are retained for 90 days, so this job runs every two months to regenerate them, which leaves enough time to fix things in case something breaks. 7 | 8 | The [`proof-params-download` Action] is a helper for downloading the pre-generated Groth16 parameters. 9 | 10 | [`ci.yml`]: ./workflows/ci.yml 11 | [`proof-params-generate.yml`]: ./workflows/proof-params-generate.yml 12 | [`proof-params-download` Action]: ./actions/proof-params-download/action.yml 13 | -------------------------------------------------------------------------------- /.github/actions/proof-params-download/action.yml: -------------------------------------------------------------------------------- 1 | name: Download proof params 2 | 3 | inputs: 4 | github-token: 5 | description: 'A secrets.GITHUB_TOKEN' 6 | required: true 7 | 8 | runs: 9 | using: 'composite' 10 | steps: 11 | - name: Get run-id from latest proof-params-generate workflow run 12 | uses: octokit/request-action@v2.x 13 | id: latest-proof-params 14 | with: 15 | route: GET /repos/filecoin-project/rust-fil-proofs/actions/workflows/proof-params-generate.yml/runs?per_page=1 16 | env: 17 | GITHUB_TOKEN: ${{ inputs.github-token }} 18 | - name: Download all Artifacts from proof-params-generate workflow 19 | uses: actions/download-artifact@v4 20 | with: 21 | #pattern: proof-params-v28-n-${{ inputs.parameters-json-hash }}-* 22 | pattern: proof-params-v28-n-${{ hashFiles('filecoin-proofs/parameters.json') }}-* 23 | path: /var/tmp/filecoin-proof-parameters/ 24 | merge-multiple: true 25 | run-id: ${{ fromJson(steps.latest-proof-params.outputs.data).workflow_runs[0].id }} 26 | github-token: ${{ inputs.github-token }} 27 | - name: Obtain Filecoin IPP parameter file 28 | shell: bash 29 | # paramfetch uses `ipget`, which currently always times out on CI, 30 | # hence we fetch this file via HTTP instead. 31 | #cargo run --release --bin paramfetch -- -a -j srs-inner-product.json 32 | run: 'curl https://proofs.filecoin.io/v28-fil-inner-product-v1.srs --output /var/tmp/filecoin-proof-parameters/v28-fil-inner-product-v1.srs' 33 | - name: List parameter files 34 | shell: bash 35 | run: ls -al /var/tmp/filecoin-proof-parameters/ 36 | -------------------------------------------------------------------------------- /.github/workflows/proof-params-generate.yml: -------------------------------------------------------------------------------- 1 | name: Proof params generate 2 | 3 | on: 4 | # Also make it possible to trigger this workflow manually. 5 | workflow_dispatch: 6 | schedule: 7 | # The artifacts are available for 90 days. Run this workflow every two 8 | # months, so that in case of a failure, there's enough time to fix it. 9 | - cron: "0 0 1 */2 *" 10 | 11 | env: 12 | # Faster crates.io index checkout. 13 | CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse 14 | 15 | jobs: 16 | proof-params: 17 | runs-on: ubuntu-latest 18 | name: Generate proof parameters and cache them 19 | strategy: 20 | # Run the parameter generation in separate jobs, so that they can run in 21 | # parallel.
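      # A consuming job restores these artifacts through the composite
      # `proof-params-download` action defined above. A minimal sketch
      # (hedged: the exact job layout lives in `ci.yml` and may differ):
      #
      #   steps:
      #     - uses: actions/checkout@v4
      #     - uses: ./.github/actions/proof-params-download
      #       with:
      #         github-token: ${{ secrets.GITHUB_TOKEN }}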
22 | matrix: 23 | size: [2048, 4096, 16384, 32768] 24 | 25 | steps: 26 | - uses: actions/checkout@v4 27 | 28 | - name: Generate proof parameters 29 | run: cargo run --release --no-default-features --bin paramcache -- --sector-sizes=${{ matrix.size }} 30 | 31 | - name: Upload proof parameters as artifact 32 | uses: actions/upload-artifact@v4 33 | with: 34 | name: proof-params-v28-n-${{ hashFiles('filecoin-proofs/parameters.json') }}-${{ matrix.size }} 35 | path: /var/tmp/filecoin-proof-parameters/ 36 | compression-level: 0 37 | if-no-files-found: error 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | .criterion 5 | **/*.h 6 | heaptrack* 7 | .bencher 8 | *.profile 9 | *.heap 10 | rust-fil-proofs.config.toml 11 | -------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | This library is dual-licensed under Apache 2.0 and MIT terms. 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "fil-proofs-param", 4 | "fil-proofs-tooling", 5 | "filecoin-hashers", 6 | "filecoin-proofs", 7 | "fr32", 8 | "sha2raw", 9 | "storage-proofs-core", 10 | "storage-proofs-porep", 11 | "storage-proofs-post", 12 | "storage-proofs-update", 13 | ] 14 | 15 | [workspace.dependencies] 16 | fil-proofs-param = { path = "fil-proofs-param", default-features = false, version = "~14.0.0" } 17 | fil-proofs-tooling = { path = "fil-proofs-tooling", default-features = false, version = "~19.0.0" } 18 | filecoin-hashers = { path = "filecoin-hashers", default-features = false, version = "~14.0.0" } 19 | filecoin-proofs = { path = "filecoin-proofs", default-features = false, version = "~19.0.0" } 20 | fr32 = { path = "fr32", default-features = false, version = "~12.0.0" } 21 | sha2raw = { path = "sha2raw", default-features = false, version = "~14.0.0" } 22 | storage-proofs-core = { path = "storage-proofs-core", default-features = false, version = "~19.0.0" } 23 | storage-proofs-porep = { path = "storage-proofs-porep", default-features = false, version = "~19.0.0" } 24 | storage-proofs-post = { path = "storage-proofs-post", default-features = false, version = "~19.0.0" } 25 | storage-proofs-update = { path = "storage-proofs-update", default-features = false, version = "~19.0.0" } 26 | # Sorted alphabetically 27 | anyhow = "1.0.23" 28 | bellperson = "0.26.0" 29 | bincode = "1.1.2" 30 | blake2b_simd = "1.0.0" 31 | blake2s_simd = "1.0.0" 32 | blstrs = "0.7.0" 33 | byte-slice-cast = "1.0.0" 34 | byteorder = "1.3.4" 35 | cpu-time = "1.0.0" 36 | criterion = "0.3.2" 37 | dialoguer = "0.10.0" 38 | fdlimit = "0.3" 39 | ff = "0.13.0" 40 | fil_logger = "0.1.6" 41 | generic-array = "0.14.4" 42 | gperftools = "0.2" 43 | hex = "0.4.2" 44 | humansize = "1.1.0" 45 | itertools = "0.13" 46 | lazy_static = "1.2" 47 | log = "0.4.7" 48 | memmap2 = "0.5.6" 49 | merkletree = "0.23.0" 50 | neptune = { version = "11.0.0", features = ["bls", "arity2", "arity4", "arity8", "arity11", "arity16", "arity24", "arity36"] } 51 | num_cpus = "1.10.1" 52 | pretty_assertions = "1.2.0" 53 | rand = "0.8" 54 | rand_xorshift = "0.3.0" 55 | rayon = "1.3.0" 56 | serde = "1.0" 57 | serde_json = "1.0" 58 | sha2 = "0.10.2" 59 | 
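# Member crates inherit these pinned versions with the `workspace = true`
# shorthand, e.g. (a sketch; fil-proofs-param/Cargo.toml below does exactly this):
#
#   [dependencies]
#   anyhow.workspace = true
#   serde_json.workspace = true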
structopt = "0.3.12" 60 | tempfile = "3" 61 | thiserror = "2" 62 | typenum = "1.11.2" 63 | -------------------------------------------------------------------------------- /Dockerfile-ci: -------------------------------------------------------------------------------- 1 | # Dockerfile for CircleCI 2 | # build with 3 | # `docker build -t filecoin/rust:latest -f ./Dockerfile-ci .` 4 | # rebuild: `docker build --pull --no-cache -t filecoin/rust:latest -f ./Dockerfile-ci .` 5 | 6 | FROM debian:stretch 7 | 8 | # Some of the dependencies I need to build a few libraries, 9 | # personalize to your needs. You can use multi-stage builds 10 | # to produce a lightweight image. 11 | RUN apt-get update && \ 12 | apt-get install -y curl file gcc g++ git make openssh-client \ 13 | autoconf automake cmake libtool libcurl4-openssl-dev libssl-dev \ 14 | libelf-dev libdw-dev binutils-dev zlib1g-dev libiberty-dev wget \ 15 | xz-utils pkg-config python clang ocl-icd-opencl-dev libhwloc-dev 16 | 17 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 18 | 19 | ENV PATH "$PATH:/root/.cargo/bin" 20 | ENV RUSTFLAGS "-C link-dead-code" 21 | ENV CFG_RELEASE_CHANNEL "nightly" 22 | 23 | RUN bash -l -c 'echo $(rustc --print sysroot)/lib >> /etc/ld.so.conf' 24 | RUN bash -l -c 'echo /usr/local/lib >> /etc/ld.so.conf' 25 | RUN ldconfig 26 | -------------------------------------------------------------------------------- /Dockerfile-profile: -------------------------------------------------------------------------------- 1 | # How to build and run this Dockerfile: 2 | # 3 | # ``` 4 | # RUST_FIL_PROOFS=`pwd` # path to `rust-fil-proofs` 5 | # docker --log-level debug build --progress tty --file Dockerfile-profile --tag rust-cpu-profile . 6 | # docker run -it -v $RUST_FIL_PROOFS:/code/ rust-cpu-profile 7 | # ``` 8 | 9 | FROM rust 10 | 11 | # Get all the dependencies 12 | # ------------------------ 13 | 14 | # Copied from: github.com/filecoin-project/rust-fil-proofs/blob/master/Dockerfile-ci 15 | RUN apt-get update && \ 16 | apt-get install -y curl file gcc g++ git make openssh-client \ 17 | autoconf automake cmake libtool libcurl4-openssl-dev libssl-dev \ 18 | libelf-dev libdw-dev binutils-dev zlib1g-dev libiberty-dev wget \ 19 | xz-utils pkg-config python clang 20 | 21 | # `gperftools` and dependencies (`libunwind`) 22 | # ------------------------------------------- 23 | 24 | ENV GPERFTOOLS_VERSION="2.7" 25 | ENV LIBUNWIND_VERSION="0.99-beta" 26 | 27 | ENV HOME="/root" 28 | ENV DOWNLOADS=${HOME}/downloads 29 | RUN mkdir -p ${DOWNLOADS} 30 | RUN echo ${DOWNLOADS} 31 | WORKDIR ${DOWNLOADS} 32 | 33 | RUN wget http://download.savannah.gnu.org/releases/libunwind/libunwind-${LIBUNWIND_VERSION}.tar.gz --output-document ${DOWNLOADS}/libunwind-${LIBUNWIND_VERSION}.tar.gz 34 | RUN tar -xvf ${DOWNLOADS}/libunwind-${LIBUNWIND_VERSION}.tar.gz 35 | WORKDIR ${DOWNLOADS}/libunwind-${LIBUNWIND_VERSION} 36 | RUN ./configure 37 | RUN make 38 | RUN make install 39 | WORKDIR ${DOWNLOADS} 40 | 41 | RUN wget https://github.com/gperftools/gperftools/releases/download/gperftools-${GPERFTOOLS_VERSION}/gperftools-${GPERFTOOLS_VERSION}.tar.gz --output-document ${DOWNLOADS}/gperftools-${GPERFTOOLS_VERSION}.tar.gz 42 | RUN tar -xvf ${DOWNLOADS}/gperftools-${GPERFTOOLS_VERSION}.tar.gz 43 | WORKDIR ${DOWNLOADS}/gperftools-${GPERFTOOLS_VERSION} 44 | RUN ./configure 45 | RUN make install 46 | WORKDIR ${DOWNLOADS} 47 | 48 | ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib 49 | # FIXME: `gperftools` installs the library (`make install`) in 50 | # 
# `/usr/local/lib` by default, but Debian/Ubuntu don't look there; 51 | # for now, the correct `--prefix` should be added to the command. 52 | 53 | # Install latest toolchain used by `rust-fil-proofs` 54 | # -------------------------------------------------- 55 | 56 | RUN rustup default nightly-2019-07-15 57 | # FIXME: The latest version used should be dynamically obtained from the `rust-fil-proofs` repo 58 | # and not hard-coded here. 59 | 60 | # Ready to run 61 | # ------------ 62 | 63 | WORKDIR /code 64 | 65 | CMD \ 66 | cargo update \ 67 | && \ 68 | cargo build \ 69 | -p filecoin-proofs \ 70 | --release \ 71 | --example stacked \ 72 | --features \ 73 | cpu-profile \ 74 | -Z package-features \ 75 | && \ 76 | RUST_BACKTRACE=full \ 77 | RUST_LOG=trace \ 78 | target/release/examples/stacked \ 79 | --size 1024 \ 80 | && \ 81 | pprof target/release/examples/stacked replicate.profile || bash 82 | -------------------------------------------------------------------------------- /FUNDING.json: -------------------------------------------------------------------------------- 1 | { 2 | "drips": { 3 | "ethereum": { 4 | "ownedBy": "0xDDa061De7284C07B02bf26E12874171eDB95D987" 5 | }, 6 | "filecoin": { 7 | "ownedBy": "0xDDa061De7284C07B02bf26E12874171eDB95D987" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 Filecoin Project 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 4 | 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 8 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | For reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md) 6 | 7 | ## Reporting a non security bug 8 | 9 | For non-critical bugs, please simply file a GitHub issue on this repo. 10 | -------------------------------------------------------------------------------- /audits/Sigma-Prime-Protocol-Labs-Filecoin-Proofs-Security-Review-v2.1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-project/rust-fil-proofs/6ac97456910ec7487684f7deccc183ea16cc5e17/audits/Sigma-Prime-Protocol-Labs-Filecoin-Proofs-Security-Review-v2.1.pdf -------------------------------------------------------------------------------- /audits/protocolai-audit-20200728.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-project/rust-fil-proofs/6ac97456910ec7487684f7deccc183ea16cc5e17/audits/protocolai-audit-20200728.pdf -------------------------------------------------------------------------------- /audits/protocollabs-report-20211210.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-project/rust-fil-proofs/6ac97456910ec7487684f7deccc183ea16cc5e17/audits/protocollabs-report-20211210.pdf -------------------------------------------------------------------------------- /audits/synporepsecurityauditreport-05-2023.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-project/rust-fil-proofs/6ac97456910ec7487684f7deccc183ea16cc5e17/audits/synporepsecurityauditreport-05-2023.pdf -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | fn is_compiled_for_64_bit_arch() -> bool { 2 | cfg!(target_pointer_width = "64") 3 | } 4 | 5 | fn main() { 6 | assert!( 7 | is_compiled_for_64_bit_arch(), 8 | "must be built for 64-bit architectures" 9 | ); 10 | } 11 | -------------------------------------------------------------------------------- /fil-proofs-param/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fil-proofs-param" 3 | description = "Filecoin parameter cli tools." 
4 | version = "14.0.0" 5 | authors = ["dignifiedquire ", "laser ", "porcuquine "] 6 | license = "MIT OR Apache-2.0" 7 | edition = "2018" 8 | repository = "https://github.com/filecoin-project/rust-fil-proofs" 9 | readme = "README.md" 10 | 11 | [dependencies] 12 | filecoin-proofs.workspace = true 13 | storage-proofs-core.workspace = true 14 | storage-proofs-porep.workspace = true 15 | storage-proofs-post.workspace = true 16 | storage-proofs-update.workspace = true 17 | # Sorted alphabetically 18 | anyhow.workspace = true 19 | blake2b_simd.workspace = true 20 | dialoguer.workspace = true 21 | env_proxy = "0.4" 22 | fil_logger.workspace = true 23 | flate2 = { version = "1.0.9", features = ["rust_backend"] } 24 | gperftools = { workspace = true, optional = true } 25 | humansize.workspace = true 26 | indicatif = "0.16.2" 27 | itertools.workspace = true 28 | lazy_static.workspace = true 29 | log.workspace = true 30 | pbr = "1.0" 31 | rand.workspace = true 32 | reqwest = { version = "0.12", default-features = false, features = ["blocking", "native-tls-vendored"] } 33 | serde_json.workspace = true 34 | structopt.workspace = true 35 | tar = "0.4.26" 36 | 37 | [dev-dependencies] 38 | # Sorted alphabetically 39 | tempfile.workspace = true 40 | failure = "0.1.7" 41 | rexpect = "0.4.0" 42 | 43 | [features] 44 | default = ["opencl"] 45 | cpu-profile = ["gperftools"] 46 | heap-profile = ["gperftools/heap"] 47 | simd = ["storage-proofs-core/simd"] 48 | asm = ["storage-proofs-core/asm"] 49 | cuda = ["storage-proofs-core/cuda", "storage-proofs-porep/cuda", "storage-proofs-post/cuda", "storage-proofs-update/cuda"] 50 | opencl = ["storage-proofs-core/opencl", "storage-proofs-porep/opencl", "storage-proofs-post/opencl", "storage-proofs-update/opencl"] 51 | # This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD` 52 | # setting is ignored, no `TemporaryAux` file will be written. 53 | fixed-rows-to-discard = ["filecoin-proofs/fixed-rows-to-discard", "storage-proofs-core/fixed-rows-to-discard", "storage-proofs-porep/fixed-rows-to-discard", "storage-proofs-post/fixed-rows-to-discard", "storage-proofs-update/fixed-rows-to-discard"] 54 | -------------------------------------------------------------------------------- /fil-proofs-param/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /fil-proofs-param/README.md: -------------------------------------------------------------------------------- 1 | # Filecoin Parameters 2 | 3 | > Parameter-related utilities for Filecoin. 4 | 5 | 6 | Available tools are: 7 | 8 | - `paramcache` 9 | - `paramfetch` 10 | - `parampublish` 11 | - `fakeipfsadd` 12 | 13 | # Running `parampublish` with Mocked `ipfs` Binary 14 | 15 | ``` 16 | $ cargo build --bin fakeipfsadd --bin parampublish 17 | $ ./target/debug/parampublish --ipfs-bin=./target/debug/fakeipfsadd [-a] 18 | ``` 19 | 20 | ## License 21 | 22 | MIT or Apache 2.0 23 | -------------------------------------------------------------------------------- /fil-proofs-param/parameters.json: -------------------------------------------------------------------------------- 1 | ../parameters.json -------------------------------------------------------------------------------- /fil-proofs-param/scripts/pin-params.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | # pin-params.sh 5 | # 6 | # - Post the directory of params to cluster.ipfs.io 7 | # - Grab the CID for the previous params from proofs.filecoin.io 8 | # - TODO: Add the old params as a `prev` dir to the new params dir to keep them around. 9 | # - Pin the new cid on cluster 10 | # - Publish the new cid as a dnslink to proofs.filecoin.io 11 | # - The gateways will pin the new dir by checking proofs.filecoin.io hourly. 12 | # 13 | # Requires: 14 | # - `ipfs-cluster-ctl` - download from https://dist.ipfs.io/#ipfs-cluster-ctl 15 | # - `npx`, as provided by `npm` >= v6 16 | # - `ipfs` 17 | # 18 | # You _must_ provide the following env vars: 19 | # 20 | # - CLUSTER_TOKEN - the basic auth string as "username:password" 21 | # - DNSIMPLE_TOKEN - an api key for a dnsimple account with a zone for proofs.filecoin.io 22 | # 23 | # Optional: you can override the input dir by passing a path as the first param. 24 | # 25 | # Usage: 26 | # CLUSTER_TOKEN="user:pass" DNSIMPLE_TOKEN="xyz" ./pin-params.sh 27 | # 28 | 29 | INPUT_DIR=${1:-"/var/tmp/filecoin-proof-parameters"} 30 | : "${CLUSTER_TOKEN:?please set CLUSTER_TOKEN env var}" 31 | : "${DNSIMPLE_TOKEN:?please set DNSIMPLE_TOKEN env var}" 32 | 33 | echo "checking $INPUT_DIR" 34 | 35 | # Grab the version number from the files in the dir. 36 | # Fail if there is more than one version, or if it doesn't match a version string like vNN, e.g. v12 37 | if ls -A $INPUT_DIR &> /dev/null; then 38 | # version will be a list if there is more than one... 39 | VERSION=$(ls $INPUT_DIR | sort -r | cut -c 1-3 | uniq) 40 | echo found $VERSION 41 | 42 | if [[ $(echo $VERSION | wc -w) -eq 1 && $VERSION =~ ^v[0-9]+ ]]; then 43 | # we have 1 version, let's go... 44 | COUNT=$(ls -l $INPUT_DIR | wc -l | xargs echo -n) 45 | echo "adding $COUNT files to ipfs..." 46 | 47 | else 48 | echo "Error: input dir should contain just the current version of the params" 49 | exit 1 50 | fi 51 | else 52 | echo "Error: input dir '$INPUT_DIR' should contain the params" 53 | exit 1 54 | fi 55 | 56 | CLUSTER_HOST="/dnsaddr/filecoin.collab.ipfscluster.io" 57 | ADDITIONAL_CLUSTER_HOST="/dnsaddr/cluster.ipfs.io" 58 | CLUSTER_PIN_NAME="filecoin-proof-parameters-$VERSION" 59 | DNSLINK_DOMAIN="proofs.filecoin.io" 60 | 61 | # Add and pin to collab cluster. After this it will be on 1 peer, and pin requests 62 | # will have been triggered for the others. 
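# (For reference: `add --quieter` makes `ipfs-cluster-ctl` print only the
# final root CID, which is what gets captured into ROOT_CID below.)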
63 | ROOT_CID=$(ipfs-cluster-ctl \ 64 | --host $CLUSTER_HOST \ 65 | --basic-auth $CLUSTER_TOKEN \ 66 | add --quieter \ 67 | --local \ 68 | --name $CLUSTER_PIN_NAME \ 69 | --recursive $INPUT_DIR ) 70 | 71 | echo "ok! root cid is $ROOT_CID" 72 | 73 | # Pin to main cluster additionally. 74 | ipfs-cluster-ctl \ 75 | --host $ADDITIONAL_CLUSTER_HOST \ 76 | --basic-auth $CLUSTER_TOKEN \ 77 | pin add $ROOT_CID \ 78 | --no-status 79 | 80 | echo "ok! Pin request sent to additional cluster" 81 | 82 | # Publish the new cid to the dnslink 83 | npx dnslink-dnsimple --domain $DNSLINK_DOMAIN --link "/ipfs/$ROOT_CID" 84 | 85 | echo "done!" 86 | -------------------------------------------------------------------------------- /fil-proofs-param/scripts/verify-parameters-json.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script verifies that a given `.params` file (and the corresponding 4 | # `.vk` file) is part of `parameters.json` and has the correct digest. 5 | # 6 | # This script runs on POSIX-compatible shells. You need to have standard 7 | # utilities (`basename`, `head`, `grep`) as well as `jq` and `b2sum` 8 | # installed. 9 | # 10 | # The inputs are a `parameters.json` file and a `.params` file. 11 | 12 | if [ "${#}" -ne 2 ]; then 13 | echo "Verify that a given .params file (and the corresponding .vk file)" 14 | echo "is part of parameters.json and has the correct digest." 15 | echo "" 16 | echo "Usage: $(basename "${0}") parameters.json parameter-file.params" 17 | exit 1 18 | fi 19 | 20 | if ! command -v b2sum >/dev/null 2>&1 21 | then 22 | echo "ERROR: 'b2sum' needs to be installed." 23 | exit 1 24 | fi 25 | 26 | if ! command -v jq >/dev/null 2>&1 27 | then 28 | echo "ERROR: 'jq' needs to be installed." 29 | exit 1 30 | fi 31 | 32 | PARAMS_JSON=${1} 33 | PARAMS_ID="${2%.*}" 34 | 35 | PARAMS_FILE="${PARAMS_ID}.params" 36 | VK_FILE="${PARAMS_ID}.vk" 37 | 38 | # Transforms the `parameters.json` into a string that consists of digest and 39 | # filename pairs. 40 | PARAMS_JSON_DATA=$(jq -r 'to_entries[] | "\(.value.digest) \(.key)"' "${PARAMS_JSON}") 41 | 42 | VK_HASH_SHORT=$(b2sum "${VK_FILE}"|head --bytes 32) 43 | if echo "${PARAMS_JSON_DATA}"|grep --silent "${VK_HASH_SHORT} ${VK_FILE}"; then 44 | echo "ok Correct digest of VK file was found in ${PARAMS_JSON}." 45 | else 46 | echo "not ok ERROR: Digest of VK file was *not* found/correct in ${PARAMS_JSON}." 47 | exit 1 48 | fi 49 | 50 | PARAMS_HASH_SHORT=$(b2sum "${PARAMS_FILE}"|head --bytes 32) 51 | if echo "${PARAMS_JSON_DATA}"|grep --silent "${PARAMS_HASH_SHORT} ${PARAMS_FILE}"; then 52 | echo "ok Correct digest of params file was found in ${PARAMS_JSON}." 53 | else 54 | echo "not ok ERROR: Digest of params file was *not* found/correct in ${PARAMS_JSON}." 55 | exit 1 56 | fi 57 | 58 | echo "# Verification successfully completed." 59 | -------------------------------------------------------------------------------- /fil-proofs-param/src/bin/fakeipfsadd.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io; 3 | 4 | use blake2b_simd::State as Blake2b; 5 | use structopt::StructOpt; 6 | 7 | #[derive(Debug, StructOpt)] 8 | #[structopt( 9 | name = "fakeipfsadd", 10 | version = "0.1", 11 | about = "This program is used to simulate the `ipfs add` command while testing. It accepts a \ 12 | path to a file and writes 32 characters of its hex-encoded BLAKE2b checksum to stdout. 
\ 13 | Note that the real `ipfs add` command computes and emits a CID." 14 | )] 15 | enum Cli { 16 | Add { 17 | #[structopt(help = "Positional argument for the path to the file to add.")] 18 | file_path: String, 19 | #[structopt(short = "Q", help = "Simulates the -Q argument to `ipfs add`.")] 20 | _quieter: bool, 21 | }, 22 | } 23 | 24 | impl Cli { 25 | fn file_path(&self) -> &str { 26 | match self { 27 | Cli::Add { file_path, .. } => file_path, 28 | } 29 | } 30 | } 31 | 32 | pub fn main() { 33 | let cli = Cli::from_args(); 34 | 35 | let mut src_file = File::open(cli.file_path()) 36 | .unwrap_or_else(|_| panic!("failed to open file: {}", cli.file_path())); 37 | 38 | let mut hasher = Blake2b::new(); 39 | io::copy(&mut src_file, &mut hasher).expect("failed to write BLAKE2b bytes to hasher"); 40 | let hex_string: String = hasher.finalize().to_hex()[..32].into(); 41 | println!("{}", hex_string) 42 | } 43 | -------------------------------------------------------------------------------- /fil-proofs-param/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all, clippy::perf, clippy::correctness)] 2 | #![warn(clippy::unwrap_used)] 3 | -------------------------------------------------------------------------------- /fil-proofs-param/tests/paramfetch/support/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod session; 2 | -------------------------------------------------------------------------------- /fil-proofs-param/tests/parampublish/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod prompts_to_publish; 2 | pub mod read_metadata_files; 3 | pub mod support; 4 | pub mod write_json_manifest; 5 | -------------------------------------------------------------------------------- /fil-proofs-param/tests/parampublish/prompts_to_publish.rs: -------------------------------------------------------------------------------- 1 | use failure::Error as FailureError; 2 | use storage_proofs_core::parameter_cache::CacheEntryMetadata; 3 | 4 | use crate::parampublish::support::session::ParamPublishSessionBuilder; 5 | 6 | #[test] 7 | fn ignores_files_unrecognized_extensions() -> Result<(), FailureError> { 8 | let to_create = vec!["v1-aaa.vk", "v1-aaa.params", "v1-bbb.txt", "ddd"]; 9 | 10 | let (mut session, _) = ParamPublishSessionBuilder::new() 11 | .with_session_timeout_ms(1000) 12 | .with_files(&to_create) 13 | .with_metadata("v1-aaa.meta", &CacheEntryMetadata { sector_size: 1024 }) 14 | .list_all_files() 15 | .build(); 16 | 17 | session.exp_string("found 3 param files in cache dir")?; 18 | session.exp_string("found 1 file triples")?; 19 | session.exp_string("Select files to publish")?; 20 | session.exp_string("v1-aaa.params (1 KiB)")?; 21 | session.exp_string("v1-aaa.vk (1 KiB)")?; 22 | session.send_line("")?; 23 | session.exp_string("no params selected, exiting")?; 24 | 25 | std::fs::remove_dir_all(session._cache_dir.path())?; 26 | 27 | Ok(()) 28 | } 29 | 30 | #[test] 31 | fn displays_sector_size_in_prompt() -> Result<(), FailureError> { 32 | let to_create = vec!["v1-aaa.vk", "v1-aaa.params", "v1-xxx.vk", "v1-xxx.params"]; 33 | 34 | let (mut session, _) = ParamPublishSessionBuilder::new() 35 | .with_session_timeout_ms(1000) 36 | .with_files(&to_create) 37 | .with_metadata("v1-aaa.meta", &CacheEntryMetadata { sector_size: 2048 }) 38 | .with_metadata("v1-xxx.meta", &CacheEntryMetadata { sector_size: 1024 }) 39 | .list_all_files() 40 | .build(); 41 | 42 | 
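        // Four .vk/.params files plus the two generated .meta files add up to
        // the six param files (and two complete triples) expected below.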
session.exp_string("found 6 param files in cache dir")?; 43 | session.exp_string("found 2 file triples")?; 44 | session.exp_string("Select files to publish")?; 45 | session.exp_string("v1-xxx.params (1 KiB)")?; 46 | session.exp_string("v1-xxx.vk (1 KiB)")?; 47 | session.exp_string("v1-aaa.params (2 KiB)")?; 48 | session.exp_string("v1-aaa.vk (2 KiB)")?; 49 | session.send_line("")?; 50 | session.exp_string("no params selected, exiting")?; 51 | 52 | std::fs::remove_dir_all(session._cache_dir.path())?; 53 | 54 | Ok(()) 55 | } 56 | 57 | #[test] 58 | fn no_assets_no_prompt() -> Result<(), FailureError> { 59 | let (mut session, _) = ParamPublishSessionBuilder::new() 60 | .with_session_timeout_ms(1000) 61 | .build(); 62 | 63 | session.exp_string("found 0 param files in cache dir")?; 64 | session.exp_string("no file triples found, exiting")?; 65 | 66 | std::fs::remove_dir_all(session._cache_dir.path())?; 67 | 68 | Ok(()) 69 | } 70 | -------------------------------------------------------------------------------- /fil-proofs-param/tests/parampublish/read_metadata_files.rs: -------------------------------------------------------------------------------- 1 | use failure::Error as FailureError; 2 | 3 | use crate::parampublish::support::session::ParamPublishSessionBuilder; 4 | 5 | #[test] 6 | fn fails_if_missing_metadata_file() -> Result<(), FailureError> { 7 | // missing the corresponding .meta file 8 | let filenames = vec!["v12-aaa.vk", "v12-aaa.params"]; 9 | 10 | let (mut session, _) = ParamPublishSessionBuilder::new() 11 | .with_session_timeout_ms(1000) 12 | .with_files(&filenames) 13 | .build(); 14 | 15 | session.exp_string("found 2 param files in cache dir")?; 16 | session.exp_string("no file triples found, exiting")?; 17 | 18 | std::fs::remove_dir_all(session._cache_dir.path())?; 19 | 20 | Ok(()) 21 | } 22 | 23 | #[test] 24 | fn fails_if_malformed_metadata_file() -> Result<(), FailureError> { 25 | // A malformed v11-aaa.meta file. 
26 | let mut malformed: &[u8] = &[42]; 27 | 28 | let (mut session, _) = ParamPublishSessionBuilder::new() 29 | .with_session_timeout_ms(1000) 30 | .with_files(&["v11-aaa.vk", "v11-aaa.params"]) 31 | .with_file_and_bytes("v11-aaa.meta", &mut malformed) 32 | .build(); 33 | 34 | session.exp_string("found 3 param files in cache dir")?; 35 | session.exp_string("found 1 file triples")?; 36 | session.exp_string("failed to parse .meta file")?; 37 | session.exp_string("exiting")?; 38 | 39 | std::fs::remove_dir_all(session._cache_dir.path())?; 40 | 41 | Ok(()) 42 | } 43 | -------------------------------------------------------------------------------- /fil-proofs-param/tests/parampublish/support/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod session; 2 | -------------------------------------------------------------------------------- /fil-proofs-param/tests/parampublish/write_json_manifest.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::fs::File; 3 | use std::path::Path; 4 | 5 | use storage_proofs_core::parameter_cache::{CacheEntryMetadata, ParameterData}; 6 | 7 | use crate::{ 8 | parampublish::support::session::ParamPublishSessionBuilder, 9 | support::{tmp_manifest, FakeIpfsBin}, 10 | }; 11 | 12 | #[test] 13 | fn writes_json_manifest() -> Result<(), failure::Error> { 14 | let filenames = vec!["v10-aaa.vk", "v10-aaa.params"]; 15 | 16 | let manifest_path = tmp_manifest(None)?; 17 | 18 | let ipfs = FakeIpfsBin::new(); 19 | 20 | let (mut session, files_in_cache) = ParamPublishSessionBuilder::new() 21 | .with_session_timeout_ms(1000) 22 | .with_files(&filenames) 23 | .with_metadata("v10-aaa.meta", &CacheEntryMetadata { sector_size: 1234 }) 24 | .write_manifest_to(manifest_path.clone()) 25 | .with_ipfs_bin(&ipfs) 26 | .build(); 27 | 28 | // compute checksums from files added to cache to compare with 29 | // manifest entries after publishing completes 30 | let cache_checksums = filename_to_checksum(&ipfs, files_in_cache.as_ref()); 31 | 32 | session.exp_string("Select a version")?; 33 | // There is only one version of parameters, accept that one 34 | session.send_line("")?; 35 | //session.exp_regex(".*Select the sizes to publish.*")?; 36 | session.exp_string("Select sizes to publish")?; 37 | // There is only one size, accept that one 38 | session.send_line(" ")?; 39 | 40 | // wait for confirmation... 
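    // (parampublish reports how many files it will publish and then confirms
    // completion; the expectations below consume both messages from the pty.)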
41 | session.exp_string("2 files to publish")?; 42 | session.exp_string("finished publishing files")?; 43 | 44 | // read the manifest file from disk and verify that it is well 45 | // formed and contains the expected keys 46 | let manifest_file = File::open(&manifest_path)?; 47 | let manifest_map: BTreeMap = serde_json::from_reader(manifest_file)?; 48 | 49 | // ensure that each filename exists in the manifest and that its 50 | // cid matches that which was produced from the `ipfs add` command 51 | for filename in filenames.iter().cloned() { 52 | if let (Some(m_entry), Some(expected)) = 53 | (manifest_map.get(filename), cache_checksums.get(filename)) 54 | { 55 | assert_eq!( 56 | &m_entry.cid, expected, 57 | "manifest does not include digest produced by ipfs add for {}", 58 | filename 59 | ); 60 | } else { 61 | panic!("{} must be present in both manifest and cache", filename); 62 | } 63 | } 64 | 65 | let parent_dir = std::path::Path::new(&manifest_path) 66 | .parent() 67 | .expect("failed to get parent dir"); 68 | std::fs::remove_file(&manifest_path)?; 69 | std::fs::remove_dir(parent_dir)?; 70 | std::fs::remove_dir_all(session._cache_dir.path())?; 71 | 72 | Ok(()) 73 | } 74 | 75 | /// Produce a map of filename (not path) to the checksum produced by the ipfs 76 | /// binary. 77 | fn filename_to_checksum>( 78 | ipfs_bin: &FakeIpfsBin, 79 | paths: &[P], 80 | ) -> BTreeMap { 81 | paths.iter().fold(BTreeMap::new(), |mut acc, item| { 82 | acc.insert( 83 | item.as_ref() 84 | .file_name() 85 | .and_then(|os_str| os_str.to_str()) 86 | .map(|s| s.to_string()) 87 | .unwrap_or_else(|| "".to_string()), 88 | ipfs_bin 89 | .compute_checksum(item) 90 | .expect("failed to compute checksum"), 91 | ); 92 | acc 93 | }) 94 | } 95 | -------------------------------------------------------------------------------- /fil-proofs-param/tests/suite.rs: -------------------------------------------------------------------------------- 1 | mod paramfetch; 2 | mod parampublish; 3 | mod support; 4 | -------------------------------------------------------------------------------- /fil-proofs-param/tests/support/mod.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::env; 3 | use std::fs::File; 4 | use std::path::{Path, PathBuf}; 5 | use std::process::Command; 6 | use std::thread; 7 | use std::time::Duration; 8 | 9 | use failure::format_err; 10 | use rexpect::{session::PtyReplSession, spawn_bash}; 11 | use storage_proofs_core::parameter_cache::ParameterData; 12 | use tempfile::tempdir; 13 | 14 | pub struct FakeIpfsBin { 15 | bin_path: PathBuf, 16 | } 17 | 18 | impl FakeIpfsBin { 19 | pub fn new() -> FakeIpfsBin { 20 | FakeIpfsBin { 21 | bin_path: cargo_bin("fakeipfsadd"), 22 | } 23 | } 24 | 25 | pub fn compute_checksum>(&self, path: P) -> Result { 26 | let output = Command::new(&self.bin_path) 27 | .arg("add") 28 | .arg("-Q") 29 | .arg(path.as_ref()) 30 | .output()?; 31 | 32 | if !output.status.success() { 33 | Err(format_err!( 34 | "{:?} produced non-zero exit code", 35 | &self.bin_path 36 | )) 37 | } else { 38 | Ok(String::from_utf8(output.stdout)?.trim().to_string()) 39 | } 40 | } 41 | 42 | pub fn bin_path(&self) -> &Path { 43 | &self.bin_path 44 | } 45 | } 46 | 47 | /// Get the path of the target directory. 
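/// (Derived from the test executable's own location: its file name is popped,
/// as is a trailing `deps` component when present.)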
48 | pub fn target_dir() -> PathBuf { 49 | env::current_exe() 50 | .ok() 51 | .map(|mut path| { 52 | path.pop(); 53 | if path.ends_with("deps") { 54 | path.pop(); 55 | } 56 | path 57 | }) 58 | .expect("failed to get current exe path") 59 | } 60 | 61 | /// Look up the path to a cargo-built binary within an integration test. 62 | pub fn cargo_bin>(name: S) -> PathBuf { 63 | target_dir().join(format!("{}{}", name.as_ref(), env::consts::EXE_SUFFIX)) 64 | } 65 | 66 | /// Spawn a pty and, if an error is produced, retry with linear backoff (to 5s). 67 | pub fn spawn_bash_with_retries( 68 | retries: u8, 69 | timeout: Option, 70 | ) -> Result { 71 | let result = spawn_bash(timeout); 72 | if result.is_ok() || retries == 0 { 73 | result 74 | } else { 75 | let sleep_d = Duration::from_millis(5000 / u64::from(retries)); 76 | eprintln!( 77 | "failed to spawn pty: {} retries remaining - sleeping {:?}", 78 | retries, sleep_d 79 | ); 80 | thread::sleep(sleep_d); 81 | spawn_bash_with_retries(retries - 1, timeout) 82 | } 83 | } 84 | 85 | /// Create a parameters.json manifest file in a temp directory and return its 86 | /// path. 87 | pub fn tmp_manifest( 88 | opt_manifest: Option>, 89 | ) -> Result { 90 | let manifest_dir = tempdir()?; 91 | let mut pbuf = manifest_dir.keep(); 92 | pbuf.push("parameters.json"); 93 | 94 | let mut file = File::create(&pbuf)?; 95 | if let Some(map) = opt_manifest { 96 | // JSON encode the manifest and write bytes to temp file 97 | serde_json::to_writer(&mut file, &map)?; 98 | } 99 | 100 | Ok(pbuf) 101 | } 102 | -------------------------------------------------------------------------------- /fil-proofs-tooling/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | .criterion 5 | **/*.h 6 | heaptrack* 7 | .bencher 8 | logging-toolkit 9 | *.profile 10 | *.heap 11 | rust-fil-proofs.config.toml 12 | -------------------------------------------------------------------------------- /fil-proofs-tooling/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fil-proofs-tooling" 3 | description = "Tooling for rust-fil-proofs" 4 | version = "19.0.0" 5 | authors = ["dignifiedquire "] 6 | license = "MIT OR Apache-2.0" 7 | publish = false 8 | edition = "2018" 9 | repository = "https://github.com/filecoin-project/rust-fil-proofs" 10 | readme = "README.md" 11 | 12 | [dependencies] 13 | storage-proofs-core.workspace = true 14 | storage-proofs-porep.workspace = true 15 | storage-proofs-post.workspace = true 16 | filecoin-proofs.workspace = true 17 | filecoin-hashers = { workspace = true, features = ["poseidon", "blake2s", "sha256"] } 18 | # Sorted alphabetically 19 | anyhow.workspace = true 20 | bellperson.workspace = true 21 | bincode.workspace = true 22 | blake2s_simd.workspace = true 23 | blstrs.workspace = true 24 | byte-unit = "4.0.14" 25 | bytefmt = "0.1.7" 26 | chrono = { version = "0.4.7", features = ["serde"] } 27 | clap = { version = "3.1.6", features = ["derive"] } 28 | commandspec = "0.12.2" 29 | cpu-time.workspace = true 30 | dialoguer.workspace = true 31 | fdlimit.workspace = true 32 | fil_logger.workspace = true 33 | flexi_logger = "0.22.3" 34 | generic-array.workspace = true 35 | humansize.workspace = true 36 | log.workspace = true 37 | memmap2.workspace = true 38 | merkletree.workspace = true 39 | rand.workspace = true 40 | rand_xorshift.workspace = true 41 | rayon.workspace = true 42 | regex = "1.3.7" 43 | serde = { workspace = true, 
features = ["derive"] } 44 | serde_json.workspace = true 45 | structopt.workspace = true 46 | sysinfo = { version = "0.28.4", default-features = false } 47 | tempfile.workspace = true 48 | time = "0.3.9" 49 | typenum.workspace = true 50 | 51 | [build-dependencies] 52 | vergen = { version = "8.1.1", features = ["build", "git", "gitcl"] } 53 | 54 | [features] 55 | default = ["opencl", "measurements"] 56 | cuda = [ 57 | "storage-proofs-core/cuda", 58 | "storage-proofs-porep/cuda", 59 | "storage-proofs-post/cuda", 60 | "filecoin-proofs/cuda", 61 | "bellperson/cuda", 62 | "filecoin-hashers/cuda", 63 | ] 64 | opencl = [ 65 | "storage-proofs-core/opencl", 66 | "storage-proofs-porep/opencl", 67 | "storage-proofs-post/opencl", 68 | "filecoin-proofs/opencl", 69 | "bellperson/opencl", 70 | "filecoin-hashers/opencl", 71 | ] 72 | measurements = ["storage-proofs-core/measurements"] 73 | profile = ["storage-proofs-core/profile", "measurements"] 74 | # This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD` 75 | # setting is ignored, no `TemporaryAux` file will be written. 76 | fixed-rows-to-discard = [ 77 | "filecoin-proofs/fixed-rows-to-discard", 78 | "storage-proofs-core/fixed-rows-to-discard", 79 | "storage-proofs-porep/fixed-rows-to-discard", 80 | "storage-proofs-post/fixed-rows-to-discard", 81 | ] 82 | 83 | [target.'cfg(target_arch = "x86_64")'.dependencies] 84 | raw-cpuid = "10.3.0" 85 | -------------------------------------------------------------------------------- /fil-proofs-tooling/LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 Filecoin Project 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 4 | 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 8 | -------------------------------------------------------------------------------- /fil-proofs-tooling/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /fil-proofs-tooling/build.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use vergen::EmitBuilder; 3 | 4 | fn main() -> Result<(), Box> { 5 | // Emits the `VERGEN_GIT_SHA` and `VERGEN_GIT_COMMIT_TIMESTAMP` environment variables. 6 | EmitBuilder::builder().all_git().emit()?; 7 | Ok(()) 8 | } 9 | -------------------------------------------------------------------------------- /fil-proofs-tooling/scripts/aggregate-benchmarks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | stacked_path=$1 6 | micro_path=$2 7 | hash_constraints_path=$3 8 | window_post_path=$4 9 | 10 | jq --sort-keys -s '{ benchmarks: { "stacked-benchmarks": { outputs: { "max-resident-set-size-kb": .[0] } } } } * .[1]' \ 11 | <(jq '.["max-resident-set-size-kb"]' $stacked_path) \ 12 | <(jq -s '.[0] * { benchmarks: { "hash-constraints": .[1], "stacked-benchmarks": .[2], "micro-benchmarks": .[3], "window-post-benchmarks": .[4] } }' \ 13 | <(jq 'del (.benchmarks)' $micro_path) \ 14 | <(jq '.benchmarks' $hash_constraints_path) \ 15 | <(jq '.benchmarks' $stacked_path) \ 16 | <(jq '.benchmarks' $micro_path) \ 17 | <(jq '.benchmarks' $window_post_path)) 18 | -------------------------------------------------------------------------------- /fil-proofs-tooling/scripts/benchy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | which jq >/dev/null || { printf '%s\n' "error: jq" >&2; exit 1; } 4 | 5 | BENCHY_STDOUT=$(mktemp) 6 | GTIME_STDERR=$(mktemp) 7 | JQ_STDERR=$(mktemp) 8 | 9 | GTIME_BIN="env time" 10 | GTIME_ARG="-f '{ \"max-resident-set-size-kb\": %M }' cargo run --quiet --bin benchy --release -- ${@}" 11 | 12 | if [[ $(env time --version 2>&1) != *"GNU"* ]]; then 13 | if [[ $(/usr/bin/time --version 2>&1) != *"GNU"* ]]; then 14 | if [[ $(env gtime --version 2>&1) != *"GNU"* ]]; then 15 | printf '%s\n' "error: GNU time not installed" >&2 16 | exit 1 17 | else 18 | GTIME_BIN="gtime" 19 | fi 20 | else 21 | GTIME_BIN="/usr/bin/time" 22 | fi 23 | fi 24 | 25 | CMD="${GTIME_BIN} ${GTIME_ARG}" 26 | 27 | eval "RUST_BACKTRACE=1 RUSTFLAGS=\"-Awarnings -C target-cpu=native\" ${CMD}" > $BENCHY_STDOUT 2> $GTIME_STDERR 28 | 29 | GTIME_EXIT_CODE=$? 30 | 31 | jq -s '.[0] * .[1]' $BENCHY_STDOUT $GTIME_STDERR 2> $JQ_STDERR 32 | 33 | JQ_EXIT_CODE=$? 34 | 35 | if [[ ! $GTIME_EXIT_CODE -eq 0 || ! 
$JQ_EXIT_CODE -eq 0 ]]; then 36 | >&2 echo "*********************************************" 37 | >&2 echo "* benchy failed - dumping debug information *" 38 | >&2 echo "*********************************************" 39 | >&2 echo "" 40 | >&2 echo "" 41 | >&2 echo "${CMD}" 42 | >&2 echo "" 43 | >&2 echo "" 44 | >&2 echo "" 45 | >&2 echo "$(cat $GTIME_STDERR)" 46 | >&2 echo "" 47 | >&2 echo "" 48 | >&2 echo "" 49 | >&2 echo "$(cat $BENCHY_STDOUT)" 50 | >&2 echo "" 51 | >&2 echo "" 52 | >&2 echo "" 53 | >&2 echo "$(cat $JQ_STDERR)" 54 | >&2 echo "" 55 | exit 1 56 | fi 57 | -------------------------------------------------------------------------------- /fil-proofs-tooling/scripts/micro.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | MICRO_SDERR=$(mktemp) 4 | MICRO_SDOUT=$(mktemp) 5 | JQ_STDERR=$(mktemp) 6 | 7 | CMD="cargo run --bin micro --release ${@}" 8 | 9 | eval "RUST_BACKTRACE=1 RUSTFLAGS=\"-Awarnings -C target-cpu=native\" ${CMD}" 1> $MICRO_SDOUT 2> $MICRO_SDERR 10 | 11 | MICRO_EXIT_CODE=$? 12 | 13 | cat $MICRO_SDOUT | jq '.' 2> $JQ_STDERR 14 | 15 | JQ_EXIT_CODE=$? 16 | 17 | if [[ ! $MICRO_EXIT_CODE -eq 0 || ! $JQ_EXIT_CODE -eq 0 ]]; then 18 | >&2 echo "********************************************" 19 | >&2 echo "* micro failed - dumping debug information *" 20 | >&2 echo "********************************************" 21 | >&2 echo "" 22 | >&2 echo "" 23 | >&2 echo "${CMD}" 24 | >&2 echo "" 25 | >&2 echo "" 26 | >&2 echo "" 27 | >&2 echo "$(cat $MICRO_SDERR)" 28 | >&2 echo "" 29 | >&2 echo "" 30 | >&2 echo "" 31 | >&2 echo "$(cat $MICRO_SDOUT)" 32 | >&2 echo "" 33 | >&2 echo "" 34 | >&2 echo "" 35 | >&2 echo "$(cat $JQ_STDERR)" 36 | >&2 echo "" 37 | exit 1 38 | fi 39 | -------------------------------------------------------------------------------- /fil-proofs-tooling/scripts/retry.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Inspired by https://gist.github.com/reacocard/28611bfaa2395072119464521d48729a 4 | 5 | set -o errexit 6 | set -o nounset 7 | set -o pipefail 8 | 9 | # Retry a command on a particular exit code, up to a max number of attempts, 10 | # with exponential backoff. 11 | # Invocation: 12 | # err_retry exit_code attempts sleep_multiplier 13 | # exit_code: The exit code to retry on. 14 | # attempts: The number of attempts to make. 15 | # sleep_millis: Multiplier for sleep between attempts. Examples: 16 | # If multiplier is 1000, sleep intervals are 1, 4, 9, 16, etc. seconds. 17 | # If multiplier is 5000, sleep intervals are 5, 20, 45, 80, 125, etc. seconds. 18 | 19 | exit_code=$1 20 | attempts=$2 21 | sleep_millis=$3 22 | shift 3 23 | 24 | for attempt in `seq 1 $attempts`; do 25 | # This weird construction lets us capture return codes under -o errexit 26 | "$@" && rc=$? || rc=$? 27 | 28 | if [[ ! 
$rc -eq $exit_code ]]; then 29 | exit $rc 30 | fi 31 | 32 | if [[ $attempt -eq $attempts ]]; then 33 | exit $rc 34 | fi 35 | 36 | sleep_ms="$(($attempt * $attempt * $sleep_millis))" 37 | 38 | sleep_seconds=$(echo "scale=2; ${sleep_ms}/1000" | bc) 39 | 40 | (>&2 echo "sleeping ${sleep_seconds}s and then retrying ($((attempt + 1))/${attempts})") 41 | 42 | sleep "${sleep_seconds}" 43 | done 44 | -------------------------------------------------------------------------------- /fil-proofs-tooling/scripts/run-remote.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CMDS=$(cat <<EOF 18 | 19 | # Make sure hwloc library is available on the remote host. 20 | apt-get -y -q install libhwloc-dev > /dev/null 2>&1 21 | 22 | # Make sure rust is installed on the remote host. 23 | curl https://sh.rustup.rs -sSf | sh -s -- -y > /dev/null 2>&1 24 | source $HOME/.cargo/env > /dev/null 2>&1 25 | 26 | git clone -b $1 --single-branch https://github.com/filecoin-project/rust-fil-proofs.git \$_metrics_dir || true 27 | 28 | cd \$_metrics_dir 29 | 30 | ./fil-proofs-tooling/scripts/retry.sh 42 10 60000 \ 31 | ./fil-proofs-tooling/scripts/with-lock.sh 42 /tmp/metrics.lock \ 32 | ./fil-proofs-tooling/scripts/with-dots.sh \ 33 | ${@:3} 34 | EOF 35 | ) 36 | 37 | ssh -q $2 "$CMDS" 38 | -------------------------------------------------------------------------------- /fil-proofs-tooling/scripts/with-dots.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | trap cleanup EXIT 4 | 5 | cleanup() { 6 | kill $DOT_PID 7 | } 8 | 9 | ( 10 | sleep 1 11 | while true; do 12 | (printf "." >&2) 13 | sleep 1 14 | done 15 | ) & 16 | DOT_PID=$! 17 | 18 | "$@" 19 | -------------------------------------------------------------------------------- /fil-proofs-tooling/scripts/with-lock.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Inspired by http://mywiki.wooledge.org/BashFAQ/045 4 | 5 | failure_code=$1 6 | lockdir=$2 7 | shift 2 8 | 9 | # Check to make sure that the process which owns the lock, if one exists, is 10 | # still alive. If the process is not alive, release the lock. 11 | for lockdir_pid in $(find "$lockdir" -type f -exec basename {} \; 2> /dev/null) 12 | do 13 | if ! ps -p "${lockdir_pid}" > /dev/null 14 | then 15 | (>&2 echo "cleaning up leaked lock (pid=${lockdir_pid}, path=${lockdir})") 16 | rm -rf "${lockdir}" 17 | fi 18 | done 19 | 20 | if mkdir "$lockdir" > /dev/null 2>&1 21 | then 22 | (>&2 echo "successfully acquired lock (pid=$$, path=${lockdir})") 23 | 24 | # Create a file to track the process id that acquired the lock. This 25 | # is used to prevent leaks if the lock isn't relinquished correctly. 26 | touch "$lockdir/$$" 27 | 28 | # Unlock (by removing dir and pid file) when the script finishes. 
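# An EXIT trap also runs when bash is terminated by a catchable signal such as TERM, so the lock is only leaked on a hard kill; the stale-pid cleanup above recovers from that case.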
29 | trap '(>&2 echo "relinquishing lock (${lockdir})"); rm -rf "$lockdir"' EXIT 30 | 31 | # Execute command 32 | "$@" 33 | else 34 | (>&2 echo "failed to acquire lock (path=${lockdir})") 35 | exit "$failure_code" 36 | fi 37 | -------------------------------------------------------------------------------- /fil-proofs-tooling/src/bin/benchy/hash_fns.rs: -------------------------------------------------------------------------------- 1 | use bellperson::gadgets::boolean::Boolean; 2 | use bellperson::util_cs::test_cs::TestConstraintSystem; 3 | use bellperson::ConstraintSystem; 4 | use blstrs::Scalar as Fr; 5 | use fil_proofs_tooling::metadata::Metadata; 6 | use rand::RngCore; 7 | use serde::Serialize; 8 | use storage_proofs_core::util::{bits_to_bytes, bytes_into_boolean_vec, bytes_into_boolean_vec_be}; 9 | 10 | fn blake2s_count(bytes: usize) -> anyhow::Result<Report> { 11 | let rng = &mut rand::thread_rng(); 12 | 13 | let mut cs = TestConstraintSystem::<Fr>::new(); 14 | let mut data = vec![0u8; bytes]; 15 | rng.fill_bytes(&mut data); 16 | 17 | let data_bits: Vec<Boolean> = { 18 | let mut cs = cs.namespace(|| "data"); 19 | bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len()) 20 | .expect("failed to convert to boolean vector") 21 | }; 22 | 23 | let personalization = vec![0u8; 8]; 24 | let out: Vec<bool> = 25 | bellperson::gadgets::blake2s::blake2s(&mut cs, &data_bits, &personalization)? 26 | .into_iter() 27 | .map(|b| b.get_value().expect("failed to get bool value")) 28 | .collect(); 29 | 30 | assert!(cs.is_satisfied(), "constraints not satisfied"); 31 | 32 | let expected = blake2s_simd::blake2s(&data); 33 | assert_eq!( 34 | expected.as_ref(), 35 | &bits_to_bytes(&out[..])[..], 36 | "circuit and non circuit do not match" 37 | ); 38 | 39 | Ok(Report { 40 | hash_fn: "blake2s".into(), 41 | bytes, 42 | constraints: cs.num_constraints(), 43 | }) 44 | } 45 | 46 | fn sha256_count(bytes: usize) -> anyhow::Result<Report> { 47 | let mut rng = rand::thread_rng(); 48 | 49 | let mut cs = TestConstraintSystem::<Fr>::new(); 50 | let mut data = vec![0u8; bytes]; 51 | rng.fill_bytes(&mut data); 52 | 53 | let data_bits: Vec<Boolean> = { 54 | let mut cs = cs.namespace(|| "data"); 55 | bytes_into_boolean_vec_be(&mut cs, Some(data.as_slice()), data.len()) 56 | .expect("failed to convert bytes into boolean vector big endian") 57 | }; 58 | 59 | let _out: Vec<bool> = bellperson::gadgets::sha256::sha256(&mut cs, &data_bits)? 
60 | .into_iter() 61 | .map(|b| b.get_value().expect("failed to get bool value")) 62 | .collect(); 63 | 64 | assert!(cs.is_satisfied(), "constraints not satisfied"); 65 | 66 | Ok(Report { 67 | hash_fn: "sha256".into(), 68 | bytes, 69 | constraints: cs.num_constraints(), 70 | }) 71 | } 72 | 73 | #[derive(Serialize)] 74 | #[serde(rename_all = "kebab-case")] 75 | struct Report { 76 | hash_fn: String, 77 | constraints: usize, 78 | bytes: usize, 79 | } 80 | 81 | pub fn run() -> anyhow::Result<()> { 82 | let reports = vec![ 83 | blake2s_count(32)?, 84 | blake2s_count(64)?, 85 | blake2s_count(128)?, 86 | blake2s_count(256)?, 87 | sha256_count(32)?, 88 | sha256_count(64)?, 89 | sha256_count(128)?, 90 | sha256_count(256)?, 91 | ]; 92 | 93 | // print reports 94 | let wrapped = Metadata::wrap(reports)?; 95 | serde_json::to_writer(std::io::stdout(), &wrapped)?; 96 | 97 | Ok(()) 98 | } 99 | -------------------------------------------------------------------------------- /fil-proofs-tooling/src/bin/benchy/merkleproofs.rs: -------------------------------------------------------------------------------- 1 | use std::fs::{create_dir, remove_dir_all}; 2 | use std::time::{SystemTime, UNIX_EPOCH}; 3 | 4 | use anyhow::{ensure, Result}; 5 | use filecoin_hashers::Hasher; 6 | use filecoin_proofs::with_shape; 7 | use log::{debug, info}; 8 | use rand::{thread_rng, Rng}; 9 | use storage_proofs_core::merkle::{ 10 | generate_tree, get_base_tree_count, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper, 11 | }; 12 | use storage_proofs_core::util::default_rows_to_discard; 13 | use typenum::Unsigned; 14 | 15 | fn generate_proofs<R: Rng, Tree: MerkleTreeTrait>( 16 | rng: &mut R, 17 | tree: &MerkleTreeWrapper< 18 | <Tree as MerkleTreeTrait>::Hasher, 19 | <Tree as MerkleTreeTrait>::Store, 20 | <Tree as MerkleTreeTrait>::Arity, 21 | <Tree as MerkleTreeTrait>::SubTreeArity, 22 | <Tree as MerkleTreeTrait>::TopTreeArity, 23 | >, 24 | base_tree_nodes: usize, 25 | nodes: usize, 26 | proofs_count: usize, 27 | validate: bool, 28 | ) -> Result<()> { 29 | let proofs_count = if proofs_count >= nodes { 30 | info!( 31 | "requested {} proofs, but instead challenging all {} nodes sequentially", 32 | proofs_count, nodes 33 | ); 34 | 35 | nodes 36 | } else { 37 | proofs_count 38 | }; 39 | 40 | info!( 41 | "creating {} inclusion proofs over {} nodes (validate enabled? 
{})", 42 | proofs_count, nodes, validate 43 | ); 44 | 45 | let rows_to_discard = default_rows_to_discard( 46 | base_tree_nodes, 47 | ::Arity::to_usize(), 48 | ); 49 | for i in 0..proofs_count { 50 | let challenge = if proofs_count == nodes { 51 | i 52 | } else { 53 | rng.gen_range(0..nodes) 54 | }; 55 | debug!("challenge[{}] = {}", i, challenge); 56 | let proof = tree 57 | .gen_cached_proof(challenge, Some(rows_to_discard)) 58 | .expect("failed to generate proof"); 59 | if validate { 60 | ensure!(proof.validate(challenge), "failed to validate proof"); 61 | } 62 | } 63 | 64 | Ok(()) 65 | } 66 | 67 | pub fn run_merkleproofs_bench( 68 | size: usize, 69 | proofs_count: usize, 70 | validate: bool, 71 | ) -> Result<()> { 72 | let tree_count = get_base_tree_count::(); 73 | let base_tree_leaves = 74 | size / std::mem::size_of::<::Domain>() / tree_count; 75 | 76 | let timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis(); 77 | let temp_path = std::env::temp_dir().join(format!("merkle-proof-bench-{}", timestamp)); 78 | create_dir(&temp_path)?; 79 | 80 | let mut rng = thread_rng(); 81 | info!( 82 | "generating merkle tree for sector size {} [base_tree_leaves {}, tree_count {}]", 83 | size, base_tree_leaves, tree_count 84 | ); 85 | let (_data, tree) = generate_tree::( 86 | &mut rng, 87 | base_tree_leaves * tree_count, 88 | Some(temp_path.clone()), 89 | ); 90 | generate_proofs::<_, Tree>( 91 | &mut rng, 92 | &tree, 93 | base_tree_leaves, 94 | base_tree_leaves * tree_count, 95 | proofs_count, 96 | validate, 97 | )?; 98 | 99 | remove_dir_all(&temp_path)?; 100 | 101 | Ok(()) 102 | } 103 | 104 | pub fn run(size: usize, proofs_count: usize, validate: bool) -> Result<()> { 105 | with_shape!( 106 | size as u64, 107 | run_merkleproofs_bench, 108 | size, 109 | proofs_count, 110 | validate 111 | ) 112 | } 113 | -------------------------------------------------------------------------------- /fil-proofs-tooling/src/bin/check_parameters/main.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use anyhow::Result; 4 | use clap::{Arg, Command}; 5 | 6 | use storage_proofs_core::parameter_cache::{read_cached_params, Bls12GrothParams}; 7 | 8 | fn run_map(parameter_file: &Path) -> Result { 9 | read_cached_params(parameter_file) 10 | } 11 | 12 | fn main() { 13 | fil_logger::init(); 14 | 15 | let map_cmd = Command::new("map").about("build mapped parameters").arg( 16 | Arg::new("param") 17 | .long("parameter-file") 18 | .help("The parameter file to map") 19 | .required(true) 20 | .takes_value(true), 21 | ); 22 | 23 | let matches = Command::new("check_parameters") 24 | .version("0.1") 25 | .subcommand(map_cmd) 26 | .get_matches(); 27 | 28 | match matches.subcommand() { 29 | Some(("map", m)) => { 30 | let parameter_file_str = m.value_of_t::("param").expect("param failed"); 31 | run_map(Path::new(¶meter_file_str)).expect("run_map failed"); 32 | } 33 | _ => panic!("Unrecognized subcommand"), 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /fil-proofs-tooling/src/bin/fdlimit/main.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | fil_logger::init(); 3 | 4 | match fdlimit::raise_fd_limit() { 5 | Ok(fdlimit::Outcome::LimitRaised { from, to }) => { 6 | println!("File descriptor limit was raised from {from} to {to}"); 7 | } 8 | Ok(fdlimit::Outcome::Unsupported) => { 9 | panic!("failed to raise fd limit: unsupported") 10 | } 11 | Err(e) => { 12 | 
panic!("failed to raise fd limit: {}", e) 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /fil-proofs-tooling/src/bin/gpu-cpu-test/README.md: -------------------------------------------------------------------------------- 1 | GPU CPU Test 2 | ============ 3 | 4 | This is a test utility to test whether it works to prioritize certain proofs. When a proof is prioritized, it will run on the GPU and all other proofs will be pushed to the CPU. 5 | 6 | This utility is meant to be run manually. It spawns multiple threads/processes that run proofs. Those get killed after 5 minutes of running. The overall test runs longer as some input data needs to be generated. By default, one thread/process will always be prioritized to run on the GPU. The other one might be moved to the CPU. 7 | 8 | To check whether the prioritization is working, run it first with default parameters: 9 | 10 | $ RUST_LOG=debug cargo run --release --bin gpu-cpu-test 11 | 12 | Occasionally you should see log messaged like 13 | 14 | 2020-05-15T12:35:48.680 366073 low-02 WARN bellperson::gpu::locks > GPU acquired by a high priority process! Freeing up Multiexp kernels... 15 | 16 | 17 | which indicate that the high priority proof indeed pushes lower priority ones down from the GPU onto the CPU. 18 | 19 | Once the test is completed there should be log messages that contain the results, the number of proofs run per thread: 20 | 21 | Thread high info: RunInfo { elapsed: 301.714277787s, iterations: 51 } 22 | Thread low-01 info: RunInfo { elapsed: 306.615414259s, iterations: 15 } 23 | Thread low-02 info: RunInfo { elapsed: 303.641817512s, iterations: 17 } 24 | 25 | The high priority proof clearly was able to run more proofs than the lower priority ones. 26 | 27 | To double check the result, you can also run the test without special priorities. 
Then the number of proofs run should be similar across all the threads, as you can see below (the first thread is always called `high` even if it doesn't run with high priority): 28 | 29 | $ RUST_LOG=debug cargo run --release --bin gpu-cpu-test -- --gpu-stealing=false 30 | Thread high info: RunInfo { elapsed: 307.515676843s, iterations: 34 } 31 | Thread low-01 info: RunInfo { elapsed: 305.585567866s, iterations: 34 } 32 | Thread low-02 info: RunInfo { elapsed: 302.7105106s, iterations: 34 } 33 | -------------------------------------------------------------------------------- /fil-proofs-tooling/src/bin/settings/main.rs: -------------------------------------------------------------------------------- 1 | use storage_proofs_core::settings::SETTINGS; 2 | 3 | fn main() { 4 | println!("{:#?}", *SETTINGS); 5 | } 6 | -------------------------------------------------------------------------------- /fil-proofs-tooling/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)] 2 | #![warn(clippy::unwrap_used)] 3 | #![warn(clippy::needless_collect)] 4 | 5 | pub mod measure; 6 | pub mod metadata; 7 | pub mod shared; 8 | pub use measure::{measure, FuncMeasurement}; 9 | pub use metadata::Metadata; 10 | pub use shared::{create_replica, create_replicas}; 11 | -------------------------------------------------------------------------------- /fil-proofs-tooling/src/measure.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use anyhow::Result; 4 | use cpu_time::ProcessTime; 5 | 6 | pub struct FuncMeasurement<T> { 7 | pub cpu_time: Duration, 8 | pub wall_time: Duration, 9 | pub return_value: T, 10 | } 11 | 12 | pub fn measure<T, F>(f: F) -> Result<FuncMeasurement<T>> 13 | where 14 | F: FnOnce() -> Result<T>, 15 | { 16 | let cpu_time_start = ProcessTime::now(); 17 | let wall_start_time = Instant::now(); 18 | 19 | let x = f()?; 20 | 21 | Ok(FuncMeasurement { 22 | cpu_time: cpu_time_start.elapsed(), 23 | wall_time: wall_start_time.elapsed(), 24 | return_value: x, 25 | }) 26 | } 27 | -------------------------------------------------------------------------------- /filecoin-hashers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "filecoin-hashers" 3 | description = "Hashers used in filecoin and their abstractions." 
4 | version = "14.0.0" 5 | authors = ["dignifiedquire ", "porcuquine "] 6 | license = "MIT OR Apache-2.0" 7 | edition = "2018" 8 | repository = "https://github.com/filecoin-project/rust-fil-proofs" 9 | readme = "README.md" 10 | 11 | [dependencies] 12 | # Sorted alphabetically 13 | anyhow.workspace = true 14 | bellperson.workspace = true 15 | blake2s_simd = { workspace = true, optional = true } 16 | blstrs.workspace = true 17 | ff.workspace = true 18 | generic-array.workspace = true 19 | hex.workspace = true 20 | lazy_static = { workspace = true, optional = true } 21 | merkletree.workspace = true 22 | neptune = { workspace = true, optional = true } 23 | rand.workspace = true 24 | serde.workspace = true 25 | sha2 = { workspace = true, optional = true } 26 | 27 | [features] 28 | default = ["opencl", "blake2s", "poseidon", "sha256"] 29 | 30 | cuda = ["bellperson/cuda", "neptune/cuda"] 31 | opencl = ["bellperson/opencl", "neptune/opencl"] 32 | 33 | # available hashers 34 | blake2s = ["blake2s_simd"] 35 | poseidon = ["neptune", "lazy_static"] 36 | sha256 = ["sha2"] 37 | 38 | [dev-dependencies] 39 | rand_xorshift.workspace = true 40 | serde_json.workspace = true 41 | -------------------------------------------------------------------------------- /filecoin-hashers/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /filecoin-hashers/README.md: -------------------------------------------------------------------------------- 1 | # Filecoin Hashers 2 | 3 | > Different hash functions and abstraction over them. 
4 | 5 | 6 | Available hashers are 7 | 8 | - `blake2s` 9 | - `poseidon` 10 | - `sha256` 11 | 12 | ## License 13 | 14 | MIT or Apache 2.0 15 | -------------------------------------------------------------------------------- /filecoin-hashers/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)] 2 | #![allow(clippy::upper_case_acronyms)] 3 | #![warn(clippy::unwrap_used)] 4 | #![warn(clippy::from_over_into)] 5 | #![warn(clippy::wrong_self_convention)] 6 | 7 | #[cfg(feature = "blake2s")] 8 | pub mod blake2s; 9 | #[cfg(feature = "poseidon")] 10 | pub mod poseidon; 11 | #[cfg(feature = "poseidon")] 12 | mod poseidon_types; 13 | #[cfg(feature = "sha256")] 14 | pub mod sha256; 15 | 16 | mod types; 17 | 18 | pub use self::types::*; 19 | -------------------------------------------------------------------------------- /filecoin-hashers/src/poseidon_types.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use blstrs::Scalar as Fr; 4 | use generic_array::typenum::{U0, U11, U16, U2, U24, U36, U4, U8}; 5 | use lazy_static::lazy_static; 6 | use neptune::{poseidon::PoseidonConstants, Arity}; 7 | 8 | pub type PoseidonBinaryArity = U2; 9 | pub type PoseidonQuadArity = U4; 10 | pub type PoseidonOctArity = U8; 11 | 12 | /// Arity to use by default for `hash_md` with poseidon. 13 | pub type PoseidonMDArity = U36; 14 | 15 | /// Arity to use for hasher implementations (Poseidon) which are specialized at compile time. 16 | /// Must match PoseidonArity 17 | pub const MERKLE_TREE_ARITY: usize = 2; 18 | 19 | lazy_static! { 20 | pub static ref POSEIDON_CONSTANTS_2: PoseidonConstants::<Fr, U2> = PoseidonConstants::new(); 21 | pub static ref POSEIDON_CONSTANTS_4: PoseidonConstants::<Fr, U4> = PoseidonConstants::new(); 22 | pub static ref POSEIDON_CONSTANTS_8: PoseidonConstants::<Fr, U8> = PoseidonConstants::new(); 23 | pub static ref POSEIDON_CONSTANTS_16: PoseidonConstants::<Fr, U16> = PoseidonConstants::new(); 24 | pub static ref POSEIDON_CONSTANTS_24: PoseidonConstants::<Fr, U24> = PoseidonConstants::new(); 25 | pub static ref POSEIDON_CONSTANTS_36: PoseidonConstants::<Fr, U36> = PoseidonConstants::new(); 26 | pub static ref POSEIDON_CONSTANTS_11: PoseidonConstants::<Fr, U11> = PoseidonConstants::new(); 27 | pub static ref POSEIDON_MD_CONSTANTS: PoseidonConstants::<Fr, PoseidonMDArity> = 28 | PoseidonConstants::new(); 29 | } 30 | 31 | pub trait PoseidonArity: Arity<Fr> + Send + Sync + Clone + Debug { 32 | #[allow(non_snake_case)] 33 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self>; 34 | } 35 | 36 | impl PoseidonArity for U0 { 37 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self> { 38 | unreachable!("dummy implementation, do not ever call me") 39 | } 40 | } 41 | 42 | impl PoseidonArity for U2 { 43 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self> { 44 | &POSEIDON_CONSTANTS_2 45 | } 46 | } 47 | 48 | impl PoseidonArity for U4 { 49 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self> { 50 | &POSEIDON_CONSTANTS_4 51 | } 52 | } 53 | 54 | impl PoseidonArity for U8 { 55 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self> { 56 | &POSEIDON_CONSTANTS_8 57 | } 58 | } 59 | 60 | impl PoseidonArity for U11 { 61 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self> { 62 | &POSEIDON_CONSTANTS_11 63 | } 64 | } 65 | 66 | impl PoseidonArity for U16 { 67 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self> { 68 | &POSEIDON_CONSTANTS_16 69 | } 70 | } 71 | impl PoseidonArity for U24 { 72 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self> { 73 | &POSEIDON_CONSTANTS_24 
74 | } 75 | } 76 | impl PoseidonArity for U36 { 77 | fn PARAMETERS() -> &'static PoseidonConstants<Fr, Self> { 78 | &POSEIDON_CONSTANTS_36 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /filecoin-hashers/src/types.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | use std::hash::Hash as StdHash; 3 | 4 | #[cfg(feature = "poseidon")] 5 | pub use crate::poseidon_types::*; 6 | 7 | use bellperson::{ 8 | gadgets::{boolean::Boolean, num::AllocatedNum}, 9 | ConstraintSystem, SynthesisError, 10 | }; 11 | use blstrs::Scalar as Fr; 12 | use ff::PrimeField; 13 | use merkletree::{ 14 | hash::{Algorithm as LightAlgorithm, Hashable as LightHashable}, 15 | merkle::Element, 16 | }; 17 | use rand::RngCore; 18 | use serde::{de::DeserializeOwned, Serialize}; 19 | 20 | pub trait Domain: 21 | Ord 22 | + Copy 23 | + Clone 24 | + AsRef<[u8]> 25 | + Default 26 | + Debug 27 | + Eq 28 | + Send 29 | + Sync 30 | + From<Fr> 31 | + From<<Fr as PrimeField>::Repr> 32 | + Into<Fr> 33 | + Serialize 34 | + DeserializeOwned 35 | + Element 36 | + StdHash 37 | { 38 | #[allow(clippy::wrong_self_convention)] 39 | fn into_bytes(&self) -> Vec<u8>; 40 | fn try_from_bytes(raw: &[u8]) -> anyhow::Result<Self>; 41 | /// Write itself into the given slice, LittleEndian bytes. 42 | fn write_bytes(&self, _: &mut [u8]) -> anyhow::Result<()>; 43 | 44 | fn random<R: RngCore>(rng: &mut R) -> Self; 45 | } 46 | 47 | pub trait HashFunction<T: Domain>: Clone + Debug + Send + Sync + LightAlgorithm<T> { 48 | fn hash(data: &[u8]) -> T; 49 | fn hash2(a: &T, b: &T) -> T; 50 | fn hash_md(input: &[T]) -> T { 51 | // Default to binary. 52 | assert!(input.len() > 1, "hash_md needs more than one element."); 53 | input 54 | .iter() 55 | .skip(1) 56 | .fold(input[0], |acc, elt| Self::hash2(&acc, elt)) 57 | } 58 | 59 | fn hash_leaf(data: &dyn LightHashable<Self>) -> T { 60 | let mut a = Self::default(); 61 | data.hash(&mut a); 62 | let item_hash = a.hash(); 63 | a.leaf(item_hash) 64 | } 65 | 66 | fn hash_single_node(data: &dyn LightHashable<Self>) -> T { 67 | let mut a = Self::default(); 68 | data.hash(&mut a); 69 | a.hash() 70 | } 71 | 72 | fn hash_leaf_circuit<CS: ConstraintSystem<Fr>>( 73 | mut cs: CS, 74 | left: &AllocatedNum<Fr>, 75 | right: &AllocatedNum<Fr>, 76 | height: usize, 77 | ) -> Result<AllocatedNum<Fr>, SynthesisError> { 78 | let left_bits = left.to_bits_le(cs.namespace(|| "left num into bits"))?; 79 | let right_bits = right.to_bits_le(cs.namespace(|| "right num into bits"))?; 80 | 81 | Self::hash_leaf_bits_circuit(cs, &left_bits, &right_bits, height) 82 | } 83 | 84 | fn hash_multi_leaf_circuit<Arity: 'static + PoseidonArity, CS: ConstraintSystem<Fr>>( 85 | cs: CS, 86 | leaves: &[AllocatedNum<Fr>], 87 | height: usize, 88 | ) -> Result<AllocatedNum<Fr>, SynthesisError>; 89 | 90 | fn hash_md_circuit<CS: ConstraintSystem<Fr>>( 91 | _cs: &mut CS, 92 | _elements: &[AllocatedNum<Fr>], 93 | ) -> Result<AllocatedNum<Fr>, SynthesisError> { 94 | unimplemented!(); 95 | } 96 | 97 | fn hash_leaf_bits_circuit<CS: ConstraintSystem<Fr>>( 98 | _cs: CS, 99 | _left: &[Boolean], 100 | _right: &[Boolean], 101 | _height: usize, 102 | ) -> Result<AllocatedNum<Fr>, SynthesisError> { 103 | unimplemented!(); 104 | } 105 | 106 | fn hash_circuit<CS: ConstraintSystem<Fr>>( 107 | cs: CS, 108 | bits: &[Boolean], 109 | ) -> Result<AllocatedNum<Fr>, SynthesisError>; 110 | 111 | fn hash2_circuit<CS>( 112 | cs: CS, 113 | a: &AllocatedNum<Fr>, 114 | b: &AllocatedNum<Fr>, 115 | ) -> Result<AllocatedNum<Fr>, SynthesisError> 116 | where 117 | CS: ConstraintSystem<Fr>; 118 | } 119 | 120 | pub trait Hasher: Clone + Debug + Eq + Default + Send + Sync { 121 | type Domain: Domain + LightHashable<Self::Function> + AsRef<Self::Domain>; 122 | type Function: HashFunction<Self::Domain>; 123 | 124 | fn name() -> String; 125 | } 126 | 
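Taken together, `Domain`, `HashFunction`, and `Hasher` let callers hash data while staying generic over the concrete hash function. A minimal sketch of that usage (not part of the crate: `hash_pair`, `main`, and the input byte strings are made up here, while `Sha256Hasher` is the hasher exported by the `sha256` module listed above):

use filecoin_hashers::{sha256::Sha256Hasher, HashFunction, Hasher};

// Combine two raw-byte leaves into a parent node, generic over the hasher.
fn hash_pair<H: Hasher>(left: &[u8], right: &[u8]) -> H::Domain {
    let l = H::Function::hash(left); // hash raw bytes into a Domain element
    let r = H::Function::hash(right);
    H::Function::hash2(&l, &r) // combine two Domain elements
}

fn main() {
    let parent = hash_pair::<Sha256Hasher>(b"left leaf", b"right leaf");
    println!("parent: {:?}", parent);
}

The same call site works unchanged with the `poseidon` or `blake2s` hashers, which is what the merkle-tree code in the storage-proofs crates relies on.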
-------------------------------------------------------------------------------- /filecoin-proofs/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | .criterion 5 | heaptrack* 6 | -------------------------------------------------------------------------------- /filecoin-proofs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "filecoin-proofs" 3 | description = "The Filecoin specific aspects of storage-proofs, including a C based FFI, to generate and verify proofs." 4 | version = "19.0.0" 5 | authors = ["dignifiedquire ", "laser ", "porcuquine "] 6 | license = "MIT OR Apache-2.0" 7 | edition = "2018" 8 | repository = "https://github.com/filecoin-project/rust-fil-proofs" 9 | readme = "README.md" 10 | 11 | [dependencies] 12 | filecoin-hashers = { workspace = true, features = ["poseidon", "sha256"] } 13 | fr32.workspace = true 14 | storage-proofs-core.workspace = true 15 | storage-proofs-porep.workspace = true 16 | storage-proofs-post.workspace = true 17 | storage-proofs-update.workspace = true 18 | # Sorted alphabetically 19 | anyhow.workspace = true 20 | bellperson.workspace = true 21 | bincode.workspace = true 22 | blake2b_simd.workspace = true 23 | blstrs.workspace = true 24 | ff.workspace = true 25 | generic-array.workspace = true 26 | gperftools = { workspace = true, optional = true } 27 | hex.workspace = true 28 | iowrap = "0.2.1" 29 | lazy_static.workspace = true 30 | log.workspace = true 31 | memmap2.workspace = true 32 | merkletree.workspace = true 33 | once_cell = "1.8.0" 34 | rand.workspace = true 35 | rayon.workspace = true 36 | serde = { workspace = true, features = ["rc", "derive"] } 37 | serde_json.workspace = true 38 | sha2.workspace = true 39 | typenum.workspace = true 40 | file-lock = { version = "2.1.10", optional = true } 41 | 42 | [dev-dependencies] 43 | # Sorted alphabetically 44 | criterion.workspace = true 45 | fil_logger.workspace = true 46 | rand_xorshift.workspace = true 47 | tempfile.workspace = true 48 | walkdir = "2.3.2" 49 | 50 | [features] 51 | default = ["opencl"] 52 | cpu-profile = ["gperftools"] 53 | heap-profile = ["gperftools/heap"] 54 | simd = ["storage-proofs-core/simd"] 55 | asm = ["storage-proofs-core/asm"] 56 | cuda = [ 57 | "storage-proofs-core/cuda", 58 | "storage-proofs-porep/cuda", 59 | "storage-proofs-post/cuda", 60 | "storage-proofs-update/cuda", 61 | "bellperson/cuda", 62 | "filecoin-hashers/cuda", 63 | ] 64 | cuda-supraseal = [ 65 | "storage-proofs-core/cuda-supraseal", 66 | "storage-proofs-porep/cuda", 67 | "storage-proofs-post/cuda", 68 | "storage-proofs-update/cuda", 69 | "bellperson/cuda-supraseal", 70 | "filecoin-hashers/cuda", 71 | ] 72 | opencl = [ 73 | "storage-proofs-core/opencl", 74 | "storage-proofs-porep/opencl", 75 | "storage-proofs-post/opencl", 76 | "storage-proofs-update/opencl", 77 | "bellperson/opencl", 78 | "filecoin-hashers/opencl", 79 | ] 80 | multicore-sdr = ["storage-proofs-porep/multicore-sdr"] 81 | big-tests = [] 82 | # This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD` 83 | # setting is ignored, no `TemporaryAux` file will be written. 
84 | fixed-rows-to-discard = [ 85 | "storage-proofs-core/fixed-rows-to-discard", 86 | "storage-proofs-porep/fixed-rows-to-discard", 87 | "storage-proofs-post/fixed-rows-to-discard", 88 | "storage-proofs-update/fixed-rows-to-discard", 89 | ] 90 | persist-regression-proofs = ["dep:file-lock"] 91 | 92 | [[bench]] 93 | name = "preprocessing" 94 | harness = false 95 | 96 | [[bench]] 97 | name = "aggregation" 98 | harness = false 99 | -------------------------------------------------------------------------------- /filecoin-proofs/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /filecoin-proofs/README.md: -------------------------------------------------------------------------------- 1 | # Filecoin Proofs 2 | 3 | > The Filecoin specific aspects of `storage-proofs`, including a C based FFI, to generate and verify proofs. 4 | 5 | 6 | ## License 7 | 8 | MIT or Apache 2.0 9 | -------------------------------------------------------------------------------- /filecoin-proofs/parameters.json: -------------------------------------------------------------------------------- 1 | ../parameters.json -------------------------------------------------------------------------------- /filecoin-proofs/src/api/fake_seal.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::path::Path; 3 | 4 | use anyhow::Result; 5 | use filecoin_hashers::{Domain, Hasher}; 6 | use rand::{thread_rng, Rng}; 7 | use storage_proofs_core::merkle::MerkleTreeTrait; 8 | use storage_proofs_porep::stacked::StackedDrg; 9 | 10 | use crate::{ 11 | api::util, 12 | constants::DefaultPieceHasher, 13 | types::{Commitment, PoRepConfig}, 14 | }; 15 | 16 | pub fn fauxrep<R: AsRef<Path>, S: AsRef<Path>, Tree: 'static + MerkleTreeTrait>( 17 | porep_config: &PoRepConfig, 18 | cache_path: R, 19 | out_path: S, 20 | ) -> Result<Commitment> { 21 | let mut rng = thread_rng(); 22 | fauxrep_aux::<_, R, S, Tree>(&mut rng, porep_config, cache_path, out_path) 23 | } 24 | 25 | pub fn fauxrep_aux<R: Rng, S: AsRef<Path>, T: AsRef<Path>, Tree: 'static + MerkleTreeTrait>( 26 | mut rng: &mut R, 27 | porep_config: &PoRepConfig, 28 | cache_path: S, 29 | out_path: T, 30 | ) -> Result<Commitment> { 31 | let sector_bytes = porep_config.padded_bytes_amount().0; 32 | 33 | { 34 | // Create a sector full of null bytes at `out_path`. 
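// (`set_len` only adjusts the file's length; on typical filesystems the zeros are a sparse hole, so even a multi-GiB fake sector is cheap to create.)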
35 | let file = File::create(&out_path)?; 36 | file.set_len(sector_bytes)?; 37 | } 38 | 39 | let fake_comm_c = <Tree::Hasher as Hasher>::Domain::random(&mut rng); 40 | let (comm_r, p_aux) = StackedDrg::<Tree, DefaultPieceHasher>::fake_replicate_phase2( 41 | fake_comm_c, 42 | out_path, 43 | &cache_path, 44 | sector_bytes as usize, 45 | )?; 46 | 47 | util::persist_p_aux::<Tree>(&p_aux, cache_path.as_ref())?; 48 | 49 | let mut commitment = [0u8; 32]; 50 | commitment[..].copy_from_slice(&comm_r.into_bytes()[..]); 51 | Ok(commitment) 52 | } 53 | 54 | pub fn fauxrep2<R: AsRef<Path>, S: AsRef<Path>, Tree: 'static + MerkleTreeTrait>( 55 | cache_path: R, 56 | existing_p_aux_path: S, 57 | ) -> Result<Commitment> { 58 | let mut rng = thread_rng(); 59 | 60 | let fake_comm_c = <Tree::Hasher as Hasher>::Domain::random(&mut rng); 61 | 62 | let (comm_r, p_aux) = 63 | StackedDrg::<Tree, DefaultPieceHasher>::fake_comm_r(fake_comm_c, existing_p_aux_path)?; 64 | 65 | util::persist_p_aux::<Tree>(&p_aux, cache_path.as_ref())?; 66 | 67 | let mut commitment = [0u8; 32]; 68 | commitment[..].copy_from_slice(&comm_r.into_bytes()[..]); 69 | Ok(commitment) 70 | } 71 | -------------------------------------------------------------------------------- /filecoin-proofs/src/chunk_iter.rs: -------------------------------------------------------------------------------- 1 | use std::io::{self, Read}; 2 | 3 | use iowrap::ReadMany; 4 | 5 | /// The number of bytes that are read from the reader at once. 6 | const READER_CHUNK_SIZE: usize = 4096; 7 | 8 | // Based on 9 | // https://stackoverflow.com/questions/73145503/iterator-for-reading-file-chunks/73145594#73145594 10 | /// Chunks the given reader to the given size. 11 | /// 12 | /// If the end is reached and the remaining bytes don't fill a full chunk, those bytes 13 | /// are returned. 14 | pub struct ChunkIterator<R> { 15 | reader: R, 16 | chunk_size: usize, 17 | } 18 | 19 | impl<R: Read> ChunkIterator<R> { 20 | /// Return a new iterator with a default chunk size of 4KiB. 21 | pub fn new(reader: R) -> Self { 22 | Self { 23 | reader, 24 | chunk_size: READER_CHUNK_SIZE, 25 | } 26 | } 27 | 28 | pub const fn chunk_size(&self) -> usize { 29 | self.chunk_size 30 | } 31 | } 32 | 33 | impl<R: Read> Iterator for ChunkIterator<R> { 34 | type Item = io::Result<Vec<u8>>; 35 | 36 | fn next(&mut self) -> Option<Self::Item> { 37 | let mut buffer = vec![0u8; self.chunk_size]; 38 | match self.reader.read_many(&mut buffer) { 39 | Ok(bytes_read) if bytes_read == self.chunk_size => Some(Ok(buffer)), 40 | // A position of 0 indicates end of file. 41 | Ok(0) => None, 42 | Ok(bytes_read) => Some(Ok(buffer[..bytes_read].to_vec())), 43 | Err(error) => Some(Err(error)), 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /filecoin-proofs/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)] 2 | #![warn(clippy::unwrap_used)] 3 | #![warn(clippy::unnecessary_wraps)] 4 | #![allow(clippy::upper_case_acronyms)] 5 | 6 | #[cfg(all(feature = "cuda", feature = "cuda-supraseal"))] 7 | compile_error!( 8 | "The `cuda` and `cuda-supraseal` features cannot be enabled at the same time; choose one of them." 
9 | ); 10 | 11 | pub mod caches; 12 | pub mod chunk_iter; 13 | pub mod constants; 14 | pub mod param; 15 | pub mod parameters; 16 | pub mod pieces; 17 | pub mod types; 18 | 19 | mod api; 20 | mod commitment_reader; 21 | 22 | pub use api::*; 23 | pub use chunk_iter::ChunkIterator; 24 | pub use commitment_reader::*; 25 | pub use constants::*; 26 | pub use types::*; 27 | -------------------------------------------------------------------------------- /filecoin-proofs/src/param.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::ffi::OsStr; 3 | use std::fs::File; 4 | use std::io; 5 | use std::path::{Path, PathBuf}; 6 | 7 | use anyhow::{Context, Result}; 8 | use blake2b_simd::State as Blake2b; 9 | use storage_proofs_core::parameter_cache::{ 10 | parameter_cache_dir, CacheEntryMetadata, PARAMETER_METADATA_EXT, 11 | }; 12 | 13 | // Produces an absolute path to a file within the cache 14 | pub fn get_full_path_for_file_within_cache(filename: &str) -> PathBuf { 15 | let mut path = parameter_cache_dir(); 16 | path.push(filename); 17 | path 18 | } 19 | 20 | // Produces a BLAKE2b checksum for a file within the cache 21 | pub fn get_digest_for_file_within_cache(filename: &str) -> Result<String> { 22 | let path = get_full_path_for_file_within_cache(filename); 23 | let mut file = File::open(&path).with_context(|| format!("could not open path={:?}", path))?; 24 | let mut hasher = Blake2b::new(); 25 | 26 | io::copy(&mut file, &mut hasher)?; 27 | 28 | Ok(hasher.finalize().to_hex()[..32].into()) 29 | } 30 | 31 | // Predicate which matches the provided extension against the given filename 32 | pub fn has_extension<S: AsRef<str>, P: AsRef<Path>>(filename: P, ext: S) -> bool { 33 | filename 34 | .as_ref() 35 | .extension() 36 | .and_then(OsStr::to_str) 37 | .map(|s| s == ext.as_ref()) 38 | .unwrap_or(false) 39 | } 40 | 41 | // Adds a file extension to the given filename 42 | pub fn add_extension(filename: &str, ext: &str) -> String { 43 | format!("{}.{}", filename, ext) 44 | } 45 | 46 | /// Builds a map from a parameter_id (file in cache) to metadata. 47 | pub fn parameter_id_to_metadata_map( 48 | parameter_ids: &[String], 49 | ) -> Result<BTreeMap<String, CacheEntryMetadata>> { 50 | let mut map: BTreeMap<String, CacheEntryMetadata> = Default::default(); 51 | 52 | for parameter_id in parameter_ids { 53 | let filename = add_extension(parameter_id, PARAMETER_METADATA_EXT); 54 | let file_path = get_full_path_for_file_within_cache(&filename); 55 | let file = File::open(&file_path) 56 | .with_context(|| format!("could not open path={:?}", file_path))?; 57 | 58 | let meta = serde_json::from_reader(file)?; 59 | 60 | map.insert(parameter_id.to_string(), meta); 61 | } 62 | 63 | Ok(map) 64 | } 65 | 66 | /// Maps the name of a file in the cache to its parameter id. For example, 67 | /// ABCDEF.vk corresponds to parameter id ABCDEF. 
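/// Returns `None` if the path has no file name component at all.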
68 | pub fn filename_to_parameter_id<'a, P: AsRef<Path> + 'a>(filename: P) -> Option<String> { 69 | filename 70 | .as_ref() 71 | .file_stem() 72 | .and_then(OsStr::to_str) 73 | .map(ToString::to_string) 74 | } 75 | -------------------------------------------------------------------------------- /filecoin-proofs/src/types/piece_info.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{self, Debug, Formatter}; 2 | 3 | use anyhow::{ensure, Result}; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | use crate::types::{Commitment, UnpaddedBytesAmount}; 7 | 8 | #[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize)] 9 | pub struct PieceInfo { 10 | pub commitment: Commitment, 11 | pub size: UnpaddedBytesAmount, 12 | } 13 | 14 | impl Debug for PieceInfo { 15 | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { 16 | fmt.debug_struct("PieceInfo") 17 | .field("commitment", &hex::encode(self.commitment)) 18 | .field("size", &self.size) 19 | .finish() 20 | } 21 | } 22 | 23 | impl PieceInfo { 24 | pub fn new(commitment: Commitment, size: UnpaddedBytesAmount) -> Result<Self> { 25 | ensure!(commitment != [0; 32], "Invalid all zero commitment"); 26 | Ok(PieceInfo { commitment, size }) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /filecoin-proofs/src/types/porep_proof_partitions.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Clone, Copy, Debug, Serialize, Deserialize)] 4 | pub struct PoRepProofPartitions(pub u8); 5 | 6 | impl From<PoRepProofPartitions> for usize { 7 | fn from(x: PoRepProofPartitions) -> Self { 8 | x.0 as usize 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /filecoin-proofs/src/types/post_config.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use anyhow::Result; 4 | use storage_proofs_core::{ 5 | api_version::ApiVersion, 6 | merkle::MerkleTreeTrait, 7 | parameter_cache::{ 8 | parameter_cache_metadata_path, parameter_cache_params_path, 9 | parameter_cache_verifying_key_path, CacheableParameters, 10 | }, 11 | }; 12 | use storage_proofs_post::fallback::{FallbackPoStCircuit, FallbackPoStCompound}; 13 | 14 | use crate::{ 15 | parameters::{window_post_public_params, winning_post_public_params}, 16 | types::{PaddedBytesAmount, SectorSize, UnpaddedBytesAmount}, 17 | }; 18 | 19 | #[derive(Clone, Debug)] 20 | pub struct PoStConfig { 21 | pub sector_size: SectorSize, 22 | pub challenge_count: usize, 23 | pub sector_count: usize, 24 | pub typ: PoStType, 25 | /// High priority (always runs on GPU) == true 26 | pub priority: bool, 27 | pub api_version: ApiVersion, 28 | } 29 | 30 | #[derive(Debug, Clone, PartialEq, Eq)] 31 | pub enum PoStType { 32 | Winning, 33 | Window, 34 | } 35 | 36 | impl From<PoStConfig> for PaddedBytesAmount { 37 | fn from(x: PoStConfig) -> Self { 38 | let PoStConfig { sector_size, .. } = x; 39 | PaddedBytesAmount::from(sector_size) 40 | } 41 | } 42 | 43 | impl From<PoStConfig> for UnpaddedBytesAmount { 44 | fn from(x: PoStConfig) -> Self { 45 | let PoStConfig { sector_size, .. 
} = x; 46 | PaddedBytesAmount::from(sector_size).into() 47 | } 48 | } 49 | 50 | impl PoStConfig { 51 | pub fn padded_sector_size(&self) -> PaddedBytesAmount { 52 | PaddedBytesAmount::from(self.sector_size) 53 | } 54 | 55 | pub fn unpadded_sector_size(&self) -> UnpaddedBytesAmount { 56 | PaddedBytesAmount::from(self.sector_size).into() 57 | } 58 | 59 | /// Returns the cache identifier as used by `storage-proofs::parameter_cache`. 60 | pub fn get_cache_identifier<Tree: 'static + MerkleTreeTrait>(&self) -> Result<String> { 61 | match self.typ { 62 | PoStType::Winning => { 63 | let params = winning_post_public_params::<Tree>(self)?; 64 | 65 | Ok(<FallbackPoStCompound<Tree> as CacheableParameters< 66 | FallbackPoStCircuit<Tree>, 67 | _, 68 | >>::cache_identifier(&params)) 69 | } 70 | PoStType::Window => { 71 | let params = window_post_public_params::<Tree>(self)?; 72 | 73 | Ok(<FallbackPoStCompound<Tree> as CacheableParameters< 74 | FallbackPoStCircuit<Tree>, 75 | _, 76 | >>::cache_identifier(&params)) 77 | } 78 | } 79 | } 80 | 81 | pub fn get_cache_metadata_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> { 82 | let id = self.get_cache_identifier::<Tree>()?; 83 | Ok(parameter_cache_metadata_path(&id)) 84 | } 85 | 86 | pub fn get_cache_verifying_key_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> { 87 | let id = self.get_cache_identifier::<Tree>()?; 88 | Ok(parameter_cache_verifying_key_path(&id)) 89 | } 90 | 91 | pub fn get_cache_params_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> { 92 | let id = self.get_cache_identifier::<Tree>()?; 93 | Ok(parameter_cache_params_path(&id)) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /filecoin-proofs/src/types/post_proof_partitions.rs: -------------------------------------------------------------------------------- 1 | use crate::{constants::SINGLE_PARTITION_PROOF_LEN, types::PoStProofBytesAmount}; 2 | 3 | #[derive(Clone, Copy, Debug)] 4 | pub struct PoStProofPartitions(pub u8); 5 | 6 | impl From<PoStProofPartitions> for PoStProofBytesAmount { 7 | fn from(x: PoStProofPartitions) -> Self { 8 | PoStProofBytesAmount(SINGLE_PARTITION_PROOF_LEN * usize::from(x)) 9 | } 10 | } 11 | 12 | impl From<PoStProofPartitions> for usize { 13 | fn from(x: PoStProofPartitions) -> Self { 14 | x.0 as usize 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /filecoin-proofs/src/types/public_replica_info.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::Ordering; 2 | use std::hash::Hash; 3 | 4 | use anyhow::{ensure, Result}; 5 | use filecoin_hashers::Domain; 6 | 7 | use crate::{api::as_safe_commitment, types::Commitment}; 8 | 9 | /// The minimal information required about a replica, in order to be able to verify 10 | /// a PoSt over it. 11 | #[derive(Clone, Debug, PartialEq, Eq, Hash)] 12 | pub struct PublicReplicaInfo { 13 | /// The replica commitment. 
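/// (`comm_r`, which replication derives from `comm_c` and `comm_r_last`.)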
14 | comm_r: Commitment, 15 | } 16 | 17 | impl Ord for PublicReplicaInfo { 18 | fn cmp(&self, other: &Self) -> Ordering { 19 | self.comm_r.as_ref().cmp(other.comm_r.as_ref()) 20 | } 21 | } 22 | 23 | impl PartialOrd for PublicReplicaInfo { 24 | fn partial_cmp(&self, other: &Self) -> Option<Ordering> { 25 | Some(self.cmp(other)) 26 | } 27 | } 28 | 29 | impl PublicReplicaInfo { 30 | pub fn new(comm_r: Commitment) -> Result<Self> { 31 | ensure!(comm_r != [0; 32], "Invalid all zero commitment (comm_r)"); 32 | Ok(PublicReplicaInfo { comm_r }) 33 | } 34 | 35 | pub fn safe_comm_r<T: Domain>(&self) -> Result<T> { 36 | as_safe_commitment(&self.comm_r, "comm_r") 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /filecoin-proofs/src/types/sector_class.rs: -------------------------------------------------------------------------------- 1 | use storage_proofs_core::api_version::ApiVersion; 2 | 3 | use crate::types::{PoRepConfig, PoRepProofPartitions, SectorSize}; 4 | 5 | #[derive(Clone, Copy, Debug)] 6 | pub struct SectorClass { 7 | pub sector_size: SectorSize, 8 | pub partitions: PoRepProofPartitions, 9 | pub porep_id: [u8; 32], 10 | pub api_version: ApiVersion, 11 | } 12 | 13 | impl From<SectorClass> for PoRepConfig { 14 | fn from(x: SectorClass) -> Self { 15 | let SectorClass { 16 | sector_size, 17 | partitions, 18 | porep_id, 19 | api_version, 20 | } = x; 21 | PoRepConfig { 22 | sector_size, 23 | partitions, 24 | porep_id, 25 | api_version, 26 | api_features: vec![], 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /filecoin-proofs/src/types/sector_size.rs: -------------------------------------------------------------------------------- 1 | use fr32::to_unpadded_bytes; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use crate::types::{PaddedBytesAmount, UnpaddedBytesAmount}; 5 | 6 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] 7 | pub struct SectorSize(pub u64); 8 | 9 | impl From<u64> for SectorSize { 10 | fn from(size: u64) -> Self { 11 | SectorSize(size) 12 | } 13 | } 14 | 15 | impl From<SectorSize> for UnpaddedBytesAmount { 16 | fn from(x: SectorSize) -> Self { 17 | UnpaddedBytesAmount(to_unpadded_bytes(x.0)) 18 | } 19 | } 20 | 21 | impl From<SectorSize> for PaddedBytesAmount { 22 | fn from(x: SectorSize) -> Self { 23 | PaddedBytesAmount(x.0) 24 | } 25 | } 26 | 27 | impl From<SectorSize> for u64 { 28 | fn from(x: SectorSize) -> Self { 29 | x.0 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /filecoin-proofs/src/types/sector_update_config.rs: -------------------------------------------------------------------------------- 1 | use storage_proofs_core::util::NODE_SIZE; 2 | use storage_proofs_update::constants::{h_default, partition_count}; 3 | 4 | use crate::types::{PoRepConfig, SectorSize, UpdateProofPartitions}; 5 | 6 | #[derive(Clone, Copy, Debug)] 7 | pub struct SectorUpdateConfig { 8 | pub sector_size: SectorSize, 9 | pub nodes_count: usize, 10 | pub update_partitions: UpdateProofPartitions, 11 | pub h: usize, 12 | } 13 | 14 | impl SectorUpdateConfig { 15 | pub fn from_porep_config(porep_config: &PoRepConfig) -> Self { 16 | let nodes_count = u64::from(porep_config.sector_size) as usize / NODE_SIZE; 17 | 18 | SectorUpdateConfig { 19 | sector_size: porep_config.sector_size, 20 | nodes_count, 21 | update_partitions: UpdateProofPartitions::from(partition_count(nodes_count)), 22 | h: h_default(nodes_count), 23 | } 24 | } 25 | } 26 | 
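As a rough illustration of the arithmetic in `from_porep_config` above, consider a 2 KiB test sector (a sketch: the `main` wrapper is made up, while `NODE_SIZE`, `partition_count`, and `h_default` are the imports used by the file itself; `NODE_SIZE` is 32 bytes):

use storage_proofs_core::util::NODE_SIZE;
use storage_proofs_update::constants::{h_default, partition_count};

fn main() {
    // A 2 KiB sector holds 2048 / 32 = 64 nodes of 32 bytes each.
    let sector_bytes: u64 = 2048;
    let nodes_count = sector_bytes as usize / NODE_SIZE;
    assert_eq!(nodes_count, 64);

    // Both derived values depend only on the node count.
    println!("update partitions: {}", partition_count(nodes_count));
    println!("h: {}", h_default(nodes_count));
}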
-------------------------------------------------------------------------------- /filecoin-proofs/src/types/update_proof_partitions.rs: -------------------------------------------------------------------------------- 1 | // Update Proof Partitions are the number of partitions used in the 2 | // EmptySectorUpdate code paths per-sector size 3 | #[derive(Clone, Copy, Debug)] 4 | pub struct UpdateProofPartitions(pub u8); 5 | 6 | impl From<UpdateProofPartitions> for usize { 7 | fn from(x: UpdateProofPartitions) -> Self { 8 | x.0 as usize 9 | } 10 | } 11 | 12 | impl From<usize> for UpdateProofPartitions { 13 | fn from(x: usize) -> Self { 14 | UpdateProofPartitions(x as u8) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /filecoin-proofs/tests/aggregate_proof_bytes: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-project/rust-fil-proofs/6ac97456910ec7487684f7deccc183ea16cc5e17/filecoin-proofs/tests/aggregate_proof_bytes -------------------------------------------------------------------------------- /filecoin-proofs/tests/constants.rs: -------------------------------------------------------------------------------- 1 | use filecoin_proofs::{ 2 | with_shape, SECTOR_SIZE_16_MIB, SECTOR_SIZE_1_GIB, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_GIB, 3 | SECTOR_SIZE_4_KIB, SECTOR_SIZE_512_MIB, SECTOR_SIZE_64_GIB, SECTOR_SIZE_8_MIB, 4 | }; 5 | use generic_array::typenum::Unsigned; 6 | use storage_proofs_core::merkle::MerkleTreeTrait; 7 | 8 | fn canonical_shape(sector_size: u64) -> (usize, usize, usize) { 9 | // This could perhaps be cleaned up, but I think it expresses the intended constraints 10 | // and is consistent with our current hard-coded size->shape mappings. 11 | assert_eq!(sector_size.count_ones(), 1); 12 | let log_byte_size = sector_size.trailing_zeros(); 13 | let log_nodes = log_byte_size - 5; // 2^5 = 32-byte nodes 14 | 15 | let max_tree_log = 3; // Largest allowable arity. The optimal shape. 16 | 17 | let log_max_base = 27; // 4 GiB worth of nodes 18 | let log_base = max_tree_log; // Base must be oct trees. 19 | let log_in_base = u32::min(log_max_base, (log_nodes / log_base) * log_base); // How many nodes in base? 20 | 21 | let log_upper = log_nodes - log_in_base; // Nodes in sub and upper combined. 22 | let log_rem = log_upper % max_tree_log; // Remainder after filling optimal trees. 23 | 24 | let (log_sub, log_top) = { 25 | // Are the upper trees empty? 26 | if log_upper > 0 { 27 | // Do we need a remainder tree? 28 | if log_rem == 0 { 29 | (Some(max_tree_log), None) // No remainder tree, fill the sub tree optimally. 30 | } else { 31 | // Need a remainder tree. 32 | 33 | // Do we have room for another max tree? 34 | if log_upper > max_tree_log { 35 | // There is room. Use the sub tree for as much overflow as we can fit optimally. 36 | // And put the rest in the top tree. 37 | (Some(max_tree_log), Some(log_rem)) 38 | } else { 39 | // Can't fit another max tree. 40 | // Just put the remainder in the sub tree. 41 | (Some(log_rem), None) 42 | } 43 | } 44 | } else { 45 | // Upper trees are empty. 
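// (log_upper == 0, so every node already fits in the base tree.)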
46 | (None, None) 47 | } 48 | }; 49 | 50 | let base = 1 << log_base; 51 | let sub = if let Some(l) = log_sub { 1 << l } else { 0 }; 52 | let top = if let Some(l) = log_top { 1 << l } else { 0 }; 53 | 54 | (base, sub, top) 55 | } 56 | 57 | fn arities_to_usize<Tree: MerkleTreeTrait>() -> (usize, usize, usize) { 58 | ( 59 | Tree::Arity::to_usize(), 60 | Tree::SubTreeArity::to_usize(), 61 | Tree::TopTreeArity::to_usize(), 62 | ) 63 | } 64 | 65 | #[test] 66 | fn test_with_shape_macro() { 67 | test_with_shape_macro_aux(SECTOR_SIZE_2_KIB); 68 | test_with_shape_macro_aux(SECTOR_SIZE_4_KIB); 69 | test_with_shape_macro_aux(SECTOR_SIZE_8_MIB); 70 | test_with_shape_macro_aux(SECTOR_SIZE_16_MIB); 71 | test_with_shape_macro_aux(SECTOR_SIZE_512_MIB); 72 | test_with_shape_macro_aux(SECTOR_SIZE_1_GIB); 73 | test_with_shape_macro_aux(SECTOR_SIZE_32_GIB); 74 | test_with_shape_macro_aux(SECTOR_SIZE_64_GIB); 75 | } 76 | 77 | fn test_with_shape_macro_aux(sector_size: u64) { 78 | let expected = canonical_shape(sector_size); 79 | let arities = with_shape!(sector_size, arities_to_usize); 80 | assert_eq!( 81 | arities, expected, 82 | "Wrong shape for sector size {}: have {:?} but need {:?}.", 83 | sector_size, arities, expected 84 | ); 85 | } 86 | -------------------------------------------------------------------------------- /fr32/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fr32" 3 | version = "12.0.0" 4 | authors = ["dignifiedquire "] 5 | description = "Filecoin proofs Fr/32-byte conversion tooling" 6 | license = "MIT OR Apache-2.0" 7 | edition = "2018" 8 | repository = "https://github.com/filecoin-project/rust-fil-proofs" 9 | 10 | [dependencies] 11 | # Sorted alphabetically 12 | anyhow.workspace = true 13 | blstrs.workspace = true 14 | byte-slice-cast.workspace = true 15 | byteorder.workspace = true 16 | ff.workspace = true 17 | thiserror.workspace = true 18 | 19 | [dev-dependencies] 20 | # Sorted alphabetically 21 | bitvec = "0.17" 22 | criterion.workspace = true 23 | itertools.workspace = true 24 | pretty_assertions.workspace = true 25 | rand.workspace = true 26 | rand_xorshift.workspace = true 27 | 28 | [[bench]] 29 | name = "fr" 30 | harness = false 31 | -------------------------------------------------------------------------------- /fr32/benches/fr.rs: -------------------------------------------------------------------------------- 1 | use blstrs::Scalar as Fr; 2 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 3 | use ff::Field; 4 | use fr32::{bytes_into_fr, fr_into_bytes}; 5 | use rand::thread_rng; 6 | 7 | fn fr_benchmark(c: &mut Criterion) { 8 | c.bench_function("fr-to-bytes-32", move |b| { 9 | let mut rng = thread_rng(); 10 | let fr = Fr::random(&mut rng); 11 | 12 | b.iter(|| black_box(fr_into_bytes(&fr))) 13 | }); 14 | 15 | c.bench_function("bytes-32-to-fr", move |b| { 16 | let mut rng = thread_rng(); 17 | let fr = Fr::random(&mut rng); 18 | let bytes = fr_into_bytes(&fr); 19 | 20 | b.iter(|| black_box(bytes_into_fr(&bytes).unwrap())) 21 | }); 22 | } 23 | 24 | criterion_group!(benches, fr_benchmark); 25 | criterion_main!(benches); 26 | -------------------------------------------------------------------------------- /fr32/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod convert; 2 | mod padding; 3 | mod reader; 4 | 5 | pub use convert::*; 6 | pub use padding::*; 7 | pub use reader::*; 8 | -------------------------------------------------------------------------------- 
/issue_template.md: -------------------------------------------------------------------------------- 1 | ### Description 2 | 3 | ### Acceptance criteria 4 | 5 | ### Risks + pitfalls 6 | 7 | ### Where to begin 8 | -------------------------------------------------------------------------------- /proptest-regressions/crypto/sloth.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | xs 2333654024 1879057395 1148234502 4254212597 # shrinks to key = Fr(FrRepr([0, 0, 0, 0])), plaintext = Fr(FrRepr([6433232347557286543, 14241240406459990354, 3113366375994539378, 2168360237581758600])) 8 | -------------------------------------------------------------------------------- /release.toml: -------------------------------------------------------------------------------- 1 | consolidate-commits = false 2 | -------------------------------------------------------------------------------- /rust-fil-proofs.config.toml.sample: -------------------------------------------------------------------------------- 1 | # To use this configuration, copy this file to './rust-fil-proofs.config.toml'. 2 | 3 | # The location to store downloaded parameter files required for proofs. 4 | parameter_cache = "/var/tmp/filecoin-proofs-parameters/" 5 | 6 | # The location to store the on-disk parents cache. 7 | parent_cache = "/var/tmp/filecoin-parents" 8 | # The max number of parent cache elements to have mapped in RAM at a time. 9 | sdr_parents_cache_size = 2_048 10 | 11 | # This enables the use of the GPU for column tree building. 12 | use_gpu_column_builder = false 13 | # If the GPU is used for column building, this is the batch size to send to the GPU at a time. 14 | max_gpu_column_batch_size = 400_000 15 | # This is the batch size for writing out the column tree elements to disk after it's generated. 16 | column_write_batch_size = 262_144 17 | 18 | # This enables the use of the GPU for tree r last building. 19 | use_gpu_tree_builder = false 20 | # If the GPU is used for tree r last building, this is the batch size to send to the GPU at a time. 21 | max_gpu_tree_batch_size = 700_000 22 | 23 | # This setting affects tree_r_last (MerkleTree) generation and access 24 | # and determines the size of the on disk tree caches. This value MUST 25 | # NOT be changed after tree_r_last caches have been generated on your 26 | # system, as any remaining will no longer be accessible. A tool 27 | # exists called 'update_tree_r_last' that can rebuild cache files if 28 | # it's required, but updating this setting is NOT recommended. 29 | rows_to_discard = 2 30 | 31 | # This value is defaulted to the number of cores available on your system. 
32 | #window_post_synthesis_num_cpus = 8 33 | 34 | # This enables multicore SDR replication 35 | use_multicore_sdr = false 36 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.83.0 2 | -------------------------------------------------------------------------------- /scripts/bench-parser.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | name="" 3 | samples="" 4 | time="" 5 | slope="" 6 | rsqr="" 7 | mean="" 8 | stddev="" 9 | median="" 10 | medabsdev="" 11 | 12 | function as_list { 13 | out=$(echo $@ | sed "s/ /\", \"/g") 14 | echo "[\"$out\"]" 15 | } 16 | 17 | results=[] 18 | index=0 19 | 20 | while IFS= read line; do 21 | if [[ $line =~ ^Benchmarking ]]; then 22 | if [[ -z $name ]]; then 23 | name=`echo "$line" | cut -d' ' -f2-` 24 | fi 25 | fi 26 | if [[ "$line" =~ Collecting ]]; then 27 | samples=$(echo "$line" | cut -d'C' -f2 | cut -d' ' -f2) 28 | fi 29 | if [[ "$line" =~ time: ]]; then 30 | time=$(echo "$line" | cut -d'[' -f2 | cut -d']' -f1 | awk '{ print $1$2, $3$4, $5$6 }') 31 | fi 32 | if [[ "$line" =~ ^slope ]]; then 33 | slope=$(echo "$line" | cut -d'[' -f2 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }') 34 | rsqr=$(echo "$line" | cut -d'[' -f3 | cut -d']' -f1 | awk '{ print $1, $2 }') 35 | fi 36 | if [[ "$line" =~ ^mean ]]; then 37 | mean=$(echo "$line" | cut -d'[' -f2 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }') 38 | stddev=$(echo "$line" | cut -d'[' -f3 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }') 39 | fi 40 | if [[ "$line" =~ ^median ]]; then 41 | median=$(echo "$line" | cut -d'[' -f2 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }') 42 | medabsdev=$(echo "$line" | cut -d'[' -f3 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }') 43 | results[index]=" { 44 | \"name\": "\"$name\"", 45 | \"samples\": $samples, 46 | \"time\": $(as_list $time), 47 | \"slope\": $(as_list $slope), 48 | \"R^2\": $(as_list $rsqr), 49 | \"mean\": $(as_list $mean), 50 | \"std. dev.\": $(as_list $stddev), 51 | \"median\": $(as_list $median), 52 | \"med. abs. 
dev.\": $(as_list $medabsdev) 53 | }" 54 | name="" 55 | index=$((index+1)) 56 | fi 57 | done 58 | 59 | count=$((index-1)) 60 | 61 | if [ "$count" -ge "1" ]; then 62 | echo "[" 63 | for n in ${!results[@]}; do 64 | printf "${results[$n]}" 65 | if [ "$n" -ne "$count" ]; then 66 | echo ", " 67 | else 68 | echo 69 | fi 70 | done 71 | echo "]" 72 | fi 73 | -------------------------------------------------------------------------------- /scripts/package-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Eeuo pipefail 4 | 5 | if [ -z "$1" ]; then 6 | TAR_FILE=`mktemp`.tar.gz 7 | else 8 | TAR_FILE=$1 9 | fi 10 | 11 | TAR_PATH=`mktemp -d` 12 | 13 | mkdir -p $TAR_PATH 14 | mkdir -p $TAR_PATH/bin 15 | mkdir -p $TAR_PATH/misc 16 | 17 | cp filecoin-proofs/parameters.json $TAR_PATH/misc/ 18 | cp target/release/paramcache $TAR_PATH/bin/ 19 | cp target/release/paramfetch $TAR_PATH/bin/ 20 | 21 | pushd $TAR_PATH 22 | 23 | tar -czf $TAR_FILE ./* 24 | 25 | popd 26 | 27 | rm -rf $TAR_PATH 28 | 29 | echo $TAR_FILE 30 | -------------------------------------------------------------------------------- /scripts/publish-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Eeuo pipefail 4 | 5 | RELEASE_NAME="$CIRCLE_PROJECT_REPONAME-`uname`" 6 | RELEASE_FILE="/tmp/$RELEASE_NAME.tar.gz" 7 | RELEASE_TAG="${CIRCLE_SHA1:0:16}" 8 | 9 | # make sure we have a token set, api requests won't work otherwise 10 | if [ -z $GITHUB_TOKEN ]; then 11 | echo "\$GITHUB_TOKEN not set, publish failed" 12 | exit 1 13 | fi 14 | 15 | echo "preparing release file" 16 | 17 | `dirname $0`/package-release.sh $RELEASE_FILE 18 | 19 | echo "release file created: $RELEASE_FILE" 20 | 21 | # see if the release already exists by tag 22 | RELEASE_RESPONSE=` 23 | curl \ 24 | --header "Authorization: token $GITHUB_TOKEN" \ 25 | "https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases/tags/$RELEASE_TAG" 26 | ` 27 | 28 | RELEASE_ID=`echo $RELEASE_RESPONSE | jq '.id'` 29 | 30 | if [ "$RELEASE_ID" = "null" ]; then 31 | echo "creating release" 32 | 33 | RELEASE_DATA="{ 34 | \"tag_name\": \"$RELEASE_TAG\", 35 | \"target_commitish\": \"$CIRCLE_SHA1\", 36 | \"name\": \"$RELEASE_TAG\", 37 | \"body\": \"\" 38 | }" 39 | 40 | # create it if it doesn't exist yet 41 | RELEASE_RESPONSE=` 42 | curl \ 43 | --request POST \ 44 | --header "Authorization: token $GITHUB_TOKEN" \ 45 | --header "Content-Type: application/json" \ 46 | --data "$RELEASE_DATA" \ 47 | "https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases" 48 | ` 49 | else 50 | echo "release already exists" 51 | fi 52 | 53 | RELEASE_UPLOAD_URL=`echo $RELEASE_RESPONSE | jq -r '.upload_url' | cut -d'{' -f1` 54 | 55 | curl \ 56 | --request POST \ 57 | --header "Authorization: token $GITHUB_TOKEN" \ 58 | --header "Content-Type: application/octet-stream" \ 59 | --data-binary "@$RELEASE_FILE" \ 60 | "$RELEASE_UPLOAD_URL?name=$(basename $RELEASE_FILE)" 61 | 62 | echo "release file uploaded" 63 | -------------------------------------------------------------------------------- /sha2raw/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sha2raw" 3 | version = "14.0.0" 4 | authors = ["RustCrypto Developers", "Friedel Ziegelmayer "] 5 | license = "MIT OR Apache-2.0" 6 | description = "SHA-2 hash function" 7 | documentation = 
"https://docs.rs/sha2raw" 8 | repository = "https://github.com/filecoin-project/rust-fil-proofs" 9 | keywords = ["crypto", "sha2", "hash", "digest"] 10 | categories = ["cryptography", "no-std"] 11 | edition = "2018" 12 | 13 | [dependencies] 14 | # Sorted alphabetically 15 | byteorder.workspace = true 16 | digest = "0.10.3" 17 | fake-simd = "0.1" 18 | lazy_static.workspace = true 19 | opaque-debug = "0.3" 20 | sha2-asm = { version = "0.6", optional = true } 21 | 22 | [target.'cfg(target_arch = "x86_64")'.dependencies] 23 | cpufeatures = "0.2.2" 24 | 25 | [dev-dependencies] 26 | # Sorted alphabetically 27 | rand.workspace = true 28 | rand_xorshift.workspace = true 29 | sha2.workspace = true 30 | 31 | [features] 32 | default = ["asm"] 33 | asm = ["sha2-asm"] 34 | 35 | 36 | -------------------------------------------------------------------------------- /sha2raw/README.md: -------------------------------------------------------------------------------- 1 | # sha2raw 2 | 3 | 4 | > Implementation of Sha256 with a focus on hashing fixed sizes chunks, that do not require padding. Based on [sha2](https://docs.rs/sha2). 5 | -------------------------------------------------------------------------------- /sha2raw/src/consts.rs: -------------------------------------------------------------------------------- 1 | use fake_simd::u32x4; 2 | 3 | pub const STATE_LEN: usize = 8; 4 | pub const BLOCK_LEN: usize = 16; 5 | 6 | /// Constants necessary for SHA-256 family of digests. 7 | pub const K32: [u32; 64] = [ 8 | 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 9 | 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 10 | 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 11 | 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 12 | 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 13 | 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 14 | 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 15 | 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, 16 | ]; 17 | 18 | /// Constants necessary for SHA-256 family of digests. 19 | pub const K32X4: [u32x4; 16] = [ 20 | u32x4(K32[3], K32[2], K32[1], K32[0]), 21 | u32x4(K32[7], K32[6], K32[5], K32[4]), 22 | u32x4(K32[11], K32[10], K32[9], K32[8]), 23 | u32x4(K32[15], K32[14], K32[13], K32[12]), 24 | u32x4(K32[19], K32[18], K32[17], K32[16]), 25 | u32x4(K32[23], K32[22], K32[21], K32[20]), 26 | u32x4(K32[27], K32[26], K32[25], K32[24]), 27 | u32x4(K32[31], K32[30], K32[29], K32[28]), 28 | u32x4(K32[35], K32[34], K32[33], K32[32]), 29 | u32x4(K32[39], K32[38], K32[37], K32[36]), 30 | u32x4(K32[43], K32[42], K32[41], K32[40]), 31 | u32x4(K32[47], K32[46], K32[45], K32[44]), 32 | u32x4(K32[51], K32[50], K32[49], K32[48]), 33 | u32x4(K32[55], K32[54], K32[53], K32[52]), 34 | u32x4(K32[59], K32[58], K32[57], K32[56]), 35 | u32x4(K32[63], K32[62], K32[61], K32[60]), 36 | ]; 37 | 38 | pub static H256: [u32; STATE_LEN] = [ 39 | 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, 40 | ]; 41 | -------------------------------------------------------------------------------- /sha2raw/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
An implementation of the [SHA-2][1] cryptographic hash algorithms. 2 | 3 | // Give relevant error messages if the user tries to enable AArch64 asm on unsupported platforms. 4 | 5 | #![deny(clippy::all, clippy::perf, clippy::correctness)] 6 | #![allow(clippy::unreadable_literal)] 7 | 8 | pub use digest::Digest; 9 | 10 | mod consts; 11 | mod platform; 12 | mod sha256; 13 | #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] 14 | mod sha256_intrinsics; 15 | mod sha256_utils; 16 | 17 | pub use sha256::Sha256; 18 | -------------------------------------------------------------------------------- /sha2raw/src/platform.rs: -------------------------------------------------------------------------------- 1 | #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] 2 | use crate::sha256_intrinsics; 3 | use crate::sha256_utils; 4 | 5 | #[allow(dead_code)] 6 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 7 | enum Platform { 8 | Portable, 9 | #[cfg(feature = "asm")] 10 | Asm, 11 | #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] 12 | Sha, 13 | } 14 | 15 | #[derive(Clone, Copy, Debug)] 16 | pub struct Implementation(Platform); 17 | 18 | impl Implementation { 19 | pub fn detect() -> Self { 20 | // Try the different implementations in order of how fast/modern they are. 21 | #[cfg(target_arch = "x86_64")] 22 | { 23 | if let Some(sha_impl) = Self::sha_if_supported() { 24 | return sha_impl; 25 | } 26 | } 27 | #[cfg(feature = "asm")] 28 | { 29 | if let Some(asm_impl) = Self::asm_if_supported() { 30 | return asm_impl; 31 | } 32 | } 33 | 34 | Self::portable() 35 | } 36 | 37 | pub fn portable() -> Self { 38 | Implementation(Platform::Portable) 39 | } 40 | 41 | #[cfg(target_arch = "x86_64")] 42 | #[allow(unreachable_code)] 43 | pub fn sha_if_supported() -> Option<Self> { 44 | // Use cpufeatures instead of is_x86_feature_detected, to ensure the check 45 | // never happens at compile time. 46 | cpufeatures::new!(cpuid_sha, "sha"); 47 | 48 | let is_runtime_ok = cpuid_sha::get(); 49 | 50 | #[cfg(target_feature = "sha")] 51 | { 52 | if !is_runtime_ok { 53 | println!("WARN: sha-ni not available, falling back"); 54 | } 55 | } 56 | 57 | // Make sure this computer actually supports it 58 | if is_runtime_ok { 59 | return Some(Implementation(Platform::Sha)); 60 | } 61 | 62 | None 63 | } 64 | 65 | #[cfg(feature = "asm")] 66 | pub fn asm_if_supported() -> Option<Self> { 67 | Some(Implementation(Platform::Asm)) 68 | } 69 | 70 | #[inline] 71 | pub fn compress256(self, state: &mut [u32; 8], blocks: &[&[u8]]) { 72 | match self.0 { 73 | Platform::Portable => { 74 | sha256_utils::compress256(state, blocks); 75 | } 76 | #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] 77 | Platform::Sha => { 78 | unsafe { sha256_intrinsics::compress256(state, blocks) }; 79 | } 80 | #[cfg(feature = "asm")] 81 | Platform::Asm => { 82 | let mut buffer = [0u8; 64]; 83 | for block in blocks.chunks(2) { 84 | buffer[..32].copy_from_slice(block[0]); 85 | buffer[32..].copy_from_slice(block[1]); 86 | sha2_asm::compress256(state, &[buffer]); 87 | } 88 | } 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /sha2raw/src/sha256.rs: -------------------------------------------------------------------------------- 1 | use byteorder::{ByteOrder, BE}; 2 | use lazy_static::lazy_static; 3 | 4 | use crate::{consts::H256, platform::Implementation}; 5 | 6 | lazy_static!
{ 7 | static ref IMPL: Implementation = Implementation::detect(); 8 | } 9 | 10 | #[derive(Clone)] 11 | pub struct Sha256 { 12 | len: u64, 13 | state: [u32; 8], 14 | } 15 | 16 | impl Default for Sha256 { 17 | fn default() -> Self { 18 | Sha256 { 19 | len: 0, 20 | state: H256, 21 | } 22 | } 23 | } 24 | 25 | impl Sha256 { 26 | pub fn new() -> Self { 27 | Sha256::default() 28 | } 29 | 30 | pub fn digest(blocks: &[&[u8]]) -> [u8; 32] { 31 | let mut sha = Sha256::new(); 32 | sha.input(blocks); 33 | sha.finish() 34 | } 35 | 36 | pub fn input(&mut self, blocks: &[&[u8]]) { 37 | debug_assert_eq!(blocks.len() % 2, 0, "invalid block length"); 38 | 39 | self.len += (blocks.len() as u64) << 8; 40 | 41 | IMPL.compress256(&mut self.state, blocks); 42 | } 43 | 44 | pub fn finish(mut self) -> [u8; 32] { 45 | let mut block0 = [0u8; 32]; 46 | let mut block1 = [0u8; 32]; 47 | 48 | // Append single 1 bit 49 | block0[0] = 0b1000_0000; 50 | 51 | // Write L as 64 big endian integer 52 | let l = self.len; 53 | block1[32 - 8..].copy_from_slice(&l.to_be_bytes()[..]); 54 | 55 | IMPL.compress256(&mut self.state, &[&block0[..], &block1[..]][..]); 56 | 57 | let mut out = [0u8; 32]; 58 | BE::write_u32_into(&self.state, &mut out); 59 | out 60 | } 61 | 62 | pub fn finish_with(mut self, block0: &[u8]) -> [u8; 32] { 63 | debug_assert_eq!(block0.len(), 32); 64 | 65 | let mut block1 = [0u8; 32]; 66 | 67 | // Append single 1 bit 68 | block1[0] = 0b1000_0000; 69 | 70 | // Write L as 64 big endian integer 71 | let l = self.len + 256; 72 | block1[32 - 8..].copy_from_slice(&l.to_be_bytes()[..]); 73 | 74 | IMPL.compress256(&mut self.state, &[block0, &block1[..]][..]); 75 | 76 | let mut out = [0u8; 32]; 77 | BE::write_u32_into(&self.state, &mut out); 78 | out 79 | } 80 | } 81 | 82 | opaque_debug::implement!(Sha256); 83 | 84 | #[cfg(test)] 85 | mod tests { 86 | use super::*; 87 | 88 | use rand::{RngCore, SeedableRng}; 89 | use rand_xorshift::XorShiftRng; 90 | use sha2::{Digest, Sha256 as Original}; 91 | 92 | #[test] 93 | fn test_fuzz_simple() { 94 | fuzz(10); 95 | } 96 | 97 | #[test] 98 | #[ignore] 99 | fn test_fuzz_long() { 100 | fuzz(1_000); 101 | } 102 | 103 | fn fuzz(n: usize) { 104 | let rng = &mut XorShiftRng::from_seed([ 105 | 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 106 | 0xbc, 0xe5, 107 | ]); 108 | for k in 1..n { 109 | for _ in 0..100 { 110 | let mut input = vec![0u8; 64 * k]; 111 | rng.fill_bytes(&mut input); 112 | let chunked = input.chunks(32).collect::>(); 113 | assert_eq!(&Sha256::digest(&chunked)[..], &Original::digest(&input)[..]) 114 | } 115 | } 116 | 117 | for k in (1..n).step_by(2) { 118 | for _ in 0..100 { 119 | let mut input = vec![0u8; 32 * k]; 120 | rng.fill_bytes(&mut input); 121 | let mut hasher = Sha256::new(); 122 | for chunk in input.chunks(64) { 123 | if chunk.len() == 64 { 124 | hasher.input(&[&chunk[..32], &chunk[32..]]); 125 | } 126 | } 127 | assert_eq!(input.len() % 64, 32); 128 | let hash = hasher.finish_with(&input[input.len() - 32..]); 129 | 130 | assert_eq!( 131 | &hash[..], 132 | &Original::digest(&input)[..], 133 | "input: {:?}", 134 | &input 135 | ); 136 | } 137 | } 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /srs-inner-product.json: -------------------------------------------------------------------------------- 1 | { 2 | "v28-fil-inner-product-v1.srs": { 3 | "cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g", 4 | "digest": "ae20310138f5ba81451d723f858e3797", 5 | "sector_size": 0 6 | } 7 
| } -------------------------------------------------------------------------------- /storage-proofs-core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "storage-proofs-core" 3 | version = "19.0.0" 4 | authors = ["dignifiedquire "] 5 | description = "Core parts for proofs of storage" 6 | license = "MIT OR Apache-2.0" 7 | edition = "2018" 8 | repository = "https://github.com/filecoin-project/rust-fil-proofs" 9 | readme = "README.md" 10 | 11 | [lib] 12 | bench = false 13 | 14 | [dependencies] 15 | filecoin-hashers = { workspace = true, features = ["sha256", "poseidon"] } 16 | fr32.workspace = true 17 | # Sorted alphabetically 18 | aes = "0.8.1" 19 | anyhow.workspace = true 20 | bellperson.workspace = true 21 | blake2b_simd.workspace = true 22 | blstrs.workspace = true 23 | byteorder.workspace = true 24 | cbc = { version = "0.1.2", features = ["std"] } 25 | config = { version = "0.14", default-features = false, features = ["toml"] } 26 | cpu-time = { workspace = true, optional = true } 27 | ff.workspace = true 28 | fs2 = "0.4" 29 | generic-array.workspace = true 30 | gperftools = { workspace = true, optional = true } 31 | itertools.workspace = true 32 | lazy_static.workspace = true 33 | log.workspace = true 34 | memmap2.workspace = true 35 | merkletree.workspace = true 36 | num_cpus.workspace = true 37 | rand.workspace = true 38 | rand_chacha = "0.3" 39 | rayon.workspace = true 40 | semver = "1.0.6" 41 | serde = { workspace = true, features = ["derive"]} 42 | serde_json.workspace = true 43 | sha2.workspace = true 44 | thiserror.workspace = true 45 | 46 | [dev-dependencies] 47 | sha2raw.workspace = true 48 | filecoin-hashers = { workspace = true, features = ["blake2s", "sha256", "poseidon"] } 49 | # Sorted alphabetically 50 | blake2s_simd.workspace = true 51 | criterion.workspace = true 52 | pretty_assertions.workspace = true 53 | proptest = "1.0.0" 54 | rand_xorshift.workspace = true 55 | tempfile.workspace = true 56 | 57 | [features] 58 | default = ["opencl"] 59 | simd = [] 60 | asm = ["sha2/sha2-asm"] 61 | big-sector-sizes-bench = [] 62 | measurements = ["cpu-time", "gperftools"] 63 | profile = ["measurements"] 64 | # This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD` 65 | # setting is ignored, no `TemporaryAux` file will be written. 
66 | fixed-rows-to-discard = [] 67 | 68 | cuda = ["bellperson/cuda", "filecoin-hashers/cuda"] 69 | cuda-supraseal = ["bellperson/cuda-supraseal", "filecoin-hashers/cuda"] 70 | opencl = ["bellperson/opencl", "filecoin-hashers/opencl"] 71 | 72 | [[bench]] 73 | name = "sha256" 74 | harness = false 75 | 76 | [[bench]] 77 | name = "blake2s" 78 | harness = false 79 | 80 | [[bench]] 81 | name = "drgraph" 82 | harness = false 83 | 84 | [[bench]] 85 | name = "xor" 86 | harness = false 87 | 88 | [[bench]] 89 | name = "merkle" 90 | harness = false 91 | 92 | [[bench]] 93 | name = "misc" 94 | harness = false 95 | -------------------------------------------------------------------------------- /storage-proofs-core/README.md: -------------------------------------------------------------------------------- 1 | # Storage Proofs Core 2 | 3 | ## License 4 | 5 | MIT or Apache 2.0 6 | -------------------------------------------------------------------------------- /storage-proofs-core/benches/blake2s.rs: -------------------------------------------------------------------------------- 1 | use bellperson::{ 2 | gadgets::{ 3 | blake2s::blake2s as blake2s_circuit, 4 | boolean::{AllocatedBit, Boolean}, 5 | }, 6 | groth16::{create_random_proof, generate_random_parameters}, 7 | util_cs::bench_cs::BenchCS, 8 | Circuit, ConstraintSystem, SynthesisError, 9 | }; 10 | use blake2s_simd::blake2s; 11 | use blstrs::{Bls12, Scalar as Fr}; 12 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 13 | use rand::{thread_rng, Rng}; 14 | 15 | struct Blake2sExample<'a> { 16 | data: &'a [Option<bool>], 17 | } 18 | 19 | impl Circuit<Fr> for Blake2sExample<'_> { 20 | fn synthesize<CS: ConstraintSystem<Fr>>(self, cs: &mut CS) -> Result<(), SynthesisError> { 21 | let data: Vec<Boolean> = self 22 | .data 23 | .iter() 24 | .enumerate() 25 | .map(|(i, b)| { 26 | Ok(Boolean::from(AllocatedBit::alloc( 27 | cs.namespace(|| format!("bit {}", i)), 28 | *b, 29 | )?)) 30 | }) 31 | .collect::<Result<Vec<Boolean>, SynthesisError>>()?; 32 | 33 | let cs = cs.namespace(|| "blake2s"); 34 | let personalization = vec![0u8; 8]; 35 | let _res = blake2s_circuit(cs, &data, &personalization)?; 36 | Ok(()) 37 | } 38 | } 39 | 40 | fn blake2s_benchmark(c: &mut Criterion) { 41 | let params = vec![32, 64, 10 * 32]; 42 | 43 | let mut group = c.benchmark_group("non-circuit"); 44 | for bytes in params { 45 | group.bench_function(format!("hash-blake2s-{}", bytes), |b| { 46 | let mut rng = thread_rng(); 47 | let data: Vec<u8> = (0..bytes).map(|_| rng.gen()).collect(); 48 | 49 | b.iter(|| black_box(blake2s(&data))) 50 | }); 51 | } 52 | 53 | group.finish(); 54 | } 55 | 56 | fn blake2s_circuit_benchmark(c: &mut Criterion) { 57 | let mut rng1 = thread_rng(); 58 | let groth_params = 59 | generate_random_parameters::<Bls12, _, _>(Blake2sExample { data: &[None; 256] }, &mut rng1) 60 | .unwrap(); 61 | 62 | let params = vec![32]; 63 | 64 | let mut group = c.benchmark_group("hash-blake2s-circuit"); 65 | for bytes in params { 66 | group.bench_function(format!("create-proof-{}", bytes), |b| { 67 | let mut rng = thread_rng(); 68 | let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect(); 69 | 70 | b.iter(|| { 71 | let proof = create_random_proof( 72 | Blake2sExample { 73 | data: data.as_slice(), 74 | }, 75 | &groth_params, 76 | &mut rng, 77 | ) 78 | .unwrap(); 79 | 80 | black_box(proof) 81 | }); 82 | }); 83 | group 84 | .bench_function("synthesize", |b| { 85 | let mut rng = thread_rng(); 86 | let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect(); 87 | b.iter(|| { 88 | let mut cs = BenchCS::<Fr>::new(); 89 | 90 |
Blake2sExample { 91 | data: data.as_slice(), 92 | } 93 | .synthesize(&mut cs) 94 | .unwrap(); 95 | 96 | black_box(cs) 97 | }); 98 | }) 99 | .sample_size(20); 100 | } 101 | 102 | group.finish(); 103 | } 104 | 105 | criterion_group!(benches, blake2s_benchmark, blake2s_circuit_benchmark); 106 | criterion_main!(benches); 107 | -------------------------------------------------------------------------------- /storage-proofs-core/benches/drgraph.rs: -------------------------------------------------------------------------------- 1 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 2 | use filecoin_hashers::poseidon::PoseidonHasher; 3 | use storage_proofs_core::{ 4 | api_version::ApiVersion, 5 | drgraph::{BucketGraph, Graph, BASE_DEGREE}, 6 | }; 7 | 8 | #[allow(clippy::unit_arg)] 9 | fn drgraph(c: &mut Criterion) { 10 | let params = vec![12, 24, 128, 1024]; 11 | 12 | let mut group = c.benchmark_group("sample"); 13 | for n in params { 14 | group.bench_function(format!("bucket/m=6-{}", n), |b| { 15 | let graph = 16 | BucketGraph::<PoseidonHasher>::new(n, BASE_DEGREE, 0, [32; 32], ApiVersion::V1_1_0) 17 | .unwrap(); 18 | 19 | b.iter(|| { 20 | let mut parents = vec![0; 6]; 21 | black_box(graph.parents(2, &mut parents).unwrap()); 22 | }) 23 | }); 24 | } 25 | 26 | group.finish(); 27 | } 28 | 29 | criterion_group!(benches, drgraph); 30 | criterion_main!(benches); 31 | -------------------------------------------------------------------------------- /storage-proofs-core/benches/merkle.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 3 | use filecoin_hashers::{ 4 | poseidon::PoseidonDomain, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, 5 | }; 6 | use rand::{thread_rng, Rng}; 7 | use storage_proofs_core::merkle::{create_base_merkle_tree, BinaryMerkleTree}; 8 | 9 | fn merkle_benchmark_sha256(c: &mut Criterion) { 10 | let params = if cfg!(feature = "big-sector-sizes-bench") { 11 | vec![128, 1024, 1_048_576] 12 | } else { 13 | vec![128, 1024] 14 | }; 15 | 16 | let mut group = c.benchmark_group("merkletree-binary"); 17 | for n_nodes in params { 18 | group.bench_function(format!("sha256-{}", n_nodes), |b| { 19 | let mut rng = thread_rng(); 20 | let data: Vec<u8> = (0..32 * n_nodes).map(|_| rng.gen()).collect(); 21 | b.iter(|| { 22 | black_box( 23 | create_base_merkle_tree::<BinaryMerkleTree<Sha256Hasher>>(None, n_nodes, &data) 24 | .unwrap(), 25 | ) 26 | }) 27 | }); 28 | } 29 | 30 | group.finish(); 31 | } 32 | 33 | fn merkle_benchmark_poseidon(c: &mut Criterion) { 34 | let params = if cfg!(feature = "big-sector-sizes-bench") { 35 | vec![64, 128, 1024, 1_048_576] 36 | } else { 37 | vec![64, 128, 1024] 38 | }; 39 | 40 | let mut group = c.benchmark_group("merkletree-binary"); 41 | for n_nodes in params { 42 | group.bench_function(format!("poseidon-{}", n_nodes), |b| { 43 | let mut rng = thread_rng(); 44 | let mut data: Vec<u8> = Vec::with_capacity(32 * n_nodes); 45 | (0..n_nodes) 46 | .try_for_each(|_| -> Result<()> { 47 | let node = PoseidonDomain::random(&mut rng); 48 | data.extend(node.into_bytes()); 49 | Ok(()) 50 | }) 51 | .expect("failed to generate data"); 52 | 53 | b.iter(|| { 54 | black_box( 55 | create_base_merkle_tree::<BinaryMerkleTree<PoseidonHasher>>( 56 | None, n_nodes, &data, 57 | ) 58 | .unwrap(), 59 | ) 60 | }) 61 | }); 62 | } 63 | 64 | group.finish(); 65 | } 66 | 67 | criterion_group!(benches, merkle_benchmark_sha256, merkle_benchmark_poseidon); 68 | criterion_main!(benches); 69 |
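A note on the two data generators in the merkle bench above: the sha256 case fills `data` with arbitrary random bytes, while the Poseidon case samples `PoseidonDomain::random` and serializes each node. The difference is deliberate: Poseidon hashes BLS12-381 field elements, and an arbitrary 32-byte string may encode a value at or above the field modulus, so node bytes must come from valid domain elements. A minimal standalone sketch of the same construction (the `root()` accessor is assumed from the tree API and is not shown in this file):

use filecoin_hashers::{poseidon::{PoseidonDomain, PoseidonHasher}, Domain};
use rand::thread_rng;
use storage_proofs_core::merkle::{create_base_merkle_tree, BinaryMerkleTree};

fn main() -> anyhow::Result<()> {
    let mut rng = thread_rng();
    let n_nodes = 64; // a power of the arity (2), matching the bench sizes
    let mut data = Vec::with_capacity(32 * n_nodes);
    for _ in 0..n_nodes {
        // Serialize valid field elements, never raw random bytes.
        data.extend(PoseidonDomain::random(&mut rng).into_bytes());
    }
    let tree = create_base_merkle_tree::<BinaryMerkleTree<PoseidonHasher>>(None, n_nodes, &data)?;
    println!("root: {:?}", tree.root());
    Ok(())
}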
-------------------------------------------------------------------------------- /storage-proofs-core/benches/misc.rs: -------------------------------------------------------------------------------- 1 | use std::io::{Read, Seek, Write}; 2 | 3 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 4 | use rand::{thread_rng, Rng}; 5 | use tempfile::tempfile; 6 | 7 | fn read_bytes_benchmark(c: &mut Criterion) { 8 | let params = vec![32, 64, 512, 1024, 64 * 1024]; 9 | 10 | let mut group = c.benchmark_group("read"); 11 | for bytes in params { 12 | group.bench_function(format!("from_disk-{}", bytes), |b| { 13 | let mut rng = thread_rng(); 14 | let data: Vec<u8> = (0..bytes).map(|_| rng.gen()).collect(); 15 | 16 | let mut f = tempfile().unwrap(); 17 | f.write_all(&data).unwrap(); 18 | f.sync_all().unwrap(); 19 | 20 | b.iter(|| { 21 | let mut res = vec![0u8; bytes]; 22 | f.rewind().unwrap(); 23 | f.read_exact(&mut res).unwrap(); 24 | 25 | black_box(res) 26 | }) 27 | }); 28 | } 29 | 30 | group.finish(); 31 | } 32 | 33 | criterion_group!(benches, read_bytes_benchmark); 34 | criterion_main!(benches); 35 | -------------------------------------------------------------------------------- /storage-proofs-core/parameters.json: -------------------------------------------------------------------------------- 1 | ../parameters.json -------------------------------------------------------------------------------- /storage-proofs-core/src/cache_key.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{self, Display, Formatter}; 2 | 3 | pub const LABEL_LAYER_KEY: &str = "layer"; 4 | 5 | #[derive(Debug, Copy, Clone)] 6 | pub enum CacheKey { 7 | PAux, 8 | TAux, 9 | CommDTree, 10 | CommCTree, 11 | CommRLastTree, 12 | } 13 | 14 | impl Display for CacheKey { 15 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 16 | match *self { 17 | CacheKey::PAux => write!(f, "p_aux"), 18 | CacheKey::TAux => write!(f, "t_aux"), 19 | CacheKey::CommDTree => write!(f, "tree-d"), 20 | CacheKey::CommCTree => write!(f, "tree-c"), 21 | CacheKey::CommRLastTree => write!(f, "tree-r-last"), 22 | } 23 | } 24 | } 25 | 26 | impl CacheKey { 27 | pub fn label_layer(layer: usize) -> String { 28 | format!("{}-{}", LABEL_LAYER_KEY, layer) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /storage-proofs-core/src/crypto/aes.rs: -------------------------------------------------------------------------------- 1 | use aes::cipher::block_padding::ZeroPadding; 2 | use aes::cipher::crypto_common::KeyIvInit; 3 | use aes::cipher::{BlockDecryptMut, BlockEncryptMut}; 4 | use anyhow::{ensure, Context}; 5 | 6 | use crate::error::Result; 7 | 8 | const IV: [u8; 16] = [0u8; 16]; 9 | 10 | pub fn encode(key: &[u8], plaintext: &[u8]) -> Result<Vec<u8>> { 11 | ensure!(key.len() == 32, "invalid key length"); 12 | 13 | let mode = cbc::Encryptor::<aes::Aes256>::new_from_slices(key, &IV).context("invalid key")?; 14 | 15 | Ok(mode.encrypt_padded_vec_mut::<ZeroPadding>(plaintext)) 16 | } 17 | 18 | pub fn decode(key: &[u8], ciphertext: &[u8]) -> Result<Vec<u8>> { 19 | ensure!(key.len() == 32, "invalid key length"); 20 | 21 | let mode = cbc::Decryptor::<aes::Aes256>::new_from_slices(key, &IV).context("invalid key")?; 22 | 23 | let res = mode 24 | .decrypt_padded_vec_mut::<ZeroPadding>(ciphertext) 25 | .context("failed to decrypt")?; 26 | Ok(res) 27 | } 28 | 29 | #[cfg(test)] 30 | mod tests { 31 | use super::*; 32 | 33 | use rand::{Rng, SeedableRng}; 34 | use rand_xorshift::XorShiftRng; 35 | 36 | use crate::TEST_SEED; 37 |
38 | #[test] 39 | fn test_aes() { 40 | let mut rng = XorShiftRng::from_seed(TEST_SEED); 41 | 42 | for i in 0..10 { 43 | let key: Vec = (0..32).map(|_| rng.gen()).collect(); 44 | let plaintext: Vec = (0..(i + 1) * 32).map(|_| rng.gen()).collect(); 45 | 46 | let ciphertext = 47 | encode(key.as_slice(), plaintext.as_slice()).expect("failed to encode"); 48 | 49 | assert_ne!( 50 | plaintext, ciphertext, 51 | "plaintext and ciphertext are identical" 52 | ); 53 | assert_eq!(plaintext.len(), ciphertext.len()); 54 | 55 | let roundtrip = 56 | decode(key.as_slice(), ciphertext.as_slice()).expect("failed to decode"); 57 | assert_eq!(plaintext, roundtrip, "failed to roundtrip"); 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /storage-proofs-core/src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | use sha2::{Digest, Sha256}; 2 | 3 | pub mod aes; 4 | pub mod feistel; 5 | pub mod sloth; 6 | pub mod xor; 7 | 8 | pub struct DomainSeparationTag(&'static str); 9 | 10 | pub const DRSAMPLE_DST: DomainSeparationTag = DomainSeparationTag("Filecoin_DRSample"); 11 | pub const FEISTEL_DST: DomainSeparationTag = DomainSeparationTag("Filecoin_Feistel"); 12 | 13 | pub fn derive_porep_domain_seed( 14 | domain_separation_tag: DomainSeparationTag, 15 | porep_id: [u8; 32], 16 | ) -> [u8; 32] { 17 | Sha256::new() 18 | .chain_update(domain_separation_tag.0) 19 | .chain_update(porep_id) 20 | .finalize() 21 | .into() 22 | } 23 | -------------------------------------------------------------------------------- /storage-proofs-core/src/crypto/sloth.rs: -------------------------------------------------------------------------------- 1 | use blstrs::Scalar as Fr; 2 | 3 | /// Sloth based encoding. 4 | #[inline] 5 | pub fn encode(key: &Fr, plaintext: &Fr) -> Fr { 6 | plaintext + key 7 | } 8 | 9 | /// Sloth based decoding. 10 | #[inline] 11 | pub fn decode(key: &Fr, ciphertext: &Fr) -> Fr { 12 | ciphertext - key 13 | } 14 | 15 | #[cfg(test)] 16 | mod tests { 17 | use super::*; 18 | 19 | use ff::PrimeField; 20 | use proptest::{prop_compose, proptest}; 21 | 22 | // the modulus from `bls12_381::Fr` 23 | // The definition of MODULUS and comment defining r come from blstrs/src/scalar.rs. 24 | // r = 52435875175126190479447740508185965837690552500527637822603658699938581184513 25 | const MODULUS: [u64; 4] = [ 26 | 0xffffffff00000001, 27 | 0x53bda402fffe5bfe, 28 | 0x3339d80809a1d805, 29 | 0x73eda753299d7d48, 30 | ]; 31 | 32 | #[test] 33 | fn sloth_bls_12() { 34 | let key = Fr::from_str_vartime("11111111").expect("from_str failed"); 35 | let plaintext = Fr::from_str_vartime("123456789").expect("from_str failed"); 36 | let ciphertext = encode(&key, &plaintext); 37 | let decrypted = decode(&key, &ciphertext); 38 | assert_eq!(plaintext, decrypted); 39 | assert_ne!(plaintext, ciphertext); 40 | } 41 | 42 | #[test] 43 | fn sloth_bls_12_fake() { 44 | let key = Fr::from_str_vartime("11111111").expect("from_str failed"); 45 | let key_fake = Fr::from_str_vartime("11111112").expect("from_str failed"); 46 | let plaintext = Fr::from_str_vartime("123456789").expect("from_str failed"); 47 | let ciphertext = encode(&key, &plaintext); 48 | let decrypted = decode(&key_fake, &ciphertext); 49 | assert_ne!(plaintext, decrypted); 50 | } 51 | 52 | prop_compose! 
{ 53 | fn arb_fr()(a in 0..MODULUS[0], b in 0..MODULUS[1], c in 0..MODULUS[2], d in 0..MODULUS[3]) -> Fr { 54 | let mut le_bytes = [0u8; 32]; 55 | le_bytes[0..8].copy_from_slice(&a.to_le_bytes()); 56 | le_bytes[8..16].copy_from_slice(&b.to_le_bytes()); 57 | le_bytes[16..24].copy_from_slice(&c.to_le_bytes()); 58 | le_bytes[24..32].copy_from_slice(&d.to_le_bytes()); 59 | Fr::from_repr_vartime(le_bytes).expect("from_repr failed") 60 | } 61 | } 62 | proptest! { 63 | #[test] 64 | fn sloth_bls_roundtrip(key in arb_fr(), plaintext in arb_fr()) { 65 | let ciphertext = encode(&key, &plaintext); 66 | assert_eq!(decode(&key, &ciphertext), plaintext); 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /storage-proofs-core/src/crypto/xor.rs: -------------------------------------------------------------------------------- 1 | use anyhow::ensure; 2 | 3 | use crate::error::Result; 4 | 5 | /// Encodes plaintext by elementwise xoring with the passed in key. 6 | pub fn encode(key: &[u8], plaintext: &[u8]) -> Result> { 7 | xor(key, plaintext) 8 | } 9 | 10 | /// Decodes ciphertext by elementwise xoring with the passed in key. 11 | pub fn decode(key: &[u8], ciphertext: &[u8]) -> Result> { 12 | xor(key, ciphertext) 13 | } 14 | 15 | fn xor(key: &[u8], input: &[u8]) -> Result> { 16 | let key_len = key.len(); 17 | ensure!(key_len == 32, "Key must be 32 bytes."); 18 | 19 | Ok(input 20 | .iter() 21 | .enumerate() 22 | .map(|(i, byte)| byte ^ key[i % key_len]) 23 | .collect()) 24 | } 25 | 26 | #[cfg(test)] 27 | mod tests { 28 | use super::*; 29 | 30 | use rand::{Rng, SeedableRng}; 31 | use rand_xorshift::XorShiftRng; 32 | 33 | use crate::TEST_SEED; 34 | 35 | #[test] 36 | fn test_xor() { 37 | let mut rng = XorShiftRng::from_seed(TEST_SEED); 38 | 39 | for i in 0..10 { 40 | let key: Vec = (0..32).map(|_| rng.gen()).collect(); 41 | let plaintext: Vec = (0..(i + 1) * 32).map(|_| rng.gen()).collect(); 42 | 43 | let ciphertext = 44 | encode(key.as_slice(), plaintext.as_slice()).expect("failed to encode"); 45 | 46 | assert_ne!( 47 | plaintext, ciphertext, 48 | "plaintext and ciphertext are identical" 49 | ); 50 | assert_eq!(plaintext.len(), ciphertext.len()); 51 | 52 | let roundtrip = 53 | decode(key.as_slice(), ciphertext.as_slice()).expect("failed to decode"); 54 | assert_eq!(plaintext, roundtrip, "failed to roundtrip"); 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /storage-proofs-core/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::any::Any; 2 | 3 | pub use anyhow::Result; 4 | 5 | use bellperson::SynthesisError; 6 | 7 | use crate::sector::SectorId; 8 | 9 | /// Custom error types 10 | #[derive(Debug, thiserror::Error)] 11 | pub enum Error { 12 | #[error("Could not create PieceInclusionProof (probably bad piece commitment: comm_p)")] 13 | BadPieceCommitment, 14 | #[error("Out of bounds access {} > {}", _0, _1)] 15 | OutOfBounds(usize, usize), 16 | #[error("mismatch of data, node_size and nodes {} != {} * {}", _0, _1, _2)] 17 | InvalidMerkleTreeArgs(usize, usize, usize), 18 | #[error("{}", _0)] 19 | Synthesis(#[from] SynthesisError), 20 | #[error("{}", _0)] 21 | Io(#[from] std::io::Error), 22 | #[error("tree root and commitment do not match")] 23 | InvalidCommitment, 24 | #[error("malformed input")] 25 | MalformedInput, 26 | #[error("malformed merkle tree")] 27 | MalformedMerkleTree, 28 | #[error("invalid input size")] 29 | InvalidInputSize, 30 | 
#[error("merkle tree generation error: {}", _0)] 31 | MerkleTreeGenerationError(String), 32 | #[error("Cannot (yet) generate inclusion proof for unaligned piece.")] 33 | UnalignedPiece, 34 | #[error("{}", _0)] 35 | Serde(#[from] serde_json::Error), 36 | #[error("unclassified error: {}", _0)] 37 | Unclassified(String), 38 | #[error("Missing Private Input {0} for sector {1}")] 39 | MissingPrivateInput(&'static str, u64), 40 | #[error("faulty sectors {:?}", _0)] 41 | FaultySectors(Vec), 42 | #[error("Invalid parameters file: {}", _0)] 43 | InvalidParameters(String), 44 | } 45 | 46 | impl From> for Error { 47 | fn from(inner: Box) -> Error { 48 | Error::Unclassified(format!("{:?}", dbg!(inner))) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /storage-proofs-core/src/gadgets/encode.rs: -------------------------------------------------------------------------------- 1 | use bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError}; 2 | use ff::PrimeField; 3 | 4 | use crate::gadgets::constraint; 5 | 6 | pub fn encode( 7 | mut cs: CS, 8 | key: &AllocatedNum, 9 | value: &AllocatedNum, 10 | ) -> Result, SynthesisError> 11 | where 12 | Scalar: PrimeField, 13 | CS: ConstraintSystem, 14 | { 15 | constraint::add(cs.namespace(|| "encode_add"), key, value) 16 | } 17 | 18 | pub fn decode( 19 | mut cs: CS, 20 | key: &AllocatedNum, 21 | value: &AllocatedNum, 22 | ) -> Result, SynthesisError> 23 | where 24 | Scalar: PrimeField, 25 | CS: ConstraintSystem, 26 | { 27 | constraint::sub(cs.namespace(|| "decode_sub"), value, key) 28 | } 29 | -------------------------------------------------------------------------------- /storage-proofs-core/src/gadgets/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod constraint; 2 | pub mod encode; 3 | pub mod insertion; 4 | pub mod por; 5 | pub mod uint64; 6 | pub mod variables; 7 | pub mod xor; 8 | -------------------------------------------------------------------------------- /storage-proofs-core/src/gadgets/multipack.rs: -------------------------------------------------------------------------------- 1 | use bellperson::gadgets::{ 2 | boolean::Boolean, 3 | num::{AllocatedNum, Num}, 4 | ConstraintSystem, SynthesisError, 5 | }; 6 | use ff::{Field, PrimeField, ScalarEngine}; 7 | 8 | /// Takes a sequence of booleans and exposes them as a single compact Num. 
9 | pub fn pack_bits<E, CS>(mut cs: CS, bits: &[Boolean]) -> Result<AllocatedNum<E>, SynthesisError> 10 | where 11 | E: ScalarEngine, 12 | CS: ConstraintSystem<E>, 13 | { 14 | let mut num = Num::<E>::zero(); 15 | let mut coeff = E::Fr::one(); 16 | for bit in bits.iter().take(E::Fr::CAPACITY as usize) { 17 | num = num.add_bool_with_coeff(CS::one(), &bit, coeff); 18 | 19 | coeff.double(); 20 | } 21 | 22 | let alloc_num = AllocatedNum::alloc(cs.namespace(|| "input"), || { 23 | num.get_value() 24 | .ok_or_else(|| SynthesisError::AssignmentMissing) 25 | })?; 26 | 27 | // num * 1 = input 28 | cs.enforce( 29 | || "packing constraint", 30 | |_| num.lc(E::Fr::one()), 31 | |lc| lc + CS::one(), 32 | |lc| lc + alloc_num.get_variable(), 33 | ); 34 | 35 | Ok(alloc_num) 36 | } 37 | -------------------------------------------------------------------------------- /storage-proofs-core/src/gadgets/variables.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{self, Debug, Formatter}; 2 | 3 | use bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError}; 4 | use ff::PrimeField; 5 | 6 | use crate::error::Result; 7 | 8 | /// Root represents a root commitment which may be either a raw value or an already-allocated number. 9 | /// This allows subcomponents to depend on roots which may optionally be shared with their parent 10 | /// or sibling components. 11 | #[derive(Clone)] 12 | pub enum Root<Scalar: PrimeField> { 13 | Var(AllocatedNum<Scalar>), 14 | Val(Option<Scalar>), 15 | } 16 | 17 | impl<Scalar: PrimeField> Debug for Root<Scalar> { 18 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 19 | match self { 20 | Root::Var(num) => write!(f, "Root::Var({:?})", num.get_value()), 21 | Root::Val(val) => write!(f, "Root::Val({:?})", val), 22 | } 23 | } 24 | } 25 | 26 | impl<Scalar: PrimeField> Root<Scalar> { 27 | pub fn from_allocated<CS: ConstraintSystem<Scalar>>(allocated: AllocatedNum<Scalar>) -> Self { 28 | Root::Var(allocated) 29 | } 30 | 31 | pub fn allocated<CS: ConstraintSystem<Scalar>>( 32 | &self, 33 | cs: CS, 34 | ) -> Result<AllocatedNum<Scalar>, SynthesisError> { 35 | match self { 36 | Root::Var(allocated) => Ok(allocated.clone()), 37 | Root::Val(fr) => { 38 | AllocatedNum::alloc(cs, || fr.ok_or_else(|| SynthesisError::AssignmentMissing)) 39 | } 40 | } 41 | } 42 | 43 | pub fn var<CS: ConstraintSystem<Scalar>>(cs: CS, fr: Scalar) -> Result<Self> { 44 | Ok(Root::Var(AllocatedNum::alloc(cs, || Ok(fr))?)) 45 | } 46 | 47 | pub fn is_some(&self) -> bool { 48 | match self { 49 | Root::Var(_) => true, 50 | Root::Val(Some(_)) => true, 51 | Root::Val(None) => false, 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /storage-proofs-core/src/gadgets/xor.rs: -------------------------------------------------------------------------------- 1 | use bellperson::{gadgets::boolean::Boolean, ConstraintSystem, SynthesisError}; 2 | use ff::PrimeField; 3 | 4 | pub fn xor<Scalar, CS>( 5 | cs: &mut CS, 6 | key: &[Boolean], 7 | input: &[Boolean], 8 | ) -> Result<Vec<Boolean>, SynthesisError> 9 | where 10 | Scalar: PrimeField, 11 | CS: ConstraintSystem<Scalar>, 12 | { 13 | let key_len = key.len(); 14 | assert_eq!(key_len, 32 * 8); 15 | 16 | input 17 | .iter() 18 | .enumerate() 19 | .map(|(i, byte)| { 20 | Boolean::xor( 21 | cs.namespace(|| format!("xor bit: {}", i)), 22 | byte, 23 | &key[i % key_len], 24 | ) 25 | }) 26 | .collect::<Result<Vec<Boolean>, SynthesisError>>() 27 | } 28 | 29 | #[cfg(test)] 30 | mod tests { 31 | use super::*; 32 | 33 | use bellperson::util_cs::test_cs::TestConstraintSystem; 34 | use blstrs::Scalar as Fr; 35 | use rand::{Rng, SeedableRng}; 36 | use rand_xorshift::XorShiftRng; 37 | 38 | use crate::{ 39 | crypto::xor, 40 | util::{bits_to_bytes, bytes_into_boolean_vec,
41 | TEST_SEED, 42 | }; 43 | 44 | #[test] 45 | fn test_xor_input_circuit() { 46 | let mut rng = XorShiftRng::from_seed(TEST_SEED); 47 | 48 | for i in 0..10 { 49 | let mut cs = TestConstraintSystem::<Fr>::new(); 50 | 51 | let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect(); 52 | let data: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect(); 53 | 54 | let key_bits: Vec<Boolean> = { 55 | let mut cs = cs.namespace(|| "key"); 56 | bytes_into_boolean_vec(&mut cs, Some(key.as_slice()), key.len()) 57 | .expect("bytes_into_boolean_vec failed") 58 | }; 59 | 60 | let data_bits: Vec<Boolean> = { 61 | let mut cs = cs.namespace(|| "data bits"); 62 | bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len()) 63 | .expect("bytes_into_boolean_vec failed") 64 | }; 65 | 66 | let out_bits = 67 | xor(&mut cs, key_bits.as_slice(), data_bits.as_slice()).expect("xor failed"); 68 | 69 | assert!(cs.is_satisfied(), "constraints not satisfied"); 70 | assert_eq!(out_bits.len(), data_bits.len(), "invalid output length"); 71 | 72 | // convert Vec<Boolean> to Vec<u8> 73 | let actual = bits_to_bytes( 74 | out_bits 75 | .iter() 76 | .map(|v| v.get_value().expect("get_value failed")) 77 | .collect::<Vec<bool>>() 78 | .as_slice(), 79 | ); 80 | 81 | let expected = 82 | xor::encode(key.as_slice(), data.as_slice()).expect("xor::encode failed"); 83 | 84 | assert_eq!(expected, actual, "circuit and non circuit do not match"); 85 | 86 | // -- roundtrip 87 | let roundtrip_bits = { 88 | let mut cs = cs.namespace(|| "roundtrip"); 89 | xor(&mut cs, key_bits.as_slice(), out_bits.as_slice()).expect("xor failed") 90 | }; 91 | 92 | let roundtrip = bits_to_bytes( 93 | roundtrip_bits 94 | .iter() 95 | .map(|v| v.get_value().expect("get_value failed")) 96 | .collect::<Vec<bool>>() 97 | .as_slice(), 98 | ); 99 | 100 | assert_eq!(data, roundtrip, "failed to roundtrip"); 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /storage-proofs-core/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)] 2 | #![allow(clippy::many_single_char_names)] 3 | #![allow(clippy::unreadable_literal)] 4 | #![allow(clippy::type_repetition_in_bounds)] 5 | #![allow(clippy::upper_case_acronyms)] 6 | #![allow(clippy::redundant_slicing)] 7 | #![allow(clippy::unnecessary_wraps)] 8 | #![warn(clippy::unwrap_used)] 9 | #![warn(clippy::ptr_arg)] 10 | #![warn(clippy::unnecessary_lazy_evaluations)] 11 | 12 | use std::convert::TryInto; 13 | 14 | pub mod api_version; 15 | pub mod cache_key; 16 | pub mod compound_proof; 17 | pub mod crypto; 18 | pub mod data; 19 | pub mod drgraph; 20 | pub mod error; 21 | pub mod gadgets; 22 | pub mod measurements; 23 | pub mod merkle; 24 | pub mod multi_proof; 25 | pub mod parameter_cache; 26 | pub mod partitions; 27 | pub mod pieces; 28 | pub mod por; 29 | pub mod proof; 30 | pub mod sector; 31 | pub mod settings; 32 | pub mod test_helper; 33 | pub mod util; 34 | 35 | pub use data::Data; 36 | 37 | pub const TEST_SEED: [u8; 16] = [ 38 | 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5, 39 | ]; 40 | 41 | pub const MAX_LEGACY_POREP_REGISTERED_PROOF_ID: u64 = 4; 42 | 43 | pub type PoRepID = [u8; 32]; 44 | 45 | pub fn is_legacy_porep_id(porep_id: PoRepID) -> bool { 46 | // NOTE: Because we take only the first 8 bytes, we are actually examining the registered proof type id, 47 | // not the porep_id. The latter requires the full 32 bytes and includes the nonce.
48 | // We are, to some extent depending explictly on the strucuture of the `porep_id`. 49 | // Of course, it happens to be the case that only the 'legacy' ids in question can ever satisfy 50 | // this predicate, so the distinction is somewhat moot. However, for the sake of clarity in any future 51 | // use of `porep_id`, we should pay close attention to this. 52 | let id = u64::from_le_bytes( 53 | porep_id[..8] 54 | .try_into() 55 | .expect("8 bytes is always a valid u64"), 56 | ); 57 | id <= MAX_LEGACY_POREP_REGISTERED_PROOF_ID 58 | } 59 | -------------------------------------------------------------------------------- /storage-proofs-core/src/measurements.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "measurements")] 2 | use std::sync::{ 3 | mpsc::{channel, Receiver, Sender}, 4 | Mutex, 5 | }; 6 | use std::time::Duration; 7 | 8 | #[cfg(feature = "measurements")] 9 | use lazy_static::lazy_static; 10 | use serde::Serialize; 11 | 12 | #[cfg(feature = "measurements")] 13 | lazy_static! { 14 | pub static ref OP_MEASUREMENTS: ( 15 | Mutex>>, 16 | Mutex> 17 | ) = { 18 | // create asynchronous channel with unlimited buffer 19 | let (tx, rx) = channel(); 20 | (Mutex::new(Some(tx)), Mutex::new(rx)) 21 | }; 22 | } 23 | 24 | #[derive(Debug, Serialize)] 25 | #[serde(rename_all = "kebab-case")] 26 | pub struct OpMeasurement { 27 | pub op: Operation, 28 | pub cpu_time: Duration, 29 | pub wall_time: Duration, 30 | } 31 | 32 | #[derive(Debug, Serialize)] 33 | #[serde(rename_all = "kebab-case")] 34 | pub enum Operation { 35 | AddPiece, 36 | GeneratePieceCommitment, 37 | GenerateTreeC, 38 | GenerateTreeRLast, 39 | CommD, 40 | EncodeWindowTimeAll, 41 | WindowCommLeavesTime, 42 | PorepCommitTime, 43 | PostInclusionProofs, 44 | PostFinalizeTicket, 45 | PostReadChallengedRange, 46 | PostPartialTicketHash, 47 | } 48 | 49 | #[cfg(feature = "measurements")] 50 | pub fn measure_op(op: Operation, f: F) -> T 51 | where 52 | F: FnOnce() -> T, 53 | { 54 | use std::time::Instant; 55 | 56 | let cpu_time_start = cpu_time::ProcessTime::now(); 57 | let wall_start_time = Instant::now(); 58 | 59 | #[cfg(feature = "profile")] 60 | gperftools::profiler::PROFILER 61 | .lock() 62 | .unwrap() 63 | .start(format!("./{:?}.profile", op)) 64 | .unwrap(); 65 | let x = f(); 66 | #[cfg(feature = "profile")] 67 | gperftools::profiler::PROFILER 68 | .lock() 69 | .unwrap() 70 | .stop() 71 | .unwrap(); 72 | 73 | let opt_tx = OP_MEASUREMENTS 74 | .0 75 | .lock() 76 | .expect("acquire lock on tx side of perf channel"); 77 | 78 | if let Some(tx) = opt_tx.as_ref() { 79 | tx.clone() 80 | .send(OpMeasurement { 81 | op, 82 | cpu_time: cpu_time_start.elapsed(), 83 | wall_time: wall_start_time.elapsed(), 84 | }) 85 | .expect("failed to send to perf channel"); 86 | } 87 | 88 | x 89 | } 90 | 91 | #[cfg(not(feature = "measurements"))] 92 | pub fn measure_op(_: Operation, f: F) -> T 93 | where 94 | F: FnOnce() -> T, 95 | { 96 | f() 97 | } 98 | -------------------------------------------------------------------------------- /storage-proofs-core/src/merkle/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::len_without_is_empty)] 2 | 3 | use std::fs::File; 4 | 5 | pub use filecoin_hashers::Hasher; 6 | pub use merkletree::store::{DiskStore, ExternalReader, Store}; 7 | 8 | use generic_array::typenum::{U0, U2}; 9 | use merkletree::store::LevelCacheStore; 10 | 11 | mod builders; 12 | mod proof; 13 | mod tree; 14 | 15 | pub use builders::*; 16 
| pub use proof::*; 17 | pub use tree::*; 18 | 19 | /// A tree that is fully persisted to disk. 20 | /// 21 | /// It's generic over the hash function `H`, the base arity `U`, sub-tree arity `V` and top-tree 22 | /// arity `W`. 23 | /// 24 | /// The base arity is used for all levels up to the top. Non-zero arities of the top-tree and/or 25 | /// sub-tree each add another layer on top. So a tree with e.g. `U = 8`, `V = 4`, `W = 2` would 26 | /// create a tree where the top level has two children, the levels below 4 children and all other 27 | /// levels below have 8 children. 28 | pub type DiskTree<H, U, V, W> = MerkleTreeWrapper<H, DiskStore<<H as Hasher>::Domain>, U, V, W>; 29 | 30 | /// A tree that is partially stored on disk, some levels are in memory. 31 | /// 32 | /// It's generic over the hash function `H`, the base arity `U`, sub-tree arity `V` and top tree 33 | /// arity `W`. 34 | /// 35 | /// The base arity is used for all levels up to the top. Non-zero arities of the top-tree and/or 36 | /// sub-tree each add another layer on top. So a tree with e.g. `U = 8`, `V = 4`, `W = 2` would 37 | /// create a tree where the top level has two children, the levels below 4 children and all other 38 | /// levels below have 8 children. 39 | pub type LCTree<H, U, V, W> = 40 | MerkleTreeWrapper<H, LevelCacheStore<<H as Hasher>::Domain, File>, U, V, W>; 41 | 42 | /// A binary merkle tree, where all levels have arity 2. It's fully persisted to disk. 43 | pub type BinaryMerkleTree<H> = DiskTree<H, U2, U0, U0>; 44 | -------------------------------------------------------------------------------- /storage-proofs-core/src/multi_proof.rs: -------------------------------------------------------------------------------- 1 | use std::io::{Read, Write}; 2 | 3 | use anyhow::{ensure, Context}; 4 | use bellperson::groth16::{self, PreparedVerifyingKey}; 5 | use blstrs::Bls12; 6 | 7 | use crate::error::Result; 8 | 9 | pub struct MultiProof<'a> { 10 | pub circuit_proofs: Vec<groth16::Proof<Bls12>>, 11 | pub verifying_key: &'a PreparedVerifyingKey<Bls12>, 12 | } 13 | 14 | const GROTH_PROOF_SIZE: usize = 192; 15 | 16 | impl<'a> MultiProof<'a> { 17 | pub fn new( 18 | groth_proofs: Vec<groth16::Proof<Bls12>>, 19 | verifying_key: &'a PreparedVerifyingKey<Bls12>, 20 | ) -> Self { 21 | MultiProof { 22 | circuit_proofs: groth_proofs, 23 | verifying_key, 24 | } 25 | } 26 | 27 | pub fn new_from_reader<R: Read>( 28 | partitions: Option<usize>, 29 | mut reader: R, 30 | verifying_key: &'a PreparedVerifyingKey<Bls12>, 31 | ) -> Result<Self> { 32 | let num_proofs = partitions.unwrap_or(1); 33 | 34 | let mut proof_vec: Vec<u8> = Vec::with_capacity(num_proofs * GROTH_PROOF_SIZE); 35 | reader.read_to_end(&mut proof_vec)?; 36 | 37 | Self::new_from_bytes(partitions, &proof_vec, verifying_key) 38 | } 39 | 40 | // Parallelizing reduces deserialization time for 10 proofs from 13ms to 2ms. 41 | pub fn new_from_bytes( 42 | partitions: Option<usize>, 43 | proof_bytes: &[u8], 44 | verifying_key: &'a PreparedVerifyingKey<Bls12>, 45 | ) -> Result<Self> { 46 | let num_proofs = partitions.unwrap_or(1); 47 | 48 | let proofs = groth16::Proof::read_many(proof_bytes, num_proofs)?; 49 | 50 | ensure!( 51 | num_proofs == proofs.len(), 52 | "expected {} proofs but found only {}", 53 | num_proofs, 54 | proofs.len() 55 | ); 56 | 57 | Ok(Self::new(proofs, verifying_key)) 58 | } 59 | 60 | pub fn write<W: Write>(&self, mut writer: W) -> Result<()> { 61 | for proof in &self.circuit_proofs { 62 | proof.write(&mut writer)?
63 | } 64 | Ok(()) 65 | } 66 | 67 | pub fn to_vec(&self) -> Result> { 68 | let mut out = Vec::new(); 69 | self.write(&mut out).context("known allocation target")?; 70 | Ok(out) 71 | } 72 | 73 | pub fn len(&self) -> usize { 74 | self.circuit_proofs.len() 75 | } 76 | 77 | pub fn is_empty(&self) -> bool { 78 | self.circuit_proofs.is_empty() 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /storage-proofs-core/src/partitions.rs: -------------------------------------------------------------------------------- 1 | pub type Partitions = Option; 2 | 3 | pub fn partition_count(partitions: Partitions) -> usize { 4 | match partitions { 5 | None => 1, 6 | Some(0) => panic!("cannot specify zero partitions"), 7 | Some(k) => k, 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /storage-proofs-core/src/proof.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use log::info; 4 | use serde::{de::DeserializeOwned, Serialize}; 5 | 6 | use crate::error::Result; 7 | 8 | /// The ProofScheme trait provides the methods that any proof scheme needs to implement. 9 | pub trait ProofScheme<'a> { 10 | type PublicParams: Clone; 11 | type SetupParams: Clone; 12 | type PublicInputs: Clone + Serialize + DeserializeOwned; 13 | type PrivateInputs; 14 | type Proof: Clone + Serialize + DeserializeOwned; 15 | type Requirements: Default; 16 | 17 | /// setup is used to generate public parameters from setup parameters in order to specialize 18 | /// a ProofScheme to the specific parameters required by a consumer. 19 | fn setup(_: &Self::SetupParams) -> Result; 20 | 21 | /// prove generates and returns a proof from public parameters, public inputs, and private inputs. 22 | fn prove( 23 | _: &Self::PublicParams, 24 | _: &Self::PublicInputs, 25 | _: &Self::PrivateInputs, 26 | ) -> Result; 27 | 28 | fn prove_all_partitions( 29 | pub_params: &Self::PublicParams, 30 | pub_in: &Self::PublicInputs, 31 | priv_in: &Self::PrivateInputs, 32 | partition_count: usize, 33 | ) -> Result> { 34 | info!("groth_proof_count: {}", partition_count); 35 | info!("generating {} groth proofs.", partition_count); 36 | 37 | let start = Instant::now(); 38 | 39 | let result = (0..partition_count) 40 | .map(|k| { 41 | info!("generating groth proof {}.", k); 42 | let start = Instant::now(); 43 | 44 | let partition_pub_in = Self::with_partition((*pub_in).clone(), Some(k)); 45 | let proof = Self::prove(pub_params, &partition_pub_in, priv_in); 46 | 47 | let proof_time = start.elapsed(); 48 | info!("groth_proof_time: {:?}", proof_time); 49 | 50 | proof 51 | }) 52 | .collect::>>(); 53 | 54 | let total_proof_time = start.elapsed(); 55 | info!("total_groth_proof_time: {:?}", total_proof_time); 56 | 57 | result 58 | } 59 | 60 | /// verify returns true if the supplied proof is valid for the given public parameter and public inputs. 61 | /// Note that verify does not have access to private inputs. 62 | /// Remember that proof is untrusted, and any data it provides MUST be validated as corresponding 63 | /// to the supplied public parameters and inputs. 
64 | fn verify( 65 | _pub_params: &Self::PublicParams, 66 | _pub_inputs: &Self::PublicInputs, 67 | _proof: &Self::Proof, 68 | ) -> Result { 69 | unimplemented!(); 70 | } 71 | 72 | fn verify_all_partitions( 73 | pub_params: &Self::PublicParams, 74 | pub_in: &Self::PublicInputs, 75 | proofs: &[Self::Proof], 76 | ) -> Result { 77 | for (k, proof) in proofs.iter().enumerate() { 78 | let partition_pub_in = Self::with_partition((*pub_in).clone(), Some(k)); // 79 | 80 | if !Self::verify(pub_params, &partition_pub_in, proof)? { 81 | return Ok(false); 82 | } 83 | } 84 | 85 | Ok(true) 86 | } 87 | 88 | // This method must be specialized by concrete ProofScheme implementations which use partitions. 89 | fn with_partition(pub_in: Self::PublicInputs, _k: Option) -> Self::PublicInputs { 90 | pub_in 91 | } 92 | 93 | fn satisfies_requirements( 94 | _pub_params: &Self::PublicParams, 95 | _requirements: &Self::Requirements, 96 | _partitions: usize, 97 | ) -> bool { 98 | true 99 | } 100 | } 101 | 102 | #[derive(Default)] 103 | pub struct NoRequirements; 104 | -------------------------------------------------------------------------------- /storage-proofs-core/src/sector.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeSet; 2 | use std::fmt::{self, Display, Formatter}; 3 | 4 | use blstrs::Scalar as Fr; 5 | use byteorder::{ByteOrder, LittleEndian}; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | /// An ordered set of `SectorId`s. 9 | pub type OrderedSectorSet = BTreeSet; 10 | 11 | /// Identifier for a single sector. 12 | #[derive( 13 | Default, Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize, 14 | )] 15 | pub struct SectorId(u64); 16 | 17 | impl From for SectorId { 18 | fn from(n: u64) -> Self { 19 | SectorId(n) 20 | } 21 | } 22 | 23 | impl From for u64 { 24 | fn from(n: SectorId) -> Self { 25 | n.0 26 | } 27 | } 28 | 29 | impl From for Fr { 30 | fn from(n: SectorId) -> Self { 31 | Fr::from(n.0) 32 | } 33 | } 34 | 35 | impl Display for SectorId { 36 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 37 | write!(f, "SectorId({})", self.0) 38 | } 39 | } 40 | 41 | impl SectorId { 42 | pub fn as_fr_safe(self) -> [u8; 32] { 43 | let mut buf: [u8; 32] = [0; 32]; 44 | LittleEndian::write_u64(&mut buf[0..8], self.0); 45 | buf 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /storage-proofs-core/src/test_helper.rs: -------------------------------------------------------------------------------- 1 | use std::fs::OpenOptions; 2 | use std::io::Write; 3 | use std::path::Path; 4 | 5 | use memmap2::{MmapMut, MmapOptions}; 6 | 7 | pub fn setup_replica(data: &[u8], replica_path: &Path) -> MmapMut { 8 | let mut f = OpenOptions::new() 9 | .read(true) 10 | .write(true) 11 | .create(true) 12 | .truncate(true) 13 | .open(replica_path) 14 | .expect("Failed to create replica"); 15 | f.write_all(data).expect("Failed to write data to replica"); 16 | 17 | unsafe { 18 | MmapOptions::new() 19 | .map_mut(&f) 20 | .expect("Failed to back memory map with tempfile") 21 | } 22 | } 23 | 24 | #[macro_export] 25 | macro_rules! 
table_tests {
    ($property_test_func:ident {
        $( $(#[$attr:meta])* $test_name:ident( $( $param:expr ),* ); )+
    }) => {
        $(
            $(#[$attr])*
            #[test]
            fn $test_name() {
                $property_test_func($( $param ),* )
            }
        )+
    }
}
-------------------------------------------------------------------------------- /storage-proofs-core/srs-inner-product.json: --------------------------------------------------------------------------------
../srs-inner-product.json
-------------------------------------------------------------------------------- /storage-proofs-porep/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "storage-proofs-porep"
version = "19.0.0"
authors = ["dignifiedquire "]
description = "Proofs of replication"
license = "MIT OR Apache-2.0"
edition = "2018"
repository = "https://github.com/filecoin-project/rust-fil-proofs"
readme = "README.md"

[dependencies]
filecoin-hashers = { workspace = true, features = ["poseidon", "sha256"] }
fr32.workspace = true
sha2raw.workspace = true
storage-proofs-core.workspace = true
# Sorted alphabetically
anyhow.workspace = true
bellperson.workspace = true
bincode.workspace = true
blake2b_simd.workspace = true
blstrs.workspace = true
byte-slice-cast.workspace = true
byteorder.workspace = true
chacha20 = "0.9.0"
crossbeam = "0.8"
fdlimit.workspace = true
ff.workspace = true
generic-array.workspace = true
glob = "0.3.0"
hex.workspace = true
hwloc = { version = "2.2.0", optional = true, package = "hwloc2" }
lazy_static.workspace = true
libc = "0.2"
log.workspace = true
memmap2.workspace = true
merkletree.workspace = true
neptune.workspace = true
num-bigint = "0.4.3"
num-traits = "0.2"
num_cpus.workspace = true
pretty_assertions.workspace = true
rayon.workspace = true
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
yastl = "0.1.2"

[build-dependencies]
rustversion = "1.0"

[target."cfg(target_arch = \"aarch64\")".dependencies]
sha2 = { workspace = true, features = ["compress", "asm"] }
[target."cfg(not(target_arch = \"aarch64\"))".dependencies]
sha2 = { workspace = true, features = ["compress"] }

[dev-dependencies]
filecoin-hashers = { workspace = true, features = [
    "poseidon",
    "sha256",
    "blake2s",
] }
# Sorted alphabetically
criterion.workspace = true
fil_logger.workspace = true
rand.workspace = true
rand_xorshift.workspace = true
tempfile.workspace = true

[features]
default = ["opencl", "multicore-sdr"]
cuda = [
    "storage-proofs-core/cuda",
    "filecoin-hashers/cuda",
    "neptune/cuda",
    "bellperson/cuda",
]
opencl = [
    "storage-proofs-core/opencl",
    "filecoin-hashers/opencl",
    "neptune/opencl",
    "bellperson/opencl",
]
isolated-testing = []
multicore-sdr = ["hwloc"]
# This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD`
# setting is ignored, no `TemporaryAux` file will be written.
fixed-rows-to-discard = ["storage-proofs-core/fixed-rows-to-discard"]
cpu-profile = []

[[bench]]
name = "encode"
harness = false

[[bench]]
name = "parents"
harness = false
-------------------------------------------------------------------------------- /storage-proofs-porep/README.md: --------------------------------------------------------------------------------
# Storage Proofs PoRep

## License

MIT or Apache 2.0
-------------------------------------------------------------------------------- /storage-proofs-porep/benches/encode.rs: --------------------------------------------------------------------------------
use blstrs::Scalar as Fr;
use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput};
use ff::Field;
use filecoin_hashers::{sha256::Sha256Hasher, Domain, Hasher};
use fr32::fr_into_bytes;
use rand::thread_rng;
use storage_proofs_core::api_version::ApiVersion;
use storage_proofs_porep::stacked::{
    create_label::single::{create_label, create_label_exp},
    StackedBucketGraph,
};

struct Pregenerated<H: Hasher> {
    data: Vec<u8>,
    replica_id: H::Domain,
    graph: StackedBucketGraph<H>,
}

fn pregenerate_data<H: Hasher>(degree: usize) -> Pregenerated<H> {
    assert_eq!(degree, 6 + 8);
    let mut rng = thread_rng();
    let size = degree * 4 * 1024 * 1024;
    let api_version = ApiVersion::V1_0_0;
    let data: Vec<u8> = (0..size)
        .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng)))
        .collect();
    let replica_id: H::Domain = H::Domain::random(&mut rng);

    let graph = StackedBucketGraph::<H>::new_stacked(size, 6, 8, [32; 32], api_version).unwrap();

    Pregenerated {
        data,
        replica_id,
        graph,
    }
}

fn kdf_benchmark(c: &mut Criterion) {
    let degree = 14;
    let Pregenerated {
        data,
        replica_id,
        graph,
    } = pregenerate_data::<Sha256Hasher>(degree);

    let mut group = c.benchmark_group("kdf");
    group.sample_size(10);
    group.throughput(Throughput::Bytes(
        /* replica id + 37 parents + node id */ 39 * 32,
    ));

    group.bench_function("exp", |b| {
        let mut raw_data = data.clone();
        raw_data.extend_from_slice(&data);
        let (data, exp_data) = raw_data.split_at_mut(data.len());

        let graph = &graph;

        b.iter(|| {
            black_box(create_label_exp(
                graph, None, replica_id, &*exp_data, data, 1, 2,
            ))
        })
    });

    group.bench_function("non-exp", |b| {
        let mut data = data.clone();
        let graph = &graph;

        b.iter(|| black_box(create_label(graph, None, replica_id, &mut data, 1, 2)))
    });

    group.finish();
}

criterion_group!(benches, kdf_benchmark);
criterion_main!(benches);
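The benchmark above exercises the single-node labeling primitives; a minimal sketch of calling them outside the Criterion harness, reusing the bench's `pregenerate_data` helper and the same argument order as the calls above (the trailing `1, 2` are the layer/node indices as used in the bench):

use filecoin_hashers::sha256::Sha256Hasher;

// Generate a small graph plus random data, then label one node in place.
let Pregenerated { mut data, replica_id, graph } = pregenerate_data::<Sha256Hasher>(6 + 8);
create_label(&graph, None, replica_id, &mut data, 1, 2).expect("labeling failed");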
-------------------------------------------------------------------------------- /storage-proofs-porep/benches/parents.rs: --------------------------------------------------------------------------------
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use filecoin_hashers::{blake2s::Blake2sHasher, sha256::Sha256Hasher, Hasher};
#[cfg(feature = "cpu-profile")]
use gperftools::profiler::PROFILER;
use storage_proofs_core::{
    api_version::ApiVersion,
    drgraph::{Graph, BASE_DEGREE},
};
use storage_proofs_porep::stacked::{StackedBucketGraph, EXP_DEGREE};

#[cfg(feature = "cpu-profile")]
#[inline(always)]
fn start_profile(stage: &str) {
    PROFILER
        .lock()
        .unwrap()
        .start(format!("./{}.profile", stage))
        .unwrap();
}

#[cfg(not(feature = "cpu-profile"))]
#[inline(always)]
fn start_profile(_stage: &str) {}

#[cfg(feature = "cpu-profile")]
#[inline(always)]
fn stop_profile() {
    PROFILER.lock().unwrap().stop().unwrap();
}

#[cfg(not(feature = "cpu-profile"))]
#[inline(always)]
fn stop_profile() {}

fn pregenerate_graph<H: Hasher>(size: usize, api_version: ApiVersion) -> StackedBucketGraph<H> {
    StackedBucketGraph::<H>::new_stacked(size, BASE_DEGREE, EXP_DEGREE, [32; 32], api_version)
        .unwrap()
}

fn parents_loop<H: Hasher, G: Graph<H>>(graph: &G, parents: &mut [u32]) {
    (0..graph.size())
        .map(|node| graph.parents(node, parents).unwrap())
        .collect()
}

#[allow(clippy::unit_arg)]
fn parents_loop_benchmark(c: &mut Criterion) {
    let sizes = vec![10, 50, 1000];

    let mut group = c.benchmark_group("parents in a loop");
    for size in sizes {
        group.bench_function(format!("Blake2s-{}", size), |b| {
            let graph = pregenerate_graph::<Blake2sHasher>(size, ApiVersion::V1_1_0);
            let mut parents = vec![0; graph.degree()];
            start_profile(&format!("parents-blake2s-{}", size));
            b.iter(|| black_box(parents_loop::<Blake2sHasher, _>(&graph, &mut parents)));
            stop_profile();
        });
        group.bench_function(format!("Sha256-{}", size), |b| {
            let graph = pregenerate_graph::<Sha256Hasher>(size, ApiVersion::V1_1_0);
            let mut parents = vec![0; graph.degree()];
            b.iter(|| black_box(parents_loop::<Sha256Hasher, _>(&graph, &mut parents)))
        });
    }

    group.finish();
}

criterion_group!(benches, parents_loop_benchmark);
criterion_main!(benches);
-------------------------------------------------------------------------------- /storage-proofs-porep/build.rs: --------------------------------------------------------------------------------
fn main() {
    println!("cargo::rustc-check-cfg=cfg(nightly)");
    cfg_if_nightly()
}

#[rustversion::nightly]
fn cfg_if_nightly() {
    println!("cargo:rustc-cfg=nightly");
}

#[rustversion::not(nightly)]
fn cfg_if_nightly() {}
-------------------------------------------------------------------------------- /storage-proofs-porep/parent_cache.json: --------------------------------------------------------------------------------
{
  "v28-sdr-parent-21981246c370f9d76c7a77ab273d94bde0ceb4e938292334960bce05585dc117": {
    "sector_size": 34359738368,
    "digest": "93deeac5e3052b6927467d4b2641bb782f05491de18d510147c93eeedd8672da"
  },
  "v28-sdr-parent-2aa9c77c3e58259481351cc4be2079cc71e1c9af39700866545c043bfa30fb42": {
    "sector_size": 536870912,
    "digest": "3adcc092423aa76d6a7184016893406da44dd974b219a89cd3ece25e4e3018f5"
  },
  "v28-sdr-parent-3f0eef38bb48af1f48ad65e14eb85b4ebfc167cec18cd81764f6d998836c9899": {
    "sector_size": 2048,
    "digest": "3da49221e2ed55371b86d0bf3d6526fcf128af61bed904f966428db1b531750d"
  },
  "v28-sdr-parent-4905486b7af19558ac3649bc6261411858b6add534438878c4ee3b29d8b9de0b": {
    "sector_size": 68719476736,
    "digest": "2698b74eb2606b55b98d8b095e18b6320b47f46e00075956d48640ccd1641b03"
  },
  "v28-sdr-parent-494d91dc80f2df5272c4b9e129bc7ade9405225993af9fe34e6542a39a47554b": {
    "sector_size": 2048,
    "digest": "840057702eea7652cf97e04306c30fe57174714d90de156a25eddd6075c25b97"
  },
"v28-sdr-parent-5eed212119fd91aa6220a27f31a8966444a9381842bceb3a1ea61525bd47a5b5": { 23 | "sector_size": 8388608, 24 | "digest": "03cd13565ded97c240a5f52e54295ad127bd0461b57904cb3a4d79b097bbecab" 25 | }, 26 | "v28-sdr-parent-7ba215a1d2345774ab90b8cb1158d296e409d6068819d7b8c7baf0b25d63dc34": { 27 | "sector_size": 536870912, 28 | "digest": "b5877d1963793efebc261fd8fde4dd6bc59e6b5c7abf52617a4ee023b8dc173a" 29 | }, 30 | "v28-sdr-parent-8a99e8d6b6be7ab87a56b632e6739ff201c23ea14e99737c74690f0e265574d6": { 31 | "sector_size": 68719476736, 32 | "digest": "2778a732ad46a7dc18e0564dfdf59fd321dcde74ab476fd6d3c4e6735d7cd89c" 33 | }, 34 | "v28-sdr-parent-dcdabb0fbe4364bf0ac28b6a18c66de246409fa1a9020a00f33fb3e3053da6dc": { 35 | "sector_size": 8388608, 36 | "digest": "a76604f2f59f2744c7151653bbb1d8596b6b57d295e6fa6c1f0c41d725b502ce" 37 | }, 38 | "v28-sdr-parent-e1fa5d5b811ddbd118be3412c4a8c329156b8b8acc72632bca459455b5a05a13": { 39 | "sector_size": 34359738368, 40 | "digest": "3c4f9841fcc75aed8c695800e58d08480629f25af3a2aefd81904181d75cc0b6" 41 | } 42 | } -------------------------------------------------------------------------------- /storage-proofs-porep/src/encode.rs: -------------------------------------------------------------------------------- 1 | use blstrs::Scalar as Fr; 2 | use filecoin_hashers::Domain; 3 | 4 | pub fn encode(key: T, value: T) -> T { 5 | let value: Fr = value.into(); 6 | let mut result: Fr = key.into(); 7 | 8 | encode_fr(&mut result, value); 9 | result.into() 10 | } 11 | 12 | pub fn encode_fr(key: &mut Fr, value: Fr) { 13 | *key += value; 14 | } 15 | 16 | pub fn decode(key: T, value: T) -> T { 17 | let mut result: Fr = value.into(); 18 | let key: Fr = key.into(); 19 | 20 | result -= key; 21 | result.into() 22 | } 23 | -------------------------------------------------------------------------------- /storage-proofs-porep/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)] 2 | #![warn(clippy::unwrap_used)] 3 | #![cfg_attr(all(target_arch = "aarch64", nightly), feature(stdsimd))] 4 | #![warn(clippy::unnecessary_wraps)] 5 | 6 | pub mod stacked; 7 | 8 | mod encode; 9 | 10 | pub const MAX_LEGACY_POREP_REGISTERED_PROOF_ID: u64 = 4; 11 | -------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/circuit/column.rs: -------------------------------------------------------------------------------- 1 | use bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError}; 2 | use blstrs::Scalar as Fr; 3 | use filecoin_hashers::Hasher; 4 | use storage_proofs_core::merkle::MerkleTreeTrait; 5 | 6 | use crate::stacked::{circuit::hash::hash_single_column, Column as VanillaColumn, PublicParams}; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct Column { 10 | rows: Vec>, 11 | } 12 | 13 | #[derive(Clone)] 14 | pub struct AllocatedColumn { 15 | rows: Vec>, 16 | } 17 | 18 | impl From> for Column { 19 | fn from(other: VanillaColumn) -> Self { 20 | let VanillaColumn { rows, .. } = other; 21 | 22 | Column { 23 | rows: rows.into_iter().map(|r| Some(r.into())).collect(), 24 | } 25 | } 26 | } 27 | 28 | impl Column { 29 | /// Create an empty `Column`, used in `blank_circuit`s. 30 | pub fn empty(params: &PublicParams) -> Self { 31 | Column { 32 | rows: vec![None; params.num_layers], 33 | } 34 | } 35 | 36 | /// Consume this column, and allocate its values in the circuit. 
    /// Consume this column, and allocate its values in the circuit.
    pub fn alloc<CS: ConstraintSystem<Fr>>(
        self,
        mut cs: CS,
    ) -> Result<AllocatedColumn, SynthesisError> {
        let Self { rows } = self;

        let rows = rows
            .into_iter()
            .enumerate()
            .map(|(i, val)| {
                AllocatedNum::alloc(cs.namespace(|| format!("column_num_row_{}", i)), || {
                    val.ok_or(SynthesisError::AssignmentMissing)
                })
            })
            .collect::<Result<Vec<_>, _>>()?;

        Ok(AllocatedColumn { rows })
    }

    pub(crate) fn len(&self) -> usize {
        self.rows.len()
    }
}

impl AllocatedColumn {
    /// Creates the column hash of this column.
    pub fn hash<CS: ConstraintSystem<Fr>>(
        &self,
        cs: CS,
    ) -> Result<AllocatedNum<Fr>, SynthesisError> {
        hash_single_column(cs, &self.rows)
    }

    pub fn get_value(&self, layer: usize) -> &AllocatedNum<Fr> {
        assert!(layer > 0, "layers are 1 indexed");
        assert!(
            layer <= self.rows.len(),
            "layer {} out of range: 1..={}",
            layer,
            self.rows.len()
        );
        &self.rows[layer - 1]
    }

    pub(crate) fn len(&self) -> usize {
        self.rows.len()
    }
}
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/circuit/column_proof.rs: --------------------------------------------------------------------------------
use bellperson::{ConstraintSystem, SynthesisError};
use blstrs::Scalar as Fr;
use filecoin_hashers::{Hasher, PoseidonArity};
use storage_proofs_core::{
    drgraph::Graph,
    gadgets::por::AuthPath,
    merkle::{MerkleProofTrait, MerkleTreeTrait, Store},
};

use crate::stacked::{
    circuit::column::{AllocatedColumn, Column},
    vanilla::{ColumnProof as VanillaColumnProof, PublicParams},
};

#[derive(Debug, Clone)]
pub struct ColumnProof<
    H: Hasher,
    U: 'static + PoseidonArity,
    V: 'static + PoseidonArity,
    W: 'static + PoseidonArity,
> {
    column: Column,
    inclusion_path: AuthPath<H, U, V, W>,
}

impl<
        H: 'static + Hasher,
        U: 'static + PoseidonArity,
        V: 'static + PoseidonArity,
        W: 'static + PoseidonArity,
    > ColumnProof<H, U, V, W>
{
    /// Create an empty `ColumnProof`, used in `blank_circuit`s.
    pub fn empty<
        S: Store<H::Domain>,
        Tree: MerkleTreeTrait<Hasher = H, Store = S, Arity = U, SubTreeArity = V, TopTreeArity = W>,
    >(
        params: &PublicParams<Tree>,
    ) -> Self {
        ColumnProof {
            column: Column::empty(params),
            inclusion_path: AuthPath::blank(params.graph.size()),
        }
    }
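A sketch of consuming a `ColumnProof` during circuit synthesis (assumed context: `cs` is a `ConstraintSystem<Fr>` and `column_proof` was converted from a vanilla proof via the `From` impl below):

let (column, inclusion_path) = column_proof.alloc(cs.namespace(|| "column_proof"))?;
let c_i = column.hash(cs.namespace(|| "column_hash"))?;
// Elsewhere in the proof circuit, `c_i` is constrained via `inclusion_path`
// to be the TreeC leaf at the challenged position.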
    /// Allocate the private inputs for this column proof, and return the inclusion path for verification.
    pub fn alloc<CS: ConstraintSystem<Fr>>(
        self,
        mut cs: CS,
    ) -> Result<(AllocatedColumn, AuthPath<H, U, V, W>), SynthesisError> {
        let ColumnProof {
            inclusion_path,
            column,
        } = self;

        let column = column.alloc(cs.namespace(|| "column"))?;

        Ok((column, inclusion_path))
    }

    pub(crate) fn len(&self) -> usize {
        self.column.len()
    }
}

impl<
        H: Hasher,
        U: 'static + PoseidonArity,
        V: 'static + PoseidonArity,
        W: 'static + PoseidonArity,
        X: MerkleProofTrait<Hasher = H, Arity = U, SubTreeArity = V, TopTreeArity = W>,
    > From<VanillaColumnProof<X>> for ColumnProof<H, U, V, W>
{
    fn from(vanilla_proof: VanillaColumnProof<X>) -> Self {
        let VanillaColumnProof {
            column,
            inclusion_proof,
        } = vanilla_proof;

        ColumnProof {
            column: column.into(),
            inclusion_path: inclusion_proof.as_options().into(),
        }
    }
}
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/circuit/hash.rs: --------------------------------------------------------------------------------
use bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError};
use blstrs::Scalar as Fr;
use filecoin_hashers::{POSEIDON_CONSTANTS_11, POSEIDON_CONSTANTS_2};
use generic_array::typenum::{U11, U2};
use neptune::circuit::poseidon_hash;

/// Hash a column of allocated field elements.
pub fn hash_single_column<CS>(
    cs: CS,
    column: &[AllocatedNum<Fr>],
) -> Result<AllocatedNum<Fr>, SynthesisError>
where
    CS: ConstraintSystem<Fr>,
{
    match column.len() {
        2 => poseidon_hash::<CS, Fr, U2>(cs, column.to_vec(), &*POSEIDON_CONSTANTS_2),
        11 => poseidon_hash::<CS, Fr, U11>(cs, column.to_vec(), &*POSEIDON_CONSTANTS_11),
        _ => panic!("unsupported column size: {}", column.len()),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use bellperson::util_cs::test_cs::TestConstraintSystem;
    use ff::Field;
    use filecoin_hashers::{poseidon::PoseidonHasher, HashFunction, Hasher};
    use rand::SeedableRng;
    use rand_xorshift::XorShiftRng;
    use storage_proofs_core::TEST_SEED;

    use crate::stacked::vanilla::hash::hash_single_column as vanilla_hash_single_column;

    #[test]
    fn test_hash2_circuit() {
        let mut rng = XorShiftRng::from_seed(TEST_SEED);

        for _ in 0..10 {
            let mut cs = TestConstraintSystem::<Fr>::new();

            let a = Fr::random(&mut rng);
            let b = Fr::random(&mut rng);

            let a_num = {
                let mut cs = cs.namespace(|| "a");
                AllocatedNum::alloc(&mut cs, || Ok(a)).expect("alloc failed")
            };

            let b_num = {
                let mut cs = cs.namespace(|| "b");
                AllocatedNum::alloc(&mut cs, || Ok(b)).expect("alloc failed")
            };

            let out = <PoseidonHasher as Hasher>::Function::hash2_circuit(
                cs.namespace(|| "hash2"),
                &a_num,
                &b_num,
            )
            .expect("hash2 function failed");

            assert!(cs.is_satisfied(), "constraints not satisfied");
            assert_eq!(cs.num_constraints(), 311);

            let expected: Fr =
                <PoseidonHasher as Hasher>::Function::hash2(&a.into(), &b.into()).into();

            assert_eq!(
                expected,
                out.get_value().expect("get_value failed"),
                "circuit and non circuit do not match"
            );
        }
    }

    #[test]
    fn test_hash_single_column_circuit() {
        let mut rng = XorShiftRng::from_seed(TEST_SEED);

        for _ in 0..1 {
            let mut cs = TestConstraintSystem::<Fr>::new();

            let vals = vec![Fr::random(&mut rng); 11];
            let vals_opt = vals
                .iter()
                .enumerate()
                .map(|(i, v)| {
                    AllocatedNum::alloc(cs.namespace(|| format!("num_{}", i)), || Ok(*v))
                        .expect("alloc failed")
                })
                .collect::<Vec<_>>();

            let out =
                hash_single_column(cs.namespace(|| "hash_single_column"), &vals_opt)
                    .expect("hash_single_column function failed");

            assert!(cs.is_satisfied(), "constraints not satisfied");
            assert_eq!(cs.num_constraints(), 598);

            let expected: Fr = vanilla_hash_single_column(&vals);

            assert_eq!(
                expected,
                out.get_value().expect("get_value failed"),
                "circuit and non circuit do not match"
            );
        }
    }
}
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/circuit/mod.rs: --------------------------------------------------------------------------------
mod column;
mod column_proof;
mod create_label;
mod hash;
mod params;
mod proof;

pub use create_label::*;
pub use proof::{StackedCircuit, StackedCompound};
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/mod.rs: --------------------------------------------------------------------------------
mod circuit;

pub(crate) mod vanilla;

pub use circuit::*;
pub use vanilla::*;
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/vanilla/clear_files.rs: --------------------------------------------------------------------------------
use std::{fs, path::Path};

use anyhow::{Context, Result};
use log::trace;
use merkletree::store::StoreConfig;
use storage_proofs_core::cache_key::{CacheKey, LABEL_LAYER_KEY};

use crate::stacked::vanilla::{
    SYNTHETIC_POREP_VANILLA_PROOFS_EXT, SYNTHETIC_POREP_VANILLA_PROOFS_KEY,
};

/// Removes all files that match the given glob pattern.
fn remove_files_with_glob(glob_path: &Path) -> Result<()> {
    let globs = glob::glob(glob_path.to_str().expect("Path must be valid UTF-8"))
        .expect("Glob pattern must be valid");
    for maybe_path in globs {
        let path = maybe_path?;
        fs::remove_file(&path).with_context(|| format!("Failed to delete {:?}", &path))?
    }
    Ok(())
}

/// Discards all persisted merkle and layer data that is not needed for PoSt.
pub fn clear_cache_dir(cache_path: &Path) -> Result<()> {
    let tree_d_path = StoreConfig::data_path(cache_path, &CacheKey::CommDTree.to_string());
    if tree_d_path.exists() {
        fs::remove_file(&tree_d_path)
            .with_context(|| format!("Failed to delete {:?}", &tree_d_path))?;
        trace!("tree d deleted");
    }

    // TreeC might be split into several sub-trees. They have the same file name, but a number
    // attached, separated by a dash. Hence add a glob after the identifier.
    let tree_c_glob = StoreConfig::data_path(cache_path, &format!("{}*", CacheKey::CommCTree));
    remove_files_with_glob(&tree_c_glob)?;
    trace!("tree c deleted");

    let labels_glob = StoreConfig::data_path(cache_path, &format!("{}*", LABEL_LAYER_KEY));
    remove_files_with_glob(&labels_glob)?;
    trace!("layers deleted");

    Ok(())
}

/// Ensure that any persisted vanilla proofs generated from synthetic porep are discarded.
pub fn clear_synthetic_proofs(cache_path: &Path) -> Result<()> {
    let synth_proofs_path = cache_path.join(format!(
        "{}.{}",
        SYNTHETIC_POREP_VANILLA_PROOFS_KEY, SYNTHETIC_POREP_VANILLA_PROOFS_EXT
    ));
    if synth_proofs_path.exists() {
        trace!("removing synthetic proofs at {:?}", synth_proofs_path);
        fs::remove_file(&synth_proofs_path)
            .with_context(|| format!("Failed to delete {:?}", &synth_proofs_path))
    } else {
        trace!(
            "persisted synthetic proofs do not exist at {:?}",
            synth_proofs_path
        );

        Ok(())
    }
}
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/vanilla/column.rs: --------------------------------------------------------------------------------
use std::marker::PhantomData;

use blstrs::Scalar as Fr;
use filecoin_hashers::Hasher;
use serde::{Deserialize, Serialize};
use storage_proofs_core::{
    error::Result,
    merkle::{MerkleTreeTrait, Store},
};

use crate::stacked::vanilla::{column_proof::ColumnProof, hash::hash_single_column};

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Column<H: Hasher> {
    pub(crate) index: u32,
    pub(crate) rows: Vec<H::Domain>,
    _h: PhantomData<H>,
}

impl<H: Hasher> Column<H> {
    pub fn new(index: u32, rows: Vec<H::Domain>) -> Result<Self> {
        Ok(Column {
            index,
            rows,
            _h: PhantomData,
        })
    }

    pub fn with_capacity(index: u32, capacity: usize) -> Result<Self> {
        Column::new(index, Vec::with_capacity(capacity))
    }

    pub fn rows(&self) -> &[H::Domain] {
        &self.rows
    }

    pub fn index(&self) -> u32 {
        self.index
    }

    /// Calculate the column hash `C_i = H(E_i, O_i)` for this column.
    pub fn hash(&self) -> Fr {
        hash_single_column(
            &self
                .rows
                .iter()
                .copied()
                .map(Into::into)
                .collect::<Vec<_>>(),
        )
    }

    pub fn get_node_at_layer(&self, layer: usize) -> Result<&H::Domain> {
        assert!(layer > 0, "layer must be greater than 0");
        let row_index = layer - 1;

        Ok(&self.rows[row_index])
    }
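A short test-style sketch of the column-hash flow above (the 11-row column matches an 11-layer SDR configuration; `Domain::default()` is only a stand-in for real per-layer labels):

use filecoin_hashers::poseidon::PoseidonHasher;

let rows = vec![<PoseidonHasher as Hasher>::Domain::default(); 11];
let column = Column::<PoseidonHasher>::new(0, rows).expect("column");
let c_i: Fr = column.hash(); // Poseidon hash over the 11 per-layer labels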
    /// Create a column proof for this column.
    pub fn into_proof<S: Store<H::Domain>, Tree: MerkleTreeTrait<Hasher = H, Store = S>>(
        self,
        tree_c: &Tree,
    ) -> Result<ColumnProof<Tree::Proof>> {
        let inclusion_proof = tree_c.gen_proof(self.index() as usize)?;
        ColumnProof::<Tree::Proof>::from_column(self, inclusion_proof)
    }
}
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/vanilla/column_proof.rs: --------------------------------------------------------------------------------
use blstrs::Scalar as Fr;
use filecoin_hashers::Hasher;
use log::trace;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use storage_proofs_core::{error::Result, merkle::MerkleProofTrait};

use crate::stacked::vanilla::Column;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ColumnProof<Proof: MerkleProofTrait> {
    #[serde(bound(
        serialize = "Column<Proof::Hasher>: Serialize",
        deserialize = "Column<Proof::Hasher>: Deserialize<'de>"
    ))]
    pub(crate) column: Column<Proof::Hasher>,
    #[serde(bound(
        serialize = "Proof: Serialize",
        deserialize = "Proof: DeserializeOwned"
    ))]
    pub(crate) inclusion_proof: Proof,
}

impl<Proof: MerkleProofTrait> ColumnProof<Proof> {
    #[inline]
    pub fn new(
        challenge: u32,
        col: Vec<<Proof::Hasher as Hasher>::Domain>,
        inclusion_proof: Proof,
    ) -> Self {
        ColumnProof {
            column: Column::new(challenge, col).expect("column creation should not fail"),
            inclusion_proof,
        }
    }

    pub fn from_column(column: Column<Proof::Hasher>, inclusion_proof: Proof) -> Result<Self> {
        Ok(ColumnProof {
            column,
            inclusion_proof,
        })
    }

    pub fn root(&self) -> <Proof::Hasher as Hasher>::Domain {
        self.inclusion_proof.root()
    }

    fn column(&self) -> &Column<Proof::Hasher> {
        &self.column
    }

    pub fn get_node_at_layer(&self, layer: usize) -> Result<&<Proof::Hasher as Hasher>::Domain> {
        self.column().get_node_at_layer(layer)
    }

    pub fn column_hash(&self) -> Fr {
        self.column.hash()
    }

    pub fn verify(
        &self,
        challenge: u32,
        expected_root: &<Proof::Hasher as Hasher>::Domain,
    ) -> bool {
        let c_i = self.column_hash();

        check_eq!(&self.inclusion_proof.root(), expected_root);
        check!(self.inclusion_proof.validate_data(c_i.into()));
        check!(self.inclusion_proof.validate(challenge as usize));

        true
    }
}
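Putting `into_proof` (above) and `verify` together; a sketch assuming `tree_c` is the column-commitment tree and the column was built for the challenged index:

let challenge: u32 = column.index();
let proof = column.into_proof(&tree_c).expect("column proof");
assert!(proof.verify(challenge, &tree_c.root()));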
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/vanilla/create_label/mod.rs: --------------------------------------------------------------------------------
use std::fs::{self, create_dir_all, remove_file, rename, File};
use std::io::{self, BufReader};
use std::path::Path;

use anyhow::Context;
use filecoin_hashers::Hasher;
use log::{info, warn};
use merkletree::{merkle::Element, store::StoreConfig};
use storage_proofs_core::{
    cache_key::CacheKey, drgraph::Graph, error::Result, merkle::MerkleTreeTrait,
};

use crate::stacked::vanilla::{proof::LayerState, StackedBucketGraph};

#[cfg(feature = "multicore-sdr")]
pub mod multi;
pub mod single;

/// Prepares the necessary `StoreConfig`s with which the layers are stored.
/// Also checks for already existing layers and marks them as such.
pub fn prepare_layers<H: Hasher, P>(
    graph: &StackedBucketGraph<H>,
    cache_path: P,
    layers: usize,
) -> Vec<LayerState>
where
    P: AsRef<Path>,
{
    let label_configs = (1..=layers).map(|layer| StoreConfig {
        path: cache_path.as_ref().to_path_buf(),
        id: CacheKey::label_layer(layer),
        size: Some(graph.size()),
        rows_to_discard: 0,
    });

    let mut states = Vec::with_capacity(layers);
    for (layer, label_config) in (1..=layers).zip(label_configs) {
        // Clear possible left-over tmp files
        remove_tmp_layer(&label_config);

        // Check if this layer is already on disk
        let generated = is_layer_written::<H>(graph, &label_config).unwrap_or_default();
        if generated {
            // successful load
            info!("found valid labels for layer {}", layer);
        }

        states.push(LayerState {
            config: label_config,
            generated,
        });
    }

    states
}

/// Stores a layer atomically on disk, by writing first to `.tmp` and then renaming.
pub fn write_layer(data: &[u8], config: &StoreConfig) -> Result<()> {
    let data_path = StoreConfig::data_path(&config.path, &config.id);
    let tmp_data_path = data_path.with_extension(".tmp");

    if let Some(parent) = data_path.parent() {
        create_dir_all(parent).context("failed to create parent directories")?;
    }
    fs::write(&tmp_data_path, data).context("failed to write layer data")?;
    rename(tmp_data_path, data_path).context("failed to rename tmp data")?;

    Ok(())
}

/// Reads a layer from disk, into the provided slice.
pub fn read_layer(config: &StoreConfig, mut data: &mut [u8]) -> Result<()> {
    let data_path = StoreConfig::data_path(&config.path, &config.id);
    let file = File::open(data_path).context("failed to open layer")?;
    let mut buffered = BufReader::new(file);
    io::copy(&mut buffered, &mut data).context("failed to read layer")?;

    Ok(())
}

pub fn remove_tmp_layer(config: &StoreConfig) {
    let data_path = StoreConfig::data_path(&config.path, &config.id);
    let tmp_data_path = data_path.with_extension(".tmp");
    if tmp_data_path.exists() {
        if let Err(err) = remove_file(tmp_data_path) {
            warn!("failed to delete tmp file: {}", err);
        }
    }
}
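Because `prepare_layers` marks layers that already exist on disk, label generation can resume after an interruption; a sketch (assumed names, relying on the `LayerState::generated` flag populated above):

let states = prepare_layers::<Sha256Hasher, _>(&graph, &cache_path, num_layers);
// Layers are 1-indexed; resume labeling at the first missing layer, if any.
let resume_at = states.iter().position(|state| !state.generated).map(|i| i + 1);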
/// Checks if the given layer is already written and of the right size.
pub fn is_layer_written<H: Hasher>(
    graph: &StackedBucketGraph<H>,
    config: &StoreConfig,
) -> Result<bool> {
    let data_path = StoreConfig::data_path(&config.path, &config.id);
    if !data_path.exists() {
        return Ok(false);
    }

    let file = File::open(&data_path)?;
    let metadata = file.metadata()?;
    let file_size = metadata.len() as usize;

    if file_size != graph.size() * <H as Hasher>::Domain::byte_len() {
        return Ok(false);
    }

    Ok(true)
}
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/vanilla/encoding_proof.rs: --------------------------------------------------------------------------------
use std::marker::PhantomData;

use blstrs::Scalar as Fr;
use filecoin_hashers::Hasher;
use fr32::bytes_into_fr_repr_safe;
use log::trace;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};

use crate::encode::encode;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncodingProof<H: Hasher> {
    pub(crate) parents: Vec<H::Domain>,
    pub(crate) layer_index: u32,
    pub(crate) node: u64,
    #[serde(skip)]
    _h: PhantomData<H>,
}

impl<H: Hasher> EncodingProof<H> {
    pub fn new(layer_index: u32, node: u64, parents: Vec<H::Domain>) -> Self {
        EncodingProof {
            layer_index,
            node,
            parents,
            _h: PhantomData,
        }
    }

    fn create_key(&self, replica_id: &H::Domain) -> H::Domain {
        let mut hasher = Sha256::new();
        let mut buffer = [0u8; 64];

        // replica_id
        buffer[..32].copy_from_slice(AsRef::<[u8]>::as_ref(replica_id));

        // layer index
        buffer[32..36].copy_from_slice(&(self.layer_index).to_be_bytes());
        // node id
        buffer[36..44].copy_from_slice(&(self.node).to_be_bytes());

        hasher.update(&buffer[..]);

        // parents
        for parent in &self.parents {
            hasher.update(AsRef::<[u8]>::as_ref(parent));
        }

        bytes_into_fr_repr_safe(hasher.finalize().as_ref()).into()
    }

    pub fn verify<G: Hasher>(
        &self,
        replica_id: &H::Domain,
        exp_encoded_node: &H::Domain,
        decoded_node: &G::Domain,
    ) -> bool {
        let key = self.create_key(replica_id);

        let fr: Fr = (*decoded_node).into();
        let encoded_node = encode(key, fr.into());

        check_eq!(exp_encoded_node, &encoded_node);

        true
    }
}
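A sketch of verifying one encoded node against its claimed parents (test-style; `parent_labels`, `replica_id`, `encoded_node`, and `decoded_node` are assumed to come from a replica under test):

use filecoin_hashers::sha256::Sha256Hasher;

let proof = EncodingProof::<Sha256Hasher>::new(1, 0, parent_labels);
assert!(proof.verify(&replica_id, &encoded_node, &decoded_node));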
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/vanilla/hash.rs: --------------------------------------------------------------------------------
use blstrs::Scalar as Fr;
use filecoin_hashers::{POSEIDON_CONSTANTS_11, POSEIDON_CONSTANTS_2};
use neptune::poseidon::Poseidon;

/// Hash all elements in the given column.
pub fn hash_single_column(column: &[Fr]) -> Fr {
    match column.len() {
        2 => {
            let mut hasher = Poseidon::new_with_preimage(column, &*POSEIDON_CONSTANTS_2);
            hasher.hash()
        }
        11 => {
            let mut hasher = Poseidon::new_with_preimage(column, &*POSEIDON_CONSTANTS_11);
            hasher.hash()
        }
        _ => panic!("unsupported column size: {}", column.len()),
    }
}
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/vanilla/labeling_proof.rs: --------------------------------------------------------------------------------
use std::marker::PhantomData;

use filecoin_hashers::Hasher;
use fr32::bytes_into_fr_repr_safe;
use log::trace;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LabelingProof<H: Hasher> {
    pub(crate) parents: Vec<H::Domain>,
    pub(crate) layer_index: u32,
    pub(crate) node: u64,
    #[serde(skip)]
    _h: PhantomData<H>,
}

impl<H: Hasher> LabelingProof<H> {
    pub fn new(layer_index: u32, node: u64, parents: Vec<H::Domain>) -> Self {
        LabelingProof {
            node,
            layer_index,
            parents,
            _h: PhantomData,
        }
    }

    fn create_label(&self, replica_id: &H::Domain) -> H::Domain {
        let mut hasher = Sha256::new();
        let mut buffer = [0u8; 64];

        // replica_id
        buffer[..32].copy_from_slice(AsRef::<[u8]>::as_ref(replica_id));

        // layer index
        buffer[32..36].copy_from_slice(&(self.layer_index).to_be_bytes());

        // node id
        buffer[36..44].copy_from_slice(&(self.node).to_be_bytes());

        hasher.update(&buffer[..]);

        // parents
        for parent in &self.parents {
            let data = AsRef::<[u8]>::as_ref(parent);
            hasher.update(data);
        }

        bytes_into_fr_repr_safe(hasher.finalize().as_ref()).into()
    }

    pub fn verify(&self, replica_id: &H::Domain, expected_label: &H::Domain) -> bool {
        let label = self.create_label(replica_id);
        check_eq!(expected_label, &label);

        true
    }
}
-------------------------------------------------------------------------------- /storage-proofs-porep/src/stacked/vanilla/mod.rs: --------------------------------------------------------------------------------
#[macro_use]
mod macros;

pub mod create_label;
pub(crate) mod hash;

mod cache;
mod challenges;
mod clear_files;
mod column;
mod column_proof;
#[cfg(feature = "multicore-sdr")]
mod cores;
mod encoding_proof;
mod graph;
mod labeling_proof;
#[cfg(feature = "multicore-sdr")]
mod memory_handling;
mod params;
mod proof;
mod proof_scheme;
#[cfg(feature = "multicore-sdr")]
mod utils;

pub use challenges::{
    synthetic::SYNTHETIC_POREP_VANILLA_PROOFS_EXT, synthetic::SYNTHETIC_POREP_VANILLA_PROOFS_KEY,
    ChallengeRequirements, Challenges,
};
pub use clear_files::{clear_cache_dir, clear_synthetic_proofs};
pub use column::Column;
pub use column_proof::ColumnProof;
pub use encoding_proof::EncodingProof;
pub use graph::{StackedBucketGraph, StackedGraph, EXP_DEGREE};
pub use labeling_proof::LabelingProof;
pub use params::*;
pub use proof::{StackedDrg, TreeRElementData, TOTAL_PARENTS};
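A sketch of how the cleanup helpers re-exported above are typically driven once a sector no longer needs its PoRep intermediates (hypothetical wrapper):

use std::path::Path;
use storage_proofs_porep::stacked::{clear_cache_dir, clear_synthetic_proofs};

fn cleanup(cache: &Path) -> anyhow::Result<()> {
    clear_synthetic_proofs(cache)?;
    clear_cache_dir(cache)
}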
-------------------------------------------------------------------------------- /storage-proofs-porep/tests/common.rs: --------------------------------------------------------------------------------
use std::fs::remove_file;
use std::io::Result;
use std::path::{Path, PathBuf};

use filecoin_hashers::Hasher;
use merkletree::store::StoreConfig;
use storage_proofs_core::{
    cache_key::CacheKey,
    data::Data,
    merkle::{get_base_tree_count, split_config, MerkleTreeTrait},
};
use storage_proofs_porep::stacked::{PersistentAux, PublicParams, StackedDrg, Tau, TemporaryAux};

// This method should ONLY be used in purpose-built test code.
#[allow(dead_code)]
pub(crate) fn remove_replica_and_tree_r<Tree: MerkleTreeTrait>(
    cache_path: &Path,
) -> Result<()> {
    let replica_path = cache_path.join("replica-path");
    let tree_r_last_config = StoreConfig {
        path: cache_path.to_path_buf(),
        id: CacheKey::CommRLastTree.to_string(),
        size: Some(0),
        rows_to_discard: 0,
    };
    let tree_count = get_base_tree_count::<Tree>();
    if tree_count > 1 {
        let configs =
            split_config(tree_r_last_config, tree_count).expect("Failed to split configs");
        for config in configs {
            let cur_path = StoreConfig::data_path(&config.path, &config.id);
            remove_file(cur_path).expect("Failed to remove TreeR");
        }
    } else {
        let cur_path = StoreConfig::data_path(&tree_r_last_config.path, &tree_r_last_config.id);
        remove_file(cur_path).expect("Failed to remove TreeR");
    }
    remove_file(replica_path)
}

#[allow(clippy::type_complexity)]
pub fn transform_and_replicate_layers<Tree: 'static + MerkleTreeTrait, G: 'static + Hasher>(
    pp: &PublicParams<Tree>,
    replica_id: &<Tree::Hasher as Hasher>::Domain,
    data: Data<'_>,
    cache_dir: PathBuf,
    replica_path: PathBuf,
) -> (
    Tau<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>,
    (
        PersistentAux<<Tree::Hasher as Hasher>::Domain>,
        TemporaryAux<Tree, G>,
    ),
) {
    let (labels, _) = StackedDrg::<Tree, G>::replicate_phase1(pp, replica_id, &cache_dir)
        .expect("failed to generate labels");
    StackedDrg::replicate_phase2(pp, labels, data, None, cache_dir, replica_path)
        .expect("failed to transform")
}
-------------------------------------------------------------------------------- /storage-proofs-post/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "storage-proofs-post"
version = "19.0.0"
authors = ["dignifiedquire "]
license = "MIT OR Apache-2.0"
description = "Proofs of Space Time"
edition = "2018"
repository = "https://github.com/filecoin-project/rust-fil-proofs"
readme = "README.md"

[dependencies]
storage-proofs-core.workspace = true
filecoin-hashers = { workspace = true, features = ["poseidon", "sha256"] }
# Sorted alphabetically
anyhow.workspace = true
bellperson.workspace = true
blstrs.workspace = true
byteorder.workspace = true
ff.workspace = true
generic-array.workspace = true
log.workspace = true
rayon.workspace = true
serde = { workspace = true, features = ["derive"] }
sha2.workspace = true

[dev-dependencies]
# Sorted alphabetically
pretty_assertions.workspace = true
rand.workspace = true
rand_xorshift.workspace = true
tempfile.workspace = true

[features]
default = ["opencl"]
cuda = ["storage-proofs-core/cuda", "filecoin-hashers/cuda"]
opencl = ["storage-proofs-core/opencl", "filecoin-hashers/opencl"]
# This feature enables a fixed number of discarded rows for TreeR.
# The `FIL_PROOFS_ROWS_TO_DISCARD` setting is ignored, no `TemporaryAux` file will be written.
fixed-rows-to-discard = ["storage-proofs-core/fixed-rows-to-discard"]
-------------------------------------------------------------------------------- /storage-proofs-post/README.md: --------------------------------------------------------------------------------
# Storage Proofs PoSt

## License

MIT or Apache 2.0
-------------------------------------------------------------------------------- /storage-proofs-post/src/fallback/mod.rs: --------------------------------------------------------------------------------
mod circuit;
mod compound;
mod utils;
mod vanilla;

pub use circuit::*;
pub use compound::*;
pub use utils::*;
pub use vanilla::*;
-------------------------------------------------------------------------------- /storage-proofs-post/src/fallback/utils.rs: --------------------------------------------------------------------------------
use storage_proofs_core::api_version::ApiVersion;

/// Selects the challenge index used to determine the leaf challenge for PoSt
pub fn get_challenge_index(
    api_version: ApiVersion,
    sector_index: usize,
    challenge_count_per_sector: usize,
    challenge_index: usize,
) -> u64 {
    (match api_version {
        ApiVersion::V1_0_0 | ApiVersion::V1_1_0 => {
            sector_index * challenge_count_per_sector + challenge_index
        }
        ApiVersion::V1_2_0 => challenge_index,
    } as u64)
}
-------------------------------------------------------------------------------- /storage-proofs-post/src/lib.rs: --------------------------------------------------------------------------------
#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)]
#![warn(clippy::unwrap_used)]

pub mod fallback;
-------------------------------------------------------------------------------- /storage-proofs-update/Cargo.toml: --------------------------------------------------------------------------------
[package]
name = "storage-proofs-update"
version = "19.0.0"
authors = ["dignifiedquire "]
description = "Proof of SDR-PoRep CC-Sector Update"
license = "MIT OR Apache-2.0"
edition = "2018"
repository = "https://github.com/filecoin-project/rust-fil-proofs"
readme = "README.md"

[dependencies]
filecoin-hashers = { workspace = true, features = ["poseidon", "sha256"] }
fr32.workspace = true
storage-proofs-core.workspace = true
storage-proofs-porep.workspace = true
# Sorted alphabetically
merkletree.workspace = true
rayon.workspace = true
serde = { workspace = true, features = ["derive"] }
ff.workspace = true
bellperson.workspace = true
blstrs.workspace = true
log.workspace = true
generic-array.workspace = true
anyhow.workspace = true
neptune.workspace = true
lazy_static.workspace = true
memmap2.workspace = true

[dev-dependencies]
# Sorted alphabetically
tempfile.workspace = true
rand.workspace = true
rand_xorshift.workspace = true

[features]
default = ["opencl", "multicore-sdr"]
isolated-testing = []
opencl = [
    "storage-proofs-core/opencl",
    "storage-proofs-porep/opencl",
    "filecoin-hashers/opencl",
    "neptune/opencl",
    "bellperson/opencl",
]
cuda = [
    "storage-proofs-core/cuda",
    "storage-proofs-porep/cuda",
"filecoin-hashers/cuda", 50 | "neptune/cuda", 51 | "bellperson/cuda", 52 | ] 53 | multicore-sdr = [ 54 | "storage-proofs-porep/multicore-sdr", 55 | ] 56 | # This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD` 57 | # setting is ignored, no `TemporaryAux` file will be written. 58 | fixed-rows-to-discard = ["storage-proofs-core/fixed-rows-to-discard", "storage-proofs-porep/fixed-rows-to-discard"] 59 | -------------------------------------------------------------------------------- /storage-proofs-update/README.md: -------------------------------------------------------------------------------- 1 | # Storage Proofs Update 2 | 3 | ## License 4 | 5 | MIT or Apache 2.0 6 | -------------------------------------------------------------------------------- /storage-proofs-update/src/compound.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use blstrs::Scalar as Fr; 4 | 5 | use storage_proofs_core::{ 6 | compound_proof::{CircuitComponent, CompoundProof}, 7 | error::Result, 8 | merkle::MerkleTreeTrait, 9 | parameter_cache::CacheableParameters, 10 | }; 11 | 12 | use crate::{ 13 | circuit, constants::TreeRHasher, EmptySectorUpdate, EmptySectorUpdateCircuit, PartitionProof, 14 | PublicInputs, PublicParams, 15 | }; 16 | 17 | pub struct EmptySectorUpdateCompound 18 | where 19 | TreeR: MerkleTreeTrait, 20 | { 21 | pub _tree_r: PhantomData, 22 | } 23 | 24 | impl CacheableParameters, PublicParams> 25 | for EmptySectorUpdateCompound 26 | where 27 | TreeR: MerkleTreeTrait, 28 | { 29 | fn cache_prefix() -> String { 30 | format!("empty-sector-update-{}", TreeR::display()) 31 | } 32 | } 33 | 34 | impl CompoundProof<'_, EmptySectorUpdate, EmptySectorUpdateCircuit> 35 | for EmptySectorUpdateCompound 36 | where 37 | TreeR: 'static + MerkleTreeTrait, 38 | { 39 | // Generates a partition circuit's public-inputs. If the `k` argument is `Some` we overwrite 40 | // `pub_inputs.k` with the `k` argument's value, otherwise if the `k` argument is `None` we use 41 | // `pub_inputs.k` as the circuit's public-input. 42 | fn generate_public_inputs( 43 | pub_inputs: &PublicInputs, 44 | pub_params: &PublicParams, 45 | k: Option, 46 | ) -> Result> { 47 | // Prioritize the partition-index provided via the `k` argument; default to `pub_inputs.k`. 48 | let k = k.unwrap_or(pub_inputs.k); 49 | 50 | let PublicInputs { 51 | comm_r_old, 52 | comm_d_new, 53 | comm_r_new, 54 | h, 55 | .. 56 | } = *pub_inputs; 57 | 58 | let pub_inputs_circ = circuit::PublicInputs::new( 59 | pub_params.sector_nodes, 60 | k, 61 | h, 62 | comm_r_old, 63 | comm_d_new, 64 | comm_r_new, 65 | ); 66 | 67 | Ok(pub_inputs_circ.to_vec()) 68 | } 69 | 70 | // Generates a partition's circuit. If the `k` argument is `Some` we overwrite `pub_inputs.k` 71 | // with the `k` argument's value, otherwise if the `k` argument is `None` we use `pub_inputs.k` 72 | // as the circuit's public-input. 73 | fn circuit( 74 | pub_inputs: &PublicInputs, 75 | _priv_inputs: as CircuitComponent>::ComponentPrivateInputs, 76 | vanilla_proof: &PartitionProof, 77 | pub_params: &PublicParams, 78 | k: Option, 79 | ) -> Result> { 80 | // Prioritize the partition-index provided via the `k` argument; default to `pub_inputs.k`. 81 | let k = k.unwrap_or(pub_inputs.k); 82 | 83 | let PublicInputs { 84 | comm_r_old, 85 | comm_d_new, 86 | comm_r_new, 87 | h, 88 | .. 
    // Generates a partition's circuit. If the `k` argument is `Some` we overwrite `pub_inputs.k`
    // with the `k` argument's value, otherwise if the `k` argument is `None` we use `pub_inputs.k`
    // as the circuit's public-input.
    fn circuit(
        pub_inputs: &PublicInputs,
        _priv_inputs: <EmptySectorUpdateCircuit<TreeR> as CircuitComponent>::ComponentPrivateInputs,
        vanilla_proof: &PartitionProof<TreeR>,
        pub_params: &PublicParams,
        k: Option<usize>,
    ) -> Result<EmptySectorUpdateCircuit<TreeR>> {
        // Prioritize the partition-index provided via the `k` argument; default to `pub_inputs.k`.
        let k = k.unwrap_or(pub_inputs.k);

        let PublicInputs {
            comm_r_old,
            comm_d_new,
            comm_r_new,
            h,
            ..
        } = *pub_inputs;

        let pub_inputs = circuit::PublicInputs::new(
            pub_params.sector_nodes,
            k,
            h,
            comm_r_old,
            comm_d_new,
            comm_r_new,
        );

        let priv_inputs = circuit::PrivateInputs::new(
            vanilla_proof.comm_c,
            &vanilla_proof.apex_leafs,
            &vanilla_proof.challenge_proofs,
        );

        Ok(EmptySectorUpdateCircuit {
            pub_params: pub_params.clone(),
            pub_inputs,
            priv_inputs,
        })
    }

    fn blank_circuit(pub_params: &PublicParams) -> EmptySectorUpdateCircuit<TreeR> {
        EmptySectorUpdateCircuit::blank(pub_params.clone())
    }
}
-------------------------------------------------------------------------------- /storage-proofs-update/src/lib.rs: --------------------------------------------------------------------------------
pub mod circuit;
pub mod compound;
pub mod constants;
pub(crate) mod gadgets;
pub mod poseidon;
pub mod vanilla;

mod challenges;

pub use self::challenges::Challenges;
pub use self::circuit::EmptySectorUpdateCircuit;
pub use self::compound::EmptySectorUpdateCompound;
pub use self::vanilla::{
    phi, rho, ChallengeProof, EmptySectorUpdate, PartitionProof, PrivateInputs, PublicInputs,
    PublicParams, SetupParams,
};
-------------------------------------------------------------------------------- /storage-proofs-update/src/poseidon/mod.rs: --------------------------------------------------------------------------------
pub mod circuit;
pub mod compound;
pub mod vanilla;

pub use circuit::EmptySectorUpdateCircuit;
pub use compound::EmptySectorUpdateCompound;
pub use vanilla::EmptySectorUpdate;
--------------------------------------------------------------------------------