├── .github ├── actions │ └── configure-environment │ │ └── action.yml └── workflows │ ├── ci.yml │ ├── release-check.yml │ └── releaser.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE-APACHE ├── LICENSE-MIT ├── Makefile ├── README.md ├── RELEASE.md ├── SECURITY.md ├── bls.go ├── bls_test.go ├── build.sh ├── cgo ├── blockstore.go ├── bls.go ├── const.go ├── errors.go ├── extern.go ├── fvm.go ├── helpers.go ├── helpers_test.go ├── interface.go ├── libs.go ├── proofs.go ├── registry.go ├── types.go └── util.go ├── cgoleakdetect └── runner.go ├── distributed.go ├── fvm.go ├── fvm_test.go ├── go.mod ├── go.sum ├── headerstubs ├── stdarg.h ├── stdbool.h ├── stddef.h ├── stdint.h └── stdlib.h ├── install-filcrypto ├── mkreleaselog ├── parameters.json ├── proofs.go ├── proofs_test.go ├── run_tests.sh ├── rust ├── Cargo.lock ├── Cargo.toml ├── filcrypto.pc.template ├── filecoin.pc ├── rust-toolchain.toml ├── rustc-target-features-optimized.json ├── scripts │ ├── build-release.sh │ ├── package-release.sh │ └── publish-release.sh └── src │ ├── bls │ ├── api.rs │ └── mod.rs │ ├── fvm │ ├── blockstore │ │ ├── cgo.rs │ │ └── mod.rs │ ├── cgo │ │ ├── error.rs │ │ ├── externs.rs │ │ └── mod.rs │ ├── engine.rs │ ├── externs.rs │ ├── machine.rs │ ├── mod.rs │ └── types.rs │ ├── lib.rs │ ├── proofs │ ├── api.rs │ ├── helpers.rs │ ├── mod.rs │ └── types.rs │ └── util │ ├── api.rs │ ├── mod.rs │ └── types.rs ├── sector_update.go ├── srs-inner-product.json ├── types.go ├── version.go ├── version.json └── workflows.go /.github/actions/configure-environment/action.yml: -------------------------------------------------------------------------------- 1 | name: Configure Environment Variables 2 | description: Configure environment variables for Filecoin FFI 3 | 4 | runs: 5 | using: 'composite' 6 | steps: 7 | # The installation instructions can be found at https://developer.nvidia.com/cuda-downloads 8 | - if: runner.os == 'Linux' && runner.arch == 'ARM64' 9 | run: | 10 | # Install the CUDA toolkit 11 | wget -q https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/sbsa/cuda-ubuntu2404.pin 12 | sudo mv cuda-ubuntu2404.pin /etc/apt/preferences.d/cuda-repository-pin-600 13 | wget -q https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda-repo-ubuntu2404-12-8-local_12.8.0-570.86.10-1_arm64.deb 14 | sudo dpkg -i cuda-repo-ubuntu2404-12-8-local_12.8.0-570.86.10-1_arm64.deb 15 | sudo cp /var/cuda-repo-ubuntu2404-12-8-local/cuda-*-keyring.gpg /usr/share/keyrings/ 16 | sudo apt-get update 17 | sudo apt-get install --no-install-recommends -y cuda-toolkit-12-8 18 | # Symlink the stubs 19 | sudo mkdir -p /usr/lib/aarch64-linux-gnu/stubs 20 | sudo ln -s /usr/local/cuda-12.8/lib64/stubs/libcuda.so /usr/lib/aarch64-linux-gnu/stubs/libcuda.so.1 21 | sudo ln -s /usr/local/cuda-12.8/lib64/stubs/libcuda.so /usr/lib/aarch64-linux-gnu/stubs/libcuda.so 22 | # Add the the stubs to the environment 23 | echo "LD_LIBRARY_PATH=/usr/lib/aarch64-linux-gnu/stubs:${LD_LIBRARY_PATH}" >> $GITHUB_ENV 24 | echo "LIBRARY_PATH=/usr/lib/aarch64-linux-gnu/stubs:${LIBRARY_PATH}" >> $GITHUB_ENV 25 | # Add the nvcc to the path 26 | echo "/usr/local/cuda-12.8/bin" >> $GITHUB_PATH 27 | shell: bash 28 | # The version in the default registry is outdated (12.0.1 as of 2025-02-02) but it is quicker to install 29 | - if: runner.os == 'Linux' && runner.arch == 'X64' 30 | run: | 31 | sudo apt-get update 32 | sudo apt-get install --no-install-recommends -y nvidia-cuda-toolkit 33 | shell: bash 34 | - run: | 35 | echo 
"FIL_PROOFS_PARAMETER_CACHE=${GITHUB_WORKSPACE}/filecoin-proof-parameters/" >> $GITHUB_ENV 36 | echo 'GO111MODULE=on' >> $GITHUB_ENV 37 | echo 'RUST_LOG=info' >> $GITHUB_ENV 38 | echo "GOPATH=${HOME}/go" >> $GITHUB_ENV 39 | echo "CARGO_TERM_COLOR=never" >> $GITHUB_ENV 40 | shell: bash 41 | - run: | 42 | echo "/usr/local/go/bin" >> $GITHUB_PATH 43 | echo "${GOPATH}/bin" >> $GITHUB_PATH 44 | echo "${HOME}/.cargo/bin" >> $GITHUB_PATH 45 | echo "${HOME}/.bin" >> $GITHUB_PATH 46 | shell: bash 47 | - if: runner.os == 'macOS' 48 | run: | 49 | echo "CPATH=$(brew --prefix)/include" >> $GITHUB_ENV 50 | echo "LIBRARY_PATH=$(brew --prefix)/lib" >> $GITHUB_ENV 51 | shell: bash 52 | - if: runner.os == 'Linux' 53 | run: | 54 | sudo apt-get update 55 | sudo apt-get install --no-install-recommends -y valgrind ocl-icd-opencl-dev libssl-dev libhwloc-dev pkgconf 56 | shell: bash 57 | - if: runner.os == 'macOS' 58 | run: | 59 | HOMEBREW_NO_AUTO_UPDATE=1 brew install md5sha1sum hwloc 60 | shell: bash 61 | - uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 62 | with: 63 | toolchain: 1.73 64 | - uses: actions/setup-go@v5 65 | with: 66 | go-version: '1.23' 67 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | workflow_dispatch: 9 | inputs: 10 | save: 11 | description: 'Save Filecoin parameters' 12 | required: false 13 | default: 'false' 14 | publish: 15 | description: 'Publish the static library' 16 | required: false 17 | default: 'false' 18 | run-leak-detector: 19 | description: 'Run the CGO leak detector' 20 | required: false 21 | default: 'false' 22 | ref: 23 | description: 'The ref to build' 24 | required: false 25 | 26 | defaults: 27 | run: 28 | shell: bash 29 | 30 | concurrency: 31 | group: ${{ github.workflow }}-${{ github.ref }} 32 | cancel-in-progress: ${{ github.event_name == 'pull_request' }} 33 | 34 | permissions: 35 | contents: read 36 | 37 | # Can we apply these to the entire workflow? 38 | env: 39 | # Build the kernel only for the single architecture. This should reduce 40 | # the overall compile-time significantly. 41 | EC_GPU_CUDA_NVCC_ARGS: --fatbin --gpu-architecture=sm_75 --generate-code=arch=compute_75,code=sm_75 42 | BELLMAN_CUDA_NVCC_ARGS: --fatbin --gpu-architecture=sm_75 --generate-code=arch=compute_75,code=sm_75 43 | NEPTUNE_CUDA_NVCC_ARGS: --fatbin --gpu-architecture=sm_75 --generate-code=arch=compute_75,code=sm_75 44 | DEBIAN_FRONTEND: noninteractive 45 | 46 | jobs: 47 | check: 48 | name: Check code style and run linters 49 | runs-on: ubuntu-latest 50 | steps: 51 | - uses: actions/checkout@v4 52 | with: 53 | submodules: recursive 54 | - uses: ./.github/actions/configure-environment 55 | - if: github.event.inputs.ref != '' 56 | uses: actions/checkout@v4 57 | with: 58 | submodules: recursive 59 | ref: ${{ github.event.inputs.ref }} 60 | - name: Run shellcheck 61 | run: shellcheck ./install-filcrypto 62 | - name: Run cargo fmt 63 | run: | 64 | rustup component add rustfmt 65 | cargo fmt --manifest-path ./rust/Cargo.toml --all -- --check 66 | - name: Run cargo clippy 67 | run: cd rust && cargo clippy --all-targets --features blst-portable,opencl -- -D warnings 68 | - name: Run go fmt 69 | # `! go fmt ./... 2>&1 | read"` doesn't work, this one does, thanks 70 | # https://carsonip.me/posts/go-fmt-and-ci/ 71 | run: | 72 | output=$(go fmt ./...) 
73 | echo "${output}" 74 | test -z "${output}" 75 | - name: Run various linters 76 | run: | 77 | go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.1 78 | make go-lint 79 | cgo-bindings: 80 | name: Build and test CGO bindings (${{ matrix.runner.name }}) 81 | runs-on: ${{ matrix.runner.machine }} 82 | strategy: 83 | matrix: 84 | runner: 85 | - name: "linux-x86_64" 86 | machine: ["self-hosted", "linux", "x64", "4xlarge"] 87 | - name: "linux-arm64" 88 | machine: ["self-hosted", "linux", "arm64", "4xlarge"] 89 | - name: "macos-latest" 90 | machine: 'macos-latest' 91 | fail-fast: false 92 | steps: 93 | - run: echo "Running on $RUNNER_OS $RUNNER_ARCH" 94 | - uses: actions/checkout@v4 95 | with: 96 | submodules: recursive 97 | - uses: ./.github/actions/configure-environment 98 | - if: github.event.inputs.ref != '' 99 | uses: actions/checkout@v4 100 | with: 101 | submodules: recursive 102 | ref: ${{ github.event.inputs.ref }} 103 | - if: runner.os == 'macOS' 104 | run: cd rust && cargo fetch 105 | - name: Build project 106 | run: make 107 | - name: Build project without CGO 108 | run: env CGO_ENABLED=0 go build . 109 | - if: runner.os == 'Linux' 110 | uses: actions/cache/restore@v3 111 | with: 112 | key: v28-proof-params-${{ runner.os }}-${{ runner.arch }} 113 | path: | 114 | ./filecoin-proof-parameters 115 | - if: runner.os == 'Linux' 116 | name: Obtain Filecoin parameters 117 | run: | 118 | DIR=$(pwd) 119 | cd $(mktemp -d) 120 | go install github.com/filecoin-project/go-paramfetch/paramfetch@latest 121 | $GOPATH/bin/paramfetch 2048 "${DIR}/parameters.json" "${DIR}/srs-inner-product.json" 122 | - if: runner.os == 'Linux' && (github.event == 'push' || github.event.inputs.save == 'true') 123 | uses: actions/cache/save@v3 124 | with: 125 | key: v28-proof-params-${{ runner.os }}-${{ runner.arch }} 126 | path: | 127 | ./filecoin-proof-parameters 128 | - if: runner.os == 'Linux' 129 | run: cd rust && rustup target add wasm32-unknown-unknown 130 | - if: github.event.inputs.run-leak-detector == 'true' 131 | run: make cgo-leakdetect 132 | - if: runner.os == 'Linux' 133 | run: cd rust && FIL_PROOFS_PARAMETER_CACHE="${GITHUB_WORKSPACE}/filecoin-proof-parameters/" RUST_LOG=info cargo test --all --release && cd .. 
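      # GOEXPERIMENT=cgocheck2 (Go 1.21+) enables the stricter runtime checks on pointers passed between Go and C while the binding tests below run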
134 | - if: runner.os == 'Linux' 135 | run: GOEXPERIMENT=cgocheck2 RUST_LOG=info go test -timeout 60m 136 | - if: runner.os == 'macOS' 137 | name: Build project and tests, but don't actually run the tests (used to verify that build/link works with Darwin) 138 | run: GOEXPERIMENT=cgocheck2 RUST_LOG=info go test -run=^$ 139 | supraseal: 140 | name: Build with CUDA supraseal 141 | runs-on: ubuntu-latest 142 | env: 143 | CC: gcc-12 144 | CXX: g++-12 145 | NVCC_PREPEND_FLAGS: "-ccbin /usr/bin/g++-12" 146 | steps: 147 | - uses: actions/checkout@v4 148 | with: 149 | submodules: recursive 150 | - uses: ./.github/actions/configure-environment 151 | - if: github.event.inputs.ref != '' 152 | uses: actions/checkout@v4 153 | with: 154 | submodules: recursive 155 | ref: ${{ github.event.inputs.ref }} 156 | - name: Build project with `FFI_USE_CUDA_SUPRASEAL=1` 157 | run: FFI_BUILD_FROM_SOURCE=1 FFI_USE_CUDA_SUPRASEAL=1 make 158 | -------------------------------------------------------------------------------- /.github/workflows/release-check.yml: -------------------------------------------------------------------------------- 1 | name: Release Checker 2 | 3 | on: 4 | pull_request_target: 5 | paths: ["version.json"] 6 | types: [ opened, synchronize, reopened, labeled, unlabeled ] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | release-check: 19 | uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0 20 | with: 21 | sources: '["version.json"]' 22 | upload-release-assets: 23 | needs: [release-check] 24 | if: fromJSON(needs.release-check.outputs.json)['version.json'] 25 | name: Publish the static library (${{ matrix.runner }}) 26 | runs-on: ${{ matrix.runner }} 27 | strategy: 28 | matrix: 29 | runner: ['ubuntu-latest', ['self-hosted', 'linux', 'arm64', 'xlarge'], 'macos-latest'] 30 | fail-fast: false 31 | steps: 32 | - run: echo "Running on $RUNNER_OS $RUNNER_ARCH" 33 | - uses: actions/checkout@v4 34 | with: 35 | submodules: recursive 36 | - uses: ./.github/actions/configure-environment 37 | - if: runner.os == 'macOS' 38 | run: | 39 | rustup target add x86_64-apple-darwin 40 | cargo fetch 41 | working-directory: rust 42 | - if: runner.os == 'Linux' 43 | name: Build and publish the standard release 44 | env: 45 | GITHUB_TOKEN: ${{ github.token }} 46 | GITHUB_RELEASE_URL: ${{ github.api_url }}/repos/${{ github.repository }}/releases/${{ fromJSON(needs.release-check.outputs.json)['version.json'].id }} 47 | run: | 48 | REPOSITORY_NAME=${GITHUB_REPOSITORY##*/} 49 | 50 | TARBALL_PATH="/tmp/${REPOSITORY_NAME}-$(uname)-$(uname -m)-standard.tar.gz" 51 | RELEASE_NAME="${REPOSITORY_NAME}-$(uname)-$(uname -m)-standard" 52 | 53 | # Note: the blst dependency uses the portable configuration for maximum compatibility 54 | ./scripts/build-release.sh build --verbose --no-default-features --features multicore-sdr,opencl,blst-portable 55 | ./scripts/package-release.sh $TARBALL_PATH 56 | ./scripts/publish-release.sh $TARBALL_PATH $RELEASE_NAME 57 | working-directory: rust 58 | - if: runner.os == 'Linux' 59 | name: Build the optimized release 60 | run: | 61 | REPOSITORY_NAME=${GITHUB_REPOSITORY##*/} 62 | 63 | TARBALL_PATH="/tmp/${REPOSITORY_NAME}-$(uname)-$(uname -m)-optimized.tar.gz" 64 | RUSTFLAGS="-C target-feature=$(cat rustc-target-features-optimized.json | jq -r '.[].rustc_target_feature' | tr '\n' ',')" 65 | 66 | 
./scripts/build-release.sh build --verbose --no-default-features --features multicore-sdr,opencl 67 | ./scripts/package-release.sh $TARBALL_PATH 68 | working-directory: rust 69 | - if: runner.os == 'macOS' 70 | name: Build and publish the universal standard release 71 | env: 72 | GITHUB_TOKEN: ${{ github.token }} 73 | GITHUB_RELEASE_URL: ${{ github.api_url }}/repos/${{ github.repository }}/releases/${{ fromJSON(needs.release-check.outputs.json)['version.json'].id }} 74 | run: | 75 | REPOSITORY_NAME=${GITHUB_REPOSITORY##*/} 76 | 77 | RELEASE_NAME="${REPOSITORY_NAME}-$(uname)-standard" 78 | TARBALL_PATH="/tmp/${RELEASE_NAME}.tar.gz" 79 | 80 | # Note: the blst dependency uses the portable configuration for maximum compatibility 81 | ./scripts/build-release.sh lipo --verbose --no-default-features --features multicore-sdr,opencl,blst-portable 82 | ./scripts/package-release.sh $TARBALL_PATH 83 | ./scripts/publish-release.sh $TARBALL_PATH $RELEASE_NAME 84 | working-directory: rust 85 | -------------------------------------------------------------------------------- /.github/workflows/releaser.yml: -------------------------------------------------------------------------------- 1 | name: Releaser 2 | 3 | on: 4 | push: 5 | paths: ["version.json"] 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: write 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.sha }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | releaser: 17 | uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0 18 | with: 19 | sources: '["version.json"]' 20 | secrets: 21 | UCI_GITHUB_TOKEN: ${{ secrets.UCI_GITHUB_TOKEN }} 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/*.rs.bk 2 | **/include 3 | **/paramcache 4 | **/target 5 | .install-filcrypto 6 | filcrypto.h 7 | filcrypto.pc 8 | filecoin.h 9 | filecoin.pc 10 | *.a 11 | simulator 12 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 Filecoin Project 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 4 | 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
8 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DEPS:=filcrypto.h filcrypto.pc libfilcrypto.a 2 | 3 | export CARGO_TARGET_DIR=target 4 | 5 | all: $(DEPS) 6 | .PHONY: all 7 | 8 | # Create a file so that parallel make doesn't call `./install-filcrypto` for 9 | # each of the deps 10 | $(DEPS): .install-filcrypto ; 11 | 12 | .install-filcrypto: rust 13 | go clean -cache -testcache 14 | ./install-filcrypto 15 | @touch $@ 16 | 17 | clean: 18 | go clean -cache -testcache 19 | rm -rf $(DEPS) .install-filcrypto 20 | rm -f ./runner 21 | cd rust && cargo clean && cd .. 22 | .PHONY: clean 23 | 24 | go-lint: $(DEPS) 25 | golangci-lint run -v --concurrency 2 --new-from-rev origin/master --timeout 10m 26 | .PHONY: go-lint 27 | 28 | shellcheck: 29 | shellcheck install-filcrypto 30 | 31 | lint: shellcheck go-lint 32 | 33 | cgo-leakdetect: runner 34 | valgrind --leak-check=full --show-leak-kinds=definite ./runner 35 | .PHONY: cgo-leakdetect 36 | 37 | runner: $(DEPS) 38 | rm -f ./runner 39 | go build -o ./runner ./cgoleakdetect/ 40 | .PHONY: runner 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status][circleci-image]][circleci-link] 2 | 3 | # Filecoin FFI 4 | 5 | > C and CGO bindings for Filecoin's Rust libraries, i.e: [proofs](https://github.com/filecoin-project/rust-fil-proofs) and [ref-fvm](https://github.com/filecoin-project/ref-fvm). This repository is built to enable the reference implementation of Filecoin, [Lotus](https://github.com/filecoin-project/lotus), to consume the Rust libraries that are needed. 6 | 7 | ## Building 8 | 9 | To build and install libfilcrypto, its header file and pkg-config manifest, run: 10 | 11 | ```shell 12 | make 13 | ``` 14 | 15 | To optionally authenticate with GitHub for assets download (to increase API limits) set `GITHUB_TOKEN` to personal access token. 16 | 17 | If no precompiled static library is available for your operating system, the build tooling will attempt to compile a static library from local Rust sources. 
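For example, an authenticated build that downloads the precompiled assets might look like this (a minimal sketch; the token value is a placeholder for your own personal access token):

```shell
export GITHUB_TOKEN=<personal-access-token>
make
```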
18 | 19 | ### Installation notes 20 | 21 | By default, building this will download a pre-built binary of the FFI. The advantages of downloading it are faster build times and not requiring a Rust toolchain and build environment. 22 | 23 | The disadvantage of downloading the pre-built binary is that it will not be optimized for your specific hardware. This means that if raw performance is of utmost importance to you, it's highly recommended that you build from source. 24 | 25 | ### Building from Source 26 | 27 | To opt out of downloading precompiled assets, set `FFI_BUILD_FROM_SOURCE=1`. 28 | 29 | To also allow portable building of the `blst` dependency, set `FFI_USE_BLST_PORTABLE=1`: 30 | 31 | ```shell 32 | rm .install-filcrypto \ 33 | ; make clean \ 34 | ; FFI_BUILD_FROM_SOURCE=1 FFI_USE_BLST_PORTABLE=1 make 35 | ``` 36 | 37 | Alternatively, to build a maximally portable binary without optimizing for the current CPU, set `FFI_PORTABLE=1`: 38 | 39 | ```shell 40 | rm .install-filcrypto \ 41 | ; make clean \ 42 | ; FFI_BUILD_FROM_SOURCE=1 FFI_PORTABLE=1 make 43 | ``` 44 | 45 | By default, a 'gpu' option is used in the proofs library. This feature is also used in FFI unless explicitly disabled. To disable building with the 'gpu' dependency, set `FFI_USE_GPU=0`: 46 | 47 | ```shell 48 | rm .install-filcrypto \ 49 | ; make clean \ 50 | ; FFI_BUILD_FROM_SOURCE=1 FFI_USE_GPU=0 make 51 | ``` 52 | 53 | #### GPU support 54 | 55 | CUDA for GPU support is now enabled by default in the proofs library. This feature can optionally be replaced by OpenCL by setting `FFI_USE_OPENCL=1` in the environment when building from source. Using `FFI_PORTABLE=1` will also enable OpenCL support. Alternatively, if the CUDA toolkit (such as `nvcc`) cannot be located in the environment, OpenCL support is used instead. To disable GPU support entirely, set `FFI_USE_GPU=0` in the environment when building from source. 56 | 57 | There is experimental support for a faster C2 (SealCommit phase 2) implementation named "SupraSeal". To enable it, set `FFI_USE_CUDA_SUPRASEAL=1`. It is specific to CUDA and won't work with OpenCL. 58 | 59 | ```shell 60 | rm .install-filcrypto \ 61 | ; make clean \ 62 | ; FFI_BUILD_FROM_SOURCE=1 make 63 | ``` 64 | 65 | By default, a 'multicore-sdr' option is used in the proofs library. This feature is also used in FFI unless explicitly disabled. To disable building with the 'multicore-sdr' dependency, set `FFI_USE_MULTICORE_SDR=0`: 66 | 67 | ```shell 68 | rm .install-filcrypto \ 69 | ; make clean \ 70 | ; FFI_BUILD_FROM_SOURCE=1 FFI_USE_MULTICORE_SDR=0 make 71 | ``` 72 | 73 | ## Updating rust-fil-proofs (via rust-filecoin-proofs-api) 74 | 75 | If rust-fil-proofs has changed from commit X to Y and you wish to get Y into the filecoin-ffi project, you need to do a few things: 76 | 77 | 1. Update the rust-filecoin-proofs-api [Cargo.toml][1] file to point to Y 78 | 2. Run `cd rust && cargo update -p "filecoin-proofs-api"` from the root of the filecoin-ffi project 79 | 3. After the previous step alters your Cargo.lock file, commit and push 80 | 81 | ## go get 82 | 83 | `go get` needs some additional steps in order to work as expected.
84 | 85 | Get the source, add this repo as a submodule to your repo, build it, and point your module at it: 86 | 87 | ```shell 88 | $ go get github.com/filecoin-project/filecoin-ffi 89 | $ git submodule add https://github.com/filecoin-project/filecoin-ffi.git extern/filecoin-ffi 90 | $ make -C extern/filecoin-ffi 91 | $ go mod edit -replace=github.com/filecoin-project/filecoin-ffi=./extern/filecoin-ffi 92 | ``` 93 | 94 | ## Updating the Changelog 95 | 96 | The `mkreleaselog` script (in the project root) can be used to generate a good 97 | portion of the filecoin-ffi changelog. For historical reasons, the script must 98 | be run from the root of a filecoin-ffi checkout which is in your `$GOPATH`. 99 | 100 | Run it like so: 101 | 102 | ```shell 103 | ./mkreleaselog v0.25.0 v0.26.0 > /tmp/v0.26.0.notes.txt 104 | ``` 105 | 106 | ## Contribution 107 | 108 | ### Maintainers 109 | 110 | The core maintainers of this repository are: 111 | - @Filoz 112 | - [Elliptic Research](https://www.ellipticresearch.com/) 113 | 114 | Maintainers are not only contributors to this repository; they also exercise a range of editorial responsibilities to keep the repository organized for OSS contributors, which includes triaging issues, reviewing and merging/closing PRs, publishing releases, and so on. 115 | 116 | ### Development Guidelines (WIP) 117 | 118 | #### CI Builds 119 | 120 | To start a CI job that builds binaries off of a commit, push a tag starting with the character `v`, e.g. `v1.22.0-rc2`. 121 | 122 | #### Branches 123 | 124 | `master` is the development branch of this repository. Changes introduced to master must be tested (automated and/or manual). The head of master is tagged and released automatically upon the merge of each PR. 125 | 126 | We cooperate with [Lotus' releases and its testing flows](https://github.com/filecoin-project/lotus/blob/0c91b0dc1012c3e54b305a76bb25fb68390adf9d/LOTUS_RELEASE_FLOW.md?plain=1#L50) to confirm whether a tagged release is production-ready: 127 | 128 | *Non-consensus breaking changes* 129 | - All PRs introducing non-consensus breaking changes can be merged to master as long as they have maintainers' approval. 130 | - Roughly on a monthly basis, Lotus will integrate the head of FFI's `master` branch for its new feature release and carry it through the testing flows. 131 | - A `release/lotus-vX` branch will be created to record the commit that Lotus integrates in the corresponding release. 132 | - If any bug is found during testing, the fix should land in master and then be backported to `release/lotus-vX`. The updated commit should be integrated into Lotus and tested again. Repeat these steps until the release can be considered stable. 133 | 134 | *Consensus breaking changes* 135 | - Consensus breaking changes should be developed in their own branches (suggested naming: `feat/` for feature branches or `bug/` for bug-fix branches). 136 | - Consensus breaking changes that are scoped into the next immediate network upgrade shall land in the `next` branch first. The maintainers are responsible for coordinating when to land `next` onto `master` according to Lotus' mandatory (network upgrade) release schedules. 137 | - A new dev branch should be created, and contributors are responsible for rebasing the branch onto `master`/`next` as needed. 138 | 139 | #### Versioning 140 | 141 | The versioning in Filecoin-FFI currently follows the Lotus versioning. For example, if you are cutting a release for Lotus v1.28.0-rc1, the Filecoin-FFI release will be named v1.28.0-rc1 as well.
(Note: Lotus versioning will be refactored in the near future as part of [lotus #12072](https://github.com/filecoin-project/lotus/issues/12072).) 142 | 143 | #### [Release Process](RELEASE.md) 144 | 145 | ## License 146 | 147 | This repository is dual-licensed under Apache 2.0 and MIT terms. 148 | 149 | [1]: https://github.com/filecoin-project/rust-filecoin-proofs-api/commit/61fde0e581cc38abc4e13dbe96145c9ad2f1f0f5 150 | 151 | [circleci-image]: https://circleci.com/gh/filecoin-project/filecoin-ffi.svg?branch=master&style=shield 152 | [circleci-link]: https://app.circleci.com/pipelines/github/filecoin-project/filecoin-ffi?branch=master 153 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release Process 2 | 3 | This document describes the process for releasing a new version of the `filecoin-ffi` project. 4 | 5 | ## Current State 6 | 7 | 1. Create a pull request which updates the `version` in the [top-level `version.json` file](https://github.com/filecoin-project/filecoin-ffi/blob/master/version.json) (a minimal sketch of this file is shown at the end of this document). 8 | - Title the PR `chore: X.Y.Z release` 9 | 2. On pull request creation, a [Release Checker](.github/workflows/release-check.yml) workflow will run. It will perform the following actions: 10 | 1. Extract the version from the top-level `version.json` file. 11 | 2. Check if a git tag for the version already exists. Continue only if it does not. 12 | 3. Create a draft GitHub release with the version as the tag. (A git tag with this version string will be created when the release is published.) 13 | 4. Comment on the pull request with a link to the draft release. 14 | 5. Build the project for Linux (X64), Linux (ARM64), and macOS. 15 | 6. Upload the built assets to the draft release (replace any existing assets with the same name). 16 | 3. On pull request merge, a [Releaser](.github/workflows/releaser.yml) workflow will run. It will perform the following actions: 17 | 1. Extract the version from the top-level `version.json` file. 18 | 2. Check if a git tag for the version already exists. Continue only if it does not. 19 | 3. Check if a draft GitHub release with the version as the tag exists. 20 | 4. If the draft release exists, publish it. Otherwise, create and publish a new release with the version as the git tag. Publishing the release creates the git tag. 21 | 22 | ## Known Limitations 23 | 24 | 1. If one pushes an update to the `version` in the top-level `version.json` file without creating a pull request, the Release Checker workflow will not run. Hence, the release assets will not be automatically built and uploaded. 26 | ## Possible Improvements 27 | 28 | 1. Add a check to the [Releaser](.github/workflows/releaser.yml) workflow to ensure that the created/published release contains the expected assets. If it does not, create them and run the [publish-release.sh](rust/scripts/publish-release.sh) script to upload the missing assets.
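As a minimal sketch of the pull request described in step 1 of "Current State" above, the change only needs to bump the version string in `version.json` (illustrative contents; the actual file may carry additional fields):

```shell
$ cat version.json
{
  "version": "v1.28.0-rc1"
}
```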
29 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | For reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md) 6 | 7 | ## Reporting a non security bug 8 | 9 | For non-critical bugs, please simply file a GitHub issue on this repo. 10 | -------------------------------------------------------------------------------- /bls.go: -------------------------------------------------------------------------------- 1 | //go:build cgo 2 | // +build cgo 3 | 4 | package ffi 5 | 6 | // #cgo linux LDFLAGS: ${SRCDIR}/libfilcrypto.a -Wl,-unresolved-symbols=ignore-all 7 | // #cgo darwin LDFLAGS: ${SRCDIR}/libfilcrypto.a -Wl,-undefined,dynamic_lookup 8 | // #cgo pkg-config: ${SRCDIR}/filcrypto.pc 9 | // #include "./filcrypto.h" 10 | import "C" 11 | import ( 12 | "github.com/filecoin-project/filecoin-ffi/cgo" 13 | ) 14 | 15 | // Hash computes the digest of a message 16 | func Hash(message Message) Digest { 17 | digest := cgo.Hash(cgo.AsSliceRefUint8(message)) 18 | if digest == nil { 19 | return Digest{} 20 | } 21 | return *digest 22 | } 23 | 24 | // Verify verifies that a signature is the aggregated signature of digests - pubkeys 25 | func Verify(signature *Signature, digests []Digest, publicKeys []PublicKey) bool { 26 | // prep data 27 | flattenedDigests := make([]byte, DigestBytes*len(digests)) 28 | for idx, digest := range digests { 29 | copy(flattenedDigests[(DigestBytes*idx):(DigestBytes*(1+idx))], digest[:]) 30 | } 31 | 32 | flattenedPublicKeys := make([]byte, PublicKeyBytes*len(publicKeys)) 33 | for idx, publicKey := range publicKeys { 34 | copy(flattenedPublicKeys[(PublicKeyBytes*idx):(PublicKeyBytes*(1+idx))], publicKey[:]) 35 | } 36 | 37 | return cgo.Verify( 38 | cgo.AsSliceRefUint8(signature[:]), 39 | cgo.AsSliceRefUint8(flattenedDigests), 40 | cgo.AsSliceRefUint8(flattenedPublicKeys), 41 | ) 42 | } 43 | 44 | // HashVerify verifies that a signature is the aggregated signature of hashed messages. 45 | func HashVerify(signature *Signature, messages []Message, publicKeys []PublicKey) bool { 46 | var flattenedMessages []byte 47 | messagesSizes := make([]uint, len(messages)) 48 | for idx := range messages { 49 | flattenedMessages = append(flattenedMessages, messages[idx]...) 50 | messagesSizes[idx] = uint(len(messages[idx])) 51 | } 52 | 53 | flattenedPublicKeys := make([]byte, PublicKeyBytes*len(publicKeys)) 54 | for idx, publicKey := range publicKeys { 55 | copy(flattenedPublicKeys[(PublicKeyBytes*idx):(PublicKeyBytes*(1+idx))], publicKey[:]) 56 | } 57 | 58 | return cgo.HashVerify( 59 | cgo.AsSliceRefUint8(signature[:]), 60 | cgo.AsSliceRefUint8(flattenedMessages), 61 | cgo.AsSliceRefUint(messagesSizes), 62 | cgo.AsSliceRefUint8(flattenedPublicKeys), 63 | ) 64 | } 65 | 66 | // Aggregate aggregates signatures together into a new signature. If the 67 | // provided signatures cannot be aggregated (due to invalid input or an 68 | // an operational error), Aggregate will return nil. 
69 | func Aggregate(signatures []Signature) *Signature { 70 | // prep data 71 | flattenedSignatures := make([]byte, SignatureBytes*len(signatures)) 72 | for idx, sig := range signatures { 73 | copy(flattenedSignatures[(SignatureBytes*idx):(SignatureBytes*(1+idx))], sig[:]) 74 | } 75 | 76 | return cgo.Aggregate(cgo.AsSliceRefUint8(flattenedSignatures)) 77 | } 78 | 79 | // PrivateKeyGenerate generates a private key 80 | func PrivateKeyGenerate() PrivateKey { 81 | key := cgo.PrivateKeyGenerate() 82 | if key == nil { 83 | return PrivateKey{} 84 | } 85 | return *key 86 | } 87 | 88 | // PrivateKeyGenerate generates a private key in a predictable manner. 89 | func PrivateKeyGenerateWithSeed(seed PrivateKeyGenSeed) PrivateKey { 90 | ary := cgo.AsByteArray32(seed[:]) 91 | key := cgo.PrivateKeyGenerateWithSeed(&ary) 92 | if key == nil { 93 | return PrivateKey{} 94 | } 95 | return *key 96 | } 97 | 98 | // PrivateKeySign signs a message 99 | func PrivateKeySign(privateKey PrivateKey, message Message) *Signature { 100 | return cgo.PrivateKeySign(cgo.AsSliceRefUint8(privateKey[:]), cgo.AsSliceRefUint8(message)) 101 | } 102 | 103 | // PrivateKeyPublicKey gets the public key for a private key 104 | func PrivateKeyPublicKey(privateKey PrivateKey) *PublicKey { 105 | return cgo.PrivateKeyPublicKey(cgo.AsSliceRefUint8(privateKey[:])) 106 | } 107 | 108 | // CreateZeroSignature creates a zero signature, used as placeholder in filecoin. 109 | func CreateZeroSignature() Signature { 110 | signature := cgo.CreateZeroSignature() 111 | if signature == nil { 112 | return Signature{} 113 | } 114 | return *signature 115 | } 116 | -------------------------------------------------------------------------------- /bls_test.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestDeterministicPrivateKeyGeneration(t *testing.T) { 14 | for i := 0; i < 10000; i++ { 15 | var xs [32]byte 16 | n, err := rand.Read(xs[:]) 17 | require.NoError(t, err) 18 | require.Equal(t, len(xs), n) 19 | 20 | first := PrivateKeyGenerateWithSeed(xs) 21 | secnd := PrivateKeyGenerateWithSeed(xs) 22 | 23 | assert.Equal(t, first, secnd) 24 | } 25 | } 26 | 27 | func TestBLSSigningAndVerification(t *testing.T) { 28 | // generate private keys 29 | fooPrivateKey := PrivateKeyGenerate() 30 | barPrivateKey := PrivateKeyGenerate() 31 | 32 | // get the public keys for the private keys 33 | fooPublicKey := PrivateKeyPublicKey(fooPrivateKey) 34 | barPublicKey := PrivateKeyPublicKey(barPrivateKey) 35 | 36 | // make messages to sign with the keys 37 | fooMessage := Message("hello foo") 38 | barMessage := Message("hello bar!") 39 | 40 | // calculate the digests of the messages 41 | fooDigest := Hash(fooMessage) 42 | barDigest := Hash(barMessage) 43 | 44 | // get the signature when signing the messages with the private keys 45 | fooSignature := PrivateKeySign(fooPrivateKey, fooMessage) 46 | barSignature := PrivateKeySign(barPrivateKey, barMessage) 47 | 48 | // get the aggregateSign 49 | aggregateSign := Aggregate([]Signature{*fooSignature, *barSignature}) 50 | 51 | // assert the foo message was signed with the foo key 52 | assert.True(t, Verify(fooSignature, []Digest{fooDigest}, []PublicKey{*fooPublicKey})) 53 | 54 | // assert the bar message was signed with the bar key 55 | assert.True(t, Verify(barSignature, []Digest{barDigest}, 
[]PublicKey{*barPublicKey})) 56 | 57 | // assert the foo message was signed with the foo key 58 | assert.True(t, HashVerify(fooSignature, []Message{fooMessage}, []PublicKey{*fooPublicKey})) 59 | 60 | // assert the bar message was signed with the bar key 61 | assert.True(t, HashVerify(barSignature, []Message{barMessage}, []PublicKey{*barPublicKey})) 62 | 63 | // assert the foo message was not signed by the bar key 64 | assert.False(t, Verify(fooSignature, []Digest{fooDigest}, []PublicKey{*barPublicKey})) 65 | 66 | // assert the bar/foo message was not signed by the foo/bar key 67 | assert.False(t, Verify(barSignature, []Digest{barDigest}, []PublicKey{*fooPublicKey})) 68 | assert.False(t, Verify(barSignature, []Digest{fooDigest}, []PublicKey{*barPublicKey})) 69 | assert.False(t, Verify(fooSignature, []Digest{barDigest}, []PublicKey{*fooPublicKey})) 70 | 71 | //assert the foo and bar message was signed with the foo and bar key 72 | assert.True(t, HashVerify(aggregateSign, []Message{fooMessage, barMessage}, []PublicKey{*fooPublicKey, *barPublicKey})) 73 | 74 | //assert the bar and foo message was not signed by the foo and bar key 75 | assert.False(t, HashVerify(aggregateSign, []Message{fooMessage, barMessage}, []PublicKey{*fooPublicKey})) 76 | } 77 | 78 | func BenchmarkBLSVerify(b *testing.B) { 79 | priv := PrivateKeyGenerate() 80 | 81 | msg := Message("this is a message that i will be signing") 82 | digest := Hash(msg) 83 | 84 | sig := PrivateKeySign(priv, msg) 85 | // fmt.Println("SIG SIZE: ", len(sig)) 86 | // fmt.Println("SIG: ", sig) 87 | pubk := PrivateKeyPublicKey(priv) 88 | 89 | b.ResetTimer() 90 | for i := 0; i < b.N; i++ { 91 | if !Verify(sig, []Digest{digest}, []PublicKey{*pubk}) { 92 | b.Fatal("failed to verify") 93 | } 94 | } 95 | } 96 | 97 | func TestBlsAggregateErrors(t *testing.T) { 98 | t.Run("no signatures", func(t *testing.T) { 99 | var empty []Signature 100 | out := Aggregate(empty) 101 | require.Nil(t, out) 102 | }) 103 | 104 | t.Run("nil signatures", func(t *testing.T) { 105 | out := Aggregate(nil) 106 | require.Nil(t, out) 107 | }) 108 | } 109 | 110 | func BenchmarkBLSVerifyBatch(b *testing.B) { 111 | b.Run("10", benchmarkBLSVerifyBatchSize(10)) 112 | b.Run("50", benchmarkBLSVerifyBatchSize(50)) 113 | b.Run("100", benchmarkBLSVerifyBatchSize(100)) 114 | b.Run("300", benchmarkBLSVerifyBatchSize(300)) 115 | b.Run("1000", benchmarkBLSVerifyBatchSize(1000)) 116 | b.Run("4000", benchmarkBLSVerifyBatchSize(4000)) 117 | } 118 | 119 | func benchmarkBLSVerifyBatchSize(size int) func(b *testing.B) { 120 | return func(b *testing.B) { 121 | var digests []Digest 122 | var sigs []Signature 123 | var pubks []PublicKey 124 | for i := 0; i < size; i++ { 125 | msg := Message(fmt.Sprintf("cats cats cats cats %d %d %d dogs", i, i, i)) 126 | digests = append(digests, Hash(msg)) 127 | priv := PrivateKeyGenerate() 128 | sig := PrivateKeySign(priv, msg) 129 | sigs = append(sigs, *sig) 130 | pubk := PrivateKeyPublicKey(priv) 131 | pubks = append(pubks, *pubk) 132 | } 133 | 134 | t := time.Now() 135 | agsig := Aggregate(sigs) 136 | fmt.Println("Aggregate took: ", time.Since(t)) 137 | 138 | b.ResetTimer() 139 | for i := 0; i < b.N; i++ { 140 | if !Verify(agsig, digests, pubks) { 141 | b.Fatal("failed to verify") 142 | } 143 | } 144 | } 145 | } 146 | 147 | func BenchmarkBLSHashAndVerify(b *testing.B) { 148 | priv := PrivateKeyGenerate() 149 | 150 | msg := Message("this is a message that i will be signing") 151 | sig := PrivateKeySign(priv, msg) 152 | 153 | // fmt.Println("SIG SIZE: ", len(sig)) 154 
| // fmt.Println("SIG: ", sig) 155 | pubk := PrivateKeyPublicKey(priv) 156 | 157 | b.ResetTimer() 158 | for i := 0; i < b.N; i++ { 159 | digest := Hash(msg) 160 | if !Verify(sig, []Digest{digest}, []PublicKey{*pubk}) { 161 | b.Fatal("failed to verify") 162 | } 163 | } 164 | } 165 | 166 | func BenchmarkBLSHashVerify(b *testing.B) { 167 | priv := PrivateKeyGenerate() 168 | 169 | msg := Message("this is a message that i will be signing") 170 | sig := PrivateKeySign(priv, msg) 171 | 172 | // fmt.Println("SIG SIZE: ", len(sig)) 173 | // fmt.Println("SIG: ", sig) 174 | pubk := PrivateKeyPublicKey(priv) 175 | 176 | b.ResetTimer() 177 | for i := 0; i < b.N; i++ { 178 | if !HashVerify(sig, []Message{msg}, []PublicKey{*pubk}) { 179 | b.Fatal("failed to verify") 180 | } 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | make clean 6 | cd rust 7 | rm -f Cargo.lock 8 | rustup target add x86_64-apple-darwin 9 | rustup target add aarch64-apple-darwin 10 | cargo update -p "filecoin-proofs-api" 11 | cargo install cargo-lipo 12 | cd .. 13 | FFI_BUILD_FROM_SOURCE=1 make 14 | go mod tidy 15 | -------------------------------------------------------------------------------- /cgo/blockstore.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | import ( 4 | "unsafe" 5 | 6 | blocks "github.com/ipfs/go-block-format" 7 | "github.com/ipfs/go-cid" 8 | ipld "github.com/ipfs/go-ipld-format" 9 | ) 10 | 11 | /* 12 | #include 13 | typedef const uint8_t* buf_t; 14 | */ 15 | import "C" 16 | 17 | func toCid(k C.buf_t, kLen C.int32_t) cid.Cid { 18 | type cidRepr struct { 19 | str string 20 | } 21 | return *(*cid.Cid)(unsafe.Pointer(&cidRepr{ 22 | str: C.GoStringN((*C.char)(unsafe.Pointer(k)), kLen), 23 | })) 24 | } 25 | 26 | //export cgo_blockstore_get 27 | func cgo_blockstore_get(handle C.uint64_t, k C.buf_t, kLen C.int32_t, block **C.uint8_t, size *C.int32_t) (res C.int32_t) { 28 | defer func() { 29 | if rerr := recover(); rerr != nil { 30 | logPanic(rerr) 31 | res = ErrPanic 32 | } 33 | }() 34 | 35 | c := toCid(k, kLen) 36 | externs, ctx := Lookup(uint64(handle)) 37 | if externs == nil { 38 | return ErrInvalidHandle 39 | } 40 | 41 | err := externs.View(ctx, c, func(data []byte) error { 42 | *block = (C.buf_t)(C.CBytes(data)) 43 | *size = C.int32_t(len(data)) 44 | return nil 45 | }) 46 | 47 | switch { 48 | case err == nil: 49 | return 0 50 | case ipld.IsNotFound(err): 51 | return ErrNotFound 52 | default: 53 | return ErrIO 54 | } 55 | } 56 | 57 | //export cgo_blockstore_put 58 | func cgo_blockstore_put(handle C.uint64_t, k C.buf_t, kLen C.int32_t, block C.buf_t, blockLen C.int32_t) (res C.int32_t) { 59 | defer func() { 60 | if rerr := recover(); rerr != nil { 61 | logPanic(rerr) 62 | res = ErrPanic 63 | } 64 | }() 65 | 66 | c := toCid(k, kLen) 67 | externs, ctx := Lookup(uint64(handle)) 68 | if externs == nil { 69 | return ErrInvalidHandle 70 | } 71 | b, _ := blocks.NewBlockWithCid(C.GoBytes(unsafe.Pointer(block), blockLen), c) 72 | if externs.Put(ctx, b) != nil { 73 | return ErrIO 74 | } 75 | return 0 76 | } 77 | 78 | //export cgo_blockstore_put_many 79 | func cgo_blockstore_put_many(handle C.uint64_t, lengths *C.int32_t, lengthsLen C.int32_t, blockBuf C.buf_t) (res C.int32_t) { 80 | defer func() { 81 | if rerr := recover(); rerr != nil { 82 | logPanic(rerr) 83 | res = ErrPanic 84 | } 85 | 
}() 86 | externs, ctx := Lookup(uint64(handle)) 87 | if externs == nil { 88 | return ErrInvalidHandle 89 | } 90 | // Get a reference to the lengths vector without copying. 91 | const MAX_LEN = 1 << 30 92 | if lengthsLen > MAX_LEN { 93 | return ErrInvalidArgument 94 | } 95 | 96 | lengthsGo := unsafe.Slice(lengths, lengthsLen) 97 | blocksGo := make([]blocks.Block, 0, lengthsLen) 98 | for _, length := range lengthsGo { 99 | if length > MAX_LEN { 100 | return ErrInvalidArgument 101 | } 102 | // get the next buffer. We could use C.GoBytes, but that copies. 103 | buf := unsafe.Slice((*byte)(unsafe.Pointer(blockBuf)), length) 104 | 105 | // read the CID. This function will copy the CID internally. 106 | cidLen, k, err := cid.CidFromBytes(buf) 107 | if err != nil { 108 | return ErrInvalidArgument 109 | } 110 | buf = buf[cidLen:] 111 | 112 | // Read the block and copy it. Unfortunately, our blockstore makes no guarantees 113 | // about not holding onto blocks. 114 | block := make([]byte, len(buf)) 115 | copy(block, buf) 116 | b, _ := blocks.NewBlockWithCid(block, k) 117 | 118 | // Add it to the batch. 119 | blocksGo = append(blocksGo, b) 120 | 121 | // Advance the block buffer. 122 | blockBuf = (C.buf_t)(unsafe.Pointer(uintptr(unsafe.Pointer(blockBuf)) + uintptr(length))) 123 | } 124 | if externs.PutMany(ctx, blocksGo) != nil { 125 | return ErrIO 126 | } 127 | return 0 128 | } 129 | 130 | //export cgo_blockstore_has 131 | func cgo_blockstore_has(handle C.uint64_t, k C.buf_t, kLen C.int32_t) (res C.int32_t) { 132 | defer func() { 133 | if rerr := recover(); rerr != nil { 134 | logPanic(rerr) 135 | res = ErrPanic 136 | } 137 | }() 138 | 139 | c := toCid(k, kLen) 140 | externs, ctx := Lookup(uint64(handle)) 141 | if externs == nil { 142 | return ErrInvalidHandle 143 | } 144 | has, err := externs.Has(ctx, c) 145 | switch { 146 | case err == nil: 147 | case ipld.IsNotFound(err): 148 | // Some old blockstores still return this. 149 | return 0 150 | default: 151 | return ErrIO 152 | } 153 | if has { 154 | return 1 155 | } 156 | return 0 157 | } 158 | -------------------------------------------------------------------------------- /cgo/bls.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 
5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | 11 | func Hash(message SliceRefUint8) *[96]byte { 12 | resp := (*ByteArray96)(C.hash((C.slice_ref_uint8_t)(message))) 13 | defer resp.destroy() 14 | return resp.copyAsArray() 15 | } 16 | 17 | func Aggregate(flattenedSignatures SliceRefUint8) *[96]byte { 18 | resp := (*ByteArray96)(C.aggregate((C.slice_ref_uint8_t)(flattenedSignatures))) 19 | defer resp.destroy() 20 | return resp.copyAsArray() 21 | } 22 | 23 | func Verify(signature SliceRefUint8, flattenedDigests SliceRefUint8, flattenedPublicKeys SliceRefUint8) bool { 24 | resp := C.verify((C.slice_ref_uint8_t)(signature), 25 | (C.slice_ref_uint8_t)(flattenedDigests), 26 | (C.slice_ref_uint8_t)(flattenedPublicKeys)) 27 | return bool(resp) 28 | } 29 | 30 | func HashVerify(signature SliceRefUint8, flattenedMessages SliceRefUint8, messageSizes SliceRefUint, flattenedPublicKeys SliceRefUint8) bool { 31 | resp := C.hash_verify((C.slice_ref_uint8_t)(signature), 32 | (C.slice_ref_uint8_t)(flattenedMessages), 33 | (C.slice_ref_size_t)(messageSizes), 34 | (C.slice_ref_uint8_t)(flattenedPublicKeys)) 35 | return bool(resp) 36 | } 37 | 38 | func PrivateKeyGenerate() *[32]byte { 39 | resp := (*ByteArray32)(C.private_key_generate()) 40 | defer resp.destroy() 41 | return resp.copyAsArray() 42 | } 43 | 44 | func PrivateKeyGenerateWithSeed(rawSeed *ByteArray32) *[32]byte { 45 | resp := (*ByteArray32)(C.private_key_generate_with_seed((*C.uint8_32_array_t)(rawSeed))) 46 | defer resp.destroy() 47 | return resp.copyAsArray() 48 | } 49 | 50 | func PrivateKeySign(rawPrivateKey SliceRefUint8, message SliceRefUint8) *[96]byte { 51 | resp := (*ByteArray96)(C.private_key_sign((C.slice_ref_uint8_t)(rawPrivateKey), 52 | (C.slice_ref_uint8_t)(message))) 53 | defer resp.destroy() 54 | return resp.copyAsArray() 55 | } 56 | 57 | func PrivateKeyPublicKey(rawPrivateKey SliceRefUint8) *[48]byte { 58 | resp := (*ByteArray48)(C.private_key_public_key((C.slice_ref_uint8_t)(rawPrivateKey))) 59 | defer resp.destroy() 60 | return resp.copyAsArray() 61 | } 62 | 63 | func CreateZeroSignature() *[96]byte { 64 | resp := (*ByteArray96)(C.create_zero_signature()) 65 | defer resp.destroy() 66 | return resp.copyAsArray() 67 | } 68 | -------------------------------------------------------------------------------- /cgo/const.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 
5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | 11 | const ( 12 | FCPResponseStatusNoError = C.F_C_P_RESPONSE_STATUS_NO_ERROR 13 | FCPResponseStatusUnclassifiedError = C.F_C_P_RESPONSE_STATUS_UNCLASSIFIED_ERROR 14 | FCPResponseStatusCallerError = C.F_C_P_RESPONSE_STATUS_CALLER_ERROR 15 | FCPResponseStatusReceiverError = C.F_C_P_RESPONSE_STATUS_RECEIVER_ERROR 16 | ) 17 | 18 | const ( 19 | RegisteredSealProofStackedDrg2KiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG2_KI_B_V1 20 | RegisteredSealProofStackedDrg8MiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG8_MI_B_V1 21 | RegisteredSealProofStackedDrg512MiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG512_MI_B_V1 22 | RegisteredSealProofStackedDrg32GiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG32_GI_B_V1 23 | RegisteredSealProofStackedDrg64GiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG64_GI_B_V1 24 | RegisteredSealProofStackedDrg2KiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG2_KI_B_V1_1 25 | RegisteredSealProofStackedDrg8MiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG8_MI_B_V1_1 26 | RegisteredSealProofStackedDrg512MiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG512_MI_B_V1_1 27 | RegisteredSealProofStackedDrg32GiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG32_GI_B_V1_1 28 | RegisteredSealProofStackedDrg64GiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG64_GI_B_V1_1 29 | RegisteredSealProofStackedDrg2KiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG2_KI_B_V1_1__FEAT__SYNTHETIC_PO_REP 30 | RegisteredSealProofStackedDrg8MiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG8_MI_B_V1_1__FEAT__SYNTHETIC_PO_REP 31 | RegisteredSealProofStackedDrg512MiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG512_MI_B_V1_1__FEAT__SYNTHETIC_PO_REP 32 | RegisteredSealProofStackedDrg32GiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG32_GI_B_V1_1__FEAT__SYNTHETIC_PO_REP 33 | RegisteredSealProofStackedDrg64GiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG64_GI_B_V1_1__FEAT__SYNTHETIC_PO_REP 34 | RegisteredSealProofStackedDrg2KiBV1_2_Feat_NonInteractivePoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG2_KI_B_V1_2__FEAT__NON_INTERACTIVE_PO_REP 35 | RegisteredSealProofStackedDrg8MiBV1_2_Feat_NonInteractivePoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG8_MI_B_V1_2__FEAT__NON_INTERACTIVE_PO_REP 36 | RegisteredSealProofStackedDrg512MiBV1_2_Feat_NonInteractivePoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG512_MI_B_V1_2__FEAT__NON_INTERACTIVE_PO_REP 37 | RegisteredSealProofStackedDrg32GiBV1_2_Feat_NonInteractivePoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG32_GI_B_V1_2__FEAT__NON_INTERACTIVE_PO_REP 38 | RegisteredSealProofStackedDrg64GiBV1_2_Feat_NonInteractivePoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG64_GI_B_V1_2__FEAT__NON_INTERACTIVE_PO_REP 39 | ) 40 | 41 | const ( 42 | RegisteredAggregationProofSnarkPackV1 = C.REGISTERED_AGGREGATION_PROOF_SNARK_PACK_V1 43 | RegisteredAggregationProofSnarkPackV2 = C.REGISTERED_AGGREGATION_PROOF_SNARK_PACK_V2 44 | ) 45 | 46 | const ( 47 | RegisteredPoStProofStackedDrgWinning2KiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING2_KI_B_V1 48 | RegisteredPoStProofStackedDrgWinning8MiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING8_MI_B_V1 49 | RegisteredPoStProofStackedDrgWinning512MiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING512_MI_B_V1 50 | RegisteredPoStProofStackedDrgWinning32GiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING32_GI_B_V1 51 | RegisteredPoStProofStackedDrgWinning64GiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING64_GI_B_V1 52 | 
RegisteredPoStProofStackedDrgWindow2KiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW2_KI_B_V1 53 | RegisteredPoStProofStackedDrgWindow8MiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW8_MI_B_V1 54 | RegisteredPoStProofStackedDrgWindow512MiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW512_MI_B_V1 55 | RegisteredPoStProofStackedDrgWindow32GiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW32_GI_B_V1 56 | RegisteredPoStProofStackedDrgWindow64GiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW64_GI_B_V1 57 | RegisteredPoStProofStackedDrgWindow2KiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW2_KI_B_V1_1 58 | RegisteredPoStProofStackedDrgWindow8MiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW8_MI_B_V1_1 59 | RegisteredPoStProofStackedDrgWindow512MiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW512_MI_B_V1_1 60 | RegisteredPoStProofStackedDrgWindow32GiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW32_GI_B_V1_1 61 | RegisteredPoStProofStackedDrgWindow64GiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW64_GI_B_V1_1 62 | ) 63 | 64 | const ( 65 | RegisteredUpdateProofStackedDrg2KiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG2_KI_B_V1 66 | RegisteredUpdateProofStackedDrg8MiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG8_MI_B_V1 67 | RegisteredUpdateProofStackedDrg512MiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG512_MI_B_V1 68 | RegisteredUpdateProofStackedDrg32GiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG32_GI_B_V1 69 | RegisteredUpdateProofStackedDrg64GiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG64_GI_B_V1 70 | ) 71 | -------------------------------------------------------------------------------- /cgo/errors.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | // #cgo linux LDFLAGS: ${SRCDIR}/../libfilcrypto.a -Wl,-unresolved-symbols=ignore-all 4 | // #cgo darwin LDFLAGS: ${SRCDIR}/../libfilcrypto.a -Wl,-undefined,dynamic_lookup 5 | // #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | // #include "../filcrypto.h" 7 | import "C" 8 | import ( 9 | "fmt" 10 | "os" 11 | "runtime/debug" 12 | ) 13 | 14 | const ( 15 | ErrInvalidHandle = C.FVM_ERROR_INVALID_HANDLE 16 | ErrNotFound = C.FVM_ERROR_NOT_FOUND 17 | ErrIO = C.FVM_ERROR_IO 18 | ErrInvalidArgument = C.FVM_ERROR_INVALID_ARGUMENT 19 | ErrPanic = C.FVM_ERROR_PANIC 20 | ) 21 | 22 | func logPanic(err interface{}) { 23 | fmt.Fprintf(os.Stderr, "panic in cgo externs: %s\n", err) 24 | debug.PrintStack() 25 | } 26 | -------------------------------------------------------------------------------- /cgo/extern.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #include 5 | typedef const uint8_t* buf_t; 6 | */ 7 | import "C" 8 | import ( 9 | "unsafe" 10 | 11 | "github.com/filecoin-project/go-address" 12 | 13 | "github.com/filecoin-project/go-state-types/abi" 14 | ) 15 | 16 | //export cgo_extern_get_chain_randomness 17 | func cgo_extern_get_chain_randomness( 18 | handle C.uint64_t, round C.int64_t, 19 | output C.buf_t, 20 | ) (res C.int32_t) { 21 | defer func() { 22 | if rerr := recover(); rerr != nil { 23 | logPanic(rerr) 24 | res = ErrPanic 25 | } 26 | }() 27 | 28 | out := unsafe.Slice((*byte)(unsafe.Pointer(output)), 32) 29 | externs, ctx := Lookup(uint64(handle)) 30 | if externs == nil { 31 | return ErrInvalidHandle 32 | } 33 | 34 | rand, err := externs.GetChainRandomness(ctx, abi.ChainEpoch(round)) 35 | 36 | switch err { 37 | case nil: 38 | copy(out[:], rand[:]) 39 | return 0 40 | default: 41 | return ErrIO 42 | } 43 | } 
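// Note: the remaining cgo_extern_* callbacks below follow the same pattern as
// cgo_extern_get_chain_randomness above: recover from panics and report them as
// ErrPanic, resolve the externs handle via Lookup (returning ErrInvalidHandle if
// it is unknown), copy results into the caller-provided buffers, and translate
// Go errors into the FVM error codes defined in errors.go.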
44 | 45 | //export cgo_extern_get_beacon_randomness 46 | func cgo_extern_get_beacon_randomness( 47 | handle C.uint64_t, round C.int64_t, 48 | output C.buf_t, 49 | ) (res C.int32_t) { 50 | defer func() { 51 | if rerr := recover(); rerr != nil { 52 | logPanic(rerr) 53 | res = ErrPanic 54 | } 55 | }() 56 | 57 | out := unsafe.Slice((*byte)(unsafe.Pointer(output)), 32) 58 | externs, ctx := Lookup(uint64(handle)) 59 | if externs == nil { 60 | return ErrInvalidHandle 61 | } 62 | 63 | rand, err := externs.GetBeaconRandomness(ctx, abi.ChainEpoch(round)) 64 | 65 | switch err { 66 | case nil: 67 | copy(out[:], rand[:]) 68 | return 0 69 | default: 70 | return ErrIO 71 | } 72 | } 73 | 74 | //export cgo_extern_verify_consensus_fault 75 | func cgo_extern_verify_consensus_fault( 76 | handle C.uint64_t, 77 | h1 C.buf_t, h1Len C.int32_t, 78 | h2 C.buf_t, h2Len C.int32_t, 79 | extra C.buf_t, extraLen C.int32_t, 80 | minerIdOut *C.uint64_t, 81 | epochOut *C.int64_t, 82 | faultOut *C.int64_t, 83 | gasUsedOut *C.int64_t, 84 | ) (res C.int32_t) { 85 | defer func() { 86 | if rerr := recover(); rerr != nil { 87 | logPanic(rerr) 88 | res = ErrPanic 89 | } 90 | }() 91 | 92 | externs, ctx := Lookup(uint64(handle)) 93 | if externs == nil { 94 | return ErrInvalidHandle 95 | } 96 | 97 | h1Go := C.GoBytes(unsafe.Pointer(h1), h1Len) 98 | h2Go := C.GoBytes(unsafe.Pointer(h2), h2Len) 99 | extraGo := C.GoBytes(unsafe.Pointer(extra), extraLen) 100 | 101 | faultRes, gas := externs.VerifyConsensusFault(ctx, h1Go, h2Go, extraGo) 102 | *gasUsedOut = C.int64_t(gas) 103 | *faultOut = C.int64_t(faultRes.Type) 104 | 105 | if faultRes.Type != ConsensusFaultNone { 106 | id, err := address.IDFromAddress(faultRes.Target) 107 | if err != nil { 108 | return ErrIO 109 | } 110 | *epochOut = C.int64_t(faultRes.Epoch) 111 | *minerIdOut = C.uint64_t(id) 112 | } 113 | 114 | return 0 115 | } 116 | 117 | //export cgo_extern_get_tipset_cid 118 | func cgo_extern_get_tipset_cid( 119 | handle C.uint64_t, 120 | epoch C.int64_t, 121 | output C.buf_t, 122 | outputLen C.int32_t, 123 | ) (res C.int32_t) { 124 | defer func() { 125 | if rerr := recover(); rerr != nil { 126 | logPanic(rerr) 127 | res = ErrPanic 128 | } 129 | }() 130 | 131 | externs, ctx := Lookup(uint64(handle)) 132 | if externs == nil { 133 | return ErrInvalidHandle 134 | } 135 | 136 | out := unsafe.Slice((*byte)(unsafe.Pointer(output)), outputLen) 137 | 138 | k, err := externs.TipsetCid(ctx, abi.ChainEpoch(epoch)) 139 | if err != nil { 140 | return ErrIO 141 | } 142 | if k.ByteLen() > int(outputLen) { 143 | return ErrInvalidArgument 144 | } 145 | copy(out, k.Bytes()) 146 | return 0 147 | } 148 | -------------------------------------------------------------------------------- /cgo/fvm.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 
5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | 11 | func CreateFvmMachine(fvmVersion FvmRegisteredVersion, chainEpoch, chainTimestamp, chainId, baseFeeHi, baseFeeLo, baseCircSupplyHi, baseCircSupplyLo, networkVersion uint64, stateRoot SliceRefUint8, tracing, flushAllBlocks bool, blockstoreId, externsId uint64) (*FvmMachine, error) { 12 | resp := (*resultFvmMachine)(C.create_fvm_machine( 13 | (C.FvmRegisteredVersion_t)(fvmVersion), 14 | C.uint64_t(chainEpoch), 15 | C.uint64_t(chainTimestamp), 16 | C.uint64_t(chainId), 17 | C.uint64_t(baseFeeHi), 18 | C.uint64_t(baseFeeLo), 19 | C.uint64_t(baseCircSupplyHi), 20 | C.uint64_t(baseCircSupplyLo), 21 | C.uint32_t(networkVersion), 22 | (C.slice_ref_uint8_t)(stateRoot), 23 | C.bool(tracing), 24 | C.bool(flushAllBlocks), 25 | C.uint64_t(blockstoreId), 26 | C.uint64_t(externsId), 27 | )) 28 | // take out the pointer from the result to ensure it doesn't get freed 29 | executor := (*FvmMachine)(resp.value) 30 | resp.value = nil 31 | defer resp.destroy() 32 | 33 | if err := CheckErr(resp); err != nil { 34 | return nil, err 35 | } 36 | 37 | return executor, nil 38 | } 39 | 40 | func CreateFvmDebugMachine(fvmVersion FvmRegisteredVersion, chainEpoch, chainTimestamp, chainId, baseFeeHi, baseFeeLo, baseCircSupplyHi, baseCircSupplyLo, networkVersion uint64, stateRoot SliceRefUint8, actorRedirect SliceRefUint8, tracing, flushAllBlocks bool, blockstoreId, externsId uint64) (*FvmMachine, error) { 41 | resp := (*resultFvmMachine)(C.create_fvm_debug_machine( 42 | (C.FvmRegisteredVersion_t)(fvmVersion), 43 | C.uint64_t(chainEpoch), 44 | C.uint64_t(chainTimestamp), 45 | C.uint64_t(chainId), 46 | C.uint64_t(baseFeeHi), 47 | C.uint64_t(baseFeeLo), 48 | C.uint64_t(baseCircSupplyHi), 49 | C.uint64_t(baseCircSupplyLo), 50 | C.uint32_t(networkVersion), 51 | (C.slice_ref_uint8_t)(stateRoot), 52 | (C.slice_ref_uint8_t)(actorRedirect), 53 | C.bool(tracing), 54 | C.bool(flushAllBlocks), 55 | C.uint64_t(blockstoreId), 56 | C.uint64_t(externsId), 57 | )) 58 | // take out the pointer from the result to ensure it doesn't get freed 59 | executor := (*FvmMachine)(resp.value) 60 | resp.value = nil 61 | defer resp.destroy() 62 | 63 | if err := CheckErr(resp); err != nil { 64 | return nil, err 65 | } 66 | 67 | return executor, nil 68 | } 69 | 70 | func FvmMachineExecuteMessage(executor *FvmMachine, message SliceRefUint8, chainLen, applyKind uint64) (FvmMachineExecuteResponseGo, error) { 71 | resp := (*resultFvmMachineExecuteResponse)(C.fvm_machine_execute_message( 72 | (*C.InnerFvmMachine_t)(executor), 73 | (C.slice_ref_uint8_t)(message), 74 | C.uint64_t(chainLen), 75 | C.uint64_t(applyKind), 76 | )) 77 | defer resp.destroy() 78 | 79 | if err := CheckErr(resp); err != nil { 80 | return FvmMachineExecuteResponseGo{}, err 81 | } 82 | 83 | return (FvmMachineExecuteResponse)(resp.value).copy(), nil 84 | } 85 | 86 | func FvmMachineFlush(executor *FvmMachine) ([]byte, error) { 87 | resp := (*resultSliceBoxedUint8)(C.fvm_machine_flush((*C.InnerFvmMachine_t)(executor))) 88 | defer resp.destroy() 89 | 90 | if err := CheckErr(resp); err != nil { 91 | return nil, err 92 | } 93 | return (SliceBoxedUint8)(resp.value).copy(), nil 94 | } 95 | -------------------------------------------------------------------------------- /cgo/helpers.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 
5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | import ( 11 | "errors" 12 | "unsafe" 13 | ) 14 | 15 | var ( 16 | emptyUint8 C.uint8_t = 0 17 | emptyUint64 C.uint64_t = 0 18 | emptyUint C.size_t = 0 19 | emptyAggregationInputs C.AggregationInputs_t = C.AggregationInputs_t{} 20 | emptyPublicReplicaInfo C.PublicReplicaInfo_t = C.PublicReplicaInfo_t{} 21 | emptyPrivateReplicaInfo C.PrivateReplicaInfo_t = C.PrivateReplicaInfo_t{} 22 | emptyPoStProof C.PoStProof_t = C.PoStProof_t{} 23 | emptyPublicPieceInfo C.PublicPieceInfo_t = C.PublicPieceInfo_t{} 24 | emptyByteArray32 C.uint8_32_array_t = C.uint8_32_array_t{} 25 | emptySliceBoxedUint8 C.slice_boxed_uint8_t = C.slice_boxed_uint8_t{} 26 | ) 27 | 28 | func AsSliceRefUint8(goBytes []byte) SliceRefUint8 { 29 | len := len(goBytes) 30 | 31 | if len == 0 { 32 | // can't take element 0 of an empty slice 33 | return SliceRefUint8{ 34 | ptr: &emptyUint8, 35 | len: C.size_t(len), 36 | } 37 | } 38 | return SliceRefUint8{ 39 | ptr: (*C.uint8_t)(unsafe.Pointer(&goBytes[0])), 40 | len: C.size_t(len), 41 | } 42 | } 43 | 44 | func AsSliceRefUint64(goBytes []uint64) SliceRefUint64 { 45 | len := len(goBytes) 46 | 47 | if len == 0 { 48 | // can't take element 0 of an empty slice 49 | return SliceRefUint64{ 50 | ptr: &emptyUint64, 51 | len: C.size_t(len), 52 | } 53 | } 54 | return SliceRefUint64{ 55 | ptr: (*C.uint64_t)(unsafe.Pointer(&goBytes[0])), 56 | len: C.size_t(len), 57 | } 58 | } 59 | 60 | func AllocSliceBoxedUint8(goBytes []byte) SliceBoxedUint8 { 61 | len := len(goBytes) 62 | 63 | ptr := (SliceBoxedUint8)(C.alloc_boxed_slice(C.size_t(len))) 64 | copy(ptr.slice(), goBytes) 65 | 66 | return ptr 67 | } 68 | 69 | func AsSliceRefUint(goSlice []uint) SliceRefUint { 70 | len := len(goSlice) 71 | 72 | if len == 0 { 73 | // can't take element 0 of an empty slice 74 | return SliceRefUint{ 75 | ptr: &emptyUint, 76 | len: C.size_t(len), 77 | } 78 | } 79 | 80 | return SliceRefUint{ 81 | ptr: (*C.size_t)(unsafe.Pointer(&goSlice[0])), 82 | len: C.size_t(len), 83 | } 84 | } 85 | 86 | func AsSliceRefAggregationInputs(goSlice []AggregationInputs) SliceRefAggregationInputs { 87 | len := len(goSlice) 88 | 89 | if len == 0 { 90 | // can't take element 0 of an empty slice 91 | return SliceRefAggregationInputs{ 92 | ptr: &emptyAggregationInputs, 93 | len: C.size_t(len), 94 | } 95 | } 96 | 97 | return SliceRefAggregationInputs{ 98 | ptr: (*C.AggregationInputs_t)(unsafe.Pointer(&goSlice[0])), 99 | len: C.size_t(len), 100 | } 101 | } 102 | 103 | func AsSliceRefPublicReplicaInfo(goSlice []PublicReplicaInfo) SliceRefPublicReplicaInfo { 104 | len := len(goSlice) 105 | 106 | if len == 0 { 107 | // can't take element 0 of an empty slice 108 | return SliceRefPublicReplicaInfo{ 109 | ptr: &emptyPublicReplicaInfo, 110 | len: C.size_t(len), 111 | } 112 | } 113 | 114 | return SliceRefPublicReplicaInfo{ 115 | ptr: (*C.PublicReplicaInfo_t)(unsafe.Pointer(&goSlice[0])), 116 | len: C.size_t(len), 117 | } 118 | } 119 | 120 | func AsSliceRefPrivateReplicaInfo(goSlice []PrivateReplicaInfo) SliceRefPrivateReplicaInfo { 121 | len := len(goSlice) 122 | 123 | if len == 0 { 124 | // can't take element 0 of an empty slice 125 | return SliceRefPrivateReplicaInfo{ 126 | ptr: &emptyPrivateReplicaInfo, 127 | len: C.size_t(len), 128 | } 129 | } 130 | 131 | return SliceRefPrivateReplicaInfo{ 132 | ptr: (*C.PrivateReplicaInfo_t)(unsafe.Pointer(&goSlice[0])), 133 | len: C.size_t(len), 134 | } 135 | } 136 | 137 | func 
AsSliceRefPoStProof(goSlice []PoStProof) SliceRefPoStProof { 138 | len := len(goSlice) 139 | 140 | if len == 0 { 141 | // can't take element 0 of an empty slice 142 | return SliceRefPoStProof{ 143 | ptr: &emptyPoStProof, 144 | len: C.size_t(len), 145 | } 146 | } 147 | 148 | return SliceRefPoStProof{ 149 | ptr: (*C.PoStProof_t)(unsafe.Pointer(&goSlice[0])), 150 | len: C.size_t(len), 151 | } 152 | } 153 | 154 | func AsSliceRefPublicPieceInfo(goSlice []PublicPieceInfo) SliceRefPublicPieceInfo { 155 | len := len(goSlice) 156 | 157 | if len == 0 { 158 | // can't take element 0 of an empty slice 159 | return SliceRefPublicPieceInfo{ 160 | ptr: &emptyPublicPieceInfo, 161 | len: C.size_t(len), 162 | } 163 | } 164 | 165 | return SliceRefPublicPieceInfo{ 166 | ptr: (*C.PublicPieceInfo_t)(unsafe.Pointer(&goSlice[0])), 167 | len: C.size_t(len), 168 | } 169 | } 170 | 171 | func AsSliceRefByteArray32(goSlice []ByteArray32) SliceRefByteArray32 { 172 | len := len(goSlice) 173 | 174 | if len == 0 { 175 | // can't take element 0 of an empty slice 176 | return SliceRefByteArray32{ 177 | ptr: &emptyByteArray32, 178 | len: C.size_t(len), 179 | } 180 | } 181 | 182 | return SliceRefByteArray32{ 183 | ptr: (*C.uint8_32_array_t)(unsafe.Pointer(&goSlice[0])), 184 | len: C.size_t(len), 185 | } 186 | } 187 | 188 | func AsSliceRefSliceBoxedUint8(goSlice []SliceBoxedUint8) SliceRefSliceBoxedUint8 { 189 | len := len(goSlice) 190 | 191 | if len == 0 { 192 | // can't take element 0 of an empty slice 193 | return SliceRefSliceBoxedUint8{ 194 | ptr: &emptySliceBoxedUint8, 195 | len: C.size_t(len), 196 | } 197 | } 198 | 199 | return SliceRefSliceBoxedUint8{ 200 | ptr: (*C.slice_boxed_uint8_t)(unsafe.Pointer(&goSlice[0])), 201 | len: C.size_t(len), 202 | } 203 | } 204 | 205 | func AsByteArray32(goSlice []byte) ByteArray32 { 206 | var ary ByteArray32 207 | l := len(goSlice) 208 | for idx := range goSlice { 209 | if idx < l { 210 | ary.idx[idx] = C.uchar(goSlice[idx]) 211 | } 212 | } 213 | return ary 214 | } 215 | 216 | // CheckErr returns `nil` if the `code` indicates success and an error otherwise. 
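// A nil response is reported as an error rather than dereferenced, and the
// FFI-owned error message is copied into a Go string, so the returned error
// remains valid after the caller destroys the response.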
217 | func CheckErr(resp result) error { 218 | if resp == nil { 219 | return errors.New("nil result from Filecoin FFI") 220 | } 221 | if resp.statusCode() == FCPResponseStatusNoError { 222 | return nil 223 | } 224 | 225 | return errors.New(string(resp.errorMsg().slice())) 226 | } 227 | 228 | func NewAggregationInputs(commR ByteArray32, commD ByteArray32, sectorId uint64, ticket ByteArray32, seed ByteArray32) AggregationInputs { 229 | return AggregationInputs{ 230 | comm_r: (C.uint8_32_array_t)(commR), 231 | comm_d: (C.uint8_32_array_t)(commD), 232 | sector_id: C.uint64_t(sectorId), 233 | ticket: (C.uint8_32_array_t)(ticket), 234 | seed: (C.uint8_32_array_t)(seed), 235 | } 236 | } 237 | 238 | func NewPrivateReplicaInfo(pp RegisteredPoStProof, cacheDirPath string, commR ByteArray32, replicaPath string, sectorId uint64) PrivateReplicaInfo { 239 | return PrivateReplicaInfo{ 240 | registered_proof: (C.RegisteredPoStProof_t)(pp), 241 | cache_dir_path: (C.slice_boxed_uint8_t)(AllocSliceBoxedUint8([]byte(cacheDirPath))), 242 | replica_path: (C.slice_boxed_uint8_t)(AllocSliceBoxedUint8([]byte(replicaPath))), 243 | sector_id: C.uint64_t(sectorId), 244 | comm_r: (C.uint8_32_array_t)(commR), 245 | } 246 | } 247 | 248 | func NewPublicReplicaInfo(pp RegisteredPoStProof, commR ByteArray32, sectorId uint64) PublicReplicaInfo { 249 | return PublicReplicaInfo{ 250 | registered_proof: (C.RegisteredPoStProof_t)(pp), 251 | sector_id: C.uint64_t(sectorId), 252 | comm_r: (C.uint8_32_array_t)(commR), 253 | } 254 | } 255 | 256 | func NewPoStProof(pp RegisteredPoStProof, proof []byte) PoStProof { 257 | return PoStProof{ 258 | registered_proof: (C.RegisteredPoStProof_t)(pp), 259 | proof: (C.slice_boxed_uint8_t)(AllocSliceBoxedUint8(proof)), 260 | } 261 | } 262 | 263 | func NewPublicPieceInfo(numBytes uint64, commP ByteArray32) PublicPieceInfo { 264 | return PublicPieceInfo{ 265 | num_bytes: C.uint64_t(numBytes), 266 | comm_p: (C.uint8_32_array_t)(commP), 267 | } 268 | } 269 | 270 | func NewPartitionSnarkProof(pp RegisteredPoStProof, proof []byte) PartitionSnarkProof { 271 | return PartitionSnarkProof{ 272 | registered_proof: (C.RegisteredPoStProof_t)(pp), 273 | proof: (C.slice_boxed_uint8_t)(AllocSliceBoxedUint8(proof)), 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /cgo/helpers_test.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | import ( 4 | "testing" 5 | "unsafe" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestAsSliceRefUint8(t *testing.T) { 11 | // some words 12 | foo := []byte("hello world") 13 | ref := AsSliceRefUint8(foo) 14 | assert.Equal(t, unsafe.Slice((*byte)(unsafe.Pointer(ref.ptr)), int(ref.len)), foo) 15 | 16 | // empty 17 | foo = []byte("") 18 | ref = AsSliceRefUint8(foo) 19 | assert.Equal(t, unsafe.Slice((*byte)(unsafe.Pointer(ref.ptr)), int(ref.len)), foo) 20 | } 21 | 22 | func TestAsSliceRefUint(t *testing.T) { 23 | foo := []uint{0, 1, 2} 24 | ref := AsSliceRefUint(foo) 25 | assert.Equal(t, unsafe.Slice((*uint)(unsafe.Pointer(ref.ptr)), int(ref.len)), foo) 26 | 27 | // empty 28 | foo = []uint{} 29 | ref = AsSliceRefUint(foo) 30 | assert.Equal(t, unsafe.Slice((*uint)(unsafe.Pointer(ref.ptr)), int(ref.len)), foo) 31 | } 32 | 33 | func TestByteArray32(t *testing.T) { 34 | foo := make([]byte, 32) 35 | for i := range foo { 36 | foo[i] = 1 37 | } 38 | ary := AsByteArray32(foo) 39 | assert.Equal(t, ary.slice(), foo) 40 | 41 | ary2 := ary.copy() 42 | assert.Equal(t, 
ary.slice(), ary2) 43 | 44 | // input too short 45 | aryShort := AsByteArray32([]byte{0, 1, 2}) 46 | slice := aryShort.slice() 47 | for i := range slice { 48 | if i == 0 { 49 | assert.Equal(t, slice[i], byte(0)) 50 | } else if i == 1 { 51 | assert.Equal(t, slice[i], byte(1)) 52 | } else if i == 2 { 53 | assert.Equal(t, slice[i], byte(2)) 54 | } else { 55 | assert.Equal(t, slice[i], byte(0)) 56 | } 57 | } 58 | } 59 | 60 | func TestAllocSliceBoxedUint8(t *testing.T) { 61 | foo := []byte("hello world") 62 | 63 | boxed := AllocSliceBoxedUint8(foo) 64 | defer boxed.Destroy() 65 | assert.Equal(t, boxed.slice(), foo) 66 | } 67 | -------------------------------------------------------------------------------- /cgo/interface.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/filecoin-project/go-address" 7 | "github.com/filecoin-project/go-state-types/abi" 8 | blockstore "github.com/ipfs/boxo/blockstore" 9 | "github.com/ipfs/go-cid" 10 | ) 11 | 12 | type ConsensusFault struct { 13 | // Address of the miner at fault (always an ID address). 14 | Target address.Address 15 | // Epoch of the fault, which is the higher epoch of the two blocks causing it. 16 | Epoch abi.ChainEpoch 17 | // Type of fault. 18 | Type ConsensusFaultType 19 | } 20 | 21 | type ConsensusFaultType int64 22 | 23 | const ( 24 | ConsensusFaultNone ConsensusFaultType = 0 25 | ConsensusFaultDoubleForkMining ConsensusFaultType = 1 26 | ConsensusFaultParentGrinding ConsensusFaultType = 2 27 | ConsensusFaultTimeOffsetMining ConsensusFaultType = 3 28 | ) 29 | 30 | type Externs interface { 31 | GetChainRandomness(ctx context.Context, epoch abi.ChainEpoch) ([32]byte, error) 32 | GetBeaconRandomness(ctx context.Context, epoch abi.ChainEpoch) ([32]byte, error) 33 | VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte) (*ConsensusFault, int64) 34 | TipsetCid(ctx context.Context, epoch abi.ChainEpoch) (cid.Cid, error) 35 | 36 | blockstore.Blockstore 37 | blockstore.Viewer 38 | } 39 | -------------------------------------------------------------------------------- /cgo/libs.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. -lfilcrypto 5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | -------------------------------------------------------------------------------- /cgo/registry.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | ) 7 | 8 | var ( 9 | mu sync.RWMutex 10 | registry map[uint64]registeredExterns 11 | nextId uint64 12 | ) 13 | 14 | type registeredExterns struct { 15 | context.Context 16 | Externs 17 | } 18 | 19 | // Register a new item and get a handle. 20 | func Register(ctx context.Context, externs Externs) uint64 { 21 | mu.Lock() 22 | defer mu.Unlock() 23 | if registry == nil { 24 | registry = make(map[uint64]registeredExterns) 25 | } 26 | id := nextId 27 | nextId++ 28 | registry[id] = registeredExterns{ctx, externs} 29 | return id 30 | } 31 | 32 | // Unregister a blockstore. 33 | // 34 | // WARNING: This method must be called at most _once_ with a handle previously returned by Register. 35 | func Unregister(handle uint64) { 36 | mu.Lock() 37 | defer mu.Unlock() 38 | 39 | delete(registry, handle) 40 | } 41 | 42 | // Lookup a blockstore by handle. 
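// In practice the registry holds Externs (see Register above); for an unknown
// handle the zero value is returned, so callers must check the returned
// Externs against nil, as the exported callbacks in extern.go do.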
43 | func Lookup(handle uint64) (Externs, context.Context) { 44 | mu.RLock() 45 | externs := registry[handle] 46 | mu.RUnlock() 47 | 48 | return externs.Externs, externs.Context 49 | } 50 | -------------------------------------------------------------------------------- /cgo/util.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | 11 | func InitLogFd(fd int32) error { 12 | resp := (*resultVoid)(C.init_log_fd(C.int32_t(fd))) 13 | defer resp.destroy() 14 | 15 | if err := CheckErr(resp); err != nil { 16 | return err 17 | } 18 | 19 | return nil 20 | } 21 | -------------------------------------------------------------------------------- /cgoleakdetect/runner.go: -------------------------------------------------------------------------------- 1 | //go:build cgo 2 | // +build cgo 3 | 4 | package main 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | 10 | ffi "github.com/filecoin-project/filecoin-ffi" 11 | ) 12 | 13 | func main() { 14 | os.Setenv("RUST_LOG", "info") 15 | th := panicOnFailureTestHelper{} 16 | ffi.WorkflowGetGPUDevicesDoesNotProduceAnError(&th) 17 | ffi.WorkflowProofsLifecycle(&th) 18 | ffi.WorkflowRegisteredPoStProofFunctions(&th) 19 | ffi.WorkflowRegisteredSealProofFunctions(&th) 20 | } 21 | 22 | type panicOnFailureTestHelper struct{} 23 | 24 | func (p panicOnFailureTestHelper) AssertEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool { 25 | if expected != actual { 26 | panic(fmt.Sprintf("not equal: %+v, %+v, %+v", expected, actual, msgAndArgs)) 27 | } 28 | 29 | return true 30 | } 31 | 32 | func (p panicOnFailureTestHelper) AssertNoError(err error, msgAndArgs ...interface{}) bool { 33 | if err != nil { 34 | panic(fmt.Sprintf("there was an error: %+v, %+v", err, msgAndArgs)) 35 | } 36 | 37 | return true 38 | } 39 | 40 | func (p panicOnFailureTestHelper) AssertTrue(value bool, msgAndArgs ...interface{}) bool { 41 | if !value { 42 | panic(fmt.Sprintf("not true: %+v, %+v", value, msgAndArgs)) 43 | } 44 | 45 | return true 46 | } 47 | 48 | func (p panicOnFailureTestHelper) RequireEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { 49 | if expected != actual { 50 | panic(fmt.Sprintf("not equal: %+v, %+v, %+v", expected, actual, msgAndArgs)) 51 | } 52 | } 53 | 54 | func (p panicOnFailureTestHelper) RequireNoError(err error, msgAndArgs ...interface{}) { 55 | if err != nil { 56 | panic(fmt.Sprintf("there was an error: %+v, %+v", err, msgAndArgs)) 57 | } 58 | } 59 | 60 | func (p panicOnFailureTestHelper) RequireTrue(value bool, msgAndArgs ...interface{}) { 61 | if !value { 62 | panic(fmt.Sprintf("not true: %+v, %+v", value, msgAndArgs)) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /distributed.go: -------------------------------------------------------------------------------- 1 | //go:build cgo 2 | // +build cgo 3 | 4 | package ffi 5 | 6 | import ( 7 | "github.com/filecoin-project/filecoin-ffi/cgo" 8 | "github.com/filecoin-project/go-state-types/abi" 9 | "github.com/filecoin-project/go-state-types/proof" 10 | ) 11 | 12 | type FallbackChallenges struct { 13 | Sectors []abi.SectorNumber 14 | Challenges map[abi.SectorNumber][]uint64 15 | } 16 | 17 | // GenerateWinningPoStSectorChallenge 18 | func GeneratePoStFallbackSectorChallenges( 19 | proofType abi.RegisteredPoStProof, 20 | minerID abi.ActorID, 21 | 
randomness abi.PoStRandomness, 22 | sectorIds []abi.SectorNumber, 23 | ) (*FallbackChallenges, error) { 24 | proverID, err := toProverID(minerID) 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | pp, err := toFilRegisteredPoStProof(proofType) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | // this should be a simple cast.. 35 | sectorIdsRaw := make([]uint64, len(sectorIds)) 36 | for i := range sectorIds { 37 | sectorIdsRaw[i] = uint64(sectorIds[i]) 38 | } 39 | 40 | randomnessBytes := cgo.AsByteArray32(randomness) 41 | ids, challenges, err := cgo.GenerateFallbackSectorChallenges(pp, &randomnessBytes, cgo.AsSliceRefUint64(sectorIdsRaw), &proverID) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | out := FallbackChallenges{ 47 | Sectors: make([]abi.SectorNumber, len(ids)), 48 | Challenges: make(map[abi.SectorNumber][]uint64), 49 | } 50 | for idx := range ids { 51 | secNum := abi.SectorNumber(ids[idx]) 52 | out.Sectors[idx] = secNum 53 | out.Challenges[secNum] = challenges[idx] 54 | } 55 | 56 | return &out, nil 57 | } 58 | 59 | func GenerateSingleVanillaProof( 60 | replica PrivateSectorInfo, 61 | challenges []uint64, 62 | ) ([]byte, error) { 63 | 64 | rep, err := toFilPrivateReplicaInfo(replica) 65 | if err != nil { 66 | return nil, err 67 | } 68 | 69 | return cgo.GenerateSingleVanillaProof(rep, cgo.AsSliceRefUint64(challenges)) 70 | } 71 | 72 | func GenerateWinningPoStWithVanilla( 73 | proofType abi.RegisteredPoStProof, 74 | minerID abi.ActorID, 75 | randomness abi.PoStRandomness, 76 | proofs [][]byte, 77 | ) ([]proof.PoStProof, error) { 78 | pp, err := toFilRegisteredPoStProof(proofType) 79 | if err != nil { 80 | return nil, err 81 | } 82 | 83 | proverID, err := toProverID(minerID) 84 | if err != nil { 85 | return nil, err 86 | } 87 | fproofs, cleanup := toVanillaProofs(proofs) 88 | defer cleanup() 89 | 90 | randomnessBytes := cgo.AsByteArray32(randomness) 91 | resp, err := cgo.GenerateWinningPoStWithVanilla(pp, &randomnessBytes, &proverID, cgo.AsSliceRefSliceBoxedUint8(fproofs)) 92 | if err != nil { 93 | return nil, err 94 | } 95 | 96 | out, err := fromFilPoStProofs(resp) 97 | if err != nil { 98 | return nil, err 99 | } 100 | 101 | return out, nil 102 | } 103 | 104 | func GenerateWindowPoStWithVanilla( 105 | proofType abi.RegisteredPoStProof, 106 | minerID abi.ActorID, 107 | randomness abi.PoStRandomness, 108 | proofs [][]byte, 109 | ) ([]proof.PoStProof, error) { 110 | pp, err := toFilRegisteredPoStProof(proofType) 111 | if err != nil { 112 | return nil, err 113 | } 114 | 115 | proverID, err := toProverID(minerID) 116 | if err != nil { 117 | return nil, err 118 | } 119 | fproofs, cleaner := toVanillaProofs(proofs) 120 | defer cleaner() 121 | 122 | randomnessBytes := cgo.AsByteArray32(randomness) 123 | rawProofs, _, err := cgo.GenerateWindowPoStWithVanilla(pp, &randomnessBytes, &proverID, cgo.AsSliceRefSliceBoxedUint8(fproofs)) 124 | if err != nil { 125 | return nil, err 126 | } 127 | 128 | out, err := fromFilPoStProofs(rawProofs) 129 | if err != nil { 130 | return nil, err 131 | } 132 | 133 | return out, nil 134 | } 135 | 136 | type PartitionProof proof.PoStProof 137 | 138 | func GenerateSinglePartitionWindowPoStWithVanilla( 139 | proofType abi.RegisteredPoStProof, 140 | minerID abi.ActorID, 141 | randomness abi.PoStRandomness, 142 | proofs [][]byte, 143 | partitionIndex uint, 144 | ) (*PartitionProof, error) { 145 | pp, err := toFilRegisteredPoStProof(proofType) 146 | if err != nil { 147 | return nil, err 148 | } 149 | 150 | proverID, err := 
toProverID(minerID) 151 | if err != nil { 152 | return nil, err 153 | } 154 | fproofs, cleaner := toVanillaProofs(proofs) 155 | defer cleaner() 156 | 157 | randomnessBytes := cgo.AsByteArray32(randomness) 158 | resp, _, err := cgo.GenerateSingleWindowPoStWithVanilla( 159 | pp, 160 | &randomnessBytes, 161 | &proverID, 162 | cgo.AsSliceRefSliceBoxedUint8(fproofs), 163 | partitionIndex, 164 | ) 165 | if err != nil { 166 | return nil, err 167 | } 168 | 169 | dpp, err := fromFilRegisteredPoStProof(resp.RegisteredProof) 170 | if err != nil { 171 | return nil, err 172 | } 173 | 174 | out := PartitionProof{ 175 | PoStProof: dpp, 176 | ProofBytes: resp.Proof, 177 | } 178 | 179 | return &out, nil 180 | } 181 | 182 | func MergeWindowPoStPartitionProofs( 183 | proofType abi.RegisteredPoStProof, 184 | partitionProofs []PartitionProof, 185 | ) (*proof.PoStProof, error) { 186 | pp, err := toFilRegisteredPoStProof(proofType) 187 | if err != nil { 188 | return nil, err 189 | } 190 | 191 | fproofs, cleaner := toPartitionProofs(partitionProofs) 192 | defer cleaner() 193 | 194 | resp, err := cgo.MergeWindowPoStPartitionProofs(pp, cgo.AsSliceRefSliceBoxedUint8(fproofs)) 195 | if err != nil { 196 | return nil, err 197 | } 198 | 199 | dpp, err := fromFilRegisteredPoStProof(resp.RegisteredProof) 200 | if err != nil { 201 | return nil, err 202 | } 203 | 204 | out := proof.PoStProof{ 205 | PoStProof: dpp, 206 | ProofBytes: resp.Proof, 207 | } 208 | 209 | return &out, nil 210 | } 211 | 212 | func toPartitionProofs(src []PartitionProof) ([]cgo.SliceBoxedUint8, func()) { 213 | out := make([]cgo.SliceBoxedUint8, len(src)) 214 | for idx := range out { 215 | out[idx] = cgo.AllocSliceBoxedUint8(src[idx].ProofBytes) 216 | } 217 | 218 | return out, makeCleanerSBU(out, len(src)) 219 | } 220 | -------------------------------------------------------------------------------- /fvm.go: -------------------------------------------------------------------------------- 1 | //go:build cgo && (amd64 || arm64 || riscv64) 2 | // +build cgo 3 | // +build amd64 arm64 riscv64 4 | 5 | package ffi 6 | 7 | // #cgo linux LDFLAGS: ${SRCDIR}/libfilcrypto.a -Wl,-unresolved-symbols=ignore-all 8 | // #cgo darwin LDFLAGS: ${SRCDIR}/libfilcrypto.a -Wl,-undefined,dynamic_lookup 9 | // #cgo pkg-config: ${SRCDIR}/filcrypto.pc 10 | // #include "./filcrypto.h" 11 | import "C" 12 | import ( 13 | "context" 14 | "fmt" 15 | gobig "math/big" 16 | "runtime" 17 | 18 | "github.com/filecoin-project/filecoin-ffi/cgo" 19 | "github.com/filecoin-project/go-state-types/abi" 20 | "github.com/filecoin-project/go-state-types/big" 21 | "github.com/filecoin-project/go-state-types/network" 22 | "github.com/ipfs/go-cid" 23 | "golang.org/x/xerrors" 24 | ) 25 | 26 | type FVM struct { 27 | executor *cgo.FvmMachine 28 | } 29 | 30 | const ( 31 | applyExplicit = iota 32 | applyImplicit 33 | ) 34 | 35 | type FVMOpts struct { 36 | FVMVersion uint64 37 | Externs cgo.Externs 38 | 39 | Epoch abi.ChainEpoch 40 | Timestamp uint64 41 | ChainID uint64 42 | BaseFee abi.TokenAmount 43 | BaseCircSupply abi.TokenAmount 44 | NetworkVersion network.Version 45 | StateBase cid.Cid 46 | Tracing bool 47 | FlushAllBlocks bool 48 | 49 | Debug bool 50 | ActorRedirect cid.Cid 51 | } 52 | 53 | // CreateFVM creates a new FVM instance. 
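// It registers opts.Externs to obtain a handle that is passed to the underlying
// machine as both the blockstore and the externs id, and installs a finalizer
// that destroys the machine and unregisters the handle once the *FVM becomes
// unreachable. A minimal call might look like the following sketch, where every
// value is a hypothetical placeholder:
//
//	fvm, err := CreateFVM(&FVMOpts{
//	    FVMVersion:     fvmVersion,
//	    Externs:        myExterns, // must implement cgo.Externs
//	    Epoch:          epoch,
//	    Timestamp:      timestamp,
//	    ChainID:        chainID,
//	    BaseFee:        baseFee,
//	    BaseCircSupply: circSupply,
//	    NetworkVersion: networkVersion,
//	    StateBase:      stateRoot,
//	})
//	if err != nil {
//	    return err
//	}
//	ret, err := fvm.ApplyMessage(msgBytes, chainLen)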
54 | func CreateFVM(opts *FVMOpts) (*FVM, error) { 55 | baseFeeHi, baseFeeLo, err := splitBigInt(opts.BaseFee) 56 | if err != nil { 57 | return nil, xerrors.Errorf("invalid basefee: %w", err) 58 | } 59 | baseCircSupplyHi, baseCircSupplyLo, err := splitBigInt(opts.BaseCircSupply) 60 | if err != nil { 61 | return nil, xerrors.Errorf("invalid circ supply: %w", err) 62 | } 63 | 64 | exHandle := cgo.Register(context.TODO(), opts.Externs) 65 | var executor *cgo.FvmMachine 66 | if !opts.Debug { 67 | executor, err = cgo.CreateFvmMachine(cgo.FvmRegisteredVersion(opts.FVMVersion), 68 | uint64(opts.Epoch), 69 | opts.Timestamp, 70 | opts.ChainID, 71 | baseFeeHi, 72 | baseFeeLo, 73 | baseCircSupplyHi, 74 | baseCircSupplyLo, 75 | uint64(opts.NetworkVersion), 76 | cgo.AsSliceRefUint8(opts.StateBase.Bytes()), 77 | opts.Tracing, 78 | opts.FlushAllBlocks, 79 | exHandle, exHandle, 80 | ) 81 | } else { 82 | executor, err = cgo.CreateFvmDebugMachine(cgo.FvmRegisteredVersion(opts.FVMVersion), 83 | uint64(opts.Epoch), 84 | opts.Timestamp, 85 | opts.ChainID, 86 | baseFeeHi, 87 | baseFeeLo, 88 | baseCircSupplyHi, 89 | baseCircSupplyLo, 90 | uint64(opts.NetworkVersion), 91 | cgo.AsSliceRefUint8(opts.StateBase.Bytes()), 92 | cgo.AsSliceRefUint8(opts.ActorRedirect.Bytes()), 93 | true, 94 | opts.FlushAllBlocks, 95 | exHandle, exHandle, 96 | ) 97 | } 98 | 99 | if err != nil { 100 | return nil, err 101 | } 102 | 103 | fvm := &FVM{ 104 | executor: executor, 105 | } 106 | runtime.SetFinalizer(fvm, func(f *FVM) { 107 | // Just to be extra safe 108 | if f.executor == nil { 109 | return 110 | } 111 | 112 | executor := f.executor 113 | f.executor = nil 114 | executor.Destroy() 115 | cgo.Unregister(exHandle) 116 | }) 117 | 118 | return fvm, nil 119 | } 120 | 121 | func (f *FVM) ApplyMessage(msgBytes []byte, chainLen uint) (*ApplyRet, error) { 122 | // NOTE: we need to call KeepAlive here (and below) because go doesn't guarantee that the 123 | // receiver will live to the end of the function. If we don't do this, go _will_ garbage 124 | // collect the FVM, causing us to run the finalizer while we're in the middle of using the 125 | // FVM. 126 | defer runtime.KeepAlive(f) 127 | resp, err := cgo.FvmMachineExecuteMessage( 128 | f.executor, 129 | cgo.AsSliceRefUint8(msgBytes), 130 | uint64(chainLen), 131 | applyExplicit, 132 | ) 133 | if err != nil { 134 | return nil, err 135 | } 136 | 137 | return buildResponse(resp) 138 | } 139 | 140 | func (f *FVM) ApplyImplicitMessage(msgBytes []byte) (*ApplyRet, error) { 141 | defer runtime.KeepAlive(f) 142 | resp, err := cgo.FvmMachineExecuteMessage( 143 | f.executor, 144 | cgo.AsSliceRefUint8(msgBytes), 145 | 0, // this isn't an on-chain message, so it has no chain length. 
146 | applyImplicit, 147 | ) 148 | if err != nil { 149 | return nil, err 150 | } 151 | 152 | return buildResponse(resp) 153 | } 154 | 155 | func buildResponse(resp cgo.FvmMachineExecuteResponseGo) (*ApplyRet, error) { 156 | var eventsRoot *cid.Cid 157 | if len(resp.EventsRoot) > 0 { 158 | if eventsRootCid, err := cid.Cast(resp.EventsRoot); err != nil { 159 | return nil, fmt.Errorf("failed to cast events root CID: %w", err) 160 | } else { 161 | eventsRoot = &eventsRootCid 162 | } 163 | } 164 | 165 | return &ApplyRet{ 166 | Return: resp.ReturnVal, 167 | ExitCode: resp.ExitCode, 168 | GasUsed: int64(resp.GasUsed), 169 | MinerPenalty: reformBigInt(resp.PenaltyHi, resp.PenaltyLo), 170 | MinerTip: reformBigInt(resp.MinerTipHi, resp.MinerTipLo), 171 | BaseFeeBurn: reformBigInt(resp.BaseFeeBurnHi, resp.BaseFeeBurnLo), 172 | OverEstimationBurn: reformBigInt(resp.OverEstimationBurnHi, resp.OverEstimationBurnLo), 173 | Refund: reformBigInt(resp.RefundHi, resp.RefundLo), 174 | GasRefund: int64(resp.GasRefund), 175 | GasBurned: int64(resp.GasBurned), 176 | ExecTraceBytes: resp.ExecTrace, 177 | FailureInfo: resp.FailureInfo, 178 | EventsRoot: eventsRoot, 179 | EventsBytes: resp.Events, 180 | }, nil 181 | } 182 | 183 | func (f *FVM) Flush() (cid.Cid, error) { 184 | defer runtime.KeepAlive(f) 185 | stateRoot, err := cgo.FvmMachineFlush(f.executor) 186 | if err != nil { 187 | return cid.Undef, err 188 | } 189 | 190 | return cid.Cast(stateRoot) 191 | } 192 | 193 | type ApplyRet struct { 194 | Return []byte 195 | ExitCode uint64 196 | GasUsed int64 197 | MinerPenalty abi.TokenAmount 198 | MinerTip abi.TokenAmount 199 | BaseFeeBurn abi.TokenAmount 200 | OverEstimationBurn abi.TokenAmount 201 | Refund abi.TokenAmount 202 | GasRefund int64 203 | GasBurned int64 204 | ExecTraceBytes []byte 205 | FailureInfo string 206 | EventsRoot *cid.Cid 207 | EventsBytes []byte 208 | } 209 | 210 | // NOTE: We only support 64bit platforms 211 | 212 | // returns hi, lo 213 | func splitBigInt(i big.Int) (hi uint64, lo uint64, err error) { 214 | if i.Sign() < 0 { 215 | return 0, 0, xerrors.Errorf("negative number: %s", i) 216 | } 217 | words := i.Bits() 218 | switch len(words) { 219 | case 2: 220 | hi = uint64(words[1]) 221 | fallthrough 222 | case 1: 223 | lo = uint64(words[0]) 224 | case 0: 225 | default: 226 | return 0, 0, xerrors.Errorf("exceeds max bigint size: %s", i) 227 | } 228 | return hi, lo, nil 229 | } 230 | 231 | func reformBigInt(hi, lo uint64) big.Int { 232 | var words []gobig.Word 233 | if hi > 0 { 234 | words = []gobig.Word{gobig.Word(lo), gobig.Word(hi)} 235 | } else if lo > 0 { 236 | words = []gobig.Word{gobig.Word(lo)} 237 | } else { 238 | return big.Zero() 239 | } 240 | int := new(gobig.Int) 241 | int.SetBits(words) 242 | return big.NewFromGo(int) 243 | } 244 | -------------------------------------------------------------------------------- /fvm_test.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | import ( 4 | "math" 5 | "testing" 6 | 7 | "github.com/filecoin-project/go-state-types/big" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func checkSplitBigInt(t *testing.T, i big.Int, hi, lo uint64) { 12 | hiA, loA, err := splitBigInt(i) 13 | require.NoError(t, err) 14 | require.Equal(t, hi, hiA, "hi not equal") 15 | require.Equal(t, lo, loA, "lo not equal") 16 | } 17 | 18 | func TestSplitBigIntZero(t *testing.T) { 19 | checkSplitBigInt(t, big.Zero(), 0, 0) 20 | } 21 | 22 | func TestSplitBigIntOne(t *testing.T) { 23 | checkSplitBigInt(t, 
big.NewInt(1), 0, 1) 24 | } 25 | 26 | func TestSplitBigIntMax64(t *testing.T) { 27 | checkSplitBigInt(t, big.NewIntUnsigned(math.MaxUint64), 0, math.MaxUint64) 28 | } 29 | 30 | func TestSplitBigIntLarge(t *testing.T) { 31 | checkSplitBigInt(t, big.Mul(big.NewIntUnsigned(math.MaxUint64), big.NewInt(8)), 0x7, math.MaxUint64^0x7) 32 | } 33 | func TestSplitBigIntNeg(t *testing.T) { 34 | _, _, err := splitBigInt(big.NewInt(-1)) 35 | require.Error(t, err) 36 | } 37 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/filecoin-project/filecoin-ffi 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.23.7 6 | 7 | require ( 8 | github.com/filecoin-project/go-address v1.2.0 9 | github.com/filecoin-project/go-fil-commcid v0.2.0 10 | github.com/filecoin-project/go-state-types v0.16.0 11 | github.com/ipfs/boxo v0.20.0 12 | github.com/ipfs/go-block-format v0.2.0 13 | github.com/ipfs/go-cid v0.5.0 14 | github.com/ipfs/go-ipld-format v0.6.0 15 | github.com/pkg/errors v0.9.1 16 | github.com/stretchr/testify v1.10.0 17 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da 18 | ) 19 | 20 | require ( 21 | github.com/davecgh/go-spew v1.1.1 // indirect 22 | github.com/google/uuid v1.6.0 // indirect 23 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 24 | github.com/ipfs/bbloom v0.0.4 // indirect 25 | github.com/ipfs/go-datastore v0.6.0 // indirect 26 | github.com/ipfs/go-ipfs-util v0.0.3 // indirect 27 | github.com/ipfs/go-ipld-cbor v0.2.0 // indirect 28 | github.com/ipfs/go-log/v2 v2.5.1 // indirect 29 | github.com/ipfs/go-metrics-interface v0.0.1 // indirect 30 | github.com/jbenet/goprocess v0.1.4 // indirect 31 | github.com/klauspost/cpuid/v2 v2.2.8 // indirect 32 | github.com/kr/pretty v0.2.1 // indirect 33 | github.com/mattn/go-isatty v0.0.20 // indirect 34 | github.com/minio/sha256-simd v1.0.1 // indirect 35 | github.com/mr-tron/base58 v1.2.0 // indirect 36 | github.com/multiformats/go-base32 v0.1.0 // indirect 37 | github.com/multiformats/go-base36 v0.2.0 // indirect 38 | github.com/multiformats/go-multibase v0.2.0 // indirect 39 | github.com/multiformats/go-multihash v0.2.3 // indirect 40 | github.com/multiformats/go-varint v0.0.7 // indirect 41 | github.com/pmezard/go-difflib v1.0.0 // indirect 42 | github.com/polydawn/refmt v0.89.0 // indirect 43 | github.com/spaolacci/murmur3 v1.1.0 // indirect 44 | github.com/whyrusleeping/cbor-gen v0.3.1 // indirect 45 | go.uber.org/multierr v1.11.0 // indirect 46 | go.uber.org/zap v1.27.0 // indirect 47 | golang.org/x/crypto v0.36.0 // indirect 48 | golang.org/x/sys v0.31.0 // indirect 49 | gopkg.in/yaml.v3 v3.0.1 // indirect 50 | lukechampine.com/blake3 v1.3.0 // indirect 51 | ) 52 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 2 | github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 3 | github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 4 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 6 | github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/filecoin-project/go-address v1.2.0 h1:NHmWUE/J7Pi2JZX3gZt32XuY69o9StVZeJxdBodIwOE= 8 | github.com/filecoin-project/go-address v1.2.0/go.mod h1:kQEQ4qZ99a51X7DjT9HiMT4yR6UwLJ9kznlxsOIeDAg= 9 | github.com/filecoin-project/go-crypto v0.1.0 h1:Pob2MphoipMbe/ksxZOMcQvmBHAd3sI/WEqcbpIsGI0= 10 | github.com/filecoin-project/go-crypto v0.1.0/go.mod h1:K9UFXvvoyAVvB+0Le7oGlKiT9mgA5FHOJdYQXEE8IhI= 11 | github.com/filecoin-project/go-fil-commcid v0.2.0 h1:B+5UX8XGgdg/XsdUpST4pEBviKkFOw+Fvl2bLhSKGpI= 12 | github.com/filecoin-project/go-fil-commcid v0.2.0/go.mod h1:8yigf3JDIil+/WpqR5zoKyP0jBPCOGtEqq/K1CcMy9Q= 13 | github.com/filecoin-project/go-state-types v0.16.0 h1:ajIREDzTGfq71ofIQ29iZR1WXxmkvd2nQNc6ApcP1wI= 14 | github.com/filecoin-project/go-state-types v0.16.0/go.mod h1:YCESyrqnyu17y0MazbV6Uwma5+BrMvEKEQp5QWeIf9g= 15 | github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= 16 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 17 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 18 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 19 | github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= 20 | github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= 21 | github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= 22 | github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= 23 | github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= 24 | github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= 25 | github.com/ipfs/boxo v0.20.0 h1:umUl7q1v5g5AX8FPLTnZBvvagLmT+V0Tt61EigP81ec= 26 | github.com/ipfs/boxo v0.20.0/go.mod h1:mwttn53Eibgska2DhVIj7ln3UViq7MVHRxOMb+ehSDM= 27 | github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= 28 | github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= 29 | github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= 30 | github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= 31 | github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= 32 | github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= 33 | github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= 34 | github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= 35 | github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= 36 | github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= 37 | github.com/ipfs/go-ipld-cbor v0.2.0 h1:VHIW3HVIjcMd8m4ZLZbrYpwjzqlVUfjLM7oK4T5/YF0= 38 | github.com/ipfs/go-ipld-cbor v0.2.0/go.mod h1:Cp8T7w1NKcu4AQJLqK0tWpd1nkgTxEVB5C6kVpLW6/0= 39 | github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= 40 | github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= 41 | github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= 42 | github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= 43 | github.com/ipfs/go-metrics-interface v0.0.1 
h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= 44 | github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= 45 | github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= 46 | github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= 47 | github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= 48 | github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= 49 | github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 50 | github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= 51 | github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= 52 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 53 | github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= 54 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 55 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 56 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 57 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 58 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 59 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 60 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 61 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 62 | github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= 63 | github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= 64 | github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= 65 | github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= 66 | github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= 67 | github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= 68 | github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= 69 | github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= 70 | github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= 71 | github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= 72 | github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= 73 | github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= 74 | github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= 75 | github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= 76 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 77 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 78 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 79 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 80 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 81 | github.com/polydawn/refmt v0.89.0 
h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= 82 | github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= 83 | github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 84 | github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 85 | github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= 86 | github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= 87 | github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= 88 | github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= 89 | github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 90 | github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 91 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 92 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 93 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 94 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 95 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 96 | github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 97 | github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= 98 | github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= 99 | github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= 100 | github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= 101 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 102 | gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b h1:CzigHMRySiX3drau9C6Q5CAbNIApmLdat5jPMqChvDA= 103 | gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b/go.mod h1:/y/V339mxv2sZmYYR64O07VuCpdNZqCTwO8ZcouTMI8= 104 | gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 h1:qwDnMxjkyLmAFgcfgTnfJrmYKWhHnci3GjDqcZp1M3Q= 105 | gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02/go.mod h1:JTnUj0mpYiAsuZLmKjTx/ex3AtMowcCgnE7YNyCEP0I= 106 | go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 107 | go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= 108 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 109 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 110 | go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= 111 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 112 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 113 | go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= 114 | go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= 115 | go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 116 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 117 | golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 118 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 119 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= 120 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 121 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 122 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 123 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 124 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 125 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 126 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 127 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 128 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 129 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 130 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 131 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 132 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 133 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 134 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 135 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 136 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 137 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 138 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 139 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 140 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 141 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 142 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 143 | golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 144 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 145 | golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 146 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 147 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 148 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 149 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= 150 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod 
h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= 151 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 152 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 153 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 154 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 155 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 156 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 157 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 158 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 159 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 160 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 161 | lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= 162 | lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= 163 | -------------------------------------------------------------------------------- /headerstubs/stdarg.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-project/filecoin-ffi/027be4e47851bff343e411b44e4fbb0f270ca2c5/headerstubs/stdarg.h -------------------------------------------------------------------------------- /headerstubs/stdbool.h: -------------------------------------------------------------------------------- 1 | #ifndef _STDBOOL_H 2 | #define _STDBOOL_H 3 | 4 | 5 | #define bool _Bool 6 | #define true 1 7 | #define false 0 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /headerstubs/stddef.h: -------------------------------------------------------------------------------- 1 | typedef unsigned long int size_t; 2 | -------------------------------------------------------------------------------- /headerstubs/stdint.h: -------------------------------------------------------------------------------- 1 | typedef unsigned char uint8_t; 2 | typedef long long int32_t; 3 | typedef unsigned long long uint32_t; 4 | typedef long long int64_t; 5 | typedef unsigned long long uint64_t; 6 | typedef unsigned long long uintptr_t; /* only valid on 64bit systems */ 7 | 8 | -------------------------------------------------------------------------------- /headerstubs/stdlib.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-project/filecoin-ffi/027be4e47851bff343e411b44e4fbb0f270ca2c5/headerstubs/stdlib.h -------------------------------------------------------------------------------- /install-filcrypto: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=SC2155 enable=require-variable-braces 3 | 4 | set -Exeo pipefail 5 | auth_header=() 6 | if [ -n "${GITHUB_TOKEN}" ]; then 7 | auth_header=("-H" "Authorization: token ${GITHUB_TOKEN}") 8 | fi 9 | 10 | # set CWD to the root of filecoin-ffi 11 | # 12 | cd "$(dirname "${BASH_SOURCE[0]}")" 13 | 14 | # tracks where the Rust sources are were we to build locally instead of 15 | # downloading from GitHub Releases 16 | # 17 | rust_sources_dir="rust" 18 | 19 | # an array 
of values passed as 'target-feature' to the Rust compiler if we're 20 | # building an optimized libfilcrypto (which takes advantage of some perf-boosting 21 | # instruction sets) 22 | # 23 | #optimized_release_rustc_target_features=$(jq -r '.[].rustc_target_feature' < "${rust_sources_dir}/rustc-target-features-optimized.json") 24 | 25 | # each value in this area is checked against the "features" of the hosts CPU 26 | # in order to determine if the host is suitable for an optimized release 27 | # 28 | cpu_features_required_for_optimized_release=$(jq -r '.[].check_cpu_for_feature | select(. != null)' < "${rust_sources_dir}/rustc-target-features-optimized.json") 29 | 30 | main() { 31 | local __release_flags=$(get_release_flags) 32 | if [ "${FFI_BUILD_FROM_SOURCE}" != "1" ] && download_release_tarball __tarball_path "${rust_sources_dir}" "filecoin-ffi" "${__release_flags}"; then 33 | local __tmp_dir=$(mktemp -d) 34 | 35 | # silence shellcheck warning as the assignment happened in 36 | # `download_release_tarball()` 37 | # shellcheck disable=SC2154 38 | # extract downloaded tarball to temporary directory 39 | # 40 | tar -C "${__tmp_dir}" -xzf "${__tarball_path}" 41 | 42 | # copy build assets into root of filecoin-ffi 43 | # 44 | 45 | find -L "${__tmp_dir}" -type f -name filcrypto.h -exec cp -- "{}" . \; 46 | find -L "${__tmp_dir}" -type f -name libfilcrypto.a -exec cp -- "{}" . \; 47 | find -L "${__tmp_dir}" -type f -name filcrypto.pc -exec cp -- "{}" . \; 48 | 49 | check_installed_files 50 | 51 | (>&2 echo "[install-filcrypto/main] successfully installed prebuilt libfilcrypto") 52 | else 53 | (>&2 echo "[install-filcrypto/main] building libfilcrypto from local sources (dir = ${rust_sources_dir})") 54 | 55 | # build libfilcrypto (and corresponding header and pkg-config) 56 | # 57 | build_from_source "${rust_sources_dir}" "${__release_flags}" 58 | 59 | # copy from Rust's build directory (target) to root of filecoin-ffi 60 | # 61 | if [ "$(uname -s)" = "Darwin" ] && [ "$(uname -m)" = "x86_64" ]; then 62 | find -L "${rust_sources_dir}/target/universal/release" -type f -name libfilcrypto.a -exec cp -- "{}" . \; 63 | else 64 | find -L "${rust_sources_dir}/target/release" -type f -name libfilcrypto.a -exec cp -- "{}" . \; 65 | fi 66 | 67 | find -L "${rust_sources_dir}" -type f -name filcrypto.h -exec cp -- "{}" . \; 68 | find -L "${rust_sources_dir}" -type f -name filcrypto.pc -exec cp -- "{}" . \; 69 | 70 | pwd 71 | ls ./*filcrypto* 72 | 73 | check_installed_files 74 | 75 | (>&2 echo "[install-filcrypto/main] successfully built and installed libfilcrypto from source") 76 | fi 77 | } 78 | 79 | download_release_tarball() { 80 | local __resultvar=$1 81 | local __rust_sources_path=$2 82 | local __repo_name=$3 83 | local __release_flags=$4 84 | local __release_tags=$(git tag --points-at "$(git rev-parse HEAD)") 85 | local __release_tag=$(echo "${__release_tags}" | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+(-\w+)?$' | head -n 1) 86 | 87 | # If we're building from a commit that doesn't have a tag, we need to 88 | # compile from source. 89 | if [ -z "${__release_tag}" ]; then 90 | (>&2 echo "[download_release_tarball] failed to determine release tag") 91 | return 1 92 | fi 93 | 94 | local __release_tag_url="https://api.github.com/repos/filecoin-project/${__repo_name}/releases/tags/${__release_tag}" 95 | 96 | # Download the non-optimized standard release. 97 | release_flag_name="standard" 98 | 99 | # TODO: This function shouldn't make assumptions about how these releases' 100 | # names are constructed. 
Marginally less-bad would be to require that this 101 | # function's caller provide the release name. 102 | # 103 | if [ "$(uname -s)" = "Darwin" ]; then 104 | # For MacOS a universal library is used so naming convention is different 105 | local __release_name="${__repo_name}-$(uname)-${release_flag_name}" 106 | else 107 | local __release_name="${__repo_name}-$(uname)-$(uname -m)-${release_flag_name}" 108 | fi 109 | 110 | (>&2 echo "[download_release_tarball] acquiring release @ ${__release_tag}") 111 | 112 | local __release_response=$(curl "${auth_header[@]}" \ 113 | --retry 3 \ 114 | --location "${__release_tag_url}") 115 | 116 | local __release_url=$(echo "${__release_response}" | jq -r ".assets[] | select(.name | contains(\"${__release_name}\")) | .url") 117 | 118 | local __tar_path="/tmp/${__release_name}_$(basename "${__release_url}").tar.gz" 119 | 120 | if [[ -z "${__release_url}" ]]; then 121 | (>&2 echo "[download_release_tarball] failed to download release (tag URL: ${__release_tag_url}, response: ${__release_response})") 122 | return 1 123 | fi 124 | 125 | local __asset_url=$(curl "${auth_header[@]}" \ 126 | --head \ 127 | --retry 3 \ 128 | --header "Accept:application/octet-stream" \ 129 | --location \ 130 | --output /dev/null \ 131 | -w "%{url_effective}" \ 132 | "${__release_url}") 133 | 134 | if ! curl "${auth_header[@]}" --retry 3 --output "${__tar_path}" "${__asset_url}"; then 135 | (>&2 echo "[download_release_tarball] failed to download release asset (tag URL: ${__release_tag_url}, asset URL: ${__asset_url})") 136 | return 1 137 | fi 138 | 139 | # set $__resultvar (which the caller provided as $1), which is the poor 140 | # man's way of returning a value from a function in Bash 141 | # 142 | eval "${__resultvar}='${__tar_path}'" 143 | } 144 | 145 | build_from_source() { 146 | local __rust_sources_path=$1 147 | local __release_flags=$2 148 | local __repo_sha1=${FFI_GIT_COMMIT:-$(git rev-parse HEAD)} 149 | local __repo_sha1_truncated="${__repo_sha1:0:16}" 150 | 151 | (>&2 echo "building from source @ ${__repo_sha1_truncated}") 152 | 153 | if ! [ -x "$(command -v cargo)" ]; then 154 | (>&2 echo '[build_from_source] Error: cargo is not installed.') 155 | (>&2 echo '[build_from_source] install Rust toolchain to resolve this problem.') 156 | exit 1 157 | fi 158 | 159 | if ! [ -x "$(command -v rustup)" ]; then 160 | (>&2 echo '[build_from_source] Error: rustup is not installed.') 161 | (>&2 echo '[build_from_source] install Rust toolchain installer to resolve this problem.') 162 | exit 1 163 | fi 164 | 165 | pushd "${__rust_sources_path}" 166 | 167 | cargo --version 168 | 169 | # In the past we were only able to build universal binaries on x86_64, 170 | # for now we just keep that behaviour. This means that on aarch64 (e.g. 171 | # Apple M1) it's a native, non-universal binary. 172 | if [ "$(uname -s)" = "Darwin" ] && [ "$(uname -m)" = "x86_64" ]; then 173 | # Rustup only installs the correct versions for the current 174 | # architecture you're on. As we cross-compile to aarch64, we need to 175 | # make sure that toolchain is installes as well. 176 | rustup target add aarch64-apple-darwin 177 | build="lipo" 178 | else 179 | build="build" 180 | fi 181 | 182 | # Check for specified GPU support. 183 | if [ "${FFI_USE_CUDA_SUPRASEAL}" == "1" ]; then 184 | # If SupraSeal is enabled, just use the `cuda-supraseal` feature and 185 | # nothing else GPU related. 186 | gpu_flags=",cuda-supraseal" 187 | 188 | if [ ! 
"$(command -v nvcc)" ]; then 189 | echo "WARNING: Cannot find nvcc for CUDA support." 190 | echo "WARNING: For CUDA support, please ensure that the CUDA toolkit is properly installed." 191 | echo "WARNING: After installation, nvcc must be in the system path." 192 | echo "" 193 | echo "WARNING: Defaulting to OpenCL GPU support(!!!)" 194 | gpu_flags=",opencl" 195 | fi 196 | else 197 | # Check if GPU usage is to be disabled. 198 | if [ "${FFI_USE_GPU}" == "0" ]; then 199 | gpu_flags="" 200 | # Check if OpenCL support is specified or we're building on Darwin. 201 | elif [ "${FFI_USE_OPENCL}" == "1" ] || [ "${FFI_PORTABLE}" == "1" ] || [ "$(uname -s)" = "Darwin" ]; then 202 | gpu_flags=",opencl" 203 | else 204 | # If GPUs are enabled and SupraSeal is not, default to CUDA support 205 | # where possible. 206 | # First ensure that nvcc (as part of the CUDA toolkit) is available -- 207 | # if it's not warn that we are defaulting GPU to OpenCL instead. 208 | gpu_flags=",cuda" 209 | 210 | if [ ! "$(command -v nvcc)" ]; then 211 | echo "WARNING: Cannot find nvcc for CUDA support." 212 | echo "WARNING: For CUDA support, please ensure that the CUDA toolkit is properly installed." 213 | echo "WARNING: After installation, nvcc must be in the system path." 214 | echo "" 215 | echo "WARNING: Defaulting to OpenCL GPU support(!!!)" 216 | gpu_flags=",opencl" 217 | fi 218 | fi 219 | fi 220 | 221 | # Default to use multicore_sdr flags, unless specified to disable 222 | use_multicore_sdr="multicore-sdr" 223 | if [ "${FFI_USE_MULTICORE_SDR}" == "0" ]; then 224 | use_multicore_sdr="" 225 | fi 226 | 227 | # By default the number or rows to discard of the TreeRLast can be set via 228 | # `FIL_PROOFS_ROWS_TO_DISCARD`. When SupraSeal PC2 is used, then this 229 | # number is fixed. 230 | use_fixed_rows_to_discard="" 231 | if [ "${FFI_USE_FIXED_ROWS_TO_DISCARD}" == "1" ]; then 232 | use_fixed_rows_to_discard=",fixed-rows-to-discard" 233 | fi 234 | 235 | additional_flags="" 236 | # Add feature specific rust flags as needed here. 237 | if [ "${FFI_USE_BLST_PORTABLE}" == "1" ] || [ "${FFI_PORTABLE}" == "1" ]; then 238 | additional_flags="${additional_flags} --no-default-features --features ${use_multicore_sdr},blst-portable${gpu_flags}${use_fixed_rows_to_discard}" 239 | else 240 | additional_flags="${additional_flags} --no-default-features --features ${use_multicore_sdr}${gpu_flags}${use_fixed_rows_to_discard}" 241 | fi 242 | 243 | echo "Using additional build flags: ${additional_flags}" 244 | if [ -n "${__release_flags}" ]; then 245 | RUSTFLAGS="-C target-feature=${__release_flags}" ./scripts/build-release.sh ${build} "${additional_flags}" 246 | else 247 | ./scripts/build-release.sh ${build} "${additional_flags}" 248 | fi 249 | 250 | popd 251 | } 252 | 253 | get_release_flags() { 254 | if [ "${FFI_PORTABLE}" == "1" ]; then 255 | echo "" 256 | return 257 | fi 258 | 259 | local __features="" 260 | 261 | # determine where to look for CPU features 262 | # 263 | if [[ ! -f "/proc/cpuinfo" ]]; then 264 | (>&2 echo "[get_release_flags] no /proc/cpuinfo file; falling back to Darwin feature detection") 265 | __features=$(sysctl -a | grep machdep.cpu | tr '[:upper:]' '[:lower:]' | grep features) 266 | else 267 | #aarch64_uname=$(uname -a | grep aarch64) 268 | x86_64_uname=$(uname -a | grep x86_64) 269 | # shellcheck disable=SC2002 270 | if [ -n "${x86_64_uname}" ]; then 271 | __features=$(cat /proc/cpuinfo | grep flags | head -n 1) 272 | else 273 | # For now we assume aarch64. 
If another supported platform is added, explicitly check for it 274 | __features=$(cat /proc/cpuinfo | grep Features | head -n 1) 275 | fi 276 | fi 277 | 278 | # Maps cpu flag to rust flags (related to entries in rust/rustc-target-features-optimized.json) 279 | if [ "$(uname -s)" = "Darwin" ] && [ "$(uname -m)" = "x86_64" ]; then 280 | feature_map=("adx:+adx" "sha_ni:+sha" "sha2:+sha2" "avx2:+avx2" "sse4_2:+sse4.2" "sse4_1:+sse4.1") 281 | else 282 | feature_map=("adx:+adx" "sha_ni:+sha" "sha2:+sha2" "sse2:+sse2" "avx2:+avx2" "avx:+avx" "sse4_2:+sse4.2" "sse4_1:+sse4.1") 283 | fi 284 | 285 | target_features="" 286 | # check for the presence of each required CPU feature 287 | # 288 | # shellcheck disable=SC2068 # the splitting is intentional 289 | for x in ${cpu_features_required_for_optimized_release[@]}; do 290 | current_feature=$(echo "${__features}" | grep -c "${x}") 291 | if [ "1" = "${current_feature}" ]; then 292 | for feature in "${feature_map[@]}"; do 293 | key=${feature%%:*} 294 | if [ "${key}" == "${x}" ]; then 295 | val=${feature#*:} 296 | if [ -z "${target_features}" ]; then 297 | target_features="${val}" 298 | else 299 | target_features="${target_features},${val}" 300 | fi 301 | fi 302 | done 303 | fi 304 | done 305 | 306 | echo "${target_features}" 307 | } 308 | 309 | check_installed_files() { 310 | pwd 311 | ls ./*filcrypto* 312 | 313 | if [[ ! -f "./filcrypto.h" ]]; then 314 | (>&2 echo "[check_installed_files] failed to install filcrypto.h") 315 | exit 1 316 | fi 317 | 318 | if [[ ! -f "./libfilcrypto.a" ]]; then 319 | (>&2 echo "[check_installed_files] failed to install libfilcrypto.a") 320 | exit 1 321 | fi 322 | 323 | if [[ ! -f "./filcrypto.pc" ]]; then 324 | (>&2 echo "[check_installed_files] failed to install filcrypto.pc") 325 | exit 1 326 | fi 327 | } 328 | 329 | main "$@"; exit 330 | -------------------------------------------------------------------------------- /mkreleaselog: -------------------------------------------------------------------------------- 1 | #!/bin/zsh 2 | 3 | # Note: This script is a modified version of the mkreleaselog script used by 4 | # the go-ipfs team. 5 | # 6 | # Usage: ./mkreleaselog v0.25.0 v0.26.0 > /tmp/release.log 7 | 8 | set -euo pipefail 9 | export GO111MODULE=on 10 | export GOPATH="$(go env GOPATH)" 11 | 12 | alias jq="jq --unbuffered" 13 | 14 | REPO_SUFFIXES_TO_STRIP=( 15 | "/v2" 16 | "/v3" 17 | "/v4" 18 | "/v5" 19 | "/v6" 20 | ) 21 | 22 | AUTHORS=( 23 | # orgs 24 | filecoin-project 25 | 26 | # Authors of personal repos used by filecoin-ffi that should be mentioned in the 27 | # release notes. 
28 | xlab 29 | ) 30 | 31 | [[ -n "${REPO_FILTER+x}" ]] || REPO_FILTER="github.com/(${$(printf "|%s" "${AUTHORS[@]}"):1})" 32 | 33 | [[ -n "${IGNORED_FILES+x}" ]] || IGNORED_FILES='^\(\.gx\|package\.json\|\.travis\.yml\|go.mod\|go\.sum|\.github|\.circleci\)$' 34 | 35 | NL=$'\n' 36 | 37 | msg() { 38 | echo "$*" >&2 39 | } 40 | 41 | statlog() { 42 | rpath="$GOPATH/src/$1" 43 | for s in $REPO_SUFFIXES_TO_STRIP; do 44 | rpath=${rpath%$s} 45 | done 46 | 47 | start="${2:-}" 48 | end="${3:-HEAD}" 49 | 50 | git -C "$rpath" log --shortstat --no-merges --pretty="tformat:%H%n%aN%n%aE" "$start..$end" | while 51 | read hash 52 | read name 53 | read email 54 | read _ # empty line 55 | read changes 56 | do 57 | changed=0 58 | insertions=0 59 | deletions=0 60 | while read count event; do 61 | if [[ "$event" =~ ^file ]]; then 62 | changed=$count 63 | elif [[ "$event" =~ ^insertion ]]; then 64 | insertions=$count 65 | elif [[ "$event" =~ ^deletion ]]; then 66 | deletions=$count 67 | else 68 | echo "unknown event $event" >&2 69 | exit 1 70 | fi 71 | done<<<"${changes//,/$NL}" 72 | 73 | jq -n \ 74 | --arg "hash" "$hash" \ 75 | --arg "name" "$name" \ 76 | --arg "email" "$email" \ 77 | --argjson "changed" "$changed" \ 78 | --argjson "insertions" "$insertions" \ 79 | --argjson "deletions" "$deletions" \ 80 | '{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}' 81 | done 82 | } 83 | 84 | # Returns a stream of deps changed between $1 and $2. 85 | dep_changes() { 86 | { 87 | <"$1" 88 | <"$2" 89 | } | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)' 90 | } 91 | 92 | # resolve_commits resolves a git ref for each version. 93 | resolve_commits() { 94 | jq '. + {Ref: (.Version|capture("^((?.*)\\+incompatible|v.*-(0\\.)?[0-9]{14}-(?[a-f0-9]{12})|(?v.*))$") | .ref1 // .ref2 // .ref3)}' 95 | } 96 | 97 | pr_link() { 98 | local repo="$1" 99 | local prnum="$2" 100 | local ghname="${repo##github.com/}" 101 | printf -- "[%s#%s](https://%s/pull/%s)" "$ghname" "$prnum" "$repo" "$prnum" 102 | } 103 | 104 | # Generate a release log for a range of commits in a single repo. 105 | release_log() { 106 | setopt local_options BASH_REMATCH 107 | 108 | local repo="$1" 109 | local start="$2" 110 | local end="${3:-HEAD}" 111 | local dir="$GOPATH/src/$repo" 112 | 113 | local commit pr 114 | git -C "$dir" log \ 115 | --format='tformat:%H %s' \ 116 | --first-parent \ 117 | "$start..$end" | 118 | while read commit subject; do 119 | # Skip gx-only PRs. 
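        # (Descriptive note: the diff-tree | grep -v pipeline below keeps a commit only if it
        # touches at least one file not matched by IGNORED_FILES, so commits that only modify
        # dependency or CI metadata are dropped from the changelog.)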
120 | git -C "$dir" diff-tree --no-commit-id --name-only "$commit^" "$commit" | 121 | grep -v "${IGNORED_FILES}" >/dev/null || continue 122 | 123 | if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then 124 | local prnum="${BASH_REMATCH[2]}" 125 | local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)" 126 | printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")" 127 | elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then 128 | local prnum="${BASH_REMATCH[2]}" 129 | printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")" 130 | else 131 | printf -- "- %s\n" "$subject" 132 | fi 133 | done 134 | } 135 | 136 | indent() { 137 | sed -e 's/^/ /' 138 | } 139 | 140 | mod_deps() { 141 | go list -json -m all | jq 'select(.Version != null)' 142 | } 143 | 144 | ensure() { 145 | local repo="$1" 146 | for s in $REPO_SUFFIXES_TO_STRIP; do 147 | repo=${repo%$s} 148 | done 149 | 150 | local commit="$2" 151 | 152 | local rpath="$GOPATH/src/$repo" 153 | if [[ ! -d "$rpath" ]]; then 154 | msg "Cloning $repo..." 155 | git clone "http://$repo" "$rpath" >&2 156 | fi 157 | 158 | if ! git -C "$rpath" rev-parse --verify "$commit" >/dev/null; then 159 | msg "Fetching $repo..." 160 | git -C "$rpath" fetch --all >&2 161 | fi 162 | 163 | git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1 164 | } 165 | 166 | statsummary() { 167 | jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' | 168 | jq '. + {Lines: (.Deletions + .Insertions)}' 169 | } 170 | 171 | recursive_release_log() { 172 | local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}" 173 | local end="${2:-$(git rev-parse HEAD)}" 174 | local repo_root="$(git rev-parse --show-toplevel)" 175 | local package="$(cd "$repo_root" && go list)" 176 | 177 | if ! [[ "${GOPATH}/${package}" != "${repo_root}" ]]; then 178 | echo "This script requires the target package and all dependencies to live in a GOPATH." 179 | return 1 180 | fi 181 | 182 | ( 183 | local result=0 184 | local workspace="$(mktemp -d)" 185 | trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT 186 | cd "$workspace" 187 | 188 | echo "Computing old deps..." >&2 189 | git -C "$repo_root" show "$start:go.mod" >go.mod 190 | mod_deps | resolve_commits | jq -s > old_deps.json 191 | 192 | echo "Computing new deps..." >&2 193 | git -C "$repo_root" show "$end:go.mod" >go.mod 194 | mod_deps | resolve_commits | jq -s > new_deps.json 195 | 196 | rm -f go.mod go.sum 197 | 198 | printf -- "Generating Changelog for %s %s..%s\n" "$package" "$start" "$end" >&2 199 | 200 | printf -- "- %s:\n" "$package" 201 | release_log "$package" "$start" "$end" | indent 202 | 203 | statlog "$package" "$start" "$end" > statlog.json 204 | 205 | dep_changes old_deps.json new_deps.json | 206 | jq --arg filter "$REPO_FILTER" 'select(.Path | match($filter))' | 207 | # Compute changelogs 208 | jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' | 209 | while read repo new new_ref old old_ref; do 210 | for s in $REPO_SUFFIXES_TO_STRIP; do 211 | repo=${repo%$s} 212 | done 213 | 214 | if ! 
ensure "$repo" "$new_ref"; then 215 | result=1 216 | local changelog="failed to fetch repo" 217 | else 218 | statlog "$repo" "$old_ref" "$new_ref" >> statlog.json 219 | local changelog="$(release_log "$repo" "$old_ref" "$new_ref")" 220 | fi 221 | if [[ -n "$changelog" ]]; then 222 | printf -- "- %s (%s -> %s):\n" "$repo" "$old" "$new" 223 | echo "$changelog" | indent 224 | fi 225 | done 226 | 227 | echo 228 | echo "Contributors" 229 | echo 230 | 231 | echo "| Contributor | Commits | Lines ± | Files Changed |" 232 | echo "|-------------|---------|---------|---------------|" 233 | statsummary ", 7 | "dignifiedquire ", 8 | "laser ", 9 | ] 10 | license = "MIT OR Apache-2.0" 11 | repository = "https://github.com/filecoin-project/filecoin-ffi" 12 | readme = "README.md" 13 | edition = "2021" 14 | resolver = "2" 15 | publish = false 16 | 17 | [lib] 18 | crate-type = ["rlib", "staticlib"] 19 | 20 | [dependencies] 21 | bls-signatures = { version = "0.15.0", default-features = false, features = [ 22 | "blst", 23 | ] } 24 | blstrs = "0.7" 25 | filepath = "0.1.3" 26 | group = "0.13" 27 | libc = "0.2.171" 28 | log = "0.4.27" 29 | fil_logger = "0.1.7" 30 | rand = "0.8" 31 | rand_chacha = "0.3.1" 32 | rayon = "1.10.0" 33 | anyhow = "1.0.97" 34 | serde_json = "1.0.140" 35 | rust-gpu-tools = { version = "0.7", optional = true, default-features = false } 36 | fvm4 = { package = "fvm", version = "~4.7.2", default-features = false, features = ["verify-signature", "nv27-dev"] } 37 | fvm4_shared = { package = "fvm_shared", version = "~4.7.2" } 38 | fvm3 = { package = "fvm", version = "~3.13.0", default-features = false } 39 | fvm3_shared = { package = "fvm_shared", version = "~3.13.0" } 40 | fvm2 = { package = "fvm", version = "~2.11.0", default-features = false } 41 | fvm2_shared = { package = "fvm_shared", version = "~2.11.0" } 42 | fvm_ipld_encoding = "0.5.3" 43 | fvm_ipld_blockstore = "0.3.1" 44 | num-traits = "0.2.19" 45 | cid = { version = "0.11.1", features = ["serde"], default-features = false } 46 | lazy_static = "1.5.0" 47 | serde = "1.0.219" 48 | safer-ffi = { version = "0.1.13", features = ["proc_macros"] } 49 | filecoin-proofs-api = { version = "18.1", default-features = false } 50 | yastl = "0.1.2" 51 | 52 | [dev-dependencies] 53 | memmap2 = "0.9" 54 | tempfile = "3.19.1" 55 | 56 | [features] 57 | default = ["cuda", "multicore-sdr"] 58 | blst-portable = ["bls-signatures/blst-portable", "blstrs/portable"] 59 | cuda = [ 60 | "filecoin-proofs-api/cuda", 61 | "rust-gpu-tools/cuda", 62 | "fvm2/cuda", 63 | "fvm3/cuda", 64 | "fvm4/cuda", 65 | ] 66 | cuda-supraseal = [ 67 | "filecoin-proofs-api/cuda-supraseal", 68 | "rust-gpu-tools/cuda", 69 | "fvm3/cuda-supraseal", 70 | "fvm4/cuda-supraseal", 71 | ] 72 | opencl = [ 73 | "filecoin-proofs-api/opencl", 74 | "rust-gpu-tools/opencl", 75 | "fvm2/opencl", 76 | "fvm3/opencl", 77 | "fvm4/opencl", 78 | ] 79 | multicore-sdr = ["filecoin-proofs-api/multicore-sdr"] 80 | c-headers = ["safer-ffi/headers"] 81 | # This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD` 82 | # setting is ignored, no `TemporaryAux` file will be written. 
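# (Cross-reference: the install-filcrypto script enables this feature when
# FFI_USE_FIXED_ROWS_TO_DISCARD=1 is set in the environment.)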
83 | fixed-rows-to-discard = ["filecoin-proofs-api/fixed-rows-to-discard"] 84 | -------------------------------------------------------------------------------- /rust/filcrypto.pc.template: -------------------------------------------------------------------------------- 1 | Name: filcrypto 2 | Version: @VERSION@ 3 | Description: C bindings for Filecoin Proofs 4 | Libs: @PRIVATE_LIBS@ 5 | -------------------------------------------------------------------------------- /rust/filecoin.pc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/filecoin-project/filecoin-ffi/027be4e47851bff343e411b44e4fbb0f270ca2c5/rust/filecoin.pc -------------------------------------------------------------------------------- /rust/rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.86.0" 3 | components = ["clippy", "rustfmt"] 4 | -------------------------------------------------------------------------------- /rust/rustc-target-features-optimized.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "rustc_target_feature": "+adx", 4 | "check_cpu_for_feature": "adx" 5 | }, 6 | { 7 | "rustc_target_feature": "+sha", 8 | "check_cpu_for_feature": "sha_ni" 9 | }, 10 | { 11 | "rustc_target_feature": "+sha2", 12 | "check_cpu_for_feature": "sha2" 13 | }, 14 | { 15 | "rustc_target_feature": "+sse2", 16 | "check_cpu_for_feature": "sse2" 17 | }, 18 | { 19 | "rustc_target_feature": "+avx2", 20 | "check_cpu_for_feature": "avx2" 21 | }, 22 | { 23 | "rustc_target_feature": "+avx", 24 | "check_cpu_for_feature": "avx" 25 | }, 26 | { 27 | "rustc_target_feature": "+sse4.2", 28 | "check_cpu_for_feature": "sse4_2" 29 | }, 30 | { 31 | "rustc_target_feature": "+sse4.1", 32 | "check_cpu_for_feature": "sse4_1" 33 | } 34 | ] 35 | -------------------------------------------------------------------------------- /rust/scripts/build-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Exeo pipefail 4 | 5 | main() { 6 | if [[ -z "${1}" ]] 7 | then 8 | (>&2 echo '[build-release/main] Error: script requires a build action, e.g. ./build-release.sh [build|lipo]') 9 | exit 1 10 | fi 11 | 12 | local __action="${1}" 13 | 14 | # temporary place for storing build output (cannot use 'local', because 15 | # 'trap' is not going to have access to variables scoped to this function) 16 | # 17 | __build_output_log_tmp=$(mktemp) 18 | 19 | # clean up temp file on exit 20 | # 21 | trap '{ rm -f $__build_output_log_tmp; }' EXIT 22 | 23 | # build with RUSTFLAGS configured to output linker flags for native libs 24 | # 25 | local __rust_flags="--print native-static-libs ${RUSTFLAGS}" 26 | 27 | # shellcheck disable=SC2068 # the rest of the parameters should be split 28 | RUSTFLAGS="${__rust_flags}" \ 29 | cargo build \ 30 | --release --locked ${@:2} 2>&1 | tee ${__build_output_log_tmp} 31 | 32 | # parse build output for linker flags 33 | # 34 | local __linker_flags=$(cat ${__build_output_log_tmp} \ 35 | | grep native-static-libs\: \ 36 | | head -n 1 \ 37 | | cut -d ':' -f 3) 38 | 39 | echo "Linker Flags: ${__linker_flags}" 40 | # Build a universal binary when `lipo` is enabled, independent of which 41 | # architecture we are on. 
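    # Illustrative invocation that takes this branch (as install-filcrypto does on
    # macOS x86_64; the exact feature list varies with the FFI_* environment variables):
    #   ./scripts/build-release.sh lipo --no-default-features --features multicore-sdr,opencl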
42 | if [ "${__action}" = "lipo" ]; then 43 | # With lipo enabled, this replacement may not be necessary, 44 | # but leaving it in doesn't hurt as it does nothing if not 45 | # needed 46 | __linker_flags=$(echo ${__linker_flags} | sed 's/-lOpenCL/-framework OpenCL/g') 47 | echo "Using Linker Flags: ${__linker_flags}" 48 | 49 | # Build again for the other architecture. 50 | if [ "$(uname -m)" = "x86_64" ]; then 51 | __target="aarch64-apple-darwin" 52 | else 53 | __target="x86_64-apple-darwin" 54 | fi 55 | 56 | # shellcheck disable=SC2068 # the rest of the parameters should be split 57 | RUSTFLAGS="${__rust_flags}" \ 58 | cargo build \ 59 | --release --locked --target ${__target} ${@:2} 2>&1 \ 60 | | tee ${__build_output_log_tmp} 61 | 62 | # Create the universal binary/ 63 | lipo -create -output libfilcrypto.a \ 64 | target/release/libfilcrypto.a \ 65 | target/${__target}/release/libfilcrypto.a 66 | 67 | find . -type f -name "libfilcrypto.a" 68 | rm -f ./target/aarch64-apple-darwin/release/libfilcrypto.a 69 | rm -f ./target/x86_64-apple-darwin/release/libfilcrypto.a 70 | rm -f ./target/release/libfilcrypto.a 71 | echo "Eliminated non-universal binary libraries" 72 | find . -type f -name "libfilcrypto.a" 73 | fi 74 | 75 | # generate filcrypto.h 76 | # The header files are the same even without having any features enables, 77 | # this reduces the compile time and makes it work on more platforms. 78 | RUSTFLAGS="${__rust_flags}" HEADER_DIR="." \ 79 | cargo test --no-default-features --locked build_headers --features c-headers 80 | 81 | # generate pkg-config 82 | # 83 | sed -e "s;@VERSION@;$(git rev-parse HEAD);" \ 84 | -e "s;@PRIVATE_LIBS@;${__linker_flags};" "filcrypto.pc.template" > "filcrypto.pc" 85 | 86 | # ensure header file was built 87 | # 88 | find -L . -type f -name "filcrypto.h" | read 89 | 90 | # ensure the archive file was built 91 | # 92 | find -L . -type f -name "libfilcrypto.a" | read 93 | } 94 | 95 | main "$@"; exit 96 | -------------------------------------------------------------------------------- /rust/scripts/package-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Exeuo pipefail 4 | 5 | main() { 6 | if [[ -z "$1" ]] 7 | then 8 | (>&2 echo '[package-release/main] Error: script requires path to which it will write release (gzipped) tarball, e.g. "/tmp/filecoin-ffi-Darwin-standard.tar.tz"') 9 | exit 1 10 | fi 11 | 12 | local __tarball_output_path=$1 13 | 14 | # create temporary directory to hold build artifacts (must not be declared 15 | # with 'local' because we will use 'trap' to clean it up) 16 | # 17 | __tmp_dir=$(mktemp -d) 18 | 19 | (>&2 echo "[package-release/main] preparing release files") 20 | 21 | # clean up temp directory on exit 22 | # 23 | trap '{ rm -rf $__tmp_dir; }' EXIT 24 | 25 | # copy assets into temporary directory 26 | # 27 | find -L . -type f -name filcrypto.h -exec cp -- "{}" $__tmp_dir/ \; 28 | find -L . -type f -name libfilcrypto.a -exec cp -- "{}" $__tmp_dir/ \; 29 | find -L . 
-type f -name filcrypto.pc -exec cp -- "{}" $__tmp_dir/ \; 30 | 31 | # create gzipped tarball from contents of temporary directory 32 | # 33 | tar -czf $__tarball_output_path $__tmp_dir/* 34 | 35 | (>&2 echo "[package-release/main] release file created: $__tarball_output_path") 36 | } 37 | 38 | main "$@"; exit 39 | -------------------------------------------------------------------------------- /rust/scripts/publish-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Exeuo pipefail 4 | 5 | main() { 6 | if [[ -z "$1" ]] 7 | then 8 | (>&2 echo '[publish-release/main] Error: script requires a release (gzipped) tarball path, e.g. "/tmp/filecoin-ffi-Darwin-standard.tar.tz"') 9 | exit 1 10 | fi 11 | 12 | if [[ -z "$2" ]] 13 | then 14 | (>&2 echo '[publish-release/main] Error: script requires a release name, e.g. "filecoin-ffi-Darwin-standard" or "filecoin-ffi-Linux-standard"') 15 | exit 1 16 | fi 17 | 18 | local __release_file=$1 19 | local __release_url="${GITHUB_RELEASE_URL}" 20 | local __release_target="$(basename $__release_file)" 21 | 22 | # make sure we have a token set, api requests won't work otherwise 23 | if [ -z $GITHUB_TOKEN ]; then 24 | (>&2 echo "[publish-release/main] \$GITHUB_TOKEN not set, publish failed") 25 | exit 1 26 | fi 27 | 28 | # make sure we have a release url set 29 | if [ -z "$GITHUB_RELEASE_URL" ]; then 30 | (>&2 echo "[publish-release/main] \$GITHUB_RELEASE_URL not set, publish failed") 31 | exit 1 32 | fi 33 | 34 | # see if the release already exists by tag 35 | local __release_response=` 36 | curl \ 37 | --header "Authorization: token $GITHUB_TOKEN" \ 38 | "$__release_url" 39 | ` 40 | 41 | local __release_id=`echo $__release_response | jq '.id'` 42 | 43 | if [ "$__release_id" = "null" ]; then 44 | (>&2 echo '[publish-release/main] release does not exist') 45 | exit 1 46 | fi 47 | 48 | __release_target_asset=`echo $__release_response | jq -r ".assets | .[] | select(.name == \"$__release_target\")"` 49 | 50 | if [ -n "$__release_target_asset" ]; then 51 | (>&2 echo "[publish-release/main] $__release_target_asset already exists, deleting") 52 | 53 | __release_target_asset_url=`echo $__release_target_asset | jq -r '.url'` 54 | 55 | curl \ 56 | --request DELETE \ 57 | --header "Authorization: token $GITHUB_TOKEN" \ 58 | "$__release_target_asset_url" 59 | fi 60 | 61 | __release_upload_url=`echo $__release_response | jq -r '.upload_url' | cut -d'{' -f1` 62 | 63 | curl \ 64 | --request POST \ 65 | --header "Authorization: token $GITHUB_TOKEN" \ 66 | --header "Content-Type: application/octet-stream" \ 67 | --data-binary "@$__release_file" \ 68 | "$__release_upload_url?name=$__release_target" 69 | 70 | (>&2 echo '[publish-release/main] release file uploaded') 71 | } 72 | 73 | main "$@"; exit 74 | -------------------------------------------------------------------------------- /rust/src/bls/api.rs: -------------------------------------------------------------------------------- 1 | use bls_signatures::{ 2 | aggregate as aggregate_sig, hash as hash_sig, verify as verify_sig, 3 | verify_messages as verify_messages_sig, Error, PrivateKey, PublicKey, Serialize, Signature, 4 | }; 5 | use blstrs::{G2Affine, G2Projective}; 6 | use group::prime::PrimeCurveAffine; 7 | use group::GroupEncoding; 8 | 9 | use rand::rngs::OsRng; 10 | use rand::SeedableRng; 11 | use rand_chacha::ChaChaRng; 12 | use rayon::prelude::*; 13 | use safer_ffi::prelude::*; 14 | 15 | pub const SIGNATURE_BYTES: usize = 96; 16 | pub const 
PRIVATE_KEY_BYTES: usize = 32; 17 | pub const PUBLIC_KEY_BYTES: usize = 48; 18 | pub const DIGEST_BYTES: usize = 96; 19 | 20 | pub type BLSSignature = [u8; SIGNATURE_BYTES]; 21 | pub type BLSPrivateKey = [u8; PRIVATE_KEY_BYTES]; 22 | pub type BLSPublicKey = [u8; PUBLIC_KEY_BYTES]; 23 | pub type BLSDigest = [u8; DIGEST_BYTES]; 24 | 25 | /// Unwraps or returns the passed in value. 26 | macro_rules! try_ffi { 27 | ($res:expr, $val:expr) => {{ 28 | #[allow(clippy::blocks_in_conditions)] 29 | match $res { 30 | Ok(res) => res, 31 | Err(_) => return $val, 32 | } 33 | }}; 34 | } 35 | 36 | #[ffi_export] 37 | fn destroy_box_bls_digest(ptr: repr_c::Box) { 38 | drop(ptr); 39 | } 40 | 41 | #[ffi_export] 42 | fn destroy_box_bls_private_key(ptr: repr_c::Box) { 43 | drop(ptr); 44 | } 45 | 46 | #[ffi_export] 47 | fn destroy_box_bls_public_key(ptr: repr_c::Box) { 48 | drop(ptr); 49 | } 50 | #[ffi_export] 51 | fn destroy_box_bls_signature(ptr: repr_c::Box) { 52 | drop(ptr); 53 | } 54 | 55 | /// Compute the digest of a message 56 | /// 57 | /// # Arguments 58 | /// 59 | /// * `message` - reference to a message byte array 60 | #[ffi_export] 61 | pub fn hash(message: c_slice::Ref<'_, u8>) -> repr_c::Box { 62 | // call method 63 | let raw_digest = hash_sig(&message).to_bytes(); 64 | let digest: [u8; DIGEST_BYTES] = raw_digest.as_ref().try_into().expect("known size"); 65 | 66 | Box::new(digest).into() 67 | } 68 | 69 | /// Aggregate signatures together into a new signature 70 | /// 71 | /// # Arguments 72 | /// 73 | /// * `flattened_signatures` - byte array containing signatures 74 | /// 75 | /// Returns `None` on error. Result must be freed using `destroy_aggregate_response`. 76 | #[ffi_export] 77 | pub fn aggregate(flattened_signatures: c_slice::Ref<'_, u8>) -> Option> { 78 | // prep request 79 | let signatures = try_ffi!( 80 | flattened_signatures 81 | .par_chunks(SIGNATURE_BYTES) 82 | .map(|item| { Signature::from_bytes(item) }) 83 | .collect::, _>>(), 84 | None 85 | ); 86 | 87 | let mut signature: [u8; SIGNATURE_BYTES] = [0; SIGNATURE_BYTES]; 88 | 89 | let aggregated = try_ffi!(aggregate_sig(&signatures), None); 90 | aggregated 91 | .write_bytes(&mut signature.as_mut()) 92 | .expect("preallocated"); 93 | 94 | Some(Box::new(signature).into()) 95 | } 96 | 97 | /// Verify that a signature is the aggregated signature of hashes - pubkeys 98 | /// 99 | /// # Arguments 100 | /// 101 | /// * `signature` - signature byte array (SIGNATURE_BYTES long) 102 | /// * `flattened_digests` - byte array containing digests 103 | /// * `flattened_public_keys` - byte array containing public keys 104 | #[ffi_export] 105 | pub fn verify( 106 | signature: c_slice::Ref<'_, u8>, 107 | flattened_digests: c_slice::Ref<'_, u8>, 108 | flattened_public_keys: c_slice::Ref<'_, u8>, 109 | ) -> bool { 110 | // prep request 111 | let signature = try_ffi!(Signature::from_bytes(&signature), false); 112 | 113 | if flattened_digests.len() % DIGEST_BYTES != 0 { 114 | return false; 115 | } 116 | if flattened_public_keys.len() % PUBLIC_KEY_BYTES != 0 { 117 | return false; 118 | } 119 | 120 | if flattened_digests.len() / DIGEST_BYTES != flattened_public_keys.len() / PUBLIC_KEY_BYTES { 121 | return false; 122 | } 123 | 124 | let digests: Vec<_> = try_ffi!( 125 | flattened_digests 126 | .par_chunks(DIGEST_BYTES) 127 | .map(|item: &[u8]| { 128 | let mut digest = [0u8; DIGEST_BYTES]; 129 | digest.as_mut().copy_from_slice(item); 130 | 131 | let affine: Option = Option::from(G2Affine::from_compressed(&digest)); 132 | 
affine.map(Into::into).ok_or(Error::CurveDecode) 133 | }) 134 | .collect::, Error>>(), 135 | false 136 | ); 137 | 138 | let public_keys: Vec<_> = try_ffi!( 139 | flattened_public_keys 140 | .par_chunks(PUBLIC_KEY_BYTES) 141 | .map(|item| { PublicKey::from_bytes(item) }) 142 | .collect::>(), 143 | false 144 | ); 145 | 146 | verify_sig(&signature, digests.as_slice(), public_keys.as_slice()) 147 | } 148 | 149 | /// Verify that a signature is the aggregated signature of the hashed messages 150 | /// 151 | /// # Arguments 152 | /// 153 | /// * `signature` - signature byte array (SIGNATURE_BYTES long) 154 | /// * `messages` - array containing the pointers to the messages 155 | /// * `messages_sizes` - array containing the lengths of the messages 156 | /// * `messages_len` - length of the two messages arrays 157 | /// * `flattened_public_keys` - byte array containing public keys 158 | #[ffi_export] 159 | pub fn hash_verify( 160 | signature: c_slice::Ref<'_, u8>, 161 | flattened_messages: c_slice::Ref<'_, u8>, 162 | message_sizes: c_slice::Ref<'_, libc::size_t>, 163 | flattened_public_keys: c_slice::Ref<'_, u8>, 164 | ) -> bool { 165 | // prep request 166 | let signature = try_ffi!(Signature::from_bytes(&signature), false); 167 | 168 | // split the flattened message array into slices of individual messages to be hashed 169 | let mut messages: Vec<&[u8]> = Vec::with_capacity(message_sizes.len()); 170 | let mut offset = 0; 171 | for chunk_size in message_sizes.iter() { 172 | messages.push(&flattened_messages[offset..offset + *chunk_size]); 173 | offset += *chunk_size 174 | } 175 | 176 | if flattened_public_keys.len() % PUBLIC_KEY_BYTES != 0 { 177 | return false; 178 | } 179 | 180 | let public_keys: Vec<_> = try_ffi!( 181 | flattened_public_keys 182 | .par_chunks(PUBLIC_KEY_BYTES) 183 | .map(|item| { PublicKey::from_bytes(item) }) 184 | .collect::>(), 185 | false 186 | ); 187 | 188 | verify_messages_sig(&signature, &messages, &public_keys) 189 | } 190 | 191 | /// Generate a new private key 192 | #[ffi_export] 193 | pub fn private_key_generate() -> repr_c::Box { 194 | let mut raw_private_key: [u8; PRIVATE_KEY_BYTES] = [0; PRIVATE_KEY_BYTES]; 195 | PrivateKey::generate(&mut OsRng) 196 | .write_bytes(&mut raw_private_key.as_mut()) 197 | .expect("preallocated"); 198 | 199 | Box::new(raw_private_key).into() 200 | } 201 | 202 | /// Generate a new private key with seed 203 | /// 204 | /// **Warning**: Use this function only for testing or with very secure seeds 205 | /// 206 | /// # Arguments 207 | /// 208 | /// * `raw_seed` - a seed byte array with 32 bytes 209 | #[ffi_export] 210 | pub fn private_key_generate_with_seed(raw_seed: &[u8; 32]) -> repr_c::Box { 211 | let rng = &mut ChaChaRng::from_seed(*raw_seed); 212 | 213 | let mut raw_private_key: [u8; PRIVATE_KEY_BYTES] = [0; PRIVATE_KEY_BYTES]; 214 | PrivateKey::generate(rng) 215 | .write_bytes(&mut raw_private_key.as_mut()) 216 | .expect("preallocated"); 217 | 218 | Box::new(raw_private_key).into() 219 | } 220 | 221 | /// Sign a message with a private key and return the signature 222 | /// 223 | /// # Arguments 224 | /// 225 | /// * `raw_private_key` - private key byte array 226 | /// * `message` - message byte array 227 | /// 228 | /// Returns `None` when passed invalid arguments. 
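/// # Example (illustrative sketch; mirrors the `key_verification` unit test below)
///
/// ```ignore
/// let private_key = private_key_generate();
/// let message = b"hello world";
/// let signature = private_key_sign(private_key[..].into(), message[..].into()).unwrap();
/// assert_eq!(signature.len(), SIGNATURE_BYTES);
/// ```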
229 | #[ffi_export] 230 | pub fn private_key_sign( 231 | raw_private_key: c_slice::Ref<'_, u8>, 232 | message: c_slice::Ref<'_, u8>, 233 | ) -> Option> { 234 | let private_key = try_ffi!(PrivateKey::from_bytes(&raw_private_key), None); 235 | 236 | let mut raw_signature: [u8; SIGNATURE_BYTES] = [0; SIGNATURE_BYTES]; 237 | PrivateKey::sign(&private_key, &message[..]) 238 | .write_bytes(&mut raw_signature.as_mut()) 239 | .expect("preallocated"); 240 | 241 | Some(Box::new(raw_signature).into()) 242 | } 243 | 244 | /// Generate the public key for a private key 245 | /// 246 | /// # Arguments 247 | /// 248 | /// * `raw_private_key` - private key byte array 249 | /// 250 | /// Returns `None` when passed invalid arguments. 251 | #[ffi_export] 252 | pub fn private_key_public_key( 253 | raw_private_key: c_slice::Ref<'_, u8>, 254 | ) -> Option> { 255 | let private_key = try_ffi!(PrivateKey::from_bytes(&raw_private_key), None); 256 | 257 | let mut raw_public_key: [u8; PUBLIC_KEY_BYTES] = [0; PUBLIC_KEY_BYTES]; 258 | private_key 259 | .public_key() 260 | .write_bytes(&mut raw_public_key.as_mut()) 261 | .expect("preallocated"); 262 | 263 | Some(Box::new(raw_public_key).into()) 264 | } 265 | 266 | /// Returns a zero signature, used as placeholder in Filecoin. 267 | /// 268 | /// The return value is a pointer to a compressed signature in bytes, of length `SIGNATURE_BYTES` 269 | #[ffi_export] 270 | pub fn create_zero_signature() -> repr_c::Box { 271 | let sig: Signature = G2Affine::identity().into(); 272 | 273 | let mut raw_signature: [u8; SIGNATURE_BYTES] = [0; SIGNATURE_BYTES]; 274 | 275 | sig.write_bytes(&mut raw_signature.as_mut()) 276 | .expect("preallocated"); 277 | 278 | Box::new(raw_signature).into() 279 | } 280 | 281 | #[cfg(test)] 282 | mod tests { 283 | use super::*; 284 | 285 | #[test] 286 | fn key_verification() { 287 | let private_key = private_key_generate(); 288 | let public_key = private_key_public_key(private_key[..].into()).unwrap(); 289 | let message = b"hello world"; 290 | let digest = hash(message[..].into()); 291 | let signature = private_key_sign(private_key[..].into(), message[..].into()).unwrap(); 292 | let verified = verify( 293 | signature[..].into(), 294 | digest[..].into(), 295 | public_key[..].into(), 296 | ); 297 | 298 | assert!(verified); 299 | 300 | let message_sizes = [message.len()]; 301 | let flattened_messages = message; 302 | 303 | let verified = hash_verify( 304 | signature[..].into(), 305 | flattened_messages[..].into(), 306 | message_sizes[..].into(), 307 | public_key[..].into(), 308 | ); 309 | 310 | assert!(verified); 311 | 312 | let different_message = b"bye world"; 313 | let different_digest = hash(different_message[..].into()); 314 | let not_verified = verify( 315 | signature[..].into(), 316 | different_digest[..].into(), 317 | public_key[..].into(), 318 | ); 319 | 320 | assert!(!not_verified); 321 | 322 | // garbage verification 323 | let different_digest = [0, 1, 2, 3, 4]; 324 | let not_verified = verify( 325 | signature[..].into(), 326 | different_digest[..].into(), 327 | public_key[..].into(), 328 | ); 329 | 330 | assert!(!not_verified); 331 | } 332 | 333 | #[test] 334 | fn private_key_with_seed() { 335 | let seed = [5u8; 32]; 336 | let private_key = private_key_generate_with_seed(&seed); 337 | assert_eq!( 338 | &[ 339 | 56, 13, 181, 159, 37, 1, 12, 96, 45, 77, 254, 118, 103, 235, 218, 176, 220, 241, 340 | 142, 119, 206, 233, 83, 35, 26, 15, 118, 198, 192, 120, 179, 52 341 | ], 342 | &private_key[..], 343 | ); 344 | } 345 | 346 | #[test] 347 | fn 
test_zero_key() { 348 | let resp = create_zero_signature(); 349 | let sig = Signature::from_bytes(&(*resp)).unwrap(); 350 | 351 | assert_eq!(sig, Signature::from(G2Affine::identity())); 352 | } 353 | } 354 | -------------------------------------------------------------------------------- /rust/src/bls/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | -------------------------------------------------------------------------------- /rust/src/fvm/blockstore/cgo.rs: -------------------------------------------------------------------------------- 1 | use std::ptr; 2 | 3 | use anyhow::{anyhow, Result}; 4 | use cid::Cid; 5 | use fvm3_shared::MAX_CID_LEN; 6 | use fvm_ipld_blockstore::Blockstore; 7 | 8 | use super::super::cgo::*; 9 | 10 | /// The maximum amount of data to buffer in a batch before writing it to the underlying blockstore. 11 | const MAX_BUF_SIZE: usize = 64 << 20; // 64MiB 12 | /// The maximum number of blocks to buffer in a batch before before writing it to the underlying 13 | /// blockstore. This will allocate 0.5MiB of memory to store offsets. 14 | const MAX_BLOCK_BATCH: usize = 64 << 10; 15 | 16 | pub struct CgoBlockstore { 17 | handle: u64, 18 | } 19 | 20 | impl CgoBlockstore { 21 | /// Construct a new blockstore from a handle. 22 | pub fn new(handle: u64) -> CgoBlockstore { 23 | CgoBlockstore { handle } 24 | } 25 | } 26 | 27 | impl Blockstore for CgoBlockstore { 28 | fn has(&self, k: &Cid) -> Result { 29 | let k_bytes = k.to_bytes(); 30 | unsafe { 31 | match cgo_blockstore_has(self.handle, k_bytes.as_ptr(), k_bytes.len() as i32) { 32 | // We shouldn't get an "error not found" here, but there's no reason to be strict 33 | // about it. 34 | 0 => Ok(false), 35 | x if x == FvmError::NotFound as i32 => Ok(false), 36 | 1 => Ok(true), 37 | // Panic on unknown values. There's a bug in the program. 38 | r @ 2.. => panic!("invalid return value from has: {}", r), 39 | // Panic if the store isn't registered. This means something _very_ unsafe is going 40 | // on and there is a bug in the program. 41 | x if x == FvmError::InvalidHandle as i32 => { 42 | panic!("blockstore {} not registered", self.handle) 43 | } 44 | // Otherwise, return "other". We should add error codes in the future. 45 | e => Err(anyhow!("cgo blockstore 'has' failed with error code {}", e)), 46 | } 47 | } 48 | } 49 | 50 | fn get(&self, k: &Cid) -> Result>> { 51 | let k_bytes = k.to_bytes(); 52 | unsafe { 53 | let mut buf: *mut u8 = ptr::null_mut(); 54 | let mut size: i32 = 0; 55 | match cgo_blockstore_get( 56 | self.handle, 57 | k_bytes.as_ptr(), 58 | k_bytes.len() as i32, 59 | &mut buf, 60 | &mut size, 61 | ) { 62 | 0 => Ok(Some(Vec::from_raw_parts(buf, size as usize, size as usize))), 63 | r @ 1.. 
=> panic!("invalid return value from get: {}", r), 64 | x if x == FvmError::InvalidHandle as i32 => { 65 | panic!("blockstore {} not registered", self.handle) 66 | } 67 | x if x == FvmError::NotFound as i32 => Ok(None), 68 | e => Err(anyhow!("cgo blockstore 'get' failed with error code {}", e)), 69 | } 70 | } 71 | } 72 | 73 | fn put_many_keyed(&self, blocks: I) -> Result<()> 74 | where 75 | Self: Sized, 76 | D: AsRef<[u8]>, 77 | I: IntoIterator, 78 | { 79 | fn flush_buffered(handle: u64, lengths: &mut Vec, buf: &mut Vec) -> Result<()> { 80 | if buf.is_empty() { 81 | return Ok(()); 82 | } 83 | 84 | unsafe { 85 | let result = cgo_blockstore_put_many( 86 | handle, 87 | lengths.as_ptr(), 88 | lengths.len() as i32, 89 | buf.as_ptr(), 90 | ); 91 | buf.clear(); 92 | lengths.clear(); 93 | 94 | match result { 95 | 0 => Ok(()), 96 | r @ 1.. => panic!("invalid return value from put_many: {}", r), 97 | x if x == FvmError::InvalidHandle as i32 => { 98 | panic!("blockstore {} not registered", handle) 99 | } 100 | // This error makes no sense. 101 | x if x == FvmError::NotFound as i32 => panic!("not found error on put"), 102 | e => Err(anyhow!("cgo blockstore 'put' failed with error code {}", e)), 103 | } 104 | } 105 | } 106 | 107 | let mut lengths = Vec::with_capacity(MAX_BLOCK_BATCH); 108 | let mut buf = Vec::with_capacity(MAX_BUF_SIZE); 109 | for (k, block) in blocks { 110 | let block = block.as_ref(); 111 | // We limit both the max number of blocks and the max buffer size. Technically, we could 112 | // _just_ limit the buffer size as that should bound the number of blocks. However, 113 | // bounding the maximum number of blocks means we can allocate the vector up-front and 114 | // avoids any re-allocation, copying, etc. 115 | if lengths.len() >= MAX_BLOCK_BATCH 116 | || MAX_CID_LEN + block.len() + buf.len() > MAX_BUF_SIZE 117 | { 118 | flush_buffered(self.handle, &mut lengths, &mut buf)?; 119 | } 120 | 121 | let start = buf.len(); 122 | k.write_bytes(&mut buf)?; 123 | buf.extend_from_slice(block); 124 | let size = buf.len() - start; 125 | lengths.push(size as i32); 126 | } 127 | flush_buffered(self.handle, &mut lengths, &mut buf) 128 | } 129 | 130 | fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()> { 131 | let k_bytes = k.to_bytes(); 132 | unsafe { 133 | match cgo_blockstore_put( 134 | self.handle, 135 | k_bytes.as_ptr(), 136 | k_bytes.len() as i32, 137 | block.as_ptr(), 138 | block.len() as i32, 139 | ) { 140 | 0 => Ok(()), 141 | r @ 1.. => panic!("invalid return value from put: {}", r), 142 | x if x == FvmError::InvalidHandle as i32 => { 143 | panic!("blockstore {} not registered", self.handle) 144 | } 145 | // This error makes no sense. 146 | x if x == FvmError::NotFound as i32 => panic!("not found error on put"), 147 | e => Err(anyhow!("cgo blockstore 'put' failed with error code {}", e)), 148 | } 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /rust/src/fvm/blockstore/mod.rs: -------------------------------------------------------------------------------- 1 | mod cgo; 2 | pub use cgo::*; 3 | -------------------------------------------------------------------------------- /rust/src/fvm/cgo/error.rs: -------------------------------------------------------------------------------- 1 | //! Error codes used by the cgo bridge (blockstore/externs). These are used by both rust and go, so 2 | //! don't remove them even if they seem dead. 
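//!
//! Illustrative note: Rust callers compare the raw `i32` returned across the cgo boundary
//! against these variants, e.g. `x if x == FvmError::NotFound as i32 => Ok(None)` in
//! `blockstore/cgo.rs`.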
3 | 4 | use safer_ffi::prelude::*; 5 | 6 | #[derive_ReprC] 7 | #[repr(i32)] 8 | #[derive(PartialEq, Eq, Debug, Copy, Clone)] 9 | pub enum FvmError { 10 | /// The error code returned by cgo if the blockstore handle isn't valid. 11 | InvalidHandle = -1, 12 | /// The error code returned by cgo when the block isn't found. 13 | NotFound = -2, 14 | /// The error code returned by cgo when there's some underlying system error. 15 | Io = -3, 16 | /// The error code returned by cgo when an argument is invalid. 17 | InvalidArgument = -4, 18 | /// The error code returned by cgo when the application panics. 19 | Panic = -5, 20 | } 21 | 22 | // Dummy to make safer-ffi export the error enum 23 | #[ffi_export] 24 | fn dummy(_error: FvmError) { 25 | panic!("Don't call me"); 26 | } 27 | -------------------------------------------------------------------------------- /rust/src/fvm/cgo/externs.rs: -------------------------------------------------------------------------------- 1 | //! The externs/blockstore implemented by the go side of the cgo bridge. 2 | 3 | extern "C" { 4 | pub fn cgo_blockstore_get( 5 | store: u64, 6 | k: *const u8, 7 | k_len: i32, 8 | block: *mut *mut u8, 9 | size: *mut i32, 10 | ) -> i32; 11 | 12 | pub fn cgo_blockstore_put( 13 | store: u64, 14 | k: *const u8, 15 | k_len: i32, 16 | block: *const u8, 17 | block_len: i32, 18 | ) -> i32; 19 | 20 | pub fn cgo_blockstore_put_many( 21 | store: u64, 22 | lengths: *const i32, 23 | lengths_len: i32, 24 | blocks: *const u8, 25 | ) -> i32; 26 | 27 | pub fn cgo_blockstore_has(store: u64, k: *const u8, k_len: i32) -> i32; 28 | 29 | pub fn cgo_extern_get_chain_randomness( 30 | handle: u64, 31 | round: i64, 32 | randomness: *mut [u8; 32], 33 | ) -> i32; 34 | 35 | pub fn cgo_extern_get_beacon_randomness( 36 | handle: u64, 37 | round: i64, 38 | randomness: *mut [u8; 32], 39 | ) -> i32; 40 | 41 | pub fn cgo_extern_verify_consensus_fault( 42 | handle: u64, 43 | h1: *const u8, 44 | h1_len: i32, 45 | h2: *const u8, 46 | h2_len: i32, 47 | extra: *const u8, 48 | extra_len: i32, 49 | miner_id: *mut u64, 50 | epoch: *mut i64, 51 | fault: *mut i64, 52 | gas_used: *mut i64, 53 | ) -> i32; 54 | 55 | pub fn cgo_extern_get_tipset_cid( 56 | handle: u64, 57 | epoch: i64, 58 | output: *mut u8, 59 | output_len: i32, 60 | ) -> i32; 61 | } 62 | -------------------------------------------------------------------------------- /rust/src/fvm/cgo/mod.rs: -------------------------------------------------------------------------------- 1 | mod externs; 2 | pub use externs::*; 3 | 4 | mod error; 5 | pub use error::*; 6 | -------------------------------------------------------------------------------- /rust/src/fvm/externs.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Context}; 2 | 3 | use fvm2::externs::{Consensus as Consensus2, Externs as Externs2, Rand as Rand2}; 4 | use fvm3::externs::{Chain as Chain3, Consensus as Consensus3, Externs as Externs3, Rand as Rand3}; 5 | use fvm4::externs::{Chain as Chain4, Consensus as Consensus4, Externs as Externs4, Rand as Rand4}; 6 | 7 | use fvm2_shared::address::Address as Address2; 8 | use fvm3_shared::address::Address as Address3; 9 | use fvm4_shared::address::Address; 10 | 11 | use fvm4_shared::clock::ChainEpoch; 12 | 13 | use fvm2_shared::consensus::{ 14 | ConsensusFault as ConsensusFault2, ConsensusFaultType as ConsensusFaultType2, 15 | }; 16 | use fvm3_shared::consensus::{ 17 | ConsensusFault as ConsensusFault3, ConsensusFaultType as ConsensusFaultType3, 18 | }; 19 | use 
fvm4_shared::consensus::ConsensusFault as ConsensusFault4; 20 | 21 | use num_traits::FromPrimitive; 22 | 23 | use super::cgo::*; 24 | 25 | /// An implementation of [`fvm::externs::Externs`] that can call out to go. See the `cgo` directory 26 | /// in this repo for the go side. 27 | /// 28 | /// Importantly, this allows Filecoin client written in go to expose chain randomness and consensus 29 | /// fault verification to the FVM. 30 | pub struct CgoExterns { 31 | handle: u64, 32 | } 33 | 34 | impl CgoExterns { 35 | /// Construct a new externs from a handle. 36 | pub fn new(handle: u64) -> CgoExterns { 37 | CgoExterns { handle } 38 | } 39 | } 40 | 41 | impl Rand4 for CgoExterns { 42 | fn get_chain_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 43 | unsafe { 44 | let mut buf = [0u8; 32]; 45 | match cgo_extern_get_chain_randomness(self.handle, round, &mut buf) { 46 | 0 => Ok(buf), 47 | r @ 1.. => panic!("invalid return value from has: {}", r), 48 | x if x == FvmError::InvalidHandle as i32 => { 49 | panic!("extern {} not registered", self.handle) 50 | } 51 | e => Err(anyhow!( 52 | "cgo extern 'get_chain_randomness' failed with error code {}", 53 | e 54 | )), 55 | } 56 | } 57 | } 58 | 59 | fn get_beacon_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 60 | unsafe { 61 | let mut buf = [0u8; 32]; 62 | match cgo_extern_get_beacon_randomness(self.handle, round, &mut buf) { 63 | 0 => Ok(buf), 64 | r @ 1.. => panic!("invalid return value from has: {}", r), 65 | x if x == FvmError::InvalidHandle as i32 => { 66 | panic!("extern {} not registered", self.handle) 67 | } 68 | e => Err(anyhow!( 69 | "cgo extern 'get_beacon_randomness' failed with error code {}", 70 | e 71 | )), 72 | } 73 | } 74 | } 75 | } 76 | 77 | impl Rand3 for CgoExterns { 78 | fn get_chain_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 79 | Rand4::get_chain_randomness(self, round) 80 | } 81 | 82 | fn get_beacon_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 83 | Rand4::get_beacon_randomness(self, round) 84 | } 85 | } 86 | 87 | impl Rand2 for CgoExterns { 88 | fn get_chain_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 89 | Rand4::get_chain_randomness(self, round) 90 | } 91 | 92 | fn get_beacon_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 93 | Rand4::get_beacon_randomness(self, round) 94 | } 95 | } 96 | 97 | impl Consensus4 for CgoExterns { 98 | fn verify_consensus_fault( 99 | &self, 100 | h1: &[u8], 101 | h2: &[u8], 102 | extra: &[u8], 103 | ) -> anyhow::Result<(Option, i64)> { 104 | unsafe { 105 | let mut miner_id: u64 = 0; 106 | let mut epoch: i64 = 0; 107 | let mut fault_type: i64 = 0; 108 | let mut gas_used: i64 = 0; 109 | match cgo_extern_verify_consensus_fault( 110 | self.handle, 111 | h1.as_ptr(), 112 | h1.len() as i32, 113 | h2.as_ptr(), 114 | h2.len() as i32, 115 | extra.as_ptr(), 116 | extra.len() as i32, 117 | &mut miner_id, 118 | &mut epoch, 119 | &mut fault_type, 120 | &mut gas_used, 121 | ) { 122 | 0 => Ok(( 123 | match fault_type { 124 | 0 => None, 125 | _ => Some(ConsensusFault4 { 126 | target: Address::new_id(miner_id), 127 | epoch, 128 | fault_type: FromPrimitive::from_i64(fault_type) 129 | .context("invalid fault type")?, 130 | }), 131 | }, 132 | gas_used, 133 | )), 134 | r @ 1.. 
=> panic!("invalid return value from has: {}", r), 135 | x if x == FvmError::InvalidHandle as i32 => { 136 | panic!("extern {} not registered", self.handle) 137 | } 138 | e => Err(anyhow!( 139 | "cgo extern 'verify_consensus_fault' failed with error code {}", 140 | e 141 | )), 142 | } 143 | } 144 | } 145 | } 146 | 147 | impl Consensus3 for CgoExterns { 148 | fn verify_consensus_fault( 149 | &self, 150 | h1: &[u8], 151 | h2: &[u8], 152 | extra: &[u8], 153 | ) -> anyhow::Result<(Option, i64)> { 154 | let res = Consensus4::verify_consensus_fault(self, h1, h2, extra); 155 | match res { 156 | Ok((Some(res), x)) => Ok(( 157 | Some(ConsensusFault3 { 158 | target: Address3::from_bytes(&res.target.to_bytes()).unwrap(), 159 | epoch: res.epoch, 160 | fault_type: ConsensusFaultType3::from_u8(res.fault_type as u8).unwrap(), 161 | }), 162 | x, 163 | )), 164 | Ok((None, x)) => Ok((None, x)), 165 | Err(x) => Err(x), 166 | } 167 | } 168 | } 169 | 170 | impl Consensus2 for CgoExterns { 171 | fn verify_consensus_fault( 172 | &self, 173 | h1: &[u8], 174 | h2: &[u8], 175 | extra: &[u8], 176 | ) -> anyhow::Result<(Option, i64)> { 177 | let res = Consensus4::verify_consensus_fault(self, h1, h2, extra); 178 | match res { 179 | Ok((Some(res), x)) => Ok(( 180 | Some(ConsensusFault2 { 181 | target: Address2::from_bytes(&res.target.to_bytes()).unwrap(), 182 | epoch: res.epoch, 183 | fault_type: ConsensusFaultType2::from_u8(res.fault_type as u8).unwrap(), 184 | }), 185 | x, 186 | )), 187 | Ok((None, x)) => Ok((None, x)), 188 | Err(x) => Err(x), 189 | } 190 | } 191 | } 192 | 193 | impl Chain4 for CgoExterns { 194 | fn get_tipset_cid(&self, epoch: ChainEpoch) -> anyhow::Result { 195 | unsafe { 196 | let mut buf = [0; fvm4_shared::MAX_CID_LEN]; 197 | match cgo_extern_get_tipset_cid(self.handle, epoch, buf.as_mut_ptr(), buf.len() as i32) 198 | { 199 | 0 => Ok(buf[..].try_into()?), 200 | r @ 1.. 
=> panic!("invalid return value from has: {}", r), 201 | x if x == FvmError::InvalidHandle as i32 => { 202 | panic!("extern {} not registered", self.handle) 203 | } 204 | e => Err(anyhow!( 205 | "cgo extern 'get_tipset_cid' failed with error code {}", 206 | e 207 | )), 208 | } 209 | } 210 | } 211 | } 212 | 213 | impl Chain3 for CgoExterns { 214 | fn get_tipset_cid(&self, epoch: ChainEpoch) -> anyhow::Result { 215 | Chain4::get_tipset_cid(self, epoch) 216 | } 217 | } 218 | 219 | impl Externs4 for CgoExterns {} 220 | impl Externs3 for CgoExterns {} 221 | impl Externs2 for CgoExterns {} 222 | -------------------------------------------------------------------------------- /rust/src/fvm/mod.rs: -------------------------------------------------------------------------------- 1 | mod blockstore; 2 | mod cgo; 3 | mod externs; 4 | 5 | pub mod engine; 6 | pub mod machine; 7 | pub mod types; 8 | 9 | pub use cgo::FvmError; 10 | -------------------------------------------------------------------------------- /rust/src/fvm/types.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::non_canonical_clone_impl)] 2 | 3 | use std::sync::Mutex; 4 | 5 | use safer_ffi::prelude::*; 6 | 7 | use super::engine::CgoExecutor; 8 | 9 | #[derive_ReprC] 10 | #[repr(u8)] 11 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 12 | pub enum FvmRegisteredVersion { 13 | V1, 14 | } 15 | 16 | #[derive_ReprC] 17 | #[repr(opaque)] 18 | #[derive(Default)] 19 | pub struct InnerFvmMachine { 20 | pub(crate) machine: Option>>, 21 | } 22 | 23 | pub type FvmMachine = Option>; 24 | 25 | #[derive_ReprC] 26 | #[repr(C)] 27 | #[derive(Default)] 28 | pub struct FvmMachineExecuteResponse { 29 | pub exit_code: u64, 30 | pub return_val: Option>, 31 | pub gas_used: u64, 32 | pub penalty_hi: u64, 33 | pub penalty_lo: u64, 34 | pub miner_tip_hi: u64, 35 | pub miner_tip_lo: u64, 36 | pub base_fee_burn_hi: u64, 37 | pub base_fee_burn_lo: u64, 38 | pub over_estimation_burn_hi: u64, 39 | pub over_estimation_burn_lo: u64, 40 | pub refund_hi: u64, 41 | pub refund_lo: u64, 42 | pub gas_refund: u64, 43 | pub gas_burned: u64, 44 | pub exec_trace: Option>, 45 | pub failure_info: Option, 46 | pub events: Option>, 47 | pub events_root: Option>, 48 | } 49 | -------------------------------------------------------------------------------- /rust/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all)] 2 | #![allow(clippy::missing_safety_doc)] 3 | #![allow(clippy::upper_case_acronyms)] 4 | 5 | pub mod bls; 6 | pub mod fvm; 7 | pub mod proofs; 8 | pub mod util; 9 | 10 | // Generates the headers. 11 | // Run `HEADER_DIR= cargo test --locked build_headers --features c-headers` to build 12 | #[safer_ffi::cfg_headers] 13 | #[test] 14 | fn build_headers() -> std::io::Result<()> { 15 | use std::env; 16 | use std::path::Path; 17 | 18 | let header_dir = env::var("HEADER_DIR").expect("Missing \"HEADER_DIR\""); 19 | let out_dir = Path::new(&header_dir); 20 | let hdr_out = out_dir.join("filcrypto.h"); 21 | 22 | safer_ffi::headers::builder() 23 | .to_file(&hdr_out)? 
24 | .generate()?; 25 | 26 | Ok(()) 27 | } 28 | -------------------------------------------------------------------------------- /rust/src/proofs/helpers.rs: -------------------------------------------------------------------------------- 1 | use std::collections::btree_map::BTreeMap; 2 | 3 | use anyhow::Result; 4 | use filecoin_proofs_api::{self as api, SectorId}; 5 | use safer_ffi::prelude::*; 6 | 7 | use super::types::{PrivateReplicaInfo, PublicReplicaInfo, RegisteredPoStProof}; 8 | use crate::util::types::as_path_buf; 9 | 10 | #[derive(Debug, Clone)] 11 | struct PublicReplicaInfoTmp { 12 | pub registered_proof: RegisteredPoStProof, 13 | pub comm_r: [u8; 32], 14 | pub sector_id: u64, 15 | } 16 | 17 | pub fn to_public_replica_info_map( 18 | replicas: c_slice::Ref<PublicReplicaInfo>, 19 | ) -> BTreeMap<SectorId, api::PublicReplicaInfo> { 20 | use rayon::prelude::*; 21 | 22 | let replicas = replicas 23 | .iter() 24 | .map(|ffi_info| PublicReplicaInfoTmp { 25 | sector_id: ffi_info.sector_id, 26 | registered_proof: ffi_info.registered_proof, 27 | comm_r: ffi_info.comm_r, 28 | }) 29 | .collect::<Vec<_>>(); 30 | 31 | replicas 32 | .into_par_iter() 33 | .map(|info| { 34 | let PublicReplicaInfoTmp { 35 | registered_proof, 36 | comm_r, 37 | sector_id, 38 | } = info; 39 | 40 | ( 41 | SectorId::from(sector_id), 42 | api::PublicReplicaInfo::new(registered_proof.into(), comm_r), 43 | ) 44 | }) 45 | .collect() 46 | } 47 | 48 | #[derive(Debug, Clone)] 49 | struct PrivateReplicaInfoTmp { 50 | pub registered_proof: RegisteredPoStProof, 51 | pub cache_dir_path: std::path::PathBuf, 52 | pub comm_r: [u8; 32], 53 | pub replica_path: std::path::PathBuf, 54 | pub sector_id: u64, 55 | } 56 | 57 | pub fn to_private_replica_info_map( 58 | replicas: c_slice::Ref<PrivateReplicaInfo>, 59 | ) -> Result<BTreeMap<SectorId, api::PrivateReplicaInfo>> { 60 | use rayon::prelude::*; 61 | 62 | let replicas: Vec<_> = replicas 63 | .iter() 64 | .map(|ffi_info| { 65 | let cache_dir_path = as_path_buf(&ffi_info.cache_dir_path)?; 66 | let replica_path = as_path_buf(&ffi_info.replica_path)?; 67 | 68 | Ok(PrivateReplicaInfoTmp { 69 | registered_proof: ffi_info.registered_proof, 70 | cache_dir_path, 71 | comm_r: ffi_info.comm_r, 72 | replica_path, 73 | sector_id: ffi_info.sector_id, 74 | }) 75 | }) 76 | .collect::<Result<_>>()?; 77 | 78 | let map = replicas 79 | .into_par_iter() 80 | .map(|info| { 81 | let PrivateReplicaInfoTmp { 82 | registered_proof, 83 | cache_dir_path, 84 | comm_r, 85 | replica_path, 86 | sector_id, 87 | } = info; 88 | 89 | ( 90 | SectorId::from(sector_id), 91 | api::PrivateReplicaInfo::new( 92 | registered_proof.into(), 93 | comm_r, 94 | cache_dir_path, 95 | replica_path, 96 | ), 97 | ) 98 | }) 99 | .collect(); 100 | 101 | Ok(map) 102 | } 103 | -------------------------------------------------------------------------------- /rust/src/proofs/mod.rs: -------------------------------------------------------------------------------- 1 | mod helpers; 2 | 3 | pub mod api; 4 | pub mod types; 5 | -------------------------------------------------------------------------------- /rust/src/util/api.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::os::unix::io::FromRawFd; 3 | use std::sync::Once; 4 | 5 | use anyhow::anyhow; 6 | use safer_ffi::prelude::*; 7 | use safer_ffi::slice::slice_boxed; 8 | 9 | use super::types::{ 10 | catch_panic_response, catch_panic_response_no_log, GpuDeviceResponse, InitLogFdResponse, 11 | }; 12 | 13 | /// Protects the init of the logger. 14 | static LOG_INIT: Once = Once::new(); 15 | 16 | /// Ensures the logger is initialized.
17 | pub fn init_log() { 18 | LOG_INIT.call_once(|| { 19 | fil_logger::init(); 20 | }); 21 | } 22 | /// Initialize the logger with a file to log into 23 | /// 24 | /// Returns `None` if there is already an active logger 25 | pub fn init_log_with_file(file: File) -> Option<()> { 26 | if LOG_INIT.is_completed() { 27 | None 28 | } else { 29 | LOG_INIT.call_once(|| { 30 | fil_logger::init_with_file(file); 31 | }); 32 | Some(()) 33 | } 34 | } 35 | 36 | /// Serialize the GPU device names into a vector 37 | #[cfg(any(feature = "opencl", feature = "cuda", feature = "cuda-supraseal"))] 38 | fn get_gpu_devices_internal() -> Vec<c_slice::Box<u8>> { 39 | let devices = rust_gpu_tools::Device::all(); 40 | 41 | devices 42 | .into_iter() 43 | .map(|d| d.name().into_bytes().into_boxed_slice().into()) 44 | .collect() 45 | } 46 | 47 | // Return empty vector for GPU devices if cuda and opencl are disabled 48 | #[cfg(not(any(feature = "opencl", feature = "cuda", feature = "cuda-supraseal")))] 49 | fn get_gpu_devices_internal() -> Vec<c_slice::Box<u8>> { 50 | Vec::new() 51 | } 52 | 53 | /// Returns an array of strings containing the device names that can be used. 54 | #[ffi_export] 55 | pub fn get_gpu_devices() -> repr_c::Box<GpuDeviceResponse> { 56 | catch_panic_response("get_gpu_devices", || { 57 | let devices = get_gpu_devices_internal(); 58 | Ok(devices.into_boxed_slice().into()) 59 | }) 60 | } 61 | 62 | /// Initializes the logger with a file descriptor where logs will be logged into. 63 | /// 64 | /// This is usually a pipe that was opened on the receiving side of the logs. The logger is 65 | /// initialized on the first invocation, subsequent calls won't have any effect. 66 | /// 67 | /// This function must be called right at the start, before any other call. Otherwise the logger 68 | /// will be initialized implicitly and log to stderr. 69 | #[ffi_export] 70 | pub fn init_log_fd(log_fd: libc::c_int) -> repr_c::Box<InitLogFdResponse> { 71 | catch_panic_response_no_log(|| { 72 | let file = unsafe { File::from_raw_fd(log_fd) }; 73 | 74 | if init_log_with_file(file).is_none() { 75 | return Err(anyhow!("There is already an active logger. `init_log_fd()` needs to be called before any other FFI function is called.")); 76 | } 77 | Ok(()) 78 | }) 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | 84 | #[cfg(any(feature = "opencl", feature = "cuda"))] 85 | #[test] 86 | #[allow(clippy::needless_collect)] 87 | fn test_get_gpu_devices() { 88 | use crate::util::api::get_gpu_devices; 89 | use crate::util::types::destroy_gpu_device_response; 90 | 91 | let resp = get_gpu_devices(); 92 | assert!(resp.error_msg.is_empty()); 93 | 94 | let strings = &resp.value; 95 | 96 | let devices: Vec<&str> = strings 97 | .iter() 98 | .map(|s| std::str::from_utf8(s).unwrap()) 99 | .collect(); 100 | 101 | assert_eq!(devices.len(), resp.value.len()); 102 | 103 | destroy_gpu_device_response(resp); 104 | } 105 | 106 | #[test] 107 | #[ignore] 108 | #[cfg(target_os = "linux")] 109 | fn test_init_log_fd() { 110 | /* 111 | 112 | Warning: This test is leaky. When run alongside other (Rust) tests in 113 | this project, `[flexi_logger] writing log line failed` lines will be 114 | observed in stderr, and various unrelated tests will fail.
115 | 116 | - @laser 20200725 117 | 118 | */ 119 | use std::env; 120 | use std::fs::File; 121 | use std::io::{BufRead, BufReader, Write}; 122 | use std::os::unix::io::FromRawFd; 123 | 124 | use crate::util::api::init_log_fd; 125 | use crate::util::types::{destroy_init_log_fd_response, FCPResponseStatus}; 126 | 127 | let mut fds: [libc::c_int; 2] = [0; 2]; 128 | let res = unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC) }; 129 | if res != 0 { 130 | panic!("Cannot create pipe"); 131 | } 132 | let [read_fd, write_fd] = fds; 133 | 134 | let mut reader = unsafe { BufReader::new(File::from_raw_fd(read_fd)) }; 135 | let mut writer = unsafe { File::from_raw_fd(write_fd) }; 136 | 137 | // Without setting this env variable there won't be any log output 138 | env::set_var("RUST_LOG", "debug"); 139 | 140 | let resp = init_log_fd(write_fd); 141 | destroy_init_log_fd_response(resp); 142 | 143 | log::info!("a log message"); 144 | 145 | // Write a newline so that things don't block even if the logging doesn't work 146 | writer.write_all(b"\n").unwrap(); 147 | 148 | let mut log_message = String::new(); 149 | reader.read_line(&mut log_message).unwrap(); 150 | 151 | assert!(log_message.ends_with("a log message\n")); 152 | 153 | // Now test that there is an error when we try to init it again 154 | let resp_error = init_log_fd(write_fd); 155 | assert_ne!(resp_error.status_code, FCPResponseStatus::NoError); 156 | destroy_init_log_fd_response(resp_error); 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /rust/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | pub mod types; 3 | -------------------------------------------------------------------------------- /rust/src/util/types.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, mem::MaybeUninit, ops::Deref, panic, path::PathBuf, str::Utf8Error}; 2 | 3 | use safer_ffi::prelude::*; 4 | 5 | use super::api::init_log; 6 | 7 | #[derive_ReprC] 8 | #[repr(i32)] 9 | #[derive(PartialEq, Eq, Debug, Copy, Clone)] 10 | pub enum FCPResponseStatus { 11 | // Don't use FCPSuccess, since that complicates description of 'successful' verification. 
12 | NoError = 0, 13 | UnclassifiedError = 1, 14 | CallerError = 2, 15 | ReceiverError = 3, 16 | } 17 | 18 | #[cfg(target_os = "linux")] 19 | pub fn as_path_buf(bytes: &[u8]) -> std::result::Result<PathBuf, Utf8Error> { 20 | use std::ffi::OsStr; 21 | use std::os::unix::ffi::OsStrExt; 22 | 23 | Ok(OsStr::from_bytes(bytes).into()) 24 | } 25 | 26 | #[cfg(not(target_os = "linux"))] 27 | pub fn as_path_buf(bytes: &[u8]) -> std::result::Result<PathBuf, Utf8Error> { 28 | std::str::from_utf8(bytes).map(Into::into) 29 | } 30 | 31 | #[cfg(test)] 32 | #[cfg(target_os = "linux")] 33 | pub fn as_bytes(path: &std::path::Path) -> &[u8] { 34 | use std::os::unix::ffi::OsStrExt; 35 | 36 | path.as_os_str().as_bytes() 37 | } 38 | 39 | #[cfg(all(test, not(target_os = "linux")))] 40 | pub fn as_bytes(path: &std::path::Path) -> &[u8] { 41 | path.to_str().unwrap().as_bytes() 42 | } 43 | 44 | #[derive_ReprC] 45 | #[repr(C)] 46 | #[derive(Clone)] 47 | pub struct Result<T: Sized> { 48 | pub status_code: FCPResponseStatus, 49 | pub error_msg: c_slice::Box<u8>, 50 | pub value: T, 51 | } 52 | 53 | impl<T> Deref for Result<T> { 54 | type Target = T; 55 | 56 | fn deref(&self) -> &Self::Target { 57 | &self.value 58 | } 59 | } 60 | 61 | impl<T: Sized + Default> Default for Result<T> { 62 | fn default() -> Self { 63 | Result { 64 | status_code: FCPResponseStatus::NoError, 65 | error_msg: Default::default(), 66 | value: Default::default(), 67 | } 68 | } 69 | } 70 | 71 | impl<T, E> From<std::result::Result<T, E>> for Result<T> 72 | where 73 | T: Sized + Default, 74 | E: Display, 75 | { 76 | fn from(r: std::result::Result<T, E>) -> Self { 77 | match r { 78 | Ok(value) => Self::ok(value), 79 | Err(e) => Self::err(e.to_string().into_bytes().into_boxed_slice()), 80 | } 81 | } 82 | } 83 | 84 | impl<T> From<T> for Result<T> 85 | where 86 | T: Sized, 87 | { 88 | fn from(value: T) -> Self { 89 | Self { 90 | status_code: FCPResponseStatus::NoError, 91 | error_msg: Default::default(), 92 | value, 93 | } 94 | } 95 | } 96 | 97 | impl<T: Sized> Result<T> { 98 | pub fn ok(value: T) -> Self { 99 | Result { 100 | status_code: FCPResponseStatus::NoError, 101 | error_msg: Default::default(), 102 | value, 103 | } 104 | } 105 | 106 | pub unsafe fn into_boxed_raw(self) -> *mut Result<T> { 107 | Box::into_raw(Box::new(self)) 108 | } 109 | 110 | pub fn err_with_default(err: impl Into<c_slice::Box<u8>>, value: T) -> Self { 111 | Result { 112 | status_code: FCPResponseStatus::UnclassifiedError, 113 | error_msg: err.into(), 114 | value, 115 | } 116 | } 117 | 118 | /// Safety: value must not be accessed.
119 | pub unsafe fn err_no_default(err: impl Into<c_slice::Box<u8>>) -> Self { 120 | Result { 121 | status_code: FCPResponseStatus::UnclassifiedError, 122 | error_msg: err.into(), 123 | value: MaybeUninit::zeroed().assume_init(), 124 | } 125 | } 126 | } 127 | 128 | impl<T: Sized + Default> Result<T> { 129 | pub fn err(err: impl Into<c_slice::Box<u8>>) -> Self { 130 | Result { 131 | status_code: FCPResponseStatus::UnclassifiedError, 132 | error_msg: err.into(), 133 | value: Default::default(), 134 | } 135 | } 136 | } 137 | 138 | pub type GpuDeviceResponse = Result<c_slice::Box<c_slice::Box<u8>>>; 139 | 140 | #[ffi_export] 141 | pub fn destroy_gpu_device_response(ptr: repr_c::Box<GpuDeviceResponse>) { 142 | drop(ptr) 143 | } 144 | 145 | pub type InitLogFdResponse = Result<()>; 146 | 147 | #[ffi_export] 148 | pub fn destroy_init_log_fd_response(ptr: repr_c::Box<InitLogFdResponse>) { 149 | drop(ptr) 150 | } 151 | 152 | /// Catch panics and return an error response 153 | pub fn catch_panic_response<T, F>(name: &str, callback: F) -> repr_c::Box<Result<T>> 154 | where 155 | T: Sized + Default, 156 | F: FnOnce() -> anyhow::Result<T> + std::panic::UnwindSafe, 157 | { 158 | catch_panic_response_raw(name, || { 159 | Result::from(callback().map_err(|err| format!("{err:?}"))) 160 | }) 161 | } 162 | 163 | pub fn catch_panic_response_no_log<T, F>(callback: F) -> repr_c::Box<Result<T>> 164 | where 165 | T: Sized + Default, 166 | F: FnOnce() -> anyhow::Result<T> + std::panic::UnwindSafe, 167 | { 168 | catch_panic_response_raw_no_log(|| Result::from(callback().map_err(|err| format!("{err:?}")))) 169 | } 170 | 171 | pub fn catch_panic_response_raw_no_log<T, F>(callback: F) -> repr_c::Box<Result<T>> 172 | where 173 | T: Sized + Default, 174 | F: FnOnce() -> Result<T> + std::panic::UnwindSafe, 175 | { 176 | let result = match panic::catch_unwind(callback) { 177 | Ok(t) => t, 178 | Err(panic) => { 179 | let error_msg = match panic.downcast_ref::<&'static str>() { 180 | Some(message) => message, 181 | _ => "no unwind information", 182 | }; 183 | 184 | Result::from(Err(format!("Rust panic: {}", error_msg))) 185 | } 186 | }; 187 | 188 | Box::new(result).into() 189 | } 190 | 191 | pub fn catch_panic_response_raw<T, F>(name: &str, callback: F) -> repr_c::Box<Result<T>> 192 | where 193 | T: Sized + Default, 194 | F: FnOnce() -> Result<T> + std::panic::UnwindSafe, 195 | { 196 | catch_panic_response_raw_no_log(|| { 197 | init_log(); 198 | log::debug!("{}: start", name); 199 | let res = callback(); 200 | log::debug!("{}: end", name); 201 | res 202 | }) 203 | } 204 | 205 | pub unsafe fn catch_panic_response_no_default<T, F>( 206 | name: &str, 207 | callback: F, 208 | ) -> repr_c::Box<Result<T>> 209 | where 210 | T: Sized, 211 | F: FnOnce() -> anyhow::Result<T> + std::panic::UnwindSafe, 212 | { 213 | let result = panic::catch_unwind(|| { 214 | init_log(); 215 | log::debug!("{}: start", name); 216 | let res = callback(); 217 | log::debug!("{}: end", name); 218 | res 219 | }); 220 | 221 | Box::new(match result { 222 | Ok(t) => match t { 223 | Ok(t) => Result::ok(t), 224 | Err(err) => Result::err_no_default(format!("{err:?}").into_bytes().into_boxed_slice()), 225 | }, 226 | Err(panic) => { 227 | let error_msg = match panic.downcast_ref::<&'static str>() { 228 | Some(message) => message, 229 | _ => "no unwind information", 230 | }; 231 | 232 | Result::err_no_default( 233 | format!("Rust panic: {}", error_msg) 234 | .into_bytes() 235 | .into_boxed_slice(), 236 | ) 237 | } 238 | }) 239 | .into() 240 | } 241 | 242 | /// Generate a destructor for the given type wrapped in a `repr_c::Box`. 243 | #[macro_export] 244 | macro_rules! destructor { 245 | ($name:ident, $type:ty) => { 246 | /// Destroys the passed in `repr_c::Box<$type>`.
247 | #[ffi_export] 248 | fn $name(ptr: repr_c::Box<$type>) { 249 | drop(ptr); 250 | } 251 | }; 252 | } 253 | -------------------------------------------------------------------------------- /sector_update.go: -------------------------------------------------------------------------------- 1 | //go:build cgo 2 | // +build cgo 3 | 4 | package ffi 5 | 6 | import ( 7 | "github.com/filecoin-project/filecoin-ffi/cgo" 8 | commcid "github.com/filecoin-project/go-fil-commcid" 9 | "github.com/filecoin-project/go-state-types/abi" 10 | "github.com/filecoin-project/go-state-types/proof" 11 | "github.com/ipfs/go-cid" 12 | "github.com/pkg/errors" 13 | "golang.org/x/xerrors" 14 | ) 15 | 16 | func toFilRegisteredUpdateProof(p abi.RegisteredUpdateProof) (cgo.RegisteredUpdateProof, error) { 17 | switch p { 18 | case abi.RegisteredUpdateProof_StackedDrg2KiBV1: 19 | return cgo.RegisteredUpdateProofStackedDrg2KiBV1, nil 20 | case abi.RegisteredUpdateProof_StackedDrg8MiBV1: 21 | return cgo.RegisteredUpdateProofStackedDrg8MiBV1, nil 22 | case abi.RegisteredUpdateProof_StackedDrg512MiBV1: 23 | return cgo.RegisteredUpdateProofStackedDrg512MiBV1, nil 24 | case abi.RegisteredUpdateProof_StackedDrg32GiBV1: 25 | return cgo.RegisteredUpdateProofStackedDrg32GiBV1, nil 26 | case abi.RegisteredUpdateProof_StackedDrg64GiBV1: 27 | return cgo.RegisteredUpdateProofStackedDrg64GiBV1, nil 28 | default: 29 | return 0, errors.Errorf("no mapping to abi.RegisteredUpdateProof value available for: %v", p) 30 | } 31 | } 32 | 33 | type FunctionsSectorUpdate struct{} 34 | 35 | var SectorUpdate = FunctionsSectorUpdate{} 36 | 37 | func (FunctionsSectorUpdate) EncodeInto( 38 | proofType abi.RegisteredUpdateProof, 39 | newReplicaPath string, 40 | newReplicaCachePath string, 41 | sectorKeyPath string, 42 | sectorKeyCachePath string, 43 | stagedDataPath string, 44 | pieces []abi.PieceInfo, 45 | ) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) { 46 | up, err := toFilRegisteredUpdateProof(proofType) 47 | if err != nil { 48 | return cid.Undef, cid.Undef, err 49 | } 50 | 51 | filPublicPieceInfos, err := toFilPublicPieceInfos(pieces) 52 | if err != nil { 53 | return cid.Undef, cid.Undef, err 54 | } 55 | 56 | commRRaw, commDRaw, err := cgo.EmptySectorUpdateEncodeInto( 57 | up, 58 | cgo.AsSliceRefUint8([]byte(newReplicaPath)), 59 | cgo.AsSliceRefUint8([]byte(newReplicaCachePath)), 60 | cgo.AsSliceRefUint8([]byte(sectorKeyPath)), 61 | cgo.AsSliceRefUint8([]byte(sectorKeyCachePath)), 62 | cgo.AsSliceRefUint8([]byte(stagedDataPath)), 63 | cgo.AsSliceRefPublicPieceInfo(filPublicPieceInfos), 64 | ) 65 | if err != nil { 66 | return cid.Undef, cid.Undef, err 67 | } 68 | 69 | commR, errCommrSize := commcid.ReplicaCommitmentV1ToCID(commRRaw) 70 | if errCommrSize != nil { 71 | return cid.Undef, cid.Undef, errCommrSize 72 | } 73 | commD, errCommdSize := commcid.DataCommitmentV1ToCID(commDRaw) 74 | if errCommdSize != nil { 75 | return cid.Undef, cid.Undef, errCommdSize 76 | } 77 | 78 | return commR, commD, nil 79 | } 80 | 81 | func (FunctionsSectorUpdate) DecodeFrom( 82 | proofType abi.RegisteredUpdateProof, 83 | outDataPath string, 84 | replicaPath string, 85 | sectorKeyPath string, 86 | sectorKeyCachePath string, 87 | unsealedCID cid.Cid, 88 | ) error { 89 | up, err := toFilRegisteredUpdateProof(proofType) 90 | if err != nil { 91 | return err 92 | } 93 | 94 | commD, err := to32ByteCommD(unsealedCID) 95 | if err != nil { 96 | return err 97 | } 98 | 99 | return cgo.EmptySectorUpdateDecodeFrom( 100 | up, 101 | cgo.AsSliceRefUint8([]byte(outDataPath)), 102 | 
cgo.AsSliceRefUint8([]byte(replicaPath)), 103 | cgo.AsSliceRefUint8([]byte(sectorKeyPath)), 104 | cgo.AsSliceRefUint8([]byte(sectorKeyCachePath)), 105 | &commD, 106 | ) 107 | } 108 | 109 | func (FunctionsSectorUpdate) RemoveData( 110 | proofType abi.RegisteredUpdateProof, 111 | sectorKeyPath string, 112 | sectorKeyCachePath string, 113 | replicaPath string, 114 | replicaCachePath string, 115 | dataPath string, 116 | unsealedCID cid.Cid, 117 | ) error { 118 | up, err := toFilRegisteredUpdateProof(proofType) 119 | if err != nil { 120 | return err 121 | } 122 | 123 | commD, err := to32ByteCommD(unsealedCID) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | return cgo.EmptySectorUpdateRemoveEncodedData( 129 | up, 130 | cgo.AsSliceRefUint8([]byte(sectorKeyPath)), 131 | cgo.AsSliceRefUint8([]byte(sectorKeyCachePath)), 132 | cgo.AsSliceRefUint8([]byte(replicaPath)), 133 | cgo.AsSliceRefUint8([]byte(replicaCachePath)), 134 | cgo.AsSliceRefUint8([]byte(dataPath)), 135 | &commD, 136 | ) 137 | } 138 | 139 | func (FunctionsSectorUpdate) GenerateUpdateVanillaProofs( 140 | proofType abi.RegisteredUpdateProof, 141 | oldSealedCID cid.Cid, 142 | newSealedCID cid.Cid, 143 | unsealedCID cid.Cid, 144 | newReplicaPath string, 145 | newReplicaCachePath string, 146 | sectorKeyPath string, 147 | sectorKeyCachePath string, 148 | ) ([][]byte, error) { 149 | up, err := toFilRegisteredUpdateProof(proofType) 150 | if err != nil { 151 | return nil, err 152 | } 153 | 154 | commRold, err := to32ByteCommR(oldSealedCID) 155 | if err != nil { 156 | return nil, xerrors.Errorf("transforming old CommR: %w", err) 157 | } 158 | commRnew, err := to32ByteCommR(newSealedCID) 159 | if err != nil { 160 | return nil, xerrors.Errorf("transforming new CommR: %w", err) 161 | } 162 | commD, err := to32ByteCommD(unsealedCID) 163 | if err != nil { 164 | return nil, xerrors.Errorf("transforming new CommD: %w", err) 165 | } 166 | 167 | return cgo.GenerateEmptySectorUpdatePartitionProofs( 168 | up, 169 | &commRold, 170 | &commRnew, 171 | &commD, 172 | cgo.AsSliceRefUint8([]byte(sectorKeyPath)), 173 | cgo.AsSliceRefUint8([]byte(sectorKeyCachePath)), 174 | cgo.AsSliceRefUint8([]byte(newReplicaPath)), 175 | cgo.AsSliceRefUint8([]byte(newReplicaCachePath)), 176 | ) 177 | } 178 | 179 | func (FunctionsSectorUpdate) VerifyVanillaProofs( 180 | proofType abi.RegisteredUpdateProof, 181 | oldSealedCID cid.Cid, 182 | newSealedCID cid.Cid, 183 | unsealedCID cid.Cid, 184 | vanillaProofs [][]byte, 185 | ) (bool, error) { 186 | up, err := toFilRegisteredUpdateProof(proofType) 187 | if err != nil { 188 | return false, err 189 | } 190 | 191 | commRold, err := to32ByteCommR(oldSealedCID) 192 | if err != nil { 193 | return false, xerrors.Errorf("transforming old CommR: %w", err) 194 | } 195 | commRnew, err := to32ByteCommR(newSealedCID) 196 | if err != nil { 197 | return false, xerrors.Errorf("transforming new CommR: %w", err) 198 | } 199 | commD, err := to32ByteCommD(unsealedCID) 200 | if err != nil { 201 | return false, xerrors.Errorf("transforming new CommD: %w", err) 202 | } 203 | 204 | proofs, cleanup := toUpdateVanillaProofs(vanillaProofs) 205 | defer cleanup() 206 | 207 | return cgo.VerifyEmptySectorUpdatePartitionProofs( 208 | up, 209 | cgo.AsSliceRefSliceBoxedUint8(proofs), 210 | &commRold, 211 | &commRnew, 212 | &commD, 213 | ) 214 | } 215 | 216 | func (FunctionsSectorUpdate) GenerateUpdateProofWithVanilla( 217 | proofType abi.RegisteredUpdateProof, 218 | oldSealedCID cid.Cid, 219 | newSealedCID cid.Cid, 220 | unsealedCID cid.Cid, 221 |
vanillaProofs [][]byte, 222 | ) ([]byte, error) { 223 | up, err := toFilRegisteredUpdateProof(proofType) 224 | if err != nil { 225 | return nil, err 226 | } 227 | 228 | commRold, err := to32ByteCommR(oldSealedCID) 229 | if err != nil { 230 | return nil, xerrors.Errorf("transforming old CommR: %w", err) 231 | } 232 | commRnew, err := to32ByteCommR(newSealedCID) 233 | if err != nil { 234 | return nil, xerrors.Errorf("transforming new CommR: %w", err) 235 | } 236 | commD, err := to32ByteCommD(unsealedCID) 237 | if err != nil { 238 | return nil, xerrors.Errorf("transforming new CommD: %w", err) 239 | } 240 | 241 | proofs, cleanup := toUpdateVanillaProofs(vanillaProofs) 242 | defer cleanup() 243 | 244 | return cgo.GenerateEmptySectorUpdateProofWithVanilla( 245 | up, 246 | cgo.AsSliceRefSliceBoxedUint8(proofs), 247 | &commRold, 248 | &commRnew, 249 | &commD, 250 | ) 251 | } 252 | 253 | func toUpdateVanillaProofs(src [][]byte) ([]cgo.SliceBoxedUint8, func()) { 254 | out := make([]cgo.SliceBoxedUint8, len(src)) 255 | for idx := range out { 256 | out[idx] = cgo.AllocSliceBoxedUint8(src[idx]) 257 | } 258 | 259 | return out, func() { 260 | for idx := range out { 261 | out[idx].Destroy() 262 | } 263 | } 264 | } 265 | 266 | func (FunctionsSectorUpdate) GenerateUpdateProof( 267 | proofType abi.RegisteredUpdateProof, 268 | oldSealedCID cid.Cid, 269 | newSealedCID cid.Cid, 270 | unsealedCID cid.Cid, 271 | newReplicaPath string, 272 | newReplicaCachePath string, 273 | sectorKeyPath string, 274 | sectorKeyCachePath string, 275 | ) ([]byte, error) { 276 | up, err := toFilRegisteredUpdateProof(proofType) 277 | if err != nil { 278 | return nil, err 279 | } 280 | 281 | commRold, err := to32ByteCommR(oldSealedCID) 282 | if err != nil { 283 | return nil, xerrors.Errorf("transforming old CommR: %w", err) 284 | } 285 | commRnew, err := to32ByteCommR(newSealedCID) 286 | if err != nil { 287 | return nil, xerrors.Errorf("transforming new CommR: %w", err) 288 | } 289 | commD, err := to32ByteCommD(unsealedCID) 290 | if err != nil { 291 | return nil, xerrors.Errorf("transforming new CommD: %w", err) 292 | } 293 | 294 | return cgo.GenerateEmptySectorUpdateProof( 295 | up, 296 | &commRold, 297 | &commRnew, 298 | &commD, 299 | cgo.AsSliceRefUint8([]byte(sectorKeyPath)), 300 | cgo.AsSliceRefUint8([]byte(sectorKeyCachePath)), 301 | cgo.AsSliceRefUint8([]byte(newReplicaPath)), 302 | cgo.AsSliceRefUint8([]byte(newReplicaCachePath)), 303 | ) 304 | } 305 | 306 | func (FunctionsSectorUpdate) VerifyUpdateProof(info proof.ReplicaUpdateInfo) (bool, error) { 307 | up, err := toFilRegisteredUpdateProof(info.UpdateProofType) 308 | if err != nil { 309 | return false, err 310 | } 311 | 312 | commRold, err := to32ByteCommR(info.OldSealedSectorCID) 313 | if err != nil { 314 | return false, xerrors.Errorf("transforming old CommR: %w", err) 315 | } 316 | commRnew, err := to32ByteCommR(info.NewSealedSectorCID) 317 | if err != nil { 318 | return false, xerrors.Errorf("transforming new CommR: %w", err) 319 | } 320 | commD, err := to32ByteCommD(info.NewUnsealedSectorCID) 321 | if err != nil { 322 | return false, xerrors.Errorf("transforming new CommD: %w", err) 323 | } 324 | 325 | return cgo.VerifyEmptySectorUpdateProof( 326 | up, 327 | cgo.AsSliceRefUint8(info.Proof), 328 | &commRold, 329 | &commRnew, 330 | &commD, 331 | ) 332 | } 333 | -------------------------------------------------------------------------------- /srs-inner-product.json: -------------------------------------------------------------------------------- 1 | { 2 |
"v28-fil-inner-product-v1.srs": { 3 | "cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g", 4 | "digest": "ae20310138f5ba81451d723f858e3797", 5 | "sector_size": 0 6 | } 7 | } -------------------------------------------------------------------------------- /types.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "sort" 8 | 9 | "github.com/filecoin-project/go-state-types/proof" 10 | 11 | "github.com/filecoin-project/go-state-types/abi" 12 | "github.com/ipfs/go-cid" 13 | ) 14 | 15 | // BLS 16 | 17 | // SignatureBytes is the length of a BLS signature 18 | const SignatureBytes = 96 19 | 20 | // PrivateKeyBytes is the length of a BLS private key 21 | const PrivateKeyBytes = 32 22 | 23 | // PublicKeyBytes is the length of a BLS public key 24 | const PublicKeyBytes = 48 25 | 26 | // DigestBytes is the length of a BLS message hash/digest 27 | const DigestBytes = 96 28 | 29 | // Signature is a compressed affine 30 | type Signature = [SignatureBytes]byte 31 | 32 | // PrivateKey is a compressed affine 33 | type PrivateKey = [PrivateKeyBytes]byte 34 | 35 | // PublicKey is a compressed affine 36 | type PublicKey = [PublicKeyBytes]byte 37 | 38 | // Message is a byte slice 39 | type Message = []byte 40 | 41 | // Digest is a compressed affine 42 | type Digest = [DigestBytes]byte 43 | 44 | // Used when generating a private key deterministically 45 | type PrivateKeyGenSeed = [32]byte 46 | 47 | // Proofs 48 | 49 | // SortedPublicSectorInfo is a slice of publicSectorInfo sorted 50 | // (lexicographically, ascending) by sealed (replica) CID. 51 | type SortedPublicSectorInfo struct { 52 | f []publicSectorInfo 53 | } 54 | 55 | // SortedPrivateSectorInfo is a slice of PrivateSectorInfo sorted 56 | // (lexicographically, ascending) by sealed (replica) CID. 57 | type SortedPrivateSectorInfo struct { 58 | f []PrivateSectorInfo 59 | } 60 | 61 | func newSortedPublicSectorInfo(sectorInfo ...publicSectorInfo) SortedPublicSectorInfo { 62 | fn := func(i, j int) bool { 63 | return bytes.Compare(sectorInfo[i].SealedCID.Bytes(), sectorInfo[j].SealedCID.Bytes()) == -1 64 | } 65 | 66 | sort.Slice(sectorInfo[:], fn) 67 | 68 | return SortedPublicSectorInfo{ 69 | f: sectorInfo, 70 | } 71 | } 72 | 73 | // Values returns the sorted publicSectorInfo as a slice 74 | func (s *SortedPublicSectorInfo) Values() []publicSectorInfo { 75 | return s.f 76 | } 77 | 78 | // MarshalJSON JSON-encodes and serializes the SortedPublicSectorInfo. 79 | func (s SortedPublicSectorInfo) MarshalJSON() ([]byte, error) { 80 | return json.Marshal(s.f) 81 | } 82 | 83 | // UnmarshalJSON parses the JSON-encoded byte slice and stores the result in the 84 | // value pointed to by s.f. Note that this method allows for construction of a 85 | // SortedPublicSectorInfo which violates its invariant (that its publicSectorInfo are sorted 86 | // in some defined way). Callers should take care to never provide a byte slice 87 | // which would violate this invariant. 
88 | func (s *SortedPublicSectorInfo) UnmarshalJSON(b []byte) error { 89 | return json.Unmarshal(b, &s.f) 90 | } 91 | 92 | // NewSortedPrivateSectorInfo returns a SortedPrivateSectorInfo 93 | func NewSortedPrivateSectorInfo(sectorInfo ...PrivateSectorInfo) SortedPrivateSectorInfo { 94 | result := make([]PrivateSectorInfo, 0) 95 | seen := map[abi.SectorNumber]struct{}{} 96 | for i := range sectorInfo { 97 | if _, found := seen[sectorInfo[i].SectorNumber]; !found { 98 | seen[sectorInfo[i].SectorNumber] = struct{}{} 99 | result = append(result, sectorInfo[i]) 100 | } 101 | } 102 | sort.Slice(result, func(i, j int) bool { 103 | return result[i].SectorNumber < result[j].SectorNumber 104 | }) 105 | 106 | return SortedPrivateSectorInfo{ 107 | f: result, 108 | } 109 | } 110 | 111 | // Values returns the sorted PrivateSectorInfo as a slice 112 | func (s *SortedPrivateSectorInfo) Values() []PrivateSectorInfo { 113 | return s.f 114 | } 115 | 116 | // MarshalJSON JSON-encodes and serializes the SortedPrivateSectorInfo. 117 | func (s SortedPrivateSectorInfo) MarshalJSON() ([]byte, error) { 118 | return json.Marshal(s.f) 119 | } 120 | 121 | func (s *SortedPrivateSectorInfo) UnmarshalJSON(b []byte) error { 122 | return json.Unmarshal(b, &s.f) 123 | } 124 | 125 | type publicSectorInfo struct { 126 | PoStProofType abi.RegisteredPoStProof 127 | SealedCID cid.Cid 128 | SectorNum abi.SectorNumber 129 | } 130 | 131 | type PrivateSectorInfo struct { 132 | proof.SectorInfo 133 | CacheDirPath string 134 | PoStProofType abi.RegisteredPoStProof 135 | SealedSectorPath string 136 | } 137 | 138 | // AllocationManager is an interface that provides Free() capability. 139 | type AllocationManager interface { 140 | Free() 141 | } 142 | 143 | func SplitSortedPrivateSectorInfo(ctx context.Context, sortPrivSectors SortedPrivateSectorInfo, start int, end int) (SortedPrivateSectorInfo, error) { 144 | var newSortPrivSectors SortedPrivateSectorInfo 145 | newSortPrivSectors.f = make([]PrivateSectorInfo, 0) 146 | newSortPrivSectors.f = append(newSortPrivSectors.f, sortPrivSectors.f[start:end]...) 147 | 148 | return newSortPrivSectors, nil 149 | } 150 | -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | // Version is most similar to semver's minor version. 4 | // It is here as we cannot use gomod versioning due to local replace directives 5 | // for native dependencies. 6 | const Version int = 3 7 | -------------------------------------------------------------------------------- /version.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "v1.33.1-dev" 3 | } 4 | --------------------------------------------------------------------------------
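As a quick illustration of the sorting contract in types.go: NewSortedPrivateSectorInfo drops duplicate sector numbers and sorts the remaining entries ascending by SectorNumber, and Values() exposes that canonical order. The following is a minimal sketch, not code from this repository; it assumes the filecoin-ffi module and its native dependencies have already been built, and for brevity it populates only the SectorNumber field of the embedded proof.SectorInfo.

```go
package main

import (
	"fmt"

	ffi "github.com/filecoin-project/filecoin-ffi"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/proof"
)

func main() {
	// Duplicate sector numbers are dropped; the remainder is sorted ascending.
	sorted := ffi.NewSortedPrivateSectorInfo(
		ffi.PrivateSectorInfo{SectorInfo: proof.SectorInfo{SectorNumber: abi.SectorNumber(7)}},
		ffi.PrivateSectorInfo{SectorInfo: proof.SectorInfo{SectorNumber: abi.SectorNumber(3)}},
		ffi.PrivateSectorInfo{SectorInfo: proof.SectorInfo{SectorNumber: abi.SectorNumber(7)}}, // duplicate, ignored
	)

	for _, info := range sorted.Values() {
		fmt.Println(info.SectorNumber) // prints 3, then 7
	}
}
```

Keeping de-duplication and ordering inside the constructor lets downstream proof code treat the value as already canonical, which is why the SortedPublicSectorInfo UnmarshalJSON doc comment cautions that decoding JSON directly can violate the sorting invariant.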