├── rust ├── filecoin.pc ├── src │ ├── bls │ │ ├── mod.rs │ │ └── api.rs │ ├── util │ │ ├── mod.rs │ │ ├── api.rs │ │ └── types.rs │ ├── fvm │ │ ├── blockstore │ │ │ ├── mod.rs │ │ │ └── cgo.rs │ │ ├── cgo │ │ │ ├── mod.rs │ │ │ ├── error.rs │ │ │ └── externs.rs │ │ ├── mod.rs │ │ ├── types.rs │ │ └── externs.rs │ ├── proofs │ │ ├── mod.rs │ │ └── helpers.rs │ └── lib.rs ├── rust-toolchain.toml ├── filcrypto.pc.template ├── rustc-target-features-optimized.json ├── scripts │ ├── package-release.sh │ ├── publish-release.sh │ └── build-release.sh └── Cargo.toml ├── headerstubs ├── stdarg.h ├── stdlib.h ├── stddef.h ├── stdbool.h └── stdint.h ├── run_tests.sh ├── .gitignore ├── cgo ├── libs.go ├── util.go ├── errors.go ├── registry.go ├── interface.go ├── helpers_test.go ├── bls.go ├── fvm.go ├── extern.go ├── blockstore.go ├── const.go └── helpers.go ├── srs-inner-product.json ├── version.go ├── SECURITY.md ├── LICENSE-APACHE ├── Makefile ├── fvm_test.go ├── LICENSE-MIT ├── cgoleakdetect └── runner.go ├── go.mod ├── bls.go ├── types.go ├── distributed.go ├── bls_test.go ├── proofs_test.go ├── fvm.go ├── README.md ├── mkreleaselog ├── sector_update.go ├── install-filcrypto ├── parameters.json ├── .circleci └── config.yml └── workflows.go /rust/filecoin.pc: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /headerstubs/stdarg.h: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /headerstubs/stdlib.h: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /rust/src/bls/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | 
-------------------------------------------------------------------------------- /headerstubs/stddef.h: -------------------------------------------------------------------------------- 1 | typedef unsigned long int size_t; 2 | -------------------------------------------------------------------------------- /rust/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | pub mod types; 3 | -------------------------------------------------------------------------------- /rust/src/fvm/blockstore/mod.rs: -------------------------------------------------------------------------------- 1 | mod cgo; 2 | pub use cgo::*; 3 | -------------------------------------------------------------------------------- /rust/src/proofs/mod.rs: -------------------------------------------------------------------------------- 1 | mod helpers; 2 | 3 | pub mod api; 4 | pub mod types; 5 | -------------------------------------------------------------------------------- /rust/rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.67.1" 3 | components = ["clippy", "rustfmt"] 4 | -------------------------------------------------------------------------------- /rust/src/fvm/cgo/mod.rs: -------------------------------------------------------------------------------- 1 | mod externs; 2 | pub use externs::*; 3 | 4 | mod error; 5 | pub use error::*; 6 | -------------------------------------------------------------------------------- /run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RUST_LOG=info go test -count=1 ./... && cd rust && cargo test --release --all && cd .. 
4 | -------------------------------------------------------------------------------- /rust/filcrypto.pc.template: -------------------------------------------------------------------------------- 1 | Name: filcrypto 2 | Version: @VERSION@ 3 | Description: C bindings for Filecoin Proofs 4 | Libs: @PRIVATE_LIBS@ 5 | -------------------------------------------------------------------------------- /headerstubs/stdbool.h: -------------------------------------------------------------------------------- 1 | #ifndef _STDBOOL_H 2 | #define _STDBOOL_H 3 | 4 | 5 | #define bool _Bool 6 | #define true 1 7 | #define false 0 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /rust/src/fvm/mod.rs: -------------------------------------------------------------------------------- 1 | mod blockstore; 2 | mod cgo; 3 | mod externs; 4 | 5 | pub mod engine; 6 | pub mod machine; 7 | pub mod types; 8 | 9 | pub use cgo::FvmError; 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/*.rs.bk 2 | **/include 3 | **/paramcache 4 | **/target 5 | .install-filcrypto 6 | filcrypto.h 7 | filcrypto.pc 8 | filecoin.h 9 | filecoin.pc 10 | *.a 11 | simulator 12 | -------------------------------------------------------------------------------- /cgo/libs.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 
-lfilcrypto 5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | -------------------------------------------------------------------------------- /srs-inner-product.json: -------------------------------------------------------------------------------- 1 | { 2 | "v28-fil-inner-product-v1.srs": { 3 | "cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g", 4 | "digest": "ae20310138f5ba81451d723f858e3797", 5 | "sector_size": 0 6 | } 7 | } -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | // Version is most similar to semver's minor version. 4 | // It is here as we cannot use gomod versioning due to local replace directives 5 | // for native dependencies. 6 | const Version int = 3 7 | -------------------------------------------------------------------------------- /headerstubs/stdint.h: -------------------------------------------------------------------------------- 1 | typedef unsigned char uint8_t; 2 | typedef long long int32_t; 3 | typedef unsigned long long uint32_t; 4 | typedef long long int64_t; 5 | typedef unsigned long long uint64_t; 6 | typedef unsigned long long uintptr_t; /* only valid on 64bit systems */ 7 | 8 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | For reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md) 6 | 7 | ## Reporting a non security bug 8 | 9 | For non-critical bugs, please simply file a GitHub issue on this repo. 
10 | -------------------------------------------------------------------------------- /cgo/util.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | 11 | func InitLogFd(fd int32) error { 12 | resp := C.init_log_fd(C.int32_t(fd)) 13 | defer resp.destroy() 14 | 15 | if err := CheckErr(resp); err != nil { 16 | return err 17 | } 18 | 19 | return nil 20 | } 21 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 Filecoin Project 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 4 | 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
8 | -------------------------------------------------------------------------------- /cgo/errors.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | // #cgo linux LDFLAGS: ${SRCDIR}/../libfilcrypto.a -Wl,-unresolved-symbols=ignore-all 4 | // #cgo darwin LDFLAGS: ${SRCDIR}/../libfilcrypto.a -Wl,-undefined,dynamic_lookup 5 | // #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | // #include "../filcrypto.h" 7 | import "C" 8 | import ( 9 | "fmt" 10 | "os" 11 | "runtime/debug" 12 | ) 13 | 14 | const ( 15 | ErrInvalidHandle = C.FVM_ERROR_INVALID_HANDLE 16 | ErrNotFound = C.FVM_ERROR_NOT_FOUND 17 | ErrIO = C.FVM_ERROR_IO 18 | ErrInvalidArgument = C.FVM_ERROR_INVALID_ARGUMENT 19 | ErrPanic = C.FVM_ERROR_PANIC 20 | ) 21 | 22 | func logPanic(err interface{}) { 23 | fmt.Fprintf(os.Stderr, "panic in cgo externs: %s\n", err) 24 | debug.PrintStack() 25 | } 26 | -------------------------------------------------------------------------------- /rust/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::all)] 2 | #![allow(clippy::missing_safety_doc)] 3 | #![allow(clippy::upper_case_acronyms)] 4 | 5 | pub mod bls; 6 | pub mod fvm; 7 | pub mod proofs; 8 | pub mod util; 9 | 10 | // Generates the headers. 11 | // Run `HEADER_DIR= cargo test --locked build_headers --features c-headers` to build 12 | #[safer_ffi::cfg_headers] 13 | #[test] 14 | fn build_headers() -> std::io::Result<()> { 15 | use std::env; 16 | use std::path::Path; 17 | 18 | let header_dir = env::var("HEADER_DIR").expect("Missing \"HEADER_DIR\""); 19 | let out_dir = Path::new(&header_dir); 20 | let hdr_out = out_dir.join("filcrypto.h"); 21 | 22 | safer_ffi::headers::builder() 23 | .to_file(&hdr_out)? 
24 | .generate()?; 25 | 26 | Ok(()) 27 | } 28 | -------------------------------------------------------------------------------- /rust/rustc-target-features-optimized.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "rustc_target_feature": "+adx", 4 | "check_cpu_for_feature": "adx" 5 | }, 6 | { 7 | "rustc_target_feature": "+sha", 8 | "check_cpu_for_feature": "sha_ni" 9 | }, 10 | { 11 | "rustc_target_feature": "+sha2", 12 | "check_cpu_for_feature": "sha2" 13 | }, 14 | { 15 | "rustc_target_feature": "+sse2", 16 | "check_cpu_for_feature": "sse2" 17 | }, 18 | { 19 | "rustc_target_feature": "+avx2", 20 | "check_cpu_for_feature": "avx2" 21 | }, 22 | { 23 | "rustc_target_feature": "+avx", 24 | "check_cpu_for_feature": "avx" 25 | }, 26 | { 27 | "rustc_target_feature": "+sse4.2", 28 | "check_cpu_for_feature": "sse4_2" 29 | }, 30 | { 31 | "rustc_target_feature": "+sse4.1", 32 | "check_cpu_for_feature": "sse4_1" 33 | } 34 | ] 35 | -------------------------------------------------------------------------------- /rust/src/fvm/cgo/error.rs: -------------------------------------------------------------------------------- 1 | //! Error codes used by the cgo bridge (blockstore/externs). These are used by both rust and go, so 2 | //! don't remove them even if they seem dead. 3 | 4 | use safer_ffi::prelude::*; 5 | 6 | #[derive_ReprC] 7 | #[repr(i32)] 8 | #[derive(PartialEq, Eq, Debug, Copy, Clone)] 9 | pub enum FvmError { 10 | /// The error code returned by cgo if the blockstore handle isn't valid. 11 | InvalidHandle = -1, 12 | /// The error code returned by cgo when the block isn't found. 13 | NotFound = -2, 14 | /// The error code returned by cgo when there's some underlying system error. 15 | Io = -3, 16 | /// The error code returned by cgo when an argument is invalid. 17 | InvalidArgument = -4, 18 | /// The error code returned by cgo when the application panics. 
19 | Panic = -5, 20 | } 21 | 22 | // Dummy to make safer-ffi export the error enum 23 | #[ffi_export] 24 | fn dummy(_error: FvmError) { 25 | panic!("Don't call me"); 26 | } 27 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DEPS:=filcrypto.h filcrypto.pc libfilcrypto.a 2 | 3 | export CARGO_TARGET_DIR=target 4 | 5 | all: $(DEPS) 6 | .PHONY: all 7 | 8 | # Create a file so that parallel make doesn't call `./install-filcrypto` for 9 | # each of the deps 10 | $(DEPS): .install-filcrypto ; 11 | 12 | .install-filcrypto: rust 13 | go clean -cache -testcache 14 | ./install-filcrypto 15 | @touch $@ 16 | 17 | clean: 18 | go clean -cache -testcache 19 | rm -rf $(DEPS) .install-filcrypto 20 | rm -f ./runner 21 | cd rust && cargo clean && cd .. 22 | .PHONY: clean 23 | 24 | go-lint: $(DEPS) 25 | golangci-lint run -v --concurrency 2 --new-from-rev origin/master 26 | .PHONY: go-lint 27 | 28 | shellcheck: 29 | shellcheck install-filcrypto 30 | 31 | lint: shellcheck go-lint 32 | 33 | cgo-leakdetect: runner 34 | valgrind --leak-check=full --show-leak-kinds=definite ./runner 35 | .PHONY: cgo-leakdetect 36 | 37 | runner: $(DEPS) 38 | rm -f ./runner 39 | go build -o ./runner ./cgoleakdetect/ 40 | .PHONY: runner 41 | -------------------------------------------------------------------------------- /fvm_test.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | import ( 4 | "math" 5 | "testing" 6 | 7 | "github.com/filecoin-project/go-state-types/big" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func checkSplitBigInt(t *testing.T, i big.Int, hi, lo uint64) { 12 | hiA, loA, err := splitBigInt(i) 13 | require.NoError(t, err) 14 | require.Equal(t, hi, hiA, "hi not equal") 15 | require.Equal(t, lo, loA, "lo not equal") 16 | } 17 | 18 | func TestSplitBigIntZero(t *testing.T) { 19 | checkSplitBigInt(t, 
big.Zero(), 0, 0) 20 | } 21 | 22 | func TestSplitBigIntOne(t *testing.T) { 23 | checkSplitBigInt(t, big.NewInt(1), 0, 1) 24 | } 25 | 26 | func TestSplitBigIntMax64(t *testing.T) { 27 | checkSplitBigInt(t, big.NewIntUnsigned(math.MaxUint64), 0, math.MaxUint64) 28 | } 29 | 30 | func TestSplitBigIntLarge(t *testing.T) { 31 | checkSplitBigInt(t, big.Mul(big.NewIntUnsigned(math.MaxUint64), big.NewInt(8)), 0x7, math.MaxUint64^0x7) 32 | } 33 | func TestSplitBigIntNeg(t *testing.T) { 34 | _, _, err := splitBigInt(big.NewInt(-1)) 35 | require.Error(t, err) 36 | } 37 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /cgo/registry.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | ) 7 | 8 | var ( 9 | mu sync.RWMutex 10 | registry map[uint64]registeredExterns 11 | nextId uint64 12 | ) 13 | 14 | type registeredExterns struct { 15 | context.Context 16 | Externs 17 | } 18 | 19 | // Register a new item and get a handle. 20 | func Register(ctx context.Context, externs Externs) uint64 { 21 | mu.Lock() 22 | defer mu.Unlock() 23 | if registry == nil { 24 | registry = make(map[uint64]registeredExterns) 25 | } 26 | id := nextId 27 | nextId++ 28 | registry[id] = registeredExterns{ctx, externs} 29 | return id 30 | } 31 | 32 | // Unregister a blockstore. 33 | // 34 | // WARNING: This method must be called at most _once_ with a handle previously returned by Register. 35 | func Unregister(handle uint64) { 36 | mu.Lock() 37 | defer mu.Unlock() 38 | 39 | delete(registry, handle) 40 | } 41 | 42 | // Lookup a blockstore by handle. 43 | func Lookup(handle uint64) (Externs, context.Context) { 44 | mu.RLock() 45 | externs := registry[handle] 46 | mu.RUnlock() 47 | 48 | return externs.Externs, externs.Context 49 | } 50 | -------------------------------------------------------------------------------- /cgo/interface.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/filecoin-project/go-address" 7 | "github.com/filecoin-project/go-state-types/abi" 8 | "github.com/ipfs/go-cid" 9 | blockstore "github.com/ipfs/go-ipfs-blockstore" 10 | ) 11 | 12 | type ConsensusFault struct { 13 | // Address of the miner at fault (always an ID address). 14 | Target address.Address 15 | // Epoch of the fault, which is the higher epoch of the two blocks causing it. 16 | Epoch abi.ChainEpoch 17 | // Type of fault. 
18 | Type ConsensusFaultType 19 | } 20 | 21 | type ConsensusFaultType int64 22 | 23 | const ( 24 | ConsensusFaultNone ConsensusFaultType = 0 25 | ConsensusFaultDoubleForkMining ConsensusFaultType = 1 26 | ConsensusFaultParentGrinding ConsensusFaultType = 2 27 | ConsensusFaultTimeOffsetMining ConsensusFaultType = 3 28 | ) 29 | 30 | type Externs interface { 31 | GetChainRandomness(ctx context.Context, epoch abi.ChainEpoch) ([32]byte, error) 32 | GetBeaconRandomness(ctx context.Context, epoch abi.ChainEpoch) ([32]byte, error) 33 | VerifyConsensusFault(ctx context.Context, h1, h2, extra []byte) (*ConsensusFault, int64) 34 | TipsetCid(ctx context.Context, epoch abi.ChainEpoch) (cid.Cid, error) 35 | 36 | blockstore.Blockstore 37 | blockstore.Viewer 38 | } 39 | -------------------------------------------------------------------------------- /rust/scripts/package-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Exeuo pipefail 4 | 5 | main() { 6 | if [[ -z "$1" ]] 7 | then 8 | (>&2 echo '[package-release/main] Error: script requires path to which it will write release (gzipped) tarball, e.g. "/tmp/filecoin-ffi-Darwin-standard.tar.tz"') 9 | exit 1 10 | fi 11 | 12 | local __tarball_output_path=$1 13 | 14 | # create temporary directory to hold build artifacts (must not be declared 15 | # with 'local' because we will use 'trap' to clean it up) 16 | # 17 | __tmp_dir=$(mktemp -d) 18 | 19 | (>&2 echo "[package-release/main] preparing release files") 20 | 21 | # clean up temp directory on exit 22 | # 23 | trap '{ rm -rf $__tmp_dir; }' EXIT 24 | 25 | # copy assets into temporary directory 26 | # 27 | find -L . -type f -name filcrypto.h -exec cp -- "{}" $__tmp_dir/ \; 28 | find -L . -type f -name libfilcrypto.a -exec cp -- "{}" $__tmp_dir/ \; 29 | find -L . 
-type f -name filcrypto.pc -exec cp -- "{}" $__tmp_dir/ \; 30 | 31 | # create gzipped tarball from contents of temporary directory 32 | # 33 | tar -czf $__tarball_output_path $__tmp_dir/* 34 | 35 | (>&2 echo "[package-release/main] release file created: $__tarball_output_path") 36 | } 37 | 38 | main "$@"; exit 39 | -------------------------------------------------------------------------------- /rust/src/fvm/types.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Mutex; 2 | 3 | use safer_ffi::prelude::*; 4 | 5 | use super::engine::CgoExecutor; 6 | 7 | #[derive_ReprC] 8 | #[repr(u8)] 9 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 10 | pub enum FvmRegisteredVersion { 11 | V1, 12 | } 13 | 14 | #[derive_ReprC] 15 | #[ReprC::opaque] 16 | #[derive(Default)] 17 | pub struct InnerFvmMachine { 18 | pub(crate) machine: Option>>, 19 | } 20 | 21 | pub type FvmMachine = Option>; 22 | 23 | #[derive_ReprC] 24 | #[repr(C)] 25 | #[derive(Default)] 26 | pub struct FvmMachineExecuteResponse { 27 | pub exit_code: u64, 28 | pub return_val: Option>, 29 | pub gas_used: u64, 30 | pub penalty_hi: u64, 31 | pub penalty_lo: u64, 32 | pub miner_tip_hi: u64, 33 | pub miner_tip_lo: u64, 34 | pub base_fee_burn_hi: u64, 35 | pub base_fee_burn_lo: u64, 36 | pub over_estimation_burn_hi: u64, 37 | pub over_estimation_burn_lo: u64, 38 | pub refund_hi: u64, 39 | pub refund_lo: u64, 40 | pub gas_refund: u64, 41 | pub gas_burned: u64, 42 | pub exec_trace: Option>, 43 | pub failure_info: Option, 44 | pub events: Option>, 45 | pub events_root: Option>, 46 | } 47 | -------------------------------------------------------------------------------- /rust/src/fvm/cgo/externs.rs: -------------------------------------------------------------------------------- 1 | //! The externs/blockstore implemented by the go side of the cgo bridge. 
2 | 3 | extern "C" { 4 | pub fn cgo_blockstore_get( 5 | store: u64, 6 | k: *const u8, 7 | k_len: i32, 8 | block: *mut *mut u8, 9 | size: *mut i32, 10 | ) -> i32; 11 | 12 | pub fn cgo_blockstore_put( 13 | store: u64, 14 | k: *const u8, 15 | k_len: i32, 16 | block: *const u8, 17 | block_len: i32, 18 | ) -> i32; 19 | 20 | pub fn cgo_blockstore_put_many( 21 | store: u64, 22 | lengths: *const i32, 23 | lengths_len: i32, 24 | blocks: *const u8, 25 | ) -> i32; 26 | 27 | pub fn cgo_blockstore_has(store: u64, k: *const u8, k_len: i32) -> i32; 28 | 29 | pub fn cgo_extern_get_chain_randomness( 30 | handle: u64, 31 | round: i64, 32 | randomness: *mut [u8; 32], 33 | ) -> i32; 34 | 35 | pub fn cgo_extern_get_beacon_randomness( 36 | handle: u64, 37 | round: i64, 38 | randomness: *mut [u8; 32], 39 | ) -> i32; 40 | 41 | pub fn cgo_extern_verify_consensus_fault( 42 | handle: u64, 43 | h1: *const u8, 44 | h1_len: i32, 45 | h2: *const u8, 46 | h2_len: i32, 47 | extra: *const u8, 48 | extra_len: i32, 49 | miner_id: *mut u64, 50 | epoch: *mut i64, 51 | fault: *mut i64, 52 | gas_used: *mut i64, 53 | ) -> i32; 54 | 55 | pub fn cgo_extern_get_tipset_cid( 56 | handle: u64, 57 | epoch: i64, 58 | output: *mut u8, 59 | output_len: i32, 60 | ) -> i32; 61 | } 62 | -------------------------------------------------------------------------------- /cgo/helpers_test.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | import ( 4 | "testing" 5 | "unsafe" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestAsSliceRefUint8(t *testing.T) { 11 | // some words 12 | foo := []byte("hello world") 13 | ref := AsSliceRefUint8(foo) 14 | assert.Equal(t, unsafe.Slice((*byte)(unsafe.Pointer(ref.ptr)), int(ref.len)), foo) 15 | 16 | // empty 17 | foo = []byte("") 18 | ref = AsSliceRefUint8(foo) 19 | assert.Equal(t, unsafe.Slice((*byte)(unsafe.Pointer(ref.ptr)), int(ref.len)), foo) 20 | } 21 | 22 | func TestAsSliceRefUint(t *testing.T) { 23 | foo 
:= []uint{0, 1, 2} 24 | ref := AsSliceRefUint(foo) 25 | assert.Equal(t, unsafe.Slice((*uint)(unsafe.Pointer(ref.ptr)), int(ref.len)), foo) 26 | 27 | // empty 28 | foo = []uint{} 29 | ref = AsSliceRefUint(foo) 30 | assert.Equal(t, unsafe.Slice((*uint)(unsafe.Pointer(ref.ptr)), int(ref.len)), foo) 31 | } 32 | 33 | func TestByteArray32(t *testing.T) { 34 | foo := make([]byte, 32) 35 | for i := range foo { 36 | foo[i] = 1 37 | } 38 | ary := AsByteArray32(foo) 39 | assert.Equal(t, ary.slice(), foo) 40 | 41 | ary2 := ary.copy() 42 | assert.Equal(t, ary.slice(), ary2) 43 | 44 | // input too short 45 | aryShort := AsByteArray32([]byte{0, 1, 2}) 46 | slice := aryShort.slice() 47 | for i := range slice { 48 | if i == 0 { 49 | assert.Equal(t, slice[i], byte(0)) 50 | } else if i == 1 { 51 | assert.Equal(t, slice[i], byte(1)) 52 | } else if i == 2 { 53 | assert.Equal(t, slice[i], byte(2)) 54 | } else { 55 | assert.Equal(t, slice[i], byte(0)) 56 | } 57 | } 58 | } 59 | 60 | func TestAllocSliceBoxedUint8(t *testing.T) { 61 | foo := []byte("hello world") 62 | 63 | boxed := AllocSliceBoxedUint8(foo) 64 | defer boxed.Destroy() 65 | assert.Equal(t, boxed.slice(), foo) 66 | } 67 | -------------------------------------------------------------------------------- /cgoleakdetect/runner.go: -------------------------------------------------------------------------------- 1 | //go:build cgo 2 | // +build cgo 3 | 4 | package main 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | 10 | ffi "github.com/filecoin-project/filecoin-ffi" 11 | ) 12 | 13 | func main() { 14 | os.Setenv("RUST_LOG", "info") 15 | th := panicOnFailureTestHelper{} 16 | ffi.WorkflowGetGPUDevicesDoesNotProduceAnError(&th) 17 | ffi.WorkflowProofsLifecycle(&th) 18 | ffi.WorkflowRegisteredPoStProofFunctions(&th) 19 | ffi.WorkflowRegisteredSealProofFunctions(&th) 20 | } 21 | 22 | type panicOnFailureTestHelper struct{} 23 | 24 | func (p panicOnFailureTestHelper) AssertEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool { 25 | 
if expected != actual { 26 | panic(fmt.Sprintf("not equal: %+v, %+v, %+v", expected, actual, msgAndArgs)) 27 | } 28 | 29 | return true 30 | } 31 | 32 | func (p panicOnFailureTestHelper) AssertNoError(err error, msgAndArgs ...interface{}) bool { 33 | if err != nil { 34 | panic(fmt.Sprintf("there was an error: %+v, %+v", err, msgAndArgs)) 35 | } 36 | 37 | return true 38 | } 39 | 40 | func (p panicOnFailureTestHelper) AssertTrue(value bool, msgAndArgs ...interface{}) bool { 41 | if !value { 42 | panic(fmt.Sprintf("not true: %+v, %+v", value, msgAndArgs)) 43 | } 44 | 45 | return true 46 | } 47 | 48 | func (p panicOnFailureTestHelper) RequireEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { 49 | if expected != actual { 50 | panic(fmt.Sprintf("not equal: %+v, %+v, %+v", expected, actual, msgAndArgs)) 51 | } 52 | } 53 | 54 | func (p panicOnFailureTestHelper) RequireNoError(err error, msgAndArgs ...interface{}) { 55 | if err != nil { 56 | panic(fmt.Sprintf("there was an error: %+v, %+v", err, msgAndArgs)) 57 | } 58 | } 59 | 60 | func (p panicOnFailureTestHelper) RequireTrue(value bool, msgAndArgs ...interface{}) { 61 | if !value { 62 | panic(fmt.Sprintf("not true: %+v, %+v", value, msgAndArgs)) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /cgo/bls.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 
5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | 11 | func Hash(message SliceRefUint8) *[96]byte { 12 | resp := C.hash(message) 13 | defer resp.destroy() 14 | return resp.copyAsArray() 15 | } 16 | 17 | func Aggregate(flattenedSignatures SliceRefUint8) *[96]byte { 18 | resp := C.aggregate(flattenedSignatures) 19 | defer resp.destroy() 20 | return resp.copyAsArray() 21 | } 22 | 23 | func Verify(signature SliceRefUint8, flattenedDigests SliceRefUint8, flattenedPublicKeys SliceRefUint8) bool { 24 | resp := C.verify(signature, flattenedDigests, flattenedPublicKeys) 25 | return bool(resp) 26 | } 27 | 28 | func HashVerify(signature SliceRefUint8, flattenedMessages SliceRefUint8, messageSizes SliceRefUint, flattenedPublicKeys SliceRefUint8) bool { 29 | resp := C.hash_verify(signature, flattenedMessages, messageSizes, flattenedPublicKeys) 30 | return bool(resp) 31 | } 32 | 33 | func PrivateKeyGenerate() *[32]byte { 34 | resp := C.private_key_generate() 35 | defer resp.destroy() 36 | return resp.copyAsArray() 37 | } 38 | 39 | func PrivateKeyGenerateWithSeed(rawSeed *ByteArray32) *[32]byte { 40 | resp := C.private_key_generate_with_seed(rawSeed) 41 | defer resp.destroy() 42 | return resp.copyAsArray() 43 | } 44 | 45 | func PrivateKeySign(rawPrivateKey SliceRefUint8, message SliceRefUint8) *[96]byte { 46 | resp := C.private_key_sign(rawPrivateKey, message) 47 | defer resp.destroy() 48 | return resp.copyAsArray() 49 | } 50 | 51 | func PrivateKeyPublicKey(rawPrivateKey SliceRefUint8) *[48]byte { 52 | resp := C.private_key_public_key(rawPrivateKey) 53 | defer resp.destroy() 54 | return resp.copyAsArray() 55 | } 56 | 57 | func CreateZeroSignature() *[96]byte { 58 | resp := C.create_zero_signature() 59 | defer resp.destroy() 60 | return resp.copyAsArray() 61 | } 62 | -------------------------------------------------------------------------------- /go.mod: 
-------------------------------------------------------------------------------- 1 | module github.com/filecoin-project/filecoin-ffi 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/filecoin-project/go-address v1.1.0 7 | github.com/filecoin-project/go-fil-commcid v0.1.0 8 | github.com/filecoin-project/go-state-types v0.13.1 9 | github.com/ipfs/go-block-format v0.0.3 10 | github.com/ipfs/go-cid v0.3.2 11 | github.com/ipfs/go-ipfs-blockstore v1.2.0 12 | github.com/ipfs/go-ipld-format v0.4.0 13 | github.com/pkg/errors v0.9.1 14 | github.com/stretchr/testify v1.7.0 15 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 16 | ) 17 | 18 | require ( 19 | github.com/davecgh/go-spew v1.1.1 // indirect 20 | github.com/filecoin-project/go-crypto v0.0.1 // indirect 21 | github.com/gogo/protobuf v1.3.2 // indirect 22 | github.com/google/uuid v1.1.1 // indirect 23 | github.com/hashicorp/golang-lru v0.5.4 // indirect 24 | github.com/ipfs/bbloom v0.0.4 // indirect 25 | github.com/ipfs/go-datastore v0.5.0 // indirect 26 | github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect 27 | github.com/ipfs/go-ipfs-util v0.0.2 // indirect 28 | github.com/ipfs/go-ipld-cbor v0.0.6 // indirect 29 | github.com/ipfs/go-log v1.0.4 // indirect 30 | github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 // indirect 31 | github.com/ipfs/go-metrics-interface v0.0.1 // indirect 32 | github.com/jbenet/goprocess v0.1.4 // indirect 33 | github.com/klauspost/cpuid/v2 v2.2.3 // indirect 34 | github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect 35 | github.com/minio/sha256-simd v1.0.1 // indirect 36 | github.com/mr-tron/base58 v1.2.0 // indirect 37 | github.com/multiformats/go-base32 v0.0.4 // indirect 38 | github.com/multiformats/go-base36 v0.1.0 // indirect 39 | github.com/multiformats/go-multibase v0.0.3 // indirect 40 | github.com/multiformats/go-multihash v0.2.1 // indirect 41 | github.com/multiformats/go-varint v0.0.6 // indirect 42 | 
github.com/opentracing/opentracing-go v1.1.0 // indirect 43 | github.com/pmezard/go-difflib v1.0.0 // indirect 44 | github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect 45 | github.com/spaolacci/murmur3 v1.1.0 // indirect 46 | github.com/whyrusleeping/cbor-gen v0.1.0 // indirect 47 | go.uber.org/atomic v1.6.0 // indirect 48 | go.uber.org/multierr v1.5.0 // indirect 49 | go.uber.org/zap v1.14.1 // indirect 50 | golang.org/x/crypto v0.17.0 // indirect 51 | golang.org/x/sys v0.15.0 // indirect 52 | golang.org/x/tools v0.1.5 // indirect 53 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 54 | gopkg.in/yaml.v3 v3.0.0 // indirect 55 | lukechampine.com/blake3 v1.1.7 // indirect 56 | ) 57 | -------------------------------------------------------------------------------- /rust/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "filcrypto" 3 | description = "FFI Interface to Filecoin Proofs" 4 | version = "0.7.5" 5 | authors = [ 6 | "nemo ", 7 | "dignifiedquire ", 8 | "laser " 9 | ] 10 | license = "MIT OR Apache-2.0" 11 | repository = "https://github.com/filecoin-project/filecoin-ffi" 12 | readme = "README.md" 13 | edition = "2021" 14 | resolver = "2" 15 | publish = false 16 | 17 | [lib] 18 | crate-type = ["rlib", "staticlib"] 19 | 20 | [dependencies] 21 | bls-signatures = { version = "0.15.0", default-features = false, features = ["blst"] } 22 | blstrs = "0.7" 23 | filepath = "0.1.1" 24 | group = "0.13" 25 | libc = "0.2.58" 26 | log = "0.4.7" 27 | fil_logger = "0.1.6" 28 | rand = "0.8" 29 | rand_chacha = "0.3.1" 30 | rayon = "1.2.1" 31 | anyhow = "1.0.23" 32 | serde_json = "1.0.46" 33 | rust-gpu-tools = { version = "0.7", optional = true, default-features = false } 34 | fvm4 = { package = "fvm", version = "~4.1.2", default-features = false } 35 | fvm4_shared = { package = "fvm_shared", version = "~4.1.2" } 36 | fvm3 = { package = "fvm", version = "~3.9.0", 
default-features = false } 37 | fvm3_shared = { package = "fvm_shared", version = "~3.6.0" } 38 | fvm2 = { package = "fvm", version = "~2.7", default-features = false } 39 | fvm2_shared = { package = "fvm_shared", version = "~2.6" } 40 | fvm_ipld_encoding = "0.4.0" 41 | fvm_ipld_blockstore = "0.2.0" 42 | num-traits = "0.2.14" 43 | cid = { version = "0.10.1", features = ["serde-codec"] } 44 | lazy_static = "1.4.0" 45 | serde = "1.0.117" 46 | serde_tuple = "0.5" 47 | safer-ffi = { version = "0.0.7", features = ["proc_macros"] } 48 | filecoin-proofs-api = { version = "16.1", default-features = false } 49 | yastl = "0.1.2" 50 | 51 | [dev-dependencies] 52 | memmap2 = "0.5" 53 | tempfile = "3.0.8" 54 | 55 | [features] 56 | default = ["cuda", "multicore-sdr"] 57 | blst-portable = ["bls-signatures/blst-portable", "blstrs/portable"] 58 | cuda = ["filecoin-proofs-api/cuda", "rust-gpu-tools/cuda", "fvm2/cuda", "fvm3/cuda", "fvm4/cuda"] 59 | cuda-supraseal = ["filecoin-proofs-api/cuda-supraseal", "rust-gpu-tools/cuda", "fvm3/cuda-supraseal", "fvm4/cuda-supraseal"] 60 | opencl = ["filecoin-proofs-api/opencl", "rust-gpu-tools/opencl", "fvm2/opencl", "fvm3/opencl", "fvm4/opencl"] 61 | multicore-sdr = ["filecoin-proofs-api/multicore-sdr"] 62 | c-headers = ["safer-ffi/headers"] 63 | # This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD` 64 | # setting is ignored, no `TemporaryAux` file will be written. 65 | fixed-rows-to-discard = ["filecoin-proofs-api/fixed-rows-to-discard"] 66 | -------------------------------------------------------------------------------- /rust/scripts/publish-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Exeuo pipefail 4 | 5 | main() { 6 | if [[ -z "$1" ]] 7 | then 8 | (>&2 echo '[publish-release/main] Error: script requires a release (gzipped) tarball path, e.g. 
"/tmp/filecoin-ffi-Darwin-standard.tar.tz"') 9 | exit 1 10 | fi 11 | 12 | if [[ -z "$2" ]] 13 | then 14 | (>&2 echo '[publish-release/main] Error: script requires a release name, e.g. "filecoin-ffi-Darwin-standard" or "filecoin-ffi-Linux-standard"') 15 | exit 1 16 | fi 17 | 18 | local __release_file=$1 19 | local __release_name=$2 20 | local __release_tag="${CIRCLE_SHA1:0:16}" 21 | 22 | # make sure we have a token set, api requests won't work otherwise 23 | if [ -z $GITHUB_TOKEN ]; then 24 | (>&2 echo "[publish-release/main] \$GITHUB_TOKEN not set, publish failed") 25 | exit 1 26 | fi 27 | 28 | # see if the release already exists by tag 29 | local __release_response=` 30 | curl \ 31 | --header "Authorization: token $GITHUB_TOKEN" \ 32 | "https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases/tags/$__release_tag" 33 | ` 34 | 35 | local __release_id=`echo $__release_response | jq '.id'` 36 | 37 | if [ "$__release_id" = "null" ]; then 38 | (>&2 echo '[publish-release/main] creating release') 39 | 40 | RELEASE_DATA="{ 41 | \"tag_name\": \"$__release_tag\", 42 | \"target_commitish\": \"$CIRCLE_SHA1\", 43 | \"name\": \"$__release_tag\", 44 | \"body\": \"\" 45 | }" 46 | 47 | # create it if it doesn't exist yet 48 | # 49 | __release_response=` 50 | curl \ 51 | --request POST \ 52 | --header "Authorization: token $GITHUB_TOKEN" \ 53 | --header "Content-Type: application/json" \ 54 | --data "$RELEASE_DATA" \ 55 | "https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases" 56 | ` 57 | else 58 | (>&2 echo '[publish-release/main] release already exists') 59 | fi 60 | 61 | __release_upload_url=`echo $__release_response | jq -r '.upload_url' | cut -d'{' -f1` 62 | 63 | curl \ 64 | --request POST \ 65 | --header "Authorization: token $GITHUB_TOKEN" \ 66 | --header "Content-Type: application/octet-stream" \ 67 | --data-binary "@$__release_file" \ 68 | "$__release_upload_url?name=$(basename $__release_file)" 69 | 70 | 
(>&2 echo '[publish-release/main] release file uploaded') 71 | } 72 | 73 | main "$@"; exit 74 | -------------------------------------------------------------------------------- /rust/scripts/build-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -Exeo pipefail 4 | 5 | main() { 6 | if [[ -z "${1}" ]] 7 | then 8 | (>&2 echo '[build-release/main] Error: script requires a build action, e.g. ./build-release.sh [build|lipo]') 9 | exit 1 10 | fi 11 | 12 | local __action="${1}" 13 | 14 | # temporary place for storing build output (cannot use 'local', because 15 | # 'trap' is not going to have access to variables scoped to this function) 16 | # 17 | __build_output_log_tmp=$(mktemp) 18 | 19 | # clean up temp file on exit 20 | # 21 | trap '{ rm -f $__build_output_log_tmp; }' EXIT 22 | 23 | # build with RUSTFLAGS configured to output linker flags for native libs 24 | # 25 | local __rust_flags="--print native-static-libs ${RUSTFLAGS}" 26 | 27 | # shellcheck disable=SC2068 # the rest of the parameters should be split 28 | RUSTFLAGS="${__rust_flags}" \ 29 | cargo "${__action}" \ 30 | --release --locked ${@:2} 2>&1 | tee ${__build_output_log_tmp} 31 | 32 | # parse build output for linker flags 33 | # 34 | local __linker_flags=$(cat ${__build_output_log_tmp} \ 35 | | grep native-static-libs\: \ 36 | | head -n 1 \ 37 | | cut -d ':' -f 3) 38 | 39 | echo "Linker Flags: ${__linker_flags}" 40 | if [ "$(uname -s)" = "Darwin" ] && [ "$(uname -m)" = "x86_64" ]; then 41 | # With lipo enabled, this replacement may not be necessary, 42 | # but leaving it in doesn't hurt as it does nothing if not 43 | # needed 44 | __linker_flags=$(echo ${__linker_flags} | sed 's/-lOpenCL/-framework OpenCL/g') 45 | echo "Using Linker Flags: ${__linker_flags}" 46 | 47 | find . 
-type f -name "libfilcrypto.a" 48 | rm -f ./target/aarch64-apple-darwin/release/libfilcrypto.a 49 | rm -f ./target/x86_64-apple-darwin/release/libfilcrypto.a 50 | echo "Eliminated non-universal binary libraries" 51 | find . -type f -name "libfilcrypto.a" 52 | fi 53 | 54 | # generate filcrypto.h 55 | # The header files are the same even without having any features enables, 56 | # this reduces the compile time and makes it work on more platforms. 57 | RUSTFLAGS="${__rust_flags}" HEADER_DIR="." \ 58 | cargo test --no-default-features --locked build_headers --features c-headers 59 | 60 | # generate pkg-config 61 | # 62 | sed -e "s;@VERSION@;$(git rev-parse HEAD);" \ 63 | -e "s;@PRIVATE_LIBS@;${__linker_flags};" "filcrypto.pc.template" > "filcrypto.pc" 64 | 65 | # ensure header file was built 66 | # 67 | find -L . -type f -name "filcrypto.h" | read 68 | 69 | # ensure the archive file was built 70 | # 71 | find -L . -type f -name "libfilcrypto.a" | read 72 | } 73 | 74 | main "$@"; exit 75 | -------------------------------------------------------------------------------- /cgo/fvm.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 
#cgo pkg-config: ${SRCDIR}/../filcrypto.pc
#include "../filcrypto.h"
#include
*/
import "C"

// CreateFvmMachine constructs a new FVM machine via the native
// create_fvm_machine call and returns an owned handle to it, or an error if
// the native side reported one.
//
// NOTE(review): networkVersion is declared uint64 here but is narrowed to
// C.uint32_t at the FFI boundary; values above 2^32-1 would be truncated.
// blockstoreId and externsId are numeric handles — presumably registered in
// the cgo handle registry (cf. Lookup in cgo/extern.go); confirm.
func CreateFvmMachine(fvmVersion FvmRegisteredVersion, chainEpoch, chainTimestamp, chainId, baseFeeHi, baseFeeLo, baseCircSupplyHi, baseCircSupplyLo, networkVersion uint64, stateRoot SliceRefUint8, tracing bool, blockstoreId, externsId uint64) (*FvmMachine, error) {
	resp := C.create_fvm_machine(
		fvmVersion,
		C.uint64_t(chainEpoch),
		C.uint64_t(chainTimestamp),
		C.uint64_t(chainId),
		C.uint64_t(baseFeeHi),
		C.uint64_t(baseFeeLo),
		C.uint64_t(baseCircSupplyHi),
		C.uint64_t(baseCircSupplyLo),
		C.uint32_t(networkVersion),
		stateRoot,
		C.bool(tracing),
		C.uint64_t(blockstoreId),
		C.uint64_t(externsId),
	)
	// take out the pointer from the result to ensure it doesn't get freed
	// when resp.destroy() runs below; the caller takes ownership of executor.
	executor := resp.value
	resp.value = nil
	defer resp.destroy()

	if err := CheckErr(resp); err != nil {
		return nil, err
	}

	return executor, nil
}

// CreateFvmDebugMachine is the debug variant of CreateFvmMachine: it
// additionally forwards actorRedirect and calls create_fvm_debug_machine.
// Same ownership rules as CreateFvmMachine apply to the returned handle.
func CreateFvmDebugMachine(fvmVersion FvmRegisteredVersion, chainEpoch, chainTimestamp, chainId, baseFeeHi, baseFeeLo, baseCircSupplyHi, baseCircSupplyLo, networkVersion uint64, stateRoot SliceRefUint8, actorRedirect SliceRefUint8, tracing bool, blockstoreId, externsId uint64) (*FvmMachine, error) {
	resp := C.create_fvm_debug_machine(
		fvmVersion,
		C.uint64_t(chainEpoch),
		C.uint64_t(chainTimestamp),
		C.uint64_t(chainId),
		C.uint64_t(baseFeeHi),
		C.uint64_t(baseFeeLo),
		C.uint64_t(baseCircSupplyHi),
		C.uint64_t(baseCircSupplyLo),
		C.uint32_t(networkVersion),
		stateRoot,
		actorRedirect,
		C.bool(tracing),
		C.uint64_t(blockstoreId),
		C.uint64_t(externsId),
	)
	// take out the pointer from the result to ensure it doesn't get freed
	executor := resp.value
	resp.value = nil
	defer resp.destroy()

	if err := CheckErr(resp); err != nil {
// (continuation of CreateFvmDebugMachine's CheckErr error path)
		return nil, err
	}

	return executor, nil
}

// FvmMachineExecuteMessage executes a serialized message on the given machine.
// chainLen and applyKind are forwarded verbatim to the native call. The native
// response payload is copied into a Go-owned value before the response struct
// is destroyed, so the result does not alias native memory.
func FvmMachineExecuteMessage(executor *FvmMachine, message SliceRefUint8, chainLen, applyKind uint64) (FvmMachineExecuteResponseGo, error) {
	resp := C.fvm_machine_execute_message(
		executor,
		message,
		C.uint64_t(chainLen),
		C.uint64_t(applyKind),
	)
	defer resp.destroy()

	if err := CheckErr(resp); err != nil {
		return FvmMachineExecuteResponseGo{}, err
	}

	return resp.value.copy(), nil
}

// FvmMachineFlush flushes the machine's state and returns a copy of the
// resulting bytes (the new state root, judging by the native call's name —
// NOTE(review): confirm against the Rust fvm_machine_flush implementation).
func FvmMachineFlush(executor *FvmMachine) ([]byte, error) {
	resp := C.fvm_machine_flush(executor)
	defer resp.destroy()

	if err := CheckErr(resp); err != nil {
		return nil, err
	}
	return resp.value.copy(), nil
}
-------------------------------------------------------------------------------- /rust/src/proofs/helpers.rs: --------------------------------------------------------------------------------
use std::collections::btree_map::BTreeMap;

use anyhow::Result;
use filecoin_proofs_api::{self as api, SectorId};
use safer_ffi::prelude::*;

use super::types::{PrivateReplicaInfo, PublicReplicaInfo, RegisteredPoStProof};
use crate::util::types::as_path_buf;

// Plain-Rust staging form of a public replica entry, copied out of the FFI
// struct so it can be moved across rayon's parallel iterator.
#[derive(Debug, Clone)]
struct PublicReplicaInfoTmp {
    pub registered_proof: RegisteredPoStProof,
    pub comm_r: [u8; 32],
    pub sector_id: u64,
}

// Converts an FFI slice of public replica infos into the BTreeMap form the
// proofs API expects, keyed by sector id.
// NOTE(review): generic parameters appear to have been stripped by the dump
// (e.g. `c_slice::Ref,`, `BTreeMap {`, `collect::>()`); confirm against the
// upstream file before treating this as compilable source.
pub fn to_public_replica_info_map(
    replicas: c_slice::Ref,
) -> BTreeMap {
    use rayon::prelude::*;

    // First copy out of the FFI structs sequentially...
    let replicas = replicas
        .iter()
        .map(|ffi_info| PublicReplicaInfoTmp {
            sector_id: ffi_info.sector_id,
            registered_proof: ffi_info.registered_proof,
            comm_r: ffi_info.comm_r,
        })
        .collect::>();

    // ...then build the map entries in parallel.
    replicas
        .into_par_iter()
        .map(|info| {
            let PublicReplicaInfoTmp {
                registered_proof,
                comm_r,
                sector_id,
            } = info;

            (
SectorId::from(sector_id), 42 | api::PublicReplicaInfo::new(registered_proof.into(), comm_r), 43 | ) 44 | }) 45 | .collect() 46 | } 47 | 48 | #[derive(Debug, Clone)] 49 | struct PrivateReplicaInfoTmp { 50 | pub registered_proof: RegisteredPoStProof, 51 | pub cache_dir_path: std::path::PathBuf, 52 | pub comm_r: [u8; 32], 53 | pub replica_path: std::path::PathBuf, 54 | pub sector_id: u64, 55 | } 56 | 57 | pub fn to_private_replica_info_map( 58 | replicas: c_slice::Ref, 59 | ) -> Result> { 60 | use rayon::prelude::*; 61 | 62 | let replicas: Vec<_> = replicas 63 | .iter() 64 | .map(|ffi_info| { 65 | let cache_dir_path = as_path_buf(&ffi_info.cache_dir_path)?; 66 | let replica_path = as_path_buf(&ffi_info.replica_path)?; 67 | 68 | Ok(PrivateReplicaInfoTmp { 69 | registered_proof: ffi_info.registered_proof, 70 | cache_dir_path, 71 | comm_r: ffi_info.comm_r, 72 | replica_path, 73 | sector_id: ffi_info.sector_id, 74 | }) 75 | }) 76 | .collect::>()?; 77 | 78 | let map = replicas 79 | .into_par_iter() 80 | .map(|info| { 81 | let PrivateReplicaInfoTmp { 82 | registered_proof, 83 | cache_dir_path, 84 | comm_r, 85 | replica_path, 86 | sector_id, 87 | } = info; 88 | 89 | ( 90 | SectorId::from(sector_id), 91 | api::PrivateReplicaInfo::new( 92 | registered_proof.into(), 93 | comm_r, 94 | cache_dir_path, 95 | replica_path, 96 | ), 97 | ) 98 | }) 99 | .collect(); 100 | 101 | Ok(map) 102 | } 103 | -------------------------------------------------------------------------------- /cgo/extern.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #include 5 | typedef const uint8_t* buf_t; 6 | */ 7 | import "C" 8 | import ( 9 | "unsafe" 10 | 11 | "github.com/filecoin-project/go-address" 12 | 13 | "github.com/filecoin-project/go-state-types/abi" 14 | ) 15 | 16 | //export cgo_extern_get_chain_randomness 17 | func cgo_extern_get_chain_randomness( 18 | handle C.uint64_t, round C.int64_t, 19 | output C.buf_t, 20 | ) (res C.int32_t) { 
// (continuation: body of cgo_extern_get_chain_randomness, whose signature ends
// on the previous line)
	// Never let a Go panic unwind across the FFI boundary; report ErrPanic.
	defer func() {
		if rerr := recover(); rerr != nil {
			logPanic(rerr)
			res = ErrPanic
		}
	}()

	// NOTE(review): assumes output points at a caller-owned buffer of at
	// least 32 bytes — confirm against the native caller's contract.
	out := unsafe.Slice((*byte)(unsafe.Pointer(output)), 32)
	externs, ctx := Lookup(uint64(handle))
	if externs == nil {
		return ErrInvalidHandle
	}

	rand, err := externs.GetChainRandomness(ctx, abi.ChainEpoch(round))

	switch err {
	case nil:
		copy(out[:], rand[:])
		return 0
	default:
		return ErrIO
	}
}

// cgo_extern_get_beacon_randomness writes 32 bytes of beacon randomness for
// the given round into output. Returns 0 on success, ErrInvalidHandle for an
// unknown handle, ErrIO on failure, ErrPanic if the Go side panicked.
//
//export cgo_extern_get_beacon_randomness
func cgo_extern_get_beacon_randomness(
	handle C.uint64_t, round C.int64_t,
	output C.buf_t,
) (res C.int32_t) {
	defer func() {
		if rerr := recover(); rerr != nil {
			logPanic(rerr)
			res = ErrPanic
		}
	}()

	// NOTE(review): same 32-byte output-buffer assumption as above.
	out := unsafe.Slice((*byte)(unsafe.Pointer(output)), 32)
	externs, ctx := Lookup(uint64(handle))
	if externs == nil {
		return ErrInvalidHandle
	}

	rand, err := externs.GetBeaconRandomness(ctx, abi.ChainEpoch(round))

	switch err {
	case nil:
		copy(out[:], rand[:])
		return 0
	default:
		return ErrIO
	}
}

// cgo_extern_verify_consensus_fault checks the two block headers (h1, h2, plus
// extra data) for a consensus fault, writing the fault type, gas used, and —
// when a fault is found — the offending miner id and epoch through the out
// pointers.
//
//export cgo_extern_verify_consensus_fault
func cgo_extern_verify_consensus_fault(
	handle C.uint64_t,
	h1 C.buf_t, h1Len C.int32_t,
	h2 C.buf_t, h2Len C.int32_t,
	extra C.buf_t, extraLen C.int32_t,
	minerIdOut *C.uint64_t,
	epochOut *C.int64_t,
	faultOut *C.int64_t,
	gasUsedOut *C.int64_t,
) (res C.int32_t) {
	defer func() {
		if rerr := recover(); rerr != nil {
			logPanic(rerr)
			res = ErrPanic
		}
	}()

	externs, ctx := Lookup(uint64(handle))
	if externs == nil {
		return ErrInvalidHandle
	}

	// Copy the native buffers into Go-owned memory before use.
	h1Go := C.GoBytes(unsafe.Pointer(h1), h1Len)
	h2Go := C.GoBytes(unsafe.Pointer(h2), h2Len)
	extraGo := C.GoBytes(unsafe.Pointer(extra), extraLen)

	faultRes, gas := externs.VerifyConsensusFault(ctx, h1Go, h2Go,
extraGo) 102 | *gasUsedOut = C.int64_t(gas) 103 | *faultOut = C.int64_t(faultRes.Type) 104 | 105 | if faultRes.Type != ConsensusFaultNone { 106 | id, err := address.IDFromAddress(faultRes.Target) 107 | if err != nil { 108 | return ErrIO 109 | } 110 | *epochOut = C.int64_t(faultRes.Epoch) 111 | *minerIdOut = C.uint64_t(id) 112 | } 113 | 114 | return 0 115 | } 116 | 117 | //export cgo_extern_get_tipset_cid 118 | func cgo_extern_get_tipset_cid( 119 | handle C.uint64_t, 120 | epoch C.int64_t, 121 | output C.buf_t, 122 | outputLen C.int32_t, 123 | ) (res C.int32_t) { 124 | defer func() { 125 | if rerr := recover(); rerr != nil { 126 | logPanic(rerr) 127 | res = ErrPanic 128 | } 129 | }() 130 | 131 | externs, ctx := Lookup(uint64(handle)) 132 | if externs == nil { 133 | return ErrInvalidHandle 134 | } 135 | 136 | out := unsafe.Slice((*byte)(unsafe.Pointer(output)), outputLen) 137 | 138 | k, err := externs.TipsetCid(ctx, abi.ChainEpoch(epoch)) 139 | if err != nil { 140 | return ErrIO 141 | } 142 | if k.ByteLen() > int(outputLen) { 143 | return ErrInvalidArgument 144 | } 145 | copy(out, k.Bytes()) 146 | return 0 147 | } 148 | -------------------------------------------------------------------------------- /bls.go: -------------------------------------------------------------------------------- 1 | //go:build cgo 2 | // +build cgo 3 | 4 | package ffi 5 | 6 | // #cgo linux LDFLAGS: ${SRCDIR}/libfilcrypto.a -Wl,-unresolved-symbols=ignore-all 7 | // #cgo darwin LDFLAGS: ${SRCDIR}/libfilcrypto.a -Wl,-undefined,dynamic_lookup 8 | // #cgo pkg-config: ${SRCDIR}/filcrypto.pc 9 | // #include "./filcrypto.h" 10 | import "C" 11 | import ( 12 | "github.com/filecoin-project/filecoin-ffi/cgo" 13 | ) 14 | 15 | // Hash computes the digest of a message 16 | func Hash(message Message) Digest { 17 | digest := cgo.Hash(cgo.AsSliceRefUint8(message)) 18 | if digest == nil { 19 | return Digest{} 20 | } 21 | return *digest 22 | } 23 | 24 | // Verify verifies that a signature is the aggregated 
signature of digests - pubkeys 25 | func Verify(signature *Signature, digests []Digest, publicKeys []PublicKey) bool { 26 | // prep data 27 | flattenedDigests := make([]byte, DigestBytes*len(digests)) 28 | for idx, digest := range digests { 29 | copy(flattenedDigests[(DigestBytes*idx):(DigestBytes*(1+idx))], digest[:]) 30 | } 31 | 32 | flattenedPublicKeys := make([]byte, PublicKeyBytes*len(publicKeys)) 33 | for idx, publicKey := range publicKeys { 34 | copy(flattenedPublicKeys[(PublicKeyBytes*idx):(PublicKeyBytes*(1+idx))], publicKey[:]) 35 | } 36 | 37 | return cgo.Verify( 38 | cgo.AsSliceRefUint8(signature[:]), 39 | cgo.AsSliceRefUint8(flattenedDigests), 40 | cgo.AsSliceRefUint8(flattenedPublicKeys), 41 | ) 42 | } 43 | 44 | // HashVerify verifies that a signature is the aggregated signature of hashed messages. 45 | func HashVerify(signature *Signature, messages []Message, publicKeys []PublicKey) bool { 46 | var flattenedMessages []byte 47 | messagesSizes := make([]uint, len(messages)) 48 | for idx := range messages { 49 | flattenedMessages = append(flattenedMessages, messages[idx]...) 50 | messagesSizes[idx] = uint(len(messages[idx])) 51 | } 52 | 53 | flattenedPublicKeys := make([]byte, PublicKeyBytes*len(publicKeys)) 54 | for idx, publicKey := range publicKeys { 55 | copy(flattenedPublicKeys[(PublicKeyBytes*idx):(PublicKeyBytes*(1+idx))], publicKey[:]) 56 | } 57 | 58 | return cgo.HashVerify( 59 | cgo.AsSliceRefUint8(signature[:]), 60 | cgo.AsSliceRefUint8(flattenedMessages), 61 | cgo.AsSliceRefUint(messagesSizes), 62 | cgo.AsSliceRefUint8(flattenedPublicKeys), 63 | ) 64 | } 65 | 66 | // Aggregate aggregates signatures together into a new signature. If the 67 | // provided signatures cannot be aggregated (due to invalid input or an 68 | // an operational error), Aggregate will return nil. 
func Aggregate(signatures []Signature) *Signature {
	// prep data: flatten the fixed-size signatures into one contiguous buffer.
	flattenedSignatures := make([]byte, SignatureBytes*len(signatures))
	for idx, sig := range signatures {
		copy(flattenedSignatures[(SignatureBytes*idx):(SignatureBytes*(1+idx))], sig[:])
	}

	return cgo.Aggregate(cgo.AsSliceRefUint8(flattenedSignatures))
}

// PrivateKeyGenerate generates a private key.
// Returns the zero-value PrivateKey if the native call yields nil.
func PrivateKeyGenerate() PrivateKey {
	key := cgo.PrivateKeyGenerate()
	if key == nil {
		return PrivateKey{}
	}
	return *key
}

// PrivateKeyGenerateWithSeed generates a private key in a predictable manner
// from the given 32-byte seed.
// Returns the zero-value PrivateKey if the native call yields nil.
func PrivateKeyGenerateWithSeed(seed PrivateKeyGenSeed) PrivateKey {
	ary := cgo.AsByteArray32(seed[:])
	key := cgo.PrivateKeyGenerateWithSeed(&ary)
	if key == nil {
		return PrivateKey{}
	}
	return *key
}

// PrivateKeySign signs a message with the given private key.
func PrivateKeySign(privateKey PrivateKey, message Message) *Signature {
	return cgo.PrivateKeySign(cgo.AsSliceRefUint8(privateKey[:]), cgo.AsSliceRefUint8(message))
}

// PrivateKeyPublicKey gets the public key for a private key.
func PrivateKeyPublicKey(privateKey PrivateKey) *PublicKey {
	return cgo.PrivateKeyPublicKey(cgo.AsSliceRefUint8(privateKey[:]))
}

// CreateZeroSignature creates a zero signature, used as placeholder in filecoin.
109 | func CreateZeroSignature() Signature { 110 | signature := cgo.CreateZeroSignature() 111 | if signature == nil { 112 | return Signature{} 113 | } 114 | return *signature 115 | } 116 | -------------------------------------------------------------------------------- /cgo/blockstore.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | import ( 4 | "unsafe" 5 | 6 | blocks "github.com/ipfs/go-block-format" 7 | "github.com/ipfs/go-cid" 8 | ipld "github.com/ipfs/go-ipld-format" 9 | ) 10 | 11 | /* 12 | #include 13 | typedef const uint8_t* buf_t; 14 | */ 15 | import "C" 16 | 17 | func toCid(k C.buf_t, kLen C.int32_t) cid.Cid { 18 | type cidRepr struct { 19 | str string 20 | } 21 | return *(*cid.Cid)(unsafe.Pointer(&cidRepr{ 22 | str: C.GoStringN((*C.char)(unsafe.Pointer(k)), kLen), 23 | })) 24 | } 25 | 26 | //export cgo_blockstore_get 27 | func cgo_blockstore_get(handle C.uint64_t, k C.buf_t, kLen C.int32_t, block **C.uint8_t, size *C.int32_t) (res C.int32_t) { 28 | defer func() { 29 | if rerr := recover(); rerr != nil { 30 | logPanic(rerr) 31 | res = ErrPanic 32 | } 33 | }() 34 | 35 | c := toCid(k, kLen) 36 | externs, ctx := Lookup(uint64(handle)) 37 | if externs == nil { 38 | return ErrInvalidHandle 39 | } 40 | 41 | err := externs.View(ctx, c, func(data []byte) error { 42 | *block = (C.buf_t)(C.CBytes(data)) 43 | *size = C.int32_t(len(data)) 44 | return nil 45 | }) 46 | 47 | switch { 48 | case err == nil: 49 | return 0 50 | case ipld.IsNotFound(err): 51 | return ErrNotFound 52 | default: 53 | return ErrIO 54 | } 55 | } 56 | 57 | //export cgo_blockstore_put 58 | func cgo_blockstore_put(handle C.uint64_t, k C.buf_t, kLen C.int32_t, block C.buf_t, blockLen C.int32_t) (res C.int32_t) { 59 | defer func() { 60 | if rerr := recover(); rerr != nil { 61 | logPanic(rerr) 62 | res = ErrPanic 63 | } 64 | }() 65 | 66 | c := toCid(k, kLen) 67 | externs, ctx := Lookup(uint64(handle)) 68 | if externs == nil { 69 | return 
ErrInvalidHandle 70 | } 71 | b, _ := blocks.NewBlockWithCid(C.GoBytes(unsafe.Pointer(block), blockLen), c) 72 | if externs.Put(ctx, b) != nil { 73 | return ErrIO 74 | } 75 | return 0 76 | } 77 | 78 | //export cgo_blockstore_put_many 79 | func cgo_blockstore_put_many(handle C.uint64_t, lengths *C.int32_t, lengthsLen C.int32_t, blockBuf C.buf_t) (res C.int32_t) { 80 | defer func() { 81 | if rerr := recover(); rerr != nil { 82 | logPanic(rerr) 83 | res = ErrPanic 84 | } 85 | }() 86 | externs, ctx := Lookup(uint64(handle)) 87 | if externs == nil { 88 | return ErrInvalidHandle 89 | } 90 | // Get a reference to the lengths vector without copying. 91 | const MAX_LEN = 1 << 30 92 | if lengthsLen > MAX_LEN { 93 | return ErrInvalidArgument 94 | } 95 | 96 | lengthsGo := unsafe.Slice(lengths, lengthsLen) 97 | blocksGo := make([]blocks.Block, 0, lengthsLen) 98 | for _, length := range lengthsGo { 99 | if length > MAX_LEN { 100 | return ErrInvalidArgument 101 | } 102 | // get the next buffer. We could use C.GoBytes, but that copies. 103 | buf := unsafe.Slice((*byte)(unsafe.Pointer(blockBuf)), length) 104 | 105 | // read the CID. This function will copy the CID internally. 106 | cidLen, k, err := cid.CidFromBytes(buf) 107 | if err != nil { 108 | return ErrInvalidArgument 109 | } 110 | buf = buf[cidLen:] 111 | 112 | // Read the block and copy it. Unfortunately, our blockstore makes no guarantees 113 | // about not holding onto blocks. 114 | block := make([]byte, len(buf)) 115 | copy(block, buf) 116 | b, _ := blocks.NewBlockWithCid(block, k) 117 | 118 | // Add it to the batch. 119 | blocksGo = append(blocksGo, b) 120 | 121 | // Advance the block buffer. 
122 | blockBuf = (C.buf_t)(unsafe.Pointer(uintptr(unsafe.Pointer(blockBuf)) + uintptr(length))) 123 | } 124 | if externs.PutMany(ctx, blocksGo) != nil { 125 | return ErrIO 126 | } 127 | return 0 128 | } 129 | 130 | //export cgo_blockstore_has 131 | func cgo_blockstore_has(handle C.uint64_t, k C.buf_t, kLen C.int32_t) (res C.int32_t) { 132 | defer func() { 133 | if rerr := recover(); rerr != nil { 134 | logPanic(rerr) 135 | res = ErrPanic 136 | } 137 | }() 138 | 139 | c := toCid(k, kLen) 140 | externs, ctx := Lookup(uint64(handle)) 141 | if externs == nil { 142 | return ErrInvalidHandle 143 | } 144 | has, err := externs.Has(ctx, c) 145 | switch { 146 | case err == nil: 147 | case ipld.IsNotFound(err): 148 | // Some old blockstores still return this. 149 | return 0 150 | default: 151 | return ErrIO 152 | } 153 | if has { 154 | return 1 155 | } 156 | return 0 157 | } 158 | -------------------------------------------------------------------------------- /cgo/const.go: -------------------------------------------------------------------------------- 1 | package cgo 2 | 3 | /* 4 | #cgo LDFLAGS: -L${SRCDIR}/.. 
5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | 11 | const ( 12 | FCPResponseStatusNoError = C.F_C_P_RESPONSE_STATUS_NO_ERROR 13 | FCPResponseStatusUnclassifiedError = C.F_C_P_RESPONSE_STATUS_UNCLASSIFIED_ERROR 14 | FCPResponseStatusCallerError = C.F_C_P_RESPONSE_STATUS_CALLER_ERROR 15 | FCPResponseStatusReceiverError = C.F_C_P_RESPONSE_STATUS_RECEIVER_ERROR 16 | ) 17 | 18 | const ( 19 | RegisteredSealProofStackedDrg2KiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG2_KI_B_V1 20 | RegisteredSealProofStackedDrg8MiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG8_MI_B_V1 21 | RegisteredSealProofStackedDrg512MiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG512_MI_B_V1 22 | RegisteredSealProofStackedDrg32GiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG32_GI_B_V1 23 | RegisteredSealProofStackedDrg64GiBV1 = C.REGISTERED_SEAL_PROOF_STACKED_DRG64_GI_B_V1 24 | RegisteredSealProofStackedDrg2KiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG2_KI_B_V1_1 25 | RegisteredSealProofStackedDrg8MiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG8_MI_B_V1_1 26 | RegisteredSealProofStackedDrg512MiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG512_MI_B_V1_1 27 | RegisteredSealProofStackedDrg32GiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG32_GI_B_V1_1 28 | RegisteredSealProofStackedDrg64GiBV11 = C.REGISTERED_SEAL_PROOF_STACKED_DRG64_GI_B_V1_1 29 | RegisteredSealProofStackedDrg2KiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG2_KI_B_V1_1__FEAT__SYNTHETIC_PO_REP 30 | RegisteredSealProofStackedDrg8MiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG8_MI_B_V1_1__FEAT__SYNTHETIC_PO_REP 31 | RegisteredSealProofStackedDrg512MiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG512_MI_B_V1_1__FEAT__SYNTHETIC_PO_REP 32 | RegisteredSealProofStackedDrg32GiBV11_Feat_SyntheticPoRep = C.REGISTERED_SEAL_PROOF_STACKED_DRG32_GI_B_V1_1__FEAT__SYNTHETIC_PO_REP 33 | RegisteredSealProofStackedDrg64GiBV11_Feat_SyntheticPoRep = 
C.REGISTERED_SEAL_PROOF_STACKED_DRG64_GI_B_V1_1__FEAT__SYNTHETIC_PO_REP 34 | ) 35 | 36 | const ( 37 | RegisteredAggregationProofSnarkPackV1 = C.REGISTERED_AGGREGATION_PROOF_SNARK_PACK_V1 38 | RegisteredAggregationProofSnarkPackV2 = C.REGISTERED_AGGREGATION_PROOF_SNARK_PACK_V2 39 | ) 40 | 41 | const ( 42 | RegisteredPoStProofStackedDrgWinning2KiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING2_KI_B_V1 43 | RegisteredPoStProofStackedDrgWinning8MiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING8_MI_B_V1 44 | RegisteredPoStProofStackedDrgWinning512MiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING512_MI_B_V1 45 | RegisteredPoStProofStackedDrgWinning32GiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING32_GI_B_V1 46 | RegisteredPoStProofStackedDrgWinning64GiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINNING64_GI_B_V1 47 | RegisteredPoStProofStackedDrgWindow2KiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW2_KI_B_V1 48 | RegisteredPoStProofStackedDrgWindow8MiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW8_MI_B_V1 49 | RegisteredPoStProofStackedDrgWindow512MiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW512_MI_B_V1 50 | RegisteredPoStProofStackedDrgWindow32GiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW32_GI_B_V1 51 | RegisteredPoStProofStackedDrgWindow64GiBV1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW64_GI_B_V1 52 | RegisteredPoStProofStackedDrgWindow2KiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW2_KI_B_V1_1 53 | RegisteredPoStProofStackedDrgWindow8MiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW8_MI_B_V1_1 54 | RegisteredPoStProofStackedDrgWindow512MiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW512_MI_B_V1_1 55 | RegisteredPoStProofStackedDrgWindow32GiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW32_GI_B_V1_1 56 | RegisteredPoStProofStackedDrgWindow64GiBV1_1 = C.REGISTERED_PO_ST_PROOF_STACKED_DRG_WINDOW64_GI_B_V1_1 57 | ) 58 | 59 | const ( 60 | RegisteredUpdateProofStackedDrg2KiBV1 = 
C.REGISTERED_UPDATE_PROOF_STACKED_DRG2_KI_B_V1 61 | RegisteredUpdateProofStackedDrg8MiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG8_MI_B_V1 62 | RegisteredUpdateProofStackedDrg512MiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG512_MI_B_V1 63 | RegisteredUpdateProofStackedDrg32GiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG32_GI_B_V1 64 | RegisteredUpdateProofStackedDrg64GiBV1 = C.REGISTERED_UPDATE_PROOF_STACKED_DRG64_GI_B_V1 65 | ) 66 | -------------------------------------------------------------------------------- /types.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "sort" 8 | 9 | "github.com/filecoin-project/go-state-types/proof" 10 | 11 | "github.com/filecoin-project/go-state-types/abi" 12 | "github.com/ipfs/go-cid" 13 | ) 14 | 15 | // BLS 16 | 17 | // SignatureBytes is the length of a BLS signature 18 | const SignatureBytes = 96 19 | 20 | // PrivateKeyBytes is the length of a BLS private key 21 | const PrivateKeyBytes = 32 22 | 23 | // PublicKeyBytes is the length of a BLS public key 24 | const PublicKeyBytes = 48 25 | 26 | // DigestBytes is the length of a BLS message hash/digest 27 | const DigestBytes = 96 28 | 29 | // Signature is a compressed affine 30 | type Signature = [SignatureBytes]byte 31 | 32 | // PrivateKey is a compressed affine 33 | type PrivateKey = [PrivateKeyBytes]byte 34 | 35 | // PublicKey is a compressed affine 36 | type PublicKey = [PublicKeyBytes]byte 37 | 38 | // Message is a byte slice 39 | type Message = []byte 40 | 41 | // Digest is a compressed affine 42 | type Digest = [DigestBytes]byte 43 | 44 | // Used when generating a private key deterministically 45 | type PrivateKeyGenSeed = [32]byte 46 | 47 | // Proofs 48 | 49 | // SortedPublicSectorInfo is a slice of publicSectorInfo sorted 50 | // (lexicographically, ascending) by sealed (replica) CID. 
51 | type SortedPublicSectorInfo struct { 52 | f []publicSectorInfo 53 | } 54 | 55 | // SortedPrivateSectorInfo is a slice of PrivateSectorInfo sorted 56 | // (lexicographically, ascending) by sealed (replica) CID. 57 | type SortedPrivateSectorInfo struct { 58 | f []PrivateSectorInfo 59 | } 60 | 61 | func newSortedPublicSectorInfo(sectorInfo ...publicSectorInfo) SortedPublicSectorInfo { 62 | fn := func(i, j int) bool { 63 | return bytes.Compare(sectorInfo[i].SealedCID.Bytes(), sectorInfo[j].SealedCID.Bytes()) == -1 64 | } 65 | 66 | sort.Slice(sectorInfo[:], fn) 67 | 68 | return SortedPublicSectorInfo{ 69 | f: sectorInfo, 70 | } 71 | } 72 | 73 | // Values returns the sorted publicSectorInfo as a slice 74 | func (s *SortedPublicSectorInfo) Values() []publicSectorInfo { 75 | return s.f 76 | } 77 | 78 | // MarshalJSON JSON-encodes and serializes the SortedPublicSectorInfo. 79 | func (s SortedPublicSectorInfo) MarshalJSON() ([]byte, error) { 80 | return json.Marshal(s.f) 81 | } 82 | 83 | // UnmarshalJSON parses the JSON-encoded byte slice and stores the result in the 84 | // value pointed to by s.f. Note that this method allows for construction of a 85 | // SortedPublicSectorInfo which violates its invariant (that its publicSectorInfo are sorted 86 | // in some defined way). Callers should take care to never provide a byte slice 87 | // which would violate this invariant. 
88 | func (s *SortedPublicSectorInfo) UnmarshalJSON(b []byte) error { 89 | return json.Unmarshal(b, &s.f) 90 | } 91 | 92 | // NewSortedPrivateSectorInfo returns a SortedPrivateSectorInfo 93 | func NewSortedPrivateSectorInfo(sectorInfo ...PrivateSectorInfo) SortedPrivateSectorInfo { 94 | result := make([]PrivateSectorInfo, 0) 95 | seen := map[abi.SectorNumber]struct{}{} 96 | for i := range sectorInfo { 97 | if _, found := seen[sectorInfo[i].SectorNumber]; !found { 98 | seen[sectorInfo[i].SectorNumber] = struct{}{} 99 | result = append(result, sectorInfo[i]) 100 | } 101 | } 102 | sort.Slice(result, func(i, j int) bool { 103 | return result[i].SectorNumber < result[j].SectorNumber 104 | }) 105 | 106 | return SortedPrivateSectorInfo{ 107 | f: result, 108 | } 109 | } 110 | 111 | // Values returns the sorted PrivateSectorInfo as a slice 112 | func (s *SortedPrivateSectorInfo) Values() []PrivateSectorInfo { 113 | return s.f 114 | } 115 | 116 | // MarshalJSON JSON-encodes and serializes the SortedPrivateSectorInfo. 117 | func (s SortedPrivateSectorInfo) MarshalJSON() ([]byte, error) { 118 | return json.Marshal(s.f) 119 | } 120 | 121 | func (s *SortedPrivateSectorInfo) UnmarshalJSON(b []byte) error { 122 | return json.Unmarshal(b, &s.f) 123 | } 124 | 125 | type publicSectorInfo struct { 126 | PoStProofType abi.RegisteredPoStProof 127 | SealedCID cid.Cid 128 | SectorNum abi.SectorNumber 129 | } 130 | 131 | type PrivateSectorInfo struct { 132 | proof.SectorInfo 133 | CacheDirPath string 134 | PoStProofType abi.RegisteredPoStProof 135 | SealedSectorPath string 136 | } 137 | 138 | // AllocationManager is an interface that provides Free() capability. 
139 | type AllocationManager interface { 140 | Free() 141 | } 142 | 143 | func SplitSortedPrivateSectorInfo(ctx context.Context, sortPrivSectors SortedPrivateSectorInfo, start int, end int) (SortedPrivateSectorInfo, error) { 144 | var newSortPrivSectors SortedPrivateSectorInfo 145 | newSortPrivSectors.f = make([]PrivateSectorInfo, 0) 146 | newSortPrivSectors.f = append(newSortPrivSectors.f, sortPrivSectors.f[start:end]...) 147 | 148 | return newSortPrivSectors, nil 149 | } 150 | -------------------------------------------------------------------------------- /rust/src/util/api.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::os::unix::io::FromRawFd; 3 | use std::sync::Once; 4 | 5 | use anyhow::anyhow; 6 | use safer_ffi::prelude::*; 7 | use safer_ffi::slice::slice_boxed; 8 | 9 | use super::types::{ 10 | catch_panic_response, catch_panic_response_no_log, GpuDeviceResponse, InitLogFdResponse, 11 | }; 12 | 13 | /// Protects the init off the logger. 14 | static LOG_INIT: Once = Once::new(); 15 | 16 | /// Ensures the logger is initialized. 
17 | pub fn init_log() { 18 | LOG_INIT.call_once(|| { 19 | fil_logger::init(); 20 | }); 21 | } 22 | /// Initialize the logger with a file to log into 23 | /// 24 | /// Returns `None` if there is already an active logger 25 | pub fn init_log_with_file(file: File) -> Option<()> { 26 | if LOG_INIT.is_completed() { 27 | None 28 | } else { 29 | LOG_INIT.call_once(|| { 30 | fil_logger::init_with_file(file); 31 | }); 32 | Some(()) 33 | } 34 | } 35 | 36 | /// Serialize the GPU device names into a vector 37 | #[cfg(any(feature = "opencl", feature = "cuda", feature = "cuda-supraseal"))] 38 | fn get_gpu_devices_internal() -> Vec> { 39 | let devices = rust_gpu_tools::Device::all(); 40 | 41 | devices 42 | .into_iter() 43 | .map(|d| d.name().into_bytes().into_boxed_slice().into()) 44 | .collect() 45 | } 46 | 47 | // Return empty vector for GPU devices if cuda and opencl are disabled 48 | #[cfg(not(any(feature = "opencl", feature = "cuda", feature = "cuda-supraseal")))] 49 | fn get_gpu_devices_internal() -> Vec> { 50 | Vec::new() 51 | } 52 | 53 | /// Returns an array of strings containing the device names that can be used. 54 | #[ffi_export] 55 | pub fn get_gpu_devices() -> repr_c::Box { 56 | catch_panic_response("get_gpu_devices", || { 57 | let devices = get_gpu_devices_internal(); 58 | Ok(devices.into_boxed_slice().into()) 59 | }) 60 | } 61 | 62 | /// Initializes the logger with a file descriptor where logs will be logged into. 63 | /// 64 | /// This is usually a pipe that was opened on the receiving side of the logs. The logger is 65 | /// initialized on the invocation, subsequent calls won't have any effect. 66 | /// 67 | /// This function must be called right at the start, before any other call. Else the logger will 68 | /// be initializes implicitely and log to stderr. 
69 | #[ffi_export] 70 | pub fn init_log_fd(log_fd: libc::c_int) -> repr_c::Box { 71 | catch_panic_response_no_log(|| { 72 | let file = unsafe { File::from_raw_fd(log_fd) }; 73 | 74 | if init_log_with_file(file).is_none() { 75 | return Err(anyhow!("There is already an active logger. `init_log_fd()` needs to be called before any other FFI function is called.")); 76 | } 77 | Ok(()) 78 | }) 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | 84 | #[cfg(any(feature = "opencl", feature = "cuda"))] 85 | #[test] 86 | #[allow(clippy::needless_collect)] 87 | fn test_get_gpu_devices() { 88 | use crate::util::api::get_gpu_devices; 89 | use crate::util::types::destroy_gpu_device_response; 90 | 91 | let resp = get_gpu_devices(); 92 | assert!(resp.error_msg.is_empty()); 93 | 94 | let strings = &resp.value; 95 | 96 | let devices: Vec<&str> = strings 97 | .iter() 98 | .map(|s| std::str::from_utf8(s).unwrap()) 99 | .collect(); 100 | 101 | assert_eq!(devices.len(), resp.value.len()); 102 | 103 | destroy_gpu_device_response(resp); 104 | } 105 | 106 | #[test] 107 | #[ignore] 108 | #[cfg(target_os = "linux")] 109 | fn test_init_log_fd() { 110 | /* 111 | 112 | Warning: This test is leaky. When run alongside other (Rust) tests in 113 | this project, `[flexi_logger] writing log line failed` lines will be 114 | observed in stderr, and various unrelated tests will fail. 
115 | 116 | - @laser 20200725 117 | 118 | */ 119 | use std::env; 120 | use std::fs::File; 121 | use std::io::{BufRead, BufReader, Write}; 122 | use std::os::unix::io::FromRawFd; 123 | 124 | use crate::util::api::init_log_fd; 125 | use crate::util::types::{destroy_init_log_fd_response, FCPResponseStatus}; 126 | 127 | let mut fds: [libc::c_int; 2] = [0; 2]; 128 | let res = unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC) }; 129 | if res != 0 { 130 | panic!("Cannot create pipe"); 131 | } 132 | let [read_fd, write_fd] = fds; 133 | 134 | let mut reader = unsafe { BufReader::new(File::from_raw_fd(read_fd)) }; 135 | let mut writer = unsafe { File::from_raw_fd(write_fd) }; 136 | 137 | // Without setting this env variable there won't be any log output 138 | env::set_var("RUST_LOG", "debug"); 139 | 140 | let resp = init_log_fd(write_fd); 141 | destroy_init_log_fd_response(resp); 142 | 143 | log::info!("a log message"); 144 | 145 | // Write a newline so that things don't block even if the logging doesn't work 146 | writer.write_all(b"\n").unwrap(); 147 | 148 | let mut log_message = String::new(); 149 | reader.read_line(&mut log_message).unwrap(); 150 | 151 | assert!(log_message.ends_with("a log message\n")); 152 | 153 | // Now test that there is an error when we try to init it again 154 | let resp_error = init_log_fd(write_fd); 155 | assert_ne!(resp_error.status_code, FCPResponseStatus::NoError); 156 | destroy_init_log_fd_response(resp_error); 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /distributed.go: -------------------------------------------------------------------------------- 1 | //go:build cgo 2 | // +build cgo 3 | 4 | package ffi 5 | 6 | import ( 7 | "github.com/filecoin-project/filecoin-ffi/cgo" 8 | "github.com/filecoin-project/go-state-types/abi" 9 | "github.com/filecoin-project/go-state-types/proof" 10 | ) 11 | 12 | type FallbackChallenges struct { 13 | Sectors []abi.SectorNumber 14 | Challenges 
map[abi.SectorNumber][]uint64 15 | } 16 | 17 | // GenerateWinningPoStSectorChallenge 18 | func GeneratePoStFallbackSectorChallenges( 19 | proofType abi.RegisteredPoStProof, 20 | minerID abi.ActorID, 21 | randomness abi.PoStRandomness, 22 | sectorIds []abi.SectorNumber, 23 | ) (*FallbackChallenges, error) { 24 | proverID, err := toProverID(minerID) 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | pp, err := toFilRegisteredPoStProof(proofType) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | // this should be a simple cast.. 35 | sectorIdsRaw := make([]uint64, len(sectorIds)) 36 | for i := range sectorIds { 37 | sectorIdsRaw[i] = uint64(sectorIds[i]) 38 | } 39 | 40 | randomnessBytes := cgo.AsByteArray32(randomness) 41 | ids, challenges, err := cgo.GenerateFallbackSectorChallenges(pp, &randomnessBytes, cgo.AsSliceRefUint64(sectorIdsRaw), &proverID) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | out := FallbackChallenges{ 47 | Sectors: make([]abi.SectorNumber, len(ids)), 48 | Challenges: make(map[abi.SectorNumber][]uint64), 49 | } 50 | for idx := range ids { 51 | secNum := abi.SectorNumber(ids[idx]) 52 | out.Sectors[idx] = secNum 53 | out.Challenges[secNum] = challenges[idx] 54 | } 55 | 56 | return &out, nil 57 | } 58 | 59 | func GenerateSingleVanillaProof( 60 | replica PrivateSectorInfo, 61 | challenges []uint64, 62 | ) ([]byte, error) { 63 | 64 | rep, err := toFilPrivateReplicaInfo(replica) 65 | if err != nil { 66 | return nil, err 67 | } 68 | 69 | return cgo.GenerateSingleVanillaProof(rep, cgo.AsSliceRefUint64(challenges)) 70 | } 71 | 72 | func GenerateWinningPoStWithVanilla( 73 | proofType abi.RegisteredPoStProof, 74 | minerID abi.ActorID, 75 | randomness abi.PoStRandomness, 76 | proofs [][]byte, 77 | ) ([]proof.PoStProof, error) { 78 | pp, err := toFilRegisteredPoStProof(proofType) 79 | if err != nil { 80 | return nil, err 81 | } 82 | 83 | proverID, err := toProverID(minerID) 84 | if err != nil { 85 | return nil, err 86 | } 87 | 
fproofs, cleanup := toVanillaProofs(proofs) 88 | defer cleanup() 89 | 90 | randomnessBytes := cgo.AsByteArray32(randomness) 91 | resp, err := cgo.GenerateWinningPoStWithVanilla(pp, &randomnessBytes, &proverID, cgo.AsSliceRefSliceBoxedUint8(fproofs)) 92 | if err != nil { 93 | return nil, err 94 | } 95 | 96 | out, err := fromFilPoStProofs(resp) 97 | if err != nil { 98 | return nil, err 99 | } 100 | 101 | return out, nil 102 | } 103 | 104 | func GenerateWindowPoStWithVanilla( 105 | proofType abi.RegisteredPoStProof, 106 | minerID abi.ActorID, 107 | randomness abi.PoStRandomness, 108 | proofs [][]byte, 109 | ) ([]proof.PoStProof, error) { 110 | pp, err := toFilRegisteredPoStProof(proofType) 111 | if err != nil { 112 | return nil, err 113 | } 114 | 115 | proverID, err := toProverID(minerID) 116 | if err != nil { 117 | return nil, err 118 | } 119 | fproofs, cleaner := toVanillaProofs(proofs) 120 | defer cleaner() 121 | 122 | randomnessBytes := cgo.AsByteArray32(randomness) 123 | rawProofs, _, err := cgo.GenerateWindowPoStWithVanilla(pp, &randomnessBytes, &proverID, cgo.AsSliceRefSliceBoxedUint8(fproofs)) 124 | if err != nil { 125 | return nil, err 126 | } 127 | 128 | out, err := fromFilPoStProofs(rawProofs) 129 | if err != nil { 130 | return nil, err 131 | } 132 | 133 | return out, nil 134 | } 135 | 136 | type PartitionProof proof.PoStProof 137 | 138 | func GenerateSinglePartitionWindowPoStWithVanilla( 139 | proofType abi.RegisteredPoStProof, 140 | minerID abi.ActorID, 141 | randomness abi.PoStRandomness, 142 | proofs [][]byte, 143 | partitionIndex uint, 144 | ) (*PartitionProof, error) { 145 | pp, err := toFilRegisteredPoStProof(proofType) 146 | if err != nil { 147 | return nil, err 148 | } 149 | 150 | proverID, err := toProverID(minerID) 151 | if err != nil { 152 | return nil, err 153 | } 154 | fproofs, cleaner := toVanillaProofs(proofs) 155 | defer cleaner() 156 | 157 | randomnessBytes := cgo.AsByteArray32(randomness) 158 | resp, _, err := 
cgo.GenerateSingleWindowPoStWithVanilla( 159 | pp, 160 | &randomnessBytes, 161 | &proverID, 162 | cgo.AsSliceRefSliceBoxedUint8(fproofs), 163 | partitionIndex, 164 | ) 165 | if err != nil { 166 | return nil, err 167 | } 168 | 169 | dpp, err := fromFilRegisteredPoStProof(resp.RegisteredProof) 170 | if err != nil { 171 | return nil, err 172 | } 173 | 174 | out := PartitionProof{ 175 | PoStProof: dpp, 176 | ProofBytes: resp.Proof, 177 | } 178 | 179 | return &out, nil 180 | } 181 | 182 | func MergeWindowPoStPartitionProofs( 183 | proofType abi.RegisteredPoStProof, 184 | partitionProofs []PartitionProof, 185 | ) (*proof.PoStProof, error) { 186 | pp, err := toFilRegisteredPoStProof(proofType) 187 | if err != nil { 188 | return nil, err 189 | } 190 | 191 | fproofs, cleaner := toPartitionProofs(partitionProofs) 192 | defer cleaner() 193 | 194 | resp, err := cgo.MergeWindowPoStPartitionProofs(pp, cgo.AsSliceRefSliceBoxedUint8(fproofs)) 195 | if err != nil { 196 | return nil, err 197 | } 198 | 199 | dpp, err := fromFilRegisteredPoStProof(resp.RegisteredProof) 200 | if err != nil { 201 | return nil, err 202 | } 203 | 204 | out := proof.PoStProof{ 205 | PoStProof: dpp, 206 | ProofBytes: resp.Proof, 207 | } 208 | 209 | return &out, nil 210 | } 211 | 212 | func toPartitionProofs(src []PartitionProof) ([]cgo.SliceBoxedUint8, func()) { 213 | out := make([]cgo.SliceBoxedUint8, len(src)) 214 | for idx := range out { 215 | out[idx] = cgo.AllocSliceBoxedUint8(src[idx].ProofBytes) 216 | } 217 | 218 | return out, makeCleanerSBU(out, len(src)) 219 | } 220 | -------------------------------------------------------------------------------- /bls_test.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestDeterministicPrivateKeyGeneration(t *testing.T) { 14 | 
rand.Seed(time.Now().UnixNano()) 15 | 16 | for i := 0; i < 10000; i++ { 17 | var xs [32]byte 18 | n, err := rand.Read(xs[:]) 19 | require.NoError(t, err) 20 | require.Equal(t, len(xs), n) 21 | 22 | first := PrivateKeyGenerateWithSeed(xs) 23 | secnd := PrivateKeyGenerateWithSeed(xs) 24 | 25 | assert.Equal(t, first, secnd) 26 | } 27 | } 28 | 29 | func TestBLSSigningAndVerification(t *testing.T) { 30 | // generate private keys 31 | fooPrivateKey := PrivateKeyGenerate() 32 | barPrivateKey := PrivateKeyGenerate() 33 | 34 | // get the public keys for the private keys 35 | fooPublicKey := PrivateKeyPublicKey(fooPrivateKey) 36 | barPublicKey := PrivateKeyPublicKey(barPrivateKey) 37 | 38 | // make messages to sign with the keys 39 | fooMessage := Message("hello foo") 40 | barMessage := Message("hello bar!") 41 | 42 | // calculate the digests of the messages 43 | fooDigest := Hash(fooMessage) 44 | barDigest := Hash(barMessage) 45 | 46 | // get the signature when signing the messages with the private keys 47 | fooSignature := PrivateKeySign(fooPrivateKey, fooMessage) 48 | barSignature := PrivateKeySign(barPrivateKey, barMessage) 49 | 50 | // get the aggregateSign 51 | aggregateSign := Aggregate([]Signature{*fooSignature, *barSignature}) 52 | 53 | // assert the foo message was signed with the foo key 54 | assert.True(t, Verify(fooSignature, []Digest{fooDigest}, []PublicKey{*fooPublicKey})) 55 | 56 | // assert the bar message was signed with the bar key 57 | assert.True(t, Verify(barSignature, []Digest{barDigest}, []PublicKey{*barPublicKey})) 58 | 59 | // assert the foo message was signed with the foo key 60 | assert.True(t, HashVerify(fooSignature, []Message{fooMessage}, []PublicKey{*fooPublicKey})) 61 | 62 | // assert the bar message was signed with the bar key 63 | assert.True(t, HashVerify(barSignature, []Message{barMessage}, []PublicKey{*barPublicKey})) 64 | 65 | // assert the foo message was not signed by the bar key 66 | assert.False(t, Verify(fooSignature, 
[]Digest{fooDigest}, []PublicKey{*barPublicKey})) 67 | 68 | // assert the bar/foo message was not signed by the foo/bar key 69 | assert.False(t, Verify(barSignature, []Digest{barDigest}, []PublicKey{*fooPublicKey})) 70 | assert.False(t, Verify(barSignature, []Digest{fooDigest}, []PublicKey{*barPublicKey})) 71 | assert.False(t, Verify(fooSignature, []Digest{barDigest}, []PublicKey{*fooPublicKey})) 72 | 73 | //assert the foo and bar message was signed with the foo and bar key 74 | assert.True(t, HashVerify(aggregateSign, []Message{fooMessage, barMessage}, []PublicKey{*fooPublicKey, *barPublicKey})) 75 | 76 | //assert the bar and foo message was not signed by the foo and bar key 77 | assert.False(t, HashVerify(aggregateSign, []Message{fooMessage, barMessage}, []PublicKey{*fooPublicKey})) 78 | } 79 | 80 | func BenchmarkBLSVerify(b *testing.B) { 81 | priv := PrivateKeyGenerate() 82 | 83 | msg := Message("this is a message that i will be signing") 84 | digest := Hash(msg) 85 | 86 | sig := PrivateKeySign(priv, msg) 87 | // fmt.Println("SIG SIZE: ", len(sig)) 88 | // fmt.Println("SIG: ", sig) 89 | pubk := PrivateKeyPublicKey(priv) 90 | 91 | b.ResetTimer() 92 | for i := 0; i < b.N; i++ { 93 | if !Verify(sig, []Digest{digest}, []PublicKey{*pubk}) { 94 | b.Fatal("failed to verify") 95 | } 96 | } 97 | } 98 | 99 | func TestBlsAggregateErrors(t *testing.T) { 100 | t.Run("no signatures", func(t *testing.T) { 101 | var empty []Signature 102 | out := Aggregate(empty) 103 | require.Nil(t, out) 104 | }) 105 | 106 | t.Run("nil signatures", func(t *testing.T) { 107 | out := Aggregate(nil) 108 | require.Nil(t, out) 109 | }) 110 | } 111 | 112 | func BenchmarkBLSVerifyBatch(b *testing.B) { 113 | b.Run("10", benchmarkBLSVerifyBatchSize(10)) 114 | b.Run("50", benchmarkBLSVerifyBatchSize(50)) 115 | b.Run("100", benchmarkBLSVerifyBatchSize(100)) 116 | b.Run("300", benchmarkBLSVerifyBatchSize(300)) 117 | b.Run("1000", benchmarkBLSVerifyBatchSize(1000)) 118 | b.Run("4000", 
benchmarkBLSVerifyBatchSize(4000)) 119 | } 120 | 121 | func benchmarkBLSVerifyBatchSize(size int) func(b *testing.B) { 122 | return func(b *testing.B) { 123 | var digests []Digest 124 | var msgs []Message 125 | var sigs []Signature 126 | var pubks []PublicKey 127 | for i := 0; i < size; i++ { 128 | msg := Message(fmt.Sprintf("cats cats cats cats %d %d %d dogs", i, i, i)) 129 | msgs = append(msgs, msg) 130 | digests = append(digests, Hash(msg)) 131 | priv := PrivateKeyGenerate() 132 | sig := PrivateKeySign(priv, msg) 133 | sigs = append(sigs, *sig) 134 | pubk := PrivateKeyPublicKey(priv) 135 | pubks = append(pubks, *pubk) 136 | } 137 | 138 | t := time.Now() 139 | agsig := Aggregate(sigs) 140 | fmt.Println("Aggregate took: ", time.Since(t)) 141 | 142 | b.ResetTimer() 143 | for i := 0; i < b.N; i++ { 144 | if !Verify(agsig, digests, pubks) { 145 | b.Fatal("failed to verify") 146 | } 147 | } 148 | } 149 | } 150 | 151 | func BenchmarkBLSHashAndVerify(b *testing.B) { 152 | priv := PrivateKeyGenerate() 153 | 154 | msg := Message("this is a message that i will be signing") 155 | sig := PrivateKeySign(priv, msg) 156 | 157 | // fmt.Println("SIG SIZE: ", len(sig)) 158 | // fmt.Println("SIG: ", sig) 159 | pubk := PrivateKeyPublicKey(priv) 160 | 161 | b.ResetTimer() 162 | for i := 0; i < b.N; i++ { 163 | digest := Hash(msg) 164 | if !Verify(sig, []Digest{digest}, []PublicKey{*pubk}) { 165 | b.Fatal("failed to verify") 166 | } 167 | } 168 | } 169 | 170 | func BenchmarkBLSHashVerify(b *testing.B) { 171 | priv := PrivateKeyGenerate() 172 | 173 | msg := Message("this is a message that i will be signing") 174 | sig := PrivateKeySign(priv, msg) 175 | 176 | // fmt.Println("SIG SIZE: ", len(sig)) 177 | // fmt.Println("SIG: ", sig) 178 | pubk := PrivateKeyPublicKey(priv) 179 | 180 | b.ResetTimer() 181 | for i := 0; i < b.N; i++ { 182 | if !HashVerify(sig, []Message{msg}, []PublicKey{*pubk}) { 183 | b.Fatal("failed to verify") 184 | } 185 | } 186 | } 187 | 
-------------------------------------------------------------------------------- /proofs_test.go: -------------------------------------------------------------------------------- 1 | package ffi 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "io" 7 | "io/ioutil" 8 | "math/big" 9 | "testing" 10 | 11 | commcid "github.com/filecoin-project/go-fil-commcid" 12 | "github.com/filecoin-project/go-state-types/abi" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | 16 | "github.com/filecoin-project/filecoin-ffi/cgo" 17 | ) 18 | 19 | func TestRegisteredSealProofFunctions(t *testing.T) { 20 | WorkflowRegisteredSealProofFunctions(newTestingTeeHelper(t)) 21 | } 22 | 23 | func TestRegisteredPoStProofFunctions(t *testing.T) { 24 | WorkflowRegisteredPoStProofFunctions(newTestingTeeHelper(t)) 25 | } 26 | 27 | func TestProofsLifecycle(t *testing.T) { 28 | WorkflowProofsLifecycle(newTestingTeeHelper(t)) 29 | } 30 | 31 | func TestGetGPUDevicesDoesNotProduceAnError(t *testing.T) { 32 | WorkflowGetGPUDevicesDoesNotProduceAnError(newTestingTeeHelper(t)) 33 | } 34 | 35 | func TestGenerateWinningPoStSectorChallenge(t *testing.T) { 36 | WorkflowGenerateWinningPoStSectorChallenge(newTestingTeeHelper(t)) 37 | } 38 | 39 | func TestGenerateWinningPoStSectorChallengeEdgeCase(t *testing.T) { 40 | WorkflowGenerateWinningPoStSectorChallengeEdgeCase(newTestingTeeHelper(t)) 41 | } 42 | 43 | func TestJsonMarshalSymmetry(t *testing.T) { 44 | for i := 0; i < 100; i++ { 45 | xs := make([]publicSectorInfo, 10) 46 | for j := 0; j < 10; j++ { 47 | var x publicSectorInfo 48 | var commR [32]byte 49 | _, err := io.ReadFull(rand.Reader, commR[:]) 50 | require.NoError(t, err) 51 | 52 | // commR is defined as 32 long above, error can be safely ignored 53 | x.SealedCID, _ = commcid.ReplicaCommitmentV1ToCID(commR[:]) 54 | 55 | n, err := rand.Int(rand.Reader, big.NewInt(500)) 56 | require.NoError(t, err) 57 | x.SectorNum = abi.SectorNumber(n.Uint64()) 58 | xs[j] = x 59 | } 
60 | toSerialize := newSortedPublicSectorInfo(xs...) 61 | 62 | serialized, err := toSerialize.MarshalJSON() 63 | require.NoError(t, err) 64 | 65 | var fromSerialized SortedPublicSectorInfo 66 | err = fromSerialized.UnmarshalJSON(serialized) 67 | require.NoError(t, err) 68 | 69 | require.Equal(t, toSerialize, fromSerialized) 70 | } 71 | } 72 | 73 | func TestDoesNotExhaustFileDescriptors(t *testing.T) { 74 | m := 500 // loops 75 | n := uint64(508) // quantity of piece bytes 76 | 77 | for i := 0; i < m; i++ { 78 | // create a temporary file over which we'll compute CommP 79 | file, err := ioutil.TempFile("", "") 80 | if err != nil { 81 | panic(err) 82 | } 83 | 84 | // create a slice of random bytes (represents our piece) 85 | b := make([]byte, n) 86 | 87 | // load up our byte slice with random bytes 88 | if _, err = rand.Read(b); err != nil { 89 | panic(err) 90 | } 91 | 92 | // write buffer to temp file 93 | if _, err := bytes.NewBuffer(b).WriteTo(file); err != nil { 94 | panic(err) 95 | } 96 | 97 | // seek to beginning of file 98 | if _, err := file.Seek(0, 0); err != nil { 99 | panic(err) 100 | } 101 | 102 | if _, err = GeneratePieceCID(abi.RegisteredSealProof_StackedDrg2KiBV1, file.Name(), abi.UnpaddedPieceSize(n)); err != nil { 103 | panic(err) 104 | } 105 | 106 | if err = file.Close(); err != nil { 107 | panic(err) 108 | } 109 | } 110 | } 111 | 112 | func newTestingTeeHelper(t *testing.T) *testingTeeHelper { 113 | return &testingTeeHelper{t: t} 114 | } 115 | 116 | type testingTeeHelper struct { 117 | t *testing.T 118 | } 119 | 120 | func (tth *testingTeeHelper) RequireTrue(value bool, msgAndArgs ...interface{}) { 121 | require.True(tth.t, value, msgAndArgs) 122 | } 123 | 124 | func (tth *testingTeeHelper) RequireNoError(err error, msgAndArgs ...interface{}) { 125 | require.NoError(tth.t, err, msgAndArgs) 126 | } 127 | 128 | func (tth *testingTeeHelper) RequireEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { 129 | require.Equal(tth.t, 
expected, actual, msgAndArgs) 130 | } 131 | 132 | func (tth *testingTeeHelper) AssertNoError(err error, msgAndArgs ...interface{}) bool { 133 | return assert.NoError(tth.t, err, msgAndArgs) 134 | } 135 | 136 | func (tth *testingTeeHelper) AssertEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool { 137 | return assert.Equal(tth.t, expected, actual, msgAndArgs) 138 | } 139 | 140 | func (tth *testingTeeHelper) AssertTrue(value bool, msgAndArgs ...interface{}) bool { 141 | return assert.True(tth.t, value, msgAndArgs) 142 | } 143 | 144 | func TestProofTypes(t *testing.T) { 145 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWinning2KiBV1, abi.RegisteredPoStProof_StackedDrgWinning2KiBV1) 146 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWinning8MiBV1, abi.RegisteredPoStProof_StackedDrgWinning8MiBV1) 147 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWinning512MiBV1, abi.RegisteredPoStProof_StackedDrgWinning512MiBV1) 148 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWinning32GiBV1, abi.RegisteredPoStProof_StackedDrgWinning32GiBV1) 149 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWinning64GiBV1, abi.RegisteredPoStProof_StackedDrgWinning64GiBV1) 150 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1) 151 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1) 152 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1) 153 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1) 154 | assert.EqualValues(t, cgo.RegisteredPoStProofStackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1) 155 | 156 | assert.EqualValues(t, cgo.RegisteredSealProofStackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg2KiBV1) 157 | assert.EqualValues(t, 
cgo.RegisteredSealProofStackedDrg8MiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1) 158 | assert.EqualValues(t, cgo.RegisteredSealProofStackedDrg512MiBV1, abi.RegisteredSealProof_StackedDrg512MiBV1) 159 | assert.EqualValues(t, cgo.RegisteredSealProofStackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1) 160 | assert.EqualValues(t, cgo.RegisteredSealProofStackedDrg64GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1) 161 | } 162 | -------------------------------------------------------------------------------- /rust/src/fvm/blockstore/cgo.rs: -------------------------------------------------------------------------------- 1 | use std::ptr; 2 | 3 | use anyhow::{anyhow, Result}; 4 | use cid::Cid; 5 | use fvm3_shared::MAX_CID_LEN; 6 | use fvm_ipld_blockstore::Blockstore; 7 | 8 | use super::super::cgo::*; 9 | 10 | /// The maximum amount of data to buffer in a batch before writing it to the underlying blockstore. 11 | const MAX_BUF_SIZE: usize = 64 << 20; // 64MiB 12 | /// The maximum number of blocks to buffer in a batch before before writing it to the underlying 13 | /// blockstore. This will allocate 0.5MiB of memory to store offsets. 14 | const MAX_BLOCK_BATCH: usize = 64 << 10; 15 | 16 | pub struct CgoBlockstore { 17 | handle: u64, 18 | } 19 | 20 | impl CgoBlockstore { 21 | /// Construct a new blockstore from a handle. 22 | pub fn new(handle: u64) -> CgoBlockstore { 23 | CgoBlockstore { handle } 24 | } 25 | } 26 | 27 | impl Blockstore for CgoBlockstore { 28 | fn has(&self, k: &Cid) -> Result { 29 | let k_bytes = k.to_bytes(); 30 | unsafe { 31 | match cgo_blockstore_has(self.handle, k_bytes.as_ptr(), k_bytes.len() as i32) { 32 | // We shouldn't get an "error not found" here, but there's no reason to be strict 33 | // about it. 34 | 0 => Ok(false), 35 | x if x == FvmError::NotFound as i32 => Ok(false), 36 | 1 => Ok(true), 37 | // Panic on unknown values. There's a bug in the program. 38 | r @ 2.. 
=> panic!("invalid return value from has: {}", r), 39 | // Panic if the store isn't registered. This means something _very_ unsafe is going 40 | // on and there is a bug in the program. 41 | x if x == FvmError::InvalidHandle as i32 => { 42 | panic!("blockstore {} not registered", self.handle) 43 | } 44 | // Otherwise, return "other". We should add error codes in the future. 45 | e => Err(anyhow!("cgo blockstore 'has' failed with error code {}", e)), 46 | } 47 | } 48 | } 49 | 50 | fn get(&self, k: &Cid) -> Result>> { 51 | let k_bytes = k.to_bytes(); 52 | unsafe { 53 | let mut buf: *mut u8 = ptr::null_mut(); 54 | let mut size: i32 = 0; 55 | match cgo_blockstore_get( 56 | self.handle, 57 | k_bytes.as_ptr(), 58 | k_bytes.len() as i32, 59 | &mut buf, 60 | &mut size, 61 | ) { 62 | 0 => Ok(Some(Vec::from_raw_parts(buf, size as usize, size as usize))), 63 | r @ 1.. => panic!("invalid return value from get: {}", r), 64 | x if x == FvmError::InvalidHandle as i32 => { 65 | panic!("blockstore {} not registered", self.handle) 66 | } 67 | x if x == FvmError::NotFound as i32 => Ok(None), 68 | e => Err(anyhow!("cgo blockstore 'get' failed with error code {}", e)), 69 | } 70 | } 71 | } 72 | 73 | fn put_many_keyed(&self, blocks: I) -> Result<()> 74 | where 75 | Self: Sized, 76 | D: AsRef<[u8]>, 77 | I: IntoIterator, 78 | { 79 | fn flush_buffered(handle: u64, lengths: &mut Vec, buf: &mut Vec) -> Result<()> { 80 | if buf.is_empty() { 81 | return Ok(()); 82 | } 83 | 84 | unsafe { 85 | let result = cgo_blockstore_put_many( 86 | handle, 87 | lengths.as_ptr(), 88 | lengths.len() as i32, 89 | buf.as_ptr(), 90 | ); 91 | buf.clear(); 92 | lengths.clear(); 93 | 94 | match result { 95 | 0 => Ok(()), 96 | r @ 1.. => panic!("invalid return value from put_many: {}", r), 97 | x if x == FvmError::InvalidHandle as i32 => { 98 | panic!("blockstore {} not registered", handle) 99 | } 100 | // This error makes no sense. 
101 | x if x == FvmError::NotFound as i32 => panic!("not found error on put"), 102 | e => Err(anyhow!("cgo blockstore 'put' failed with error code {}", e)), 103 | } 104 | } 105 | } 106 | 107 | let mut lengths = Vec::with_capacity(MAX_BLOCK_BATCH); 108 | let mut buf = Vec::with_capacity(MAX_BUF_SIZE); 109 | for (k, block) in blocks { 110 | let block = block.as_ref(); 111 | // We limit both the max number of blocks and the max buffer size. Technically, we could 112 | // _just_ limit the buffer size as that should bound the number of blocks. However, 113 | // bounding the maximum number of blocks means we can allocate the vector up-front and 114 | // avoids any re-allocation, copying, etc. 115 | if lengths.len() >= MAX_BLOCK_BATCH 116 | || MAX_CID_LEN + block.len() + buf.len() > MAX_BUF_SIZE 117 | { 118 | flush_buffered(self.handle, &mut lengths, &mut buf)?; 119 | } 120 | 121 | let start = buf.len(); 122 | k.write_bytes(&mut buf)?; 123 | buf.extend_from_slice(block); 124 | let size = buf.len() - start; 125 | lengths.push(size as i32); 126 | } 127 | flush_buffered(self.handle, &mut lengths, &mut buf) 128 | } 129 | 130 | fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()> { 131 | let k_bytes = k.to_bytes(); 132 | unsafe { 133 | match cgo_blockstore_put( 134 | self.handle, 135 | k_bytes.as_ptr(), 136 | k_bytes.len() as i32, 137 | block.as_ptr(), 138 | block.len() as i32, 139 | ) { 140 | 0 => Ok(()), 141 | r @ 1.. => panic!("invalid return value from put: {}", r), 142 | x if x == FvmError::InvalidHandle as i32 => { 143 | panic!("blockstore {} not registered", self.handle) 144 | } 145 | // This error makes no sense. 
146 | x if x == FvmError::NotFound as i32 => panic!("not found error on put"), 147 | e => Err(anyhow!("cgo blockstore 'put' failed with error code {}", e)), 148 | } 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /fvm.go: -------------------------------------------------------------------------------- 1 | //go:build cgo && (amd64 || arm64 || riscv64) 2 | // +build cgo 3 | // +build amd64 arm64 riscv64 4 | 5 | package ffi 6 | 7 | // #cgo linux LDFLAGS: ${SRCDIR}/libfilcrypto.a -Wl,-unresolved-symbols=ignore-all 8 | // #cgo darwin LDFLAGS: ${SRCDIR}/libfilcrypto.a -Wl,-undefined,dynamic_lookup 9 | // #cgo pkg-config: ${SRCDIR}/filcrypto.pc 10 | // #include "./filcrypto.h" 11 | import "C" 12 | import ( 13 | "context" 14 | "fmt" 15 | gobig "math/big" 16 | "runtime" 17 | 18 | "github.com/filecoin-project/filecoin-ffi/cgo" 19 | "github.com/filecoin-project/go-state-types/abi" 20 | "github.com/filecoin-project/go-state-types/big" 21 | "github.com/filecoin-project/go-state-types/network" 22 | "github.com/ipfs/go-cid" 23 | "golang.org/x/xerrors" 24 | ) 25 | 26 | type FVM struct { 27 | executor *cgo.FvmMachine 28 | } 29 | 30 | const ( 31 | applyExplicit = iota 32 | applyImplicit 33 | ) 34 | 35 | type FVMOpts struct { 36 | FVMVersion uint64 37 | Externs cgo.Externs 38 | 39 | Epoch abi.ChainEpoch 40 | Timestamp uint64 41 | ChainID uint64 42 | BaseFee abi.TokenAmount 43 | BaseCircSupply abi.TokenAmount 44 | NetworkVersion network.Version 45 | StateBase cid.Cid 46 | Tracing bool 47 | 48 | Debug bool 49 | ActorRedirect cid.Cid 50 | } 51 | 52 | // CreateFVM creates a new FVM instance. 
53 | func CreateFVM(opts *FVMOpts) (*FVM, error) { 54 | baseFeeHi, baseFeeLo, err := splitBigInt(opts.BaseFee) 55 | if err != nil { 56 | return nil, xerrors.Errorf("invalid basefee: %w", err) 57 | } 58 | baseCircSupplyHi, baseCircSupplyLo, err := splitBigInt(opts.BaseCircSupply) 59 | if err != nil { 60 | return nil, xerrors.Errorf("invalid circ supply: %w", err) 61 | } 62 | 63 | exHandle := cgo.Register(context.TODO(), opts.Externs) 64 | var executor *cgo.FvmMachine 65 | if !opts.Debug { 66 | executor, err = cgo.CreateFvmMachine(cgo.FvmRegisteredVersion(opts.FVMVersion), 67 | uint64(opts.Epoch), 68 | opts.Timestamp, 69 | opts.ChainID, 70 | baseFeeHi, 71 | baseFeeLo, 72 | baseCircSupplyHi, 73 | baseCircSupplyLo, 74 | uint64(opts.NetworkVersion), 75 | cgo.AsSliceRefUint8(opts.StateBase.Bytes()), 76 | opts.Tracing, 77 | exHandle, exHandle, 78 | ) 79 | } else { 80 | executor, err = cgo.CreateFvmDebugMachine(cgo.FvmRegisteredVersion(opts.FVMVersion), 81 | uint64(opts.Epoch), 82 | opts.Timestamp, 83 | opts.ChainID, 84 | baseFeeHi, 85 | baseFeeLo, 86 | baseCircSupplyHi, 87 | baseCircSupplyLo, 88 | uint64(opts.NetworkVersion), 89 | cgo.AsSliceRefUint8(opts.StateBase.Bytes()), 90 | cgo.AsSliceRefUint8(opts.ActorRedirect.Bytes()), 91 | true, 92 | exHandle, exHandle, 93 | ) 94 | } 95 | 96 | if err != nil { 97 | return nil, err 98 | } 99 | 100 | fvm := &FVM{ 101 | executor: executor, 102 | } 103 | runtime.SetFinalizer(fvm, func(f *FVM) { 104 | // Just to be extra safe 105 | if f.executor == nil { 106 | return 107 | } 108 | 109 | executor := f.executor 110 | f.executor = nil 111 | executor.Destroy() 112 | cgo.Unregister(exHandle) 113 | }) 114 | 115 | return fvm, nil 116 | } 117 | 118 | func (f *FVM) ApplyMessage(msgBytes []byte, chainLen uint) (*ApplyRet, error) { 119 | // NOTE: we need to call KeepAlive here (and below) because go doesn't guarantee that the 120 | // receiver will live to the end of the function. 
// buildResponse converts the raw FFI execute response into an ApplyRet,
// decoding the optional events root CID and recombining the hi/lo halves of
// each 128-bit token amount.
func buildResponse(resp cgo.FvmMachineExecuteResponseGo) (*ApplyRet, error) {
	var eventsRoot *cid.Cid
	// An empty EventsRoot means no events AMT was produced; only decode the
	// CID when bytes are present.
	if len(resp.EventsRoot) > 0 {
		if eventsRootCid, err := cid.Cast(resp.EventsRoot); err != nil {
			return nil, fmt.Errorf("failed to cast events root CID: %w", err)
		} else {
			eventsRoot = &eventsRootCid
		}
	}

	return &ApplyRet{
		Return:             resp.ReturnVal,
		ExitCode:           resp.ExitCode,
		GasUsed:            int64(resp.GasUsed),
		MinerPenalty:       reformBigInt(resp.PenaltyHi, resp.PenaltyLo),
		MinerTip:           reformBigInt(resp.MinerTipHi, resp.MinerTipLo),
		BaseFeeBurn:        reformBigInt(resp.BaseFeeBurnHi, resp.BaseFeeBurnLo),
		OverEstimationBurn: reformBigInt(resp.OverEstimationBurnHi, resp.OverEstimationBurnLo),
		Refund:             reformBigInt(resp.RefundHi, resp.RefundLo),
		GasRefund:          int64(resp.GasRefund),
		GasBurned:          int64(resp.GasBurned),
		ExecTraceBytes:     resp.ExecTrace,
		FailureInfo:        resp.FailureInfo,
		EventsRoot:         eventsRoot,
		EventsBytes:        resp.Events,
	}, nil
}
181 | defer runtime.KeepAlive(f) 182 | stateRoot, err := cgo.FvmMachineFlush(f.executor) 183 | if err != nil { 184 | return cid.Undef, err 185 | } 186 | 187 | return cid.Cast(stateRoot) 188 | } 189 | 190 | type ApplyRet struct { 191 | Return []byte 192 | ExitCode uint64 193 | GasUsed int64 194 | MinerPenalty abi.TokenAmount 195 | MinerTip abi.TokenAmount 196 | BaseFeeBurn abi.TokenAmount 197 | OverEstimationBurn abi.TokenAmount 198 | Refund abi.TokenAmount 199 | GasRefund int64 200 | GasBurned int64 201 | ExecTraceBytes []byte 202 | FailureInfo string 203 | EventsRoot *cid.Cid 204 | EventsBytes []byte 205 | } 206 | 207 | // NOTE: We only support 64bit platforms 208 | 209 | // returns hi, lo 210 | func splitBigInt(i big.Int) (hi uint64, lo uint64, err error) { 211 | if i.Sign() < 0 { 212 | return 0, 0, xerrors.Errorf("negative number: %s", i) 213 | } 214 | words := i.Bits() 215 | switch len(words) { 216 | case 2: 217 | hi = uint64(words[1]) 218 | fallthrough 219 | case 1: 220 | lo = uint64(words[0]) 221 | case 0: 222 | default: 223 | return 0, 0, xerrors.Errorf("exceeds max bigint size: %s", i) 224 | } 225 | return hi, lo, nil 226 | } 227 | 228 | func reformBigInt(hi, lo uint64) big.Int { 229 | var words []gobig.Word 230 | if hi > 0 { 231 | words = []gobig.Word{gobig.Word(lo), gobig.Word(hi)} 232 | } else if lo > 0 { 233 | words = []gobig.Word{gobig.Word(lo)} 234 | } else { 235 | return big.Zero() 236 | } 237 | int := new(gobig.Int) 238 | int.SetBits(words) 239 | return big.NewFromGo(int) 240 | } 241 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status][circleci-image]][circleci-link] 2 | 3 | # Filecoin FFI 4 | 5 | > C and CGO bindings for Filecoin's Rust libraries, i.e: [proofs](https://github.com/filecoin-project/rust-fil-proofs) and [ref-fvm](https://github.com/filecoin-project/ref-fvm). 
This repository is built to enable the reference implementation of Filecoin, [Lotus](https://github.com/filecoin-project/lotus), to consume the Rust libraries that are needed. 6 | 7 | ## Building 8 | 9 | To build and install libfilcrypto, its header file and pkg-config manifest, run: 10 | 11 | ```shell 12 | make 13 | ``` 14 | 15 | To optionally authenticate with GitHub for assets download (to increase API limits) 16 | set `GITHUB_TOKEN` to personal access token. 17 | 18 | If no precompiled static library is available for your operating system, the 19 | build tooling will attempt to compile a static library from local Rust sources. 20 | 21 | ### Installation notes 22 | 23 | By default, building this will download a pre-built binary of the ffi. The advantages for downloading it are faster build times, and not requiring a rust toolchain and build environment. 24 | 25 | The disadvantage to downloading the pre-built binary is that it will not be optimized for your specific hardware. This means that if raw performance is of utmost importance to you, it's highly recommended that you build from source. 26 | 27 | ### Building from Source 28 | 29 | To opt out of downloading precompiled assets, set `FFI_BUILD_FROM_SOURCE=1`: 30 | 31 | To allow portable building of the `blst` dependency, set `FFI_USE_BLST_PORTABLE=1`: 32 | 33 | ```shell 34 | rm .install-filcrypto \ 35 | ; make clean \ 36 | ; FFI_BUILD_FROM_SOURCE=1 FFI_USE_BLST_PORTABLE=1 make 37 | ``` 38 | 39 | By default, a 'gpu' option is used in the proofs library. This feature is also used in FFI unless explicitly disabled. To disable building with the 'gpu' dependency, set `FFI_USE_GPU=0`: 40 | 41 | ```shell 42 | rm .install-filcrypto \ 43 | ; make clean \ 44 | ; FFI_BUILD_FROM_SOURCE=1 FFI_USE_GPU=0 make 45 | ``` 46 | 47 | #### GPU support 48 | 49 | CUDA for GPU support is now enabled by default in the proofs library. 
This feature can optionally be replaced by OpenCL by using `FFI_USE_OPENCL=1` set in the environment when building from source. Alternatively, if the CUDA toolkit (such as `nvcc`) cannot be located in the environment, OpenCL support is used instead. To disable GPU support entirely, set `FFI_USE_GPU=0` in the environment when building from source. 50 | 51 | There is experimental support for faster C2 named "SupraSeal". To enable it, set `FFI_USE_CUDA_SUPRASEAL=1`. It's specific to CUDA and won't work with OpenCL. 52 | 53 | ```shell 54 | rm .install-filcrypto \ 55 | ; make clean \ 56 | ; FFI_BUILD_FROM_SOURCE=1 make 57 | ``` 58 | 59 | By default, a 'multicore-sdr' option is used in the proofs library. This feature is also used in FFI unless explicitly disabled. To disable building with the 'multicore-sdr' dependency, set `FFI_USE_MULTICORE_SDR=0`: 60 | 61 | ```shell 62 | rm .install-filcrypto \ 63 | ; make clean \ 64 | ; FFI_BUILD_FROM_SOURCE=1 FFI_USE_MULTICORE_SDR=0 make 65 | ``` 66 | 67 | ## Updating rust-fil-proofs (via rust-filecoin-proofs-api) 68 | 69 | If rust-fil-proofs has changed from commit X to Y and you wish to get Y into 70 | the filecoin-ffi project, you need to do a few things: 71 | 72 | 1. Update the rust-filecoin-proofs-api [Cargo.toml][1] file to point to Y 73 | 2. Run `cd rust && cargo update -p "filecoin-proofs-api"` from the root of the filecoin-ffi project 74 | 3. After the previous step alters your Cargo.lock file, commit and push 75 | 76 | ## go get 77 | 78 | `go get` needs some additional steps in order to work as expected. 
79 | 80 | Get the source, add this repo as a submodule to your repo, build it and point to it: 81 | 82 | ```shell 83 | $ go get github.com/filecoin-project/filecoin-ffi 84 | $ git submodule add https://github.com/filecoin-project/filecoin-ffi.git extern/filecoin-ffi 85 | $ make -C extern/filecoin-ffi 86 | $ go mod edit -replace=github.com/filecoin-project/filecoin-ffi=./extern/filecoin-ffi 87 | ``` 88 | 89 | ## Updating the Changelog 90 | 91 | The `mkreleaselog` script (in the project root) can be used to generate a good 92 | portion of the filecoin-ffi changelog. For historical reasons, the script must 93 | be run from the root of a filecoin-ffi checkout which is in your `$GOPATH`. 94 | 95 | Run it like so: 96 | 97 | ```shell 98 | ./mkreleaselog v0.25.0 v0.26.0 > /tmp/v0.26.0.notes.txt 99 | ``` 100 | 101 | ## Contribution 102 | 103 | ### Maintainers 104 | 105 | The core maintainers of this repository are: 106 | - @nemo & @vmx, from the fil-crypto team 107 | - @lotus-maintainers 108 | - @stebalien, from the FVM team 109 | 110 | Maintainers are not only the contributors of this repository, but also exercise a range of editorial responsibilities to keep the repository organized for the OSS contributors, that includes triage the issues, review and merge/close PRs, publish releases and so on. 111 | 112 | ### Development Guidelines (WIP) 113 | 114 | #### CI Builds 115 | 116 | To start a CI job to build binaries off of a commit push a tag starting with the character `v`, i.e. `v1.22.0-rc2`. 117 | 118 | #### Branches 119 | 120 | `master` is considered as the development branch of this repository. Changes being introduced to master must be tested (programmable and/or manual). The head of the master will be tagged and released upon the merge of each PR automatically. 
121 | 122 | We will cooperates with the [lotus' releases and it's testing flows](https://github.com/filecoin-project/lotus/blob/0c91b0dc1012c3e54b305a76bb25fb68390adf9d/LOTUS_RELEASE_FLOW.md?plain=1#L50) to confirm whether a tagged release is production ready: 123 | 124 | *Non-consensus breaking changes* 125 | - All PRs introduce non-consensus breaking changes can be merged to master as long they have maintainers' approvals. 126 | - Roughly on a monthly basis, lotus will integrate ffi's head in `master` branch, for it's new feature release, and carry it through the testing flows. 127 | - `release/lotus-vX` will be created to determine the commit that lotus integrates in the corresponding release. 128 | - If any bug is found during the testing, the fix should land in master then get backported to `release/lotus-vX`. The updated commit should be integrated into lotus and getting tested. Repeat the steps until it can be considered as stable. 129 | 130 | #Consensus breaking changes* 131 | - Consensus breaking changes should be developed in it's own branch, (branch name is suggested to be: feature branches `feat/` or bug fix branches `bug/`). 132 | - Consensus breaking changes that are scoped into the next immediate network upgrade shall land in `next` branch first. The maintainers are responsible to coordinate on when to land `next` to `master` according to lotus mandatory(network upgrade) release schedules. 133 | - A new dev branch should be created and contributors are responsible to rebase the branch onto `master`/`next` as needed. 
134 | 135 | 136 | 137 | ## License 138 | 139 | MIT or Apache 2.0 140 | 141 | [1]: https://github.com/filecoin-project/rust-filecoin-proofs-api/commit/61fde0e581cc38abc4e13dbe96145c9ad2f1f0f5 142 | 143 | [circleci-image]: https://circleci.com/gh/filecoin-project/filecoin-ffi.svg?branch=master&style=shield 144 | [circleci-link]: https://app.circleci.com/pipelines/github/filecoin-project/filecoin-ffi?branch=master 145 | -------------------------------------------------------------------------------- /rust/src/util/types.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, mem::MaybeUninit, ops::Deref, panic, path::PathBuf, str::Utf8Error}; 2 | 3 | use safer_ffi::prelude::*; 4 | 5 | use super::api::init_log; 6 | 7 | #[derive_ReprC] 8 | #[repr(i32)] 9 | #[derive(PartialEq, Eq, Debug, Copy, Clone)] 10 | pub enum FCPResponseStatus { 11 | // Don't use FCPSuccess, since that complicates description of 'successful' verification. 12 | NoError = 0, 13 | UnclassifiedError = 1, 14 | CallerError = 2, 15 | ReceiverError = 3, 16 | } 17 | 18 | #[cfg(target_os = "linux")] 19 | pub fn as_path_buf(bytes: &[u8]) -> std::result::Result { 20 | use std::ffi::OsStr; 21 | use std::os::unix::ffi::OsStrExt; 22 | 23 | Ok(OsStr::from_bytes(bytes).into()) 24 | } 25 | 26 | #[cfg(not(target_os = "linux"))] 27 | pub fn as_path_buf(bytes: &[u8]) -> std::result::Result { 28 | std::str::from_utf8(bytes).map(Into::into) 29 | } 30 | 31 | #[cfg(test)] 32 | #[cfg(target_os = "linux")] 33 | pub fn as_bytes(path: &std::path::Path) -> &[u8] { 34 | use std::os::unix::ffi::OsStrExt; 35 | 36 | path.as_os_str().as_bytes() 37 | } 38 | 39 | #[cfg(all(test, not(target_os = "linux")))] 40 | pub fn as_bytes(path: &std::path::Path) -> &[u8] { 41 | path.to_str().unwrap().as_bytes() 42 | } 43 | 44 | #[derive_ReprC] 45 | #[repr(C)] 46 | #[derive(Clone)] 47 | pub struct Result { 48 | pub status_code: FCPResponseStatus, 49 | pub error_msg: c_slice::Box, 50 | pub 
value: T, 51 | } 52 | 53 | impl Deref for Result { 54 | type Target = T; 55 | 56 | fn deref(&self) -> &Self::Target { 57 | &self.value 58 | } 59 | } 60 | 61 | impl Default for Result { 62 | fn default() -> Self { 63 | Result { 64 | status_code: FCPResponseStatus::NoError, 65 | error_msg: Default::default(), 66 | value: Default::default(), 67 | } 68 | } 69 | } 70 | 71 | impl From> for Result 72 | where 73 | T: Sized + Default, 74 | E: Display, 75 | { 76 | fn from(r: std::result::Result) -> Self { 77 | match r { 78 | Ok(value) => Self::ok(value), 79 | Err(e) => Self::err(e.to_string().into_bytes().into_boxed_slice()), 80 | } 81 | } 82 | } 83 | 84 | impl From for Result 85 | where 86 | T: Sized, 87 | { 88 | fn from(value: T) -> Self { 89 | Self { 90 | status_code: FCPResponseStatus::NoError, 91 | error_msg: Default::default(), 92 | value, 93 | } 94 | } 95 | } 96 | 97 | impl Result { 98 | pub fn ok(value: T) -> Self { 99 | Result { 100 | status_code: FCPResponseStatus::NoError, 101 | error_msg: Default::default(), 102 | value, 103 | } 104 | } 105 | 106 | pub unsafe fn into_boxed_raw(self) -> *mut Result { 107 | Box::into_raw(Box::new(self)) 108 | } 109 | 110 | pub fn err_with_default(err: impl Into>, value: T) -> Self { 111 | Result { 112 | status_code: FCPResponseStatus::UnclassifiedError, 113 | error_msg: err.into(), 114 | value, 115 | } 116 | } 117 | 118 | /// Safety: value must not be accessed. 
119 | pub unsafe fn err_no_default(err: impl Into>) -> Self { 120 | Result { 121 | status_code: FCPResponseStatus::UnclassifiedError, 122 | error_msg: err.into(), 123 | value: MaybeUninit::zeroed().assume_init(), 124 | } 125 | } 126 | } 127 | 128 | impl Result { 129 | pub fn err(err: impl Into>) -> Self { 130 | Result { 131 | status_code: FCPResponseStatus::UnclassifiedError, 132 | error_msg: err.into(), 133 | value: Default::default(), 134 | } 135 | } 136 | } 137 | 138 | pub type GpuDeviceResponse = Result>>; 139 | 140 | #[ffi_export] 141 | pub fn destroy_gpu_device_response(ptr: repr_c::Box) { 142 | drop(ptr) 143 | } 144 | 145 | pub type InitLogFdResponse = Result<()>; 146 | 147 | #[ffi_export] 148 | pub fn destroy_init_log_fd_response(ptr: repr_c::Box) { 149 | drop(ptr) 150 | } 151 | 152 | /// Catch panics and return an error response 153 | pub fn catch_panic_response(name: &str, callback: F) -> repr_c::Box> 154 | where 155 | T: Sized + Default, 156 | F: FnOnce() -> anyhow::Result + std::panic::UnwindSafe, 157 | { 158 | catch_panic_response_raw(name, || { 159 | Result::from(callback().map_err(|err| format!("{err:?}"))) 160 | }) 161 | } 162 | 163 | pub fn catch_panic_response_no_log(callback: F) -> repr_c::Box> 164 | where 165 | T: Sized + Default, 166 | F: FnOnce() -> anyhow::Result + std::panic::UnwindSafe, 167 | { 168 | catch_panic_response_raw_no_log(|| Result::from(callback().map_err(|err| format!("{err:?}")))) 169 | } 170 | 171 | pub fn catch_panic_response_raw_no_log(callback: F) -> repr_c::Box> 172 | where 173 | T: Sized + Default, 174 | F: FnOnce() -> Result + std::panic::UnwindSafe, 175 | { 176 | let result = match panic::catch_unwind(callback) { 177 | Ok(t) => t, 178 | Err(panic) => { 179 | let error_msg = match panic.downcast_ref::<&'static str>() { 180 | Some(message) => message, 181 | _ => "no unwind information", 182 | }; 183 | 184 | Result::from(Err(format!("Rust panic: {}", error_msg))) 185 | } 186 | }; 187 | 188 | repr_c::Box::new(result) 189 | 
/// Generate a destructor for the given type wrapped in a `repr_c::Box`.
///
/// Expands to an `#[ffi_export]` function named `$name` that takes ownership
/// of the box and drops it, letting C callers free the Rust-side allocation.
#[macro_export]
macro_rules! destructor {
    ($name:ident, $type:ty) => {
        /// Destroys the passed in `repr_c::Box<$type>`.
        #[ffi_export]
        fn $name(ptr: repr_c::Box<$type>) {
            drop(ptr);
        }
    };
}
5 | #cgo pkg-config: ${SRCDIR}/../filcrypto.pc 6 | #include "../filcrypto.h" 7 | #include 8 | */ 9 | import "C" 10 | import ( 11 | "errors" 12 | "unsafe" 13 | ) 14 | 15 | var ( 16 | emptyUint8 C.uint8_t = 0 17 | emptyUint64 C.uint64_t = 0 18 | emptyUint C.size_t = 0 19 | emptyAggregationInputs C.AggregationInputs_t = C.AggregationInputs_t{} 20 | emptyPublicReplicaInfo C.PublicReplicaInfo_t = C.PublicReplicaInfo_t{} 21 | emptyPrivateReplicaInfo C.PrivateReplicaInfo_t = C.PrivateReplicaInfo_t{} 22 | emptyPoStProof C.PoStProof_t = C.PoStProof_t{} 23 | emptyPublicPieceInfo C.PublicPieceInfo_t = C.PublicPieceInfo_t{} 24 | emptyByteArray32 C.uint8_32_array_t = C.uint8_32_array_t{} 25 | emptySliceBoxedUint8 C.slice_boxed_uint8_t = C.slice_boxed_uint8_t{} 26 | ) 27 | 28 | func AsSliceRefUint8(goBytes []byte) SliceRefUint8 { 29 | len := len(goBytes) 30 | 31 | if len == 0 { 32 | // can't take element 0 of an empty slice 33 | return SliceRefUint8{ 34 | ptr: &emptyUint8, 35 | len: C.size_t(len), 36 | } 37 | } 38 | return SliceRefUint8{ 39 | ptr: (*C.uint8_t)(unsafe.Pointer(&goBytes[0])), 40 | len: C.size_t(len), 41 | } 42 | } 43 | 44 | func AsSliceRefUint64(goBytes []uint64) SliceRefUint64 { 45 | len := len(goBytes) 46 | 47 | if len == 0 { 48 | // can't take element 0 of an empty slice 49 | return SliceRefUint64{ 50 | ptr: &emptyUint64, 51 | len: C.size_t(len), 52 | } 53 | } 54 | return SliceRefUint64{ 55 | ptr: (*C.uint64_t)(unsafe.Pointer(&goBytes[0])), 56 | len: C.size_t(len), 57 | } 58 | } 59 | 60 | func AllocSliceBoxedUint8(goBytes []byte) SliceBoxedUint8 { 61 | len := len(goBytes) 62 | 63 | ptr := C.alloc_boxed_slice(C.size_t(len)) 64 | copy(ptr.slice(), goBytes) 65 | 66 | return ptr 67 | } 68 | 69 | func AsSliceRefUint(goSlice []uint) SliceRefUint { 70 | len := len(goSlice) 71 | 72 | if len == 0 { 73 | // can't take element 0 of an empty slice 74 | return SliceRefUint{ 75 | ptr: &emptyUint, 76 | len: C.size_t(len), 77 | } 78 | } 79 | 80 | return SliceRefUint{ 81 | ptr: 
// AsSliceRefPoStProof wraps a Go slice of PoStProof values as a borrowed FFI
// slice reference without copying; the caller must keep the Go slice alive
// for as long as the reference is used.
func AsSliceRefPoStProof(goSlice []PoStProof) SliceRefPoStProof {
	len := len(goSlice)

	if len == 0 {
		// can't take element 0 of an empty slice
		return SliceRefPoStProof{
			ptr: &emptyPoStProof,
			len: C.size_t(len),
		}
	}

	return SliceRefPoStProof{
		ptr: (*C.PoStProof_t)(unsafe.Pointer(&goSlice[0])),
		len: C.size_t(len),
	}
}
SliceRefPublicPieceInfo { 155 | len := len(goSlice) 156 | 157 | if len == 0 { 158 | // can't take element 0 of an empty slice 159 | return SliceRefPublicPieceInfo{ 160 | ptr: &emptyPublicPieceInfo, 161 | len: C.size_t(len), 162 | } 163 | } 164 | 165 | return SliceRefPublicPieceInfo{ 166 | ptr: (*C.PublicPieceInfo_t)(unsafe.Pointer(&goSlice[0])), 167 | len: C.size_t(len), 168 | } 169 | } 170 | 171 | func AsSliceRefByteArray32(goSlice []ByteArray32) SliceRefByteArray32 { 172 | len := len(goSlice) 173 | 174 | if len == 0 { 175 | // can't take element 0 of an empty slice 176 | return SliceRefByteArray32{ 177 | ptr: &emptyByteArray32, 178 | len: C.size_t(len), 179 | } 180 | } 181 | 182 | return SliceRefByteArray32{ 183 | ptr: (*C.uint8_32_array_t)(unsafe.Pointer(&goSlice[0])), 184 | len: C.size_t(len), 185 | } 186 | } 187 | 188 | func AsSliceRefSliceBoxedUint8(goSlice []SliceBoxedUint8) SliceRefSliceBoxedUint8 { 189 | len := len(goSlice) 190 | 191 | if len == 0 { 192 | // can't take element 0 of an empty slice 193 | return SliceRefSliceBoxedUint8{ 194 | ptr: &emptySliceBoxedUint8, 195 | len: C.size_t(len), 196 | } 197 | } 198 | 199 | return SliceRefSliceBoxedUint8{ 200 | ptr: (*C.slice_boxed_uint8_t)(unsafe.Pointer(&goSlice[0])), 201 | len: C.size_t(len), 202 | } 203 | } 204 | 205 | func AsByteArray32(goSlice []byte) ByteArray32 { 206 | var ary ByteArray32 207 | l := len(goSlice) 208 | for idx := range goSlice { 209 | if idx < l { 210 | ary.idx[idx] = C.uchar(goSlice[idx]) 211 | } 212 | } 213 | return ary 214 | } 215 | 216 | // CheckErr returns `nil` if the `code` indicates success and an error otherwise. 
func CheckErr(resp result) error {
	// A nil response means the FFI call never produced a result at all.
	if resp == nil {
		return errors.New("nil result from Filecoin FFI")
	}
	if resp.statusCode() == FCPResponseStatusNoError {
		return nil
	}

	// Non-success: surface the Rust-side error message verbatim.
	return errors.New(string(resp.errorMsg().slice()))
}
impl CgoExterns {
    /// Construct a new externs from a handle.
    ///
    /// The handle identifies the Go-side externs object; every callback in
    /// this file passes it back so Go can route the call to the correct
    /// registered instance.
    pub fn new(handle: u64) -> CgoExterns {
        CgoExterns { handle }
    }
}
=> panic!("invalid return value from has: {}", r), 48 | x if x == FvmError::InvalidHandle as i32 => { 49 | panic!("extern {} not registered", self.handle) 50 | } 51 | e => Err(anyhow!( 52 | "cgo extern 'get_chain_randomness' failed with error code {}", 53 | e 54 | )), 55 | } 56 | } 57 | } 58 | 59 | fn get_beacon_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 60 | unsafe { 61 | let mut buf = [0u8; 32]; 62 | match cgo_extern_get_beacon_randomness(self.handle, round, &mut buf) { 63 | 0 => Ok(buf), 64 | r @ 1.. => panic!("invalid return value from has: {}", r), 65 | x if x == FvmError::InvalidHandle as i32 => { 66 | panic!("extern {} not registered", self.handle) 67 | } 68 | e => Err(anyhow!( 69 | "cgo extern 'get_beacon_randomness' failed with error code {}", 70 | e 71 | )), 72 | } 73 | } 74 | } 75 | } 76 | 77 | impl Rand3 for CgoExterns { 78 | fn get_chain_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 79 | Rand4::get_chain_randomness(self, round) 80 | } 81 | 82 | fn get_beacon_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 83 | Rand4::get_beacon_randomness(self, round) 84 | } 85 | } 86 | 87 | impl Rand2 for CgoExterns { 88 | fn get_chain_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 89 | Rand4::get_chain_randomness(self, round) 90 | } 91 | 92 | fn get_beacon_randomness(&self, round: ChainEpoch) -> anyhow::Result<[u8; 32]> { 93 | Rand4::get_beacon_randomness(self, round) 94 | } 95 | } 96 | 97 | impl Consensus4 for CgoExterns { 98 | fn verify_consensus_fault( 99 | &self, 100 | h1: &[u8], 101 | h2: &[u8], 102 | extra: &[u8], 103 | ) -> anyhow::Result<(Option, i64)> { 104 | unsafe { 105 | let mut miner_id: u64 = 0; 106 | let mut epoch: i64 = 0; 107 | let mut fault_type: i64 = 0; 108 | let mut gas_used: i64 = 0; 109 | match cgo_extern_verify_consensus_fault( 110 | self.handle, 111 | h1.as_ptr(), 112 | h1.len() as i32, 113 | h2.as_ptr(), 114 | h2.len() as i32, 115 | extra.as_ptr(), 116 | 
extra.len() as i32, 117 | &mut miner_id, 118 | &mut epoch, 119 | &mut fault_type, 120 | &mut gas_used, 121 | ) { 122 | 0 => Ok(( 123 | match fault_type { 124 | 0 => None, 125 | _ => Some(ConsensusFault4 { 126 | target: Address::new_id(miner_id), 127 | epoch, 128 | fault_type: FromPrimitive::from_i64(fault_type) 129 | .context("invalid fault type")?, 130 | }), 131 | }, 132 | gas_used, 133 | )), 134 | r @ 1.. => panic!("invalid return value from has: {}", r), 135 | x if x == FvmError::InvalidHandle as i32 => { 136 | panic!("extern {} not registered", self.handle) 137 | } 138 | e => Err(anyhow!( 139 | "cgo extern 'verify_consensus_fault' failed with error code {}", 140 | e 141 | )), 142 | } 143 | } 144 | } 145 | } 146 | 147 | impl Consensus3 for CgoExterns { 148 | fn verify_consensus_fault( 149 | &self, 150 | h1: &[u8], 151 | h2: &[u8], 152 | extra: &[u8], 153 | ) -> anyhow::Result<(Option, i64)> { 154 | let res = Consensus4::verify_consensus_fault(self, h1, h2, extra); 155 | match res { 156 | Ok((Some(res), x)) => Ok(( 157 | Some(ConsensusFault3 { 158 | target: Address3::from_bytes(&res.target.to_bytes()).unwrap(), 159 | epoch: res.epoch, 160 | fault_type: ConsensusFaultType3::from_u8(res.fault_type as u8).unwrap(), 161 | }), 162 | x, 163 | )), 164 | Ok((None, x)) => Ok((None, x)), 165 | Err(x) => Err(x), 166 | } 167 | } 168 | } 169 | 170 | impl Consensus2 for CgoExterns { 171 | fn verify_consensus_fault( 172 | &self, 173 | h1: &[u8], 174 | h2: &[u8], 175 | extra: &[u8], 176 | ) -> anyhow::Result<(Option, i64)> { 177 | let res = Consensus4::verify_consensus_fault(self, h1, h2, extra); 178 | match res { 179 | Ok((Some(res), x)) => Ok(( 180 | Some(ConsensusFault2 { 181 | target: Address2::from_bytes(&res.target.to_bytes()).unwrap(), 182 | epoch: res.epoch, 183 | fault_type: ConsensusFaultType2::from_u8(res.fault_type as u8).unwrap(), 184 | }), 185 | x, 186 | )), 187 | Ok((None, x)) => Ok((None, x)), 188 | Err(x) => Err(x), 189 | } 190 | } 191 | } 192 | 193 | impl 
Chain4 for CgoExterns { 194 | fn get_tipset_cid(&self, epoch: ChainEpoch) -> anyhow::Result { 195 | unsafe { 196 | let mut buf = [0; fvm4_shared::MAX_CID_LEN]; 197 | match cgo_extern_get_tipset_cid(self.handle, epoch, buf.as_mut_ptr(), buf.len() as i32) 198 | { 199 | 0 => Ok(buf[..].try_into()?), 200 | r @ 1.. => panic!("invalid return value from has: {}", r), 201 | x if x == FvmError::InvalidHandle as i32 => { 202 | panic!("extern {} not registered", self.handle) 203 | } 204 | e => Err(anyhow!( 205 | "cgo extern 'get_tipset_cid' failed with error code {}", 206 | e 207 | )), 208 | } 209 | } 210 | } 211 | } 212 | 213 | impl Chain3 for CgoExterns { 214 | fn get_tipset_cid(&self, epoch: ChainEpoch) -> anyhow::Result { 215 | Chain4::get_tipset_cid(self, epoch) 216 | } 217 | } 218 | 219 | impl Externs4 for CgoExterns {} 220 | impl Externs3 for CgoExterns {} 221 | impl Externs2 for CgoExterns {} 222 | -------------------------------------------------------------------------------- /mkreleaselog: -------------------------------------------------------------------------------- 1 | #!/bin/zsh 2 | 3 | # Note: This script is a modified version of the mkreleaselog script used by 4 | # the go-ipfs team. 5 | # 6 | # Usage: ./mkreleaselog v0.25.0 v0.26.0 > /tmp/release.log 7 | 8 | set -euo pipefail 9 | export GO111MODULE=on 10 | export GOPATH="$(go env GOPATH)" 11 | 12 | alias jq="jq --unbuffered" 13 | 14 | REPO_SUFFIXES_TO_STRIP=( 15 | "/v2" 16 | "/v3" 17 | "/v4" 18 | "/v5" 19 | "/v6" 20 | ) 21 | 22 | AUTHORS=( 23 | # orgs 24 | filecoin-project 25 | 26 | # Authors of personal repos used by filecoin-ffi that should be mentioned in the 27 | # release notes. 
28 | xlab 29 | ) 30 | 31 | [[ -n "${REPO_FILTER+x}" ]] || REPO_FILTER="github.com/(${$(printf "|%s" "${AUTHORS[@]}"):1})" 32 | 33 | [[ -n "${IGNORED_FILES+x}" ]] || IGNORED_FILES='^\(\.gx\|package\.json\|\.travis\.yml\|go.mod\|go\.sum|\.github|\.circleci\)$' 34 | 35 | NL=$'\n' 36 | 37 | msg() { 38 | echo "$*" >&2 39 | } 40 | 41 | statlog() { 42 | rpath="$GOPATH/src/$1" 43 | for s in $REPO_SUFFIXES_TO_STRIP; do 44 | rpath=${rpath%$s} 45 | done 46 | 47 | start="${2:-}" 48 | end="${3:-HEAD}" 49 | 50 | git -C "$rpath" log --shortstat --no-merges --pretty="tformat:%H%n%aN%n%aE" "$start..$end" | while 51 | read hash 52 | read name 53 | read email 54 | read _ # empty line 55 | read changes 56 | do 57 | changed=0 58 | insertions=0 59 | deletions=0 60 | while read count event; do 61 | if [[ "$event" =~ ^file ]]; then 62 | changed=$count 63 | elif [[ "$event" =~ ^insertion ]]; then 64 | insertions=$count 65 | elif [[ "$event" =~ ^deletion ]]; then 66 | deletions=$count 67 | else 68 | echo "unknown event $event" >&2 69 | exit 1 70 | fi 71 | done<<<"${changes//,/$NL}" 72 | 73 | jq -n \ 74 | --arg "hash" "$hash" \ 75 | --arg "name" "$name" \ 76 | --arg "email" "$email" \ 77 | --argjson "changed" "$changed" \ 78 | --argjson "insertions" "$insertions" \ 79 | --argjson "deletions" "$deletions" \ 80 | '{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}' 81 | done 82 | } 83 | 84 | # Returns a stream of deps changed between $1 and $2. 85 | dep_changes() { 86 | { 87 | <"$1" 88 | <"$2" 89 | } | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)' 90 | } 91 | 92 | # resolve_commits resolves a git ref for each version. 93 | resolve_commits() { 94 | jq '. 
+ {Ref: (.Version|capture("^((?.*)\\+incompatible|v.*-(0\\.)?[0-9]{14}-(?[a-f0-9]{12})|(?v.*))$") | .ref1 // .ref2 // .ref3)}' 95 | } 96 | 97 | pr_link() { 98 | local repo="$1" 99 | local prnum="$2" 100 | local ghname="${repo##github.com/}" 101 | printf -- "[%s#%s](https://%s/pull/%s)" "$ghname" "$prnum" "$repo" "$prnum" 102 | } 103 | 104 | # Generate a release log for a range of commits in a single repo. 105 | release_log() { 106 | setopt local_options BASH_REMATCH 107 | 108 | local repo="$1" 109 | local start="$2" 110 | local end="${3:-HEAD}" 111 | local dir="$GOPATH/src/$repo" 112 | 113 | local commit pr 114 | git -C "$dir" log \ 115 | --format='tformat:%H %s' \ 116 | --first-parent \ 117 | "$start..$end" | 118 | while read commit subject; do 119 | # Skip gx-only PRs. 120 | git -C "$dir" diff-tree --no-commit-id --name-only "$commit^" "$commit" | 121 | grep -v "${IGNORED_FILES}" >/dev/null || continue 122 | 123 | if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then 124 | local prnum="${BASH_REMATCH[2]}" 125 | local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)" 126 | printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")" 127 | elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then 128 | local prnum="${BASH_REMATCH[2]}" 129 | printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")" 130 | else 131 | printf -- "- %s\n" "$subject" 132 | fi 133 | done 134 | } 135 | 136 | indent() { 137 | sed -e 's/^/ /' 138 | } 139 | 140 | mod_deps() { 141 | go list -json -m all | jq 'select(.Version != null)' 142 | } 143 | 144 | ensure() { 145 | local repo="$1" 146 | for s in $REPO_SUFFIXES_TO_STRIP; do 147 | repo=${repo%$s} 148 | done 149 | 150 | local commit="$2" 151 | 152 | local rpath="$GOPATH/src/$repo" 153 | if [[ ! -d "$rpath" ]]; then 154 | msg "Cloning $repo..." 155 | git clone "http://$repo" "$rpath" >&2 156 | fi 157 | 158 | if ! 
git -C "$rpath" rev-parse --verify "$commit" >/dev/null; then 159 | msg "Fetching $repo..." 160 | git -C "$rpath" fetch --all >&2 161 | fi 162 | 163 | git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1 164 | } 165 | 166 | statsummary() { 167 | jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' | 168 | jq '. + {Lines: (.Deletions + .Insertions)}' 169 | } 170 | 171 | recursive_release_log() { 172 | local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}" 173 | local end="${2:-$(git rev-parse HEAD)}" 174 | local repo_root="$(git rev-parse --show-toplevel)" 175 | local package="$(cd "$repo_root" && go list)" 176 | 177 | if ! [[ "${GOPATH}/${package}" != "${repo_root}" ]]; then 178 | echo "This script requires the target package and all dependencies to live in a GOPATH." 179 | return 1 180 | fi 181 | 182 | ( 183 | local result=0 184 | local workspace="$(mktemp -d)" 185 | trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT 186 | cd "$workspace" 187 | 188 | echo "Computing old deps..." >&2 189 | git -C "$repo_root" show "$start:go.mod" >go.mod 190 | mod_deps | resolve_commits | jq -s > old_deps.json 191 | 192 | echo "Computing new deps..." 
>&2 193 | git -C "$repo_root" show "$end:go.mod" >go.mod 194 | mod_deps | resolve_commits | jq -s > new_deps.json 195 | 196 | rm -f go.mod go.sum 197 | 198 | printf -- "Generating Changelog for %s %s..%s\n" "$package" "$start" "$end" >&2 199 | 200 | printf -- "- %s:\n" "$package" 201 | release_log "$package" "$start" "$end" | indent 202 | 203 | statlog "$package" "$start" "$end" > statlog.json 204 | 205 | dep_changes old_deps.json new_deps.json | 206 | jq --arg filter "$REPO_FILTER" 'select(.Path | match($filter))' | 207 | # Compute changelogs 208 | jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' | 209 | while read repo new new_ref old old_ref; do 210 | for s in $REPO_SUFFIXES_TO_STRIP; do 211 | repo=${repo%$s} 212 | done 213 | 214 | if ! ensure "$repo" "$new_ref"; then 215 | result=1 216 | local changelog="failed to fetch repo" 217 | else 218 | statlog "$repo" "$old_ref" "$new_ref" >> statlog.json 219 | local changelog="$(release_log "$repo" "$old_ref" "$new_ref")" 220 | fi 221 | if [[ -n "$changelog" ]]; then 222 | printf -- "- %s (%s -> %s):\n" "$repo" "$old" "$new" 223 | echo "$changelog" | indent 224 | fi 225 | done 226 | 227 | echo 228 | echo "Contributors" 229 | echo 230 | 231 | echo "| Contributor | Commits | Lines ± | Files Changed |" 232 | echo "|-------------|---------|---------|---------------|" 233 | statsummary {{ 28 | match $res { 29 | Ok(res) => res, 30 | Err(_) => return $val, 31 | } 32 | }}; 33 | } 34 | 35 | #[ffi_export] 36 | fn destroy_box_bls_digest(ptr: repr_c::Box) { 37 | drop(ptr); 38 | } 39 | 40 | #[ffi_export] 41 | fn destroy_box_bls_private_key(ptr: repr_c::Box) { 42 | drop(ptr); 43 | } 44 | 45 | #[ffi_export] 46 | fn destroy_box_bls_public_key(ptr: repr_c::Box) { 47 | drop(ptr); 48 | } 49 | #[ffi_export] 50 | fn destroy_box_bls_signature(ptr: repr_c::Box) { 51 | drop(ptr); 52 | } 53 | 54 | /// Compute the digest of a message 55 | /// 56 | /// # Arguments 57 | /// 58 | /// * `message` - reference to a 
message byte array 59 | #[ffi_export] 60 | pub fn hash(message: c_slice::Ref) -> repr_c::Box { 61 | // call method 62 | let raw_digest = hash_sig(&message).to_bytes(); 63 | let digest: [u8; DIGEST_BYTES] = raw_digest.as_ref().try_into().expect("known size"); 64 | 65 | repr_c::Box::new(digest) 66 | } 67 | 68 | /// Aggregate signatures together into a new signature 69 | /// 70 | /// # Arguments 71 | /// 72 | /// * `flattened_signatures` - byte array containing signatures 73 | /// 74 | /// Returns `None` on error. Result must be freed using `destroy_aggregate_response`. 75 | #[ffi_export] 76 | pub fn aggregate(flattened_signatures: c_slice::Ref) -> Option> { 77 | // prep request 78 | let signatures = try_ffi!( 79 | flattened_signatures 80 | .par_chunks(SIGNATURE_BYTES) 81 | .map(|item| { Signature::from_bytes(item) }) 82 | .collect::, _>>(), 83 | None 84 | ); 85 | 86 | let mut signature: [u8; SIGNATURE_BYTES] = [0; SIGNATURE_BYTES]; 87 | 88 | let aggregated = try_ffi!(aggregate_sig(&signatures), None); 89 | aggregated 90 | .write_bytes(&mut signature.as_mut()) 91 | .expect("preallocated"); 92 | 93 | Some(repr_c::Box::new(signature)) 94 | } 95 | 96 | /// Verify that a signature is the aggregated signature of hashes - pubkeys 97 | /// 98 | /// # Arguments 99 | /// 100 | /// * `signature` - signature byte array (SIGNATURE_BYTES long) 101 | /// * `flattened_digests` - byte array containing digests 102 | /// * `flattened_public_keys` - byte array containing public keys 103 | #[ffi_export] 104 | pub fn verify( 105 | signature: c_slice::Ref, 106 | flattened_digests: c_slice::Ref, 107 | flattened_public_keys: c_slice::Ref, 108 | ) -> bool { 109 | // prep request 110 | let signature = try_ffi!(Signature::from_bytes(&signature), false); 111 | 112 | if flattened_digests.len() % DIGEST_BYTES != 0 { 113 | return false; 114 | } 115 | if flattened_public_keys.len() % PUBLIC_KEY_BYTES != 0 { 116 | return false; 117 | } 118 | 119 | if flattened_digests.len() / DIGEST_BYTES != 
/// * `flattened_messages` - byte array containing the concatenated messages
/// * `message_sizes` - array containing the lengths of the individual messages
{ 176 | return false; 177 | } 178 | 179 | let public_keys: Vec<_> = try_ffi!( 180 | flattened_public_keys 181 | .par_chunks(PUBLIC_KEY_BYTES) 182 | .map(|item| { PublicKey::from_bytes(item) }) 183 | .collect::>(), 184 | false 185 | ); 186 | 187 | verify_messages_sig(&signature, &messages, &public_keys) 188 | } 189 | 190 | /// Generate a new private key 191 | #[ffi_export] 192 | pub fn private_key_generate() -> repr_c::Box { 193 | let mut raw_private_key: [u8; PRIVATE_KEY_BYTES] = [0; PRIVATE_KEY_BYTES]; 194 | PrivateKey::generate(&mut OsRng) 195 | .write_bytes(&mut raw_private_key.as_mut()) 196 | .expect("preallocated"); 197 | 198 | repr_c::Box::new(raw_private_key) 199 | } 200 | 201 | /// Generate a new private key with seed 202 | /// 203 | /// **Warning**: Use this function only for testing or with very secure seeds 204 | /// 205 | /// # Arguments 206 | /// 207 | /// * `raw_seed` - a seed byte array with 32 bytes 208 | #[ffi_export] 209 | pub fn private_key_generate_with_seed(raw_seed: &[u8; 32]) -> repr_c::Box { 210 | let rng = &mut ChaChaRng::from_seed(*raw_seed); 211 | 212 | let mut raw_private_key: [u8; PRIVATE_KEY_BYTES] = [0; PRIVATE_KEY_BYTES]; 213 | PrivateKey::generate(rng) 214 | .write_bytes(&mut raw_private_key.as_mut()) 215 | .expect("preallocated"); 216 | 217 | repr_c::Box::new(raw_private_key) 218 | } 219 | 220 | /// Sign a message with a private key and return the signature 221 | /// 222 | /// # Arguments 223 | /// 224 | /// * `raw_private_key` - private key byte array 225 | /// * `message` - message byte array 226 | /// 227 | /// Returns `None` when passed invalid arguments. 
228 | #[ffi_export] 229 | pub fn private_key_sign( 230 | raw_private_key: c_slice::Ref, 231 | message: c_slice::Ref, 232 | ) -> Option> { 233 | let private_key = try_ffi!(PrivateKey::from_bytes(&raw_private_key), None); 234 | 235 | let mut raw_signature: [u8; SIGNATURE_BYTES] = [0; SIGNATURE_BYTES]; 236 | PrivateKey::sign(&private_key, &message[..]) 237 | .write_bytes(&mut raw_signature.as_mut()) 238 | .expect("preallocated"); 239 | 240 | Some(repr_c::Box::new(raw_signature)) 241 | } 242 | 243 | /// Generate the public key for a private key 244 | /// 245 | /// # Arguments 246 | /// 247 | /// * `raw_private_key` - private key byte array 248 | /// 249 | /// Returns `None` when passed invalid arguments. 250 | #[ffi_export] 251 | pub fn private_key_public_key( 252 | raw_private_key: c_slice::Ref, 253 | ) -> Option> { 254 | let private_key = try_ffi!(PrivateKey::from_bytes(&raw_private_key), None); 255 | 256 | let mut raw_public_key: [u8; PUBLIC_KEY_BYTES] = [0; PUBLIC_KEY_BYTES]; 257 | private_key 258 | .public_key() 259 | .write_bytes(&mut raw_public_key.as_mut()) 260 | .expect("preallocated"); 261 | 262 | Some(repr_c::Box::new(raw_public_key)) 263 | } 264 | 265 | /// Returns a zero signature, used as placeholder in Filecoin. 
266 | /// 267 | /// The return value is a pointer to a compressed signature in bytes, of length `SIGNATURE_BYTES` 268 | #[ffi_export] 269 | pub fn create_zero_signature() -> repr_c::Box { 270 | let sig: Signature = G2Affine::identity().into(); 271 | 272 | let mut raw_signature: [u8; SIGNATURE_BYTES] = [0; SIGNATURE_BYTES]; 273 | 274 | sig.write_bytes(&mut raw_signature.as_mut()) 275 | .expect("preallocated"); 276 | 277 | repr_c::Box::new(raw_signature) 278 | } 279 | 280 | #[cfg(test)] 281 | mod tests { 282 | use super::*; 283 | 284 | #[test] 285 | fn key_verification() { 286 | let private_key = private_key_generate(); 287 | let public_key = private_key_public_key(private_key[..].into()).unwrap(); 288 | let message = b"hello world"; 289 | let digest = hash(message[..].into()); 290 | let signature = private_key_sign(private_key[..].into(), message[..].into()).unwrap(); 291 | let verified = verify( 292 | signature[..].into(), 293 | digest[..].into(), 294 | public_key[..].into(), 295 | ); 296 | 297 | assert!(verified); 298 | 299 | let message_sizes = vec![message.len()]; 300 | let flattened_messages = message; 301 | 302 | let verified = hash_verify( 303 | signature[..].into(), 304 | flattened_messages[..].into(), 305 | message_sizes[..].into(), 306 | public_key[..].into(), 307 | ); 308 | 309 | assert!(verified); 310 | 311 | let different_message = b"bye world"; 312 | let different_digest = hash(different_message[..].into()); 313 | let not_verified = verify( 314 | signature[..].into(), 315 | different_digest[..].into(), 316 | public_key[..].into(), 317 | ); 318 | 319 | assert!(!not_verified); 320 | 321 | // garbage verification 322 | let different_digest = vec![0, 1, 2, 3, 4]; 323 | let not_verified = verify( 324 | signature[..].into(), 325 | different_digest[..].into(), 326 | public_key[..].into(), 327 | ); 328 | 329 | assert!(!not_verified); 330 | } 331 | 332 | #[test] 333 | fn private_key_with_seed() { 334 | let seed = [5u8; 32]; 335 | let private_key = 
# each value in this array is checked against the "features" of the host's CPU
!= null)' < "${rust_sources_dir}/rustc-target-features-optimized.json") 29 | 30 | main() { 31 | local __release_flags=$(get_release_flags) 32 | if [ "${FFI_BUILD_FROM_SOURCE}" != "1" ] && download_release_tarball __tarball_path "${rust_sources_dir}" "filecoin-ffi" "${__release_flags}"; then 33 | local __tmp_dir=$(mktemp -d) 34 | 35 | # silence shellcheck warning as the assignment happened in 36 | # `download_release_tarball()` 37 | # shellcheck disable=SC2154 38 | # extract downloaded tarball to temporary directory 39 | # 40 | tar -C "${__tmp_dir}" -xzf "${__tarball_path}" 41 | 42 | # copy build assets into root of filecoin-ffi 43 | # 44 | 45 | find -L "${__tmp_dir}" -type f -name filcrypto.h -exec cp -- "{}" . \; 46 | find -L "${__tmp_dir}" -type f -name libfilcrypto.a -exec cp -- "{}" . \; 47 | find -L "${__tmp_dir}" -type f -name filcrypto.pc -exec cp -- "{}" . \; 48 | 49 | check_installed_files 50 | 51 | (>&2 echo "[install-filcrypto/main] successfully installed prebuilt libfilcrypto") 52 | else 53 | (>&2 echo "[install-filcrypto/main] building libfilcrypto from local sources (dir = ${rust_sources_dir})") 54 | 55 | # build libfilcrypto (and corresponding header and pkg-config) 56 | # 57 | build_from_source "${rust_sources_dir}" "${__release_flags}" 58 | 59 | # copy from Rust's build directory (target) to root of filecoin-ffi 60 | # 61 | if [ "$(uname -s)" = "Darwin" ] && [ "$(uname -m)" = "x86_64" ]; then 62 | find -L "${rust_sources_dir}/target/universal/release" -type f -name libfilcrypto.a -exec cp -- "{}" . \; 63 | else 64 | find -L "${rust_sources_dir}/target/release" -type f -name libfilcrypto.a -exec cp -- "{}" . \; 65 | fi 66 | 67 | find -L "${rust_sources_dir}" -type f -name filcrypto.h -exec cp -- "{}" . \; 68 | find -L "${rust_sources_dir}" -type f -name filcrypto.pc -exec cp -- "{}" . 
\; 69 | 70 | pwd 71 | ls ./*filcrypto* 72 | 73 | check_installed_files 74 | 75 | (>&2 echo "[install-filcrypto/main] successfully built and installed libfilcrypto from source") 76 | fi 77 | } 78 | 79 | download_release_tarball() { 80 | local __resultvar=$1 81 | local __rust_sources_path=$2 82 | local __repo_name=$3 83 | local __release_flags=$4 84 | local __release_sha1=$(git rev-parse HEAD) 85 | local __release_tag="${__release_sha1:0:16}" 86 | local __release_tag_url="https://api.github.com/repos/filecoin-project/${__repo_name}/releases/tags/${__release_tag}" 87 | 88 | # Download the non-optimized standard release. 89 | release_flag_name="standard" 90 | 91 | # TODO: This function shouldn't make assumptions about how these releases' 92 | # names are constructed. Marginally less-bad would be to require that this 93 | # function's caller provide the release name. 94 | # 95 | if [ "$(uname -s)" = "Darwin" ]; then 96 | # For MacOS a universal library is used so naming convention is different 97 | local __release_name="${__repo_name}-$(uname)-${release_flag_name}" 98 | else 99 | local __release_name="${__repo_name}-$(uname)-$(uname -m)-${release_flag_name}" 100 | fi 101 | 102 | (>&2 echo "[download_release_tarball] acquiring release @ ${__release_tag}") 103 | 104 | local __release_response=$(curl "${auth_header[@]}" \ 105 | --retry 3 \ 106 | --location "${__release_tag_url}") 107 | 108 | local __release_url=$(echo "${__release_response}" | jq -r ".assets[] | select(.name | contains(\"${__release_name}\")) | .url") 109 | 110 | local __tar_path="/tmp/${__release_name}_$(basename "${__release_url}").tar.gz" 111 | 112 | if [[ -z "${__release_url}" ]]; then 113 | (>&2 echo "[download_release_tarball] failed to download release (tag URL: ${__release_tag_url}, response: ${__release_response})") 114 | return 1 115 | fi 116 | 117 | local __asset_url=$(curl "${auth_header[@]}" \ 118 | --head \ 119 | --retry 3 \ 120 | --header "Accept:application/octet-stream" \ 121 | --location 
    # make sure that toolchain is installed as well.
        # If SupraSeal is enabled, just use the `cuda-supraseal` feature and
220 | if [ "${FFI_USE_BLST_PORTABLE}" == "1" ]; then 221 | additional_flags="${additional_flags} --no-default-features --features ${use_multicore_sdr},blst-portable${gpu_flags}${use_fixed_rows_to_discard}" 222 | else 223 | additional_flags="${additional_flags} --no-default-features --features ${use_multicore_sdr}${gpu_flags}${use_fixed_rows_to_discard}" 224 | fi 225 | 226 | echo "Using additional build flags: ${additional_flags}" 227 | if [ -n "${__release_flags}" ]; then 228 | RUSTFLAGS="-C target-feature=${__release_flags}" ./scripts/build-release.sh ${build} "${additional_flags}" 229 | else 230 | ./scripts/build-release.sh ${build} "${additional_flags}" 231 | fi 232 | 233 | popd 234 | } 235 | 236 | get_release_flags() { 237 | local __features="" 238 | 239 | # determine where to look for CPU features 240 | # 241 | if [[ ! -f "/proc/cpuinfo" ]]; then 242 | (>&2 echo "[get_release_flags] no /proc/cpuinfo file; falling back to Darwin feature detection") 243 | __features=$(sysctl -a | grep machdep.cpu | tr '[:upper:]' '[:lower:]' | grep features) 244 | else 245 | #aarch64_uname=$(uname -a | grep aarch64) 246 | x86_64_uname=$(uname -a | grep x86_64) 247 | # shellcheck disable=SC2002 248 | if [ -n "${x86_64_uname}" ]; then 249 | __features=$(cat /proc/cpuinfo | grep flags | head -n 1) 250 | else 251 | # For now we assume aarch64. 
If another supported platform is added, explicitly check for it 252 | __features=$(cat /proc/cpuinfo | grep Features | head -n 1) 253 | fi 254 | fi 255 | 256 | # Maps cpu flag to rust flags (related to entries in rust/rustc-target-features-optimized.json) 257 | if [ "$(uname -s)" = "Darwin" ] && [ "$(uname -m)" = "x86_64" ]; then 258 | feature_map=("adx:+adx" "sha_ni:+sha" "sha2:+sha2" "avx2:+avx2" "sse4_2:+sse4.2" "sse4_1:+sse4.1") 259 | else 260 | feature_map=("adx:+adx" "sha_ni:+sha" "sha2:+sha2" "sse2:+sse2" "avx2:+avx2" "avx:+avx" "sse4_2:+sse4.2" "sse4_1:+sse4.1") 261 | fi 262 | 263 | target_features="" 264 | # check for the presence of each required CPU feature 265 | # 266 | # shellcheck disable=SC2068 # the splitting is intentional 267 | for x in ${cpu_features_required_for_optimized_release[@]}; do 268 | current_feature=$(echo "${__features}" | grep -c "${x}") 269 | if [ "1" = "${current_feature}" ]; then 270 | for feature in "${feature_map[@]}"; do 271 | key=${feature%%:*} 272 | if [ "${key}" == "${x}" ]; then 273 | val=${feature#*:} 274 | if [ -z "${target_features}" ]; then 275 | target_features="${val}" 276 | else 277 | target_features="${target_features},${val}" 278 | fi 279 | fi 280 | done 281 | fi 282 | done 283 | 284 | echo "${target_features}" 285 | } 286 | 287 | check_installed_files() { 288 | pwd 289 | ls ./*filcrypto* 290 | 291 | if [[ ! -f "./filcrypto.h" ]]; then 292 | (>&2 echo "[check_installed_files] failed to install filcrypto.h") 293 | exit 1 294 | fi 295 | 296 | if [[ ! -f "./libfilcrypto.a" ]]; then 297 | (>&2 echo "[check_installed_files] failed to install libfilcrypto.a") 298 | exit 1 299 | fi 300 | 301 | if [[ ! 
-f "./filcrypto.pc" ]]; then 302 | (>&2 echo "[check_installed_files] failed to install filcrypto.pc") 303 | exit 1 304 | fi 305 | } 306 | 307 | main "$@"; exit 308 | -------------------------------------------------------------------------------- /parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": { 3 | "cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q", 4 | "digest": "c3ad7bb549470b82ad52ed070aebb4f4", 5 | "sector_size": 536870912 6 | }, 7 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": { 8 | "cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv", 9 | "digest": "994c5b7d450ca9da348c910689f2dc7f", 10 | "sector_size": 536870912 11 | }, 12 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": { 13 | "cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S", 14 | "digest": "5aedd2cf3e5c0a15623d56a1b43110ad", 15 | "sector_size": 8388608 16 | }, 17 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": { 18 | "cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i", 19 | "digest": "abd80269054d391a734febdac0d2e687", 20 | "sector_size": 8388608 21 | }, 22 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": { 23 | "cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9", 24 | "digest": "311f92a3e75036ced01b1c0025f1fa0c", 25 | "sector_size": 2048 26 | }, 27 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": { 28 | "cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P", 29 | "digest": 
"eadad9784969890d30f2749708c79771", 30 | "sector_size": 2048 31 | }, 32 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": { 33 | "cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS", 34 | "digest": "1b3cfd761a961543f9eb273e435a06a2", 35 | "sector_size": 34359738368 36 | }, 37 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": { 38 | "cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN", 39 | "digest": "3a6941983754737fde880d29c7094905", 40 | "sector_size": 34359738368 41 | }, 42 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": { 43 | "cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp", 44 | "digest": "1a392e7b759fb18e036c7559b5ece816", 45 | "sector_size": 68719476736 46 | }, 47 | "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": { 48 | "cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg", 49 | "digest": "80e366df2f1011953c2d01c7b7c9ee8e", 50 | "sector_size": 68719476736 51 | }, 52 | "v28-empty-sector-update-poseidon-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": { 53 | "cid": "QmfK4tonETepL6F4kDFdJ3fr72fzRWoRPf3XGMhV3RLX1S", 54 | "digest": "b69983b5d7a97a20f43b3d5ff2a4ed04", 55 | "sector_size": 34359738368 56 | }, 57 | "v28-empty-sector-update-poseidon-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": { 58 | "cid": "QmYCTYJQPu8wgtB2rMZ7HJC9nDx8c1fzYRPdCUiErK4q5a", 59 | "digest": "1ac05784f304129f74c5184190c1ec78", 60 | "sector_size": 34359738368 61 | }, 62 | "v28-empty-sector-update-poseidon-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": { 63 | "cid": 
"QmNaaQXfm2NveN2Hf7bJ3udnQB2Qa4moMcUoJYJS6oWL6w", 64 | "digest": "a6d4f96e2b641a6d7a1a8e6dc1155c8a", 65 | "sector_size": 68719476736 66 | }, 67 | "v28-empty-sector-update-poseidon-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": { 68 | "cid": "QmXyeg9hbM7x9dGuUuAS68ozhiFEez4UkPTgwSDCVYKHBw", 69 | "digest": "8e8fb9e2c56eb5d740d0de135305a7b8", 70 | "sector_size": 68719476736 71 | }, 72 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { 73 | "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR", 74 | "digest": "7610b9f82bfc88405b7a832b651ce2f6", 75 | "sector_size": 2048 76 | }, 77 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { 78 | "cid": "QmcS5JZs8X3TdtkEBpHAdUYjdNDqcL7fWQFtQz69mpnu2X", 79 | "digest": "0e0958009936b9d5e515ec97b8cb792d", 80 | "sector_size": 2048 81 | }, 82 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { 83 | "cid": "QmUiRx71uxfmUE8V3H9sWAsAXoM88KR4eo1ByvvcFNeTLR", 84 | "digest": "1a7d4a9c8a502a497ed92a54366af33f", 85 | "sector_size": 536870912 86 | }, 87 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { 88 | "cid": "QmfCeddjFpWtavzfEzZpJfzSajGNwfL4RjFXWAvA9TSnTV", 89 | "digest": "4dae975de4f011f101f5a2f86d1daaba", 90 | "sector_size": 536870912 91 | }, 92 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { 93 | "cid": "QmcSTqDcFVLGGVYz1njhUZ7B6fkKtBumsLUwx4nkh22TzS", 94 | "digest": "82c88066be968bb550a05e30ff6c2413", 95 | "sector_size": 2048 96 | }, 97 | 
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { 98 | "cid": "QmSTCXF2ipGA3f6muVo6kHc2URSx6PzZxGUqu7uykaH5KU", 99 | "digest": "ffd79788d614d27919ae5bd2d94eacb6", 100 | "sector_size": 2048 101 | }, 102 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { 103 | "cid": "QmU9SBzJNrcjRFDiFc4GcApqdApN6z9X7MpUr66mJ2kAJP", 104 | "digest": "700171ecf7334e3199437c930676af82", 105 | "sector_size": 8388608 106 | }, 107 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { 108 | "cid": "QmbmUMa3TbbW3X5kFhExs6WgC4KeWT18YivaVmXDkB6ANG", 109 | "digest": "79ebb55f56fda427743e35053edad8fc", 110 | "sector_size": 8388608 111 | }, 112 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { 113 | "cid": "QmdNEL2RtqL52GQNuj8uz6mVj5Z34NVnbaJ1yMyh1oXtBx", 114 | "digest": "c49499bb76a0762884896f9683403f55", 115 | "sector_size": 8388608 116 | }, 117 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { 118 | "cid": "QmUiVYCQUgr6Y13pZFr8acWpSM4xvTXUdcvGmxyuHbKhsc", 119 | "digest": "34d4feeacd9abf788d69ef1bb4d8fd00", 120 | "sector_size": 8388608 121 | }, 122 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { 123 | "cid": "QmVgCsJFRXKLuuUhT3aMYwKVGNA9rDeR6DCrs7cAe8riBT", 124 | "digest": "827359440349fe8f5a016e7598993b79", 125 | "sector_size": 536870912 126 | }, 127 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { 128 | "cid": "QmfA31fbCWojSmhSGvvfxmxaYCpMoXP95zEQ9sLvBGHNaN", 129 | 
"digest": "bd2cd62f65c1ab84f19ca27e97b7c731", 130 | "sector_size": 536870912 131 | }, 132 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { 133 | "cid": "QmaUmfcJt6pozn8ndq1JVBzLRjRJdHMTPd4foa8iw5sjBZ", 134 | "digest": "2cf49eb26f1fee94c85781a390ddb4c8", 135 | "sector_size": 34359738368 136 | }, 137 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { 138 | "cid": "QmR9i9KL3vhhAqTBGj1bPPC7LvkptxrH9RvxJxLN1vvsBE", 139 | "digest": "0f8ec542485568fa3468c066e9fed82b", 140 | "sector_size": 34359738368 141 | }, 142 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { 143 | "cid": "Qmdtczp7p4wrbDofmHdGhiixn9irAcN77mV9AEHZBaTt1i", 144 | "digest": "d84f79a16fe40e9e25a36e2107bb1ba0", 145 | "sector_size": 34359738368 146 | }, 147 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { 148 | "cid": "QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF", 149 | "digest": "fc02943678dd119e69e7fab8420e8819", 150 | "sector_size": 34359738368 151 | }, 152 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { 153 | "cid": "QmeAN4vuANhXsF8xP2Lx5j2L6yMSdogLzpcvqCJThRGK1V", 154 | "digest": "3810b7780ac0e299b22ae70f1f94c9bc", 155 | "sector_size": 68719476736 156 | }, 157 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { 158 | "cid": "QmWV8rqZLxs1oQN9jxNWmnT1YdgLwCcscv94VARrhHf1T7", 159 | "digest": "59d2bf1857adc59a4f08fcf2afaa916b", 160 | "sector_size": 68719476736 161 | }, 162 | 
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { 163 | "cid": "QmVkrXc1SLcpgcudK5J25HH93QvR9tNsVhVTYHm5UymXAz", 164 | "digest": "2170a91ad5bae22ea61f2ea766630322", 165 | "sector_size": 68719476736 166 | }, 167 | "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { 168 | "cid": "QmbfQjPD7EpzjhWGmvWAsyN2mAZ4PcYhsf3ujuhU9CSuBm", 169 | "digest": "6d3789148fb6466d07ee1e24d6292fd6", 170 | "sector_size": 68719476736 171 | }, 172 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { 173 | "cid": "QmWceMgnWYLopMuM4AoGMvGEau7tNe5UK83XFjH5V9B17h", 174 | "digest": "434fb1338ecfaf0f59256f30dde4968f", 175 | "sector_size": 2048 176 | }, 177 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { 178 | "cid": "QmamahpFCstMUqHi2qGtVoDnRrsXhid86qsfvoyCTKJqHr", 179 | "digest": "dc1ade9929ade1708238f155343044ac", 180 | "sector_size": 2048 181 | }, 182 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { 183 | "cid": "QmYBpTt7LWNAWr1JXThV5VxX7wsQFLd1PHrGYVbrU1EZjC", 184 | "digest": "6c77597eb91ab936c1cef4cf19eba1b3", 185 | "sector_size": 536870912 186 | }, 187 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { 188 | "cid": "QmWionkqH2B6TXivzBSQeSyBxojaiAFbzhjtwYRrfwd8nH", 189 | "digest": "065179da19fbe515507267677f02823e", 190 | "sector_size": 536870912 191 | }, 192 | 
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { 193 | "cid": "QmPXAPPuQtuQz7Zz3MHMAMEtsYwqM1o9H1csPLeiMUQwZH", 194 | "digest": "09e612e4eeb7a0eb95679a88404f960c", 195 | "sector_size": 8388608 196 | }, 197 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { 198 | "cid": "QmYCuipFyvVW1GojdMrjK1JnMobXtT4zRCZs1CGxjizs99", 199 | "digest": "b687beb9adbd9dabe265a7e3620813e4", 200 | "sector_size": 8388608 201 | }, 202 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { 203 | "cid": "QmengpM684XLQfG8754ToonszgEg2bQeAGUan5uXTHUQzJ", 204 | "digest": "6a388072a518cf46ebd661f5cc46900a", 205 | "sector_size": 34359738368 206 | }, 207 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { 208 | "cid": "Qmf93EMrADXAK6CyiSfE8xx45fkMfR3uzKEPCvZC1n2kzb", 209 | "digest": "0c7b4aac1c40fdb7eb82bc355b41addf", 210 | "sector_size": 34359738368 211 | }, 212 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { 213 | "cid": "QmS7ye6Ri2MfFzCkcUJ7FQ6zxDKuJ6J6B8k5PN7wzSR9sX", 214 | "digest": "1801f8a6e1b00bceb00cc27314bb5ce3", 215 | "sector_size": 68719476736 216 | }, 217 | "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { 218 | "cid": "QmehSmC6BhrgRZakPDta2ewoH9nosNzdjCqQRXsNFNUkLN", 219 | "digest": "a89884252c04c298d0b3c81bfd884164", 220 | "sector_size": 68719476736 221 | } 222 | } 223 | -------------------------------------------------------------------------------- 
/.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | orbs: 4 | shellcheck: circleci/shellcheck@3.0.0 5 | 6 | executors: 7 | golang: 8 | docker: 9 | - image: cimg/go:1.20 10 | resource_class: small 11 | environment: 12 | # Build the kernel only for the single architecture. This should reduce 13 | # the overall compile-time significantly. 14 | EC_GPU_CUDA_NVCC_ARGS: --fatbin --gpu-architecture=sm_75 --generate-code=arch=compute_75,code=sm_75 15 | BELLMAN_CUDA_NVCC_ARGS: --fatbin --gpu-architecture=sm_75 --generate-code=arch=compute_75,code=sm_75 16 | NEPTUNE_CUDA_NVCC_ARGS: --fatbin --gpu-architecture=sm_75 --generate-code=arch=compute_75,code=sm_75 17 | 18 | jobs: 19 | shellcheck: 20 | description: Lint the install-filcrypto shell script 21 | docker: 22 | - image: 'cimg/base:stable' 23 | resource_class: small 24 | steps: 25 | - checkout 26 | - shellcheck/install 27 | - run: shellcheck ./install-filcrypto 28 | gofmt: 29 | executor: golang 30 | steps: 31 | - checkout 32 | - run: git submodule sync 33 | - run: git submodule update --init 34 | - run: 35 | name: Run go fmt 36 | # `! go fmt ./... 2>&1 | read"` doesn't work, this one does, thanks 37 | # https://carsonip.me/posts/go-fmt-and-ci/ 38 | command: | 39 | output=$(go fmt ./...) 
40 | echo "${output}" 41 | test -z "${output}" 42 | 43 | go_lint: 44 | description: Run various linters 45 | executor: golang 46 | resource_class: medium 47 | steps: 48 | - configure_environment_variables 49 | - prepare 50 | - run: 51 | command: make go-lint 52 | 53 | build_and_test_aarch64_linux_cgo_bindings: 54 | parameters: 55 | run_leak_detector: 56 | type: boolean 57 | default: true 58 | machine: 59 | image: ubuntu-2204:2023.07.2 60 | resource_class: arm.large 61 | working_directory: ~/go/src/github.com/filecoin-project/filecoin-ffi 62 | steps: 63 | - configure_environment_variables 64 | - prepare 65 | - run: 66 | name: Make sure libcuda is found by linker 67 | command: sudo ln --symbolic --relative /usr/lib/aarch64-linux-gnu/stubs/libcuda.so /usr/lib/aarch64-linux-gnu/stubs/libcuda.so.1 68 | - build_project 69 | - restore_parameter_cache 70 | - obtain_filecoin_parameters 71 | - save_parameter_cache 72 | - run: cd rust && rustup target add wasm32-unknown-unknown 73 | - run_tests 74 | 75 | build_and_test_linux_cgo_bindings: 76 | parameters: 77 | run_leak_detector: 78 | type: boolean 79 | default: true 80 | executor: golang 81 | resource_class: medium 82 | working_directory: ~/go/src/github.com/filecoin-project/filecoin-ffi 83 | steps: 84 | - configure_environment_variables 85 | - prepare 86 | - build_project 87 | - restore_parameter_cache 88 | - obtain_filecoin_parameters 89 | - save_parameter_cache 90 | - run: cd rust && rustup target add wasm32-unknown-unknown 91 | - run_tests: 92 | run_leak_detector: << parameters.run_leak_detector >> 93 | 94 | build_darwin_cgo_bindings: 95 | macos: 96 | xcode: "12.5.1" 97 | working_directory: ~/go/src/github.com/filecoin-project/filecoin-ffi 98 | resource_class: macos.x86.medium.gen2 99 | steps: 100 | - configure_environment_variables: 101 | linux: false 102 | darwin: true 103 | - prepare: 104 | linux: false 105 | darwin: true 106 | - run: cd rust && cargo fetch 107 | - run: cd rust && cargo install cargo-lipo 108 | - 
build_project 109 | - compile_tests 110 | 111 | # SupraSeal pulls in a C++ code base, make sure everything compiles properly. 112 | build_linux_supraseal: 113 | executor: golang 114 | resource_class: medium 115 | working_directory: ~/go/src/github.com/filecoin-project/filecoin-ffi 116 | steps: 117 | - configure_environment_variables 118 | - prepare 119 | - run: 120 | name: Build project with `FFI_USE_CUDA_SUPRASEAL=1` 121 | command: FFI_BUILD_FROM_SOURCE=1 FFI_USE_CUDA_SUPRASEAL=1 make 122 | 123 | publish_linux_x86_64_staticlib: 124 | executor: golang 125 | resource_class: medium 126 | steps: 127 | - configure_environment_variables 128 | - prepare 129 | - publish_release 130 | publish_linux_aarch64_staticlib: 131 | machine: 132 | image: ubuntu-2204:current 133 | resource_class: arm.large 134 | steps: 135 | - configure_environment_variables 136 | - prepare 137 | - publish_release 138 | publish_darwin_staticlib: 139 | macos: 140 | xcode: "12.5.1" 141 | resource_class: macos.x86.medium.gen2 142 | steps: 143 | - configure_environment_variables: 144 | linux: false 145 | darwin: true 146 | - prepare: 147 | linux: false 148 | darwin: true 149 | - run: cd rust && rustup target add aarch64-apple-darwin 150 | - run: cd rust && cargo fetch 151 | - run: cd rust && cargo install cargo-lipo 152 | - publish_darwin_release 153 | rustfmt: 154 | docker: 155 | - image: cimg/rust:1.67 156 | resource_class: small 157 | steps: 158 | - checkout 159 | - run: git submodule sync 160 | - run: git submodule update --init 161 | - run: 162 | name: Run cargo fmt 163 | command: cargo fmt --manifest-path ./rust/Cargo.toml --all -- --check 164 | clippy: 165 | executor: golang 166 | steps: 167 | - configure_environment_variables 168 | - prepare 169 | - run: 170 | name: Run cargo clippy 171 | command: cd rust && cargo clippy --all-targets --features blst-portable,opencl -- -D warnings 172 | 173 | workflows: 174 | version: 2 175 | test_all: 176 | jobs: 177 | # Lint the install Bash script 178 | - 
shellcheck 179 | - rustfmt 180 | - clippy 181 | - gofmt 182 | - go_lint 183 | - build_and_test_linux_cgo_bindings: 184 | run_leak_detector: false 185 | - build_and_test_aarch64_linux_cgo_bindings: 186 | run_leak_detector: false 187 | - build_darwin_cgo_bindings 188 | - build_linux_supraseal 189 | - publish_linux_x86_64_staticlib: 190 | filters: 191 | tags: 192 | only: /^v.*/ 193 | branches: 194 | ignore: /.*/ 195 | - publish_linux_aarch64_staticlib: 196 | filters: 197 | tags: 198 | only: /^v.*/ 199 | branches: 200 | ignore: /.*/ 201 | - publish_darwin_staticlib: 202 | filters: 203 | tags: 204 | only: /^v.*/ 205 | branches: 206 | ignore: /.*/ 207 | 208 | commands: 209 | prepare: 210 | parameters: 211 | linux: 212 | default: true 213 | description: is a linux build environment? 214 | type: boolean 215 | darwin: 216 | default: false 217 | description: is a darwin build environment? 218 | type: boolean 219 | steps: 220 | - checkout 221 | - when: 222 | condition: << parameters.linux >> 223 | steps: 224 | - run: sudo apt-get update 225 | - run: sudo apt-get install --no-install-recommends -y valgrind ocl-icd-opencl-dev libssl-dev libhwloc-dev nvidia-cuda-toolkit g++-10 226 | - run: 227 | name: Downgrade to GCC 10, as CUDA 11 doesn't play nice with GCC 11 228 | command: | 229 | sudo update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-10 10 230 | sudo update-alternatives --set c++ /usr/bin/g++-10 231 | - when: 232 | condition: << parameters.darwin >> 233 | steps: 234 | - run: 235 | name: Install Go 236 | command: | 237 | curl https://dl.google.com/go/go1.20.10.darwin-amd64.pkg -o /tmp/go.pkg && \ 238 | sudo installer -pkg /tmp/go.pkg -target / 239 | go version 240 | - run: 241 | name: Install other dependencies with Homebrew 242 | command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config md5sha1sum jq hwloc || brew link --overwrite python@2 243 | - run: 244 | name: Install Rust toolchain 245 | command: | 246 | curl --proto '=https' --tlsv1.2 -sSf 
https://sh.rustup.rs | sh -s -- -y --default-toolchain none 247 | - run: git submodule sync 248 | - run: git submodule update --init 249 | 250 | publish_release: 251 | steps: 252 | - run: 253 | name: Build and publish the standard release 254 | command: | 255 | cd rust 256 | 257 | TARBALL_PATH="/tmp/${CIRCLE_PROJECT_REPONAME}-$(uname)-$(uname -m)-standard.tar.gz" 258 | RELEASE_NAME="${CIRCLE_PROJECT_REPONAME}-$(uname)-$(uname -m)-standard" 259 | 260 | # Note: the blst dependency uses the portable configuration for maximum compatibility 261 | ./scripts/build-release.sh build --verbose --no-default-features --features multicore-sdr,opencl,blst-portable 262 | ./scripts/package-release.sh $TARBALL_PATH 263 | ./scripts/publish-release.sh $TARBALL_PATH $RELEASE_NAME 264 | - run: 265 | name: Build the optimized release 266 | command: | 267 | cd rust 268 | 269 | TARBALL_PATH="/tmp/${CIRCLE_PROJECT_REPONAME}-$(uname)-$(uname -m)-optimized.tar.gz" 270 | RUSTFLAGS="-C target-feature=$(cat rustc-target-features-optimized.json | jq -r '.[].rustc_target_feature' | tr '\n' ',')" 271 | 272 | ./scripts/build-release.sh build --verbose --no-default-features --features multicore-sdr,opencl 273 | ./scripts/package-release.sh $TARBALL_PATH 274 | 275 | publish_darwin_release: 276 | steps: 277 | - run: 278 | name: Build and publish the universal standard release 279 | command: | 280 | cd rust 281 | 282 | RELEASE_NAME="${CIRCLE_PROJECT_REPONAME}-$(uname)-standard" 283 | TARBALL_PATH="/tmp/${RELEASE_NAME}.tar.gz" 284 | 285 | # Note: the blst dependency uses the portable configuration for maximum compatibility 286 | ./scripts/build-release.sh lipo --targets x86_64-apple-darwin,aarch64-apple-darwin --verbose --no-default-features --features multicore-sdr,opencl,blst-portable 287 | ./scripts/package-release.sh $TARBALL_PATH 288 | ./scripts/publish-release.sh $TARBALL_PATH $RELEASE_NAME 289 | configure_environment_variables: 290 | parameters: 291 | linux: 292 | default: true 293 | description: 
is a Linux build environment? 294 | type: boolean 295 | darwin: 296 | default: false 297 | description: is a Darwin build environment? 298 | type: boolean 299 | steps: 300 | - run: 301 | name: Configure environment variables 302 | command: | 303 | echo 'export FIL_PROOFS_PARAMETER_CACHE="${HOME}/filecoin-proof-parameters/"' >> $BASH_ENV 304 | echo 'export GO111MODULE=on' >> $BASH_ENV 305 | echo 'export GOPATH="${HOME}/go"' >> $BASH_ENV 306 | echo 'export PATH="/usr/local/go/bin:${HOME}/.cargo/bin:${PATH}:${HOME}/go/bin:${HOME}/.bin"' >> $BASH_ENV 307 | echo 'export RUST_LOG=info' >> $BASH_ENV 308 | echo 'export CIRCLE_ARTIFACTS="/tmp"' >> $BASH_ENV 309 | # Make sure CUDA is found on aarch64 310 | echo 'export LD_LIBRARY_PATH="/usr/lib/aarch64-linux-gnu/stubs:${LD_LIBRARY_PATH}"' >> ${BASH_ENV} 311 | echo 'export LIBRARY_PATH="/usr/lib/aarch64-linux-gnu/stubs:${LIBRARY_PATH}"' >> ${BASH_ENV} 312 | - when: 313 | condition: << parameters.darwin >> 314 | steps: 315 | - run: 316 | name: Add a few more environment variables 317 | command: | 318 | echo 'export PATH="${HOME}/.cargo/bin:${HOME}/.bin:${PATH}"' >> $BASH_ENV 319 | obtain_filecoin_parameters: 320 | steps: 321 | - run: | 322 | DIR=$(pwd) 323 | cd $(mktemp -d) 324 | go install github.com/filecoin-project/go-paramfetch/paramfetch@latest 325 | $GOPATH/bin/paramfetch 2048 "${DIR}/parameters.json" "${DIR}/srs-inner-product.json" 326 | 327 | build_project: 328 | steps: 329 | - run: 330 | name: Build project 331 | command: make 332 | 333 | - run: 334 | name: Build project without CGO 335 | command: env CGO_ENABLED=0 go build . 
336 | 337 | run_tests: 338 | parameters: 339 | run_leak_detector: 340 | type: boolean 341 | default: false 342 | steps: 343 | - when: 344 | condition: << parameters.run_leak_detector >> 345 | steps: 346 | - run: 347 | name: Run leak detector 348 | command: make cgo-leakdetect 349 | no_output_timeout: 90m 350 | - run: 351 | name: Run the Rust tests with default features 352 | command: cd rust && FIL_PROOFS_PARAMETER_CACHE="${HOME}/filecoin-proof-parameters/" RUST_LOG=info cargo test --all --release -- --test-threads 1 && cd .. 353 | no_output_timeout: 90m 354 | - run: 355 | name: Run the Go tests 356 | command: GODEBUG=cgocheck=2 RUST_LOG=info go test -p 1 -timeout 60m 357 | no_output_timeout: 60m 358 | compile_tests: 359 | steps: 360 | - run: 361 | name: Build project and tests, but don't actually run the tests (used to verify that build/link works with Darwin) 362 | command: GODEBUG=cgocheck=2 RUST_LOG=info go test -run=^$ 363 | restore_parameter_cache: 364 | steps: 365 | - restore_cache: 366 | keys: 367 | - v28-proof-params-{{ arch }} 368 | save_parameter_cache: 369 | steps: 370 | - save_cache: 371 | key: v28-proof-params-{{ arch }} 372 | paths: 373 | - "~/filecoin-proof-parameters/" 374 | -------------------------------------------------------------------------------- /workflows.go: -------------------------------------------------------------------------------- 1 | //go:build cgo 2 | // +build cgo 3 | 4 | package ffi 5 | 6 | import ( 7 | "bytes" 8 | "crypto/rand" 9 | "encoding/binary" 10 | "fmt" 11 | "io" 12 | "io/ioutil" 13 | "math" 14 | "math/big" 15 | "os" 16 | "path/filepath" 17 | 18 | prooftypes "github.com/filecoin-project/go-state-types/proof" 19 | 20 | "github.com/filecoin-project/go-state-types/abi" 21 | "github.com/ipfs/go-cid" 22 | ) 23 | 24 | func WorkflowProofsLifecycle(t TestHelper) { 25 | minerID := abi.ActorID(42) 26 | randomness := [32]byte{9, 9, 9} 27 | sealProofType := abi.RegisteredSealProof_StackedDrg2KiBV1 28 | winningPostProofType := 
abi.RegisteredPoStProof_StackedDrgWinning2KiBV1 29 | sectorNum := abi.SectorNumber(42) 30 | 31 | ticket := abi.SealRandomness{5, 4, 2} 32 | 33 | seed := abi.InteractiveSealRandomness{7, 4, 2} 34 | 35 | // initialize a sector builder 36 | metadataDir := requireTempDirPath(t, "metadata") 37 | defer os.RemoveAll(metadataDir) 38 | 39 | sealedSectorsDir := requireTempDirPath(t, "sealed-sectors") 40 | defer os.RemoveAll(sealedSectorsDir) 41 | 42 | stagedSectorsDir := requireTempDirPath(t, "staged-sectors") 43 | defer os.RemoveAll(stagedSectorsDir) 44 | 45 | sectorCacheRootDir := requireTempDirPath(t, "sector-cache-root-dir") 46 | defer os.RemoveAll(sectorCacheRootDir) 47 | 48 | sectorCacheDirPath := requireTempDirPath(t, "sector-cache-dir") 49 | defer os.RemoveAll(sectorCacheDirPath) 50 | 51 | fauxSectorCacheDirPath := requireTempDirPath(t, "faux-sector-cache-dir") 52 | defer os.RemoveAll(fauxSectorCacheDirPath) 53 | 54 | stagedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0) 55 | defer stagedSectorFile.Close() 56 | 57 | sealedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0) 58 | defer sealedSectorFile.Close() 59 | 60 | fauxSealedSectorFile := requireTempFile(t, bytes.NewReader([]byte{}), 0) 61 | defer fauxSealedSectorFile.Close() 62 | 63 | unsealOutputFileA := requireTempFile(t, bytes.NewReader([]byte{}), 0) 64 | defer unsealOutputFileA.Close() 65 | 66 | unsealOutputFileB := requireTempFile(t, bytes.NewReader([]byte{}), 0) 67 | defer unsealOutputFileB.Close() 68 | 69 | unsealOutputFileC := requireTempFile(t, bytes.NewReader([]byte{}), 0) 70 | defer unsealOutputFileC.Close() 71 | 72 | unsealOutputFileD := requireTempFile(t, bytes.NewReader([]byte{}), 0) 73 | defer unsealOutputFileD.Close() 74 | 75 | // some rando bytes 76 | someBytes := make([]byte, abi.PaddedPieceSize(2048).Unpadded()) 77 | _, err := io.ReadFull(rand.Reader, someBytes) 78 | t.RequireNoError(err) 79 | 80 | // write first piece 81 | pieceFileA := requireTempFile(t, 
bytes.NewReader(someBytes[0:127]), 127) 82 | 83 | pieceCIDA, err := GeneratePieceCIDFromFile(sealProofType, pieceFileA, 127) 84 | t.RequireNoError(err) 85 | 86 | // seek back to head (generating piece commitment moves offset) 87 | _, err = pieceFileA.Seek(0, 0) 88 | t.RequireNoError(err) 89 | 90 | // write the first piece using the alignment-free function 91 | n, pieceCID, err := WriteWithoutAlignment(sealProofType, pieceFileA, 127, stagedSectorFile) 92 | t.RequireNoError(err) 93 | t.AssertEqual(int(n), 127) 94 | t.AssertTrue(pieceCID.Equals(pieceCIDA)) 95 | 96 | // write second piece + alignment 97 | t.RequireNoError(err) 98 | pieceFileB := requireTempFile(t, bytes.NewReader(someBytes[0:1016]), 1016) 99 | 100 | pieceCIDB, err := GeneratePieceCIDFromFile(sealProofType, pieceFileB, 1016) 101 | t.RequireNoError(err) 102 | 103 | // seek back to head 104 | _, err = pieceFileB.Seek(0, 0) 105 | t.RequireNoError(err) 106 | 107 | // second piece relies on the alignment-computing version 108 | left, tot, pieceCID, err := WriteWithAlignment(sealProofType, pieceFileB, 1016, stagedSectorFile, []abi.UnpaddedPieceSize{127}) 109 | t.RequireNoError(err) 110 | t.AssertEqual(889, int(left)) 111 | t.AssertEqual(1905, int(tot)) 112 | t.AssertTrue(pieceCID.Equals(pieceCIDB)) 113 | 114 | publicPieces := []abi.PieceInfo{{ 115 | Size: abi.UnpaddedPieceSize(127).Padded(), 116 | PieceCID: pieceCIDA, 117 | }, { 118 | Size: abi.UnpaddedPieceSize(1016).Padded(), 119 | PieceCID: pieceCIDB, 120 | }} 121 | 122 | preGeneratedUnsealedCID, err := GenerateUnsealedCID(sealProofType, publicPieces) 123 | t.RequireNoError(err) 124 | 125 | // pre-commit the sector 126 | sealPreCommitPhase1Output, err := SealPreCommitPhase1(sealProofType, sectorCacheDirPath, stagedSectorFile.Name(), sealedSectorFile.Name(), sectorNum, minerID, ticket, publicPieces) 127 | t.RequireNoError(err) 128 | 129 | sealedCID, unsealedCID, err := SealPreCommitPhase2(sealPreCommitPhase1Output, sectorCacheDirPath, 
sealedSectorFile.Name()) 130 | t.RequireNoError(err) 131 | 132 | t.AssertTrue(unsealedCID.Equals(preGeneratedUnsealedCID), "prover and verifier should agree on data commitment") 133 | 134 | // commit the sector 135 | sealCommitPhase1Output, err := SealCommitPhase1(sealProofType, sealedCID, unsealedCID, sectorCacheDirPath, sealedSectorFile.Name(), sectorNum, minerID, ticket, seed, publicPieces) 136 | t.RequireNoError(err) 137 | 138 | proof, err := SealCommitPhase2(sealCommitPhase1Output, sectorNum, minerID) 139 | t.RequireNoError(err) 140 | 141 | // verify the 'ole proofy 142 | isValid, err := VerifySeal(prooftypes.SealVerifyInfo{ 143 | SectorID: abi.SectorID{ 144 | Miner: minerID, 145 | Number: sectorNum, 146 | }, 147 | SealedCID: sealedCID, 148 | SealProof: sealProofType, 149 | Proof: proof, 150 | DealIDs: []abi.DealID{}, 151 | Randomness: ticket, 152 | InteractiveRandomness: seed, 153 | UnsealedCID: unsealedCID, 154 | }) 155 | t.RequireNoError(err) 156 | t.RequireTrue(isValid, "proof wasn't valid") 157 | 158 | // unseal the entire sector and verify that things went as we planned 159 | _, err = sealedSectorFile.Seek(0, 0) 160 | t.RequireNoError(err) 161 | t.RequireNoError(Unseal(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileA, sectorNum, minerID, ticket, unsealedCID)) 162 | _, err = unsealOutputFileA.Seek(0, 0) 163 | t.RequireNoError(err) 164 | contents, err := ioutil.ReadFile(unsealOutputFileA.Name()) 165 | t.RequireNoError(err) 166 | 167 | // unsealed sector includes a bunch of alignment NUL-bytes 168 | alignment := make([]byte, 889) 169 | 170 | // verify that we unsealed what we expected to unseal 171 | t.AssertTrue(bytes.Equal(someBytes[0:127], contents[0:127]), "bytes aren't equal") 172 | t.AssertTrue(bytes.Equal(alignment, contents[127:1016]), "bytes aren't equal") 173 | t.AssertTrue(bytes.Equal(someBytes[0:1016], contents[1016:2032]), "bytes aren't equal") 174 | 175 | // unseal just the first piece 176 | _, err = 
sealedSectorFile.Seek(0, 0) 177 | t.RequireNoError(err) 178 | err = UnsealRange(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileB, sectorNum, minerID, ticket, unsealedCID, 0, 127) 179 | t.RequireNoError(err) 180 | _, err = unsealOutputFileB.Seek(0, 0) 181 | t.RequireNoError(err) 182 | contentsB, err := ioutil.ReadFile(unsealOutputFileB.Name()) 183 | t.RequireNoError(err) 184 | t.AssertEqual(127, len(contentsB)) 185 | t.AssertTrue(bytes.Equal(someBytes[0:127], contentsB[0:127]), "bytes aren't equal") 186 | 187 | // unseal just the second piece 188 | _, err = sealedSectorFile.Seek(0, 0) 189 | t.RequireNoError(err) 190 | err = UnsealRange(sealProofType, sectorCacheDirPath, sealedSectorFile, unsealOutputFileC, sectorNum, minerID, ticket, unsealedCID, 1016, 1016) 191 | t.RequireNoError(err) 192 | _, err = unsealOutputFileC.Seek(0, 0) 193 | t.RequireNoError(err) 194 | contentsC, err := ioutil.ReadFile(unsealOutputFileC.Name()) 195 | t.RequireNoError(err) 196 | t.AssertEqual(1016, len(contentsC)) 197 | t.AssertTrue(bytes.Equal(someBytes[0:1016], contentsC[0:1016]), "bytes aren't equal") 198 | 199 | // verify that the sector builder owns no sealed sectors 200 | var sealedSectorPaths []string 201 | t.RequireNoError(filepath.Walk(sealedSectorsDir, visit(&sealedSectorPaths))) 202 | t.AssertEqual(1, len(sealedSectorPaths), sealedSectorPaths) 203 | 204 | // no sector cache dirs, either 205 | var sectorCacheDirPaths []string 206 | t.RequireNoError(filepath.Walk(sectorCacheRootDir, visit(§orCacheDirPaths))) 207 | t.AssertEqual(1, len(sectorCacheDirPaths), sectorCacheDirPaths) 208 | 209 | // run the FauxRep routine, for good measure 210 | fauxSectorCID, err := FauxRep(sealProofType, fauxSectorCacheDirPath, fauxSealedSectorFile.Name()) 211 | t.RequireNoError(err, "FauxRep produced an error") 212 | t.RequireTrue(!cid.Undef.Equals(fauxSectorCID), "faux sector CID shouldn't be undefined") 213 | 214 | // run the FauxRep2 routine, for good measure 215 | 
fauxSectorCID2, err := FauxRep2(sealProofType, fauxSectorCacheDirPath, fauxSealedSectorFile.Name()) 216 | t.RequireNoError(err, "FauxRep2 produced an error") 217 | t.RequireTrue(!cid.Undef.Equals(fauxSectorCID2), "faux sector CID 2 shouldn't be undefined") 218 | 219 | // generate a PoSt over the proving set before importing, just to exercise 220 | // the new API 221 | privateInfo := NewSortedPrivateSectorInfo(PrivateSectorInfo{ 222 | SectorInfo: prooftypes.SectorInfo{ 223 | SectorNumber: sectorNum, 224 | SealedCID: sealedCID, 225 | }, 226 | CacheDirPath: sectorCacheDirPath, 227 | PoStProofType: winningPostProofType, 228 | SealedSectorPath: sealedSectorFile.Name(), 229 | }) 230 | 231 | provingSet := []prooftypes.SectorInfo{{ 232 | SealProof: sealProofType, 233 | SectorNumber: sectorNum, 234 | SealedCID: sealedCID, 235 | }} 236 | 237 | // figure out which sectors have been challenged 238 | indicesInProvingSet, err := GenerateWinningPoStSectorChallenge(winningPostProofType, minerID, randomness[:], uint64(len(provingSet))) 239 | t.RequireNoError(err) 240 | 241 | var challengedSectors []prooftypes.SectorInfo 242 | for idx := range indicesInProvingSet { 243 | challengedSectors = append(challengedSectors, provingSet[indicesInProvingSet[idx]]) 244 | } 245 | 246 | proofs, err := GenerateWinningPoSt(minerID, privateInfo, randomness[:]) 247 | t.RequireNoError(err) 248 | 249 | isValid, err = VerifyWinningPoSt(prooftypes.WinningPoStVerifyInfo{ 250 | Randomness: randomness[:], 251 | Proofs: proofs, 252 | ChallengedSectors: challengedSectors, 253 | Prover: minerID, 254 | }) 255 | t.RequireNoError(err) 256 | t.AssertTrue(isValid, "VerifyWinningPoSt rejected the (standalone) proof as invalid") 257 | } 258 | 259 | func WorkflowGetGPUDevicesDoesNotProduceAnError(t TestHelper) { 260 | devices, err := GetGPUDevices() 261 | t.RequireNoError(err) 262 | fmt.Printf("devices: %+v\n", devices) // clutters up test output, but useful 263 | } 264 | 265 | func 
WorkflowRegisteredSealProofFunctions(t TestHelper) { 266 | sealTypes := []abi.RegisteredSealProof{ 267 | abi.RegisteredSealProof_StackedDrg2KiBV1, 268 | abi.RegisteredSealProof_StackedDrg8MiBV1, 269 | abi.RegisteredSealProof_StackedDrg512MiBV1, 270 | abi.RegisteredSealProof_StackedDrg32GiBV1, 271 | abi.RegisteredSealProof_StackedDrg64GiBV1, 272 | 273 | abi.RegisteredSealProof_StackedDrg2KiBV1_1, 274 | abi.RegisteredSealProof_StackedDrg8MiBV1_1, 275 | abi.RegisteredSealProof_StackedDrg512MiBV1_1, 276 | abi.RegisteredSealProof_StackedDrg32GiBV1_1, 277 | abi.RegisteredSealProof_StackedDrg64GiBV1_1, 278 | } 279 | 280 | for _, st := range sealTypes { 281 | v, err := GetSealVersion(st) 282 | t.AssertNoError(err) 283 | t.AssertTrue(len(v) > 0) 284 | } 285 | } 286 | 287 | func WorkflowRegisteredPoStProofFunctions(t TestHelper) { 288 | postTypes := []abi.RegisteredPoStProof{ 289 | abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, 290 | abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, 291 | abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, 292 | abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, 293 | abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, 294 | 295 | abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, 296 | abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, 297 | abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, 298 | abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, 299 | abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, 300 | } 301 | 302 | for _, pt := range postTypes { 303 | v, err := GetPoStVersion(pt) 304 | t.AssertNoError(err) 305 | t.AssertTrue(len(v) > 0) 306 | } 307 | } 308 | 309 | func WorkflowGenerateWinningPoStSectorChallengeEdgeCase(t TestHelper) { 310 | for i := 0; i < 10000; i++ { 311 | var randomnessFr32 [32]byte 312 | _, err := io.ReadFull(rand.Reader, randomnessFr32[0:31]) // last byte of the 32 is always NUL 313 | t.RequireNoError(err) 314 | 315 | minerID := randActorID() 316 | eligibleSectorsLen := uint64(1) 317 | 318 | indices2, err := 
GenerateWinningPoStSectorChallenge(abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, minerID, randomnessFr32[:], eligibleSectorsLen) 319 | t.RequireNoError(err) 320 | t.AssertEqual(1, len(indices2)) 321 | t.AssertEqual(0, int(indices2[0])) 322 | } 323 | } 324 | 325 | func WorkflowGenerateWinningPoStSectorChallenge(t TestHelper) { 326 | for i := 0; i < 10000; i++ { 327 | var randomnessFr32 [32]byte 328 | _, err := io.ReadFull(rand.Reader, randomnessFr32[0:31]) // last byte of the 32 is always NUL 329 | t.RequireNoError(err) 330 | 331 | minerID := randActorID() 332 | eligibleSectorsLen := randUInt64() 333 | 334 | if eligibleSectorsLen == 0 { 335 | continue // no fun 336 | } 337 | 338 | indices, err := GenerateWinningPoStSectorChallenge(abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, minerID, randomnessFr32[:], eligibleSectorsLen) 339 | t.AssertNoError(err) 340 | 341 | max := uint64(0) 342 | for idx := range indices { 343 | if indices[idx] > max { 344 | max = indices[idx] 345 | } 346 | } 347 | 348 | t.AssertTrue(max < eligibleSectorsLen, "out of range value - max: ", max, "eligibleSectorsLen: ", eligibleSectorsLen) 349 | t.AssertTrue(uint64(len(indices)) <= eligibleSectorsLen, "should never generate more indices than number of eligible sectors") 350 | } 351 | } 352 | 353 | func randActorID() abi.ActorID { 354 | bID, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) 355 | if err != nil { 356 | panic(err) 357 | } 358 | return abi.ActorID(bID.Uint64()) 359 | } 360 | 361 | func randUInt64() uint64 { 362 | buf := make([]byte, 8) 363 | _, err := rand.Read(buf) 364 | if err != nil { 365 | panic(err) 366 | } 367 | 368 | return binary.LittleEndian.Uint64(buf) 369 | } 370 | 371 | func requireTempFile(t TestHelper, fileContentsReader io.Reader, size uint64) *os.File { 372 | file, err := ioutil.TempFile("", "") 373 | t.RequireNoError(err) 374 | 375 | written, err := io.Copy(file, fileContentsReader) 376 | t.RequireNoError(err) 377 | // check that we wrote everything 378 | 
t.RequireEqual(int(size), int(written)) 379 | 380 | t.RequireNoError(file.Sync()) 381 | 382 | // seek to the beginning 383 | _, err = file.Seek(0, 0) 384 | t.RequireNoError(err) 385 | 386 | return file 387 | } 388 | 389 | func requireTempDirPath(t TestHelper, prefix string) string { 390 | dir, err := ioutil.TempDir("", prefix) 391 | t.RequireNoError(err) 392 | 393 | return dir 394 | } 395 | 396 | func visit(paths *[]string) filepath.WalkFunc { 397 | return func(path string, info os.FileInfo, err error) error { 398 | if err != nil { 399 | panic(err) 400 | } 401 | *paths = append(*paths, path) 402 | return nil 403 | } 404 | } 405 | 406 | type TestHelper interface { 407 | AssertEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool 408 | AssertNoError(err error, msgAndArgs ...interface{}) bool 409 | AssertTrue(value bool, msgAndArgs ...interface{}) bool 410 | RequireEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) 411 | RequireNoError(err error, msgAndArgs ...interface{}) 412 | RequireTrue(value bool, msgAndArgs ...interface{}) 413 | } 414 | --------------------------------------------------------------------------------