├── rust-toolchain ├── .dockerignore ├── archived ├── car-utility │ ├── rust-toolchain │ ├── src │ │ ├── lib.rs │ │ ├── main.rs │ │ ├── unpack.rs │ │ ├── pack.rs │ │ └── run.rs │ ├── Cross.toml │ ├── Cargo.toml │ └── README.md └── cpp-transmit-example │ ├── src │ ├── api.hpp │ ├── lib.rs │ └── main.cpp │ ├── Cargo.toml │ └── Makefile ├── config ├── src │ ├── lib.rs │ └── config.rs └── Cargo.toml ├── local-storage ├── Cross.toml ├── src │ ├── error.rs │ ├── lib.rs │ ├── util.rs │ ├── provider.rs │ └── null_provider.rs └── Cargo.toml ├── myceli ├── build.rs ├── src │ ├── lib.rs │ ├── version_info.rs │ └── main.rs ├── Cross.toml ├── README.md ├── Cargo.toml └── tests │ ├── utils │ └── mod.rs │ └── listener_test.rs ├── .gitignore ├── smalog ├── Cargo.toml └── src │ └── lib.rs ├── .github ├── dependabot.yml └── workflows │ ├── stale.yml │ ├── docker-test.yml │ ├── generated-pr.yml │ ├── linting.yaml │ ├── unit-testing.yaml │ └── create-release.yaml ├── .idea ├── vcs.xml ├── .gitignore ├── modules.xml └── space.iml ├── messages ├── src │ ├── err.rs │ ├── lib.rs │ ├── protocol.rs │ ├── sync.rs │ ├── api.rs │ ├── cid_list.rs │ └── message.rs └── Cargo.toml ├── ipfs-unixfs ├── build.rs ├── src │ ├── unixfs.proto │ ├── merkledag.proto │ ├── lib.rs │ ├── chunker.rs │ ├── chunker │ │ └── fixed.rs │ └── types.rs └── Cargo.toml ├── meetings ├── 2023-02-13.md ├── 2023-02-08.md ├── 2023-02-21.md ├── 2023-01-26.md └── 2022-11-14.md ├── Cross.toml ├── testing ├── udp_forward │ ├── src │ │ ├── err.rs │ │ └── main.rs │ └── Cargo.toml ├── local_test │ ├── all.sh │ ├── compress.case.sh │ ├── setup.env │ ├── kubo.case.sh │ ├── timeout.killer.sh │ ├── original.case.sh │ ├── watcher.case.sh │ ├── netperf.case.sh │ └── funcs.env └── testing-plan.md ├── cross-x86-linux.Dockerfile ├── local-dev-environment ├── desktop │ ├── rfm69-service │ │ ├── Cargo.toml │ │ └── src │ │ │ └── main.rs │ └── rfm69-driver │ │ └── driver │ │ └── driver.ino └── raspberry-pi │ └── RFM69HCW │ └── service.py ├── 
cross-aarch64-linux.Dockerfile ├── transports ├── src │ ├── lib.rs │ ├── error.rs │ ├── chunking.rs │ └── udp_transport.rs └── Cargo.toml ├── cross-armv7-linux.Dockerfile ├── controller ├── Cargo.toml └── src │ └── main.rs ├── docs ├── charts │ ├── netover.md │ ├── sync_specialfailure.md │ ├── filedag.md │ ├── ship.md │ └── sync.md ├── myceli-docker.md ├── hyphae.md ├── poc-car-transmission.md ├── setup-local-environment.md └── myceli-basic-setup.md ├── myceli.Dockerfile ├── watcher ├── Cargo.toml └── src │ ├── main.rs │ └── handler.rs ├── LICENSE ├── Cargo.toml └── DESIGN.md /rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.70.0 2 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .git/ 3 | *.db -------------------------------------------------------------------------------- /archived/car-utility/rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.63 2 | -------------------------------------------------------------------------------- /config/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod config; 2 | 3 | pub type Config = config::Config; 4 | -------------------------------------------------------------------------------- /archived/car-utility/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod pack; 2 | pub mod run; 3 | pub mod unpack; 4 | -------------------------------------------------------------------------------- /local-storage/Cross.toml: -------------------------------------------------------------------------------- 1 | [target.armv7-unknown-linux-gnueabihf] 2 | dockerfile = "../cross.Dockerfile" -------------------------------------------------------------------------------- 
/archived/car-utility/Cross.toml: -------------------------------------------------------------------------------- 1 | [target.armv7-unknown-linux-gnueabihf] 2 | dockerfile = "../cross.Dockerfile" -------------------------------------------------------------------------------- /myceli/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | built::write_built_file().expect("Failed to acquire build-time information"); 3 | } 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .DS_Store 3 | .vscode/ 4 | build/ 5 | storage.db 6 | sat.*/ 7 | gnd/ 8 | ctl 9 | log 10 | *.pid 11 | -------------------------------------------------------------------------------- /archived/cpp-transmit-example/src/api.hpp: -------------------------------------------------------------------------------- 1 | 2 | extern "C" { 3 | 4 | int generate_transmit_msg(unsigned char* msg, char path[], char addr[]); 5 | 6 | } -------------------------------------------------------------------------------- /smalog/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "smalog" 3 | version = "0.0.1" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | log.workspace = true 8 | chrono = "0.4.31" -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /myceli/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod handlers; 2 | pub mod listener; 3 | #[cfg(feature = "proto_ship")] 4 
| pub mod shipper; 5 | #[cfg(feature = "proto_sync")] 6 | mod sync; 7 | mod version_info; 8 | -------------------------------------------------------------------------------- /.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /.idea/.gitignore: -------------------------------------------------------------------------------- 1 | # Default ignored files 2 | /shelf/ 3 | /workspace.xml 4 | # Editor-based HTTP Client requests 5 | /httpRequests/ 6 | # Datasource local storage ignored files 7 | /dataSources/ 8 | /dataSources.local.xml 9 | -------------------------------------------------------------------------------- /messages/src/err.rs: -------------------------------------------------------------------------------- 1 | use derive_error::Error; 2 | 3 | #[derive(Debug, Error)] 4 | pub enum Error { 5 | Cid(cid::Error), 6 | EmptyCidList, 7 | } 8 | 9 | pub type Result = std::result::Result; 10 | -------------------------------------------------------------------------------- /archived/car-utility/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | 4 | #[tokio::main(flavor = "multi_thread")] 5 | async fn main() -> Result<()> { 6 | let cli = car_utility::run::Cli::parse(); 7 | cli.run().await 8 | } 9 | -------------------------------------------------------------------------------- /ipfs-unixfs/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | prost_build::Config::new() 3 | .bytes([".unixfs_pb.Data", ".merkledag_pb.PBNode.Data"]) 4 | .compile_protos(&["src/unixfs.proto", "src/merkledag.proto"], &["src"]) 5 | .unwrap(); 6 | } 7 | -------------------------------------------------------------------------------- /meetings/2023-02-13.md: 
-------------------------------------------------------------------------------- 1 | # Meeting on 2/13/2023 with Little Bear Labs, Lockheed, and Protocol Labs 2 | 3 | ## Notes 4 | 5 | * Attendees: Ryan, Jon, Pete, Anshuman, David 6 | * Talked over Jon's system diagrams 7 | * Talked over Anshuman's control flow diagrams -------------------------------------------------------------------------------- /archived/cpp-transmit-example/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cpp-transmit-example" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | messages = { path = "../messages" } 8 | 9 | [lib] 10 | name = "transmit" 11 | crate-type = ["staticlib"] -------------------------------------------------------------------------------- /Cross.toml: -------------------------------------------------------------------------------- 1 | [target.armv7-unknown-linux-gnueabihf] 2 | dockerfile = "cross-armv7-linux.Dockerfile" 3 | 4 | [target.aarch64-unknown-linux-gnu] 5 | dockerfile = "cross-aarch64-linux.Dockerfile" 6 | 7 | [target.x86_64-unknown-linux-gnu] 8 | dockerfile = "cross-x86-linux.Dockerfile" -------------------------------------------------------------------------------- /local-storage/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | #[derive(Debug, Error)] 4 | pub enum StorageError { 5 | #[error("Block not found for CID {0}: {1}")] 6 | BlockNotFound(String, String), 7 | #[error("DAG incomplete {0}")] 8 | DagIncomplete(String), 9 | } 10 | -------------------------------------------------------------------------------- /myceli/Cross.toml: -------------------------------------------------------------------------------- 1 | [target.armv7-unknown-linux-gnueabihf] 2 | dockerfile = "../cross-armv7-linux.Dockerfile" 3 | 4 | [target.aarch64-unknown-linux-gnu] 5 | dockerfile = 
"../cross-aarch64-linux.Dockerfile" 6 | 7 | [target.x86_64-unknown-linux-gnu] 8 | dockerfile = "../cross-x86-linux.Dockerfile" -------------------------------------------------------------------------------- /testing/udp_forward/src/err.rs: -------------------------------------------------------------------------------- 1 | use derive_error::Error; 2 | use std::io; 3 | use std::num::ParseIntError; 4 | 5 | #[derive(Debug, Error)] 6 | pub enum Error { 7 | Io(io::Error), 8 | ParseInt(ParseIntError), 9 | } 10 | 11 | pub type Result = std::result::Result; 12 | -------------------------------------------------------------------------------- /meetings/2023-02-08.md: -------------------------------------------------------------------------------- 1 | # Meeting on 2/8/2023 with Little Bear Labs and Protocol Labs 2 | 3 | ## Notes 4 | 5 | * Attendees: Ryan and Anshuman 6 | * Talked over sequence diagrams for ground-to-space and space-to-ground transmissions 7 | * Revised several API calls to operate on DAGs instead of CIDs 8 | -------------------------------------------------------------------------------- /.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /testing/udp_forward/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "udp_forward" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | derive-error.workspace = true 10 | rand.workspace = true 11 | chrono = "0.4.31" -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 
* * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/docker-test.yml: -------------------------------------------------------------------------------- 1 | name: Docker Testing 2 | on: [ pull_request ] 3 | jobs: 4 | test_docker_build: 5 | name: Test Docker build 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v3 9 | - name: Test build of myceli docker 10 | run: docker build -f myceli.Dockerfile . 11 | -------------------------------------------------------------------------------- /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /archived/cpp-transmit-example/Makefile: -------------------------------------------------------------------------------- 1 | all: ../target/debug/libtransmit.a 2 | mkdir -p build 3 | g++ src/main.cpp -L ../target/debug/ -ltransmit -o build/transmit 4 | 5 | run: 6 | LD_LIBRARY_PATH=../target/debug/ ./build/transmit 7 | 8 | ../target/debug/libtransmit.a: src/lib.rs Cargo.toml 9 | cargo build 10 | 11 | clean: 12 | rm -rf build && cargo clean -------------------------------------------------------------------------------- /config/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "config" 3 | version.workspace = true 4 | edition.workspace = true 5 | license.workspace = 
true 6 | repository.workspace = true 7 | rust-version.workspace = true 8 | 9 | [dependencies] 10 | anyhow.workspace = true 11 | dirs = "5.0.1" 12 | figment.workspace = true 13 | log.workspace = true 14 | serde.workspace = true 15 | serde_derive.workspace = true -------------------------------------------------------------------------------- /cross-x86-linux.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/cross-rs/x86_64-unknown-linux-gnu:latest 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y unzip libssl-dev 5 | 6 | RUN curl -Lo protoc.zip "https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-linux-x86_64.zip" 7 | RUN unzip -q protoc.zip -d /usr/local 8 | RUN chmod a+x /usr/local/bin/protoc 9 | ENV PROTOC=/usr/local/bin/protoc -------------------------------------------------------------------------------- /messages/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod api; 2 | pub mod cid_list; 3 | mod err; 4 | pub(crate) mod message; 5 | 6 | #[cfg(feature = "proto_ship")] 7 | pub(crate) mod protocol; 8 | mod sync; 9 | 10 | pub use api::{ApplicationAPI, DagInfo}; 11 | pub use message::Message; 12 | #[cfg(feature = "proto_ship")] 13 | pub use protocol::{DataProtocol, TransmissionBlock}; 14 | pub use sync::{SyncMessage, PUSH_OVERHEAD}; 15 | -------------------------------------------------------------------------------- /local-dev-environment/desktop/rfm69-service/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rfm69-service" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | clap.workspace = true 10 | log.workspace = true 11 | tokio.workspace = true 12 | tokio-serial.workspace = true 13 | env_logger.workspace = true 14 | 
-------------------------------------------------------------------------------- /cross-aarch64-linux.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5 2 | 3 | RUN dpkg --add-architecture arm64 && apt-get update && \ 4 | apt-get install -y unzip libssl-dev:arm64 5 | 6 | RUN curl -Lo protoc.zip "https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-linux-x86_64.zip" 7 | RUN unzip -q protoc.zip -d /usr/local 8 | RUN chmod a+x /usr/local/bin/protoc 9 | ENV PROTOC=/usr/local/bin/protoc -------------------------------------------------------------------------------- /transports/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod chunking; 2 | mod error; 3 | mod udp_chunking; 4 | mod udp_transport; 5 | 6 | use messages::Message; 7 | 8 | pub const MAX_MTU: u16 = 1024 * 3; 9 | pub use error::{Result, TransportError}; 10 | 11 | pub trait Transport: Send + Sync { 12 | fn receive(&self) -> Result<(Message, String)>; 13 | fn send(&self, msg: Message, addr: &str) -> Result<()>; 14 | } 15 | 16 | pub use udp_transport::UdpTransport; 17 | -------------------------------------------------------------------------------- /cross-armv7-linux.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/cross-rs/armv7-unknown-linux-gnueabihf:0.2.5 2 | 3 | RUN dpkg --add-architecture armhf && apt-get update && \ 4 | apt-get install -y unzip openssl libssl-dev:armhf 5 | 6 | RUN curl -Lo protoc.zip "https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-linux-x86_64.zip" 7 | RUN unzip -q protoc.zip -d /usr/local 8 | RUN chmod a+x /usr/local/bin/protoc 9 | ENV PROTOC=/usr/local/bin/protoc -------------------------------------------------------------------------------- /meetings/2023-02-21.md: 
-------------------------------------------------------------------------------- 1 | # Meeting on 2/21/2023 with Little Bear Labs, Lockheed, and Protocol Labs 2 | 3 | ## Notes 4 | 5 | * Attendees: Ryan, Jon, Pete, Dietrich, David 6 | * We decided that a dedicated CLI tool for forming control API messages is sufficient for manual operators. 7 | * Ideal test payloads will be satellite imagery files in GeoTIFF format. Testing with several of these images across a variety of sizes will be necessary before any on-mission usage. -------------------------------------------------------------------------------- /local-storage/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod block; 2 | pub mod error; 3 | pub mod provider; 4 | pub mod storage; 5 | mod util; 6 | 7 | mod null_provider; 8 | 9 | #[cfg(feature = "files")] 10 | mod file_provider; 11 | #[cfg(feature = "sqlite")] 12 | pub mod sql_provider; 13 | 14 | #[cfg(all(not(test), feature = "sqlite", feature = "files"))] 15 | compile_error! 
{"Outside of unit tests there's not a good reason to compile with multiple StorageProvider implementations."} 16 | -------------------------------------------------------------------------------- /controller/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "controller" 3 | version.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | repository.workspace = true 7 | rust-version.workspace = true 8 | 9 | [dependencies] 10 | anyhow.workspace = true 11 | clap.workspace = true 12 | config.workspace = true 13 | env_logger.workspace = true 14 | log.workspace = true 15 | messages.workspace = true 16 | serde_json.workspace = true 17 | tokio.workspace = true 18 | transports.workspace = true -------------------------------------------------------------------------------- /docs/charts/netover.md: -------------------------------------------------------------------------------- 1 | ```mermaid 2 | flowchart TD 3 | subgraph Vehicle 4 | A["Application (e.g. Watcher)"] -- ApplicationAPI/UDP --> B[Myceli] 5 | B <-- CommsAPI/UDP --> C[Comms] 6 | end 7 | 8 | subgraph Radio 9 | Z[Data Transfer Protocol] 10 | end 11 | 12 | subgraph Ground 13 | F["Service (e.g. 
Controller)"] -- ApplicationAPI/UDP --> E[Myceli] 14 | E <-- CommsAPI/UDP --> G[Comms] 15 | end 16 | 17 | C <--> Z 18 | G <--> Z 19 | ``` 20 | -------------------------------------------------------------------------------- /ipfs-unixfs/src/unixfs.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package unixfs_pb; 4 | 5 | message Data { 6 | enum DataType { 7 | Raw = 0; 8 | Directory = 1; 9 | File = 2; 10 | Metadata = 3; 11 | Symlink = 4; 12 | HAMTShard = 5; 13 | } 14 | 15 | DataType Type = 1; 16 | optional bytes Data = 2; 17 | optional uint64 filesize = 3; 18 | repeated uint64 blocksizes = 4; 19 | 20 | optional uint64 hashType = 5; 21 | optional uint64 fanout = 6; 22 | } 23 | 24 | message Metadata { 25 | optional string MimeType = 1; 26 | } 27 | -------------------------------------------------------------------------------- /ipfs-unixfs/src/merkledag.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package merkledag_pb; 4 | 5 | // An IPFS MerkleDAG Link 6 | message PBLink { 7 | // multihash of the target object 8 | optional bytes Hash = 1; 9 | 10 | // utf string name. should be unique per object 11 | optional string Name = 2; 12 | 13 | // cumulative size of target object 14 | optional uint64 Tsize = 3; 15 | } 16 | 17 | // An IPFS MerkleDAG Node 18 | message PBNode { 19 | 20 | // refs to other objects 21 | repeated PBLink Links = 2; 22 | 23 | // opaque user data 24 | optional bytes Data = 1; 25 | } 26 | -------------------------------------------------------------------------------- /myceli/src/version_info.rs: -------------------------------------------------------------------------------- 1 | use messages::ApplicationAPI; 2 | // The file has been placed there by the build script. 
3 | include!(concat!(env!("OUT_DIR"), "/built.rs")); 4 | 5 | pub fn get(remote_label: Option) -> ApplicationAPI { 6 | ApplicationAPI::Version { 7 | version: env!("CARGO_PKG_VERSION").to_string(), 8 | rust: env!("CARGO_PKG_RUST_VERSION").to_string(), 9 | target: TARGET.to_owned(), 10 | profile: PROFILE.to_owned(), 11 | features: FEATURES.iter().map(|s| s.to_string()).collect(), 12 | remote_label, 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /testing/local_test/all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | cd `dirname "${0}"` 3 | for c in *.case.sh 4 | do 5 | echo -e "\n\n\t ### \t START \t ### \t ### \t Test Suite: \t ${c%.case.sh} \t ### \t###\n" 6 | if "./${c}" 7 | then 8 | echo -e "\n\t ### \t PASSED \t ### \t ### \t Test Suite: \t ${c%.case.sh} \t ### \t###\n\n" 9 | else 10 | echo -e "\n\t ### \t FAILED \t ### \t ### \t Test Suite: \t ${c%.case.sh} \t ### \t###\n\n" 11 | exit 9 12 | fi 13 | done 14 | 15 | echo -e '\n\n\t###\t###\t PASSED \t###\t###\n' 16 | 17 | source funcs.env 18 | kill_all 19 | 20 | echo -e '\n\t###\t###\t DONE \t###\t###\n\n' 21 | -------------------------------------------------------------------------------- /meetings/2023-01-26.md: -------------------------------------------------------------------------------- 1 | # Meeting on 1/26/2023 with Little Bear Labs and Lockheed 2 | 3 | ## Notes 4 | 5 | * Attendees: Ryan, Dietrich, Jon, Pete, Anshuman, Mark, David 6 | * Went over the Application API 7 | * Ryan demo’d sending/receiving control message + resultant file transmission 8 | * Jon suggested implementing byte estimation into the API (ground knows this, but satellite may not) 9 | * Request CID, Request Available CIDs API implementations requested by Dietrich 10 | * Advertise New CIDs (with description), Delete CID API, Missing Child CIDs implementations requested by Jon 11 | 12 | ## Follow Up 13 | 14 | Anshuman to 
writeup use cases with diagrams. -------------------------------------------------------------------------------- /messages/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "messages" 3 | version.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | repository.workspace = true 7 | rust-version.workspace = true 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | cid.workspace = true 13 | clap.workspace = true 14 | derive-error.workspace = true 15 | ipfs-unixfs.workspace = true 16 | log.workspace = true 17 | parity-scale-codec.workspace = true 18 | parity-scale-codec-derive.workspace = true 19 | serde.workspace = true 20 | 21 | [features] 22 | proto_ship = [] 23 | proto_sync = [] 24 | -------------------------------------------------------------------------------- /.github/workflows/linting.yaml: -------------------------------------------------------------------------------- 1 | name: CI Linting 2 | on: [ pull_request ] 3 | jobs: 4 | clippy_check: 5 | name: Run clippy check 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Install Protoc 9 | uses: arduino/setup-protoc@v2 10 | - uses: actions/checkout@v3 11 | - id: rust 12 | run: echo "toolchain=$(cat rust-toolchain)" >> $GITHUB_OUTPUT 13 | shell: bash 14 | - uses: dtolnay/rust-toolchain@master 15 | with: 16 | toolchain: ${{ steps.rust.outputs.toolchain }} 17 | components: clippy 18 | - name: Run clippy 19 | run: cargo clippy --all --features big 20 | shell: bash 21 | -------------------------------------------------------------------------------- /testing/local_test/compress.case.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | source `dirname "${0}"`/setup.env 3 | 4 | for m in sat.*/myceli 5 | do 6 | xz -9 --keep --extreme "${m}" 7 | gzip --keep --best "${m}" 8 | done 9 | 
max_size=1000000 # 1MB (not MiB) in B 10 | 11 | for format in {g,x}z 12 | do 13 | for variant in sat.{all,sync,ship}/myceli. 14 | do 15 | fil="${variant}${format}" 16 | ls -lrth "${fil}" 17 | if [ `stat --format=%s "${fil}"` -gt ${max_size} ] 18 | then 19 | echo -e "\n\t###\t PROBLEM: \t###\t ${fil} is over ${max_size} B \t###\n" 20 | exit 99 21 | else 22 | export max_size=$((max_size - 40000)) 23 | fi 24 | done 25 | done 26 | -------------------------------------------------------------------------------- /archived/car-utility/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "car-utility" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | anyhow = "1" 10 | iroh-resolver = { git = "https://github.com/n0-computer/iroh", tag = "v0.1.0" } 11 | iroh-car = { git = "https://github.com/n0-computer/iroh", tag = "v0.1.0" } 12 | tokio = { version = "1", features = ["fs", "io-util"] } 13 | cid = "0.8" 14 | futures = "0.3.21" 15 | clap = { version = "4.0.15", features = ["derive"] } 16 | 17 | [patch.crates-io] 18 | libp2p = { git = "https://github.com/dignifiedquire/rust-libp2p", branch = "iroh-0-50" } -------------------------------------------------------------------------------- /myceli.Dockerfile: -------------------------------------------------------------------------------- 1 | # Base build stage 2 | FROM rust:1.67 as builder 3 | # Install protobuf compiler 4 | RUN curl -Lo protoc.zip "https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-linux-x86_64.zip" 5 | RUN unzip protoc.zip -d protoc/ 6 | RUN cp -a protoc/* /usr/local 7 | 8 | # Copy over and build myceli 9 | COPY . . 
10 | RUN cargo build --bin myceli --features big 11 | RUN cp ./target/debug/myceli /usr/bin/myceli 12 | 13 | # Extras stage 14 | FROM debian:bullseye-slim 15 | LABEL org.opencontainers.image.source="https://github.com/ipfs-shipyard/space" 16 | COPY --from=builder /usr/bin/myceli /usr/bin/myceli 17 | COPY --from=builder Cargo.toml /usr/local/Cargo.toml 18 | ENTRYPOINT myceli $CONFIG_PATH 19 | -------------------------------------------------------------------------------- /watcher/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "watcher" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | anyhow.workspace = true 10 | config.workspace = true 11 | env_logger = { workspace = true, optional = true } 12 | log.workspace = true 13 | messages.workspace = true 14 | notify = "6.0.1" 15 | smalog = { workspace = true, optional = true } 16 | transports.workspace = true 17 | 18 | [features] 19 | big = ["good_log", "proto_sync", "proto_ship"] 20 | small = ["small_log"] 21 | good_log = ["dep:env_logger"] 22 | small_log = ["dep:smalog"] 23 | proto_sync = ["messages/proto_sync"] 24 | proto_ship = ["messages/proto_ship"] 25 | -------------------------------------------------------------------------------- /archived/car-utility/src/unpack.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::TryStreamExt; 3 | use iroh_car::CarReader; 4 | use std::path::PathBuf; 5 | use tokio::fs::File; 6 | use tokio::io::AsyncWriteExt; 7 | use tokio::io::BufReader; 8 | 9 | pub async fn unpack(path: &PathBuf, output: &PathBuf) -> Result<()> { 10 | let file = File::open(path).await?; 11 | let buf_reader = BufReader::new(file); 12 | 13 | let car_reader = CarReader::new(buf_reader).await?; 14 | let mut output_file = File::create(output).await?; 
15 | 16 | let files: Vec<_> = car_reader.stream().try_collect().await.unwrap(); 17 | for (_cid, data) in files { 18 | output_file.write_all(&data).await?; 19 | } 20 | output_file.flush().await?; 21 | Ok(()) 22 | } 23 | -------------------------------------------------------------------------------- /meetings/2022-11-14.md: -------------------------------------------------------------------------------- 1 | # Meeting on 11/14/2022 with Little Bear Labs and Lockheed 2 | 3 | ## Notes 4 | 5 | * 2 use cases: (1) Satellite to Ground, and (2) Satellite to Satellite 6 | * 3 core pieces: bus, payloads (crosslink control, antennae), mission processing (Linux) 7 | * Runtime is powered on for a small duration (on demand) 8 | * 3 tech parts: IPFS (trimmed down), content management (e.g. web server), network layer (most work needs to be done here) 9 | * Constraints: high packet loss rates, variable bandwidth 10 | * Sample protocol: CCSDS (Common Space Protocol) similar to IPV4 11 | * Could take the API directly to the bus if applicable 12 | * 128kbps bandwidth (decent amount is overhead) 13 | 14 | ## Follow Up 15 | 16 | We plan to have regular meetings (TBD). Non-mission data will be published in this repository. 
-------------------------------------------------------------------------------- /.github/workflows/unit-testing.yaml: -------------------------------------------------------------------------------- 1 | name: CI Unit Testing 2 | on: [ pull_request ] 3 | jobs: 4 | build_and_test: 5 | name: Build and test rust 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Install Protoc 9 | uses: arduino/setup-protoc@v2 10 | - uses: actions/checkout@v3 11 | - uses: dtolnay/rust-toolchain@stable 12 | - name: Build workspace 13 | run: cargo build --release --features big 14 | shell: bash 15 | - name: Test big 16 | run: cargo test --features big,proto_all 17 | shell: bash 18 | - name: Test small 19 | run: cargo test --features small,proto_all 20 | shell: bash 21 | - name: Test small ship 22 | run: cargo test --features small,proto_ship 23 | shell: bash 24 | - name: Test small sync 25 | run: cargo test --features small,proto_sync 26 | shell: bash 27 | -------------------------------------------------------------------------------- /testing/local_test/setup.env: -------------------------------------------------------------------------------- 1 | if ! ( uname | grep -q Linux ) 2 | then 3 | echo "This script only works on linux." 
4 | exit 6 5 | fi 6 | export script_dir=`dirname "${0}"` 7 | export repo_dir=`git -C "${script_dir}" rev-parse --show-toplevel` 8 | cd "${repo_dir}" 9 | 10 | if [ "${o}" = '' ] 11 | then 12 | export o=`mktemp -d` 13 | fi 14 | 15 | source "./testing/local_test/funcs.env" 16 | 17 | kill_all 18 | 19 | for d in gnd sat.{all,sync,ship} 20 | do 21 | rm -r ${d} || true 22 | done 23 | mkdir -p sat.{all,ship,sync} gnd ctl 24 | 25 | ./testing/local_test/timeout.killer.sh 26 | 27 | configure 10000 28 | 29 | bld gnd myceli big release 30 | bld gnd watcher big release 31 | #bld gnd hyphae big release 32 | bld ctl controller big release 33 | for p in all sync ship 34 | do 35 | bld sat.${p} myceli small,proto_${p} small 36 | bld sat.${p} watcher small small 37 | done 38 | 39 | echo 'Setup finished' 40 | -------------------------------------------------------------------------------- /transports/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "transports" 3 | version.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | repository.workspace = true 7 | rust-version.workspace = true 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | cid.workspace = true 13 | derive-error.workspace = true 14 | env_logger = { workspace = true, optional = true } 15 | log.workspace = true 16 | messages = { workspace = true, features = [] } 17 | parity-scale-codec.workspace = true 18 | parity-scale-codec-derive.workspace = true 19 | rand.workspace = true 20 | serde.workspace = true 21 | serde_derive.workspace = true 22 | smalog = { workspace = true, optional = true } 23 | 24 | [features] 25 | good_log = ["dep:env_logger"] 26 | small_log = ["dep:smalog"] 27 | proto_ship = ["messages/proto_ship"] 28 | proto_sync = ["messages/proto_sync"] 29 | -------------------------------------------------------------------------------- 
/myceli/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | The myceli application acts as the "node" in this IPFS-for-space project. The current design allows a myceli to act as a node either on a spacecraft or in a ground station. While myceli is running it can receive and respond to any API or data protocol messaging. 4 | 5 | ## Usage 6 | 7 | Start an instance: 8 | 9 | $ cargo run --bin myceli 10 | 11 | This command will start a `myceli` instance which is listening at `127.0.0.1:8001` and will respond to any valid messages received on that address. 12 | 13 | Next, send a command. The `controller` utility is a CLI tool used to generate and send messages to `myceli` instances. For example, we can ask the running instance which blocks it currently has available: 14 | 15 | $ cargo run --bin controller -- -l 127.0.0.1:8001 request-available-blocks 16 | 17 | This will send a `RequestAvailableBlocks` message to the instance listening at `127.0.0.1:8001` and display the response when it is received. 
-------------------------------------------------------------------------------- /local-storage/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "local-storage" 3 | version.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | repository.workspace = true 7 | rust-version.workspace = true 8 | 9 | [dependencies] 10 | anyhow.workspace = true 11 | cid.workspace = true 12 | bytes.workspace = true 13 | env_logger = { workspace = true, optional = true } 14 | futures.workspace = true 15 | ipfs-unixfs.workspace = true 16 | log.workspace = true 17 | rusqlite = { workspace = true, optional = true } 18 | smalog = { workspace = true, optional = true } 19 | thiserror.workspace = true 20 | tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } 21 | #tokio-util = { workspace = true, features = ["io-util"] } 22 | 23 | [features] 24 | big = ["sqlite", "good_log"] 25 | small = ["files", "small_log"] 26 | 27 | good_log = [] 28 | small_log = [] 29 | sqlite = ["dep:rusqlite"] 30 | files = [] 31 | 32 | [dev-dependencies] 33 | assert_fs.workspace = true 34 | rand.workspace = true 35 | -------------------------------------------------------------------------------- /transports/src/error.rs: -------------------------------------------------------------------------------- 1 | use derive_error::Error; 2 | use std::{ 3 | fmt::{Display, Formatter}, 4 | num::TryFromIntError, 5 | }; 6 | 7 | #[derive(Error, Debug)] 8 | pub enum TransportError { 9 | Io(std::io::Error), 10 | Cid(cid::Error), 11 | AdHoc(AdHocError), 12 | Scale(parity_scale_codec::Error), 13 | TimedOut, 14 | IntegerValueOutOfBounds(TryFromIntError), 15 | } 16 | 17 | pub type Result = std::result::Result; 18 | 19 | pub fn adhoc(msg: &str) -> TransportError { 20 | TransportError::AdHoc(AdHocError { 21 | message: msg.to_owned(), 22 | }) 23 | } 24 | pub fn adhoc_err(msg: &str) -> Result<()> { 25 | Err(adhoc(msg)) 26 | } 27 | 28 | 
#[derive(Debug)] 29 | pub struct AdHocError { 30 | pub message: String, 31 | } 32 | 33 | impl Display for AdHocError { 34 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 35 | f.write_str(&self.message) 36 | } 37 | } 38 | 39 | impl std::error::Error for AdHocError {} 40 | -------------------------------------------------------------------------------- /testing/local_test/kubo.case.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | source `dirname "${0}"`/setup.env 4 | 5 | for i in {0..99} 6 | do 7 | sleep ${i} 8 | if port_open 5001 9 | then 10 | break 11 | else 12 | ( ( ipfs daemon >"${o}/kubo.log" 2>&1 <&- & ) & ) & 13 | sleep $(( i + 9 )) 14 | fi 15 | done 16 | 17 | start_myceli gnd 18 | 19 | date > "${o}/known_good_path" 20 | echo 'Import a file.' 21 | controller 8765 import-file "${o}/known_good_path" 22 | 23 | echo -e '\n\n\t###\tStarting hyphae...\t###\n' 24 | start hyphae gnd hyphae.toml 25 | echo -e '\nNow waiting for sync to Kubo...\n' 26 | for i in {0..99} 27 | do 28 | export cid=`grep 'Received.response:.*FileImported' ctl/controller.log | tail -n 1 | cut -d '"' -f 4` 29 | if [ "${cid}" = '' ] 30 | then 31 | echo "CID not imported into myceli yet." 32 | elif timeout $(( 9 + i )) ipfs block get "${cid}" 33 | then 34 | break 35 | else 36 | echo "${cid} not yet in Kubo" 37 | fi 38 | done 39 | ipfs block get ${cid} 40 | ipfs dag get ${cid} | jq . 
41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 IPFS Shipyard 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /archived/cpp-transmit-example/src/lib.rs: -------------------------------------------------------------------------------- 1 | use messages::{ApplicationAPI, Message}; 2 | use std::ffi::{c_char, c_int, c_uchar, CStr}; 3 | 4 | /// # Safety 5 | /// 6 | /// The caller of this function needs to ensure that buffer, path, and addr are not null 7 | /// and that buffer has sufficient space for a message to be written into it. 
8 | #[no_mangle] 9 | pub unsafe extern "C" fn generate_transmit_msg( 10 | buffer: *mut c_uchar, 11 | cid: *const c_char, 12 | addr: *const c_char, 13 | ) -> c_int { 14 | let cid_str = unsafe { 15 | assert!(!cid.is_null()); 16 | CStr::from_ptr(cid) 17 | }; 18 | 19 | let addr_str = unsafe { 20 | assert!(!addr.is_null()); 21 | CStr::from_ptr(addr) 22 | }; 23 | 24 | let msg = Message::ApplicationAPI(ApplicationAPI::TransmitDag { 25 | cid: cid_str.to_str().unwrap().to_owned(), 26 | target_addr: addr_str.to_str().unwrap().to_owned(), 27 | retries: 0, 28 | }); 29 | let msg_bytes = msg.to_bytes(); 30 | unsafe { 31 | std::slice::from_raw_parts_mut(buffer, msg_bytes.len()).copy_from_slice(&msg_bytes); 32 | } 33 | msg_bytes.len().try_into().unwrap() 34 | } 35 | -------------------------------------------------------------------------------- /docs/charts/sync_specialfailure.md: -------------------------------------------------------------------------------- 1 | ```mermaid 2 | sequenceDiagram 3 | participant G as Ground 4 | participant V as Vehicle 5 | Note over G: Import File 6 | Note left of G: Available CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
... 4 more (5 leaves in total) 7 | G --X V: "Push" Send CIDs to Expect (& File Name) 8 | Note right of V: Available CIDs:
(none)
Missing CIDs:
(none - the push never got here) 9 | G ->> V: Send Block (bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i) 10 | Note over V: Hash, store. 11 | Note over V: Parse as stem, passes - has 5 children. 12 | loop For each child CID 13 | Note over V: Neither available nor marked as missing, mark as missing. 14 | end 15 | Note right of V: Available CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli

Missing CIDs:
... 4 CIDs remain ... 16 | V ->> G: Pull (5 CIDs mentioned in stem) 17 | loop Other CIDs in pull 18 | G ->> V: Send Blocks 19 | End 20 | ``` -------------------------------------------------------------------------------- /archived/car-utility/src/pack.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use cid::Cid; 3 | use futures::StreamExt; 4 | use iroh_car::{CarHeader, CarWriter}; 5 | use iroh_resolver::unixfs_builder::{File, FileBuilder}; 6 | use std::fs::File as FsFile; 7 | 8 | use std::io::Write; 9 | use std::path::PathBuf; 10 | 11 | pub async fn pack(path: &PathBuf, output: &PathBuf) -> Result<()> { 12 | let file: File = FileBuilder::new().path(path).build().await?; 13 | 14 | let _root: Option = None; 15 | let parts = { Box::pin(file.encode().await?) }; 16 | tokio::pin!(parts); 17 | 18 | let mut cids = vec![]; 19 | let mut datas = vec![]; 20 | 21 | while let Some(part) = parts.next().await { 22 | let (cid, bytes, _links) = part?.into_parts(); 23 | cids.push(cid); 24 | datas.push(bytes); 25 | } 26 | 27 | let mut buffer = vec![]; 28 | let car_header = CarHeader::new_v1(cids.clone()); 29 | let mut writer = CarWriter::new(car_header, &mut buffer); 30 | 31 | for (cid, data) in cids.into_iter().zip(datas.into_iter()) { 32 | writer.write(cid, data).await?; 33 | } 34 | 35 | writer.finish().await?; 36 | 37 | let mut f = FsFile::create(output).expect("failed to create file"); 38 | f.write_all(&buffer)?; 39 | 40 | Ok(()) 41 | } 42 | -------------------------------------------------------------------------------- /ipfs-unixfs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ipfs-unixfs" 3 | authors = ["dignifiedquire ", "Ryan Plauche "] 4 | description = "Implementation of unixfs for iroh, a trimmed copy of https://github.com/n0-computer/beetle/tree/main/iroh-unixfs" 5 | version.workspace = true 6 | edition.workspace = true 7 | 
license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [dependencies] 12 | anyhow.workspace = true 13 | async-recursion.workspace = true 14 | async-stream.workspace = true 15 | bytes.workspace = true 16 | cid.workspace = true 17 | futures.workspace = true 18 | libipld.workspace = true 19 | log.workspace = true 20 | multihash.workspace = true 21 | num_enum.workspace = true 22 | prost.workspace = true 23 | tokio = { workspace = true, features = ["fs", "io-util", "rt"] } 24 | #tokio-util = { workspace = true, features = ["io-util"] } 25 | 26 | [dev-dependencies] 27 | # criterion = { workspace = true, features = ["async_tokio"] } 28 | cid.workspace = true 29 | proptest.workspace = true 30 | rand.workspace = true 31 | tempfile.workspace = true 32 | tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread", "fs"] } 33 | tokio-util = { workspace = true, features = ["io-util"] } 34 | 35 | [build-dependencies] 36 | prost-build.workspace = true 37 | -------------------------------------------------------------------------------- /docs/charts/filedag.md: -------------------------------------------------------------------------------- 1 | ```mermaid 2 | flowchart TD 3 | FC("File Content = '0123456789'") 4 | CZ["File chunking size = 2B"] 5 | FC --> CZ 6 | subgraph Chunking 7 | A["'01'"] 8 | B["'23'"] 9 | C["'45'"] 10 | D["'67'"] 11 | E["'89'"] 12 | CZ --> A 13 | CZ --> B 14 | CZ --> C 15 | CZ --> D 16 | CZ --> E 17 | end 18 | subgraph Hashing Chunks 19 | AA["'01'"] --> AH["bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q"] 20 | EP["..."] 21 | EE["'89'"] --> EH["bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli"] 22 | A --> AA 23 | B --> EP 24 | C --> EP 25 | D --> EP 26 | E --> EE 27 | end 28 | subgraph Form Stem Node 29 | LT("bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q 30 | bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa 31 | 
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq 32 | bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza 33 | bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli") 34 | AH --> LT 35 | EH --> LT 36 | end 37 | subgraph Hash Stem 38 | RT["Root = bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i"] 39 | LT-->RT 40 | end 41 | ``` 42 | -------------------------------------------------------------------------------- /docs/charts/ship.md: -------------------------------------------------------------------------------- 1 | ```mermaid 2 | %%{init: { "sequence": { "noteAlign": "left"} } }%% 3 | 4 | sequenceDiagram 5 | participant O as Operator 6 | participant G as Ground IPFS 7 | participant S as Space IPFS 8 | Note over G,S: Both nodes begin listening for messages on boot 9 | Note over O,S: Satellite comes into LOS 10 | O->>G: IsConnected(true) 11 | S->>S: IsConnected(true) 12 | Note over O,G: Operator commands IPFS
node to transmit a file 13 | O->>G: TransmitFile(path) 14 | Note over G,S: Transfer of blocks
1. File is chunked into blocks, each with a CID
2. Root block contains links to child CIDs
3. Blocks are transmitted over UDP-radio 15 | loop Until DAG is Complete 16 | Note over G,S: Operator asks space IPFS node to verify that all
CIDs are received. 17 | G->>S: GetMissingDagBlocks(CID): [Block]
18 | Note over G,S: If empty response, all blocks are received 19 | S->>G: MissingDagBlocks(): [CID] 20 | Note over G,S: If blocks are missing, ground retransmits 21 | G->>S: While blocks remain missing,
TransmitBlock(CID) 22 | end 23 | Note over O,S: Operator asks space IPFS to write DAG to the file system 24 | O->>S: ExportDag(CID, path) 25 | Note over G,S: Satellite goes out of range 26 | O->>G: IsConnected(false) 27 | S->>S: IsConnected(false) 28 | ``` -------------------------------------------------------------------------------- /docs/myceli-docker.md: -------------------------------------------------------------------------------- 1 | # Build and running Myceli in Docker 2 | 3 | This doc contains instructions on how to build and run `myceli` in Docker 4 | 5 | ### Building 6 | 7 | The file `myceli.Dockerfile` contains all the instructions needed by Docker to produce an image for running `myceli`. This image can be built by running the following command: 8 | 9 | $ docker build -f myceli.Dockerfile . -t myceli 10 | 11 | ### Pulling 12 | 13 | The `myceli` docker images are published to the Github Container registry and can be pulled with the following command: 14 | 15 | $ docker pull ghcr.io/ipfs-shipyard/myceli:latest 16 | 17 | ### Running 18 | 19 | We only suggest running `myceli` in Docker in Linux environments due to networking requirements. 20 | 21 | Example running of `myceli` in a standalone Docker container with default settings: 22 | 23 | $ docker run --rm -v `pwd`:/myceli/ --network host -it ghcr.io/ipfs-shipyard/myceli:latest 24 | 25 | Important pieces to point out here: 26 | 27 | - `-v pwd:/myceli/`: Mounting a local directory is necessary for `myceli`'s storage to persist 28 | - `--network host`: The container running `myceli` needs to either run on the host network, or on the same network as the other services which will be communicating with it (controller CLI, ground radio bridge). 
29 | 30 | Optionally you may want to pass a config file argument in with the `CONFIG_PATH` environment variable, like this: 31 | 32 | $ docker run --rm -v `pwd`:/myceli/ --network host -e CONFIG_PATH=/myceli/config.toml -it ghcr.io/ipfs-shipyard/myceli:latest -------------------------------------------------------------------------------- /archived/car-utility/README.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | This utility is a simple way to pack individual files into CAR archives and extract packed files from a CAR archive. It currently only supports one fs file per CAR archive. 4 | 5 | ## Usage 6 | 7 | ### Packing a file 8 | 9 | $ car-utility pack /path/to/input/file /path/to/archive.car 10 | 11 | ### Unpacking a file 12 | 13 | $ car-utility unpack /path/to/archive.car /path/to/output/file 14 | 15 | ## Dependencies 16 | 17 | These system dependencies are required to build: 18 | - Rust v1.63 19 | - [Protobuf compiler](https://github.com/protocolbuffers/protobuf#protocol-compiler-installation): Download it from the [Protobuf Releases page](https://github.com/protocolbuffers/protobuf/releases) 20 | 21 | ## Cross-compiling for Raspberry Pi 22 | 23 | ### General Setup 24 | 25 | Install `cross` tool for the cross-compiling environment: 26 | 27 | $ cargo install cross --git https://github.com/cross-rs/cross 28 | 29 | Make sure `Docker` is installed and running. 30 | 31 | ### Building the app 32 | 33 | The build command for the raspberry pi target is: 34 | 35 | $ cross build --target armv7-unknown-linux-gnueabihf 36 | 37 | It is generally a good idea to run `cargo clean` between building for different targets, such as building for your local machine and then building for the raspi, otherwise the cross build may throw some weird glibc errors.
38 | 39 | The built executable will be located at `target/armv7-unknown-linux-gnueabihf/[release|debug]/car-utility` and can now be transferred to the raspberry pi for usage. -------------------------------------------------------------------------------- /smalog/src/lib.rs: -------------------------------------------------------------------------------- 1 | use chrono::Utc; 2 | use log::{Level, Metadata, Record}; 3 | use std::env; 4 | 5 | struct Smalog { 6 | lev: log::LevelFilter, 7 | } 8 | 9 | static mut LOGGER: Smalog = Smalog { 10 | lev: log::LevelFilter::Info, 11 | }; 12 | pub fn init() { 13 | let lev = match env::var("RUST_LOG") { 14 | Ok(lev_s) => level_from_str(&lev_s), 15 | Err(_) => Level::Info, 16 | }; 17 | set_level(lev.to_level_filter()); 18 | } 19 | pub fn set_level(lev: log::LevelFilter) { 20 | unsafe { 21 | LOGGER.lev = lev; 22 | log::set_logger(&LOGGER).expect("Failed to set the logger implementation!"); 23 | } 24 | log::set_max_level(lev); 25 | } 26 | 27 | impl log::Log for Smalog { 28 | fn enabled(&self, metadata: &Metadata) -> bool { 29 | metadata.level() <= self.lev 30 | } 31 | 32 | fn log(&self, record: &Record) { 33 | if self.enabled(record.metadata()) { 34 | println!( 35 | "[{} {}] {}", 36 | Utc::now().to_rfc3339(), 37 | record.level(), 38 | record.args() 39 | ); 40 | } 41 | } 42 | 43 | fn flush(&self) {} 44 | } 45 | 46 | fn level_from_str(s: &str) -> Level { 47 | use std::str::FromStr; 48 | if let Ok(l) = Level::from_str(s) { 49 | return l; 50 | } 51 | println!("ERROR! RUST_LOG set to {s} which is not recognized by smalog which only accepts a simple level name, i.e. one of: OFF; ERROR; WARN; INFO; DEBUG; TRACE. 
Will use INFO instead."); 52 | Level::Info 53 | } 54 | -------------------------------------------------------------------------------- /local-storage/src/util.rs: -------------------------------------------------------------------------------- 1 | use super::block::StoredBlock; 2 | use anyhow::{bail, Result}; 3 | use std::collections::BTreeMap; 4 | 5 | pub(crate) fn verify_dag(blocks: &[StoredBlock]) -> Result<()> { 6 | if blocks.is_empty() { 7 | bail!("No blocks is not a meaningful DAG"); 8 | } 9 | if blocks.len() == 1 { 10 | if blocks[0].links.is_empty() { 11 | return Ok(()); 12 | } 13 | bail!("Given only root of DAG, no children"); 14 | } else if blocks.iter().all(|b| b.links.is_empty()) { 15 | bail!("No root found"); 16 | } 17 | let mut counts: BTreeMap<&str, (u16, u16)> = BTreeMap::new(); 18 | for block in blocks { 19 | block.validate()?; 20 | counts.entry(block.cid.as_str()).or_default().0 += 1; 21 | for link in &block.links { 22 | counts.entry(link.as_str()).or_default().1 += 1; 23 | } 24 | } 25 | let mut root = ""; 26 | for (cid, (h,n)) in counts { 27 | if n > h { 28 | bail!("Missing block: {cid}"); 29 | } 30 | if h == 1 && n == 0 { 31 | if root.is_empty() { 32 | root = cid; 33 | } else if root < cid { 34 | bail!("Multiple roots! {root} {cid}"); 35 | } else { 36 | bail!("Multiple roots! 
{cid} {root}"); 37 | } 38 | } else if h > n { 39 | bail!("Too many copies of {cid}"); 40 | } 41 | } 42 | if root.is_empty() { 43 | bail!("DAG is actually DG (cycle detected)"); 44 | } 45 | Ok(()) 46 | } 47 | -------------------------------------------------------------------------------- /archived/cpp-transmit-example/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "api.hpp" 6 | 7 | int main(int argc, char *argv[]) 8 | { 9 | if (argc != 4) 10 | { 11 | printf("Please provide three arguments: [ipfs_addr] [dag_cid_to_transmit] [destination_addr]\n"); 12 | return -1; 13 | } 14 | printf("Sending {\"Transmit\": {\"dag\": %s, \"addr\": %s}} to %s\n", argv[2], argv[3], argv[1]); 15 | 16 | // Parse out network address 17 | std::string addr(argv[1]); 18 | int split_pos = addr.find(":"); 19 | if (split_pos == std::string::npos) 20 | { 21 | printf("Invalid address found %s", addr.c_str()); 22 | return -1; 23 | } 24 | std::string ip = addr.substr(0, split_pos); 25 | std::string port = addr.substr(split_pos + 1); 26 | 27 | // Call into Rust code to generate transmit message 28 | unsigned char msg[1024]; 29 | int len = generate_transmit_msg((unsigned char *)msg, argv[2], argv[3]); 30 | 31 | // Send transmit over udp to ipfs instance 32 | int sockfd; 33 | char buffer[1024]; 34 | struct sockaddr_in servaddr; 35 | 36 | if ((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) 37 | { 38 | perror("Socket creation failed"); 39 | exit(-1); 40 | } 41 | 42 | memset(&servaddr, 0, sizeof(servaddr)); 43 | servaddr.sin_family = AF_INET; 44 | servaddr.sin_port = htons(std::stoi(port)); 45 | servaddr.sin_addr.s_addr = inet_addr(ip.c_str()); 46 | 47 | sendto(sockfd, msg, len, 0, (const struct sockaddr *)&servaddr, sizeof(servaddr)); 48 | close(sockfd); 49 | return 0; 50 | } -------------------------------------------------------------------------------- /myceli/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "myceli" 3 | version.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | repository.workspace = true 7 | rust-version.workspace = true 8 | build = "build.rs" 9 | 10 | [dependencies] 11 | anyhow.workspace = true 12 | cid = { workspace = true } 13 | config.workspace = true 14 | env_logger = { workspace = true, optional = true } 15 | figment.workspace = true 16 | ipfs-unixfs.workspace = true 17 | libipld.workspace = true 18 | local-storage = { path = "../local-storage", default-features = false } 19 | log.workspace = true 20 | messages = { workspace = true, features = [] } 21 | parity-scale-codec.workspace = true 22 | serde.workspace = true 23 | smalog = { workspace = true, optional = true } 24 | tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } 25 | toml = { workspace = true, features = ["display"] } 26 | transports = { workspace = true, features = [] } 27 | 28 | [features] 29 | big = ["sqlite", "good_log", "proto_all"] 30 | small = ["files", "small_log"] 31 | proto_all = ["proto_ship", "proto_sync"] 32 | proto_ship = ["messages/proto_ship", "transports/proto_ship"] 33 | proto_sync = ["messages/proto_sync", "transports/proto_sync"] 34 | good_log = ["dep:env_logger", "local-storage/good_log"] 35 | small_log = ["dep:smalog", "local-storage/small_log"] 36 | sqlite = ["local-storage/sqlite"] 37 | files = ["local-storage/files"] 38 | 39 | [dev-dependencies] 40 | assert_fs.workspace = true 41 | blake2.workspace = true 42 | file-hashing.workspace = true 43 | futures.workspace = true 44 | ipfs-unixfs.workspace = true 45 | rand.workspace = true 46 | local-storage.workspace = true 47 | 48 | [build-dependencies] 49 | built = "0.7.0" -------------------------------------------------------------------------------- /docs/hyphae.md: -------------------------------------------------------------------------------- 1 | # Hyphae Setup 2 | 
3 | Hyphae is a filament, or bridge, between Myceli and Kubo. It provides a pathway for the IPFS blocks inside of Myceli to flow into Kubo, and from there potentially into the broader public IPFS network. 4 | 5 | ## Running Hyphae 6 | 7 | After building from source, or downloading a binary, `hyphae` can be run with no additional arguments: 8 | 9 | $ hyphae 10 | 11 | Starting hyphae with no config file will run with a few default settings: 12 | - Looking for `myceli` at `127.0.0.1:8001` 13 | - Using an MTU of 512 when communicating with `myceli` 14 | - Looking for `kubo` at `127.0.0.1:5001` 15 | - Syncing data every 10 seconds 16 | 17 | Every ten seconds, `hyphae` will query `myceli` for its available blocks, query `kubo` for its local refs, and transfer over any blocks which exist in `myceli` and not in `kubo`. 18 | 19 | ## Configuring Hyphae 20 | 21 | `hyphae` has a few configuration options which ship with default values, or can be tuned to fit system requirements. 22 | 23 | Current configuration values and defaults are: 24 | - `myceli_address` - The network address of the `myceli` instance. Defaults to `127.0.0.1:8001`. 25 | - `kubo_address` - The network address of the `kubo` instance. Defaults to `127.0.0.1:5001`. 26 | - `sync_interval` - Duration in milliseconds between sync operations. Defaults to 10_000 ms. 27 | - `mtu` - The MTU used when chunking messages to/from `myceli` 28 | 29 | These configuration values can be set via a TOML config file which is passed as an argument when running `hyphae`. 30 | 31 | Here is an example configuration file: 32 | 33 | myceli_address="127.0.0.1:8002" 34 | kubo_address="127.0.0.1:8200" 35 | sync_interval=30_000 36 | mtu=1024 37 | 38 | If this configuration is saved to "config.toml", then we would run `hyphae config.toml` to use the config file.
-------------------------------------------------------------------------------- /.idea/space.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /archived/car-utility/src/run.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use crate::pack::pack; 4 | use crate::unpack::unpack; 5 | use anyhow::Result; 6 | use clap::{Parser, Subcommand}; 7 | 8 | #[derive(Parser, Debug, Clone)] 9 | #[clap(version, long_about = None, propagate_version = true)] 10 | #[clap(about = "CAR packer/unpacker based on Iroh")] 11 | pub struct Cli { 12 | #[clap(subcommand)] 13 | command: Commands, 14 | } 15 | 16 | #[derive(Subcommand, Debug, Clone)] 17 | enum Commands { 18 | #[clap(about = "Pack a file into a CAR")] 19 | Pack { 20 | /// The path to a file to be CAR packed 21 | path: PathBuf, 22 | // The path to the CAR output file 23 | output: PathBuf, 24 | }, 25 | #[clap(about = "Unpack a CAR into a file")] 26 | Unpack { 27 | /// The path to a CAR file to be unpacked 28 | path: PathBuf, 29 | /// The path to the unpacked output file 30 | output: PathBuf, 31 | }, 32 | } 33 | 34 | impl Cli { 35 | pub async fn run(&self) -> Result<()> { 36 | self.cli_command().await?; 37 | 38 | Ok(()) 39 | } 40 | 41 | async fn cli_command(&self) -> Result<()> { 42 | match &self.command { 43 | Commands::Pack { path, output } => { 44 | if !path.is_file() { 45 | anyhow::bail!("{} is not a file", path.display()); 46 | } 47 | println!("Packing {} into {}", path.display(), output.display()); 48 | pack(path, output).await?; 49 | } 50 | Commands::Unpack { path, output } => { 51 | if !path.is_file() { 52 | anyhow::bail!("{} is not a file", path.display()); 53 | } 54 | println!("Unpacking {} into {}", path.display(), 
output.display()); 55 | unpack(path, output).await?; 56 | } 57 | }; 58 | 59 | Ok(()) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /watcher/src/main.rs: -------------------------------------------------------------------------------- 1 | use log::debug; 2 | use messages::Message; 3 | use notify::Watcher; 4 | use std::{fs, path::PathBuf, time::Duration}; 5 | 6 | mod handler; 7 | 8 | #[cfg(all(not(feature = "small"), not(feature = "big")))] 9 | compile_error! {"Select either big or small feature"} 10 | 11 | fn watched_dir(cfg: &config::Config) -> PathBuf { 12 | let mut result = PathBuf::new(); 13 | result.push( 14 | cfg.clone() 15 | .watched_directory 16 | .expect("Must configure watched_directory before running watcher."), 17 | ); 18 | result 19 | .canonicalize() 20 | .expect("Watched directory does not exist?") 21 | } 22 | 23 | fn main() { 24 | #[cfg(feature = "good_log")] 25 | env_logger::init(); 26 | 27 | #[cfg(feature = "small_log")] 28 | smalog::init(); 29 | 30 | let config_path = std::env::args().nth(1); 31 | let cfg = 32 | config::Config::parse(config_path, &Message::fit_size).expect("Failed to parse config"); 33 | let hndr = handler::Handler::new(&cfg).expect("Failed to configure transport & event handler"); 34 | let dir = watched_dir(&cfg); 35 | let mut watcher = notify::recommended_watcher(move |e| hndr.handle_event(e)) 36 | .expect("Unable to create directory watcher."); 37 | watcher 38 | .watch(&dir, notify::RecursiveMode::NonRecursive) 39 | .expect("Unable to watch directory."); 40 | let hndr = 41 | handler::Handler::new(&cfg).expect("Failed to configure second transport & event handler"); 42 | let mut preexisting = 43 | fs::read_dir(&dir).expect("Can't list watched_directory - does it exist?"); 44 | let mut t = 4; 45 | while dir.is_dir() { 46 | std::thread::sleep(Duration::from_secs(t)); 47 | if let Some(Ok(f)) = preexisting.next() { 48 | if f.metadata().map(|d| d.is_file()).unwrap_or(false) { 49 | 
debug!("Discovered path in {dir:?} - {f:?} - notifying Myceli."); 50 | hndr.send(&f.path()); 51 | } 52 | } else if let Ok(rd) = fs::read_dir(&dir) { 53 | preexisting = rd; 54 | t *= 2; 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /transports/src/chunking.rs: -------------------------------------------------------------------------------- 1 | use crate::error::{adhoc_err, Result}; 2 | use cid::multihash::{Code, MultihashDigest}; 3 | use log::error; 4 | use messages::Message; 5 | use parity_scale_codec::{Decode, Encode}; 6 | use parity_scale_codec_derive::{Decode as ParityDecode, Encode as ParityEncode}; 7 | use serde::Serialize; 8 | 9 | const HASH_SIZE: usize = 16; 10 | 11 | // This MessageContainer struct is intended to be used inside of the chunkers 12 | // for verification of Message integrity during the chunking/assembly process 13 | #[derive(Clone, Debug, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)] 14 | pub(crate) struct MessageContainer { 15 | // Hash of payload 16 | hash: [u8; HASH_SIZE], 17 | // Message payload 18 | pub message: Message, 19 | } 20 | 21 | impl MessageContainer { 22 | pub fn new(message: Message) -> Self { 23 | let hash = gen_hash(&message); 24 | // This hash uses a 128-bit Blake2s-128 hash, rather than the common sha2-256 to save on overhead size 25 | MessageContainer { hash, message } 26 | } 27 | 28 | pub fn to_bytes(&self) -> Vec { 29 | self.encode() 30 | } 31 | 32 | pub fn verify_cid(&self) -> Result { 33 | let regenerated_hash = gen_hash(&self.message); 34 | if regenerated_hash == self.hash { 35 | Ok(true) 36 | } else { 37 | error!( 38 | "Hash mismatch: provided={:?} deduced={:?}", 39 | self.hash, regenerated_hash 40 | ); 41 | Ok(false) 42 | } 43 | } 44 | 45 | pub fn from_bytes(bytes: &mut &[u8]) -> Result { 46 | let container: MessageContainer = MessageContainer::decode(bytes)?; 47 | if !container.verify_cid()? 
{ 48 | adhoc_err("Message container failed CID verification")?; 49 | } 50 | Ok(container) 51 | } 52 | } 53 | 54 | fn gen_hash(msg: &Message) -> [u8; HASH_SIZE] { 55 | let bytes = msg.to_bytes(); 56 | Code::Blake2s128 57 | .digest(&bytes) 58 | .digest() 59 | .try_into() 60 | .expect("Hash is wrong size (should be constant since hash type is not changing)") 61 | } 62 | -------------------------------------------------------------------------------- /ipfs-unixfs/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod balanced_tree; 2 | pub mod builder; 3 | pub mod chunker; 4 | pub mod codecs; 5 | mod types; 6 | pub mod unixfs; 7 | 8 | pub use crate::types::{Block, Link, LinkRef, Links, LoadedCid, PbLinks, Source}; 9 | 10 | use crate::codecs::Codec; 11 | use anyhow::{bail, Context as _, Result}; 12 | use cid::Cid; 13 | use libipld::{prelude::Codec as _, Ipld, IpldCodec}; 14 | 15 | /// Extract links from the given content. 16 | /// 17 | /// Links will be returned as a vec with meaningful order 18 | pub fn parse_links(cid: &Cid, bytes: &[u8]) -> Result> { 19 | let codec = Codec::try_from(cid.codec()).context("unknown codec")?; 20 | let mut cids = vec![]; 21 | let codec = match codec { 22 | Codec::DagCbor => IpldCodec::DagCbor, 23 | Codec::DagPb => IpldCodec::DagPb, 24 | Codec::DagJson => IpldCodec::DagJson, 25 | Codec::Raw => IpldCodec::Raw, 26 | _ => bail!("unsupported codec {:?}", codec), 27 | }; 28 | codec.references::(bytes, &mut cids)?; 29 | Ok(cids) 30 | } 31 | 32 | #[cfg(test)] 33 | pub mod tests { 34 | use super::*; 35 | 36 | #[test] 37 | pub fn file_with_repeat_chunks() { 38 | let bytes: &[u8] = &[ 39 | 0x12, 0x2a, 0x0a, 0x24, 0x01, 0x55, 0x12, 0x20, 0x8f, 0x43, 0x43, 0x46, 0x64, 0x8f, 40 | 0x6b, 0x96, 0xdf, 0x89, 0xdd, 0xa9, 0x01, 0xc5, 0x17, 0x6b, 0x10, 0xa6, 0xd8, 0x39, 41 | 0x61, 0xdd, 0x3c, 0x1a, 0xc8, 0x8b, 0x59, 0xb2, 0xdc, 0x32, 0x7a, 0xa4, 0x12, 0x00, 42 | 0x18, 0x02, 0x12, 0x2a, 0x0a, 0x24, 0x01, 0x55, 
0x12, 0x20, 0x8f, 0x43, 0x43, 0x46, 43 | 0x64, 0x8f, 0x6b, 0x96, 0xdf, 0x89, 0xdd, 0xa9, 0x01, 0xc5, 0x17, 0x6b, 0x10, 0xa6, 44 | 0xd8, 0x39, 0x61, 0xdd, 0x3c, 0x1a, 0xc8, 0x8b, 0x59, 0xb2, 0xdc, 0x32, 0x7a, 0xa4, 45 | 0x12, 0x00, 0x18, 0x02, 0x0a, 0x08, 0x08, 0x02, 0x18, 0x04, 0x20, 0x02, 0x20, 0x02, 46 | ]; 47 | let cid: Cid = "bafybeiegfwauaenc4pa7jqfssar4i4pafsul4g62e3av64fwir5uodv7q4" 48 | .try_into() 49 | .unwrap(); 50 | let actual = parse_links(&cid, bytes).unwrap(); 51 | let child: Cid = "bafkreiepinbumzepnoln7co5vea4kf3lcctnqolb3u6bvsellgznymt2uq" 52 | .try_into() 53 | .unwrap(); 54 | let expected = [child.clone(), child.clone()]; 55 | assert_eq!(actual, expected); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /testing/udp_forward/src/main.rs: -------------------------------------------------------------------------------- 1 | use chrono::{offset::Utc, DateTime}; 2 | use rand::Rng; 3 | use std::{env, net::*, str, time::SystemTime}; 4 | 5 | mod err; 6 | 7 | fn main() -> err::Result<()> { 8 | let mut arg_it = env::args(); 9 | arg_it.next(); 10 | let listen = arg_it.next().expect("First arg= to listen to."); 11 | let dest_a = arg_it 12 | .next() 13 | .expect("Second arg= to forward packets to."); 14 | let dest_b = arg_it 15 | .next() 16 | .expect("Third arg= to forward packets to."); 17 | let rate: usize = arg_it 18 | .next() 19 | .map(|s| str::parse(&s)) 20 | .unwrap_or(Ok(usize::MAX))?; 21 | let mut buf = [0u8; u16::MAX as usize]; 22 | let socket = UdpSocket::bind(listen.clone())?; 23 | let mut good = 0; 24 | let mut bad = 0; 25 | let mut rng = rand::thread_rng(); 26 | loop { 27 | match socket.recv_from(&mut buf) { 28 | Ok((len, sender)) => { 29 | let to = if format!("{sender:?}") == dest_a { 30 | dest_b.clone() 31 | } else { 32 | dest_a.clone() 33 | }; 34 | if bad >= 1 && good >= rate { 35 | bad -= 1; 36 | good -= rate; 37 | } 38 | let bad_odds = good / rate / 2 + 1; 39 | let good_odds = bad / 2 + rate; 
40 | let n = bad_odds + good_odds; 41 | let i = rng.gen_range(0..n); 42 | if i < bad_odds { 43 | let now: DateTime = SystemTime::now().into(); 44 | println!( 45 | "Dropping {}th packet (from {sender:?}). Excess: good={good} bad={bad} @ {now}", 46 | rate +1 47 | ); 48 | bad += 1; 49 | } else { 50 | match socket.send_to(&buf[0..len], to) { 51 | Ok(_) => { 52 | good += 1; 53 | print!("."); 54 | } 55 | Err(e) => println!("Error sending: {e:?}"), 56 | } 57 | } 58 | } 59 | Err(e) => println!("Error receiving: {e:?}"), 60 | } 61 | } 62 | // Ok(()) 63 | } 64 | -------------------------------------------------------------------------------- /local-dev-environment/desktop/rfm69-driver/driver/driver.ino: -------------------------------------------------------------------------------- 1 | // This is borrowed heavily from the rf69 rx and tx demo code 2 | // -*- mode: C++ -*- 3 | 4 | #include 5 | #include 6 | 7 | /************ Radio Setup ***************/ 8 | 9 | // Change to 434.0 or other frequency, must match RX's freq! 
10 | #define RF69_FREQ 915.0 11 | 12 | // Feather 32u4 w/Radio pin defs 13 | #define RFM69_CS 8 14 | #define RFM69_INT 7 15 | #define RFM69_RST 4 16 | #define LED 13 17 | 18 | // Singleton instance of the radio driver 19 | RH_RF69 rf69(RFM69_CS, RFM69_INT); 20 | 21 | void setup() 22 | { 23 | Serial.begin(115200); 24 | 25 | pinMode(LED, OUTPUT); 26 | pinMode(RFM69_RST, OUTPUT); 27 | digitalWrite(RFM69_RST, LOW); 28 | 29 | // manual reset 30 | digitalWrite(RFM69_RST, HIGH); 31 | delay(10); 32 | digitalWrite(RFM69_RST, LOW); 33 | delay(10); 34 | 35 | if (!rf69.init()) { 36 | Serial.println("RFM69 radio init failed"); 37 | while (1); 38 | } 39 | // Defaults after init are 434.0MHz, modulation GFSK_Rb250Fd250, +13dbM (for low power module) 40 | // No encryption 41 | if (!rf69.setFrequency(RF69_FREQ)) { 42 | Serial.println("setFrequency failed"); 43 | } 44 | 45 | // If you are using a high power RF69 eg RFM69HW, you *must* set a Tx power with the 46 | // ishighpowermodule flag set like this: 47 | rf69.setTxPower(20, true); // range from 14-20 for power, 2nd arg must be true for 69HCW 48 | 49 | pinMode(LED, OUTPUT); 50 | } 51 | 52 | 53 | void loop() { 54 | delay(10); // Wait 10ms between cycles 55 | 56 | uint8_t buf[RH_RF69_MAX_MESSAGE_LEN]; 57 | uint8_t len = sizeof(buf); 58 | 59 | int availableBytes = Serial.available(); 60 | if (availableBytes > 0) { 61 | int len = Serial.readBytes(buf, availableBytes); 62 | buf[len] = 0; 63 | rf69.send((uint8_t*)buf, len); 64 | rf69.waitPacketSent(); 65 | Blink(LED, 30, 2); 66 | } 67 | 68 | if (rf69.waitAvailableTimeout(10)) { 69 | // Should be a reply message for us now 70 | if (rf69.recv(buf, &len)) { 71 | buf[len] = 0; 72 | Serial.write(buf, len); 73 | Blink(LED, 70, 2); 74 | } 75 | } 76 | } 77 | 78 | void Blink(byte PIN, byte DELAY_MS, byte loops) { 79 | for (byte i=0; i, 9 | pub data: Vec, 10 | pub links: Vec>, 11 | pub filename: Option, 12 | } 13 | 14 | impl fmt::Debug for TransmissionBlock { 15 | fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { 16 | let cid_str = Cid::try_from(self.cid.clone()) 17 | .map(|c| c.to_string()) 18 | .unwrap(); 19 | 20 | f.debug_struct("TransmissionBlock") 21 | .field("cid", &cid_str) 22 | .field("data", &self.data.len()) 23 | .field("links", &self.links.len()) 24 | .finish() 25 | } 26 | } 27 | 28 | #[derive(Clone, Debug, ParityDecode, ParityEncode, Serialize, Eq, PartialEq)] 29 | pub enum DataProtocol { 30 | // Transmission message for individual block 31 | Block(TransmissionBlock), 32 | // Protocol level request for transmission of block 33 | RequestTransmitBlock { 34 | cid: String, 35 | target_addr: String, 36 | }, 37 | // This message is used inside of the protocol to initiate the re-requesting of missing dag blocks 38 | // in order to continue transmitting the dag 39 | RetryDagSession { 40 | cid: String, 41 | }, 42 | // Requests windowed transmission of a dag 43 | RequestTransmitDag { 44 | cid: String, 45 | target_addr: String, 46 | retries: u8, 47 | }, 48 | // Resumes the transmission of a dag which may have run out of retries or 49 | // paused due to connectivity lost 50 | ResumeTransmitDag { 51 | cid: String, 52 | }, 53 | // Resumes the transmission of all dags which may be paused 54 | ResumeTransmitAllDags, 55 | // Message to request list of blocks missing from list of CIDs sent 56 | RequestMissingDagWindowBlocks { 57 | cid: String, 58 | blocks: Vec, 59 | }, 60 | // Message to request list of blocks missing from CID sent 61 | RequestMissingDagBlocks { 62 | cid: String, 63 | }, 64 | // Notifies which dag blocks are missing in current window 65 | MissingDagBlocks { 66 | cid: String, 67 | blocks: Vec, 68 | }, 69 | } 70 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "config", 4 | "controller", 5 | # "hyphae", 6 | "ipfs-unixfs", 7 | "local-dev-environment/desktop/rfm69-service", 
8 | "local-storage", 9 | "messages", 10 | "myceli", 11 | "smalog", 12 | "testing/udp_forward", 13 | "transports", 14 | "watcher" 15 | ] 16 | 17 | [workspace.package] 18 | version = "0.7.0" 19 | edition = "2021" 20 | license = "Apache-2.0/MIT" 21 | rust-version = "1.70.0" 22 | repository = "https://github.com/ipfs-shipyard/space" 23 | 24 | [workspace.dependencies] 25 | # External deps 26 | anyhow = { version = "1.0.71", default-features = false, features = ["backtrace", "std"] } 27 | assert_fs = "1.0.13" 28 | async-recursion = "1.0.4" 29 | async-stream = "0.3.3" 30 | blake2 = { version = "0.10.6", default-features = false } 31 | bytes = "1.1" 32 | cid = { version = "0.9", default-features = false, features = ["scale-codec"] } 33 | clap = { version = "4.0.15", features = ["derive"] } 34 | derive-error = "0.0.5" 35 | env_logger = "0.10.0" 36 | figment = { version = "0.10", features = ["toml"] } 37 | file-hashing = "0.1.1" 38 | futures = "0.3.24" 39 | libipld = { version = "0.15", default-features = false, features = ["dag-pb", "dag-cbor", "dag-json"] } 40 | log = "0.4.19" 41 | multihash = "0.18.1" 42 | num_enum = "0.5.7" 43 | parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive", "std"] } 44 | parity-scale-codec-derive = "3.1.3" 45 | proptest = "1.1.0" 46 | prost = "0.11" 47 | prost-build = "0.11.1" 48 | rand = "0.8.5" 49 | reqwest = { version = "0.11.10", default-features = false } 50 | rusqlite = { version = "0.28.0", features = ["bundled"] } 51 | serde = "1.0.163" 52 | serde_derive = "1.0.163" 53 | serde_json = "1.0.87" 54 | tempfile = "3.3" 55 | thiserror = "1.0.40" 56 | tokio = "1.28.1" 57 | tokio-serial = "5.4" 58 | tokio-util = "0.7.8" 59 | toml = { version = "0.7.3", default-features = false } 60 | 61 | # Internal deps 62 | config = { path = "config" } 63 | ipfs-unixfs = { path = "ipfs-unixfs" } 64 | local-storage = { path = "local-storage" } 65 | messages = { path = "messages" } 66 | smalog = { path = "smalog" } 67 | 
transports = { path = "transports" } 68 | 69 | [profile.small] 70 | inherits = "release" 71 | lto = "fat" 72 | 73 | [profile.small.package.myceli] 74 | opt-level = "s" 75 | debug = false 76 | strip = "symbols" 77 | debug-assertions = false 78 | overflow-checks = false 79 | incremental = false 80 | -------------------------------------------------------------------------------- /local-storage/src/provider.rs: -------------------------------------------------------------------------------- 1 | use crate::block::StoredBlock; 2 | use anyhow::Result; 3 | use cid::Cid; 4 | use std::sync::{Arc, Mutex}; 5 | 6 | #[allow(unused_imports)] 7 | use crate::null_provider::NullStorageProvider; 8 | 9 | #[cfg(feature = "sqlite")] 10 | use crate::sql_provider::SqliteStorageProvider; 11 | 12 | #[cfg(feature = "files")] 13 | use crate::file_provider::FileStorageProvider; 14 | 15 | pub type Handle = Arc>; 16 | 17 | pub trait StorageProvider { 18 | // Import a stored block 19 | fn import_block(&mut self, block: &StoredBlock) -> Result<()>; 20 | // Requests a list of CIDs currently available in storage 21 | fn get_available_cids(&self) -> Result>; 22 | // Requests the block associated with the given CID 23 | fn get_block_by_cid(&self, cid: &str) -> Result; 24 | // Requests the links associated with the given CID 25 | fn get_links_by_cid(&self, cid: &str) -> Result>; 26 | fn list_available_dags(&self) -> Result>; 27 | // Attaches filename to dag 28 | fn name_dag(&self, cid: &str, file_name: &str) -> Result<()>; 29 | fn get_name(&self, cid: &str) -> Result; 30 | fn get_missing_cid_blocks(&self, cid: &str) -> Result>; 31 | fn get_dag_blocks_by_window( 32 | &self, 33 | cid: &str, 34 | offset: u32, 35 | window_size: u32, 36 | ) -> Result>; 37 | fn get_all_dag_cids( 38 | &self, 39 | cid: &str, 40 | offset: Option, 41 | window_size: Option, 42 | ) -> Result>; 43 | fn get_all_dag_blocks(&self, cid: &str) -> Result>; 44 | fn incremental_gc(&mut self) -> bool; 45 | fn has_cid(&self, cid: &Cid) -> 
bool; 46 | fn ack_cid(&self, cid: &Cid); 47 | fn get_dangling_cids(&self) -> Result>; 48 | } 49 | 50 | pub fn default_storage_provider(_storage_path: &str, _high_disk_usage: u64) -> Result { 51 | #[cfg(all(not(feature = "files"), not(feature = "sqlite")))] 52 | let provider = NullStorageProvider::default(); 53 | #[cfg(all(feature = "files", not(feature = "sqlite")))] 54 | let provider = FileStorageProvider::new(_storage_path, _high_disk_usage)?; 55 | #[cfg(feature = "sqlite")] 56 | let provider = SqliteStorageProvider::new(_storage_path)?; 57 | Ok(Arc::new(Mutex::new(provider))) 58 | } 59 | -------------------------------------------------------------------------------- /local-storage/src/null_provider.rs: -------------------------------------------------------------------------------- 1 | use crate::block::StoredBlock; 2 | use crate::provider::StorageProvider; 3 | use anyhow::bail; 4 | use cid::Cid; 5 | 6 | #[derive(Default)] 7 | pub(crate) struct NullStorageProvider {} 8 | 9 | impl StorageProvider for NullStorageProvider { 10 | fn import_block(&mut self, _block: &StoredBlock) -> anyhow::Result<()> { 11 | bail!("NullStorageProvider does not implement anything") 12 | } 13 | fn get_dangling_cids(&self) -> anyhow::Result> { 14 | Ok(vec![]) 15 | } 16 | fn get_available_cids(&self) -> anyhow::Result> { 17 | bail!("NullStorageProvider does not implement anything") 18 | } 19 | fn get_name(&self, _cid: &str) -> anyhow::Result { 20 | bail!("nope") 21 | } 22 | fn get_block_by_cid(&self, _cid: &str) -> anyhow::Result { 23 | bail!("NullStorageProvider does not implement anything") 24 | } 25 | 26 | fn get_links_by_cid(&self, _cid: &str) -> anyhow::Result> { 27 | bail!("NullStorageProvider does not implement anything") 28 | } 29 | 30 | fn list_available_dags(&self) -> anyhow::Result> { 31 | bail!("NullStorageProvider does not implement anything") 32 | } 33 | 34 | fn name_dag(&self, _cid: &str, _file_name: &str) -> anyhow::Result<()> { 35 | bail!("NullStorageProvider does not 
implement anything") 36 | } 37 | 38 | fn get_missing_cid_blocks(&self, _cid: &str) -> anyhow::Result> { 39 | bail!("NullStorageProvider does not implement anything") 40 | } 41 | 42 | fn get_dag_blocks_by_window( 43 | &self, 44 | _cid: &str, 45 | _offset: u32, 46 | _window_size: u32, 47 | ) -> anyhow::Result> { 48 | bail!("NullStorageProvider does not implement anything") 49 | } 50 | 51 | fn get_all_dag_cids( 52 | &self, 53 | _cid: &str, 54 | _offset: Option, 55 | _window_size: Option, 56 | ) -> anyhow::Result> { 57 | bail!("NullStorageProvider does not implement anything") 58 | } 59 | 60 | fn get_all_dag_blocks(&self, _cid: &str) -> anyhow::Result> { 61 | bail!("NullStorageProvider does not implement anything") 62 | } 63 | 64 | fn incremental_gc(&mut self) -> bool { 65 | false 66 | } 67 | 68 | fn has_cid(&self, _cid: &Cid) -> bool { 69 | false 70 | } 71 | 72 | fn ack_cid(&self, _cid: &Cid) {} 73 | } 74 | -------------------------------------------------------------------------------- /docs/charts/sync.md: -------------------------------------------------------------------------------- 1 | ```mermaid 2 | sequenceDiagram 3 | participant G as Ground 4 | participant V as Vehicle 5 | Note over G: Import File 6 | Note left of G: Available CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli 7 | G ->> V: "Push" Send CIDs to Expect (& File Name) 8 | Note right of V: Available CIDs:

Missing CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli 9 | G ->> V: Send Block 10 | Note over V: Hash, store. 11 | Note over V: Parse as stem (fails - it's a leaf). 12 | Note right of V: Available CIDs:
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli
Missing CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza 13 | G --X V: Attempt to send blocks, packets dropped 14 | V ->> G: "Pull" Send CIDs for blocks to send/re-send 15 | G ->> V: Send Block (bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i) 16 | Note over V: Hash, store. 17 | Note over V: Parse as stem, passes - has 5 children. 18 | loop For each child CID 19 | Note over V: If already available, ignore. 20 | Note over V: Otherwise add to 'missing' & "Pull" 21 | end 22 | Note right of V: Available CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli

Missing CIDs:
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza 23 | loop Other CIDs in pull 24 | G ->> V: Send Blocks 25 | End 26 | ``` 27 | -------------------------------------------------------------------------------- /watcher/src/handler.rs: -------------------------------------------------------------------------------- 1 | use log::{debug, error, info, trace}; 2 | use messages::{ApplicationAPI, Message}; 3 | use notify::{event::ModifyKind, Event, EventKind}; 4 | use std::path::Path; 5 | use std::time::{Duration, SystemTime}; 6 | use transports::{Transport, UdpTransport}; 7 | 8 | pub(crate) struct Handler { 9 | trx: UdpTransport, 10 | target_addr: String, 11 | } 12 | 13 | impl Handler { 14 | pub fn new(cfg: &config::Config) -> Result { 15 | let trx = UdpTransport::new("0.0.0.0:0", cfg.mtu, cfg.chunk_transmit_throttle)?; 16 | let target_addr = cfg.listen_address.clone(); 17 | Ok(Self { trx, target_addr }) 18 | } 19 | 20 | pub fn handle_event(&self, event: notify::Result) { 21 | trace!("handle_event({:?})", &event); 22 | match event { 23 | Err(err) => { 24 | error!("FileSystem error: {:?}", err); 25 | } 26 | Ok(ev) => match ev.kind { 27 | EventKind::Modify(ModifyKind::Data(_)) => { 28 | for p in ev.paths { 29 | //Some of these events can occur while the file is still being modified 30 | self.wait_for_modification_to_stop(&p).ok(); 31 | info!("File modified, import: {:?}", &p); 32 | self.send(&p); 33 | } 34 | } 35 | _ => debug!("Ignoring FileSystem event: {:?}", &ev), 36 | }, 37 | } 38 | } 39 | pub fn send(&self, path: &Path) { 40 | let path = if let Some(p) = path.as_os_str().to_str() { 41 | p.to_owned() 42 | } else { 43 | error!("Path {:?} can't be turned into string?!", &path); 44 | return; 45 | }; 46 | let m = ApplicationAPI::ImportFile { path }; 47 | let m = Message::ApplicationAPI(m); 48 | match self.trx.send(m, &self.target_addr) { 49 | Ok(()) => debug!("Sent message to {}", &self.target_addr), 50 | Err(e) => error!("Error sending: {:?}", &e), 51 | } 52 | } 
53 | fn wait_for_modification_to_stop(&self, p: &Path) -> std::io::Result<()> { 54 | const MIN_AGE: Duration = Duration::from_secs(1); 55 | const MAX_SLEEP: Duration = Duration::from_millis(1234); 56 | loop { 57 | let mdt = p.metadata()?.modified()?; 58 | let now = SystemTime::now(); 59 | if mdt + MIN_AGE < now { 60 | return Ok(()); 61 | } 62 | let elapsed = now.duration_since(mdt).unwrap_or_default(); 63 | std::thread::sleep(MAX_SLEEP - elapsed); 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /testing/local_test/timeout.killer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | cd `dirname "${0}"` 4 | sleep $# 5 | parent=$(realpath /proc/${PPID}/exe) 6 | if [ $# -lt 9 ] && [ "${1}" != sed ] && [ ${PPID} != 1 ] && ! grep -q systemd <<< "${parent}" 7 | then 8 | echo daemonize killer "${@}" 9 | ( ./timeout.killer.sh ${PPID} "${@}" <&- 2>&1 & ) & 10 | exit 11 | elif [ "${1}" != sed ] 12 | then 13 | ( ./timeout.killer.sh sed "${@}" <&- 2>&1 ) 2>&1 | sed 's,^,KILLER: ,' 14 | exit 15 | fi 16 | mod=`stat --format=%Y timeout.killer.sh` 17 | 18 | conflict() { 19 | if ! 
[ -f timeout.killer.pid ] 20 | then 21 | echo "$$" $(( ++t )) > timeout.killer.pid 22 | elif read pid ot < timeout.killer.pid 23 | then 24 | if [ "${pid}" = $$ ] 25 | then 26 | return 1 27 | elif [ -d "/proc/${pid}/" ] && [ ${ot} -ge ${t} ] 28 | then 29 | echo 'Older timeout still running' 30 | echo $$ 0 > timeout.killer.pid 31 | exit 0 32 | else 33 | rm -v timeout.killer.pid 34 | rm -v "${o}"/running.* || true 35 | fi 36 | else 37 | rm -v timeout.killer.pid 38 | fi 39 | return 0 40 | } 41 | 42 | while sleep $(( t += 9 )) 43 | do 44 | if [ ${mod} -lt `stat --format=%Y timeout.killer.sh` ] 45 | then 46 | sleep $(( ++t )) 47 | ls -lth timeout.killer.sh 48 | echo -n "${mod}" vs ' ' 49 | stat --format=%Y timeout.killer.sh 50 | echo 'timeout.killer.sh modified, recurse' 51 | sleep $(( ++t )) 52 | ./timeout.killer.sh 53 | exit 54 | fi 55 | if conflict 56 | then 57 | sleep $(( ++t )) 58 | continue 59 | fi 60 | if ! [ -f "${o}"/running.scripts.now ] 61 | then 62 | fuser *.case.sh ../../???/{myceli,controller,hyphae,watcher} > "${o}"/running.scripts.now 2>/dev/null 63 | rm -v "${o}"/running.tree.* 2>/dev/null || true 64 | elif ! diff "${o}"/running.scripts.{now,old} 2>/dev/null 65 | then 66 | mv -v "${o}"/running.scripts.{now,old} 67 | elif [ "${o}"/running.scripts.old -nt timeout.killer.pid ] 68 | then 69 | rm -v timeout.killer.pid 70 | elif ! [ -f "${o}"/running.tree.new ] 71 | then 72 | for pid in `cat "${o}"/running.scripts.old` 73 | do 74 | pstree --arguments "${pid}" | tr -d '[:digit:]' || true 75 | sleep $(( ++t )) 76 | done > "${o}"/running.tree.new 77 | elif ! 
diff "${o}"/running.tree.{new,old} 78 | then 79 | mv -v "${o}"/running.tree.{new,old} 80 | elif [ "${o}"/running.tree.old -nt timeout.killer.pid ] 81 | then 82 | rm -v timeout.killer.pid "${o}"/running.*.new 83 | elif read apid others < "${o}"/running.scripts.old 84 | then 85 | echo -e "\n \t # \t WARNING \t # " 86 | echo -e "\n \t # \t TIMING OUT PID ${apid} \t #" 87 | ps -f | grep "${apid}" 88 | kill "${apid}" 89 | rm -v "${o}"/running.scripts.old 90 | else 91 | break 92 | fi 93 | done 94 | -------------------------------------------------------------------------------- /local-dev-environment/raspberry-pi/RFM69HCW/service.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import time 3 | import adafruit_rfm69 4 | import busio 5 | from digitalio import DigitalInOut, Direction, Pull 6 | import board 7 | import threading 8 | import queue 9 | import argparse 10 | import ipaddress 11 | 12 | TIMEOUT = 0.1 13 | DELAY = 0.25 14 | 15 | radio_lock = threading.Lock() 16 | radio_write_queue = queue.Queue() 17 | 18 | def str_to_addr(addr_str): 19 | parts = addr_str.split(':') 20 | return (str(parts[0]), int(parts[1])) 21 | 22 | 23 | def radio_thread_fn(radio_handle): 24 | while True: 25 | if not radio_write_queue.empty(): 26 | data = radio_write_queue.get() 27 | print(f'Found data {len(data)} for radio to write, sending') 28 | radio_lock.acquire() 29 | radio_handle.send(bytes(data)) 30 | radio_lock.release() 31 | time.sleep(DELAY) 32 | 33 | 34 | def main_fn(): 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument('uplink_address') 37 | parser.add_argument('downlink_address') 38 | args = parser.parse_args() 39 | 40 | uplink_addr = str_to_addr(args.uplink_address) 41 | downlink_addr = str_to_addr(args.downlink_address) 42 | 43 | # Configure UDP socket 44 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 45 | sock.bind(uplink_addr) 46 | sock.settimeout(TIMEOUT) 47 | 48 | # Configure Radio Interface 49 | 
CS = DigitalInOut(board.CE1) 50 | RESET = DigitalInOut(board.D25) 51 | spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO) 52 | radio = adafruit_rfm69.RFM69(spi, CS, RESET, 915.0) 53 | 54 | radio_thread = threading.Thread(target=radio_thread_fn, args=(radio,)) 55 | radio_thread.start() 56 | 57 | print(f'Listening for UDP traffic on {args.uplink_address}') 58 | print(f'Downlinking radio data to {args.downlink_address}') 59 | 60 | while True: 61 | try: 62 | # First check if we have any incoming UDP traffic that needs sending out 63 | udp_data = sock.recv(1024) 64 | # If we received any UDP data, then send over radio interface 65 | if udp_data != None: 66 | print(f'Got UDP data {len(udp_data)}, queueing up') 67 | radio_write_queue.put(udp_data) 68 | except (Exception): 69 | pass 70 | 71 | # Now we check radio interface for any incoming packets 72 | radio_lock.acquire() 73 | radio_data = radio.receive() 74 | radio_lock.release() 75 | # If we received a radio packet, then pass along UDP interface 76 | if radio_data != None: 77 | print(f'Got radio data {len(radio_data)}, sending along') 78 | sock.sendto(radio_data, downlink_addr) 79 | 80 | time.sleep(0.01) 81 | 82 | 83 | if __name__ == "__main__": 84 | main_fn() 85 | -------------------------------------------------------------------------------- /myceli/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use config::Config; 3 | use log::{info, warn}; 4 | use messages::Message; 5 | use myceli::listener::Listener; 6 | use std::{net::ToSocketAddrs, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; 7 | use transports::UdpTransport; 8 | 9 | #[cfg(all(not(feature = "sqlite"), not(feature = "files")))] 10 | compile_error! {"Myceli built without a local storage implementation will not function. 
Select a feature, recommended: either big or small"} 11 | 12 | #[cfg(all(not(feature = "proto_ship"), not(feature = "proto_sync")))] 13 | compile_error! {"Select a protocol feature, e.g. proto_all, proto_sync, or proto_ship"} 14 | 15 | fn main() -> Result<()> { 16 | #[cfg(feature = "good_log")] 17 | env_logger::init(); 18 | #[cfg(feature = "small_log")] 19 | smalog::init(); 20 | 21 | #[cfg(feature = "proto_sync")] 22 | info!("Sync Protocol enabled"); 23 | #[cfg(feature = "proto_ship")] 24 | info!("Ship(per) Protocol enabled"); 25 | let config_path = std::env::args() 26 | .skip(1) 27 | .find(|a| PathBuf::from_str(a).map(|p| p.is_file()).unwrap_or(false)); 28 | let cfg = Config::parse(config_path, &Message::fit_size).expect("Failed to parse config"); 29 | if std::env::args().any(|a| a == "--show-config") { 30 | println!("{}", toml::to_string(&cfg).unwrap()); 31 | return Ok(()); 32 | } 33 | 34 | let mut resolved_listen_addr = cfg 35 | .listen_address 36 | .to_socket_addrs() 37 | .expect("Unable to resolve socket address"); 38 | let resolved_listen_addr = resolved_listen_addr 39 | .next() 40 | .expect("Unable to resolve socket addr"); 41 | 42 | std::fs::create_dir_all(&cfg.storage_path).expect("Failed to create storage dir"); 43 | 44 | let db_path = cfg.storage_path.clone(); 45 | let disk_bytes = cfg.disk_usage * 1024; 46 | let timeout = Duration::from_millis(cfg.chatter_ms.clamp(10, 60 * 60 * 1000).into()); 47 | let mut udp_transport = 48 | UdpTransport::new(&cfg.listen_address, cfg.mtu, cfg.chunk_transmit_throttle) 49 | .expect("Failed to create udp transport"); 50 | udp_transport 51 | .set_read_timeout(Some(timeout)) 52 | .expect("Failed to set timeout"); 53 | println!("pid={}", std::process::id()); 54 | let mut listener = Listener::new( 55 | &resolved_listen_addr, 56 | &db_path, 57 | Arc::new(udp_transport), 58 | cfg.block_size 59 | .expect("Block size default should've been calculated."), 60 | cfg.radio_address, 61 | disk_bytes, 62 | cfg.mtu, 63 | ) 64 | 
.expect("Listener creation failed"); 65 | listener 66 | .start( 67 | cfg.retry_timeout_duration, 68 | cfg.window_size, 69 | cfg.shipper_throttle_packet_delay_ms, 70 | ) 71 | .expect("Error encountered in listener operation"); 72 | println!("Exiting"); 73 | warn!("Exiting"); 74 | Ok(()) 75 | } 76 | -------------------------------------------------------------------------------- /messages/src/sync.rs: -------------------------------------------------------------------------------- 1 | use crate::cid_list; 2 | use cid::multihash; 3 | use cid::multihash::Hasher; 4 | use parity_scale_codec_derive::{Decode as ParityDecode, Encode as ParityEncode}; 5 | use serde::Serialize; 6 | use std::fmt::{Debug, Formatter}; 7 | 8 | const HASH_SIZE: usize = 16; 9 | pub const PUSH_OVERHEAD: usize = HASH_SIZE + 1; 10 | 11 | pub type HashCheck = [u8; HASH_SIZE]; 12 | 13 | #[derive(Clone, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)] 14 | pub enum SyncMessage { 15 | Push(PushMessage), //I have these CIDs, you may pull them. 16 | Pull(cid_list::CompactList), //I do not have these CIDs, maybe you could send their blocks to me 17 | Ack(cid_list::CompactList), //I *also* have these CIDs, stop pushing them 18 | Block(Vec), //Here's the data for a block. 
19 | } 20 | 21 | impl SyncMessage { 22 | pub fn name(&self) -> &'static str { 23 | match &self { 24 | Self::Push(_) => "Push", 25 | Self::Pull(_) => "Pull", 26 | Self::Ack(_) => "Ack", 27 | Self::Block(_) => "Block", 28 | } 29 | } 30 | } 31 | 32 | impl Debug for SyncMessage { 33 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 34 | match &self { 35 | Self::Push(x) => write!(f, "Push({x:?})"), 36 | Self::Pull(x) => write!(f, "Pull({x:?})"), 37 | Self::Ack(x) => write!(f, "Ack({x:?})"), 38 | Self::Block(x) => write!(f, "Block({}B)", x.len()), 39 | } 40 | } 41 | } 42 | 43 | #[derive(Clone, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)] 44 | pub struct PushMessage { 45 | pub first_cid_name: String, 46 | pub cids: cid_list::CompactList, 47 | //A corrupted pull has a modest negative impact, but a corrupted push can begin a search for a 48 | // CID that points to something which may never have actually existed in the first place. 49 | // Adding this hashing of the CIDs to detect corruption. 
50 | hash: HashCheck, 51 | } 52 | impl PushMessage { 53 | pub fn new(cids: cid_list::CompactList, first_cid_name: String) -> Self { 54 | let hash = Self::do_hash(&cids); 55 | Self { 56 | first_cid_name, 57 | cids, 58 | hash, 59 | } 60 | } 61 | pub fn check(&self) -> bool { 62 | self.hash == Self::do_hash(&self.cids) 63 | } 64 | fn do_hash(cids: &cid_list::CompactList) -> HashCheck { 65 | let mut hasher = multihash::Blake2s128::default(); 66 | for d in cids { 67 | hasher.update(&d.to_bytes()); 68 | } 69 | let digest_slice = hasher.finalize(); 70 | digest_slice.try_into().unwrap() 71 | } 72 | } 73 | impl Debug for PushMessage { 74 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 75 | write!(f, "PushMsg(")?; 76 | if !self.first_cid_name.is_empty() { 77 | write!(f, "{}=", &self.first_cid_name)?; 78 | } 79 | write!(f, "{:?})", &self.cids) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /local-dev-environment/desktop/rfm69-service/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use log::info; 3 | use std::io::{Read, Write}; 4 | use std::net::SocketAddr; 5 | use std::sync::mpsc; 6 | use std::sync::mpsc::{Receiver, Sender}; 7 | use std::sync::{Arc, Mutex}; 8 | use std::thread; 9 | use std::time::Duration; 10 | use tokio::net::UdpSocket; 11 | use tokio_serial::SerialPortBuilderExt; 12 | 13 | #[derive(Parser, Debug)] 14 | struct Args { 15 | /// Uplink Address (IP:Port) 16 | #[arg(short, long)] 17 | uplink_address: String, 18 | 19 | /// Downlink Address (IP:Port) 20 | #[arg(short, long)] 21 | downlink_address: String, 22 | 23 | /// Serial device 24 | #[arg(short, long)] 25 | serial_device: String, 26 | } 27 | 28 | #[tokio::main] 29 | async fn main() -> tokio_serial::Result<()> { 30 | env_logger::init(); 31 | 32 | let args = Args::parse(); 33 | 34 | let uplink_addr: SocketAddr = args 35 | .uplink_address 36 | .parse() 37 | .expect("Failed to 
parse uplink address"); 38 | let downlink_addr: SocketAddr = args 39 | .downlink_address 40 | .parse() 41 | .expect("Failed to parse downlink address"); 42 | 43 | let socket = UdpSocket::bind(&uplink_addr).await?; 44 | info!("UDP Uplink on: {}", args.uplink_address); 45 | info!("UPD Downlink on: {}", args.downlink_address); 46 | info!("Serial radio on: {}", args.serial_device); 47 | 48 | let (serial_queue_writer, serial_queue_reader): (Sender>, Receiver>) = 49 | mpsc::channel(); 50 | 51 | let mut serial_stream = tokio_serial::new(args.serial_device, 115200).open_native_async()?; 52 | serial_stream 53 | .set_exclusive(false) 54 | .expect("Failed to set serial to exclusive"); 55 | 56 | let wrapped_serial = Arc::new(Mutex::new(serial_stream)); 57 | 58 | let mut buf = vec![0; 1024]; 59 | 60 | let thread_serial = Arc::clone(&wrapped_serial); 61 | 62 | thread::spawn(move || loop { 63 | if let Ok(data) = serial_queue_reader.recv() { 64 | info!("Found {} bytes to send over serial", data.len()); 65 | let mut ser = thread_serial.lock().unwrap(); 66 | let _ = ser.write(&data).unwrap(); 67 | } 68 | thread::sleep(Duration::from_millis(250)); 69 | }); 70 | 71 | let main_serial = Arc::clone(&wrapped_serial); 72 | 73 | loop { 74 | if let Ok(len) = socket.try_recv(&mut buf) { 75 | if len > 0 { 76 | info!("Received {len} bytes over udp, queueing for serial"); 77 | serial_queue_writer 78 | .send(buf[..len].to_vec()) 79 | .expect("Failed to send??"); 80 | } 81 | } 82 | 83 | let len = { 84 | let mut ser = main_serial.lock().unwrap(); 85 | ser.read(&mut buf) 86 | }; 87 | if let Ok(serial_len) = len { 88 | if serial_len > 0 { 89 | info!("Received {serial_len} bytes over serial, sending over udp"); 90 | socket.send_to(&buf[..serial_len], downlink_addr).await?; 91 | } 92 | } 93 | 94 | thread::sleep(Duration::from_millis(1)); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /testing/local_test/original.case.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | source `dirname "${0}"`/setup.env 3 | configure 9876543 4 | 5 | start_myceli sat.all 6 | start_myceli gnd 7 | 8 | echo -e '\n\n# Test Case 0: Print Version Info\n' 9 | controller `port_for gnd` --output-format=json request-version 10 | jq . ctl/output.log 11 | controller `port_for sat.all` --output-format=json request-version 12 | jq . ctl/output.log 13 | 14 | echo -e '\n# Test Case - Verify Myceli Instances Alive' 15 | 16 | echo '1. Using controller software, send the `RequestAvailableBlocks` command to the `myceli` ground instance.' 17 | controller 8765 request-available-blocks 18 | echo '- This step passes if an `AvailableBlocks` response is received. Any other response / no response is a failure.' 19 | check_log 'Received.*AvailableBlocks' ctl 20 | echo '1. Using controller software, send the `RequestAvailableBlocks` command to the `myceli` space instance.' 21 | controller 8764 request-available-blocks 22 | echo '- This step passes if an `AvailableBlocks` response is received. Any other response / no response is a failure.' 23 | check_log 'Received.*AvailableBlocks' ctl 24 | 25 | echo -e '\n# Test Case - Transmit an IPFS File (Ground to Space)' 26 | 27 | date > "${o}/known_good_path" 28 | 29 | echo 'Using the controller software, send the ImportFile command to the myceli ground instance with a known good path for the one-pass payload file.' 30 | controller 8765 import-file "${o}/known_good_path" 31 | echo 'This step passes if an FileImported response with CID is received. Any other response / no response is a failure.' 32 | check_log FileImported ctl 33 | 34 | echo ' ...with the CID obtained from the FileImported response... ' 35 | export cid=`grep 'Received.response:.*FileImported' ctl/controller.log | tail -n 1 | cut -d '"' -f 4` 36 | echo "... cid=${cid} ...and with the network address of the ground-to-space radio link... 
" 37 | echo 'send the TransmitDag command to the myceli ground instance' 38 | g2s 39 | 40 | echo 'controller software, send the ValidateDag command to the myceli space instance' 41 | controller 8764 validate-dag "${cid}" 42 | echo 'This step passes if an ValidateDagResponse response with true. Any other response / no response is a failure.' 43 | check_log 'ValidateDagResponse.*Dag.is.valid' ctl 44 | 45 | echo 'controller software, send the ExportDag command to the myceli space' 46 | controller 8764 export-dag "${cid}" "${o}/exported" 47 | sleep 1 48 | echo 'This step passes if the controller is able to correctly write a file to the given file path.' 49 | diff "${o}/known_good_path" "${o}/exported" 50 | 51 | echo -e '\n# Test Case - Transmit Back & Forth, and Export File with IPFS' 52 | 53 | echo `uptime` `uname -a` > "${o}/imported2" 54 | echo 'controller software, send the ImportFile command to the myceli ground instance with a known good path for the one-pass payload file.' 55 | controller 8765 import-file "${o}/imported2" 56 | echo 'This step passes if an FileImported response with CID is received. Any other response / no response is a failure. ...' 57 | check_log Received.*FileImported.*cid ctl 58 | 59 | export cid=`grep Received.*FileImported ctl/controller.log | tail -n 1 | cut -d '"' -f 4` 60 | echo "cid=${cid}" 61 | 62 | echo 'Using the controller software, send the TransmitDag command to the myceli ground instance with the CID obtained from the FileImported response and with the network address of the ground-to-space radio link.' 63 | g2s 64 | echo 'controller software, send the ValidateDag command to the myceli space' 65 | controller 8764 validate-dag "${cid}" 66 | check_log 'ValidateDagResponse.*Dag.is.valid' ctl 67 | 68 | echo 'Shutdown the myceli ground instance' 69 | kill_myceli gnd 70 | 71 | echo ', delete the storage database' 72 | rm gnd/storage.db 73 | 74 | echo ', and start the myceli ground instance again.' 
75 | start_myceli gnd 76 | 77 | echo 'controller software, send the TransmitDag command to the myceli space' 78 | s2g 79 | sleep 1 80 | 81 | echo 'controller software, send the ValidateDag command to the myceli ground' 82 | controller 8765 validate-dag "${cid}" 83 | check_log 'ValidateDagResponse.*Dag.is.valid' ctl 84 | 85 | echo 'controller software, send the ExportDag command to the myceli ground' 86 | controller 8765 export-dag "${cid}" "${o}/exported2" 87 | 88 | diff "${o}/"{im,ex}ported2 -------------------------------------------------------------------------------- /testing/local_test/watcher.case.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | source `dirname "${0}"`/setup.env 3 | 4 | 5 | wait_for_sync() { 6 | d=${2} 7 | b=`other_side ${d}` 8 | sleep 1 9 | check_log "${3}.*${d}${1}" ${d} watcher || check_log "${3}.*${d}${1}" ${d} watcher 10 | check_log "Imported.path.*${d}${1}" ${d} myceli 11 | check_log "Remote.(127.0.0.1|localhost):87...reported.*supports.sync" ${d} myceli 12 | check_log "Remote.(127.0.0.1|localhost):87...reported.*supports.sync" ${b} myceli 13 | check_log "Sending.Sync.Push" ${d} myceli 14 | sleep 5 15 | check_log "Sync.:handle.Push.PushMsg.${d}${1}" ${b} myceli 16 | p=`port_for ${b}` 17 | touch ${o}/notfound 18 | for i in {0..9}1 19 | do 20 | sleep $i 21 | controller ${p} --output-format json list-files 22 | if jq ".AvailableDags.dags[]" ctl/output.log 2>/dev/null | grep -F --color=always "${d}${1}" 23 | then 24 | break 25 | fi 26 | done 27 | export cid=`jq -r ".AvailableDags.dags[] | select( .filename == \"${d}${1}\" ).cid" ctl/output.log` 28 | echo "filename=${d}${1};CID=${cid}" 29 | if [ "${cid}" = '' ] 30 | then 31 | jq . 
ctl/output.log 32 | exit 32 33 | fi 34 | for i in {0..9}1 35 | do 36 | controller ${p} --output-format json validate-dag ${cid} 37 | if jq .ValidateDagResponse.result ctl/output.log 2>/dev/null | grep -F --color=always 'Dag is valid' 38 | then 39 | cat ctl/output.log 40 | rm ${o}/notfound 41 | break 42 | fi 43 | done 44 | if [ -f ${o}/notfound ] 45 | then 46 | echo "DAG for ${d}${1} never finished syncing." 47 | kill_all 48 | exit 5 49 | fi 50 | e=`pwd`/${b}/synced.${d}${1} 51 | echo "${p} Exporting ${cid} to ${e}" 52 | for i in {0..99} 53 | do 54 | if controller ${p} export-dag ${cid} ${e} 55 | then 56 | break 57 | else 58 | echo "Trouble exporting... could be temporary." 59 | sleep $i 60 | fi 61 | done 62 | for i in {1..99} 63 | do 64 | sleep $i 65 | if [ ! -f ${e} ] 66 | then 67 | sleep $i 68 | echo "Waiting for ${e} to be exported." 69 | continue 70 | fi 71 | if fuser "${e}" || [ `stat --format=%Y ${e}` -lt `date -d '1 second ago' +%s` ] 72 | then 73 | echo "Waiting for writing to finish on ${e}" 74 | break 75 | fi 76 | done 77 | set -x 78 | diff ${b}/synced.${d}${1} ${d}/watched/${d}${1} 79 | set +x 80 | } 81 | 82 | for sd in sat.{all,sync} # Not ship as it won't sync on its own 83 | do 84 | export sd 85 | 86 | echo -e "\n\n# Test suite: watcher ${sd}" 87 | 88 | kill_all 89 | rm */*.log 90 | for rd in {gnd,sat.{all,sync,ship}}/{watched,storage.db,blocks,cids,names} 91 | do 92 | ( 93 | rm -r "${rd}" 2>/dev/null || true 94 | ) 95 | done 96 | 97 | 98 | mkdir -p gnd/watched ${sd}/watched/ 99 | date > gnd/watched/gnd.prexisting.txt 100 | date -d 'next second' > ${sd}/watched/${sd}.prexisting.txt 101 | configure 7 102 | start_myceli ${sd} 103 | start_myceli gnd 104 | export RUST_LOG=debug 105 | start watcher gnd config.toml 106 | start watcher ${sd} config.toml 107 | sleep 9 108 | echo -e "\n ## Test: watcher discovers pre-existing file ${sd}\n" 109 | wait_for_sync .prexisting.txt gnd 'Discovered path in' 110 | sleep 1 111 | wait_for_sync .prexisting.txt ${sd} 
'Discovered.path in' 112 | 113 | echo -e '\n ## Test: watcher picks up moved-in file\n' 114 | for s in gnd ${sd} 115 | do 116 | echo 'begin' > ${o}/${s}.big.txt 117 | yes $s `date` | head -c 2048 >> ${o}/${s}.big.txt 118 | echo -e '\nend' >> ${o}/${s}.big.txt 119 | mv ${o}/${s}.big.txt ${s}/watched/ 120 | sleep 1 121 | done 122 | wait_for_sync .big.txt ${sd} 'File modified, import:' 123 | wait_for_sync .big.txt gnd 'File modified, import:' 124 | 125 | echo -e '\n ## Test: watcher picks up file written in-situ\n' 126 | for s in gnd ${sd} 127 | do 128 | yes $s `date` | head -c 2048 >> ${s}/watched/${s}.written.txt 129 | sleep 1 130 | done 131 | echo " ### From ${sd} to ground ###" 132 | wait_for_sync .written.txt ${sd} 'File modified, import:' 133 | echo " ### From ground to ${sd} ###" 134 | wait_for_sync .written.txt gnd "File modified, import:" 135 | done 136 | -------------------------------------------------------------------------------- /DESIGN.md: -------------------------------------------------------------------------------- 1 | This is an overview of the current data transfer protocol implemented in the `block-streamer/` application and a few open questions for the future. 2 | 3 | ## Current Design 4 | 5 | The current design and implementation of the `block-streamer/` is intended to be a very simple way to transmit a file in IPFS across a radio link and reassemble using block data. 6 | 7 | The file to be transmitted is read into 50 (tbd configurable) byte blocks. Each block consists of a CID, data, and links to other CIDs (if a root node). Each block is serialized into one binary blob, which is broken up into 40 byte (tbd configurable) chunks. Each chunk consists of a CID marker (first 4 bytes of CID), a chunk offset, and data. A header message consisting of the block CID is transmitted to the receiver first, followed by the chunks of the block's data+links, which are then reassembled in order. 
The current implementation is able to handle a dag with depth of two and can reassemble blocks sent out of order, but it can't yet handle chunks sent out of order. 8 | 9 | *Current magic numbers and CID marker are placeholders to get things working, not final decisions.* 10 | 11 | *Why not use the CAR transport around blocks?* 12 | 13 | In this initial implementation the CAR transport is not used. The reasoning was that this IPFS implementation should be designed for exchanging data over constrained communications links. This means it is likely that blocks will be transmitted individually, or even broken up into smaller chunks. There did not seem to be an immediate advantage to packaging these blocks up into a CAR, only to break that CAR up again into smaller chunks for transmission, when the blocks themselves could be transmitted as-is. However the CAR transport may still prove to be useful in this system in the future. 14 | 15 | *Why we decided to chunk blocks (hash+data) down to payload size* 16 | 17 | The [lab radio hardware](https://www.adafruit.com/product/3076) currently used in developing this system has a [strict payload size limit of 60 bytes](https://github.com/adafruit/RadioHead/blob/master/RH_RF69.h#L346-L347). While this radio may be more restrictive than typical satellite radios, it seems prudent to work under stricter requirements to ensure this system can scale both up and down to different payload limits. If sending individual 60-byte blocks the payload is already mostly consumed by the CID (36 bytes). This 60% overhead is not exactly efficient, so the decision was made to break blocks down into chunks which contain a CID marker (4 bytes), a chunk offset (2 bytes), and a data blob, minimizing overhead to improve efficiency.
18 | 19 | ## Future Design Decisions 20 | 21 | *Are there existing UDP data transfer protocols we can borrow from or use as-is?* 22 | 23 | The current protocol for chunking/sending/assembling blocks was intentionally made simple to better understand the block transmission problem. It is very possible that an existing protocol built on UDP may provide the necessary chunking functionality, or at least functional pieces which can be built on. 24 | 25 | Existing protocols which should be further investigated: 26 | - [UDT](https://en.wikipedia.org/wiki/UDP-based_Data_Transfer_Protocol) 27 | - [QUIC](https://www.chromium.org/quic/) 28 | - [CoAP](https://en.wikipedia.org/wiki/Constrained_Application_Protocol) 29 | 30 | *How should it handle specific data requests?* 31 | 32 | A crucial part of this system will be correctly handling the transmission of a file across multiple communications passes, and dealing with lossy communication links, so the ability to request specific pieces of a DAG will be required. There are a number of different methods for specifying these pieces, such as by CID, with bitmasks, bloom filters, and sub-graphs. This decision will likely include a simple proof of concept implementing individual CID requests, followed by an analysis of the tradeoffs of other specification methods. 33 | 34 | *Formal protocol messages* 35 | 36 | The current implementation is a very simple one-way stream of block chunks. The future functional system will need to implement a formalized protocol with defined messages which allow for interactions such as requesting a specific CID or indicating that a CID has been received correctly. These will likely be created as required when implementing additional protocol functionality. 
-------------------------------------------------------------------------------- /ipfs-unixfs/src/chunker.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt::{Debug, Display}, 3 | io, 4 | pin::Pin, 5 | str::FromStr, 6 | task, 7 | }; 8 | 9 | use anyhow::{anyhow, Context}; 10 | use bytes::Bytes; 11 | use futures::{stream::BoxStream, Stream}; 12 | use tokio::io::AsyncRead; 13 | 14 | mod fixed; 15 | mod rabin; 16 | 17 | /// Chunks are limited to 1MiB by default 18 | pub const DEFAULT_CHUNK_SIZE_LIMIT: usize = 1024 * 1024; 19 | 20 | pub use self::{ 21 | fixed::{Fixed, DEFAULT_CHUNKS_SIZE}, 22 | rabin::Rabin, 23 | }; 24 | 25 | #[derive(Debug, PartialEq, Eq, Clone)] 26 | pub enum Chunker { 27 | Fixed(Fixed), 28 | Rabin(Box), 29 | } 30 | 31 | impl Display for Chunker { 32 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 33 | match self { 34 | Self::Fixed(c) => write!(f, "Chunker::Fixed({})", c.chunk_size), 35 | Self::Rabin(_) => write!(f, "Chunker::Rabin"), 36 | } 37 | } 38 | } 39 | 40 | /// Chunker configuration. 41 | #[derive(Debug, Clone, PartialEq, Eq, Copy)] 42 | pub enum ChunkerConfig { 43 | /// Fixed sized chunker. 44 | Fixed(usize), 45 | /// Rabin chunker. 
46 | Rabin, 47 | } 48 | 49 | impl Display for ChunkerConfig { 50 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 51 | match self { 52 | Self::Fixed(chunk_size) => write!(f, "fixed-{chunk_size}"), 53 | Self::Rabin => write!(f, "rabin"), 54 | } 55 | } 56 | } 57 | 58 | impl FromStr for ChunkerConfig { 59 | type Err = anyhow::Error; 60 | 61 | fn from_str(s: &str) -> Result { 62 | if s == "rabin" { 63 | return Ok(ChunkerConfig::Rabin); 64 | } 65 | 66 | if let Some(rest) = s.strip_prefix("fixed") { 67 | if rest.is_empty() { 68 | return Ok(ChunkerConfig::Fixed(DEFAULT_CHUNKS_SIZE)); 69 | } 70 | 71 | if let Some(rest) = rest.strip_prefix('-') { 72 | let chunk_size: usize = rest.parse().context("invalid chunk size")?; 73 | if chunk_size > DEFAULT_CHUNK_SIZE_LIMIT { 74 | return Err(anyhow!("chunk size too large")); 75 | } 76 | 77 | return Ok(ChunkerConfig::Fixed(chunk_size)); 78 | } 79 | } 80 | 81 | Err(anyhow!("unknown chunker: {}", s)) 82 | } 83 | } 84 | 85 | impl From for Chunker { 86 | fn from(cfg: ChunkerConfig) -> Self { 87 | match cfg { 88 | ChunkerConfig::Fixed(chunk_size) => Chunker::Fixed(Fixed::new(chunk_size)), 89 | ChunkerConfig::Rabin => Chunker::Rabin(Box::default()), 90 | } 91 | } 92 | } 93 | 94 | pub enum ChunkerStream<'a> { 95 | Fixed(BoxStream<'a, io::Result>), 96 | Rabin(BoxStream<'a, io::Result>), 97 | } 98 | 99 | impl<'a> Debug for ChunkerStream<'a> { 100 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 101 | match self { 102 | Self::Fixed(_) => write!(f, "Fixed(impl Stream)"), 103 | Self::Rabin(_) => write!(f, "Rabin(impl Stream)"), 104 | } 105 | } 106 | } 107 | 108 | impl<'a> Stream for ChunkerStream<'a> { 109 | type Item = io::Result; 110 | 111 | fn poll_next( 112 | mut self: Pin<&mut Self>, 113 | cx: &mut task::Context<'_>, 114 | ) -> task::Poll> { 115 | match &mut *self { 116 | Self::Fixed(ref mut stream) => Pin::new(stream).poll_next(cx), 117 | Self::Rabin(ref mut stream) => 
Pin::new(stream).poll_next(cx), 118 | } 119 | } 120 | 121 | fn size_hint(&self) -> (usize, Option) { 122 | match self { 123 | Self::Fixed(ref stream) => stream.size_hint(), 124 | Self::Rabin(ref stream) => stream.size_hint(), 125 | } 126 | } 127 | } 128 | 129 | impl Chunker { 130 | pub fn chunks<'a, R: AsyncRead + Unpin + Send + 'a>(self, source: R) -> ChunkerStream<'a> { 131 | match self { 132 | Self::Fixed(chunker) => ChunkerStream::Fixed(chunker.chunks(source)), 133 | Self::Rabin(chunker) => ChunkerStream::Rabin(chunker.chunks(source)), 134 | } 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /.github/workflows/create-release.yaml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | jobs: 9 | start_release: 10 | name: build_release 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v3 14 | - name: Create release 15 | uses: ncipollo/release-action@v1 16 | id: create_release 17 | with: 18 | draft: true 19 | prerelease: false 20 | name: ${{ github.ref_name }} 21 | tag: ${{ github.ref }} 22 | env: 23 | GITHUB_TOKEN: ${{ github.token }} 24 | build_upload_artifacts: 25 | needs: [ start_release ] 26 | name: build_upload_artifacts 27 | runs-on: ubuntu-latest 28 | strategy: 29 | matrix: 30 | project: [ myceli, controller, hyphae, watcher ] 31 | target: 32 | [ 33 | armv7-unknown-linux-gnueabihf, 34 | aarch64-unknown-linux-gnu, 35 | x86_64-unknown-linux-gnu, 36 | ] 37 | steps: 38 | - uses: actions/checkout@v3 39 | - uses: dtolnay/rust-toolchain@stable 40 | - name: install cross 41 | run: cargo install cross --git https://github.com/cross-rs/cross 42 | - name: clean cargo 43 | run: cargo clean 44 | - name: cargo/cross build 45 | run: CROSS_CONFIG=Cross.toml cross build --bin ${{ matrix.project }} --target ${{ matrix.target }} --release --features big 46 | - name: Archive binary 47 | run: tar -czvf ${{ 
matrix.project }}-${{ matrix.target }}.tar.gz -C ./target/${{ matrix.target }}/release ${{ matrix.project }} 48 | - name: upload binary 49 | run: | 50 | echo "GITHUB_REF=${GITHUB_REF}" 51 | mv "$SOURCE_PATH" "$TARGET_PATH" 52 | set -x 53 | gh release upload "$GITHUB_REF" "$TARGET_PATH" 54 | env: 55 | GITHUB_TOKEN: ${{ github.token }} 56 | SOURCE_PATH: ./${{ matrix.project }}-${{ matrix.target }}.tar.gz 57 | TARGET_PATH: ./${{ matrix.project }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz 58 | shell: bash 59 | small: 60 | needs: [ start_release ] 61 | name: small 62 | runs-on: ubuntu-latest 63 | strategy: 64 | matrix: 65 | project: [ myceli, watcher ] 66 | target: 67 | [ 68 | armv7-unknown-linux-gnueabihf, 69 | aarch64-unknown-linux-gnu, 70 | ] 71 | steps: 72 | - uses: actions/checkout@v3 73 | - uses: dtolnay/rust-toolchain@stable 74 | - name: install cross 75 | run: cargo install cross --git https://github.com/cross-rs/cross 76 | - name: clean cargo 77 | run: cargo clean 78 | - name: cargo/cross build 79 | run: CROSS_CONFIG=Cross.toml cross build --bin ${{ matrix.project }} --target ${{ matrix.target }} --profile small --features small --no-default-features 80 | - name: Compress binary 81 | run: | 82 | tar -cvf ${{ matrix.project }}-small-${{ github.ref_name }}-${{ matrix.target }}.tar -C ./target/${{ matrix.target }}/small ${{ matrix.project }} 83 | xz -9 --keep --extreme ${{ matrix.project }}-small-${{ github.ref_name }}-${{ matrix.target }}.tar 84 | gzip --best ${{ matrix.project }}-small-${{ github.ref_name }}-${{ matrix.target }}.tar 85 | - name: upload binary 86 | run: | 87 | echo "GITHUB_REF=${GITHUB_REF}" 88 | set -x 89 | gh release upload "${GITHUB_REF}" *-small-${{ github.ref_name }}-${{ matrix.target }}.tar.?z 90 | env: 91 | GITHUB_TOKEN: ${{ github.token }} 92 | shell: bash 93 | 94 | build_upload_docker: 95 | needs: [ start_release ] 96 | name: build_upload_docker 97 | runs-on: ubuntu-latest 98 | steps: 99 | - uses: actions/checkout@v3 100 | - 
name: Login to Github Container Registry 101 | uses: docker/login-action@v2 102 | with: 103 | registry: ghcr.io 104 | username: ${{ github.actor }} 105 | password: ${{ secrets.GITHUB_TOKEN }} 106 | - name: Build and publish myceli docker 107 | run: | 108 | docker build -f myceli.Dockerfile . --tag ghcr.io/ipfs-shipyard/myceli:latest --tag ghcr.io/ipfs-shipyard/myceli:${{ github.ref_name }} 109 | docker push ghcr.io/ipfs-shipyard/myceli:latest 110 | docker push ghcr.io/ipfs-shipyard/myceli:${{ github.ref_name }} 111 | -------------------------------------------------------------------------------- /ipfs-unixfs/src/chunker/fixed.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use bytes::{Bytes, BytesMut}; 4 | use futures::{stream::BoxStream, StreamExt}; 5 | use tokio::io::{AsyncRead, AsyncReadExt}; 6 | 7 | /// Default size for chunks. 8 | pub const DEFAULT_CHUNKS_SIZE: usize = 1024 * 256; 9 | 10 | #[derive(Debug, Clone, PartialEq, Eq)] 11 | pub struct Fixed { 12 | pub chunk_size: usize, 13 | } 14 | 15 | impl Default for Fixed { 16 | fn default() -> Self { 17 | Self { 18 | chunk_size: DEFAULT_CHUNKS_SIZE, 19 | } 20 | } 21 | } 22 | 23 | impl Fixed { 24 | pub fn new(chunk_size: usize) -> Self { 25 | assert!(chunk_size > 0); 26 | 27 | Self { chunk_size } 28 | } 29 | 30 | pub fn chunks<'a, R: AsyncRead + Unpin + Send + 'a>( 31 | self, 32 | mut source: R, 33 | ) -> BoxStream<'a, io::Result> { 34 | let chunk_size = self.chunk_size; 35 | async_stream::stream! 
{ 36 | let mut buffer = BytesMut::with_capacity(chunk_size); 37 | let mut current_len = 0; 38 | 39 | loop { 40 | if current_len == 0 { 41 | buffer.clear(); 42 | } 43 | match source.read_buf(&mut buffer).await { 44 | Ok(len) => { 45 | current_len += len; 46 | debug_assert!(current_len == buffer.len()); 47 | if current_len == chunk_size { 48 | // read a full chunk 49 | current_len = 0; 50 | debug_assert!(buffer.len()==chunk_size); 51 | yield Ok(buffer.clone().freeze()); 52 | } else if current_len < chunk_size && len > 0 { 53 | // not done reading, read again 54 | continue; 55 | } else if current_len > chunk_size { 56 | // read more than a chunk, emit only a single chunk 57 | let out = buffer.split_to(chunk_size); 58 | current_len -= chunk_size; 59 | debug_assert!(out.len()==chunk_size); 60 | yield Ok(out.freeze()); 61 | } else { 62 | // finished reading 63 | debug_assert!(len == 0); 64 | if current_len > 0 { 65 | debug_assert!(buffer.len() { 72 | yield Err(err); 73 | } 74 | } 75 | } 76 | } 77 | .boxed() 78 | } 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | use futures::TryStreamExt; 84 | 85 | use super::*; 86 | 87 | #[tokio::test] 88 | async fn test_fixed_chunker() { 89 | // exact match 90 | { 91 | let mut content = Vec::with_capacity(1024); 92 | content.resize(256, 1); 93 | content.resize(512, 2); 94 | content.resize(768, 3); 95 | content.resize(1024, 4); 96 | let bytes = std::io::Cursor::new(content); 97 | 98 | let chunks: Vec<_> = Fixed::new(256).chunks(bytes).try_collect().await.unwrap(); 99 | assert_eq!(chunks.len(), 4); 100 | assert_eq!(&chunks[0], &[1u8; 256][..]); 101 | assert_eq!(&chunks[1], &[2u8; 256][..]); 102 | assert_eq!(&chunks[2], &[3u8; 256][..]); 103 | assert_eq!(&chunks[3], &[4u8; 256][..]); 104 | } 105 | 106 | // overflow 107 | { 108 | let mut content = Vec::with_capacity(1024); 109 | content.resize(256, 1); 110 | content.resize(512, 2); 111 | content.resize(768, 3); 112 | content.resize(1024, 4); 113 | content.push(5); 114 | content.push(5); 
115 | 116 | let bytes = std::io::Cursor::new(content); 117 | let chunks: Vec<_> = Fixed::new(256).chunks(bytes).try_collect().await.unwrap(); 118 | assert_eq!(chunks.len(), 5); 119 | assert_eq!(&chunks[0], &[1u8; 256][..]); 120 | assert_eq!(&chunks[1], &[2u8; 256][..]); 121 | assert_eq!(&chunks[2], &[3u8; 256][..]); 122 | assert_eq!(&chunks[3], &[4u8; 256][..]); 123 | assert_eq!(&chunks[4], &[5u8; 2][..]); 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /messages/src/api.rs: -------------------------------------------------------------------------------- 1 | use clap::Subcommand; 2 | use parity_scale_codec_derive::{Decode as ParityDecode, Encode as ParityEncode}; 3 | use serde::Serialize; 4 | 5 | #[derive(Clone, Debug, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)] 6 | pub struct DagInfo { 7 | pub cid: String, 8 | pub filename: String, 9 | } 10 | 11 | #[derive(Clone, Debug, ParityEncode, ParityDecode, Serialize, Subcommand, Eq, PartialEq)] 12 | pub enum ApplicationAPI { 13 | /// Asks IPFS instance to import a file path into the local IPFS store 14 | ImportFile { 15 | path: String, 16 | }, 17 | /// Response message to ImportFile containing file's root CID 18 | #[command(skip)] 19 | FileImported { 20 | path: String, 21 | cid: String, 22 | }, 23 | /// Asks IPFS instance to attempt to export a DAG to a file path 24 | ExportDag { 25 | cid: String, 26 | path: String, 27 | }, 28 | /// Used to indicate the failure of a dag export 29 | #[command(skip)] 30 | DagExportFailed { 31 | cid: String, 32 | path: String, 33 | error: String, 34 | }, 35 | /// Used to indicate a successful dag export 36 | #[command(skip)] 37 | DagExported { 38 | cid: String, 39 | path: String, 40 | }, 41 | /// Sets current connected state 42 | SetConnected { 43 | #[arg(action(clap::ArgAction::Set), required(true))] 44 | connected: bool, 45 | }, 46 | /// Requests the current connected state 47 | GetConnected, 48 | /// Response to 
GetConnected, with current connected state 49 | #[command(skip)] 50 | ConnectedState { 51 | connected: bool, 52 | }, 53 | /// Asks IPFS instance if it has a valid DAG corresponding to the CID and all its child data 54 | ValidateDag { 55 | cid: String, 56 | }, 57 | /// Response to ValidateDag request, contains requested CID and a text response 58 | #[command(skip)] 59 | ValidateDagResponse { 60 | cid: String, 61 | result: String, 62 | }, 63 | // Initiates the transmission of a DAG corresponding to the given CID, with a given number of retries 64 | TransmitDag { 65 | cid: String, 66 | target_addr: String, 67 | retries: u8, 68 | }, 69 | /// Indicates that a Dag has been transmitted completely successfully 70 | DagTransmissionComplete { 71 | cid: String, 72 | }, 73 | /// Initiates transmission of block corresponding to the given CID 74 | TransmitBlock { 75 | cid: String, 76 | target_addr: String, 77 | }, 78 | // Resumes the transmission of a dag which may have run out of retries or 79 | // paused due to connectivity lost 80 | ResumeTransmitDag { 81 | cid: String, 82 | }, 83 | // Resumes the transmission of all dags which may be paused 84 | ResumeTransmitAllDags, 85 | /// Listens on address for data and writes out files received 86 | Receive { 87 | listen_addr: String, 88 | }, 89 | /// Request Available Blocks 90 | RequestAvailableBlocks, 91 | /// Advertise all available blocks by CID 92 | #[command(skip)] 93 | AvailableBlocks { 94 | cids: Vec, 95 | }, 96 | /// Delete CID from local store 97 | DeleteCid { 98 | cid: String, 99 | }, 100 | /// Delete block from local store 101 | DeleteBlock { 102 | cid: String, 103 | }, 104 | /// Request missing DAG blocks 105 | GetMissingDagBlocks { 106 | cid: String, 107 | }, 108 | /// List of missing blocks and associated DAG's CID 109 | #[command(skip)] 110 | MissingDagBlocks { 111 | cid: String, 112 | blocks: Vec, 113 | }, 114 | /// Requests current version of myceli 115 | RequestVersion { 116 | label: Option, 117 | }, 118 | /// 
Provides current version of myceli 119 | #[command(skip)] 120 | Version { 121 | version: String, 122 | rust: String, 123 | target: String, 124 | profile: String, 125 | features: Vec, 126 | remote_label: Option, 127 | }, 128 | /// Request ALL available DAGs 129 | RequestAvailableDags, 130 | /// Request a list of named DAGs 131 | ListFiles, 132 | /// Lists available DAGs and corresponding filenames 133 | #[command(skip)] 134 | AvailableDags { 135 | dags: Vec, 136 | }, 137 | Acknowledged { 138 | req: String, 139 | }, 140 | // TODO: Implement later 141 | // Information about the next pass used for calculating 142 | // data transfer parameters 143 | // NextPassInfo { 144 | // duration: u32, 145 | // send_bytes: u32, 146 | // receive_bytes: u32, 147 | // }, 148 | } 149 | -------------------------------------------------------------------------------- /config/src/config.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use figment::{ 3 | providers::{Format, Serialized, Toml}, 4 | Figment, Provider, 5 | }; 6 | use log::{debug, info, trace}; 7 | use serde_derive::{Deserialize, Serialize}; 8 | use std::path::PathBuf; 9 | 10 | //Duplicated in transport 11 | const MAX_MTU: u16 = 3 * 1024; 12 | 13 | #[derive(Debug, Deserialize, Serialize, Clone)] 14 | pub struct Config { 15 | // The network address myceli will listen on for incoming messages. 16 | pub listen_address: String, 17 | // The timeout before retrying a dag transfer, measured in milliseconds. This is reset every window. 18 | pub retry_timeout_duration: u64, 19 | // Directory path for myceli to use for storage. 20 | pub storage_path: String, 21 | // The MTU (in bytes) used to chunk up messages into UDP packets. Maximum value is 3072. 22 | pub mtu: u16, 23 | // The number of blocks to send in each window of a DAG transfer. 24 | pub window_size: u32, 25 | // The size (in bytes) of the blocks that a file is broken up into when imported. 
26 | pub block_size: Option, 27 | // The number of milliseconds to wait between sending chunks of a DAG transfer, optional. 28 | pub chunk_transmit_throttle: Option, 29 | // The network address of the radio that myceli should respond to by default, if not set then 30 | // myceli will respond to the sending address (or address set in relevant request). 31 | pub radio_address: Option, 32 | // A path to a directory which where files that appear should be auto-imported. 33 | // Absence implies no such directory exists. 34 | // This value only relevant if using the `watcher` command. 35 | pub watched_directory: Option, 36 | //How much storage space should Local Storage use? Measured in kiB. Default is 1 GiB 37 | pub disk_usage: u64, 38 | //Minimum amount of time (milliseconds) to elapse between background tasks 39 | //Note: some background tasks can send a packet on the network depending on circumstance. 40 | //Default is 10000 (10 seconds). 41 | //Minimum is 10 (10ms) 42 | //Maximum is 3600000 (1 hour) 43 | pub chatter_ms: u32, 44 | pub shipper_throttle_packet_delay_ms: u32, 45 | } 46 | 47 | impl Default for Config { 48 | fn default() -> Self { 49 | Config { 50 | // Default listening address 51 | listen_address: "0.0.0.0:8001".to_string(), 52 | // Default retry timeout of 120_000 ms = 120 s = 2 minutes 53 | retry_timeout_duration: 120_000, 54 | // Default storage dir 55 | storage_path: default_storage_path(), 56 | // Default MTU appropriate for dev radio 57 | // Maxes out at 1024 * 3 bytes 58 | mtu: 512, 59 | // Default to sending five blocks at a time 60 | window_size: 5, 61 | // Default to slightly smaller than mtu 62 | block_size: None, 63 | // Default to no throttling of chunks 64 | chunk_transmit_throttle: None, 65 | // Default to no set radio address 66 | radio_address: None, 67 | watched_directory: None, 68 | disk_usage: 1024 * 1024, 69 | chatter_ms: 10_000, 70 | shipper_throttle_packet_delay_ms: 0, 71 | } 72 | } 73 | } 74 | fn default_storage_path() -> String { 
75 | dirs::data_local_dir() 76 | .and_then(|d: PathBuf| { 77 | d.join("myceli") 78 | .into_os_string() 79 | .to_str() 80 | .map(|s| s.to_owned()) 81 | }) 82 | .unwrap_or_else(|| "storage".to_owned()) 83 | } 84 | fn default_config_path() -> Option { 85 | if let Some(d) = dirs::config_dir() { 86 | let f = d.join("myceli").join("myceli.toml"); 87 | if f.is_file() { 88 | return f.into_os_string().into_string().ok(); 89 | } 90 | } 91 | None 92 | } 93 | impl Config { 94 | pub fn parse(path: Option, mtu2block_size: &dyn Fn(u16) -> u16) -> Result { 95 | trace!("Config::parse({path:?})"); 96 | let mut config = Figment::from(Serialized::defaults(Config::default())); 97 | if let Some(path) = path.or(default_config_path()) { 98 | let toml_values = Toml::file(&path); 99 | debug!("Config values in file {}: {:?}", &path, toml_values.data()); 100 | config = config.merge(toml_values); 101 | } 102 | let mut config: Self = config.extract()?; 103 | if config.mtu > MAX_MTU { 104 | bail!("Configured MTU is too large, cannot exceed {MAX_MTU}",); 105 | } 106 | if config.block_size.is_none() { 107 | let sz = mtu2block_size(config.mtu).into(); 108 | info!("Used a mtu {} to deduce block_size {}", config.mtu, sz); 109 | config.block_size = Some(sz); 110 | } 111 | if config.block_size.unwrap() < 128 { 112 | bail!("block_size too small"); 113 | } 114 | Ok(config) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /transports/src/udp_transport.rs: -------------------------------------------------------------------------------- 1 | use crate::error::TransportError; 2 | use crate::{ 3 | error::{adhoc, Result}, 4 | udp_chunking::SimpleChunker, 5 | Transport, MAX_MTU, 6 | }; 7 | use log::{debug, error, info, trace}; 8 | use messages::Message; 9 | use std::{ 10 | io, 11 | net::{ToSocketAddrs, UdpSocket}, 12 | sync::{Arc, Mutex}, 13 | thread::sleep, 14 | time::Duration, 15 | }; 16 | 17 | pub struct UdpTransport { 18 | pub socket: UdpSocket, 19 | 
chunker: Arc>, 20 | max_read_attempts: Option, 21 | chunk_transmit_throttle: Option, 22 | timeout: Option, 23 | } 24 | 25 | impl UdpTransport { 26 | pub fn new(listen_addr: &str, mtu: u16, chunk_transmit_throttle: Option) -> Result { 27 | info!("Will listen on {}", &listen_addr); 28 | let socket = UdpSocket::bind(listen_addr)?; 29 | Ok(UdpTransport { 30 | socket, 31 | chunker: Arc::new(Mutex::new(SimpleChunker::new(mtu))), 32 | max_read_attempts: None, 33 | chunk_transmit_throttle, 34 | timeout: None, 35 | }) 36 | } 37 | 38 | pub fn set_read_timeout(&mut self, dur: Option) -> Result<()> { 39 | self.timeout = dur; 40 | Ok(self.socket.set_read_timeout(dur.map(|d| d / 10))?) 41 | } 42 | 43 | pub fn set_max_read_attempts(&mut self, attempts: Option) { 44 | self.max_read_attempts = attempts; 45 | } 46 | } 47 | 48 | impl Transport for UdpTransport { 49 | fn receive(&self) -> Result<(Message, String)> { 50 | let mut buf = vec![0; usize::from(MAX_MTU)]; 51 | let mut sender_addr; 52 | let mut read_errors = 0; 53 | let mut read_len; 54 | let mut timeouts = 0; 55 | loop { 56 | loop { 57 | trace!("Receiving..."); 58 | match self.socket.recv_from(&mut buf) { 59 | Ok((len, sender)) => { 60 | debug!("Received {len} bytes from {sender}"); 61 | if len > 0 { 62 | read_len = len; 63 | sender_addr = sender; 64 | break; 65 | } 66 | } 67 | Err(e) => match e.kind() { 68 | io::ErrorKind::TimedOut | io::ErrorKind::WouldBlock => { 69 | trace!("Receive timed out. 
May be normal depending on usage."); 70 | if timeouts >= 10 { 71 | return Err(TransportError::TimedOut); 72 | } 73 | timeouts += 1; 74 | } 75 | _ => { 76 | error!("Recv failed {e}"); 77 | if self.max_read_attempts.unwrap_or(u16::MAX) <= read_errors { 78 | return Err(e.into()); 79 | } 80 | read_errors += 1; 81 | } 82 | }, 83 | } 84 | sleep(Duration::from_millis(1)); 85 | } 86 | 87 | debug!("Received possible chunk of {} bytes", read_len); 88 | let hex_str = buf[0..read_len] 89 | .iter() 90 | .map(|b| format!("{b:02X}")) 91 | .collect::(); 92 | trace!("Received possible chunk of hex {hex_str}"); 93 | 94 | match self 95 | .chunker 96 | .lock() 97 | .expect("Lock failed, this is really bad") 98 | .unchunk(&buf[0..read_len]) 99 | { 100 | Ok(Some(msg)) => { 101 | debug!("Assembled msg: {msg:?}"); 102 | return Ok((msg, sender_addr.to_string())); 103 | } 104 | Ok(None) => { 105 | debug!("Received: no msg ready for assembly yet"); 106 | } 107 | Err(err) => { 108 | return Err(err); 109 | } 110 | } 111 | } 112 | } 113 | 114 | fn send(&self, msg: Message, addr: &str) -> Result<()> { 115 | debug!("UDP: Transmitting msg: {msg:?}"); 116 | let addr = addr 117 | .to_socket_addrs()? 118 | .next() 119 | .ok_or(adhoc("Failed to parse address"))?; 120 | for chunk in self 121 | .chunker 122 | .lock() 123 | .expect("Lock failed, this is really bad") 124 | .chunk(msg)? 
125 | { 126 | debug!("Transmitting chunk of {} bytes to {addr}", chunk.len()); 127 | let hex_str = chunk.iter().map(|b| format!("{b:02X}")).collect::(); 128 | trace!("Transmitting chunk of hex {hex_str}"); 129 | self.socket.send_to(&chunk, addr)?; 130 | if let Some(throttle) = self.chunk_transmit_throttle { 131 | sleep(Duration::from_millis(throttle.into())); 132 | } 133 | } 134 | Ok(()) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /controller/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use clap::{arg, Parser, ValueEnum}; 3 | use log::{debug, error, info, trace}; 4 | use messages::{ApplicationAPI, Message}; 5 | use std::time::Duration; 6 | use transports::{Transport, UdpTransport, MAX_MTU}; 7 | 8 | #[derive(Parser, Debug, Clone)] 9 | #[clap(version, long_about = None, propagate_version = true)] 10 | #[clap(about = "Control a Myceli instance")] 11 | pub struct Cli { 12 | #[arg(help = "The network address that a myceli instance is listening on")] 13 | instance_addr: Option, 14 | #[arg( 15 | short, 16 | long, 17 | default_value = "512", 18 | help = "The MTU (in bytes) that messages are chunked into." 19 | )] 20 | mtu: u16, 21 | #[arg( 22 | short, 23 | long, 24 | help = "An optional delay (in milliseconds) between sending chunks." 25 | )] 26 | chunk_transmit_throttle: Option, 27 | #[arg( 28 | short, 29 | long, 30 | help = "Format to display the response message in. A value other than none implies --listen_mode", 31 | default_value = "none" 32 | )] 33 | output_format: Format, 34 | #[arg( 35 | short, 36 | long, 37 | help = "Listens for a response from the myceli instance. If --output-format is not specified, debug is assumed for backward compatibility." 
38 | )] 39 | listen_mode: bool, 40 | #[arg( 41 | short, 42 | long, 43 | default_value = "0.0.0.0:8200", 44 | help = "An optional network address to bind to" 45 | )] 46 | bind_address: String, 47 | #[clap(subcommand)] 48 | command: ApplicationAPI, 49 | } 50 | 51 | impl Cli { 52 | pub async fn run(&self) -> Result<()> { 53 | let mut transport = 54 | UdpTransport::new(&self.bind_address, self.mtu, self.chunk_transmit_throttle)?; 55 | transport 56 | .set_read_timeout(Some(Duration::from_secs(60 * 60))) 57 | .expect("Failed to set timeout"); 58 | let command = Message::ApplicationAPI(self.command.clone()); 59 | let cmd_str = serde_json::to_string(&command)?; 60 | info!("Transmitting: {}", &cmd_str); 61 | 62 | let instance_addr = if let Some(addr) = &self.instance_addr { 63 | addr.clone() 64 | } else { 65 | let cfg = config::Config::parse(None, &Message::fit_size) 66 | .expect("Please specify instance addr, as I can't read myceli.toml"); 67 | info!( 68 | "Address not specified, using the one found in config: {}", 69 | &cfg.listen_address 70 | ); 71 | cfg.listen_address 72 | }; 73 | transport.send(command, &instance_addr)?; 74 | if self.listen_mode { 75 | for i in 0..9 { 76 | trace!("Listening for response, attempt {i}"); 77 | match transport.receive() { 78 | Ok((Message::ApplicationAPI(msg), _)) => { 79 | let json = serde_json::to_string(&msg).unwrap(); 80 | info!("Received response: {msg:?} from {instance_addr} \nJSON: {json}"); 81 | match self.output_format { 82 | Format::Json => println!("{json}"), 83 | Format::Debug => println!("{msg:?}"), 84 | Format::None => panic!("Response received {msg:?} which implies listen_mode==true but output_format==None {self:?}"), 85 | } 86 | 87 | return Ok(()); 88 | } 89 | Ok((Message::Error(msg), _)) => { 90 | error!("Received error message: {msg}"); 91 | bail!("Server: {msg}"); 92 | } 93 | Err(e) => bail!("Error: {e:?}"), 94 | Ok((Message::DataProtocol(msg), _)) => { 95 | debug!("Ignoring shipper data protocol message {msg:?}"); 96 | 
} 97 | Ok((Message::Sync(msg), _)) => { 98 | debug!("Ignoring sync message {msg:?}"); 99 | } 100 | } 101 | } 102 | } 103 | 104 | Ok(()) 105 | } 106 | } 107 | 108 | #[tokio::main(flavor = "current_thread")] 109 | async fn main() -> Result<()> { 110 | env_logger::init(); 111 | 112 | let mut cli = Cli::parse(); 113 | 114 | if cli.mtu > MAX_MTU { 115 | bail!("Configured MTU is too large, cannot exceed {MAX_MTU}",); 116 | } 117 | if cli.output_format != Format::None { 118 | cli.listen_mode = true; 119 | } else if cli.listen_mode { 120 | cli.output_format = Format::Debug; 121 | } 122 | if matches!(cli.command, ApplicationAPI::RequestVersion { label: None }) { 123 | cli.command = ApplicationAPI::RequestVersion { 124 | label: Some("Requested by controller".to_owned()), 125 | }; 126 | } 127 | cli.run().await 128 | } 129 | 130 | #[derive(Clone, Parser, Debug, ValueEnum, Eq, PartialEq)] 131 | enum Format { 132 | None, 133 | Json, 134 | Debug, 135 | } 136 | -------------------------------------------------------------------------------- /myceli/tests/utils/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | use anyhow::Result; 4 | use assert_fs::fixture::ChildPath; 5 | use assert_fs::{fixture::FileWriteBin, fixture::PathChild, TempDir}; 6 | use blake2::{Blake2s256, Digest}; 7 | use file_hashing::get_hash_file; 8 | use myceli::listener::Listener; 9 | use rand::{rngs::StdRng, thread_rng, Rng, RngCore, SeedableRng}; 10 | use std::net::{SocketAddr, ToSocketAddrs}; 11 | use std::path::PathBuf; 12 | use std::sync::Arc; 13 | use std::thread::{sleep, spawn}; 14 | use transports::{Transport, UdpTransport}; 15 | 16 | const BLOCK_SIZE: u32 = 1024 * 3; 17 | 18 | pub fn wait_receiving_done(receiver: &TestListener, controller: &mut TestController) { 19 | let mut prev_num_blocks = 0; 20 | let mut num_retries = 0; 21 | 22 | loop { 23 | while controller.recv_msg().ok().is_some() { 24 | sleep(Duration::from_millis(1)); 25 | } 26 | 
let current_blocks = match controller 27 | .send_and_recv(&receiver.listen_addr, Message::request_available_blocks()) 28 | { 29 | Message::ApplicationAPI(messages::ApplicationAPI::AvailableBlocks { cids }) => cids, 30 | x => panic!("Failed to get ApplicationAPI::AvailableBlocks response to blocks request; got {x:?}"), 31 | }; 32 | let current_num_blocks = current_blocks.len(); 33 | if current_num_blocks > prev_num_blocks { 34 | prev_num_blocks = current_num_blocks; 35 | num_retries = 0; 36 | } else { 37 | if num_retries > 10 { 38 | break; 39 | } 40 | num_retries += 1; 41 | } 42 | sleep(Duration::from_millis(num_retries * num_retries + 1)); 43 | } 44 | } 45 | 46 | pub struct TestListener { 47 | pub listen_addr: String, 48 | pub test_dir: TempDir, 49 | } 50 | 51 | impl TestListener { 52 | pub fn new() -> TestListener { 53 | let test_dir = TempDir::new().unwrap(); 54 | let port_num = thread_rng().gen_range(6000..9000); 55 | let listen_addr = format!("127.0.0.1:{port_num}"); 56 | 57 | TestListener { 58 | listen_addr, 59 | test_dir, 60 | } 61 | } 62 | 63 | pub fn start(&self) -> Result<()> { 64 | let thread_listen_addr = self 65 | .listen_addr 66 | .to_owned() 67 | .to_socket_addrs() 68 | .map(|mut i| i.next().unwrap()) 69 | .unwrap(); 70 | let thread_db_path = self.test_dir.child("storage.db"); 71 | 72 | spawn(move || start_listener_thread(thread_listen_addr, thread_db_path)); 73 | 74 | // A little wait so the listener can get listening 75 | sleep(Duration::from_millis(50)); 76 | Ok(()) 77 | } 78 | 79 | pub fn generate_file(&self) -> Result { 80 | let mut data = Vec::::new(); 81 | data.resize(256 * 50, 1); 82 | let mut rng = StdRng::seed_from_u64(2); 83 | rng.fill_bytes(&mut data); 84 | 85 | let tmp_file = self.test_dir.child("test.file"); 86 | tmp_file.write_binary(&data)?; 87 | Ok(tmp_file.path().to_str().unwrap().to_owned()) 88 | } 89 | } 90 | 91 | fn start_listener_thread(listen_addr: SocketAddr, db_path: ChildPath) { 92 | let db_path = 
db_path.path().to_str().unwrap(); 93 | let listen_addr_str = listen_addr.to_string(); 94 | std::thread::sleep(Duration::from_millis(1)); 95 | let mut transport = UdpTransport::new(&listen_addr_str, 60, None).unwrap(); 96 | transport 97 | .set_read_timeout(Some(Duration::from_secs(10))) 98 | .unwrap(); 99 | transport.set_max_read_attempts(Some(1)); 100 | let transport = Arc::new(transport); 101 | let mut listener = 102 | Listener::new(&listen_addr, db_path, transport, BLOCK_SIZE, None, 9, 512).unwrap(); 103 | listener 104 | .start(10, 2, 1) 105 | .expect("Error encountered in listener"); 106 | } 107 | 108 | pub struct TestController { 109 | pub transport: UdpTransport, 110 | } 111 | 112 | impl TestController { 113 | pub fn new() -> Self { 114 | let mut transport = UdpTransport::new("127.0.0.1:0", 60, None).unwrap(); 115 | transport 116 | .set_read_timeout(Some(Duration::from_millis(9034))) 117 | .unwrap(); 118 | transport.set_max_read_attempts(Some(1)); 119 | TestController { transport } 120 | } 121 | 122 | pub fn send_and_recv(&mut self, target_addr: &str, message: Message) -> Message { 123 | self.send_msg(message, target_addr); 124 | std::thread::sleep(Duration::from_millis(710)); 125 | self.recv_msg().unwrap() 126 | } 127 | 128 | pub fn send_msg(&self, message: Message, target_addr: &str) { 129 | self.transport 130 | .send(message, target_addr) 131 | .expect("Transport send failed"); 132 | } 133 | 134 | pub fn recv_msg(&mut self) -> Result { 135 | Ok(self.transport.receive()?.0) 136 | } 137 | } 138 | 139 | #[allow(unused)] 140 | pub fn hash_file(path_str: &str) -> String { 141 | let path = PathBuf::from(path_str); 142 | let mut hash = Blake2s256::new(); 143 | get_hash_file(path, &mut hash).expect(path_str) 144 | } 145 | -------------------------------------------------------------------------------- /ipfs-unixfs/src/types.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | 3 | use anyhow::{anyhow, 
Result}; 4 | use bytes::Bytes; 5 | use cid::Cid; 6 | use libipld::error::{InvalidMultihash, UnsupportedMultihash}; 7 | use multihash::{Code, MultihashDigest}; 8 | 9 | use crate::{codecs::Codec, parse_links, unixfs::dag_pb}; 10 | 11 | #[derive(Debug)] 12 | pub struct LoadedCid { 13 | pub data: Bytes, 14 | pub source: Source, 15 | } 16 | 17 | #[derive(Debug, Clone, PartialEq, Eq)] 18 | pub enum Source { 19 | Bitswap, 20 | Http(String), 21 | Store(&'static str), 22 | } 23 | 24 | #[derive(Debug, Clone, PartialEq, Eq)] 25 | pub struct Block { 26 | cid: Cid, 27 | data: Bytes, 28 | links: Vec, 29 | } 30 | 31 | impl Block { 32 | pub fn new(cid: Cid, data: Bytes, links: Vec) -> Self { 33 | Self { cid, data, links } 34 | } 35 | 36 | pub fn cid(&self) -> &Cid { 37 | &self.cid 38 | } 39 | 40 | pub fn data(&self) -> &Bytes { 41 | &self.data 42 | } 43 | 44 | pub fn links(&self) -> &[Cid] { 45 | &self.links 46 | } 47 | 48 | pub fn raw_data_size(&self) -> Option { 49 | let codec = Codec::try_from(self.cid.codec()).unwrap(); 50 | match codec { 51 | Codec::Raw => Some(self.data.len() as u64), 52 | _ => None, 53 | } 54 | } 55 | 56 | /// Validate the block. Will return an error if the hash or the links are wrong. 57 | pub fn validate(&self) -> Result<()> { 58 | // check that the cid is supported 59 | let code = self.cid.hash().code(); 60 | let mh = Code::try_from(code) 61 | .map_err(|_| UnsupportedMultihash(code))? 
62 | .digest(&self.data); 63 | // check that the hash matches the data 64 | if mh.digest() != self.cid.hash().digest() { 65 | return Err(InvalidMultihash(mh.to_bytes()).into()); 66 | } 67 | // check that the links are complete 68 | let expected_links = parse_links(&self.cid, &self.data)?; 69 | let actual_links = self.links.clone(); 70 | anyhow::ensure!(expected_links == actual_links, "links do not match"); 71 | Ok(()) 72 | } 73 | 74 | pub fn into_parts(self) -> (Cid, Bytes, Vec) { 75 | (self.cid, self.data, self.links) 76 | } 77 | } 78 | 79 | #[derive(Debug, Clone, PartialEq, Eq)] 80 | pub struct Link { 81 | pub cid: Cid, 82 | pub name: Option, 83 | pub tsize: Option, 84 | } 85 | 86 | impl Link { 87 | pub fn as_ref(&self) -> LinkRef<'_> { 88 | LinkRef { 89 | cid: self.cid, 90 | name: self.name.as_deref(), 91 | tsize: self.tsize, 92 | } 93 | } 94 | } 95 | 96 | #[derive(Debug, Clone, PartialEq, Eq)] 97 | pub struct LinkRef<'a> { 98 | pub cid: Cid, 99 | pub name: Option<&'a str>, 100 | pub tsize: Option, 101 | } 102 | 103 | impl LinkRef<'_> { 104 | pub fn to_owned(&self) -> Link { 105 | Link { 106 | cid: self.cid, 107 | name: self.name.map(|t| t.to_string()), 108 | tsize: self.tsize, 109 | } 110 | } 111 | } 112 | 113 | #[derive(Debug)] 114 | pub enum Links<'a> { 115 | Raw, 116 | RawNode(PbLinks<'a>), 117 | Directory(PbLinks<'a>), 118 | File(PbLinks<'a>), 119 | Symlink(PbLinks<'a>), 120 | HamtShard(PbLinks<'a>), 121 | } 122 | 123 | #[derive(Debug)] 124 | pub struct PbLinks<'a> { 125 | i: usize, 126 | outer: &'a dag_pb::PbNode, 127 | } 128 | 129 | impl<'a> PbLinks<'a> { 130 | pub fn new(outer: &'a dag_pb::PbNode) -> Self { 131 | PbLinks { i: 0, outer } 132 | } 133 | } 134 | 135 | impl<'a> Iterator for Links<'a> { 136 | type Item = Result>; 137 | 138 | fn next(&mut self) -> Option { 139 | match self { 140 | Links::Raw => None, 141 | Links::Directory(links) 142 | | Links::RawNode(links) 143 | | Links::File(links) 144 | | Links::Symlink(links) 145 | | 
Links::HamtShard(links) => links.next(), 146 | } 147 | } 148 | 149 | fn size_hint(&self) -> (usize, Option) { 150 | match self { 151 | Links::Raw => (0, Some(0)), 152 | Links::Directory(links) 153 | | Links::RawNode(links) 154 | | Links::File(links) 155 | | Links::Symlink(links) 156 | | Links::HamtShard(links) => links.size_hint(), 157 | } 158 | } 159 | } 160 | 161 | impl<'a> Iterator for PbLinks<'a> { 162 | type Item = Result>; 163 | 164 | fn next(&mut self) -> Option { 165 | if self.i == self.outer.links.len() { 166 | return None; 167 | } 168 | 169 | let l = &self.outer.links[self.i]; 170 | self.i += 1; 171 | 172 | let res = l 173 | .hash 174 | .as_ref() 175 | .ok_or_else(|| anyhow!("missing link")) 176 | .and_then(|c| { 177 | Ok(LinkRef { 178 | cid: Cid::read_bytes(Cursor::new(c))?, 179 | name: l.name.as_deref(), 180 | tsize: l.tsize, 181 | }) 182 | }); 183 | 184 | Some(res) 185 | } 186 | 187 | fn size_hint(&self) -> (usize, Option) { 188 | (self.outer.links.len(), Some(self.outer.links.len())) 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /docs/poc-car-transmission.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | This is documenting the pieces and steps necessary for the basic CAR transmission proof of concept. This specific step doesn't involve implementing any additional code, rather it is tying existing tools together in a very manual workflow to demonstrate generating & transmitting a CAR file from the raspberry pi to a desktop computer for receiving & unpacking. 4 | 5 | # Setup 6 | 7 | ## Configure Raspberry PI 8 | 9 | The raspberry pi must be configured with an ethernet connection and static IP in order to communicate with the desktop computer. 10 | 11 | 1. Connect raspberry pi via ethernet to an ethernet switch or hub. 12 | 2. 
Open `/etc/dhcpcd.conf` on the raspberry pi, comment out any existing network configuration, and add the following lines: 13 | ``` 14 | interface eth0 15 | static ip_address=10.11.44.123 16 | static routers = 10.11.44.1 17 | ``` 18 | 3. Either restart the pi or run `/etc/init.d/dhcpcd restart` to reconfigure the network. 19 | 20 | ## Configure desktop 21 | 22 | The desktop computer must also be configure with an ethernet connection and static IP to communicate with the pi. 23 | 24 | 1. Connect the desktop computer via ethernet to the same switch or hub the pi is plugged into. 25 | 1. Configure the ethernet interface to have static IP `10.11.44.124`. 26 | 1. Create a directory `poc-car-transmission` to store the local binaries & files needed 27 | 28 | ## Software setup 29 | 30 | A few pieces of software will need to be built and configured before running this proof-of-concept exercise: [the kubos file service](https://github.com/kubos/kubos/tree/master/services/file-service), [the kubos file client](https://github.com/kubos/kubos/tree/master/clients), and [the car utility](https://github.com/ipfs-shipyard/space/tree/main/car-utility). 31 | 32 | ### Kubos File Service 33 | 34 | The kubos file service is a file transfer service built for satellite systems and will serve as a "simple" drop in file transfer service that can be built around in future MVPs. More information about this service can be found [here](https://docs.kubos.com/1.21.0/ecosystem/services/file.html). 35 | 36 | This service will be run on the raspberry pi, so it will need to be cross-compiled and transferred over. 37 | 38 | 1. Clone https://github.com/kubos/kubos and navigate to `kubos/services/file-service`. 39 | 1. Build with `cross build --target armv7-unknown-linux-gnueabihf`. 40 | 1. Transfer the binary `kubos/target/armv7-unknown-linux-gnueabihf/debug/file-service` to `/home/pi/file-service` on the raspberry pi. 41 | 1. 
On the raspberry pi, create a file named `config.toml` in `/home/pi/` with the following contents:
Run `./file-service -c config.toml --stdout`. 92 | 93 | On the desktop computer: 94 | 1. Navigate to the `poc-car-utility` directory. 95 | 2. Run `./kubos-file-client -h 10.11.44.124 -P 8080 -r 10.11.44.123 -p 8040 download Cargo.car`, it should output something like the following: 96 | ``` 97 | 16:58:55 [INFO] Starting file transfer client 98 | 16:58:55 [INFO] Downloading remote: Cargo.car to local: Cargo.car 99 | 16:58:55 [INFO] -> { import, Cargo.car } 100 | 16:58:55 [INFO] <- { 116885, true, 59ef596b0585681ca63adf49da13edd2, 1, 33188 } 101 | 16:58:55 [INFO] -> { 116885, 59ef596b0585681ca63adf49da13edd2, false, [0, 1] } 102 | 16:58:55 [INFO] <- { 116885, 59ef596b0585681ca63adf49da13edd2, 0, chunk_data } 103 | 16:58:57 [INFO] -> { 116885, 59ef596b0585681ca63adf49da13edd2, true, None } 104 | 16:58:57 [INFO] -> { 116885, true, 59ef596b0585681ca63adf49da13edd2 } 105 | 16:58:57 [INFO] Operation successful 106 | ``` 107 | 3. Run `./car-utility unpack Cargo.car Cargo.toml` 108 | 4. Verify the contents of `Cargo.toml` matches the contents of `space/car-utility/Cargo.toml` 109 | -------------------------------------------------------------------------------- /myceli/tests/listener_test.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | #[allow(unused)] 4 | use messages::{ApplicationAPI, Message}; 5 | use std::thread::sleep; 6 | use std::time::Duration; 7 | use utils::{TestController, TestListener}; 8 | 9 | #[test] 10 | pub fn test_verify_listener_alive() { 11 | // env_logger::init(); 12 | let listener = TestListener::new(); 13 | listener.start().unwrap(); 14 | 15 | let mut controller = TestController::new(); 16 | 17 | let response = 18 | controller.send_and_recv(&listener.listen_addr, Message::request_available_blocks()); 19 | 20 | assert_eq!(response, Message::available_blocks(vec![])); 21 | } 22 | 23 | #[cfg(feature = "proto_ship")] 24 | #[ignore] 25 | #[test] 26 | pub fn test_resume_dag_after_reconnect() { 27 | 
let transmitter = TestListener::new(); 28 | let receiver = TestListener::new(); 29 | let mut controller = TestController::new(); /* shared UDP test driver */ 30 | 31 | transmitter.start().unwrap(); 32 | receiver.start().unwrap(); 33 | 34 | let test_file_path = transmitter.generate_file().unwrap(); 35 | let resp = controller.send_and_recv( 36 | &transmitter.listen_addr, 37 | Message::import_file(&test_file_path), 38 | ); 39 | let root_cid = match resp { 40 | Message::ApplicationAPI(ApplicationAPI::FileImported { cid, .. }) => cid, 41 | other => panic!("Failed to receive FileImported msg {other:?}"), 42 | }; 43 | 44 | /* Mark the (simulated) link down before requesting the transfer. */ controller.send_msg( 45 | Message::ApplicationAPI(ApplicationAPI::SetConnected { connected: false }), 46 | &transmitter.listen_addr, 47 | ); 48 | 49 | controller.send_msg( 50 | Message::transmit_dag(&root_cid, &receiver.listen_addr, 0), 51 | &transmitter.listen_addr, 52 | ); 53 | 54 | utils::wait_receiving_done(&receiver, &mut controller); 55 | 56 | let receiver_blocks = 57 | controller.send_and_recv(&receiver.listen_addr, Message::request_available_blocks()); 58 | 59 | /* While the transmitter believes it is disconnected, nothing may arrive. */ assert_eq!( 60 | receiver_blocks, 61 | Message::ApplicationAPI(ApplicationAPI::AvailableBlocks { cids: vec![] }) 62 | ); 63 | 64 | /* Reconnect; the paused DAG transfer is expected to resume on its own. */ controller.send_msg( 65 | Message::ApplicationAPI(ApplicationAPI::SetConnected { connected: true }), 66 | &transmitter.listen_addr, 67 | ); 68 | 69 | utils::wait_receiving_done(&receiver, &mut controller); 70 | 71 | let receiver_blocks = 72 | controller.send_and_recv(&receiver.listen_addr, Message::request_available_blocks()); 73 | 74 | let transmitter_blocks = controller.send_and_recv( 75 | &transmitter.listen_addr, 76 | Message::request_available_blocks(), 77 | ); 78 | 79 | assert_eq!(receiver_blocks, transmitter_blocks); 80 | } 81 | 82 | /* retries=0 + link marked disconnected: the DAG must never be delivered. */ #[test] 83 | pub fn test_no_transmit_after_disconnect() { 84 | let transmitter = TestListener::new(); 85 | let receiver = TestListener::new(); 86 | let mut controller = TestController::new(); 87 | 88 | transmitter.start().unwrap(); 89 |
receiver.start().unwrap(); 90 | 91 | let test_file_path = transmitter.generate_file().unwrap(); 92 | let resp = controller.send_and_recv( 93 | &transmitter.listen_addr, 94 | Message::import_file(&test_file_path), 95 | ); 96 | let root_cid = match resp { 97 | Message::ApplicationAPI(ApplicationAPI::FileImported { cid, .. }) => cid, 98 | other => panic!("Failed to receive FileImported msg {other:?}"), 99 | }; 100 | 101 | controller.send_msg( 102 | Message::ApplicationAPI(ApplicationAPI::SetConnected { connected: false }), 103 | &transmitter.listen_addr, 104 | ); 105 | 106 | controller.send_msg( 107 | Message::transmit_dag(&root_cid, &receiver.listen_addr, 0), 108 | &transmitter.listen_addr, 109 | ); 110 | 111 | utils::wait_receiving_done(&receiver, &mut controller); 112 | 113 | let receiver_blocks = 114 | controller.send_and_recv(&receiver.listen_addr, Message::request_available_blocks()); 115 | 116 | /* Receiver must still hold no blocks at all. */ assert_eq!( 117 | receiver_blocks, 118 | Message::ApplicationAPI(ApplicationAPI::AvailableBlocks { cids: vec![] }) 119 | ); 120 | } 121 | 122 | /* Ship-protocol only: receiver starts late, then ResumeTransmitDag restarts the stalled transfer. */ #[cfg(feature = "proto_ship")] 123 | #[test] 124 | #[ignore] 125 | pub fn test_transmit_resume_after_timeout() { 126 | let transmitter = TestListener::new(); 127 | let receiver = TestListener::new(); 128 | let mut controller = TestController::new(); 129 | 130 | transmitter.start().unwrap(); 131 | 132 | let test_file_path = transmitter.generate_file().unwrap(); 133 | let resp = controller.send_and_recv( 134 | &transmitter.listen_addr, 135 | Message::import_file(&test_file_path), 136 | ); 137 | let root_cid = match resp { 138 | Message::ApplicationAPI(ApplicationAPI::FileImported { cid, ..
}) => cid, 139 | other => panic!("Failed to receive FileImported msg {other:?}"), 140 | }; 141 | 142 | controller.send_msg( 143 | Message::transmit_dag(&root_cid, &receiver.listen_addr, 1), 144 | &transmitter.listen_addr, 145 | ); 146 | 147 | sleep(Duration::from_secs(1)); 148 | 149 | receiver.start().unwrap(); 150 | 151 | let receiver_blocks = 152 | controller.send_and_recv(&receiver.listen_addr, Message::request_available_blocks()); 153 | 154 | assert_eq!( 155 | receiver_blocks, 156 | Message::ApplicationAPI(ApplicationAPI::AvailableBlocks { cids: vec![] }) 157 | ); 158 | 159 | controller.send_msg( 160 | Message::ApplicationAPI(ApplicationAPI::ResumeTransmitDag { cid: root_cid }), 161 | &transmitter.listen_addr, 162 | ); 163 | 164 | utils::wait_receiving_done(&receiver, &mut controller); 165 | 166 | let receiver_blocks = 167 | controller.send_and_recv(&receiver.listen_addr, Message::request_available_blocks()); 168 | 169 | let transmitter_blocks = controller.send_and_recv( 170 | &transmitter.listen_addr, 171 | Message::request_available_blocks(), 172 | ); 173 | 174 | assert_eq!(receiver_blocks, transmitter_blocks); 175 | } 176 | 177 | // TODO: need another test here to verify single-block transfers, they seem to have some issues that multi-block files don't have 178 | -------------------------------------------------------------------------------- /docs/setup-local-environment.md: -------------------------------------------------------------------------------- 1 | # Local Environment Setup Guide 2 | 3 | This folder contains instructions and tools for setting up a local development environment. Specifically one involving raspberrypi-based satellites and RFM69 radio links. This readme will go over the instructions for setting up a UDP-to-radio link between a computer and a raspberry pi. 
4 | 5 | ## Computer Environment 6 | 7 | ### Prerequisites 8 | 9 | These instructions assume the following are on hand or installed: 10 | - [Adafruit Feather 32u4 RFM69HCW Packet Radio](https://www.adafruit.com/product/3076) connected via USB 11 | - The latest version of Rust 12 | - The [Arduino IDE](https://www.arduino.cc/en/software) 13 | - netcat 14 | 15 | ### Radio Setup 16 | 17 | This will cover the installation of the radio firmware. 18 | 19 | First, it is highly recommended to follow the [antenna setup instructions](https://learn.adafruit.com/adafruit-feather-32u4-radio-with-rfm69hcw-module/antenna-options) for the radio. 20 | 21 | The Arduino IDE will be used to compile and install the radio firmware. Follow the [Arduino IDE setup instructions](https://learn.adafruit.com/adafruit-feather-32u4-radio-with-rfm69hcw-module/setup) and install the [RadioHead library](https://learn.adafruit.com/adafruit-feather-32u4-radio-with-rfm69hcw-module/using-the-rfm69-radio#radiohead-library-example-2328977) to prepare the IDE for usage. 22 | 23 | Once the Arduino IDE is setup, follow these instructions to load the radio firmware: 24 | 25 | 1. Fetch the appropriate branch of the [space](https://github.com/ipfs-shipyard/space). 26 | 1. Open the Arduino IDE, and use it to open the `space/local-dev-environment/desktop/rfm69-driver/driver` folder. 27 | 1. Click the `Select Board` drop down at the top of the editor window and select the `Adafruit Feather 32u4` option. 28 | * Write down the `/dev/...` path under `Adafruit Feather 32u4` for usage later. 29 | 1. Click the green circle with right pointing arrow to compile and upload the driver to the radio. 30 | 1. A little popup should appear saying _Done Uploading_ once this process is complete. 31 | 1. Now the Arduino IDE can be closed to free up the serial port. 32 | 33 | ### Radio Service Setup 34 | 35 | This will cover the setup of the udp-to-radio service. 36 | 37 | 1. 
Navigate to the `space/local-dev-environment/desktop/radio-service` directory. 38 | 1. Build the radio service with `cargo build`. 39 | 1. Start the radio service with following parameters: 40 | 41 | $ cargo run -- --uplink-address 127.0.0.1:8002 --downlink-address 127.0.0.1:8001 --serial-device /dev/radio-dev-path 42 | 43 | This command configures the radio service to listen for data to uplink on the socket address `127.0.0.1:8002`, and to downlink any radio data received to the socket address `127.0.0.1:8001`. Upon starting the service should output something like this: 44 | 45 | UDP Uplink on: 127.0.0.1:8002 46 | UPD Downlink on: 127.0.0.1:8001 47 | Serial radio on: /dev/radio-dev-path 48 | 49 | The desktop side of the radio service is now up and ready for communication! 50 | 51 | ## Raspberry Pi Environment 52 | 53 | ### Prerequisites 54 | 55 | These instructions assume the following are on-hand or installed: 56 | - [Adafruit RFM69HCW Transceiver Radio Bonnet 915Mhz](https://www.adafruit.com/product/4072) 57 | - An ssh connection into the raspberry pi 58 | - An internet connection for the raspberry pi 59 | 60 | ### Radio Setup 61 | 62 | This will cover the installation of the radio bonnet and necessary system libraries for communicating with it. 63 | 64 | First, it is highly recommended to follow the [antenna setup instructions](https://learn.adafruit.com/adafruit-radio-bonnets/antenna-options) prior to installing the radio bonnet on the raspberry pi. 65 | 66 | After the wire antenna is installed, the radio bonnet should be mounted on the raspberry pi's header, oriented such that the _Antenna_ text is floating over the microsd port. The raspberry pi should be powered off and unplugged prior to mounting the bonnet. The pi may be powered back on after the bonnet is firmly pressed down into the header. 
67 | 68 | Once the bonnet is installed, and the pi is powered back up, follow the [Update Your Pi and Python instructions](https://learn.adafruit.com/circuitpython-on-raspberrypi-linux/installing-circuitpython-on-raspberry-pi#update-your-pi-and-python-2993452) and the [Installing CircuitPython Libraries instructions](https://learn.adafruit.com/adafruit-radio-bonnets/rfm69-raspberry-pi-setup#installing-circuitpython-libraries-3016664) to install the necessary libraries to run the radio service. 69 | 70 | ### Radio Service 71 | 72 | This will cover the installation of the udp-to-radio service. 73 | 74 | 1. Navigate to the `space/local-dev-environment/raspberry-pi` directory. 75 | 1. Use SCP (or another file transfer mechanism) to move `service.py` to `/home/pi/service.py` on the raspberry pi. 76 | 1. Access the raspberry pi with an SSH or serial console and navigate to the `/home/pi` directory. 77 | 1. Start the radio service with the following parameters: 78 | 79 | $ python3 service.py 127.0.0.1:8002 127.0.0.1:8001 80 | 81 | This command configures the radio service to listen for data to uplink on the socket address `127.0.0.1:8002`, and to downlink any radio data received to the socket address `127.0.0.1:8001`. Upon starting the service should output something like this: 82 | 83 | Listening for UDP traffic on 127.0.0.1:8002 84 | Downlinking radio data to 127.0.0.1:8001 85 | 86 | The raspberry pi side of the radio service is now up and ready for communication! 87 | 88 | ## Verifying Radio Link 89 | 90 | Once the desktop and raspberry pi environments have been setup and have their radio services running, the radio link can be verified. The current radio services provide a UDP interface to the radio, which means that standard network tools can be used to test connectivity. In this case the [netcat](https://netcat.sourceforge.net/) tool will be used. 
91 | 92 | On the raspberry pi, run this command to set up a netcat instance listening for traffic from the radio: 93 | 94 | $ nc -ul 127.0.0.1 8001 95 | 96 | On the computer, run this command to send a UDP packet over the radio: 97 | 98 | $ echo "Hello Radio" | nc -u -w 0 127.0.0.1 8002 99 | 100 | The text "Hello Radio" should appear on the raspberry pi console! -------------------------------------------------------------------------------- /testing/testing-plan.md: -------------------------------------------------------------------------------- 1 | # Testing Plan 2 | 3 | This doc lays out a basic testing plan for verifying `myceli` functionality in a lab, dev, or live setup. 4 | 5 | This doc **will not** cover any hardware or system specifics around running `myceli`. This includes radio configuration, how/where to run `myceli`, or even how to configure `myceli`. All of these specifics are assumed to be system dependent and will change based on the hardware and its deployment configuration. 6 | 7 | This doc **will** cover generic behavioral testing plans for `myceli` which can be used to validate any `myceli` installation or configuration. 8 | 9 | ## Test Case - Verify Myceli Instances Alive 10 | 11 | Steps: 12 | 1. Using controller software, send the `RequestAvailableBlocks` command to the `myceli` ground instance. 13 | - This step passes if an `AvailableBlocks` response is received. Any other response / no response is a failure. 14 | 1. Using controller software, send the `RequestAvailableBlocks` command to the `myceli` space instance. 15 | - This step passes if an `AvailableBlocks` response is received. Any other response / no response is a failure. 16 | 17 | Command Details: 18 | - `RequestAvailableBlocks` takes no parameters. A `myceli` node which receives this command will respond with an `AvailableBlocks` message to the sending address.
19 | - Example: `./controller myceli-service:8001 -l request-available-blocks` 20 | 21 | This test case passes if both steps pass. 22 | 23 | ## Test Case - Transmit an IPFS File (Ground to Space) 24 | 25 | Steps: 26 | 1. Using the controller software, send the `ImportFile` command to the `myceli` ground instance with a known good path for the one-pass payload file. 27 | - This step passes if a `FileImported` response with CID is received. Any other response / no response is a failure. 28 | 1. Using the controller software, send the `TransmitDag` command to the `myceli` ground instance with the CID obtained from the `FileImported` response and with the network address of the ground-to-space radio link. 29 | 1. Using the controller software, send the `ValidateDag` command to the `myceli` space instance with the CID obtained from the `FileImported` response. 30 | - This step passes if a `ValidateDagResponse` response with `true` is received. Any other response / no response is a failure. 31 | 1. Using the controller software, send the `ExportDag` command to the `myceli` space instance with the CID obtained from the `FileImported` response and a writeable file path. 32 | - This step passes if `myceli` is able to correctly write a file to the given file path. 33 | 34 | Command Details: 35 | - **`ImportFile`** takes one parameter, the path of the file to import. A `myceli` node which receives this command will respond with a `FileImported` message to the sending address. 36 | - Example: `./controller myceli-service:8001 -l import-file /path/to/file` 37 | - **`TransmitDag`** takes three parameters: the CID of the dag to transfer, the address to transmit the dag to, and the number of retries allowed in the transfer. A `myceli` node which receives this command will begin a dag transmission session with the specified address, which is assumed to be another `myceli` node, and will not respond back to the sending address.
38 | - Example: `./controller myceli-service:8001 transmit-dag cid-here radio-service:8002 5` 39 | - **`ValidateDag`** takes one parameter: the CID of the dag to validate. A `myceli` node which receives this command will respond with a `ValidateDagResponse` to the sending address, which is assumed to be another `myceli` node. 40 | - Example: `./controller myceli-service:8001 -l validate-dag cid-here` 41 | - **`ExportDag`** takes two parameters: the CID of the dag to export and a filepath to export to. A `myceli` node which receives this command will not send a response back. 42 | - Example: `./controller myceli-service:8001 export-dag cid-here /path/to/file` 43 | 44 | This test case passes if the final step is successful and the resulting written file matches the onboard payload file. 45 | 46 | ## Test Case - Transmit Back & Forth, and Export File with IPFS 47 | 48 | Steps: 49 | 1. Using the controller software, send the `ImportFile` command to the `myceli` ground instance with a known good path for the one-pass payload file. 50 | - This step passes if a `FileImported` response with CID is received. Any other response / no response is a failure. 51 | 1. Using the controller software, send the `TransmitDag` command to the `myceli` ground instance with the CID obtained from the `FileImported` response and with the network address of the ground-to-space radio link. 52 | 1. Using the controller software, send the `ValidateDag` command to the `myceli` space instance with the CID obtained from the `FileImported` response. 53 | - This step passes if a `ValidateDagResponse` response with `true` is received. Any other response / no response is a failure. 54 | 1. Shut down the `myceli` ground instance, delete the storage database, and start the `myceli` ground instance again. 55 | 1.
Using the controller software, send the `TransmitDag` command to the `myceli` space instance with the CID obtained from the `FileImported` response and with the network address of the space-to-ground radio link. 56 | 1. Using the controller software, send the `ValidateDag` command to the `myceli` ground instance with the CID obtained from the `FileImported` response. 57 | - This step passes if a `ValidateDagResponse` response with `true` is received. Any other response / no response is a failure. 58 | 1. Using the controller software, send the `ExportDag` command to the `myceli` ground instance with the CID obtained from the `FileImported` response and a writeable file path. 59 | - This step passes if `myceli` is able to correctly write a file to the given file path. 60 | 61 | Command Details: 62 | - **`ImportFile`** takes one parameter, the path of the file to import. A `myceli` node which receives this command will respond with a `FileImported` message to the sending address. 63 | - Example: `./controller myceli-service:8001 -l import-file /path/to/file` 64 | - **`TransmitDag`** takes three parameters: the CID of the dag to transfer, the address to transmit the dag to, and the number of retries allowed in the transfer. A `myceli` node which receives this command will begin a dag transmission session with the specified address, which is assumed to be another `myceli` node, and will not respond back to the sending address. 65 | - Example: `./controller myceli-service:8001 transmit-dag cid-here radio-service:8002 5` 66 | - **`ValidateDag`** takes one parameter: the CID of the dag to validate. A `myceli` node which receives this command will respond with a `ValidateDagResponse` to the sending address. 67 | - Example: `./controller myceli-service:8001 -l validate-dag cid-here` 68 | - **`ExportDag`** takes two parameters: the CID of the dag to export and a filepath to export to. A `myceli` node which receives this command will not send a response back.
69 | - Example: `./controller myceli-service:8001 export-dag cid-here /path/to/file` 70 | 71 | This test case passes if the final step is successful and the resulting written file matches the originally transmitted payload file. -------------------------------------------------------------------------------- /testing/local_test/netperf.case.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | source `dirname "${0}"`/setup.env 3 | 4 | export file="${o}/fortiming.txt" 5 | echo "File for timing: ${file}" 6 | gen_file() { 7 | echo 'File start' > "${file}" 8 | date >> "${file}" 9 | yes '' | head -n $((1024 * 1024 * 3)) >> "${file}" 10 | date >> "${file}" 11 | echo 'File end' >> "${file}" 12 | } 13 | await_root() { 14 | for i in {0..99} 15 | do 16 | s=$((i + ${1})) 17 | rm ctl/output.log 2>/dev/null 18 | if ! ls -lt "${sd}/"{names,blocks,cids}/* 2>/dev/null > "${o}/store.ls.new" 19 | then 20 | sleep ${s} 21 | elif ! diff "${o}/store.ls."{new,old} >/dev/null 22 | then 23 | mv -v "${o}/store.ls."{new,old} 24 | sleep ${s} 25 | elif ! controller 8763 --output-format json list-files 26 | then 27 | sleep ${s} 28 | elif jq '.AvailableDags.dags[].filename' ctl/output.log | grep -q '"fortiming.txt"' 29 | then 30 | return 0 31 | fi 32 | sleep ${s} 33 | done 34 | false 35 | } 36 | await_validate() { 37 | for i in {0..9} 38 | do 39 | s=$(( i + ${1} )) 40 | if ! 
await_root ${s} 41 | then 42 | sleep "${s}" 43 | fi 44 | if controller 8763 --output-format json validate-dag "${cid}" >/dev/null 2>/dev/null 45 | then 46 | return 0 47 | else 48 | sleep ${s} 49 | fi 50 | done 51 | false 52 | } 53 | do_transmit() { 54 | echo "do_transmit(${*})" 55 | export sd="sat.${1}" 56 | sed -i 's/8764/8763/' "${sd}/config.toml" 57 | sed -i 's/8765/8764/' "${sd}/config.toml" 58 | sed -i 's/chatter_ms.*$/chatter_ms = 12345/' "${sd}/config.toml" 59 | unset cid 60 | kill_all 61 | gen_file 62 | export RUST_LOG=trace 63 | while killall udp_forward 64 | do 65 | sleep 1 66 | done 67 | sleep 1 68 | echo "Setting up forwarder with rate ${rate}" 69 | cargo run --bin udp_forward -- 127.0.0.1:876{4,3,5} "${rate}" > gnd/udp_forward.log & 70 | rm -v gnd/storage.db || true 71 | start_myceli gnd 72 | start_myceli "${sd}" 73 | check_log 8764.reported.that.it.supports.${1}.protocol gnd 74 | check_log 8764.reported.that.it.supports.${1}.protocol "${sd}" 75 | controller 8765 import-file "${file}" 76 | imports=0 77 | for it in {0..99} 78 | do 79 | controller 8765 --output-format json list-files || sleep "${it}" 80 | sleep ${it} 81 | if grep fortiming.txt ctl/output.log 82 | then 83 | export cid=$(jq -r ".AvailableDags.dags[] | select( .filename == \"fortiming.txt\" ).cid" ctl/output.log) 84 | fi 85 | if grep bafy <<< "${cid}" 86 | then 87 | echo "CID='${cid}'" 88 | elif [ $(( ++imports )) -gt 9 ] 89 | then 90 | echo "File ${file} never imported?!" 91 | exit 98 92 | else 93 | echo "Retry file import" 94 | controller 8765 import-file "${file}" 95 | sleep $(( ${it} + 9 )) 96 | continue 97 | fi 98 | export imports=0 99 | controller 8765 transmit-dag "${cid}" 127.0.0.1:8764 3 || sleep "${it}" 100 | sleep "${it}" 101 | if await_validate "${it}" 102 | then 103 | echo "Vehicle validated the transmitted DAG." 104 | break 105 | else 106 | echo "Retry transmit. 
${it} @ " `date` 107 | fi 108 | done 109 | sleep 1 110 | controller 8763 --output-format json export-dag "${cid}" 'exported.for.timing.txt' && jq '.DagExported.path' ctl/output.log | grep 'exported.for.timing.txt' 111 | for i in {0..9} 112 | do 113 | if [ -f "${sd}/exported.for.timing.txt" ] 114 | then 115 | break 116 | else 117 | echo "Waiting for ${sd}/exported.for.timing.txt" 118 | sleep $(( i + 9 )) 119 | fi 120 | done 121 | for i in {0..9} 122 | do 123 | if diff "${sd}/exported.for.timing.txt" "${file}" 124 | then 125 | break 126 | else 127 | echo "Waiting for ${sd}/exported.for.timing.txt to finish writing." 128 | sleep $(( i + 9 )) 129 | fi 130 | done 131 | if ! diff "${sd}/exported.for.timing.txt" "${file}" 132 | then 133 | echo "Transmission corrupted! ${sd}/exported.for.timing.txt != ${file}" 134 | exit 89 135 | fi 136 | # for appdir in gnd "${sd}" 137 | # do 138 | # for direct in send recv 139 | # do 140 | # for unit in packets bytes 141 | # do 142 | # echo "stats: ${appdir} ${direct} ${unit} " `stats ${appdir} ${direct} ${unit}` 143 | # done 144 | # done 145 | # done 146 | } 147 | 148 | export rate=19 149 | worser=0 150 | while [ ${worser} -le 99 ] 151 | do 152 | echo "rate=${rate}" 153 | export rate 154 | kill_all 155 | configure 765 156 | do_transmit ship 157 | echo 'Now collect stats.' 
158 | gsp=`stats gnd send packets` # ship-run counters: g/s = gnd/sat, s/r = send/recv, p/b = packets/bytes 159 | gsb=`stats gnd send bytes` 160 | grp=`stats gnd recv packets` 161 | grb=`stats gnd recv bytes` 162 | ssp=`stats sat.ship send packets` 163 | ssb=`stats sat.ship send bytes` 164 | srp=`stats sat.ship recv packets` 165 | srb=`stats sat.ship recv bytes` 166 | set +x # stop shell tracing before the summary comparisons 167 | 168 | if [ ${ssp} -gt ${grp} ] 169 | then 170 | echo $(( ssp - grp )) 'sat->gnd' packets lost 171 | elif [ ${grp} -gt $(( ${ssp} + 1 )) ] 172 | then 173 | echo gnd hallucinated $(( grp - ssp - 1 )) packets 174 | exit 9 175 | elif [ ${gsp} -gt ${srp} ] 176 | then 177 | echo $(( gsp - srp )) 'gnd->sat' packets lost # fix: gnd->sat loss = gnd-sent minus sat-received (was gsp - grp) 178 | elif [ ${srp} -gt $(( ${gsp} + 1 )) ] 179 | then 180 | echo sat.all hallucinated $(( srp - gsp - 1 )) packets 181 | exit 8 182 | fi 183 | 184 | do_transmit sync # rerun the identical transfer with the sync protocol for comparison 185 | 186 | gsp_=`stats gnd send packets` 187 | gsb_=`stats gnd send bytes` 188 | grp_=`stats gnd recv packets` 189 | grb_=`stats gnd recv bytes` 190 | ssp_=`stats sat.sync send packets` 191 | ssb_=`stats sat.sync send bytes` 192 | srp_=`stats sat.sync recv packets` 193 | srb_=`stats sat.sync recv bytes` 194 | 195 | for ship in {g,s}{s,r}{b,p} 196 | do 197 | sync=${ship}_ 198 | echo "${ship} vs ${sync} : ${!ship} ${!sync}" # indirect expansion: ship-run vs sync-run counter 199 | done 200 | env | grep '^all' | sort 201 | if [ ${ssb} -lt ${ssb_} ] 202 | then 203 | echo "Vehicle-sent bytes. " $(( ( ssb_ - ssb ) * 100 / ssb_ )) '% increase ' $((++worser)) 204 | else 205 | echo low = $((--worser)) 206 | fi 207 | if [ ${ssp} -lt ${ssp_} ] 208 | then 209 | echo "Vehicle-sent packets. " $(( ( ssp_ - ssp ) * 100 / ssp_ )) '% increase ' $((worser += 2)) 210 | else 211 | echo low = $((--worser)) 212 | fi 213 | if [ ${gsb} -lt ${gsb_} ] 214 | then 215 | echo "Ground-sent bytes. " $(( ( gsb_ - gsb ) * 100 / gsb_ )) '% increase ' $((worser += 3)) 216 | else 217 | echo low = $((--worser)) 218 | fi 219 | if [ ${gsp} -lt ${gsp_} ] 220 | then 221 | echo "Ground-sent packets. 
" $(( ( gsp_ - gsp ) * 100 / gsp_ )) '% increase ' $((worser += 4)) 222 | else 223 | echo low = $((--worser)) 224 | fi 225 | if [ $((gsb + ssb)) -lt $((gsb_ + ssb_)) ] 226 | then 227 | echo "Total sent bytes. " $(( ( gsb_ + ssb_ - gsb - ssb ) * 100 / (gsb_ + ssb_) )) '% increase ' $((worser += 5)) 228 | else 229 | echo low = $((--worser)) 230 | fi 231 | if [ $((gsp + ssp)) -lt $((gsp_ + ssp_)) ] 232 | then 233 | echo "Total sent packets. " $(( ( gsp_ + ssp_ - gsp - ssp ) * 100 / (gsp_ + ssp_) )) '% increase ' $((worser += 6)) 234 | else 235 | echo low = $((--worser)) 236 | fi 237 | echo "Test finished for rate 1:${rate}" 238 | export rate=$(( rate + 1 )) 239 | done 240 | fuser testing/local_test/timeout.killer.sh | xargs kill || true 241 | [ ${rate} -ge 99 ] 242 | -------------------------------------------------------------------------------- /messages/src/cid_list.rs: -------------------------------------------------------------------------------- 1 | use crate::err::{Error, Result}; 2 | use cid::{multihash::Multihash, Cid}; 3 | use ipfs_unixfs::codecs::Codec; 4 | use parity_scale_codec::{Compact, CompactLen, Encode}; 5 | use parity_scale_codec_derive::{Decode as ParityDecode, Encode as ParityEncode}; 6 | use serde::Serialize; 7 | use std::fmt::{Debug, Formatter}; 8 | 9 | #[derive( 10 | Copy, 11 | Clone, 12 | Debug, 13 | Eq, 14 | PartialEq, 15 | ParityEncode, 16 | ParityDecode, 17 | Serialize, 18 | Default, 19 | Ord, 20 | PartialOrd, 21 | )] 22 | pub struct Meta { 23 | #[codec(compact)] 24 | codec: u64, 25 | #[codec(compact)] 26 | algo: u64, 27 | // digest_len: u8, 28 | } 29 | 30 | #[derive(Clone, ParityEncode, ParityDecode, Serialize, Eq, PartialEq, Default)] 31 | pub struct CompactList { 32 | meta: Meta, 33 | digests: Vec>, 34 | 35 | #[codec(skip)] 36 | size: usize, 37 | } 38 | 39 | impl CompactList { 40 | fn assign(&mut self, cid: &Cid) -> Result<()> { 41 | let (meta, hash) = Meta::new(cid)?; 42 | self.digests = vec![hash.digest().into()]; 43 | self.meta = 
meta; 44 | self.size = self.encoded_size(); /* keep cached size in sync with the SCALE encoding */ 45 | Ok(()) 46 | } 47 | /* True iff cid carries this list's shared codec+algo and its digest is already stored. */ pub fn contains(&self, cid: &Cid) -> bool { 48 | if let Ok((m, h)) = Meta::new(cid) { 49 | if m != self.meta { 50 | return false; 51 | } 52 | self.contains_digest(h.digest()) 53 | } else { 54 | false 55 | } 56 | } 57 | /* Add cid if compatible and if the encoded list still fits in sz bytes; the first CID fixes the shared meta. Returns true when cid ends up present. */ pub fn include(&mut self, cid: &Cid, sz: usize) -> bool { 58 | if self.size == 0 { 59 | self.assign(cid).is_ok() 60 | } else if let Ok((m, h)) = Meta::new(cid) { 61 | if m != self.meta { 62 | return false; 63 | } 64 | let digest = h.digest(); 65 | if self.contains_digest(digest) { 66 | return true; 67 | } 68 | /* growth: digest bytes + its length prefix + any growth of the element-count prefix */ let delta = digest.len() + len_len(digest.len()) + len_len(self.digests.len() + 1) 69 | - len_len(self.digests.len()); 70 | if self.size + delta <= sz { 71 | self.digests.push(digest.into()); 72 | self.size += delta; 73 | true 74 | } else { 75 | false 76 | } 77 | } else { 78 | false 79 | } 80 | } 81 | /* Empty until the first successful assign(). */ pub fn is_empty(&self) -> bool { 82 | self.size == 0 83 | } 84 | /* The (codec, algo) pair common to every entry. */ pub fn shared_traits(&self) -> Meta { 85 | self.meta 86 | } 87 | /* Linear scan over the stored digests. */ fn contains_digest(&self, digest: &[u8]) -> bool { 88 | self.digests.iter().any(|d| d.as_slice() == digest) 89 | } 90 | /* Cached encoded size maintained by assign()/include(). */ pub fn built_size(&self) -> usize { 91 | self.size 92 | } 93 | } 94 | impl TryFrom<&Cid> for CompactList { 95 | type Error = Error; 96 | 97 | fn try_from(value: &Cid) -> Result { 98 | let mut result = CompactList::default(); 99 | result.assign(value)?; 100 | Ok(result) 101 | } 102 | } 103 | impl TryFrom for CompactList { 104 | type Error = Error; 105 | 106 | fn try_from(value: Cid) -> Result { 107 | Self::try_from(&value) 108 | } 109 | } 110 | /* Debug form: codec, hash algo (only when not SHA2-256), first CID and entry count. */ impl Debug for CompactList { 111 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 112 | if let Ok(cdc) = Codec::try_from(self.meta.codec) { 113 | write!(f, "CIDs({:?} ", cdc)?; 114 | } 115 | if let Ok(alg) = cid::multihash::Code::try_from(self.meta.algo) { 116 | if alg != cid::multihash::Code::Sha2_256 { 117 | write!(f, "{alg:?} ")?; 118 | } 119 | } 120 | if let Some(c) = 
self.into_iter().next() { 121 | let l = self.digests.len(); 122 | write!(f, "{};[N={l}])", c) 123 | } else { 124 | write!(f, "EMPTY!)") 125 | } 126 | } 127 | } 128 | 129 | /* Bytes needed to SCALE-compact-encode i (saturates to u64::MAX on overflow). */ fn len_len(i: usize) -> usize { 130 | let i: u64 = i.try_into().unwrap_or(u64::MAX); 131 | Compact::::compact_len(&i) 132 | } 133 | 134 | impl Meta { 135 | /* Extracts the shared (codec, algo) plus the multihash; CIDv0 is upgraded to v1 first so entries compare consistently. */ fn new(cid: &Cid) -> Result<(Self, Multihash)> { 136 | if cid.version() == cid::Version::V0 { 137 | return Self::new(&cid.into_v1()?); 138 | } 139 | let h = cid.hash(); 140 | let me = Self { 141 | codec: cid.codec(), 142 | algo: h.code(), 143 | // digest_len: h.size(), 144 | }; 145 | Ok((me, *h)) 146 | } 147 | } 148 | 149 | impl TryFrom<&Cid> for Meta { 150 | type Error = Error; 151 | 152 | fn try_from(value: &Cid) -> Result { 153 | Self::new(value).map(|x| x.0) 154 | } 155 | } 156 | impl TryFrom for Meta { 157 | type Error = Error; 158 | 159 | fn try_from(value: Cid) -> Result { 160 | Self::try_from(&value) 161 | } 162 | } 163 | 164 | /* Iteration re-assembles full v1 CIDs from the shared meta plus each stored digest. */ impl<'a> IntoIterator for &'a CompactList { 165 | type Item = Cid; 166 | type IntoIter = CompactListIter<'a>; 167 | 168 | fn into_iter(self) -> Self::IntoIter { 169 | CompactListIter { l: self, i: 0 } 170 | } 171 | } 172 | 173 | /* Index cursor over a borrowed CompactList. */ #[derive(Clone, Copy)] 174 | pub struct CompactListIter<'a> { 175 | l: &'a CompactList, 176 | i: usize, 177 | } 178 | 179 | impl<'a> Iterator for CompactListIter<'a> { 180 | type Item = Cid; 181 | 182 | /* Returns None at the end, or early if a stored digest fails to re-wrap into a CID. */ fn next(&mut self) -> Option { 183 | if self.i < self.l.digests.len() { 184 | let h = Multihash::wrap(self.l.meta.algo, &self.l.digests[self.i]).ok()?; 185 | let result = Cid::new(cid::Version::V1, self.l.meta.codec, h).ok()?; 186 | self.i += 1; 187 | Some(result) 188 | } else { 189 | None 190 | } 191 | } 192 | } 193 | 194 | #[cfg(test)] 195 | mod tests { 196 | use super::*; 197 | 198 | #[test] 199 | fn check_sizes() { 200 | let mut t: CompactList = Cid::try_from("QmfYEZk4qQNFUemHDwRZe9Cxg1U8aMhhAsLFz3JXBvn4WL") 201 | .unwrap() 202 | .try_into() 203 | .unwrap() 204 |
assert_eq!(t.meta.codec, 0x70); 205 | const CHUNK_SIZE: usize = 500; 206 | //These are all V0 and thus all the meta is shared 207 | let cids = &[ 208 | "QmYMq3DPTgD1pNvprFryigLeHvbDzGMZrmLovgAGNgdtVj", 209 | "QmSBjoLPJtDm7XfDrHuAkLe3fFjUmeDJ9EusAG4Q5zozsa", 210 | "QmSjBJV94TzRpvPcDsT4zM23VLvHuSKGVRbSvBf7HSHR2h", 211 | "QmPEuX1jLFFEw8Cps9hrRhENt2bo6z6sCp2vdzRRCZui2w", 212 | "QmThhbkE7WunooPSU2YxmYqJTbt2NJMz43noBwyhVPLUGU", 213 | "QmQyfg1KWwNyrgTq2MEHnz5bMgWVaUhQ79AHV8DxMz3Egy", 214 | "QmcbR8nUYKhy7bc93K5PoP1hHLeLipXUJJZ6cNurUrms6Q", 215 | "QmSS5Ecov1VxxRAA5fYBsQYSPzq15GL7yntw99R4D8ehSH", 216 | "QmVVrrUkqNECz3qF6HeBgzMfQo75zswpp5Ux6fEAQEDHqi", 217 | "QmRrAYCw9Gwi1hDsEsJf9gutW7Xy5aFLezpsqaZFSqyshA", 218 | "QmebrayY6dntCg7mDp7GSycLPjCb7PStqipt5zrgG9y9cA", 219 | "QmeNiriJ7ou4Cn1tb6P5ratfhTQRMFZxbqbkM5dbcDejoZ", 220 | "QmQC5dCzH5smMAcVdPuyMKq2zJgHHQzf4d2Dq3Bcyc1s5Y", 221 | "QmWPaG8xhbonT2jnzBF78X7emd82imMfStMaM22pttMw8j", 222 | ]; 223 | for c in cids { 224 | assert_eq!(t.size, t.encoded_size()); 225 | let c = Cid::try_from(*c).unwrap(); 226 | assert!(t.include(&c, CHUNK_SIZE), "{c:?}"); 227 | } 228 | assert!(t.encoded_size() <= CHUNK_SIZE); 229 | } 230 | } 231 | -------------------------------------------------------------------------------- /messages/src/message.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | api::ApplicationAPI, 3 | cid_list, 4 | err::{Error, Result}, 5 | sync::{PushMessage, SyncMessage}, 6 | }; 7 | #[cfg(feature = "proto_ship")] 8 | use crate::{protocol::DataProtocol, TransmissionBlock}; 9 | use parity_scale_codec::Encode; 10 | use parity_scale_codec_derive::{Decode as ParityDecode, Encode as ParityEncode}; 11 | use serde::Serialize; 12 | 13 | #[derive(Clone, Debug, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)] 14 | pub struct Unsupported {} 15 | #[derive(Clone, Debug, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)] 16 | pub enum Message { 17 | #[cfg(feature = "proto_ship")] 18 
| DataProtocol(DataProtocol), 19 | #[cfg(not(feature = "proto_ship"))] 20 | DataProtocol(Unsupported), 21 | 22 | ApplicationAPI(ApplicationAPI), 23 | Error(String), 24 | 25 | Sync(SyncMessage), 26 | } 27 | 28 | impl Message { 29 | pub fn to_bytes(&self) -> Vec { 30 | self.encode() 31 | } 32 | 33 | pub fn to_hex(&self) -> String { 34 | let mut hex_str = String::new(); 35 | 36 | for b in self.to_bytes() { 37 | hex_str = format!("{}{:02X}", hex_str, b); 38 | } 39 | 40 | hex_str 41 | } 42 | 43 | // All functions below are helper functions for generating messages 44 | 45 | pub fn available_blocks(cids: Vec) -> Self { 46 | Message::ApplicationAPI(ApplicationAPI::AvailableBlocks { cids }) 47 | } 48 | pub fn request_available_blocks() -> Self { 49 | Message::ApplicationAPI(ApplicationAPI::RequestAvailableBlocks) 50 | } 51 | 52 | pub fn transmit_block(cid: &str, target_addr: &str) -> Self { 53 | Message::ApplicationAPI(ApplicationAPI::TransmitBlock { 54 | cid: cid.to_string(), 55 | target_addr: target_addr.to_string(), 56 | }) 57 | } 58 | 59 | pub fn transmit_dag(cid: &str, target_addr: &str, retries: u8) -> Self { 60 | Message::ApplicationAPI(ApplicationAPI::TransmitDag { 61 | cid: cid.to_string(), 62 | target_addr: target_addr.to_string(), 63 | retries, 64 | }) 65 | } 66 | 67 | pub fn import_file(path: &str) -> Self { 68 | Message::ApplicationAPI(ApplicationAPI::ImportFile { 69 | path: path.to_string(), 70 | }) 71 | } 72 | 73 | pub fn export_dag(cid: &str, path: &str) -> Self { 74 | Message::ApplicationAPI(ApplicationAPI::ExportDag { 75 | cid: cid.to_string(), 76 | path: path.to_string(), 77 | }) 78 | } 79 | 80 | pub fn get_missing_dag_blocks(cid: &str) -> Self { 81 | Message::ApplicationAPI(ApplicationAPI::GetMissingDagBlocks { 82 | cid: cid.to_string(), 83 | }) 84 | } 85 | 86 | #[cfg(feature = "proto_ship")] 87 | pub fn data_block(block: TransmissionBlock) -> Self { 88 | Message::DataProtocol(DataProtocol::Block(block)) 89 | } 90 | 91 | #[cfg(feature = "proto_ship")] 92 
| pub fn request_missing_dag_blocks(cid: &str) -> Self { 93 | Message::DataProtocol(DataProtocol::RequestMissingDagBlocks { 94 | cid: cid.to_owned(), 95 | }) 96 | } 97 | 98 | #[cfg(feature = "proto_ship")] 99 | pub fn request_missing_dag_window_blocks(cid: &str, blocks: Vec) -> Self { 100 | Message::DataProtocol(DataProtocol::RequestMissingDagWindowBlocks { 101 | cid: cid.to_owned(), 102 | blocks, 103 | }) 104 | } 105 | 106 | #[cfg(feature = "proto_ship")] 107 | pub fn missing_dag_blocks(cid: &str, blocks: Vec) -> Self { 108 | Message::DataProtocol(DataProtocol::MissingDagBlocks { 109 | cid: cid.to_owned(), 110 | blocks, 111 | }) 112 | } 113 | 114 | pub fn request_version(resp_label: String) -> Self { 115 | Message::ApplicationAPI(ApplicationAPI::RequestVersion { 116 | label: Some(resp_label), 117 | }) 118 | } 119 | 120 | pub fn push(cids: cid_list::CompactList, name: String) -> Result { 121 | if cids.is_empty() { 122 | Err(Error::EmptyCidList) 123 | } else { 124 | Ok(Self::Sync(SyncMessage::Push(PushMessage::new(cids, name)))) 125 | } 126 | } 127 | 128 | #[cfg(feature = "proto_sync")] 129 | pub fn pull(cids: cid_list::CompactList) -> Self { 130 | Self::Sync(SyncMessage::Pull(cids)) 131 | } 132 | 133 | #[cfg(feature = "proto_sync")] 134 | pub fn block(block_bytes: Vec) -> Self { 135 | Self::Sync(SyncMessage::Block(block_bytes)) 136 | } 137 | 138 | pub fn needs_envelope(&self) -> bool { 139 | !matches!(self, Self::Sync(_)) 140 | } 141 | 142 | pub fn fit_size(_within: u16) -> u16 { 143 | #[cfg(feature = "proto_sync")] 144 | { 145 | let mut v = vec![0u8; _within as usize - crate::PUSH_OVERHEAD]; 146 | loop { 147 | if Self::block(v.clone()).encoded_size() < _within.into() { 148 | if let Ok(result) = v.len().try_into() { 149 | return result; 150 | } else { 151 | v.pop(); 152 | } 153 | } else { 154 | v.pop(); 155 | } 156 | } 157 | } 158 | #[cfg(not(feature = "proto_sync"))] 159 | { 160 | 3 * 1024 161 | } 162 | } 163 | pub fn name(&self) -> &'static str { 164 | match 
&self { 165 | Self::DataProtocol(_) => "Data", 166 | Self::ApplicationAPI(_) => "API", 167 | Self::Error(_) => "Error", 168 | Self::Sync(_m) => { 169 | #[cfg(feature = "proto_sync")] 170 | { 171 | _m.name() 172 | } 173 | #[cfg(not(feature = "proto_sync"))] 174 | "UnsupportedSyncMessage" 175 | } 176 | } 177 | } 178 | 179 | pub fn target_addr(&self) -> Option { 180 | match &self { 181 | Self::ApplicationAPI(ApplicationAPI::TransmitBlock { target_addr, .. }) => { 182 | Some(target_addr.clone()) 183 | } 184 | Self::ApplicationAPI(ApplicationAPI::TransmitDag { target_addr, .. }) => { 185 | Some(target_addr.clone()) 186 | } 187 | _ => None, 188 | } 189 | } 190 | 191 | pub fn ack(desc: &str) -> Option { 192 | Some(Message::ApplicationAPI(ApplicationAPI::Acknowledged { 193 | req: desc.to_string(), 194 | })) 195 | } 196 | } 197 | 198 | #[cfg(all(feature = "proto_sync", feature = "proto_ship"))] 199 | #[cfg(test)] 200 | mod tests { 201 | use super::*; 202 | use cid::Cid; 203 | use std::str::FromStr; 204 | 205 | #[test] 206 | fn onepackettypemessagebiggerthanmtuwhenotherfitsjust() { 207 | let block_size = Message::fit_size(512); 208 | assert_eq!(block_size, 495); 209 | let block = TransmissionBlock { 210 | cid: Cid::from_str("bafkreieifgj3kxgayut7bjqftnu3h6xu546mxhhm2pmii7fa4snbirg6xy") 211 | .unwrap() 212 | .to_bytes(), 213 | data: vec![0u8; 495], 214 | links: vec![], 215 | filename: None, 216 | }; 217 | let m = Message::data_block(block); 218 | let sz = m.encoded_size(); 219 | assert!(sz > 512, "{sz} should be > 512"); 220 | assert_eq!(sz, 538); 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /testing/local_test/funcs.env: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | if [ "${o}" = '' ] 4 | then 5 | export o=`mktemp -d` 6 | fi 7 | 8 | stop() { 9 | find ${2-*}/ -name "*${1}*" -type f -executable -exec fuser '{}' \; 2>/dev/null | while read p 10 | do 11 | kill 
${p} 12 | done 13 | if [ $# -eq 1 ] 14 | then 15 | killall ${1} 2>/dev/null || true 16 | fi 17 | } 18 | kill_all() { 19 | for p in myceli controller hyphae watcher 20 | do 21 | stop ${p} 22 | killall ${p} 2>/dev/null || true # echo "${p} is stopped" 23 | done 24 | for f in {gnd,sat.{all,sync,ship},ctl}/* 25 | do 26 | fuser "${f}" 2>/dev/null | xargs kill 2>/dev/null || true 27 | done 28 | } 29 | 30 | check_log() { 31 | if [ $# -lt 2 ] 32 | then 33 | echo 'Specify log directory.' 34 | exit 2 35 | fi 36 | l=${2} 37 | for i in {0..18} 38 | do 39 | if ls ${l}/${3-*}.log >/dev/null 40 | then 41 | grep --extended-regexp --max-count=1 --color=always "${1}" ${l}/${3-*}.log && return 42 | else 43 | sleep 9 44 | fi 45 | sleep $i 46 | done 47 | echo `date` ' Failed to find ' "${1}" ' in these logs:' 48 | ls -lrth --color=always ${l}/${3-*}.log 49 | echo ' ...elsewhere... ' 50 | grep --extended-regexp --color=always "${1}" */*.log 51 | kill_all 52 | fuser "${0}" | xargs kill 2>/dev/null 53 | exit 1 54 | } 55 | 56 | kill_pid() { 57 | for i in {0..9} 58 | do 59 | if [ -d /proc/${1}/ ] 60 | then 61 | kill ${1} || true 62 | sleep ${i} 63 | else 64 | return 0 65 | fi 66 | done 67 | echo "Failed to kill ${1}" 68 | exit 7 69 | } 70 | kill_myceli() { 71 | export c="$1" 72 | if grep -q pid= ${c}/myceli.log 2>/dev/null 73 | then 74 | kill_pid `grep pid= ${c}/myceli.log | cut -d = -f 2` 75 | fi 76 | for i in {0..9} 77 | do 78 | for p in `fuser ${c}/myceli` 79 | do 80 | echo "Pid ${p} using ${c}/myceli - kill" 81 | kill "${p}" 82 | sleep $i 83 | done 84 | done 85 | stop myceli ${c} 86 | } 87 | start() { 88 | #echo start "${@}" 89 | [ $# -lt 2 ] && exit 9 90 | stop ${1} ${2} 91 | sleep 1 92 | b=${1} 93 | shift 94 | d="${1}" 95 | shift 96 | ( 97 | ( 98 | cd "${d}" 99 | "./${b}" "${@}" > "${b}.log" 2>&1 <&- & 100 | ) >/dev/null 2>&1 & 101 | ) >/dev/null 2>&1 & 102 | echo "Starting (in ${d}) ./${b} ${*} > ${b}.log" 103 | sleep 1 104 | } 105 | port_open() { 106 | if nc -u -z -w 9 127.0.0.1 
${1} 2>/dev/null 107 | then 108 | return 0 109 | elif nc -z -w 9 127.0.0.1 ${1} 2>/dev/null 110 | then 111 | #echo "Port ${1} is TCP" 112 | return 0 113 | else 114 | echo "port ${1} not yet open" 115 | false 116 | fi 117 | } 118 | port_for() { 119 | grep listen_address "${1}/config.toml" | sed 's/^.*:\([0-9]*\)".*$/\1/' 120 | } 121 | start_myceli() { 122 | kill_myceli "${1}" 123 | while port_open `port_for "${1}"` 124 | do 125 | sleep 9 126 | done 127 | export c="$1" 128 | if [ "${RUST_LOG}" != trace ] 129 | then 130 | export RUST_LOG=debug 131 | fi 132 | sleep 9 133 | start myceli ${c} config.toml 134 | until [ -f ${c}/myceli.log ] 135 | do 136 | sleep 9 137 | done 138 | sleep 9 139 | until port_open `port_for "${1}"` 140 | do 141 | sleep 9 142 | done 143 | sleep 9 144 | check_log 'pid=' ${c} >/dev/null 145 | sleep 9 146 | } 147 | 148 | configure() { 149 | cat > sat.all/config.toml < gnd/config.toml < gnd/hyphae.toml < ctl/controller.log > ctl/output.log 187 | then 188 | echo -e "\n\t #### \t $(date) \t ### \t Controller command failed: \t ### \t ./ctl/controller --listen-mode 127.0.0.1:${port} ${*} \t ###" 189 | false 190 | fi 191 | } 192 | cid_present() { 193 | if [ -f ${1}/cids/${2} ] 194 | then 195 | true 196 | elif [ -f ${1}/storage.db ] 197 | then 198 | for i in {0..9} 199 | do 200 | if sqlite3 ${1}/storage.db "select length(data) from blocks where cid = '${2}';" | grep -q '[1-9]' 201 | then 202 | return 0 203 | fi 204 | sleep $i 205 | done 206 | false 207 | else 208 | false 209 | fi 210 | } 211 | other_side() { 212 | if grep -q sat <<< "${1}" 213 | then 214 | echo -n gnd 215 | elif [ "${sd}" = '' ] 216 | then 217 | echo -n sat.all 218 | else 219 | echo -n "${sd}" 220 | fi 221 | } 222 | transmit() { 223 | cid_present ${3} ${cid} 224 | b=`other_side ${3}` 225 | ! 
cid_present ${b} ${cid} 226 | echo "transmit: ./ctl/controller 127.0.0.1:${1} transmit-dag \"${cid}\" 127.0.0.1:${2} 99" 227 | timeout 9 ./ctl/controller 127.0.0.1:${1} transmit-dag "${cid}" 127.0.0.1:${2} 99 > ctl/controller.log 2>&1 228 | for i in {0..9}1 229 | do 230 | if cid_present ${b} ${cid} 231 | then 232 | return 0 233 | else 234 | sleep ${i} 235 | fi 236 | done 237 | echo "$(date) ${cid} never showed up on ${b}" 238 | exit 8 239 | } 240 | g2s() { 241 | if [ "${cid}" = '' ] 242 | then 243 | echo "g2s(${*}) called without cid set!" 244 | exit 9 245 | fi 246 | echo "Transmit ${cid} from ground to satellite..." 247 | transmit 8765 8764 gnd 248 | } 249 | s2g() { 250 | if [ "${cid}" = '' ] 251 | then 252 | echo "s2g(${*}) called without cid set!" 253 | exit 9 254 | fi 255 | echo "Transmit ${cid} from satellite to ground..." 256 | transmit 8764 8765 sat.all 257 | } 258 | 259 | #stats appdir recv/send packets/bytes 260 | stats() { 261 | if [ ! -d "${1}" ] 262 | then 263 | echo "stats(${*}) ${1} is not an existing appdir" 264 | exit 77 265 | elif [ $# -lt 3 ] 266 | then 267 | echo "stats($*) not enough args" 268 | exit 9 269 | elif [ $# -eq 3 ] 270 | then 271 | case "${2}" in 272 | recv) 273 | stats "${@}" 'Received [0-9]* bytes from ' 274 | ;; 275 | send) 276 | stats "${@}" 'Transmitting chunk of [0-9]* bytes to ' 277 | ;; 278 | *) 279 | echo "stats(${*}) - ${2} is wrong" 280 | exit 8 281 | ;; 282 | esac 283 | return 284 | else 285 | ( 286 | set -e 287 | cd "${1}" >/dev/null 288 | if [ ! 
-f myceli.log ] 289 | then 290 | echo myceli.log missing in `pwd` 291 | exit 9 292 | fi 293 | radio=`grep radio config.toml | cut -d '"' -f 2` 294 | export out="${2}.${3}" 295 | case "${3}" in 296 | packets) 297 | grep "${4}.*${radio}" myceli.log | wc -l > "${out}" 298 | ;; 299 | bytes) 300 | echo -n 0 > "${out}" 301 | if grep -q "${4}.*${radio}" myceli.log 302 | then 303 | grep "${4}.*${radio}" myceli.log | sed 's/^.*\ \([0-9]*\) bytes.*$/\1/' | while read numbyt 304 | do 305 | if grep -q '[1-9]' "${out}" 306 | then 307 | total=$(( `cat "${out}"` + numbyt )) 308 | echo -n ${total} > "${out}" 309 | else 310 | echo -n "${numbyt}" > "${out}" 311 | fi 312 | done 313 | fi 314 | ;; 315 | *) 316 | echo "stats(${*}) - ${3} is wrong" 317 | exit 7 318 | ;; 319 | esac 320 | cat "${out}" 321 | ) 322 | fi 323 | } 324 | 325 | -------------------------------------------------------------------------------- /docs/myceli-basic-setup.md: -------------------------------------------------------------------------------- 1 | # Myceli Basic Setup and Usage Guide 2 | 3 | This document covers the steps required to get a `myceli` instance up and running on both a raspbery-pi and local computer, and to begin transferring data between the two instances. 4 | 5 | ## Dependencies 6 | 7 | These system dependencies are required to build: 8 | - Rust v1.63 9 | - [Protobuf compiler](https://github.com/protocolbuffers/protobuf#protocol-compiler-installation): Download it from the [Protobuf Releases page](https://github.com/protocolbuffers/protobuf/releases) 10 | - Docker 11 | 12 | ## Prerequisites 13 | 14 | This guide assumes you have already followed the steps in the [`Setup Local Environment Guide`](setup-local-environment.md), if you haven't then please work through that first. 15 | 16 | Install `cross` tool for the cross-compiling environment: 17 | 18 | $ cargo install cross --git https://github.com/cross-rs/cross 19 | 20 | Make sure `Docker` is installed and running. 
21 | 22 | ## Build Myceli 23 | 24 | The first step in using `myceli` to transfer data is building it for the raspberry-pi and local computer. 25 | 26 | ### Building for the raspberry-pi 27 | 28 | Navigate to the root `space` directory and run the following build command: 29 | 30 | $ CROSS_CONFIG=Cross.toml cross build --bin myceli --target armv7-unknown-linux-gnueabihf 31 | 32 | This will kick off the cross-compiling process for the `myceli` project. After it has completed, you will find the finished binary at `space/target/armv7-unknown-linux-gnueabihf/debug/myceli`. This binary can now be transferred to the raspberry-pi for usage. A typical way to transfer this binary is with `scp`, like so: 33 | 34 | $ scp target/armv7-unknown-linux-gnueabihf/debug/myceli pi@pi-address:/home/pi/ 35 | 36 | ### Building for the local computer 37 | 38 | Navigate to the root `space` directory and run the following build command: 39 | 40 | $ cargo build --bin myceli 41 | 42 | This will kick off the build process for the `myceli` binary. After it has completed, you will find the finished binary at `space/target/debug/myceli`. 43 | 44 | ## Running Myceli 45 | 46 | After `myceli` has been built for the appropriate environments it needs to be run with the correct configuration info. 47 | 48 | ### Running on the raspberry-pi 49 | 50 | Use ssh to access the raspberry-pi and navigate to the `/home/pi` directory. Start a `myceli` instance with the following command: 51 | 52 | $ ./myceli 53 | 54 | This command assumes that the pi currently has a radio service running which is downlinking to the address `127.0.0.1:8001`, as specified in the local environment setup guide. 
55 | 56 | A log message should appear indicating that `myceli` is up and listening: 57 | 58 | $ INFO myceli::listener: Listening for messages on 127.0.0.1:8001 59 | 60 | ### Running on the local computer 61 | 62 | Navigate to the `space/myceli` directory and run the following command: 63 | 64 | $ cargo run 65 | 66 | This command assumes that the local computer has a radio service running which is downlinking to the address `127.0.0.1:8001`, as specified in the local environment setup guide. 67 | 68 | A log message should appear indicating that `myceli` is up and listening: 69 | 70 | $ INFO myceli::listener: Listening for messages on 127.0.0.1:8001 71 | 72 | ## Configuring Myceli 73 | 74 | `myceli` has a few configuration options which ship with default values, or can be tuned to fit system requirements. 75 | 76 | Current configuration values and defaults are: 77 | - `listen_address` - The network address `myceli` will listen on for incoming messages. Defaults to `127.0.0.1:8001`. 78 | - `retry_timeout_duration` - Timeout before `myceli` will retry a dag transfer, measured in milliseconds. The default value is 120_000 or two minutes. 79 | - `storage_path` - Directory path for `myceli` to use for storage. If this directory does not exist it will be created. Defaults to `storage/` in the process working directory. 80 | - `mtu` - The MTU (in bytes) used to chunk up messages into UDP packets. This defaults to `512`. 81 | - `window_size` - DAG transfers are broken up into windows of blocks. This value controls the number of blocks in a window. This defaults to `5` blocks in a window. 82 | - `block_size` - The size (in bytes) of blocks that a file should be broken up into when importing. This defaults to 3kB or 3072. 83 | - `chunk_transmit_throttle` - If set, this will cause the UDP transport to throttle or delay by the specified number of milliseconds between chunk transmissions. Defaults to none. 
84 | - `radio_address` - The network address of the radio that myceli should respond to by default, if not set then myceli will respond to the sending address (or address set in relevant request). 85 | 86 | These configuration values can be set via a TOML config file which is passed as an argument when running `myceli`. 87 | 88 | Here is an example configuration file: 89 | 90 | listen_address="127.0.0.1:9011" 91 | retry_timeout_duration=360_000 92 | storage_path="myceli_storage" 93 | 94 | If this configuration is saved to "myceli.toml", then we would run `myceli myceli.toml` to use the config file. 95 | 96 | ## Interacting with Myceli 97 | 98 | Now that `myceli` has been built and is running on both the raspberry-pi and local computer, commands may be sent to the instances to control them. 99 | 100 | Navigate to root `space` dir and run `cargo build --bin controller` to build the tool we'll use for interacting with `myceli`. After the `controller` is built we'll walk through some basic commands. 101 | 102 | ### Importing a file 103 | 104 | One of the fundamental actions `myceli` can take is importing a file into its internal IPFS store. Navigate to root `space` dir and run the following command to import a local file: 105 | 106 | $ cargo run --bin controller -- -l 127.0.0.1:8001 import-file Cargo.toml 107 | 108 | This will send the `ImportFile` command to the local `myceli` instance listening at `127.0.0.1:8001` with the local `Cargo.toml` as the file to import. In this case we'll use the `-l` flag to listen for a response, as `myceli` will respond with the root CID if the file is correctly imported. 
Here is what the output may look like for a successful file import: 109 | 110 | Transmitting: {"ApplicationAPI":{"ImportFile":{"path":"Cargo.toml"}}} 111 | ApplicationAPI(FileImported { path: "Cargo.toml", cid: "bafybeicwxyav7jde73wb5svahp53qi5okq2p4bguyflfw6hsbmwbbl4bw4" }) 112 | 113 | ### Transmitting a dag 114 | 115 | Once a file has been imported, and the root CID is known, it is possible to ask the `myceli` instance holding that file in storage to transmit it to another `myceli` instance. In this case we'll transmit from the local computer to the raspberry-pi. 116 | 117 | On the local computer, in the root `space` directory, run the following command: 118 | 119 | $ cargo run --bin controller -- 127.0.0.1:8001 transmit-dag [root-cid-here] 127.0.0.1:8002 5 120 | 121 | This will send the `TransmitDag` command to the `myceli` instance listening on `127.0.0.1:8001`, which will ask it to transmit the blocks associated with the specified root CID to `127.0.0.1:8002` with `5` specified as the number of retries. After sending this command you should see several `Transmitting block ...` messages from the local computer's `myceli`, and several `Received block ...` messages from the raspberry-pi's `myceli`. 122 | 123 | ### Validating a dag 124 | 125 | After a dag has been transmitted, it must be verified that it is complete and valid at the destination. 126 | 127 | To verify the status of the dag on the raspberry-pi, run the following `controller` command: 128 | 129 | $ cargo run --bin controller -- 127.0.0.1:8002 -l validate-dag [root-cid-here] 130 | 131 | This will send the `ValidateDag` command to the radio listening at `127.0.0.1:8002`, which will then send it to the `myceli` instance on the raspberry-pi. In this case, the response will appear in the logs for the local computer's `myceli` instance. Check over there for `ValidateDagResponse` and the `result` string. If it says `Dag is valid`, then we know the transfer was complete and valid. 
132 | 133 | 134 | ### Exporting a dag 135 | 136 | Once a dag has been transmitted and validated, it can be exported as a file on the receiving system. 137 | 138 | To export the received dag on the raspberry-pi, run the following `controller` command: 139 | 140 | $ cargo run --bin controller -- 127.0.0.1:8002 export-dag [root-cid-here] [/file/system/path] 141 | 142 | This will send the `ExportDag` command to the radio listening at `127.0.0.1:8002`, which will send it to the `myceli` instance on the raspberry-pi. This command includes the specified root cid and path to export to. After the command has been received and executed, you should find a file at the specified path containing the dag data. 143 | 144 | --------------------------------------------------------------------------------