├── rust-toolchain
├── .dockerignore
├── archived
├── car-utility
│ ├── rust-toolchain
│ ├── src
│ │ ├── lib.rs
│ │ ├── main.rs
│ │ ├── unpack.rs
│ │ ├── pack.rs
│ │ └── run.rs
│ ├── Cross.toml
│ ├── Cargo.toml
│ └── README.md
└── cpp-transmit-example
│ ├── src
│ ├── api.hpp
│ ├── lib.rs
│ └── main.cpp
│ ├── Cargo.toml
│ └── Makefile
├── config
├── src
│ ├── lib.rs
│ └── config.rs
└── Cargo.toml
├── local-storage
├── Cross.toml
├── src
│ ├── error.rs
│ ├── lib.rs
│ ├── util.rs
│ ├── provider.rs
│ └── null_provider.rs
└── Cargo.toml
├── myceli
├── build.rs
├── src
│ ├── lib.rs
│ ├── version_info.rs
│ └── main.rs
├── Cross.toml
├── README.md
├── Cargo.toml
└── tests
│ ├── utils
│ └── mod.rs
│ └── listener_test.rs
├── .gitignore
├── smalog
├── Cargo.toml
└── src
│ └── lib.rs
├── .github
├── dependabot.yml
└── workflows
│ ├── stale.yml
│ ├── docker-test.yml
│ ├── generated-pr.yml
│ ├── linting.yaml
│ ├── unit-testing.yaml
│ └── create-release.yaml
├── .idea
├── vcs.xml
├── .gitignore
├── modules.xml
└── space.iml
├── messages
├── src
│ ├── err.rs
│ ├── lib.rs
│ ├── protocol.rs
│ ├── sync.rs
│ ├── api.rs
│ ├── cid_list.rs
│ └── message.rs
└── Cargo.toml
├── ipfs-unixfs
├── build.rs
├── src
│ ├── unixfs.proto
│ ├── merkledag.proto
│ ├── lib.rs
│ ├── chunker.rs
│ ├── chunker
│ │ └── fixed.rs
│ └── types.rs
└── Cargo.toml
├── meetings
├── 2023-02-13.md
├── 2023-02-08.md
├── 2023-02-21.md
├── 2023-01-26.md
└── 2022-11-14.md
├── Cross.toml
├── testing
├── udp_forward
│ ├── src
│ │ ├── err.rs
│ │ └── main.rs
│ └── Cargo.toml
├── local_test
│ ├── all.sh
│ ├── compress.case.sh
│ ├── setup.env
│ ├── kubo.case.sh
│ ├── timeout.killer.sh
│ ├── original.case.sh
│ ├── watcher.case.sh
│ ├── netperf.case.sh
│ └── funcs.env
└── testing-plan.md
├── cross-x86-linux.Dockerfile
├── local-dev-environment
├── desktop
│ ├── rfm69-service
│ │ ├── Cargo.toml
│ │ └── src
│ │ │ └── main.rs
│ └── rfm69-driver
│ │ └── driver
│ │ └── driver.ino
└── raspberry-pi
│ └── RFM69HCW
│ └── service.py
├── cross-aarch64-linux.Dockerfile
├── transports
├── src
│ ├── lib.rs
│ ├── error.rs
│ ├── chunking.rs
│ └── udp_transport.rs
└── Cargo.toml
├── cross-armv7-linux.Dockerfile
├── controller
├── Cargo.toml
└── src
│ └── main.rs
├── docs
├── charts
│ ├── netover.md
│ ├── sync_specialfailure.md
│ ├── filedag.md
│ ├── ship.md
│ └── sync.md
├── myceli-docker.md
├── hyphae.md
├── poc-car-transmission.md
├── setup-local-environment.md
└── myceli-basic-setup.md
├── myceli.Dockerfile
├── watcher
├── Cargo.toml
└── src
│ ├── main.rs
│ └── handler.rs
├── LICENSE
├── Cargo.toml
└── DESIGN.md
/rust-toolchain:
--------------------------------------------------------------------------------
1 | 1.70.0
2 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | target/
2 | .git/
3 | *.db
--------------------------------------------------------------------------------
/archived/car-utility/rust-toolchain:
--------------------------------------------------------------------------------
1 | 1.63
2 |
--------------------------------------------------------------------------------
/config/src/lib.rs:
--------------------------------------------------------------------------------
1 | mod config;
2 |
3 | pub type Config = config::Config;
4 |
--------------------------------------------------------------------------------
/archived/car-utility/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod pack;
2 | pub mod run;
3 | pub mod unpack;
4 |
--------------------------------------------------------------------------------
/local-storage/Cross.toml:
--------------------------------------------------------------------------------
1 | [target.armv7-unknown-linux-gnueabihf]
2 | dockerfile = "../cross.Dockerfile"
--------------------------------------------------------------------------------
/archived/car-utility/Cross.toml:
--------------------------------------------------------------------------------
1 | [target.armv7-unknown-linux-gnueabihf]
2 | dockerfile = "../cross.Dockerfile"
--------------------------------------------------------------------------------
/myceli/build.rs:
--------------------------------------------------------------------------------
1 | fn main() {
2 | built::write_built_file().expect("Failed to acquire build-time information");
3 | }
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | .DS_Store
3 | .vscode/
4 | build/
5 | storage.db
6 | sat.*/
7 | gnd/
8 | ctl
9 | log
10 | *.pid
11 |
--------------------------------------------------------------------------------
/archived/cpp-transmit-example/src/api.hpp:
--------------------------------------------------------------------------------
1 |
2 | extern "C" {
3 |
4 | int generate_transmit_msg(unsigned char* msg, char path[], char addr[]);
5 |
6 | }
--------------------------------------------------------------------------------
/smalog/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "smalog"
3 | version = "0.0.1"
4 | edition = "2021"
5 |
6 | [dependencies]
7 | log.workspace = true
8 | chrono = "0.4.31"
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "github-actions"
4 | directory: "/"
5 | schedule:
6 | interval: "weekly"
7 |
--------------------------------------------------------------------------------
/myceli/src/lib.rs:
--------------------------------------------------------------------------------
1 | mod handlers;
2 | pub mod listener;
3 | #[cfg(feature = "proto_ship")]
4 | pub mod shipper;
5 | #[cfg(feature = "proto_sync")]
6 | mod sync;
7 | mod version_info;
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="VcsDirectoryMappings">
4 |     <mapping directory="" vcs="Git" />
5 |   </component>
6 | </project>
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/messages/src/err.rs:
--------------------------------------------------------------------------------
1 | use derive_error::Error;
2 |
3 | #[derive(Debug, Error)]
4 | pub enum Error {
5 | Cid(cid::Error),
6 | EmptyCidList,
7 | }
8 |
9 | pub type Result<T> = std::result::Result<T, Error>;
10 |
--------------------------------------------------------------------------------
/archived/car-utility/src/main.rs:
--------------------------------------------------------------------------------
1 | use anyhow::Result;
2 | use clap::Parser;
3 |
4 | #[tokio::main(flavor = "multi_thread")]
5 | async fn main() -> Result<()> {
6 | let cli = car_utility::run::Cli::parse();
7 | cli.run().await
8 | }
9 |
--------------------------------------------------------------------------------
/ipfs-unixfs/build.rs:
--------------------------------------------------------------------------------
1 | fn main() {
2 | prost_build::Config::new()
3 | .bytes([".unixfs_pb.Data", ".merkledag_pb.PBNode.Data"])
4 | .compile_protos(&["src/unixfs.proto", "src/merkledag.proto"], &["src"])
5 | .unwrap();
6 | }
7 |
--------------------------------------------------------------------------------
/meetings/2023-02-13.md:
--------------------------------------------------------------------------------
1 | # Meeting on 2/13/2023 with Little Bear Labs, Lockheed, and Protocol Labs
2 |
3 | ## Notes
4 |
5 | * Attendees: Ryan, Jon, Pete, Anshuman, David
6 | * Talked over Jon's system diagrams
7 | * Talked over Anshuman's control flow diagrams
--------------------------------------------------------------------------------
/archived/cpp-transmit-example/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "cpp-transmit-example"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [dependencies]
7 | messages = { path = "../messages" }
8 |
9 | [lib]
10 | name = "transmit"
11 | crate-type = ["staticlib"]
--------------------------------------------------------------------------------
/Cross.toml:
--------------------------------------------------------------------------------
1 | [target.armv7-unknown-linux-gnueabihf]
2 | dockerfile = "cross-armv7-linux.Dockerfile"
3 |
4 | [target.aarch64-unknown-linux-gnu]
5 | dockerfile = "cross-aarch64-linux.Dockerfile"
6 |
7 | [target.x86_64-unknown-linux-gnu]
8 | dockerfile = "cross-x86-linux.Dockerfile"
--------------------------------------------------------------------------------
/local-storage/src/error.rs:
--------------------------------------------------------------------------------
1 | use thiserror::Error;
2 |
3 | #[derive(Debug, Error)]
4 | pub enum StorageError {
5 | #[error("Block not found for CID {0}: {1}")]
6 | BlockNotFound(String, String),
7 | #[error("DAG incomplete {0}")]
8 | DagIncomplete(String),
9 | }
10 |
--------------------------------------------------------------------------------
/myceli/Cross.toml:
--------------------------------------------------------------------------------
1 | [target.armv7-unknown-linux-gnueabihf]
2 | dockerfile = "../cross-armv7-linux.Dockerfile"
3 |
4 | [target.aarch64-unknown-linux-gnu]
5 | dockerfile = "../cross-aarch64-linux.Dockerfile"
6 |
7 | [target.x86_64-unknown-linux-gnu]
8 | dockerfile = "../cross-x86-linux.Dockerfile"
--------------------------------------------------------------------------------
/testing/udp_forward/src/err.rs:
--------------------------------------------------------------------------------
1 | use derive_error::Error;
2 | use std::io;
3 | use std::num::ParseIntError;
4 |
5 | #[derive(Debug, Error)]
6 | pub enum Error {
7 | Io(io::Error),
8 | ParseInt(ParseIntError),
9 | }
10 |
11 | pub type Result<T> = std::result::Result<T, Error>;
12 |
--------------------------------------------------------------------------------
/meetings/2023-02-08.md:
--------------------------------------------------------------------------------
1 | # Meeting on 2/8/2023 with Little Bear Labs and Protocol Labs
2 |
3 | ## Notes
4 |
5 | * Attendees: Ryan and Anshuman
6 | * Talked over sequence diagrams for ground-to-space and space-to-ground transmissions
7 | * Revised several API calls to operate on DAGs instead of CIDs
8 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="ProjectModuleManager">
4 |     <modules>
5 |       <module fileurl="file://$PROJECT_DIR$/.idea/space.iml" filepath="$PROJECT_DIR$/.idea/space.iml" />
6 |     </modules>
7 |   </component>
8 | </project>
--------------------------------------------------------------------------------
/testing/udp_forward/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "udp_forward"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [dependencies]
9 | derive-error.workspace = true
10 | rand.workspace = true
11 | chrono = "0.4.31"
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Close Stale Issues
2 |
3 | on:
4 | schedule:
5 | - cron: '0 0 * * *'
6 | workflow_dispatch:
7 |
8 | permissions:
9 | issues: write
10 | pull-requests: write
11 |
12 | jobs:
13 | stale:
14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1
15 |
--------------------------------------------------------------------------------
/.github/workflows/docker-test.yml:
--------------------------------------------------------------------------------
1 | name: Docker Testing
2 | on: [ pull_request ]
3 | jobs:
4 | test_docker_build:
5 | name: Test Docker build
6 | runs-on: ubuntu-latest
7 | steps:
8 | - uses: actions/checkout@v3
9 | - name: Test build of myceli docker
10 | run: docker build -f myceli.Dockerfile .
11 |
--------------------------------------------------------------------------------
/.github/workflows/generated-pr.yml:
--------------------------------------------------------------------------------
1 | name: Close Generated PRs
2 |
3 | on:
4 | schedule:
5 | - cron: '0 0 * * *'
6 | workflow_dispatch:
7 |
8 | permissions:
9 | issues: write
10 | pull-requests: write
11 |
12 | jobs:
13 | stale:
14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1
15 |
--------------------------------------------------------------------------------
/archived/cpp-transmit-example/Makefile:
--------------------------------------------------------------------------------
1 | all: ../target/debug/libtransmit.a
2 | mkdir -p build
3 | g++ src/main.cpp -L ../target/debug/ -ltransmit -o build/transmit
4 |
5 | run:
6 | LD_LIBRARY_PATH=../target/debug/ ./build/transmit
7 |
8 | ../target/debug/libtransmit.a: src/lib.rs Cargo.toml
9 | cargo build
10 |
11 | clean:
12 | rm -rf build && cargo clean
--------------------------------------------------------------------------------
/config/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "config"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | repository.workspace = true
7 | rust-version.workspace = true
8 |
9 | [dependencies]
10 | anyhow.workspace = true
11 | dirs = "5.0.1"
12 | figment.workspace = true
13 | log.workspace = true
14 | serde.workspace = true
15 | serde_derive.workspace = true
--------------------------------------------------------------------------------
/cross-x86-linux.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ghcr.io/cross-rs/x86_64-unknown-linux-gnu:latest
2 |
3 | RUN apt-get update && \
4 | apt-get install -y unzip libssl-dev
5 |
6 | RUN curl -Lo protoc.zip "https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-linux-x86_64.zip"
7 | RUN unzip -q protoc.zip -d /usr/local
8 | RUN chmod a+x /usr/local/bin/protoc
9 | ENV PROTOC=/usr/local/bin/protoc
--------------------------------------------------------------------------------
/messages/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub(crate) mod api;
2 | pub mod cid_list;
3 | mod err;
4 | pub(crate) mod message;
5 |
6 | #[cfg(feature = "proto_ship")]
7 | pub(crate) mod protocol;
8 | mod sync;
9 |
10 | pub use api::{ApplicationAPI, DagInfo};
11 | pub use message::Message;
12 | #[cfg(feature = "proto_ship")]
13 | pub use protocol::{DataProtocol, TransmissionBlock};
14 | pub use sync::{SyncMessage, PUSH_OVERHEAD};
15 |
--------------------------------------------------------------------------------
/local-dev-environment/desktop/rfm69-service/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "rfm69-service"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [dependencies]
9 | clap.workspace = true
10 | log.workspace = true
11 | tokio.workspace = true
12 | tokio-serial.workspace = true
13 | env_logger.workspace = true
14 |
--------------------------------------------------------------------------------
/cross-aarch64-linux.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5
2 |
3 | RUN dpkg --add-architecture arm64 && apt-get update && \
4 | apt-get install -y unzip libssl-dev:arm64
5 |
6 | RUN curl -Lo protoc.zip "https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-linux-x86_64.zip"
7 | RUN unzip -q protoc.zip -d /usr/local
8 | RUN chmod a+x /usr/local/bin/protoc
9 | ENV PROTOC=/usr/local/bin/protoc
--------------------------------------------------------------------------------
/transports/src/lib.rs:
--------------------------------------------------------------------------------
1 | mod chunking;
2 | mod error;
3 | mod udp_chunking;
4 | mod udp_transport;
5 |
6 | use messages::Message;
7 |
8 | pub const MAX_MTU: u16 = 1024 * 3;
9 | pub use error::{Result, TransportError};
10 |
11 | pub trait Transport: Send + Sync {
12 | fn receive(&self) -> Result<(Message, String)>;
13 | fn send(&self, msg: Message, addr: &str) -> Result<()>;
14 | }
15 |
16 | pub use udp_transport::UdpTransport;
17 |
--------------------------------------------------------------------------------
/cross-armv7-linux.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ghcr.io/cross-rs/armv7-unknown-linux-gnueabihf:0.2.5
2 |
3 | RUN dpkg --add-architecture armhf && apt-get update && \
4 | apt-get install -y unzip openssl libssl-dev:armhf
5 |
6 | RUN curl -Lo protoc.zip "https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-linux-x86_64.zip"
7 | RUN unzip -q protoc.zip -d /usr/local
8 | RUN chmod a+x /usr/local/bin/protoc
9 | ENV PROTOC=/usr/local/bin/protoc
--------------------------------------------------------------------------------
/meetings/2023-02-21.md:
--------------------------------------------------------------------------------
1 | # Meeting on 2/21/2023 with Little Bear Labs, Lockheed, and Protocol Labs
2 |
3 | ## Notes
4 |
5 | * Attendees: Ryan, Jon, Pete, Dietrich, David
6 | * We decided that a dedicated CLI tool for forming control API messages is sufficient for manual operators.
7 | * Ideal test payloads will be satellite imagery files in GeoTIFF format. Testing with several of these images across a variety of sizes will be necessary before any on-mission usage.
--------------------------------------------------------------------------------
/local-storage/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod block;
2 | pub mod error;
3 | pub mod provider;
4 | pub mod storage;
5 | mod util;
6 |
7 | mod null_provider;
8 |
9 | #[cfg(feature = "files")]
10 | mod file_provider;
11 | #[cfg(feature = "sqlite")]
12 | pub mod sql_provider;
13 |
14 | #[cfg(all(not(test), feature = "sqlite", feature = "files"))]
15 | compile_error! {"Outside of unit tests there's not a good reason to compile with multiple StorageProvider implementations."}
16 |
--------------------------------------------------------------------------------
/controller/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "controller"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | repository.workspace = true
7 | rust-version.workspace = true
8 |
9 | [dependencies]
10 | anyhow.workspace = true
11 | clap.workspace = true
12 | config.workspace = true
13 | env_logger.workspace = true
14 | log.workspace = true
15 | messages.workspace = true
16 | serde_json.workspace = true
17 | tokio.workspace = true
18 | transports.workspace = true
--------------------------------------------------------------------------------
/docs/charts/netover.md:
--------------------------------------------------------------------------------
1 | ```mermaid
2 | flowchart TD
3 | subgraph Vehicle
4 | A["Application (e.g. Watcher)"] -- ApplicationAPI/UDP --> B[Myceli]
5 | B <-- CommsAPI/UDP --> C[Comms]
6 | end
7 |
8 | subgraph Radio
9 | Z[Data Transfer Protocol]
10 | end
11 |
12 | subgraph Ground
13 | F["Service (e.g. Controller)"] -- ApplicationAPI/UDP --> E[Myceli]
14 | E <-- CommsAPI/UDP --> G[Comms]
15 | end
16 |
17 | C <--> Z
18 | G <--> Z
19 | ```
20 |
--------------------------------------------------------------------------------
/ipfs-unixfs/src/unixfs.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package unixfs_pb;
4 |
5 | message Data {
6 | enum DataType {
7 | Raw = 0;
8 | Directory = 1;
9 | File = 2;
10 | Metadata = 3;
11 | Symlink = 4;
12 | HAMTShard = 5;
13 | }
14 |
15 | DataType Type = 1;
16 | optional bytes Data = 2;
17 | optional uint64 filesize = 3;
18 | repeated uint64 blocksizes = 4;
19 |
20 | optional uint64 hashType = 5;
21 | optional uint64 fanout = 6;
22 | }
23 |
24 | message Metadata {
25 | optional string MimeType = 1;
26 | }
27 |
--------------------------------------------------------------------------------
/ipfs-unixfs/src/merkledag.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package merkledag_pb;
4 |
5 | // An IPFS MerkleDAG Link
6 | message PBLink {
7 | // multihash of the target object
8 | optional bytes Hash = 1;
9 |
10 | // utf string name. should be unique per object
11 | optional string Name = 2;
12 |
13 | // cumulative size of target object
14 | optional uint64 Tsize = 3;
15 | }
16 |
17 | // An IPFS MerkleDAG Node
18 | message PBNode {
19 |
20 | // refs to other objects
21 | repeated PBLink Links = 2;
22 |
23 | // opaque user data
24 | optional bytes Data = 1;
25 | }
26 |
--------------------------------------------------------------------------------
/myceli/src/version_info.rs:
--------------------------------------------------------------------------------
1 | use messages::ApplicationAPI;
2 | // The file has been placed there by the build script.
3 | include!(concat!(env!("OUT_DIR"), "/built.rs"));
4 |
5 | pub fn get(remote_label: Option<String>) -> ApplicationAPI {
6 | ApplicationAPI::Version {
7 | version: env!("CARGO_PKG_VERSION").to_string(),
8 | rust: env!("CARGO_PKG_RUST_VERSION").to_string(),
9 | target: TARGET.to_owned(),
10 | profile: PROFILE.to_owned(),
11 | features: FEATURES.iter().map(|s| s.to_string()).collect(),
12 | remote_label,
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/testing/local_test/all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | cd `dirname "${0}"`
3 | for c in *.case.sh
4 | do
5 | echo -e "\n\n\t ### \t START \t ### \t ### \t Test Suite: \t ${c%.case.sh} \t ### \t###\n"
6 | if "./${c}"
7 | then
8 | echo -e "\n\t ### \t PASSED \t ### \t ### \t Test Suite: \t ${c%.case.sh} \t ### \t###\n\n"
9 | else
10 | echo -e "\n\t ### \t FAILED \t ### \t ### \t Test Suite: \t ${c%.case.sh} \t ### \t###\n\n"
11 | exit 9
12 | fi
13 | done
14 |
15 | echo -e '\n\n\t###\t###\t PASSED \t###\t###\n'
16 |
17 | source funcs.env
18 | kill_all
19 |
20 | echo -e '\n\t###\t###\t DONE \t###\t###\n\n'
21 |
--------------------------------------------------------------------------------
/meetings/2023-01-26.md:
--------------------------------------------------------------------------------
1 | # Meeting on 1/26/2023 with Little Bear Labs and Lockheed
2 |
3 | ## Notes
4 |
5 | * Attendees: Ryan, Dietrich, Jon, Pete, Anshuman, Mark, David
6 | * Went over the Application API
7 | * Ryan demo’d sending/receiving control message + resultant file transmission
8 | * Jon suggested implementing byte estimation into the API (ground knows this, but satellite may not)
9 | * Request CID, Request Available CIDs API implementations requested by Dietrich
10 | * Advertise New CIDs (with description), Delete CID API, Missing Child CIDs implementations requested by Jon
11 |
12 | ## Follow Up
13 |
14 | Anshuman to writeup use cases with diagrams.
--------------------------------------------------------------------------------
/messages/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "messages"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | repository.workspace = true
7 | rust-version.workspace = true
8 |
9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
10 |
11 | [dependencies]
12 | cid.workspace = true
13 | clap.workspace = true
14 | derive-error.workspace = true
15 | ipfs-unixfs.workspace = true
16 | log.workspace = true
17 | parity-scale-codec.workspace = true
18 | parity-scale-codec-derive.workspace = true
19 | serde.workspace = true
20 |
21 | [features]
22 | proto_ship = []
23 | proto_sync = []
24 |
--------------------------------------------------------------------------------
/.github/workflows/linting.yaml:
--------------------------------------------------------------------------------
1 | name: CI Linting
2 | on: [ pull_request ]
3 | jobs:
4 | clippy_check:
5 | name: Run clippy check
6 | runs-on: ubuntu-latest
7 | steps:
8 | - name: Install Protoc
9 | uses: arduino/setup-protoc@v2
10 | - uses: actions/checkout@v3
11 | - id: rust
12 | run: echo "toolchain=$(cat rust-toolchain)" >> $GITHUB_OUTPUT
13 | shell: bash
14 | - uses: dtolnay/rust-toolchain@master
15 | with:
16 | toolchain: ${{ steps.rust.outputs.toolchain }}
17 | components: clippy
18 | - name: Run clippy
19 | run: cargo clippy --all --features big
20 | shell: bash
21 |
--------------------------------------------------------------------------------
/testing/local_test/compress.case.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | source `dirname "${0}"`/setup.env
3 |
4 | for m in sat.*/myceli
5 | do
6 | xz -9 --keep --extreme "${m}"
7 | gzip --keep --best "${m}"
8 | done
9 | max_size=1000000 # 1MB (not MiB) in B
10 |
11 | for format in {g,x}z
12 | do
13 | for variant in sat.{all,sync,ship}/myceli.
14 | do
15 | fil="${variant}${format}"
16 | ls -lrth "${fil}"
17 | if [ `stat --format=%s "${fil}"` -gt ${max_size} ]
18 | then
19 | echo -e "\n\t###\t PROBLEM: \t###\t ${fil} is over ${max_size} B \t###\n"
20 | exit 99
21 | else
22 | export max_size=$((max_size - 40000))
23 | fi
24 | done
25 | done
26 |
--------------------------------------------------------------------------------
/archived/car-utility/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "car-utility"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [dependencies]
9 | anyhow = "1"
10 | iroh-resolver = { git = "https://github.com/n0-computer/iroh", tag = "v0.1.0" }
11 | iroh-car = { git = "https://github.com/n0-computer/iroh", tag = "v0.1.0" }
12 | tokio = { version = "1", features = ["fs", "io-util"] }
13 | cid = "0.8"
14 | futures = "0.3.21"
15 | clap = { version = "4.0.15", features = ["derive"] }
16 |
17 | [patch.crates-io]
18 | libp2p = { git = "https://github.com/dignifiedquire/rust-libp2p", branch = "iroh-0-50" }
--------------------------------------------------------------------------------
/myceli.Dockerfile:
--------------------------------------------------------------------------------
1 | # Base build stage
2 | FROM rust:1.67 as builder
3 | # Install protobuf compiler
4 | RUN curl -Lo protoc.zip "https://github.com/protocolbuffers/protobuf/releases/download/v22.2/protoc-22.2-linux-x86_64.zip"
5 | RUN unzip protoc.zip -d protoc/
6 | RUN cp -a protoc/* /usr/local
7 |
8 | # Copy over and build myceli
9 | COPY . .
10 | RUN cargo build --bin myceli --features big
11 | RUN cp ./target/debug/myceli /usr/bin/myceli
12 |
13 | # Extras stage
14 | FROM debian:bullseye-slim
15 | LABEL org.opencontainers.image.source="https://github.com/ipfs-shipyard/space"
16 | COPY --from=builder /usr/bin/myceli /usr/bin/myceli
17 | COPY --from=builder Cargo.toml /usr/local/Cargo.toml
18 | ENTRYPOINT myceli $CONFIG_PATH
19 |
--------------------------------------------------------------------------------
/watcher/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "watcher"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [dependencies]
9 | anyhow.workspace = true
10 | config.workspace = true
11 | env_logger = { workspace = true, optional = true }
12 | log.workspace = true
13 | messages.workspace = true
14 | notify = "6.0.1"
15 | smalog = { workspace = true, optional = true }
16 | transports.workspace = true
17 |
18 | [features]
19 | big = ["good_log", "proto_sync", "proto_ship"]
20 | small = ["small_log"]
21 | good_log = ["dep:env_logger"]
22 | small_log = ["dep:smalog"]
23 | proto_sync = ["messages/proto_sync"]
24 | proto_ship = ["messages/proto_ship"]
25 |
--------------------------------------------------------------------------------
/archived/car-utility/src/unpack.rs:
--------------------------------------------------------------------------------
1 | use anyhow::Result;
2 | use futures::TryStreamExt;
3 | use iroh_car::CarReader;
4 | use std::path::PathBuf;
5 | use tokio::fs::File;
6 | use tokio::io::AsyncWriteExt;
7 | use tokio::io::BufReader;
8 |
9 | pub async fn unpack(path: &PathBuf, output: &PathBuf) -> Result<()> {
10 | let file = File::open(path).await?;
11 | let buf_reader = BufReader::new(file);
12 |
13 | let car_reader = CarReader::new(buf_reader).await?;
14 | let mut output_file = File::create(output).await?;
15 |
16 | let files: Vec<_> = car_reader.stream().try_collect().await.unwrap();
17 | for (_cid, data) in files {
18 | output_file.write_all(&data).await?;
19 | }
20 | output_file.flush().await?;
21 | Ok(())
22 | }
23 |
--------------------------------------------------------------------------------
/meetings/2022-11-14.md:
--------------------------------------------------------------------------------
1 | # Meeting on 11/14/2022 with Little Bear Labs and Lockheed
2 |
3 | ## Notes
4 |
5 | * 2 use cases: (1) Satellite to Ground, and (2) Satellite to Satellite
6 | * 3 core pieces: bus, payloads (crosslink control, antennae), mission processing (Linux)
7 | * Runtime is powered on for a small duration (on demand)
8 | * 3 tech parts: IPFS (trimmed down), content management (e.g. web server), network layer (most work needs to be done here)
9 | * Constraints: high packet loss rates, variable bandwidth
10 | * Sample protocol: CCSDS (Common Space Protocol) similar to IPV4
11 | * Could take the API directly to the bus if applicable
12 | * 128kbps bandwidth (decent amount is overhead)
13 |
14 | ## Follow Up
15 |
16 | We plan to have regular meetings (TBD). Non-mission data will be published in this repository.
--------------------------------------------------------------------------------
/.github/workflows/unit-testing.yaml:
--------------------------------------------------------------------------------
1 | name: CI Unit Testing
2 | on: [ pull_request ]
3 | jobs:
4 | build_and_test:
5 | name: Build and test rust
6 | runs-on: ubuntu-latest
7 | steps:
8 | - name: Install Protoc
9 | uses: arduino/setup-protoc@v2
10 | - uses: actions/checkout@v3
11 | - uses: dtolnay/rust-toolchain@stable
12 | - name: Build workspace
13 | run: cargo build --release --features big
14 | shell: bash
15 | - name: Test big
16 | run: cargo test --features big,proto_all
17 | shell: bash
18 | - name: Test small
19 | run: cargo test --features small,proto_all
20 | shell: bash
21 | - name: Test small ship
22 | run: cargo test --features small,proto_ship
23 | shell: bash
24 | - name: Test small sync
25 | run: cargo test --features small,proto_sync
26 | shell: bash
27 |
--------------------------------------------------------------------------------
/testing/local_test/setup.env:
--------------------------------------------------------------------------------
1 | if ! ( uname | grep -q Linux )
2 | then
3 | echo "This script only works on linux."
4 | exit 6
5 | fi
6 | export script_dir=`dirname "${0}"`
7 | export repo_dir=`git -C "${script_dir}" rev-parse --show-toplevel`
8 | cd "${repo_dir}"
9 |
10 | if [ "${o}" = '' ]
11 | then
12 | export o=`mktemp -d`
13 | fi
14 |
15 | source "./testing/local_test/funcs.env"
16 |
17 | kill_all
18 |
19 | for d in gnd sat.{all,sync,ship}
20 | do
21 | rm -r ${d} || true
22 | done
23 | mkdir -p sat.{all,ship,sync} gnd ctl
24 |
25 | ./testing/local_test/timeout.killer.sh
26 |
27 | configure 10000
28 |
29 | bld gnd myceli big release
30 | bld gnd watcher big release
31 | #bld gnd hyphae big release
32 | bld ctl controller big release
33 | for p in all sync ship
34 | do
35 | bld sat.${p} myceli small,proto_${p} small
36 | bld sat.${p} watcher small small
37 | done
38 |
39 | echo 'Setup finished'
40 |
--------------------------------------------------------------------------------
/transports/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "transports"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | repository.workspace = true
7 | rust-version.workspace = true
8 |
9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
10 |
11 | [dependencies]
12 | cid.workspace = true
13 | derive-error.workspace = true
14 | env_logger = { workspace = true, optional = true }
15 | log.workspace = true
16 | messages = { workspace = true, features = [] }
17 | parity-scale-codec.workspace = true
18 | parity-scale-codec-derive.workspace = true
19 | rand.workspace = true
20 | serde.workspace = true
21 | serde_derive.workspace = true
22 | smalog = { workspace = true, optional = true }
23 |
24 | [features]
25 | good_log = ["dep:env_logger"]
26 | small_log = ["dep:smalog"]
27 | proto_ship = ["messages/proto_ship"]
28 | proto_sync = ["messages/proto_sync"]
29 |
--------------------------------------------------------------------------------
/myceli/README.md:
--------------------------------------------------------------------------------
1 | # Overview
2 |
3 | The myceli application acts as the "node" in this IPFS-for-space project. The current design allows a myceli to act as a node either on a spacecraft or in a ground station. While myceli is running it can receive and respond to any API or data protocol messaging.
4 |
5 | ## Usage
6 |
7 | Start an instance:
8 |
9 | $ cargo run --bin myceli
10 |
11 | This command will start a `myceli` instance which is listening at `127.0.0.1:8001` and will respond to any valid messages received on that address.
12 |
13 | Next, send a command. The `controller` utility is a CLI tool used to generate and send messages to `myceli` instances. For example, we can ask the running instance which blocks it currently has available:
14 |
15 | $ cargo run --bin controller -- -l 127.0.0.1:8001 request-available-blocks
16 |
17 | This will send a `RequestAvailableBlocks` message to the instance listening at `127.0.0.1:8001` and display the response when it is received.
--------------------------------------------------------------------------------
/local-storage/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "local-storage"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | repository.workspace = true
7 | rust-version.workspace = true
8 |
9 | [dependencies]
10 | anyhow.workspace = true
11 | cid.workspace = true
12 | bytes.workspace = true
13 | env_logger = { workspace = true, optional = true }
14 | futures.workspace = true
15 | ipfs-unixfs.workspace = true
16 | log.workspace = true
17 | rusqlite = { workspace = true, optional = true }
18 | smalog = { workspace = true, optional = true }
19 | thiserror.workspace = true
20 | tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
21 | #tokio-util = { workspace = true, features = ["io-util"] }
22 |
23 | [features]
24 | big = ["sqlite", "good_log"]
25 | small = ["files", "small_log"]
26 |
27 | good_log = []
28 | small_log = []
29 | sqlite = ["dep:rusqlite"]
30 | files = []
31 |
32 | [dev-dependencies]
33 | assert_fs.workspace = true
34 | rand.workspace = true
35 |
--------------------------------------------------------------------------------
/transports/src/error.rs:
--------------------------------------------------------------------------------
1 | use derive_error::Error;
2 | use std::{
3 | fmt::{Display, Formatter},
4 | num::TryFromIntError,
5 | };
6 |
7 | #[derive(Error, Debug)]
8 | pub enum TransportError {
9 | Io(std::io::Error),
10 | Cid(cid::Error),
11 | AdHoc(AdHocError),
12 | Scale(parity_scale_codec::Error),
13 | TimedOut,
14 | IntegerValueOutOfBounds(TryFromIntError),
15 | }
16 |
17 | pub type Result = std::result::Result;
18 |
19 | pub fn adhoc(msg: &str) -> TransportError {
20 | TransportError::AdHoc(AdHocError {
21 | message: msg.to_owned(),
22 | })
23 | }
24 | pub fn adhoc_err(msg: &str) -> Result<()> {
25 | Err(adhoc(msg))
26 | }
27 |
28 | #[derive(Debug)]
29 | pub struct AdHocError {
30 | pub message: String,
31 | }
32 |
33 | impl Display for AdHocError {
34 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
35 | f.write_str(&self.message)
36 | }
37 | }
38 |
39 | impl std::error::Error for AdHocError {}
40 |
--------------------------------------------------------------------------------
/testing/local_test/kubo.case.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | source `dirname "${0}"`/setup.env
4 |
5 | for i in {0..99}
6 | do
7 | sleep ${i}
8 | if port_open 5001
9 | then
10 | break
11 | else
12 | ( ( ipfs daemon >"${o}/kubo.log" 2>&1 <&- & ) & ) &
13 | sleep $(( i + 9 ))
14 | fi
15 | done
16 |
17 | start_myceli gnd
18 |
19 | date > "${o}/known_good_path"
20 | echo 'Import a file.'
21 | controller 8765 import-file "${o}/known_good_path"
22 |
23 | echo -e '\n\n\t###\tStarting hyphae...\t###\n'
24 | start hyphae gnd hyphae.toml
25 | echo -e '\nNow waiting for sync to Kubo...\n'
26 | for i in {0..99}
27 | do
28 | export cid=`grep 'Received.response:.*FileImported' ctl/controller.log | tail -n 1 | cut -d '"' -f 4`
29 | if [ "${cid}" = '' ]
30 | then
31 | echo "CID not imported into myceli yet."
32 | elif timeout $(( 9 + i )) ipfs block get "${cid}"
33 | then
34 | break
35 | else
36 | echo "${cid} not yet in Kubo"
37 | fi
38 | done
39 | ipfs block get ${cid}
40 | ipfs dag get ${cid} | jq .
41 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 IPFS Shipyard
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/archived/cpp-transmit-example/src/lib.rs:
--------------------------------------------------------------------------------
1 | use messages::{ApplicationAPI, Message};
2 | use std::ffi::{c_char, c_int, c_uchar, CStr};
3 |
4 | /// # Safety
5 | ///
6 | /// The caller of this function needs to ensure that buffer, path, and addr are not null
7 | /// and that buffer has sufficient space for a message to be written into it.
8 | #[no_mangle]
9 | pub unsafe extern "C" fn generate_transmit_msg(
10 | buffer: *mut c_uchar,
11 | cid: *const c_char,
12 | addr: *const c_char,
13 | ) -> c_int {
14 | let cid_str = unsafe {
15 | assert!(!cid.is_null());
16 | CStr::from_ptr(cid)
17 | };
18 |
19 | let addr_str = unsafe {
20 | assert!(!addr.is_null());
21 | CStr::from_ptr(addr)
22 | };
23 |
24 | let msg = Message::ApplicationAPI(ApplicationAPI::TransmitDag {
25 | cid: cid_str.to_str().unwrap().to_owned(),
26 | target_addr: addr_str.to_str().unwrap().to_owned(),
27 | retries: 0,
28 | });
29 | let msg_bytes = msg.to_bytes();
30 | unsafe {
31 | std::slice::from_raw_parts_mut(buffer, msg_bytes.len()).copy_from_slice(&msg_bytes);
32 | }
33 | msg_bytes.len().try_into().unwrap()
34 | }
35 |
--------------------------------------------------------------------------------
/docs/charts/sync_specialfailure.md:
--------------------------------------------------------------------------------
1 | ```mermaid
2 | sequenceDiagram
3 | participant G as Ground
4 | participant V as Vehicle
5 | Note over G: Import File
6 | Note left of G: Available CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
... 4 more (5 leaves in total)
7 | G --X V: "Push" Send CIDs to Expect (& File Name)
8 | Note right of V: Available CIDs:
(none)
Missing CIDs:
(none - the push never got here)
9 | G ->> V: Send Block (bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i)
10 | Note over V: Hash, store.
11 | Note over V: Parse as stem, passes - has 5 children.
12 | loop For each child CID
13 | Note over V: Neither available nor marked as missing, mark as missing.
14 | end
15 | Note right of V: Available CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli
Missing CIDs:
... 4 CIDs remain ...
16 | V ->> G: Pull (5 CIDs mentioned in stem)
17 | loop Other CIDs in pull
18 | G ->> V: Send Blocks
19 | End
20 | ```
--------------------------------------------------------------------------------
/archived/car-utility/src/pack.rs:
--------------------------------------------------------------------------------
1 | use anyhow::Result;
2 | use cid::Cid;
3 | use futures::StreamExt;
4 | use iroh_car::{CarHeader, CarWriter};
5 | use iroh_resolver::unixfs_builder::{File, FileBuilder};
6 | use std::fs::File as FsFile;
7 |
8 | use std::io::Write;
9 | use std::path::PathBuf;
10 |
11 | pub async fn pack(path: &PathBuf, output: &PathBuf) -> Result<()> {
12 | let file: File = FileBuilder::new().path(path).build().await?;
13 |
14 | let _root: Option = None;
15 | let parts = { Box::pin(file.encode().await?) };
16 | tokio::pin!(parts);
17 |
18 | let mut cids = vec![];
19 | let mut datas = vec![];
20 |
21 | while let Some(part) = parts.next().await {
22 | let (cid, bytes, _links) = part?.into_parts();
23 | cids.push(cid);
24 | datas.push(bytes);
25 | }
26 |
27 | let mut buffer = vec![];
28 | let car_header = CarHeader::new_v1(cids.clone());
29 | let mut writer = CarWriter::new(car_header, &mut buffer);
30 |
31 | for (cid, data) in cids.into_iter().zip(datas.into_iter()) {
32 | writer.write(cid, data).await?;
33 | }
34 |
35 | writer.finish().await?;
36 |
37 | let mut f = FsFile::create(output).expect("failed to create file");
38 | f.write_all(&buffer)?;
39 |
40 | Ok(())
41 | }
42 |
--------------------------------------------------------------------------------
/ipfs-unixfs/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "ipfs-unixfs"
3 | authors = ["dignifiedquire ", "Ryan Plauche "]
4 | description = "Implementation of unixfs for iroh, a trimmed copy of https://github.com/n0-computer/beetle/tree/main/iroh-unixfs"
5 | version.workspace = true
6 | edition.workspace = true
7 | license.workspace = true
8 | repository.workspace = true
9 | rust-version.workspace = true
10 |
11 | [dependencies]
12 | anyhow.workspace = true
13 | async-recursion.workspace = true
14 | async-stream.workspace = true
15 | bytes.workspace = true
16 | cid.workspace = true
17 | futures.workspace = true
18 | libipld.workspace = true
19 | log.workspace = true
20 | multihash.workspace = true
21 | num_enum.workspace = true
22 | prost.workspace = true
23 | tokio = { workspace = true, features = ["fs", "io-util", "rt"] }
24 | #tokio-util = { workspace = true, features = ["io-util"] }
25 |
26 | [dev-dependencies]
27 | # criterion = { workspace = true, features = ["async_tokio"] }
28 | cid.workspace = true
29 | proptest.workspace = true
30 | rand.workspace = true
31 | tempfile.workspace = true
32 | tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread", "fs"] }
33 | tokio-util = { workspace = true, features = ["io-util"] }
34 |
35 | [build-dependencies]
36 | prost-build.workspace = true
37 |
--------------------------------------------------------------------------------
/docs/charts/filedag.md:
--------------------------------------------------------------------------------
1 | ```mermaid
2 | flowchart TD
3 | FC("File Content = '0123456789'")
4 | CZ["File chunking size = 2B"]
5 | FC --> CZ
6 | subgraph Chunking
7 | A["'01'"]
8 | B["'23'"]
9 | C["'45'"]
10 | D["'67'"]
11 | E["'89'"]
12 | CZ --> A
13 | CZ --> B
14 | CZ --> C
15 | CZ --> D
16 | CZ --> E
17 | end
18 | subgraph Hashing Chunks
19 | AA["'01'"] --> AH["bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q"]
20 | EP["..."]
21 | EE["'89'"] --> EH["bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli"]
22 | A --> AA
23 | B --> EP
24 | C --> EP
25 | D --> EP
26 | E --> EE
27 | end
28 | subgraph Form Stem Node
29 | LT("bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
30 | bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
31 | bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
32 | bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza
33 | bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli")
34 | AH --> LT
35 | EH --> LT
36 | end
37 | subgraph Hash Stem
38 | RT["Root = bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i"]
39 | LT-->RT
40 | end
41 | ```
42 |
--------------------------------------------------------------------------------
/docs/charts/ship.md:
--------------------------------------------------------------------------------
1 | ```mermaid
2 | %%{init: { "sequence": { "noteAlign": "left"} } }%%
3 |
4 | sequenceDiagram
5 | participant O as Operator
6 | participant G as Ground IPFS
7 | participant S as Space IPFS
8 | Note over G,S: Both nodes begin listening for messages on boot
9 | Note over O,S: Satellite comes into LOS
10 | O->>G: IsConnected(true)
11 | S->>S: IsConnected(true)
12 | Note over O,G: Operator commands IPFS
node to transmit a file
13 | O->>G: TransmitFile(path)
14 | Note over G,S: Transfer of blocks
1. File is chunked into blocks, each with a CID
2. Root block contains links to child CIDs
3. Blocks are transmitted over UDP-radio
15 | loop Until DAG is Complete
16 | Note over G,S: Operator asks space IPFS node to verify that all
CIDs are received.
17 | G->>S: GetMissingDagBlocks(CID): [Block]
18 | Note over G,S: If empty response, all blocks are received
19 | S->>G: MissingDagBlocks(): [CID]
20 | Note over G,S: If blocks are missing, ground retransmits
21 | G->>S: While blocks remain missing,
TransmitBlock(CID)
22 | end
23 | Note over O,S: Operator asks space IPFS to write DAG to the file system
24 | O->>S: ExportDag(CID, path)
25 | Note over G,S: Satellite goes out of range
26 | O->>G: IsConnected(false)
27 | S->>S: IsConnected(false)
28 | ```
--------------------------------------------------------------------------------
/docs/myceli-docker.md:
--------------------------------------------------------------------------------
1 | # Build and running Myceli in Docker
2 |
3 | This doc contains instructions on how to build and run `myceli` in Docker
4 |
5 | ### Building
6 |
7 | The file `myceli.Dockerfile` contains all the instructions needed by Docker to produce an image for running `myceli`. This image can be built by running the following command:
8 |
9 | $ docker build -f myceli.Dockerfile . -t myceli
10 |
11 | ### Pulling
12 |
13 | The `myceli` docker images are published to the Github Container registry and can be pulled with the following command:
14 |
15 | $ docker pull ghcr.io/ipfs-shipyard/myceli:latest
16 |
17 | ### Running
18 |
19 | We only suggest running `myceli` in Docker in Linux environments due to networking requirements.
20 |
21 | Example running of `myceli` in a standalone Docker container with default settings:
22 |
23 | $ docker run --rm -v `pwd`:/myceli/ --network host -it ghcr.io/ipfs-shipyard/myceli:latest
24 |
25 | Important pieces to point out here:
26 |
27 | - `-v pwd:/myceli/`: Mounting a local directory is necessary for `myceli`'s storage to persist
28 | - `--network host`: The container running `myceli` needs to either run on the host network, or on the same network as the other services which will be communicating with it (controller CLI, ground radio bridge).
29 |
30 | Optionally you may want to pass a config file argument in with the `CONFIG_PATH` environment variable, like this:
31 |
32 | $ docker run --rm -v `pwd`:/myceli/ --network host -e CONFIG_PATH=/myceli/config.toml -it ghcr.io/ipfs-shipyard/myceli:latest
--------------------------------------------------------------------------------
/archived/car-utility/README.md:
--------------------------------------------------------------------------------
1 | ## Overview
2 |
3 | This utility is a simple way to pack individual files into CAR archives and extract packed files from a CAR archive. It currently only supports one fs file per CAR archive.
4 |
5 | ## Usage
6 |
7 | ### Packing a file
8 |
9 | $ car-utility pack /path/to/input/file /path/to/archive.car
10 |
11 | ### Unpacking a file
12 |
13 | $ car-utility unpack /path/to/archive.car /path/to/output/file
14 |
15 | ## Dependencies
16 |
17 | These system dependencies are required to build:
18 | - Rust v1.63
19 | - [Protobuf compiler](https://github.com/protocolbuffers/protobuf#protocol-compiler-installation): Download it from the [Protobuf Releases page](https://github.com/protocolbuffers/protobuf/releases)
20 |
21 | ## Cross-compiling for Raspberry Pi
22 |
23 | ### General Setup
24 |
25 | Install `cross` tool for the cross-compiling environment:
26 |
27 | $ cargo install cross --git https://github.com/cross-rs/cross
28 |
29 | Make sure `Docker` is installed and running.
30 |
31 | ### Building the app
32 |
33 | The build command for the raspberry pi target is:
34 |
35 | $ cross build --target armv7-unknown-linux-gnueabihf
36 |
37 | It is generally a good idea to run `cargo clean` between building for different targets, such as building for your local machine and then building for the raspi, otherwise the cross build may throw some weird glibc errors.
38 |
39 | The built executable will be located at `target/armv7-unknown-linux-gnueabihf/[release|debug]/car-utility` and can now be transferred to the raspberry pi for usage.
--------------------------------------------------------------------------------
/smalog/src/lib.rs:
--------------------------------------------------------------------------------
1 | use chrono::Utc;
2 | use log::{Level, Metadata, Record};
3 | use std::env;
4 |
5 | struct Smalog {
6 | lev: log::LevelFilter,
7 | }
8 |
9 | static mut LOGGER: Smalog = Smalog {
10 | lev: log::LevelFilter::Info,
11 | };
12 | pub fn init() {
13 | let lev = match env::var("RUST_LOG") {
14 | Ok(lev_s) => level_from_str(&lev_s),
15 | Err(_) => Level::Info,
16 | };
17 | set_level(lev.to_level_filter());
18 | }
19 | pub fn set_level(lev: log::LevelFilter) {
20 | unsafe {
21 | LOGGER.lev = lev;
22 | log::set_logger(&LOGGER).expect("Failed to set the logger implementation!");
23 | }
24 | log::set_max_level(lev);
25 | }
26 |
27 | impl log::Log for Smalog {
28 | fn enabled(&self, metadata: &Metadata) -> bool {
29 | metadata.level() <= self.lev
30 | }
31 |
32 | fn log(&self, record: &Record) {
33 | if self.enabled(record.metadata()) {
34 | println!(
35 | "[{} {}] {}",
36 | Utc::now().to_rfc3339(),
37 | record.level(),
38 | record.args()
39 | );
40 | }
41 | }
42 |
43 | fn flush(&self) {}
44 | }
45 |
46 | fn level_from_str(s: &str) -> Level {
47 | use std::str::FromStr;
48 | if let Ok(l) = Level::from_str(s) {
49 | return l;
50 | }
51 | println!("ERROR! RUST_LOG set to {s} which is not recognized by smalog which only accepts a simple level name, i.e. one of: OFF; ERROR; WARN; INFO; DEBUG; TRACE. Will use INFO instead.");
52 | Level::Info
53 | }
54 |
--------------------------------------------------------------------------------
/local-storage/src/util.rs:
--------------------------------------------------------------------------------
1 | use super::block::StoredBlock;
2 | use anyhow::{bail, Result};
3 | use std::collections::BTreeMap;
4 |
5 | pub(crate) fn verify_dag(blocks: &[StoredBlock]) -> Result<()> {
6 | if blocks.is_empty() {
7 | bail!("No blocks is not a meaningful DAG");
8 | }
9 | if blocks.len() == 1 {
10 | if blocks[0].links.is_empty() {
11 | return Ok(());
12 | }
13 | bail!("Given only root of DAG, no children");
14 | } else if blocks.iter().all(|b| b.links.is_empty()) {
15 | bail!("No root found");
16 | }
17 | let mut counts: BTreeMap<&str, (u16, u16)> = BTreeMap::new();
18 | for block in blocks {
19 | block.validate()?;
20 | counts.entry(block.cid.as_str()).or_default().0 += 1;
21 | for link in &block.links {
22 | counts.entry(link.as_str()).or_default().1 += 1;
23 | }
24 | }
25 | let mut root = "";
26 | for (cid, (h,n)) in counts {
27 | if n > h {
28 | bail!("Missing block: {cid}");
29 | }
30 | if h == 1 && n == 0 {
31 | if root.is_empty() {
32 | root = cid;
33 | } else if root < cid {
34 | bail!("Multiple roots! {root} {cid}");
35 | } else {
36 | bail!("Multiple roots! {cid} {root}");
37 | }
38 | } else if h > n {
39 | bail!("Too many copies of {cid}");
40 | }
41 | }
42 | if root.is_empty() {
43 | bail!("DAG is actually DG (cycle detected)");
44 | }
45 | Ok(())
46 | }
47 |
--------------------------------------------------------------------------------
/archived/cpp-transmit-example/src/main.cpp:
--------------------------------------------------------------------------------
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>

#include "api.hpp"
6 |
7 | int main(int argc, char *argv[])
8 | {
9 | if (argc != 4)
10 | {
11 | printf("Please provide three arguments: [ipfs_addr] [dag_cid_to_transmit] [destination_addr]\n");
12 | return -1;
13 | }
14 | printf("Sending {\"Transmit\": {\"dag\": %s, \"addr\": %s}} to %s\n", argv[2], argv[3], argv[1]);
15 |
16 | // Parse out network address
17 | std::string addr(argv[1]);
18 | int split_pos = addr.find(":");
19 | if (split_pos == std::string::npos)
20 | {
21 | printf("Invalid address found %s", addr.c_str());
22 | return -1;
23 | }
24 | std::string ip = addr.substr(0, split_pos);
25 | std::string port = addr.substr(split_pos + 1);
26 |
27 | // Call into Rust code to generate transmit message
28 | unsigned char msg[1024];
29 | int len = generate_transmit_msg((unsigned char *)msg, argv[2], argv[3]);
30 |
31 | // Send transmit over udp to ipfs instance
32 | int sockfd;
33 | char buffer[1024];
34 | struct sockaddr_in servaddr;
35 |
36 | if ((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
37 | {
38 | perror("Socket creation failed");
39 | exit(-1);
40 | }
41 |
42 | memset(&servaddr, 0, sizeof(servaddr));
43 | servaddr.sin_family = AF_INET;
44 | servaddr.sin_port = htons(std::stoi(port));
45 | servaddr.sin_addr.s_addr = inet_addr(ip.c_str());
46 |
47 | sendto(sockfd, msg, len, 0, (const struct sockaddr *)&servaddr, sizeof(servaddr));
48 | close(sockfd);
49 | return 0;
50 | }
--------------------------------------------------------------------------------
/myceli/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "myceli"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | repository.workspace = true
7 | rust-version.workspace = true
8 | build = "build.rs"
9 |
10 | [dependencies]
11 | anyhow.workspace = true
12 | cid = { workspace = true }
13 | config.workspace = true
14 | env_logger = { workspace = true, optional = true }
15 | figment.workspace = true
16 | ipfs-unixfs.workspace = true
17 | libipld.workspace = true
18 | local-storage = { path = "../local-storage", default-features = false }
19 | log.workspace = true
20 | messages = { workspace = true, features = [] }
21 | parity-scale-codec.workspace = true
22 | serde.workspace = true
23 | smalog = { workspace = true, optional = true }
24 | tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
25 | toml = { workspace = true, features = ["display"] }
26 | transports = { workspace = true, features = [] }
27 |
28 | [features]
29 | big = ["sqlite", "good_log", "proto_all"]
30 | small = ["files", "small_log"]
31 | proto_all = ["proto_ship", "proto_sync"]
32 | proto_ship = ["messages/proto_ship", "transports/proto_ship"]
33 | proto_sync = ["messages/proto_sync", "transports/proto_sync"]
34 | good_log = ["dep:env_logger", "local-storage/good_log"]
35 | small_log = ["dep:smalog", "local-storage/small_log"]
36 | sqlite = ["local-storage/sqlite"]
37 | files = ["local-storage/files"]
38 |
39 | [dev-dependencies]
40 | assert_fs.workspace = true
41 | blake2.workspace = true
42 | file-hashing.workspace = true
43 | futures.workspace = true
44 | ipfs-unixfs.workspace = true
45 | rand.workspace = true
46 | local-storage.workspace = true
47 |
48 | [build-dependencies]
49 | built = "0.7.0"
--------------------------------------------------------------------------------
/docs/hyphae.md:
--------------------------------------------------------------------------------
1 | # Hyphae Setup
2 |
3 | Hyphae is a filament, or bridge, between Myceli and Kubo. It provides a pathway for the IPFS blocks inside of Myceli to flow into Kubo, and from there potentially into the broader public IPFS network.
4 |
5 | ## Running Hyphae
6 |
7 | After building from source, or downloading a binary, `hyphae` can be run with no additional arguments:
8 |
9 | $ hyphae
10 |
11 | Starting hyphae with no config file will run with a few default settings:
12 | - Looking for `myceli` at `127.0.0.1:8001`
13 | - Using an MTU of 512 when communicating with `myceli`
14 | - Looking for `kubo` at `127.0.0.1:5001`
15 | - Syncing data every 10 seconds
16 |
17 | Every ten seconds, `hyphae` will query `myceli` for its available blocks, query `kubo` for its local refs, and transfer over any blocks which exist in `myceli` and not in `kubo`.
18 |
19 | ## Configuring Hyphae
20 |
21 | `hyphae` has a few configuration options which ship with default values, or can be tuned to fit system requirements.
22 |
23 | Current configuration values and defaults are:
24 | - `myceli_address` - The network address of the `myceli` instance. Defaults to `127.0.0.1:8001`.
25 | - `kubo_address` - The network address of the `kubo` instance. Defaults to `127.0.0.1:5001`.
26 | - `sync_interval` - Duration in milliseconds between sync operations. Defaults to 10_000 ms.
27 | - `mtu` - The MTU used when chunking messages to/from `myceli`
28 |
29 | These configuration values can be set via a TOML config file which is passed as an argument when running `hyphae`.
30 |
31 | Here is an example configuration file:
32 |
33 | myceli_address="127.0.0.1:8002"
34 | kubo_address="127.0.0.1:8200"
35 | sync_interval=30_000
36 | mtu=1024
37 |
38 | If this configuration is saved to "config.toml", then we would run `hyphae config.toml` to use the config file.
--------------------------------------------------------------------------------
/.idea/space.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/archived/car-utility/src/run.rs:
--------------------------------------------------------------------------------
1 | use std::path::PathBuf;
2 |
3 | use crate::pack::pack;
4 | use crate::unpack::unpack;
5 | use anyhow::Result;
6 | use clap::{Parser, Subcommand};
7 |
8 | #[derive(Parser, Debug, Clone)]
9 | #[clap(version, long_about = None, propagate_version = true)]
10 | #[clap(about = "CAR packer/unpacker based on Iroh")]
11 | pub struct Cli {
12 | #[clap(subcommand)]
13 | command: Commands,
14 | }
15 |
16 | #[derive(Subcommand, Debug, Clone)]
17 | enum Commands {
18 | #[clap(about = "Pack a file into a CAR")]
19 | Pack {
20 | /// The path to a file to be CAR packed
21 | path: PathBuf,
22 | // The path to the CAR output file
23 | output: PathBuf,
24 | },
25 | #[clap(about = "Unpack a CAR into a file")]
26 | Unpack {
27 | /// The path to a CAR file to be unpacked
28 | path: PathBuf,
29 | /// The path to the unpacked output file
30 | output: PathBuf,
31 | },
32 | }
33 |
34 | impl Cli {
35 | pub async fn run(&self) -> Result<()> {
36 | self.cli_command().await?;
37 |
38 | Ok(())
39 | }
40 |
41 | async fn cli_command(&self) -> Result<()> {
42 | match &self.command {
43 | Commands::Pack { path, output } => {
44 | if !path.is_file() {
45 | anyhow::bail!("{} is not a file", path.display());
46 | }
47 | println!("Packing {} into {}", path.display(), output.display());
48 | pack(path, output).await?;
49 | }
50 | Commands::Unpack { path, output } => {
51 | if !path.is_file() {
52 | anyhow::bail!("{} is not a file", path.display());
53 | }
54 | println!("Unpacking {} into {}", path.display(), output.display());
55 | unpack(path, output).await?;
56 | }
57 | };
58 |
59 | Ok(())
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/watcher/src/main.rs:
--------------------------------------------------------------------------------
1 | use log::debug;
2 | use messages::Message;
3 | use notify::Watcher;
4 | use std::{fs, path::PathBuf, time::Duration};
5 |
6 | mod handler;
7 |
8 | #[cfg(all(not(feature = "small"), not(feature = "big")))]
9 | compile_error! {"Select either big or small feature"}
10 |
11 | fn watched_dir(cfg: &config::Config) -> PathBuf {
12 | let mut result = PathBuf::new();
13 | result.push(
14 | cfg.clone()
15 | .watched_directory
16 | .expect("Must configure watched_directory before running watcher."),
17 | );
18 | result
19 | .canonicalize()
20 | .expect("Watched directory does not exist?")
21 | }
22 |
/// Watcher entry point: registers a filesystem watcher on the configured
/// directory and, in parallel, slowly announces files that already existed
/// before startup (notify only reports new events, not pre-existing files).
fn main() {
    // Logging backend is feature-selected: env_logger for "good_log",
    // the in-repo minimal logger for "small_log".
    #[cfg(feature = "good_log")]
    env_logger::init();

    #[cfg(feature = "small_log")]
    smalog::init();

    // Optional first CLI argument is the config file path.
    let config_path = std::env::args().nth(1);
    let cfg =
        config::Config::parse(config_path, &Message::fit_size).expect("Failed to parse config");
    // This handler is moved into the notify callback and fires on FS events.
    let hndr = handler::Handler::new(&cfg).expect("Failed to configure transport & event handler");
    let dir = watched_dir(&cfg);
    let mut watcher = notify::recommended_watcher(move |e| hndr.handle_event(e))
        .expect("Unable to create directory watcher.");
    watcher
        .watch(&dir, notify::RecursiveMode::NonRecursive)
        .expect("Unable to watch directory.");
    // Second, independent handler used by the polling loop below for files
    // that existed before the watcher started.
    let hndr =
        handler::Handler::new(&cfg).expect("Failed to configure second transport & event handler");
    let mut preexisting =
        fs::read_dir(&dir).expect("Can't list watched_directory - does it exist?");
    // Seconds between announcements; doubles each time the directory listing
    // is exhausted, so re-announcements back off over time.
    let mut t = 4;
    // Loop ends (and the process exits) if the watched directory disappears.
    while dir.is_dir() {
        std::thread::sleep(Duration::from_secs(t));
        if let Some(Ok(f)) = preexisting.next() {
            // Only announce regular files; subdirectories etc. are skipped.
            if f.metadata().map(|d| d.is_file()).unwrap_or(false) {
                debug!("Discovered path in {dir:?} - {f:?} - notifying Myceli.");
                hndr.send(&f.path());
            }
        } else if let Ok(rd) = fs::read_dir(&dir) {
            // Listing exhausted: restart it and back off.
            preexisting = rd;
            t *= 2;
        }
    }
}
58 |
--------------------------------------------------------------------------------
/transports/src/chunking.rs:
--------------------------------------------------------------------------------
1 | use crate::error::{adhoc_err, Result};
2 | use cid::multihash::{Code, MultihashDigest};
3 | use log::error;
4 | use messages::Message;
5 | use parity_scale_codec::{Decode, Encode};
6 | use parity_scale_codec_derive::{Decode as ParityDecode, Encode as ParityEncode};
7 | use serde::Serialize;
8 |
9 | const HASH_SIZE: usize = 16;
10 |
11 | // This MessageContainer struct is intended to be used inside of the chunkers
12 | // for verification of Message integrity during the chunking/assembly process
// This MessageContainer struct is intended to be used inside of the chunkers
// for verification of Message integrity during the chunking/assembly process
#[derive(Clone, Debug, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)]
pub(crate) struct MessageContainer {
    // Hash of payload: 16-byte Blake2s-128 digest of the encoded message
    // (computed by gen_hash below; HASH_SIZE matches the digest length)
    hash: [u8; HASH_SIZE],
    // Message payload
    pub message: Message,
}
20 |
impl MessageContainer {
    /// Wrap a message together with a digest of its encoded bytes so the
    /// receiving side can detect corruption introduced during chunked transfer.
    pub fn new(message: Message) -> Self {
        let hash = gen_hash(&message);
        // This hash uses a 128-bit Blake2s-128 hash, rather than the common sha2-256 to save on overhead size
        MessageContainer { hash, message }
    }

    /// SCALE-encode the container (hash + message) for transmission.
    pub fn to_bytes(&self) -> Vec {
        self.encode()
    }

    /// Recompute the message digest and compare it to the stored one.
    /// Returns Ok(false) (after logging) on mismatch rather than an error.
    /// NOTE(review): despite the name, this verifies the Blake2s hash field
    /// above, not a CID — consider renaming.
    pub fn verify_cid(&self) -> Result {
        let regenerated_hash = gen_hash(&self.message);
        if regenerated_hash == self.hash {
            Ok(true)
        } else {
            error!(
                "Hash mismatch: provided={:?} deduced={:?}",
                self.hash, regenerated_hash
            );
            Ok(false)
        }
    }

    /// Decode a container from bytes, failing if either decoding or the
    /// integrity check fails.
    pub fn from_bytes(bytes: &mut &[u8]) -> Result {
        let container: MessageContainer = MessageContainer::decode(bytes)?;
        if !container.verify_cid()? {
            adhoc_err("Message container failed CID verification")?;
        }
        Ok(container)
    }
}
53 |
54 | fn gen_hash(msg: &Message) -> [u8; HASH_SIZE] {
55 | let bytes = msg.to_bytes();
56 | Code::Blake2s128
57 | .digest(&bytes)
58 | .digest()
59 | .try_into()
60 | .expect("Hash is wrong size (should be constant since hash type is not changing)")
61 | }
62 |
--------------------------------------------------------------------------------
/ipfs-unixfs/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod balanced_tree;
2 | pub mod builder;
3 | pub mod chunker;
4 | pub mod codecs;
5 | mod types;
6 | pub mod unixfs;
7 |
8 | pub use crate::types::{Block, Link, LinkRef, Links, LoadedCid, PbLinks, Source};
9 |
10 | use crate::codecs::Codec;
11 | use anyhow::{bail, Context as _, Result};
12 | use cid::Cid;
13 | use libipld::{prelude::Codec as _, Ipld, IpldCodec};
14 |
/// Extract links from the given content.
///
/// Links will be returned as a vec with meaningful order
/// (the order in which they appear in the decoded IPLD structure).
pub fn parse_links(cid: &Cid, bytes: &[u8]) -> Result> {
    // The CID's codec field tells us how the block payload is encoded.
    let codec = Codec::try_from(cid.codec()).context("unknown codec")?;
    let mut cids = vec![];
    // Map our internal codec enum onto libipld's codec for decoding.
    let codec = match codec {
        Codec::DagCbor => IpldCodec::DagCbor,
        Codec::DagPb => IpldCodec::DagPb,
        Codec::DagJson => IpldCodec::DagJson,
        Codec::Raw => IpldCodec::Raw,
        _ => bail!("unsupported codec {:?}", codec),
    };
    // Collect every CID referenced by the decoded block, preserving order
    // (duplicates are kept — see the file_with_repeat_chunks test).
    codec.references::(bytes, &mut cids)?;
    Ok(cids)
}
31 |
#[cfg(test)]
pub mod tests {
    use super::*;

    /// A DAG-PB file node whose two links point at the *same* child block
    /// must yield both link occurrences, in order — deduplicating would
    /// corrupt reassembly of files with repeated chunks.
    #[test]
    pub fn file_with_repeat_chunks() {
        // Raw DAG-PB encoding of a two-link file node; both links carry the
        // same child CID.
        let bytes: &[u8] = &[
            0x12, 0x2a, 0x0a, 0x24, 0x01, 0x55, 0x12, 0x20, 0x8f, 0x43, 0x43, 0x46, 0x64, 0x8f,
            0x6b, 0x96, 0xdf, 0x89, 0xdd, 0xa9, 0x01, 0xc5, 0x17, 0x6b, 0x10, 0xa6, 0xd8, 0x39,
            0x61, 0xdd, 0x3c, 0x1a, 0xc8, 0x8b, 0x59, 0xb2, 0xdc, 0x32, 0x7a, 0xa4, 0x12, 0x00,
            0x18, 0x02, 0x12, 0x2a, 0x0a, 0x24, 0x01, 0x55, 0x12, 0x20, 0x8f, 0x43, 0x43, 0x46,
            0x64, 0x8f, 0x6b, 0x96, 0xdf, 0x89, 0xdd, 0xa9, 0x01, 0xc5, 0x17, 0x6b, 0x10, 0xa6,
            0xd8, 0x39, 0x61, 0xdd, 0x3c, 0x1a, 0xc8, 0x8b, 0x59, 0xb2, 0xdc, 0x32, 0x7a, 0xa4,
            0x12, 0x00, 0x18, 0x02, 0x0a, 0x08, 0x08, 0x02, 0x18, 0x04, 0x20, 0x02, 0x20, 0x02,
        ];
        let cid: Cid = "bafybeiegfwauaenc4pa7jqfssar4i4pafsul4g62e3av64fwir5uodv7q4"
            .try_into()
            .unwrap();
        let actual = parse_links(&cid, bytes).unwrap();
        let child: Cid = "bafkreiepinbumzepnoln7co5vea4kf3lcctnqolb3u6bvsellgznymt2uq"
            .try_into()
            .unwrap();
        // Expect the same child twice — order and multiplicity preserved.
        let expected = [child.clone(), child.clone()];
        assert_eq!(actual, expected);
    }
}
58 |
--------------------------------------------------------------------------------
/testing/udp_forward/src/main.rs:
--------------------------------------------------------------------------------
1 | use chrono::{offset::Utc, DateTime};
2 | use rand::Rng;
3 | use std::{env, net::*, str, time::SystemTime};
4 |
5 | mod err;
6 |
/// UDP forwarder that relays packets between two peers while randomly
/// dropping some, to simulate a lossy link.
/// Args: listen address, peer A address, peer B address, optional drop rate
/// (roughly one packet in `rate` is dropped; default usize::MAX = never).
/// NOTE(review): the expect() strings below appear truncated ("First arg=") —
/// the original usage text seems lost; confirm against version history.
fn main() -> err::Result<()> {
    let mut arg_it = env::args();
    // Skip argv[0] (program name).
    arg_it.next();
    let listen = arg_it.next().expect("First arg= to listen to.");
    let dest_a = arg_it
        .next()
        .expect("Second arg= to forward packets to.");
    let dest_b = arg_it
        .next()
        .expect("Third arg= to forward packets to.");
    // NOTE(review): a rate of 0 would panic below (division by zero) —
    // confirm callers never pass 0.
    let rate: usize = arg_it
        .next()
        .map(|s| str::parse(&s))
        .unwrap_or(Ok(usize::MAX))?;
    // Max UDP payload buffer.
    let mut buf = [0u8; u16::MAX as usize];
    let socket = UdpSocket::bind(listen.clone())?;
    // Running counters of forwarded (good) vs dropped (bad) packets, used to
    // bias the drop odds toward one drop per `rate` forwards.
    let mut good = 0;
    let mut bad = 0;
    let mut rng = rand::thread_rng();
    loop {
        match socket.recv_from(&mut buf) {
            Ok((len, sender)) => {
                // Forward to whichever peer did not send the packet.
                let to = if format!("{sender:?}") == dest_a {
                    dest_b.clone()
                } else {
                    dest_a.clone()
                };
                // Periodically shrink both counters so the odds stay responsive.
                if bad >= 1 && good >= rate {
                    bad -= 1;
                    good -= rate;
                }
                // More forwarded packets => higher drop odds; more drops =>
                // higher forward odds. Keeps the long-run ratio near `rate`.
                let bad_odds = good / rate / 2 + 1;
                let good_odds = bad / 2 + rate;
                let n = bad_odds + good_odds;
                let i = rng.gen_range(0..n);
                if i < bad_odds {
                    let now: DateTime = SystemTime::now().into();
                    println!(
                        "Dropping {}th packet (from {sender:?}). Excess: good={good} bad={bad} @ {now}",
                        rate +1
                    );
                    bad += 1;
                } else {
                    match socket.send_to(&buf[0..len], to) {
                        Ok(_) => {
                            good += 1;
                            print!(".");
                        }
                        Err(e) => println!("Error sending: {e:?}"),
                    }
                }
            }
            Err(e) => println!("Error receiving: {e:?}"),
        }
    }
    // Ok(())
}
64 |
--------------------------------------------------------------------------------
/local-dev-environment/desktop/rfm69-driver/driver/driver.ino:
--------------------------------------------------------------------------------
1 | // This is borrowed heavily from the rf69 rx and tx demo code
2 | // -*- mode: C++ -*-
3 |
4 | #include
5 | #include
6 |
7 | /************ Radio Setup ***************/
8 |
9 | // Change to 434.0 or other frequency, must match RX's freq!
10 | #define RF69_FREQ 915.0
11 |
12 | // Feather 32u4 w/Radio pin defs
13 | #define RFM69_CS 8
14 | #define RFM69_INT 7
15 | #define RFM69_RST 4
16 | #define LED 13
17 |
18 | // Singleton instance of the radio driver
19 | RH_RF69 rf69(RFM69_CS, RFM69_INT);
20 |
// One-time hardware init: serial link, status LED, and RFM69 radio bring-up.
void setup()
{
  Serial.begin(115200);

  pinMode(LED, OUTPUT);
  pinMode(RFM69_RST, OUTPUT);
  digitalWrite(RFM69_RST, LOW);

  // manual reset: pulse the reset line high then low
  digitalWrite(RFM69_RST, HIGH);
  delay(10);
  digitalWrite(RFM69_RST, LOW);
  delay(10);

  if (!rf69.init()) {
    Serial.println("RFM69 radio init failed");
    while (1);  // halt forever - the radio is required
  }
  // Defaults after init are 434.0MHz, modulation GFSK_Rb250Fd250, +13dbM (for low power module)
  // No encryption
  if (!rf69.setFrequency(RF69_FREQ)) {
    Serial.println("setFrequency failed");
  }

  // If you are using a high power RF69 eg RFM69HW, you *must* set a Tx power with the
  // ishighpowermodule flag set like this:
  rf69.setTxPower(20, true); // range from 14-20 for power, 2nd arg must be true for 69HCW

  pinMode(LED, OUTPUT);
}
51 |
52 |
53 | void loop() {
54 | delay(10); // Wait 10ms between cycles
55 |
56 | uint8_t buf[RH_RF69_MAX_MESSAGE_LEN];
57 | uint8_t len = sizeof(buf);
58 |
59 | int availableBytes = Serial.available();
60 | if (availableBytes > 0) {
61 | int len = Serial.readBytes(buf, availableBytes);
62 | buf[len] = 0;
63 | rf69.send((uint8_t*)buf, len);
64 | rf69.waitPacketSent();
65 | Blink(LED, 30, 2);
66 | }
67 |
68 | if (rf69.waitAvailableTimeout(10)) {
69 | // Should be a reply message for us now
70 | if (rf69.recv(buf, &len)) {
71 | buf[len] = 0;
72 | Serial.write(buf, len);
73 | Blink(LED, 70, 2);
74 | }
75 | }
76 | }
77 |
78 | void Blink(byte PIN, byte DELAY_MS, byte loops) {
79 | for (byte i=0; i,
9 | pub data: Vec,
10 | pub links: Vec>,
11 | pub filename: Option,
12 | }
13 |
14 | impl fmt::Debug for TransmissionBlock {
15 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
16 | let cid_str = Cid::try_from(self.cid.clone())
17 | .map(|c| c.to_string())
18 | .unwrap();
19 |
20 | f.debug_struct("TransmissionBlock")
21 | .field("cid", &cid_str)
22 | .field("data", &self.data.len())
23 | .field("links", &self.links.len())
24 | .finish()
25 | }
26 | }
27 |
// Wire messages for the block/dag shipping protocol. Addresses are carried
// as strings; CIDs as their string form.
#[derive(Clone, Debug, ParityDecode, ParityEncode, Serialize, Eq, PartialEq)]
pub enum DataProtocol {
    // Transmission message for individual block
    Block(TransmissionBlock),
    // Protocol level request for transmission of a single block to target_addr
    RequestTransmitBlock {
        cid: String,
        target_addr: String,
    },
    // This message is used inside of the protocol to initiate the re-requesting of missing dag blocks
    // in order to continue transmitting the dag
    RetryDagSession {
        cid: String,
    },
    // Requests windowed transmission of a dag, retrying up to `retries` times
    RequestTransmitDag {
        cid: String,
        target_addr: String,
        retries: u8,
    },
    // Resumes the transmission of a dag which may have run out of retries or
    // paused due to connectivity lost
    ResumeTransmitDag {
        cid: String,
    },
    // Resumes the transmission of all dags which may be paused
    ResumeTransmitAllDags,
    // Message to request list of blocks missing from list of CIDs sent
    RequestMissingDagWindowBlocks {
        cid: String,
        blocks: Vec,
    },
    // Message to request list of blocks missing from CID sent
    RequestMissingDagBlocks {
        cid: String,
    },
    // Notifies which dag blocks are missing in current window
    MissingDagBlocks {
        cid: String,
        blocks: Vec,
    },
}
70 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = [
3 | "config",
4 | "controller",
5 | # "hyphae",
6 | "ipfs-unixfs",
7 | "local-dev-environment/desktop/rfm69-service",
8 | "local-storage",
9 | "messages",
10 | "myceli",
11 | "smalog",
12 | "testing/udp_forward",
13 | "transports",
14 | "watcher"
15 | ]
16 |
17 | [workspace.package]
18 | version = "0.7.0"
19 | edition = "2021"
20 | license = "Apache-2.0/MIT"
21 | rust-version = "1.70.0"
22 | repository = "https://github.com/ipfs-shipyard/space"
23 |
24 | [workspace.dependencies]
25 | # External deps
26 | anyhow = { version = "1.0.71", default-features = false, features = ["backtrace", "std"] }
27 | assert_fs = "1.0.13"
28 | async-recursion = "1.0.4"
29 | async-stream = "0.3.3"
30 | blake2 = { version = "0.10.6", default-features = false }
31 | bytes = "1.1"
32 | cid = { version = "0.9", default-features = false, features = ["scale-codec"] }
33 | clap = { version = "4.0.15", features = ["derive"] }
34 | derive-error = "0.0.5"
35 | env_logger = "0.10.0"
36 | figment = { version = "0.10", features = ["toml"] }
37 | file-hashing = "0.1.1"
38 | futures = "0.3.24"
39 | libipld = { version = "0.15", default-features = false, features = ["dag-pb", "dag-cbor", "dag-json"] }
40 | log = "0.4.19"
41 | multihash = "0.18.1"
42 | num_enum = "0.5.7"
43 | parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive", "std"] }
44 | parity-scale-codec-derive = "3.1.3"
45 | proptest = "1.1.0"
46 | prost = "0.11"
47 | prost-build = "0.11.1"
48 | rand = "0.8.5"
49 | reqwest = { version = "0.11.10", default-features = false }
50 | rusqlite = { version = "0.28.0", features = ["bundled"] }
51 | serde = "1.0.163"
52 | serde_derive = "1.0.163"
53 | serde_json = "1.0.87"
54 | tempfile = "3.3"
55 | thiserror = "1.0.40"
56 | tokio = "1.28.1"
57 | tokio-serial = "5.4"
58 | tokio-util = "0.7.8"
59 | toml = { version = "0.7.3", default-features = false }
60 |
61 | # Internal deps
62 | config = { path = "config" }
63 | ipfs-unixfs = { path = "ipfs-unixfs" }
64 | local-storage = { path = "local-storage" }
65 | messages = { path = "messages" }
66 | smalog = { path = "smalog" }
67 | transports = { path = "transports" }
68 |
69 | [profile.small]
70 | inherits = "release"
71 | lto = "fat"
72 |
73 | [profile.small.package.myceli]
74 | opt-level = "s"
75 | debug = false
76 | strip = "symbols"
77 | debug-assertions = false
78 | overflow-checks = false
79 | incremental = false
80 |
--------------------------------------------------------------------------------
/local-storage/src/provider.rs:
--------------------------------------------------------------------------------
1 | use crate::block::StoredBlock;
2 | use anyhow::Result;
3 | use cid::Cid;
4 | use std::sync::{Arc, Mutex};
5 |
6 | #[allow(unused_imports)]
7 | use crate::null_provider::NullStorageProvider;
8 |
9 | #[cfg(feature = "sqlite")]
10 | use crate::sql_provider::SqliteStorageProvider;
11 |
12 | #[cfg(feature = "files")]
13 | use crate::file_provider::FileStorageProvider;
14 |
15 | pub type Handle = Arc>;
16 |
/// Abstraction over the persistence backend (sqlite, flat files, or the
/// null stub), selected at compile time in default_storage_provider.
pub trait StorageProvider {
    // Import a stored block
    fn import_block(&mut self, block: &StoredBlock) -> Result<()>;
    // Requests a list of CIDs currently available in storage
    fn get_available_cids(&self) -> Result>;
    // Requests the block associated with the given CID
    fn get_block_by_cid(&self, cid: &str) -> Result;
    // Requests the links associated with the given CID
    fn get_links_by_cid(&self, cid: &str) -> Result>;
    // Lists the root CIDs of dags currently in storage
    fn list_available_dags(&self) -> Result>;
    // Attaches filename to dag
    fn name_dag(&self, cid: &str, file_name: &str) -> Result<()>;
    // Fetches the filename previously attached to the dag's root CID
    fn get_name(&self, cid: &str) -> Result;
    // Lists CIDs belonging to the dag rooted at `cid` with no stored block yet
    fn get_missing_cid_blocks(&self, cid: &str) -> Result>;
    // Fetches `window_size` of the dag's blocks starting at `offset`
    fn get_dag_blocks_by_window(
        &self,
        cid: &str,
        offset: u32,
        window_size: u32,
    ) -> Result>;
    // Lists the dag's CIDs, optionally restricted to a window
    fn get_all_dag_cids(
        &self,
        cid: &str,
        offset: Option,
        window_size: Option,
    ) -> Result>;
    // Fetches every stored block belonging to the dag
    fn get_all_dag_blocks(&self, cid: &str) -> Result>;
    // Runs one step of garbage collection
    // NOTE(review): return-value meaning (work remaining? anything freed?)
    // is not visible here — confirm against the sqlite/file impls.
    fn incremental_gc(&mut self) -> bool;
    // True if a block for this CID is present in storage
    fn has_cid(&self, cid: &Cid) -> bool;
    // Records an acknowledgement for this CID
    // NOTE(review): semantics inferred from the name — confirm with impls.
    fn ack_cid(&self, cid: &Cid);
    // CIDs referenced by stored blocks but not themselves stored
    // NOTE(review): semantics inferred from the name — confirm with impls.
    fn get_dangling_cids(&self) -> Result>;
}
49 |
/// Construct the compile-time-selected storage backend and wrap it in the
/// shared Handle (Arc<Mutex<...>>). Feature precedence: `sqlite` wins over
/// `files`; with neither enabled, the NullStorageProvider stub is used
/// (every operation fails). Parameters are underscore-prefixed because the
/// null configuration ignores them.
pub fn default_storage_provider(_storage_path: &str, _high_disk_usage: u64) -> Result {
    #[cfg(all(not(feature = "files"), not(feature = "sqlite")))]
    let provider = NullStorageProvider::default();
    #[cfg(all(feature = "files", not(feature = "sqlite")))]
    let provider = FileStorageProvider::new(_storage_path, _high_disk_usage)?;
    #[cfg(feature = "sqlite")]
    let provider = SqliteStorageProvider::new(_storage_path)?;
    Ok(Arc::new(Mutex::new(provider)))
}
59 |
--------------------------------------------------------------------------------
/local-storage/src/null_provider.rs:
--------------------------------------------------------------------------------
1 | use crate::block::StoredBlock;
2 | use crate::provider::StorageProvider;
3 | use anyhow::bail;
4 | use cid::Cid;
5 |
/// Placeholder storage backend used when neither the `files` nor the
/// `sqlite` feature is enabled; nearly every operation bails (see impl below).
#[derive(Default)]
pub(crate) struct NullStorageProvider {}
8 |
// Every method either bails with an error or returns an inert value
// (empty list / false / no-op), so a build without a real storage feature
// still links but fails fast at runtime if storage is actually used.
impl StorageProvider for NullStorageProvider {
    fn import_block(&mut self, _block: &StoredBlock) -> anyhow::Result<()> {
        bail!("NullStorageProvider does not implement anything")
    }
    // Inert success: reporting "no dangling CIDs" lets callers proceed.
    fn get_dangling_cids(&self) -> anyhow::Result> {
        Ok(vec![])
    }
    fn get_available_cids(&self) -> anyhow::Result> {
        bail!("NullStorageProvider does not implement anything")
    }
    fn get_name(&self, _cid: &str) -> anyhow::Result {
        bail!("nope")
    }
    fn get_block_by_cid(&self, _cid: &str) -> anyhow::Result {
        bail!("NullStorageProvider does not implement anything")
    }

    fn get_links_by_cid(&self, _cid: &str) -> anyhow::Result> {
        bail!("NullStorageProvider does not implement anything")
    }

    fn list_available_dags(&self) -> anyhow::Result> {
        bail!("NullStorageProvider does not implement anything")
    }

    fn name_dag(&self, _cid: &str, _file_name: &str) -> anyhow::Result<()> {
        bail!("NullStorageProvider does not implement anything")
    }

    fn get_missing_cid_blocks(&self, _cid: &str) -> anyhow::Result> {
        bail!("NullStorageProvider does not implement anything")
    }

    fn get_dag_blocks_by_window(
        &self,
        _cid: &str,
        _offset: u32,
        _window_size: u32,
    ) -> anyhow::Result> {
        bail!("NullStorageProvider does not implement anything")
    }

    fn get_all_dag_cids(
        &self,
        _cid: &str,
        _offset: Option,
        _window_size: Option,
    ) -> anyhow::Result> {
        bail!("NullStorageProvider does not implement anything")
    }

    fn get_all_dag_blocks(&self, _cid: &str) -> anyhow::Result> {
        bail!("NullStorageProvider does not implement anything")
    }

    // No storage => no garbage to collect.
    fn incremental_gc(&mut self) -> bool {
        false
    }

    // Nothing is ever stored, so nothing is ever present.
    fn has_cid(&self, _cid: &Cid) -> bool {
        false
    }

    // Acknowledgements are silently discarded.
    fn ack_cid(&self, _cid: &Cid) {}
}
74 |
--------------------------------------------------------------------------------
/docs/charts/sync.md:
--------------------------------------------------------------------------------
1 | ```mermaid
2 | sequenceDiagram
3 | participant G as Ground
4 | participant V as Vehicle
5 | Note over G: Import File
6 | Note left of G: Available CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli
7 | G ->> V: "Push" Send CIDs to Expect (& File Name)
8 | Note right of V: Available CIDs:
Missing CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli
9 | G ->> V: Send Block
10 | Note over V: Hash, store.
11 | Note over V: Parse as stem (fails - it's a leaf).
12 | Note right of V: Available CIDs:
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli
Missing CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza
13 | G --X V: Attempt to send blocks, packets dropped
14 | V ->> G: "Pull" Send CIDs for blocks to send/re-send
15 | G ->> V: Send Block (bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i)
16 | Note over V: Hash, store.
17 | Note over V: Parse as stem, passes - has 5 children.
18 | loop For each child CID
19 | Note over V: If already available, ignore.
20 | Note over V: Otherwise add to 'missing' & "Pull"
21 | end
22 | Note right of V: Available CIDs:
bafybeicbshh2atg556w77jzb5yl4e63fefisnutf32l7byzrteosqjhb6i (file.name)
bafkreignoc7kai7xkkqfmsv3n3ii2qwbiqhs4m7ctekokxql4fmv4jhuli
Missing CIDs:
bafkreietrw4mt6bmrs2y2pz66t6skabwusgsnjysou6s7xs2xub2qxfl6q
bafkreictl6rq27rf3wfet4ktm54xgtwifbqqruiv3jielv37hnaylwhxsa
bafkreiebc6dk2gxhjlp52ig5anzkxkxlyysg4nb25pib3if7ytacx4aqnq
bafkreicj2gaoz5lbgkazk4n7hhm3pm2ckivcvrwshqkbruztqji37zdjza
23 | loop Other CIDs in pull
24 | G ->> V: Send Blocks
25 | end
26 | ```
27 |
--------------------------------------------------------------------------------
/watcher/src/handler.rs:
--------------------------------------------------------------------------------
1 | use log::{debug, error, info, trace};
2 | use messages::{ApplicationAPI, Message};
3 | use notify::{event::ModifyKind, Event, EventKind};
4 | use std::path::Path;
5 | use std::time::{Duration, SystemTime};
6 | use transports::{Transport, UdpTransport};
7 |
/// Sends ImportFile API messages to a Myceli instance over UDP whenever the
/// watcher sees a relevant file.
pub(crate) struct Handler {
    // UDP transport bound to an ephemeral local port.
    trx: UdpTransport,
    // Myceli's listen address (from config), the destination for all sends.
    target_addr: String,
}
12 |
impl Handler {
    /// Build a handler with a fresh UDP socket (ephemeral local port) aimed
    /// at the configured Myceli listen address.
    pub fn new(cfg: &config::Config) -> Result {
        let trx = UdpTransport::new("0.0.0.0:0", cfg.mtu, cfg.chunk_transmit_throttle)?;
        let target_addr = cfg.listen_address.clone();
        Ok(Self { trx, target_addr })
    }

    /// notify callback: on data-modification events, wait for the file to
    /// settle, then ask Myceli to import it. All other event kinds are
    /// logged and ignored.
    pub fn handle_event(&self, event: notify::Result) {
        trace!("handle_event({:?})", &event);
        match event {
            Err(err) => {
                error!("FileSystem error: {:?}", err);
            }
            Ok(ev) => match ev.kind {
                EventKind::Modify(ModifyKind::Data(_)) => {
                    for p in ev.paths {
                        //Some of these events can occur while the file is still being modified
                        self.wait_for_modification_to_stop(&p).ok();
                        info!("File modified, import: {:?}", &p);
                        self.send(&p);
                    }
                }
                _ => debug!("Ignoring FileSystem event: {:?}", &ev),
            },
        }
    }
    /// Send an ImportFile request for `path`. Failures (non-UTF8 path,
    /// transport error) are logged rather than returned.
    pub fn send(&self, path: &Path) {
        let path = if let Some(p) = path.as_os_str().to_str() {
            p.to_owned()
        } else {
            error!("Path {:?} can't be turned into string?!", &path);
            return;
        };
        let m = ApplicationAPI::ImportFile { path };
        let m = Message::ApplicationAPI(m);
        match self.trx.send(m, &self.target_addr) {
            Ok(()) => debug!("Sent message to {}", &self.target_addr),
            Err(e) => error!("Error sending: {:?}", &e),
        }
    }
    /// Block until the file's mtime is at least MIN_AGE in the past, i.e.
    /// writes appear to have stopped. Returns any metadata I/O error.
    /// The subtraction below cannot underflow: the loop only continues while
    /// elapsed <= MIN_AGE (1s) < MAX_SLEEP (1.234s).
    fn wait_for_modification_to_stop(&self, p: &Path) -> std::io::Result<()> {
        const MIN_AGE: Duration = Duration::from_secs(1);
        const MAX_SLEEP: Duration = Duration::from_millis(1234);
        loop {
            let mdt = p.metadata()?.modified()?;
            let now = SystemTime::now();
            if mdt + MIN_AGE < now {
                return Ok(());
            }
            // duration_since errs if mdt is in the future (clock skew);
            // treat that as zero elapsed.
            let elapsed = now.duration_since(mdt).unwrap_or_default();
            std::thread::sleep(MAX_SLEEP - elapsed);
        }
    }
}
67 |
--------------------------------------------------------------------------------
/testing/local_test/timeout.killer.sh:
--------------------------------------------------------------------------------
#!/bin/bash -e

# Watchdog that times out stuck local-test scripts. It re-invokes itself in
# stages: first daemonized (detached from the calling script so it survives),
# then wrapped in sed so all of its output is prefixed "KILLER: ".
cd `dirname "${0}"`
# Brief stagger proportional to arg count (each re-invocation adds args).
sleep $#
parent=$(realpath /proc/${PPID}/exe)
# Stage 1: if not yet detached (few args, not the sed stage, parent is a real
# process and not systemd), re-launch ourselves in the background and exit.
if [ $# -lt 9 ] && [ "${1}" != sed ] && [ ${PPID} != 1 ] && ! grep -q systemd <<< "${parent}"
then
    echo daemonize killer "${@}"
    ( ./timeout.killer.sh ${PPID} "${@}" <&- 2>&1 & ) &
    exit
# Stage 2: re-run under sed to tag our output, then exit.
elif [ "${1}" != sed ]
then
    ( ./timeout.killer.sh sed "${@}" <&- 2>&1 ) 2>&1 | sed 's,^,KILLER: ,'
    exit
fi
# Remember our own mtime so the main loop can detect edits and re-exec.
mod=`stat --format=%Y timeout.killer.sh`
17 |
# Single-instance guard via timeout.killer.pid ("<pid> <timestamp>").
# Returns 0 (conflict: caller should back off) unless WE already own the
# pidfile, in which case returns 1. Exits outright if an older instance is
# still alive. Side effect: increments the outer loop's counter `t`.
conflict() {
    if ! [ -f timeout.killer.pid ]
    then
        # No pidfile: claim it with our pid and a bumped timestamp.
        echo "$$" $(( ++t )) > timeout.killer.pid
    elif read pid ot < timeout.killer.pid
    then
        if [ "${pid}" = $$ ]
        then
            # We own the pidfile: no conflict.
            return 1
        elif [ -d "/proc/${pid}/" ] && [ ${ot} -ge ${t} ]
        then
            # A live instance with an equal-or-newer timestamp wins; hand
            # the pidfile over (timestamp 0) and bow out.
            echo 'Older timeout still running'
            echo $$ 0 > timeout.killer.pid
            exit 0
        else
            # Stale owner: clear the pidfile and any leftover state files.
            rm -v timeout.killer.pid
            rm -v "${o}"/running.* || true
        fi
    else
        # Unreadable pidfile: discard it.
        rm -v timeout.killer.pid
    fi
    return 0
}
41 |
# Main watchdog loop. Each pass sleeps an ever-growing interval (t starts at 0
# on first use, so the first sleep is 9s) and advances a small state machine
# over files in "${o}" (output dir - NOTE(review): ${o} is not set in this
# script; presumably exported by the sourcing test environment - confirm).
# A test process is killed only after its process tree has looked identical
# across several consecutive observations.
while sleep $(( t += 9 ))
do
    # Self-update check: if this script's mtime changed, re-exec the new copy.
    if [ ${mod} -lt `stat --format=%Y timeout.killer.sh` ]
    then
        sleep $(( ++t ))
        ls -lth timeout.killer.sh
        echo -n "${mod}" vs ' '
        stat --format=%Y timeout.killer.sh
        echo 'timeout.killer.sh modified, recurse'
        sleep $(( ++t ))
        ./timeout.killer.sh
        exit
    fi
    # Another instance owns the pidfile: wait and retry.
    if conflict
    then
        sleep $(( ++t ))
        continue
    fi
    # State 1: snapshot which test scripts / binaries currently hold files.
    if ! [ -f "${o}"/running.scripts.now ]
    then
        fuser *.case.sh ../../???/{myceli,controller,hyphae,watcher} > "${o}"/running.scripts.now 2>/dev/null
        rm -v "${o}"/running.tree.* 2>/dev/null || true
    # State 2: if the snapshot changed since last pass, keep waiting.
    elif ! diff "${o}"/running.scripts.{now,old} 2>/dev/null
    then
        mv -v "${o}"/running.scripts.{now,old}
    # Snapshot is fresher than our pidfile claim: restart the cycle.
    elif [ "${o}"/running.scripts.old -nt timeout.killer.pid ]
    then
        rm -v timeout.killer.pid
    # State 3: record each stuck pid's process tree (digits stripped so
    # changing pids/counters don't defeat the comparison).
    elif ! [ -f "${o}"/running.tree.new ]
    then
        for pid in `cat "${o}"/running.scripts.old`
        do
            pstree --arguments "${pid}" | tr -d '[:digit:]' || true
            sleep $(( ++t ))
        done > "${o}"/running.tree.new
    # State 4: tree still changing => progress is being made, keep waiting.
    elif ! diff "${o}"/running.tree.{new,old}
    then
        mv -v "${o}"/running.tree.{new,old}
    elif [ "${o}"/running.tree.old -nt timeout.killer.pid ]
    then
        rm -v timeout.killer.pid "${o}"/running.*.new
    # State 5: tree is static - the first recorded pid is stuck; kill it.
    elif read apid others < "${o}"/running.scripts.old
    then
        echo -e "\n \t # \t WARNING \t # "
        echo -e "\n \t # \t TIMING OUT PID ${apid} \t #"
        ps -f | grep "${apid}"
        kill "${apid}"
        rm -v "${o}"/running.scripts.old
    else
        # Nothing left to watch: exit the watchdog.
        break
    fi
done
94 |
--------------------------------------------------------------------------------
/local-dev-environment/raspberry-pi/RFM69HCW/service.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import time
3 | import adafruit_rfm69
4 | import busio
5 | from digitalio import DigitalInOut, Direction, Pull
6 | import board
7 | import threading
8 | import queue
9 | import argparse
10 | import ipaddress
11 |
12 | TIMEOUT = 0.1
13 | DELAY = 0.25
14 |
15 | radio_lock = threading.Lock()
16 | radio_write_queue = queue.Queue()
17 |
def str_to_addr(addr_str):
    """Parse a ``host:port`` string into a ``(host, port)`` tuple.

    The host is returned as a string and the port as an int; extra
    colon-separated fields beyond the first two are ignored.
    """
    pieces = addr_str.split(':')
    return (pieces[0], int(pieces[1]))
21 |
22 |
def radio_thread_fn(radio_handle):
    """Worker thread: forever drain the radio write queue, transmitting each
    payload over the radio while holding the radio lock.

    Uses a blocking ``get(timeout=...)`` instead of the original
    ``empty()``/``get()`` polling pair, which both busy-waited and had a
    check-then-act race (the queue could be drained between the ``empty()``
    check and the ``get()``). Pacing is preserved: we still pause DELAY
    after each send, and block at most DELAY while idle.
    """
    while True:
        try:
            data = radio_write_queue.get(timeout=DELAY)
        except queue.Empty:
            continue
        print(f'Found data {len(data)} for radio to write, sending')
        # Hold the lock only for the actual radio transmission.
        with radio_lock:
            radio_handle.send(bytes(data))
        time.sleep(DELAY)
32 |
33 |
def main_fn():
    """Bridge UDP <-> RFM69 radio.

    Receives UDP datagrams on the uplink address and queues them for the
    radio thread to transmit; forwards packets received from the radio to
    the downlink address over the same socket.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('uplink_address')
    parser.add_argument('downlink_address')
    args = parser.parse_args()

    uplink_addr = str_to_addr(args.uplink_address)
    downlink_addr = str_to_addr(args.downlink_address)

    # Configure UDP socket (non-blocking-ish: short timeout so the loop can
    # alternate between the socket and the radio)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(uplink_addr)
    sock.settimeout(TIMEOUT)

    # Configure Radio Interface
    # NOTE(review): pins CE1/D25 and 915.0 MHz look hard-coded for a
    # specific RFM69HCW wiring - confirm for other boards/regions.
    CS = DigitalInOut(board.CE1)
    RESET = DigitalInOut(board.D25)
    spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    radio = adafruit_rfm69.RFM69(spi, CS, RESET, 915.0)

    # Radio writes happen on a separate thread fed by radio_write_queue.
    radio_thread = threading.Thread(target=radio_thread_fn, args=(radio,))
    radio_thread.start()

    print(f'Listening for UDP traffic on {args.uplink_address}')
    print(f'Downlinking radio data to {args.downlink_address}')

    while True:
        try:
            # First check if we have any incoming UDP traffic that needs sending out
            udp_data = sock.recv(1024)
            # If we received any UDP data, then send over radio interface
            if udp_data != None:
                print(f'Got UDP data {len(udp_data)}, queueing up')
                radio_write_queue.put(udp_data)
        except (Exception):
            # Expected on socket timeout (settimeout above) when no UDP data
            # arrived. NOTE(review): this also swallows every other error,
            # including unrelated socket failures - consider narrowing.
            pass

        # Now we check radio interface for any incoming packets
        radio_lock.acquire()
        radio_data = radio.receive()
        radio_lock.release()
        # If we received a radio packet, then pass along UDP interface
        if radio_data != None:
            print(f'Got radio data {len(radio_data)}, sending along')
            sock.sendto(radio_data, downlink_addr)

        time.sleep(0.01)
81 |
82 |
83 | if __name__ == "__main__":
84 | main_fn()
85 |
--------------------------------------------------------------------------------
/myceli/src/main.rs:
--------------------------------------------------------------------------------
1 | use anyhow::Result;
2 | use config::Config;
3 | use log::{info, warn};
4 | use messages::Message;
5 | use myceli::listener::Listener;
6 | use std::{net::ToSocketAddrs, path::PathBuf, str::FromStr, sync::Arc, time::Duration};
7 | use transports::UdpTransport;
8 |
9 | #[cfg(all(not(feature = "sqlite"), not(feature = "files")))]
10 | compile_error! {"Myceli built without a local storage implementation will not function. Select a feature, recommended: either big or small"}
11 |
12 | #[cfg(all(not(feature = "proto_ship"), not(feature = "proto_sync")))]
13 | compile_error! {"Select a protocol feature, e.g. proto_all, proto_sync, or proto_ship"}
14 |
/// Myceli entry point: parse config, set up storage and the UDP transport,
/// then run the listener until it exits.
fn main() -> Result<()> {
    // Logging backend is feature-selected.
    #[cfg(feature = "good_log")]
    env_logger::init();
    #[cfg(feature = "small_log")]
    smalog::init();

    #[cfg(feature = "proto_sync")]
    info!("Sync Protocol enabled");
    #[cfg(feature = "proto_ship")]
    info!("Ship(per) Protocol enabled");
    // The config path is the first CLI argument that names an existing file
    // (lets flags like --show-config appear in any position).
    let config_path = std::env::args()
        .skip(1)
        .find(|a| PathBuf::from_str(a).map(|p| p.is_file()).unwrap_or(false));
    let cfg = Config::parse(config_path, &Message::fit_size).expect("Failed to parse config");
    // --show-config: print the effective (merged + defaulted) config and exit.
    if std::env::args().any(|a| a == "--show-config") {
        println!("{}", toml::to_string(&cfg).unwrap());
        return Ok(());
    }

    // Resolve the listen address up front so a bad address fails fast;
    // only the first resolution result is used.
    let mut resolved_listen_addr = cfg
        .listen_address
        .to_socket_addrs()
        .expect("Unable to resolve socket address");
    let resolved_listen_addr = resolved_listen_addr
        .next()
        .expect("Unable to resolve socket addr");

    std::fs::create_dir_all(&cfg.storage_path).expect("Failed to create storage dir");

    let db_path = cfg.storage_path.clone();
    // Config expresses disk usage in KiB; convert to bytes here.
    let disk_bytes = cfg.disk_usage * 1024;
    // Read timeout drives the listener's periodic work; clamped to 10ms..1h.
    let timeout = Duration::from_millis(cfg.chatter_ms.clamp(10, 60 * 60 * 1000).into());
    let mut udp_transport =
        UdpTransport::new(&cfg.listen_address, cfg.mtu, cfg.chunk_transmit_throttle)
            .expect("Failed to create udp transport");
    udp_transport
        .set_read_timeout(Some(timeout))
        .expect("Failed to set timeout");
    // Emitted for test harnesses that need to signal/kill this process.
    println!("pid={}", std::process::id());
    let mut listener = Listener::new(
        &resolved_listen_addr,
        &db_path,
        Arc::new(udp_transport),
        cfg.block_size
            .expect("Block size default should've been calculated."),
        cfg.radio_address,
        disk_bytes,
        cfg.mtu,
    )
    .expect("Listener creation failed");
    // Blocks for the life of the process.
    listener
        .start(
            cfg.retry_timeout_duration,
            cfg.window_size,
            cfg.shipper_throttle_packet_delay_ms,
        )
        .expect("Error encountered in listener operation");
    println!("Exiting");
    warn!("Exiting");
    Ok(())
}
76 |
--------------------------------------------------------------------------------
/messages/src/sync.rs:
--------------------------------------------------------------------------------
1 | use crate::cid_list;
2 | use cid::multihash;
3 | use cid::multihash::Hasher;
4 | use parity_scale_codec_derive::{Decode as ParityDecode, Encode as ParityEncode};
5 | use serde::Serialize;
6 | use std::fmt::{Debug, Formatter};
7 |
// Width, in bytes, of the corruption-detection hash carried in PushMessage.
// Blake2s-128 (used by PushMessage::do_hash) produces a 16-byte digest.
const HASH_SIZE: usize = 16;
// Wire-size overhead a PushMessage adds beyond its CID list: the hash plus
// one more byte — presumably a length/compact prefix; TODO confirm against
// the SCALE encoding of PushMessage.
pub const PUSH_OVERHEAD: usize = HASH_SIZE + 1;

// Fixed-size digest over a CID list, used to detect corrupted pushes.
pub type HashCheck = [u8; HASH_SIZE];
12 |
13 | #[derive(Clone, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)]
14 | pub enum SyncMessage {
15 | Push(PushMessage), //I have these CIDs, you may pull them.
16 | Pull(cid_list::CompactList), //I do not have these CIDs, maybe you could send their blocks to me
17 | Ack(cid_list::CompactList), //I *also* have these CIDs, stop pushing them
18 | Block(Vec), //Here's the data for a block.
19 | }
20 |
21 | impl SyncMessage {
22 | pub fn name(&self) -> &'static str {
23 | match &self {
24 | Self::Push(_) => "Push",
25 | Self::Pull(_) => "Pull",
26 | Self::Ack(_) => "Ack",
27 | Self::Block(_) => "Block",
28 | }
29 | }
30 | }
31 |
32 | impl Debug for SyncMessage {
33 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
34 | match &self {
35 | Self::Push(x) => write!(f, "Push({x:?})"),
36 | Self::Pull(x) => write!(f, "Pull({x:?})"),
37 | Self::Ack(x) => write!(f, "Ack({x:?})"),
38 | Self::Block(x) => write!(f, "Block({}B)", x.len()),
39 | }
40 | }
41 | }
42 |
/// Advertises a list of CIDs the sender holds and is willing to serve.
// Field order is part of the SCALE wire encoding — do not reorder.
#[derive(Clone, ParityEncode, ParityDecode, Serialize, Eq, PartialEq)]
pub struct PushMessage {
    // Human-readable name associated with the first CID (may be empty).
    pub first_cid_name: String,
    // The CIDs being advertised.
    pub cids: cid_list::CompactList,
    //A corrupted pull has a modest negative impact, but a corrupted push can begin a search for a
    // CID that points to something which may never have actually existed in the first place.
    // Adding this hashing of the CIDs to detect corruption.
    hash: HashCheck,
}
52 | impl PushMessage {
53 | pub fn new(cids: cid_list::CompactList, first_cid_name: String) -> Self {
54 | let hash = Self::do_hash(&cids);
55 | Self {
56 | first_cid_name,
57 | cids,
58 | hash,
59 | }
60 | }
61 | pub fn check(&self) -> bool {
62 | self.hash == Self::do_hash(&self.cids)
63 | }
64 | fn do_hash(cids: &cid_list::CompactList) -> HashCheck {
65 | let mut hasher = multihash::Blake2s128::default();
66 | for d in cids {
67 | hasher.update(&d.to_bytes());
68 | }
69 | let digest_slice = hasher.finalize();
70 | digest_slice.try_into().unwrap()
71 | }
72 | }
73 | impl Debug for PushMessage {
74 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
75 | write!(f, "PushMsg(")?;
76 | if !self.first_cid_name.is_empty() {
77 | write!(f, "{}=", &self.first_cid_name)?;
78 | }
79 | write!(f, "{:?})", &self.cids)
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/local-dev-environment/desktop/rfm69-service/src/main.rs:
--------------------------------------------------------------------------------
1 | use clap::Parser;
2 | use log::info;
3 | use std::io::{Read, Write};
4 | use std::net::SocketAddr;
5 | use std::sync::mpsc;
6 | use std::sync::mpsc::{Receiver, Sender};
7 | use std::sync::{Arc, Mutex};
8 | use std::thread;
9 | use std::time::Duration;
10 | use tokio::net::UdpSocket;
11 | use tokio_serial::SerialPortBuilderExt;
12 |
// Command-line arguments for the RFM69 serial<->UDP bridge.
// NOTE: the `///` doc comments below double as clap's `--help` text, so they
// are left untouched.
#[derive(Parser, Debug)]
struct Args {
    /// Uplink Address (IP:Port)
    #[arg(short, long)]
    uplink_address: String,

    /// Downlink Address (IP:Port)
    #[arg(short, long)]
    downlink_address: String,

    /// Serial device
    #[arg(short, long)]
    serial_device: String,
}
27 |
28 | #[tokio::main]
29 | async fn main() -> tokio_serial::Result<()> {
30 | env_logger::init();
31 |
32 | let args = Args::parse();
33 |
34 | let uplink_addr: SocketAddr = args
35 | .uplink_address
36 | .parse()
37 | .expect("Failed to parse uplink address");
38 | let downlink_addr: SocketAddr = args
39 | .downlink_address
40 | .parse()
41 | .expect("Failed to parse downlink address");
42 |
43 | let socket = UdpSocket::bind(&uplink_addr).await?;
44 | info!("UDP Uplink on: {}", args.uplink_address);
45 | info!("UPD Downlink on: {}", args.downlink_address);
46 | info!("Serial radio on: {}", args.serial_device);
47 |
48 | let (serial_queue_writer, serial_queue_reader): (Sender>, Receiver>) =
49 | mpsc::channel();
50 |
51 | let mut serial_stream = tokio_serial::new(args.serial_device, 115200).open_native_async()?;
52 | serial_stream
53 | .set_exclusive(false)
54 | .expect("Failed to set serial to exclusive");
55 |
56 | let wrapped_serial = Arc::new(Mutex::new(serial_stream));
57 |
58 | let mut buf = vec![0; 1024];
59 |
60 | let thread_serial = Arc::clone(&wrapped_serial);
61 |
62 | thread::spawn(move || loop {
63 | if let Ok(data) = serial_queue_reader.recv() {
64 | info!("Found {} bytes to send over serial", data.len());
65 | let mut ser = thread_serial.lock().unwrap();
66 | let _ = ser.write(&data).unwrap();
67 | }
68 | thread::sleep(Duration::from_millis(250));
69 | });
70 |
71 | let main_serial = Arc::clone(&wrapped_serial);
72 |
73 | loop {
74 | if let Ok(len) = socket.try_recv(&mut buf) {
75 | if len > 0 {
76 | info!("Received {len} bytes over udp, queueing for serial");
77 | serial_queue_writer
78 | .send(buf[..len].to_vec())
79 | .expect("Failed to send??");
80 | }
81 | }
82 |
83 | let len = {
84 | let mut ser = main_serial.lock().unwrap();
85 | ser.read(&mut buf)
86 | };
87 | if let Ok(serial_len) = len {
88 | if serial_len > 0 {
89 | info!("Received {serial_len} bytes over serial, sending over udp");
90 | socket.send_to(&buf[..serial_len], downlink_addr).await?;
91 | }
92 | }
93 |
94 | thread::sleep(Duration::from_millis(1));
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
/testing/local_test/original.case.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | source `dirname "${0}"`/setup.env
3 | configure 9876543
4 |
5 | start_myceli sat.all
6 | start_myceli gnd
7 |
8 | echo -e '\n\n# Test Case 0: Print Version Info\n'
9 | controller `port_for gnd` --output-format=json request-version
10 | jq . ctl/output.log
11 | controller `port_for sat.all` --output-format=json request-version
12 | jq . ctl/output.log
13 |
14 | echo -e '\n# Test Case - Verify Myceli Instances Alive'
15 |
16 | echo '1. Using controller software, send the `RequestAvailableBlocks` command to the `myceli` ground instance.'
17 | controller 8765 request-available-blocks
18 | echo '- This step passes if an `AvailableBlocks` response is received. Any other response / no response is a failure.'
19 | check_log 'Received.*AvailableBlocks' ctl
20 | echo '1. Using controller software, send the `RequestAvailableBlocks` command to the `myceli` space instance.'
21 | controller 8764 request-available-blocks
22 | echo '- This step passes if an `AvailableBlocks` response is received. Any other response / no response is a failure.'
23 | check_log 'Received.*AvailableBlocks' ctl
24 |
25 | echo -e '\n# Test Case - Transmit an IPFS File (Ground to Space)'
26 |
27 | date > "${o}/known_good_path"
28 |
29 | echo 'Using the controller software, send the ImportFile command to the myceli ground instance with a known good path for the one-pass payload file.'
30 | controller 8765 import-file "${o}/known_good_path"
31 | echo 'This step passes if an FileImported response with CID is received. Any other response / no response is a failure.'
32 | check_log FileImported ctl
33 |
34 | echo ' ...with the CID obtained from the FileImported response... '
35 | export cid=`grep 'Received.response:.*FileImported' ctl/controller.log | tail -n 1 | cut -d '"' -f 4`
36 | echo "... cid=${cid} ...and with the network address of the ground-to-space radio link... "
37 | echo 'send the TransmitDag command to the myceli ground instance'
38 | g2s
39 |
40 | echo 'controller software, send the ValidateDag command to the myceli space instance'
41 | controller 8764 validate-dag "${cid}"
42 | echo 'This step passes if an ValidateDagResponse response with true. Any other response / no response is a failure.'
43 | check_log 'ValidateDagResponse.*Dag.is.valid' ctl
44 |
45 | echo 'controller software, send the ExportDag command to the myceli space'
46 | controller 8764 export-dag "${cid}" "${o}/exported"
47 | sleep 1
48 | echo 'This step passes if the controller is able to correctly write a file to the given file path.'
49 | diff "${o}/known_good_path" "${o}/exported"
50 |
51 | echo -e '\n# Test Case - Transmit Back & Forth, and Export File with IPFS'
52 |
53 | echo `uptime` `uname -a` > "${o}/imported2"
54 | echo 'controller software, send the ImportFile command to the myceli ground instance with a known good path for the one-pass payload file.'
55 | controller 8765 import-file "${o}/imported2"
56 | echo 'This step passes if an FileImported response with CID is received. Any other response / no response is a failure. ...'
57 | check_log Received.*FileImported.*cid ctl
58 |
59 | export cid=`grep Received.*FileImported ctl/controller.log | tail -n 1 | cut -d '"' -f 4`
60 | echo "cid=${cid}"
61 |
62 | echo 'Using the controller software, send the TransmitDag command to the myceli ground instance with the CID obtained from the FileImported response and with the network address of the ground-to-space radio link.'
63 | g2s
64 | echo 'controller software, send the ValidateDag command to the myceli space'
65 | controller 8764 validate-dag "${cid}"
66 | check_log 'ValidateDagResponse.*Dag.is.valid' ctl
67 |
68 | echo 'Shutdown the myceli ground instance'
69 | kill_myceli gnd
70 |
71 | echo ', delete the storage database'
72 | rm gnd/storage.db
73 |
74 | echo ', and start the myceli ground instance again.'
75 | start_myceli gnd
76 |
77 | echo 'controller software, send the TransmitDag command to the myceli space'
78 | s2g
79 | sleep 1
80 |
81 | echo 'controller software, send the ValidateDag command to the myceli ground'
82 | controller 8765 validate-dag "${cid}"
83 | check_log 'ValidateDagResponse.*Dag.is.valid' ctl
84 |
85 | echo 'controller software, send the ExportDag command to the myceli ground'
86 | controller 8765 export-dag "${cid}" "${o}/exported2"
87 |
88 | diff "${o}/"{im,ex}ported2
--------------------------------------------------------------------------------
/testing/local_test/watcher.case.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | source `dirname "${0}"`/setup.env
3 |
4 |
# wait_for_sync <suffix> <side> <watcher-log-pattern>
#   $1 - filename suffix; the file under test is named ${2}${1}
#   $2 - side (d) whose watcher imported the file; the other side (b) must
#        receive it via sync
#   $3 - pattern expected in side d's watcher log for this file
# Drives one file through: watcher import -> sync push -> DAG listed and
# valid on the other side -> export there -> byte-for-byte diff.
wait_for_sync() {
    d=${2}
    b=`other_side ${d}`
    sleep 1
    # Retried once because the watcher log line may lag the first check.
    check_log "${3}.*${d}${1}" ${d} watcher || check_log "${3}.*${d}${1}" ${d} watcher
    check_log "Imported.path.*${d}${1}" ${d} myceli
    # Both sides must have discovered each other as sync-capable peers.
    check_log "Remote.(127.0.0.1|localhost):87...reported.*supports.sync" ${d} myceli
    check_log "Remote.(127.0.0.1|localhost):87...reported.*supports.sync" ${b} myceli
    check_log "Sending.Sync.Push" ${d} myceli
    sleep 5
    check_log "Sync.:handle.Push.PushMsg.${d}${1}" ${b} myceli
    p=`port_for ${b}`
    # Marker file: removed only once the DAG validates on the receiving side.
    touch ${o}/notfound
    # Poll with growing sleeps until the receiving side lists the file.
    for i in {0..9}1
    do
        sleep $i
        controller ${p} --output-format json list-files
        if jq ".AvailableDags.dags[]" ctl/output.log 2>/dev/null | grep -F --color=always "${d}${1}"
        then
            break
        fi
    done
    export cid=`jq -r ".AvailableDags.dags[] | select( .filename == \"${d}${1}\" ).cid" ctl/output.log`
    echo "filename=${d}${1};CID=${cid}"
    if [ "${cid}" = '' ]
    then
        jq . ctl/output.log
        exit 32
    fi
    # Poll until the synced DAG validates on the receiving side.
    for i in {0..9}1
    do
        controller ${p} --output-format json validate-dag ${cid}
        if jq .ValidateDagResponse.result ctl/output.log 2>/dev/null | grep -F --color=always 'Dag is valid'
        then
            cat ctl/output.log
            rm ${o}/notfound
            break
        fi
    done
    if [ -f ${o}/notfound ]
    then
        echo "DAG for ${d}${1} never finished syncing."
        kill_all
        exit 5
    fi
    e=`pwd`/${b}/synced.${d}${1}
    echo "${p} Exporting ${cid} to ${e}"
    # Export may fail transiently while blocks are still being written out.
    for i in {0..99}
    do
        if controller ${p} export-dag ${cid} ${e}
        then
            break
        else
            echo "Trouble exporting... could be temporary."
            sleep $i
        fi
    done
    # Wait for the exported file to appear and for writing to settle.
    for i in {1..99}
    do
        sleep $i
        if [ ! -f ${e} ]
        then
            sleep $i
            echo "Waiting for ${e} to be exported."
            continue
        fi
        # fuser succeeds while the file is open; stat/date compare mtime
        # against one second ago.
        if fuser "${e}" || [ `stat --format=%Y ${e}` -lt `date -d '1 second ago' +%s` ]
        then
            echo "Waiting for writing to finish on ${e}"
            break
        fi
    done
    set -x
    diff ${b}/synced.${d}${1} ${d}/watched/${d}${1}
    set +x
}
81 |
# Run the watcher suite once per satellite flavor that syncs on its own.
for sd in sat.{all,sync} # Not ship as it won't sync on its own
do
    export sd

    echo -e "\n\n# Test suite: watcher ${sd}"

    # Start from a clean slate: no processes, logs, watched dirs, or storage.
    kill_all
    rm */*.log
    for rd in {gnd,sat.{all,sync,ship}}/{watched,storage.db,blocks,cids,names}
    do
        (
            rm -r "${rd}" 2>/dev/null || true
        )
    done


    # Seed each watched dir with a file that exists before the watcher starts,
    # staggering mtimes by one second.
    mkdir -p gnd/watched ${sd}/watched/
    date > gnd/watched/gnd.prexisting.txt
    date -d 'next second' > ${sd}/watched/${sd}.prexisting.txt
    configure 7
    start_myceli ${sd}
    start_myceli gnd
    export RUST_LOG=debug
    start watcher gnd config.toml
    start watcher ${sd} config.toml
    sleep 9
    echo -e "\n ## Test: watcher discovers pre-existing file ${sd}\n"
    wait_for_sync .prexisting.txt gnd 'Discovered path in'
    sleep 1
    wait_for_sync .prexisting.txt ${sd} 'Discovered.path in'

    echo -e '\n ## Test: watcher picks up moved-in file\n'
    # Build each file outside the watched dir, then mv it in atomically.
    for s in gnd ${sd}
    do
        echo 'begin' > ${o}/${s}.big.txt
        yes $s `date` | head -c 2048 >> ${o}/${s}.big.txt
        echo -e '\nend' >> ${o}/${s}.big.txt
        mv ${o}/${s}.big.txt ${s}/watched/
        sleep 1
    done
    wait_for_sync .big.txt ${sd} 'File modified, import:'
    wait_for_sync .big.txt gnd 'File modified, import:'

    echo -e '\n ## Test: watcher picks up file written in-situ\n'
    # Append directly inside the watched dir (no rename event this time).
    for s in gnd ${sd}
    do
        yes $s `date` | head -c 2048 >> ${s}/watched/${s}.written.txt
        sleep 1
    done
    echo " ### From ${sd} to ground ###"
    wait_for_sync .written.txt ${sd} 'File modified, import:'
    echo " ### From ground to ${sd} ###"
    wait_for_sync .written.txt gnd "File modified, import:"
done
136 |
--------------------------------------------------------------------------------
/DESIGN.md:
--------------------------------------------------------------------------------
1 | This is an overview of the current data transfer protocol implemented in the `block-streamer/` application and a few open questions for the future.
2 |
3 | ## Current Design
4 |
5 | The current design and implementation of the `block-streamer/` is intended to be a very simple way to transmit a file in IPFS across a radio link and reassemble using block data.
6 |
7 | The file to be transmitted is read into 50 (tbd configurable) byte blocks. Each block consists of a CID, data, and links to other CIDs (if a root node). Each block is serialized into one binary blob, which is broken up into 40 byte (tbd configurable) chunks. Each chunk consists of a CID marker (first 4 bytes of CID), a chunk offset, and data. A header message consisting of the block CID is transmitted to the receiver first, followed by the chunks of the block's data+links, which are then reassembled in order. The current implementation is able to handle a dag with depth of two and can reassemble blocks sent out of order, but it can't yet handle chunks sent out of order.
8 |
9 | *Current magic numbers and CID marker are placeholders to get things working, not final decisions.*
10 |
11 | *Why not to use the CAR transport around blocks?*
12 |
In this initial implementation the CAR transport is not used. The reasoning was that this IPFS implementation should be designed for exchanging data over constrained communications links. This means it is likely that blocks will be transmitted individually, or even broken up into smaller chunks. There did not seem to be an immediate advantage to packaging these blocks up into a CAR, only to break that CAR up again into smaller chunks for transmission, when the blocks themselves could be transmitted as-is. However the CAR transport may still prove to be useful in this system in the future.
14 |
*Why we decided to chunk blocks (hash+data) down to payload size*
16 |
17 | The [lab radio hardware](https://www.adafruit.com/product/3076) currently used in developing this system has a [strict payload size limit of 60 bytes](https://github.com/adafruit/RadioHead/blob/master/RH_RF69.h#L346-L347). While this radio may be more restrictive than typical satellite radios, it seems prudent to work under stricter requirements to ensure this system can scale both up and down to different payload limits. If sending individual 60-byte blocks the payload is already mostly consumed by the CID (36 bytes). This 60% overhead is not exactly efficient, so the decision was made to break blocks down into chunks which contain a CID marker (4 bytes), and a chunk offset (2 bytes), and a data blob, minimizing overhead to improve efficiency.
18 |
19 | ## Future Design Decisions
20 |
21 | *Are there existing UDP data transfer protocols we can borrow from or use as-is?*
22 |
23 | The current protocol for chunking/sending/assembling blocks was intentionally made simple to better understand the block transmission problem. It is very possible that an existing protocol built on UDP may provide the necessary chunking functionality, or at least functional pieces which can be built on.
24 |
25 | Existing protocols which should be further investigated:
26 | - [UDT](https://en.wikipedia.org/wiki/UDP-based_Data_Transfer_Protocol)
27 | - [QUIC](https://www.chromium.org/quic/)
28 | - [CoAP](https://en.wikipedia.org/wiki/Constrained_Application_Protocol)
29 |
30 | *How should it handle specific data requests?*
31 |
32 | A crucial part of this system will be correctly handling the transmission of a file across multiple communications passes, and dealing with lossy communication links, so the ability to request specific pieces of a DAG will be required. There are a number of different methods for specifying these pieces, such as by CID, with bitmasks, bloom filters, and sub-graphs. This decision will likely include a simple proof of concept implementing individual CID requests, followed by an analysis of the tradeoffs of other specification methods.
33 |
34 | *Formal protocol messages*
35 |
36 | The current implementation is a very simple one-way stream of block chunks. The future functional system will need to implement a formalized protocol with defined messages which allow for interactions such as requesting a specific CID or indicating that a CID has been received correctly. These will likely be created as required when implementing additional protocol functionality.
--------------------------------------------------------------------------------
/ipfs-unixfs/src/chunker.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | fmt::{Debug, Display},
3 | io,
4 | pin::Pin,
5 | str::FromStr,
6 | task,
7 | };
8 |
9 | use anyhow::{anyhow, Context};
10 | use bytes::Bytes;
11 | use futures::{stream::BoxStream, Stream};
12 | use tokio::io::AsyncRead;
13 |
14 | mod fixed;
15 | mod rabin;
16 |
17 | /// Chunks are limited to 1MiB by default
18 | pub const DEFAULT_CHUNK_SIZE_LIMIT: usize = 1024 * 1024;
19 |
20 | pub use self::{
21 | fixed::{Fixed, DEFAULT_CHUNKS_SIZE},
22 | rabin::Rabin,
23 | };
24 |
25 | #[derive(Debug, PartialEq, Eq, Clone)]
26 | pub enum Chunker {
27 | Fixed(Fixed),
28 | Rabin(Box),
29 | }
30 |
31 | impl Display for Chunker {
32 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
33 | match self {
34 | Self::Fixed(c) => write!(f, "Chunker::Fixed({})", c.chunk_size),
35 | Self::Rabin(_) => write!(f, "Chunker::Rabin"),
36 | }
37 | }
38 | }
39 |
/// Chunker configuration.
///
/// Parsed from strings like `"fixed-1024"` or `"rabin"` (see `FromStr`) and
/// converted into a runnable [`Chunker`] via `From`.
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
pub enum ChunkerConfig {
    /// Fixed sized chunker. The payload is the chunk size in bytes.
    Fixed(usize),
    /// Rabin chunker.
    Rabin,
}
48 |
49 | impl Display for ChunkerConfig {
50 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
51 | match self {
52 | Self::Fixed(chunk_size) => write!(f, "fixed-{chunk_size}"),
53 | Self::Rabin => write!(f, "rabin"),
54 | }
55 | }
56 | }
57 |
58 | impl FromStr for ChunkerConfig {
59 | type Err = anyhow::Error;
60 |
61 | fn from_str(s: &str) -> Result {
62 | if s == "rabin" {
63 | return Ok(ChunkerConfig::Rabin);
64 | }
65 |
66 | if let Some(rest) = s.strip_prefix("fixed") {
67 | if rest.is_empty() {
68 | return Ok(ChunkerConfig::Fixed(DEFAULT_CHUNKS_SIZE));
69 | }
70 |
71 | if let Some(rest) = rest.strip_prefix('-') {
72 | let chunk_size: usize = rest.parse().context("invalid chunk size")?;
73 | if chunk_size > DEFAULT_CHUNK_SIZE_LIMIT {
74 | return Err(anyhow!("chunk size too large"));
75 | }
76 |
77 | return Ok(ChunkerConfig::Fixed(chunk_size));
78 | }
79 | }
80 |
81 | Err(anyhow!("unknown chunker: {}", s))
82 | }
83 | }
84 |
85 | impl From for Chunker {
86 | fn from(cfg: ChunkerConfig) -> Self {
87 | match cfg {
88 | ChunkerConfig::Fixed(chunk_size) => Chunker::Fixed(Fixed::new(chunk_size)),
89 | ChunkerConfig::Rabin => Chunker::Rabin(Box::default()),
90 | }
91 | }
92 | }
93 |
94 | pub enum ChunkerStream<'a> {
95 | Fixed(BoxStream<'a, io::Result>),
96 | Rabin(BoxStream<'a, io::Result>),
97 | }
98 |
99 | impl<'a> Debug for ChunkerStream<'a> {
100 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
101 | match self {
102 | Self::Fixed(_) => write!(f, "Fixed(impl Stream- )"),
103 | Self::Rabin(_) => write!(f, "Rabin(impl Stream
- )"),
104 | }
105 | }
106 | }
107 |
108 | impl<'a> Stream for ChunkerStream<'a> {
109 | type Item = io::Result;
110 |
111 | fn poll_next(
112 | mut self: Pin<&mut Self>,
113 | cx: &mut task::Context<'_>,
114 | ) -> task::Poll