├── .cargo
└── config.toml
├── .githooks
└── pre-commit
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
├── dependabot.yml
└── workflows
│ ├── release-plz.yml
│ ├── release.yml
│ ├── rust-clippy.yml
│ ├── rust-fmt.yml
│ └── rust.yml
├── .gitignore
├── .release-plz.toml
├── Cargo.toml
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── bin
├── Cargo.toml
├── deployments
│ └── systemd
│ │ ├── atm0s-sdn-node.service
│ │ ├── atm0s-sdn-node.sh
│ │ ├── deploy.sh
│ │ ├── remove.sh
│ │ ├── servers.sh.templ
│ │ ├── test_get_logs.sh
│ │ ├── test_install.sh
│ │ ├── test_remove_logs.sh
│ │ ├── test_run.sh
│ │ └── test_view_logs.sh
├── public
│ ├── app.js
│ └── index.html
├── src
│ └── main.rs
├── start_agent.sh
└── start_collector.sh
├── deny.toml
├── docs
├── imgs
│ ├── dht.drawio.svg
│ ├── flow.excalidraw.png
│ ├── overlay.drawio.svg
│ ├── pubsub-relay.drawio.svg
│ ├── pubsub-relay2.drawio.svg
│ └── visualization.png
└── smart_routing.md
├── examples
├── .gitignore
├── Cargo.toml
├── quic-tunnel
│ ├── Cargo.toml
│ ├── README.md
│ └── src
│ │ ├── main.rs
│ │ ├── sdn.rs
│ │ └── vnet
│ │ ├── mod.rs
│ │ └── socket.rs
└── whip-whep
│ ├── Cargo.toml
│ ├── public
│ ├── index.html
│ ├── whep
│ │ ├── index.html
│ │ ├── whep.demo.js
│ │ └── whep.js
│ └── whip
│ │ ├── index.html
│ │ ├── whip.demo.js
│ │ └── whip.js
│ └── src
│ ├── http.rs
│ ├── main.rs
│ ├── sfu
│ ├── cluster.rs
│ ├── media.rs
│ ├── mod.rs
│ ├── shared_port.rs
│ ├── whep.rs
│ └── whip.rs
│ └── worker.rs
├── fuzz
├── .gitignore
├── Cargo.toml
└── fuzz_targets
│ ├── network_control_pkt.rs
│ └── transport_msg.rs
├── packages
├── core
│ ├── identity
│ │ ├── CHANGELOG.md
│ │ ├── Cargo.toml
│ │ └── src
│ │ │ ├── conn_id.rs
│ │ │ ├── lib.rs
│ │ │ ├── node_addr.rs
│ │ │ └── node_id.rs
│ ├── router
│ │ ├── CHANGELOG.md
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── benches
│ │ │ └── router.rs
│ │ └── src
│ │ │ ├── core
│ │ │ ├── mod.rs
│ │ │ ├── registry.rs
│ │ │ ├── registry
│ │ │ │ └── dest.rs
│ │ │ ├── router.rs
│ │ │ ├── table.rs
│ │ │ └── table
│ │ │ │ ├── dest.rs
│ │ │ │ ├── metric.rs
│ │ │ │ └── path.rs
│ │ │ ├── lib.rs
│ │ │ └── shadow
│ │ │ ├── mod.rs
│ │ │ ├── service.rs
│ │ │ └── table.rs
│ └── utils
│ │ ├── CHANGELOG.md
│ │ ├── Cargo.toml
│ │ └── src
│ │ ├── error_handle.rs
│ │ ├── hash.rs
│ │ ├── init_array.rs
│ │ ├── init_vec.rs
│ │ ├── lib.rs
│ │ ├── option_handle.rs
│ │ └── types.rs
├── network
│ ├── .DS_Store
│ ├── CHANGELOG.md
│ ├── Cargo.toml
│ ├── README.md
│ ├── src
│ │ ├── _fuzz_export.rs
│ │ ├── base
│ │ │ ├── control.rs
│ │ │ ├── feature.rs
│ │ │ ├── mod.rs
│ │ │ ├── msg.rs
│ │ │ ├── secure.rs
│ │ │ └── service.rs
│ │ ├── controller_plane.rs
│ │ ├── controller_plane
│ │ │ ├── features.rs
│ │ │ ├── neighbours.rs
│ │ │ ├── neighbours
│ │ │ │ └── connection.rs
│ │ │ └── services.rs
│ │ ├── data_plane.rs
│ │ ├── data_plane
│ │ │ ├── connection.rs
│ │ │ ├── features.rs
│ │ │ └── services.rs
│ │ ├── features
│ │ │ ├── alias.rs
│ │ │ ├── data.rs
│ │ │ ├── dht_kv
│ │ │ │ ├── README.md
│ │ │ │ ├── client.rs
│ │ │ │ ├── client
│ │ │ │ │ └── map.rs
│ │ │ │ ├── internal.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── msg.rs
│ │ │ │ ├── server.rs
│ │ │ │ └── server
│ │ │ │ │ └── map.rs
│ │ │ ├── mod.rs
│ │ │ ├── neighbours.rs
│ │ │ ├── pubsub
│ │ │ │ ├── README.md
│ │ │ │ ├── controller.rs
│ │ │ │ ├── controller
│ │ │ │ │ ├── consumers.rs
│ │ │ │ │ ├── feedbacks.rs
│ │ │ │ │ ├── local_relay.rs
│ │ │ │ │ ├── remote_relay.rs
│ │ │ │ │ └── source_hint.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── msg.rs
│ │ │ │ └── worker.rs
│ │ │ ├── router_sync.rs
│ │ │ ├── socket.rs
│ │ │ └── vpn.rs
│ │ ├── lib.rs
│ │ ├── secure
│ │ │ ├── authorization
│ │ │ │ ├── mod.rs
│ │ │ │ └── static_key.rs
│ │ │ ├── encryption
│ │ │ │ ├── mod.rs
│ │ │ │ └── x25519_dalek_aes.rs
│ │ │ └── mod.rs
│ │ ├── services
│ │ │ ├── manual2_discovery.rs
│ │ │ ├── manual_discovery.rs
│ │ │ ├── mod.rs
│ │ │ └── visualization.rs
│ │ └── worker.rs
│ └── tests
│ │ ├── feature_alias.rs
│ │ ├── feature_dht_kv.rs
│ │ ├── feature_neighbours.rs
│ │ ├── feature_pubsub.rs
│ │ ├── feature_router_sync.rs
│ │ ├── feature_socket.rs
│ │ ├── service_manual2_discovery.rs
│ │ ├── service_manual_discovery.rs
│ │ ├── service_visualization.rs
│ │ └── simulator.rs
└── runner
│ ├── CHANGELOG.md
│ ├── Cargo.toml
│ ├── docs
│ └── architecture.excalidraw.png
│ ├── examples
│ ├── simple_kv.rs
│ └── simple_node.rs
│ ├── run-example-debug.sh
│ ├── run-example-release.sh
│ ├── src
│ ├── builder.rs
│ ├── history.rs
│ ├── lib.rs
│ ├── time.rs
│ └── worker_inner.rs
│ └── tests
│ └── feature_dht_kv.rs
├── renovate.json
├── run-test.sh
├── rust-toolchain.toml
└── rustfmt.toml
/.cargo/config.toml:
--------------------------------------------------------------------------------
1 | [net]
2 | git-fetch-with-cli = true
3 |
--------------------------------------------------------------------------------
/.githooks/pre-commit:
--------------------------------------------------------------------------------
#!/bin/bash
# Pre-commit hook: reject the commit when any staged file fails `rustfmt --check`.
# Installed via `make init` (which points core.hooksPath at .githooks).

HAS_ISSUES=0

# Check every staged file. rustfmt errors on non-Rust files are discarded
# (2>/dev/null || true), so only genuine formatting diffs produce output.
# BUGFIX: quote "$file" so paths containing spaces are passed intact;
# removed the FIRST_FILE variable, which was assigned but never read.
for file in $(git diff --name-only --staged); do
    FMT_RESULT="$(rustfmt --check "$file" 2>/dev/null || true)"
    if [ "$FMT_RESULT" != "" ]; then
        echo "$file"
        HAS_ISSUES=1
    fi
done

if [ $HAS_ISSUES -eq 0 ]; then
    exit 0
fi

echo "Your code has formatting issues in files listed above. Format your code with \`make format\` or call rustfmt manually."
exit 1
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: 'Bug: '
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Start Node A with ...
16 | 2. Start Node B with ...
17 | 3. Call a function in Node A ...
18 |
19 | **Expected behavior**
20 | A clear and concise description of what you expected to happen.
21 |
22 | **Screenshots**
23 | If applicable, add screenshots to help explain your problem.
24 |
25 | **Desktop (please complete the following information):**
26 | - OS: [e.g. Windows]
27 |
28 | **Additional context**
29 | Add any other context about the problem here.
30 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: 'Feature Request: '
5 | labels: 'feature request'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "cargo" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 |
--------------------------------------------------------------------------------
/.github/workflows/release-plz.yml:
--------------------------------------------------------------------------------
1 | name: Release-plz
2 |
3 | permissions:
4 | pull-requests: write
5 | contents: write
6 |
7 | on:
8 | push:
9 | branches:
10 | - master
11 |
12 | jobs:
13 | release-plz:
14 | name: Release-plz
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Checkout repository
18 | uses: actions/checkout@v4
19 | with:
20 | fetch-depth: 0
21 | - name: Install Rust toolchain
22 | uses: dtolnay/rust-toolchain@stable
23 | - name: Run release-plz
24 | uses: MarcoIeni/release-plz-action@v0.5
25 | env:
26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
27 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/rust-clippy.yml:
--------------------------------------------------------------------------------
1 | # rust-clippy is a tool that runs a bunch of lints to catch common
2 | # mistakes in your Rust code and help improve your Rust code.
3 | # More details at https://github.com/rust-lang/rust-clippy
4 | # and https://rust-lang.github.io/rust-clippy/
5 |
6 | name: rust-clippy analyze
7 |
8 | on:
9 | push:
10 | branches: [ "master" ]
11 | pull_request:
12 | # The branches below must be a subset of the branches above
13 | branches: [ "master" ]
14 | schedule:
15 | - cron: '29 19 * * 2'
16 |
17 | concurrency:
18 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
19 | cancel-in-progress: true
20 |
21 | env:
22 | CARGO_TERM_COLOR: always
23 |
24 | jobs:
25 | rust-clippy-analyze:
26 | name: Run rust-clippy analyzing
27 | runs-on: ubuntu-latest
28 | permissions:
29 | contents: read
30 | security-events: write
31 | actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
32 | steps:
33 | - name: Checkout code
34 | uses: actions/checkout@v4
35 |
36 | - uses: actions/cache@v4
37 | id: cache-cargo
38 | with:
39 | path: |
40 | ~/.cargo/registry
41 | ~/.cargo/git
42 | target
43 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
44 | restore-keys: ${{ runner.os }}-cargo-
45 |
46 | - name: Install Protoc
47 | uses: arduino/setup-protoc@v3
48 | with:
49 | version: "25.1"
50 | repo-token: ${{ secrets.GITHUB_TOKEN }}
51 |
52 | - name: Run rust-clippy
53 | run: cargo clippy --all-targets --all-features -- -D warnings
54 |
55 | - name: Install required cargo
56 | run: cargo install clippy-sarif sarif-fmt
57 |
58 | - name: Run rust-sarif
59 | run: cargo clippy --all-features --message-format=json |
60 | clippy-sarif | tee rust-clippy-results.sarif | sarif-fmt
61 |
62 | - name: Upload analysis results to GitHub
63 | uses: github/codeql-action/upload-sarif@v3
64 | with:
65 | sarif_file: rust-clippy-results.sarif
66 | wait-for-processing: true
67 |
--------------------------------------------------------------------------------
/.github/workflows/rust-fmt.yml:
--------------------------------------------------------------------------------
1 | name: rust-fmt analyze
2 |
3 | on:
4 | push:
5 | branches: [ "master" ]
6 | pull_request:
7 | # The branches below must be a subset of the branches above
8 | branches: [ "master" ]
9 | schedule:
10 | - cron: '29 19 * * 2'
11 |
12 | concurrency:
13 | # One build per PR, branch or tag
14 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
15 | cancel-in-progress: true
16 |
17 | env:
18 | CARGO_TERM_COLOR: always
19 |
20 | jobs:
21 | rust-fmt-analyze:
22 | name: Run rust-fmt analyzing
23 | runs-on: ubuntu-latest
24 | steps:
25 | - name: Checkout code
26 | uses: actions/checkout@v4
27 |
28 | - uses: actions/cache@v4
29 | with:
30 | path: |
31 | ~/.cargo/bin/
32 | ~/.cargo/registry/index/
33 | ~/.cargo/registry/cache/
34 | ~/.cargo/git/db/
35 | target/
36 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
37 |
38 | - name: cargo fmt
39 | run: cargo fmt --all -- --check
40 |
--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
1 | name: Rust
2 |
3 | on:
4 | push:
5 | branches: ["master"]
6 | paths-ignore:
7 | - "docs/**"
8 |
9 | pull_request:
10 | branches: ["master"]
11 | paths-ignore:
12 | - "docs/**"
13 |
14 | env:
15 | CARGO_TERM_COLOR: always
16 |
17 | jobs:
18 | code-coverage:
19 | runs-on: ubuntu-latest
20 | env:
21 | CARGO_TERM_COLOR: always
22 | steps:
23 | - uses: actions/checkout@v4
24 | - name: Install deps
25 | run: |
26 | sudo apt-get update
27 | sudo apt install -y libsoxr-dev libopus-dev libssl-dev libfdk-aac-dev
28 |
29 | - uses: actions/cache@v4
30 | with:
31 | path: |
32 | ~/.cargo/bin/
33 | ~/.cargo/registry/index/
34 | ~/.cargo/registry/cache/
35 | ~/.cargo/git/db/
36 | target/
37 | key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
38 |
39 | - name: Install Protoc
40 | uses: arduino/setup-protoc@v3
41 | with:
42 | version: "25.1"
43 | repo-token: ${{ secrets.GITHUB_TOKEN }}
44 | - name: Install cargo-llvm-cov
45 | uses: taiki-e/install-action@cargo-llvm-cov
46 | - name: Running cargo test
47 | run: cargo test --all-features --workspace
48 | - name: Generate code coverage
49 | run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
50 | - name: Upload coverage to Codecov
51 | uses: codecov/codecov-action@v4
52 | with:
53 | token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
54 | files: lcov.info
55 | fail_ci_if_error: false
56 | cargo-deny:
57 | name: cargo-deny
58 |
59 | # TODO: remove this matrix when https://github.com/EmbarkStudios/cargo-deny/issues/324 is resolved
60 | strategy:
61 | fail-fast: false
62 | matrix:
63 | platform:
64 | - x86_64-unknown-linux-gnu
65 | # - x86_64-unknown-linux-musl
66 | - aarch64-unknown-linux-gnu
67 | # - arm-unknown-linux-gnueabihf
68 | # - armv7-unknown-linux-gnueabihf
69 | # - mips-unknown-linux-gnu
70 | # - mips64-unknown-linux-gnuabi64
71 | # - mips64el-unknown-linux-gnuabi64
72 | # - mipsel-unknown-linux-gnu
73 | # - aarch64-unknown-linux-musl
74 | - x86_64-apple-darwin
75 | - aarch64-apple-darwin
76 | # - x86_64-pc-windows-gnu
77 | # - x86_64-pc-windows-msvc
78 |
79 | runs-on: ubuntu-latest
80 | steps:
81 | - uses: actions/checkout@v4
82 | - uses: EmbarkStudios/cargo-deny-action@v1
83 | with:
84 | command: check
85 | log-level: error
86 | arguments: --all-features --target ${{ matrix.platform }}
87 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /Cargo.lock
3 | .idea
4 | .vscode
--------------------------------------------------------------------------------
/.release-plz.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | git_release_enable = false
3 |
4 | [[package]]
5 | name = "atm0s-sdn"
6 | git_release_enable = true
7 | changelog_include = [
8 | "atm0s-sdn-identity",
9 | "atm0s-sdn-router",
10 | "atm0s-sdn-layers-spread-router",
11 | "atm0s-sdn-multiaddr",
12 | "atm0s-sdn-utils",
13 | "atm0s-sdn-network",
14 | "atm0s-sdn-manual-discovery",
15 | "atm0s-sdn-dht-discovery",
16 | "atm0s-sdn-layers-spread-router-sync",
17 | "atm0s-sdn-key-value",
18 | "atm0s-sdn-pub-sub",
19 | "atm0s-sdn-transport-vnet",
20 | "atm0s-sdn-transport-tcp",
21 | "atm0s-sdn-transport-udp",
22 | ]
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace.metadata.workspaces]
2 | independent = true
3 |
4 | [workspace]
5 | resolver = "2"
6 | members = [
7 | "bin",
8 | "fuzz",
9 | "packages/core/utils",
10 | "packages/core/identity",
11 | "packages/core/router",
12 | "packages/network",
13 | "packages/runner",
14 | ]
15 |
16 | [workspace.dependencies]
17 | bincode = "1.3"
18 | serde = { version = "1.0", features = ["derive"] }
19 | thiserror = "1.0"
20 | log = "0.4"
21 | rand = "0.8"
22 | parking_lot = "0.12"
23 | env_logger = "0.11"
24 | clap = { version = "4.4", features = ["derive", "env"] }
25 | mockall = "0.13"
26 | num_enum = "0.7"
27 | convert-enum = "0.1.0"
28 | sans-io-runtime = { version = "0.3", default-features = false }
29 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# Stage 1 (packaging): expects prebuilt per-target binaries already present in
# the build context under <target-triple>/atm0s-sdn-standalone-<target-triple>.
FROM ubuntu:22.04 as base
ARG TARGETPLATFORM
COPY . /tmp
WORKDIR /tmp

# Debug aids: show which platform buildx selected and what the context contains.
RUN echo $TARGETPLATFORM
RUN ls -R /tmp/
# move the binary to root based on platform
# (any platform other than amd64/arm64 deliberately fails the build via exit 1)
RUN case $TARGETPLATFORM in \
    "linux/amd64") BUILD=x86_64-unknown-linux-gnu ;; \
    "linux/arm64") BUILD=aarch64-unknown-linux-gnu ;; \
    *) exit 1 ;; \
    esac; \
    mv /tmp/$BUILD/atm0s-sdn-standalone-$BUILD /atm0s-sdn-standalone; \
    chmod +x /atm0s-sdn-standalone

# Stage 2 (runtime): fresh image carrying only the selected binary.
FROM ubuntu:22.04

COPY --from=base /atm0s-sdn-standalone /atm0s-sdn-standalone

ENTRYPOINT ["/atm0s-sdn-standalone"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 8xFF
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# `init` wires the repo-local git hooks (see .githooks/pre-commit);
# `format` formats the whole workspace with rustfmt.
# BUGFIX: declare both targets .PHONY so a file named `init` or `format`
# in the repo root cannot silently make the targets no-ops.
.PHONY: init format

init:
	git config core.hooksPath .githooks

format:
	cargo fmt
--------------------------------------------------------------------------------
/bin/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "atm0s-sdn-standalone"
3 | version = "0.1.0"
4 | edition = "2021"
5 | repository = "https://github.com/8xFF/atm0s-sdn"
6 | description = "Decentralized Ultra-Low-Latency Software Defined Network"
7 | license = "MIT"
8 | publish = false
9 |
10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
11 |
12 | [dependencies]
13 | signal-hook = "0.3.17"
14 | log.workspace = true
15 | clap.workspace = true
16 | serde.workspace = true
17 | atm0s-sdn = { path = "../packages/runner", version = "0.2.8", features = [
18 | "vpn",
19 | ] }
20 | tokio = { version = "1", features = ["full"] }
21 | poem = { version = "3", features = ["embed", "static-files", "websocket"] }
22 | rust-embed = { version = "8.2", optional = true }
23 |
24 | futures-util = "0.3"
25 | tracing-subscriber = "0.3"
26 | serde_json = "1.0"
27 | local-ip-address = "0.6"
28 |
29 | [features]
30 | default = ["embed"]
31 | embed = ["rust-embed"]
32 |
--------------------------------------------------------------------------------
/bin/deployments/systemd/atm0s-sdn-node.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=atm0s-sdn-node service
3 | After=network.target
4 |
5 | [Service]
6 | ExecStart=/opt/atm0s-sdn-node.sh
7 | StandardOutput=append:/var/log/atm0s-sdn-node.log
8 | StandardError=append:/var/log/atm0s-sdn-node.log
9 |
10 | [Install]
11 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/bin/deployments/systemd/atm0s-sdn-node.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Base environment for the atm0s-sdn node service.
# NOTE(review): this file intentionally contains no exec line — deploy.sh
# copies it to /tmp, appends per-node exports (NODE_ID, and optionally SEEDS,
# COLLECTOR, CUSTOM_ADDRS) plus the final "/opt/atm0s-sdn-node" launch
# command, and uploads the result as /opt/atm0s-sdn-node.sh.
export RUST_LOG=info       # log verbosity for the node binary
export UDP_PORT=10000      # UDP port the node listens on (matches CUSTOM_ADDRS in deploy.sh)
export WORKERS=1           # worker count passed to the node
export LOCAL_TAGS=global   # tags this node advertises
export CONNECT_TAGS=global # tags this node connects to
export VPN=true            # enable the VPN feature
--------------------------------------------------------------------------------
/bin/deployments/systemd/deploy.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Deploy the atm0s-sdn node binary + systemd unit to every server defined in
# servers.sh, then (re)start and report on the service.

source "./servers.sh"

# Loop through each server. The associative array mixes the ssh address
# ("server1") with metadata keys ("server1_node_id", ...), so skip metadata.
for server in "${!servers[@]}"; do
    if [[ $server == *"_node_id" ]] || [[ $server == *"_name" ]] || [[ $server == *"_public" ]] || [[ $server == *"_ssh_port" ]] || [[ $server == *"_seeds" ]] || [[ $server == *"_collector" ]]; then
        continue
    fi

    # Retrieve the per-node configuration for the current server
    node_id="${servers["$server"_node_id]}"
    ssh_port="${servers["$server"_ssh_port]:-22}"
    seeds="${servers["$server"_seeds]}"
    collector="${servers["$server"_collector]}"
    public_ip="${servers["$server"_public]}"

    ssh -p $ssh_port "${servers[$server]}" "systemctl stop atm0s-sdn-node"

    echo "Configuring for node $node_id"

    # Build the launch script: base env file + per-node exports + exec line
    cp ./atm0s-sdn-node.sh /tmp/atm0s-sdn-node-sh
    echo "export NODE_ID=$node_id" >> /tmp/atm0s-sdn-node-sh
    if [ -n "$seeds" ]; then
        echo "export SEEDS=$seeds" >> /tmp/atm0s-sdn-node-sh
    fi
    if [ -n "$collector" ]; then
        echo "export COLLECTOR=$collector" >> /tmp/atm0s-sdn-node-sh
    fi
    if [ -n "$public_ip" ]; then
        echo "export CUSTOM_ADDRS=\"$public_ip:10000\"" >> /tmp/atm0s-sdn-node-sh
    fi

    echo "/opt/atm0s-sdn-node" >> /tmp/atm0s-sdn-node-sh

    echo "Connecting to $server"

    # Remove stale files before upload.
    # BUGFIX: these rm commands execute ON the remote host, so they take plain
    # paths — the previous "user@host:/path" form (scp syntax) made the remote
    # shell try to delete a literally-named nonexistent file.
    ssh -p $ssh_port "${servers[$server]}" "rm -f /opt/atm0s-sdn-node"
    ssh -p $ssh_port "${servers[$server]}" "rm -f /etc/systemd/system/atm0s-sdn-node.service"
    ssh -p $ssh_port "${servers[$server]}" "rm -f /opt/atm0s-sdn-node.sh"
    scp -P $ssh_port "../../../target/release/atm0s-sdn-standalone" "${servers[$server]}:/opt/atm0s-sdn-node"
    scp -P $ssh_port "./atm0s-sdn-node.service" "${servers[$server]}:/etc/systemd/system/atm0s-sdn-node.service"
    scp -P $ssh_port "/tmp/atm0s-sdn-node-sh" "${servers[$server]}:/opt/atm0s-sdn-node.sh"

    # Register and start the service
    ssh -p $ssh_port "${servers[$server]}" "systemctl daemon-reload"
    ssh -p $ssh_port "${servers[$server]}" "systemctl enable atm0s-sdn-node"
    ssh -p $ssh_port "${servers[$server]}" "systemctl start atm0s-sdn-node"
    ssh -p $ssh_port "${servers[$server]}" "systemctl status atm0s-sdn-node"
done
--------------------------------------------------------------------------------
/bin/deployments/systemd/remove.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Disable and stop the atm0s-sdn-node service on every server in servers.sh.

source "./servers.sh"

for server in "${!servers[@]}"; do
    # The array mixes ssh addresses with metadata keys (suffixes _node_id,
    # _name, _public, _ssh_port, _seeds, _collector); only plain entries
    # hold an ssh target, so skip the metadata.
    case "$server" in
        *_node_id|*_name|*_public|*_ssh_port|*_seeds|*_collector)
            continue
            ;;
    esac

    ssh_port="${servers["$server"_ssh_port]:-22}"

    echo "Disable and stop" "${servers[$server]}"

    ssh -p $ssh_port "${servers[$server]}" "systemctl disable atm0s-sdn-node"
    ssh -p $ssh_port "${servers[$server]}" "systemctl stop atm0s-sdn-node"
done
--------------------------------------------------------------------------------
/bin/deployments/systemd/servers.sh.templ:
--------------------------------------------------------------------------------
#!/bin/bash

# Define servers as an associative array with additional details.
# For each logical server "<key>" the scripts in this directory read:
#   ["<key>"]            ssh target (user@host) used by deploy/remove/test scripts
#   ["<key>_name"]       human-readable label (printed in test_run.sh output)
#   ["<key>_public"]     public IP; deploy.sh turns it into CUSTOM_ADDRS
#   ["<key>_ssh_port"]   ssh port (scripts fall back to 22 when unset)
#   ["<key>_node_id"]    numeric node id (also forms the VPN IP 10.33.33.<id>)
#   ["<key>_collector"]  whether this node runs as collector
#   ["<key>_seeds"]      seed address list for joining the network
declare -A servers
servers=(
    ["server1"]="root@IP"
    ["server1_name"]="NAME"
    ["server1_public"]="IP"
    ["server1_ssh_port"]="22"
    ["server1_node_id"]="1"
    ["server1_collector"]="false"
    ["server1_seeds"]="0@/ip4/IP/udp/10000"
)
--------------------------------------------------------------------------------
/bin/deployments/systemd/test_get_logs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Fetch the service log from every node into ./logs/<node_id>.log.gz.

source "./servers.sh"

# Start from an empty local logs directory
mkdir -p ./logs/
rm -f ./logs/*

# Loop through each server
for server in "${!servers[@]}"; do
    # Skip metadata keys (suffixes _node_id, _name, _public, _ssh_port,
    # _seeds, _collector); only plain entries hold an ssh address.
    if [[ $server == *"_node_id" ]] || [[ $server == *"_name" ]] || [[ $server == *"_public" ]] || [[ $server == *"_ssh_port" ]] || [[ $server == *"_seeds" ]] || [[ $server == *"_collector" ]]; then
        continue
    fi

    node_id="${servers["$server"_node_id]}"
    ssh_port="${servers["$server"_ssh_port]:-22}"

    # Compress the log remotely (--keep leaves the live log untouched),
    # then download the .gz named after the node id.
    ssh -p $ssh_port "${servers[$server]}" "gzip -f --keep /var/log/atm0s-sdn-node.log"
    scp -P $ssh_port "${servers[$server]}:/var/log/atm0s-sdn-node.log.gz" "logs/$node_id.log.gz"
done
--------------------------------------------------------------------------------
/bin/deployments/systemd/test_install.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Install the benchmarking tooling (iperf3, python3, jc) on every server.

source "./servers.sh"

for server in "${!servers[@]}"; do
    # Metadata keys carry per-node settings, not ssh addresses — skip them.
    case "$server" in
        *_node_id|*_name|*_public|*_ssh_port|*_seeds|*_collector)
            continue
            ;;
    esac

    echo "Install and config iperf3" "${servers[$server]}"
    ssh_port="${servers["$server"_ssh_port]:-22}"

    # jc converts ping/iperf output to JSON for the test scripts.
    ssh -p $ssh_port "${servers[$server]}" "apt-get install -y iperf3 python3 python3-pip"
    ssh -p $ssh_port "${servers[$server]}" "pip3 install jc --break-system-packages"
done
--------------------------------------------------------------------------------
/bin/deployments/systemd/test_remove_logs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Delete the service log (and any rotated/compressed copies) on every node.

source "./servers.sh"

# Loop through each server
for server in "${!servers[@]}"; do
    # Skip metadata keys (suffixes _node_id, _name, _public, _ssh_port,
    # _seeds, _collector); only plain entries hold an ssh address.
    if [[ $server == *"_node_id" ]] || [[ $server == *"_name" ]] || [[ $server == *"_public" ]] || [[ $server == *"_ssh_port" ]] || [[ $server == *"_seeds" ]] || [[ $server == *"_collector" ]]; then
        continue
    fi

    node_id="${servers["$server"_node_id]}"
    ssh_port="${servers["$server"_ssh_port]:-22}"

    echo "Remove log of node $node_id"

    # The trailing * also removes the .gz copies left by test_get_logs.sh
    ssh -p $ssh_port "${servers[$server]}" "rm -f /var/log/atm0s-sdn-node.log*"
done
--------------------------------------------------------------------------------
/bin/deployments/systemd/test_run.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Measure ping latency between every pair of nodes, over both the public
# network and the SDN VPN (10.33.33.<node_id>), into results/stats.csv.

source "./servers.sh"

mkdir -p results
rm -f results/*

# BUGFIX: the header now matches the six fields written per data row below
# (it previously listed only four: "source; dest; public; vpn").
echo "source; dest; source_name; dest_name; rtt_public; rtt_vpn" > results/stats.csv

# Loop through each source server
for server in "${!servers[@]}"; do
    # Skip metadata keys; only plain entries hold an ssh address.
    if [[ $server == *"_node_id" ]] || [[ $server == *"_name" ]] || [[ $server == *"_public" ]] || [[ $server == *"_ssh_port" ]] || [[ $server == *"_seeds" ]] || [[ $server == *"_collector" ]]; then
        continue
    fi

    node_id="${servers["$server"_node_id]}"
    name="${servers["$server"_name]}"
    ssh_port="${servers["$server"_ssh_port]:-22}"
    public_ip="${servers["$server"_public]}"
    vpn_ip="10.33.33.$node_id"

    # Inner loop: every other server is a ping target.
    for target in "${!servers[@]}"; do
        # Same metadata filter as above (the original repeated the
        # "_node_id" test twice; once is enough).
        if [[ $target == *"_node_id" ]] || [[ $target == *"_name" ]] || [[ $target == *"_public" ]] || [[ $target == *"_ssh_port" ]] || [[ $target == *"_seeds" ]] || [[ $target == *"_collector" ]]; then
            continue
        fi

        target_name="${servers["$target"_name]}"
        target_node_id="${servers["$target"_node_id]}"
        target_public_ip="${servers["$target"_public]}"
        target_vpn_ip="10.33.33.$target_node_id"

        # Don't ping ourselves
        if [[ "$node_id" == "$target_node_id" ]]; then
            continue
        fi

        echo "Running test from $node_id $name to $target_node_id $target_name, $public_ip, $target_public_ip"

        # One ping over each path; jc converts ping output to JSON.
        ssh -p $ssh_port "${servers[$server]}" "ping -c 1 $target_public_ip | jc --ping > /tmp/$node_id-$target_node_id-ping-public.json"
        ssh -p $ssh_port "${servers[$server]}" "ping -c 1 $target_vpn_ip | jc --ping > /tmp/$node_id-$target_node_id-ping-vpn.json"
        scp -P $ssh_port "${servers[$server]}:/tmp/$node_id-$target_node_id-ping-public.json" "results/$node_id-$target_node_id-ping-public.json"
        scp -P $ssh_port "${servers[$server]}:/tmp/$node_id-$target_node_id-ping-vpn.json" "results/$node_id-$target_node_id-ping-vpn.json"

        # Extract the average round-trip times and append one CSV row.
        rtt_public=$(cat results/$node_id-$target_node_id-ping-public.json | jq ".round_trip_ms_avg")
        rtt_vpn=$(cat results/$node_id-$target_node_id-ping-vpn.json | jq ".round_trip_ms_avg")
        echo "$node_id; $target_node_id; $name; $target_name; $rtt_public; $rtt_vpn"
        echo "$node_id; $target_node_id; $name; $target_name; $rtt_public; $rtt_vpn" >> results/stats.csv
    done
done
--------------------------------------------------------------------------------
/bin/deployments/systemd/test_view_logs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Print the tail of the service log from every node.
# Usage: ./test_view_logs.sh [num_lines]   (defaults to 100)

source "./servers.sh"

# ROBUSTNESS: default the line count so running without an argument no longer
# sends an invalid `tail -n ""` to every server. Passing $1 works as before.
num_lines="${1:-100}"

# NOTE(review): ./logs is cleared but never written by this script — this
# looks copied from test_get_logs.sh; kept to preserve existing behavior.
mkdir -p ./logs/
rm -f ./logs/*

# Loop through each server
for server in "${!servers[@]}"; do
    # Skip metadata keys; only plain entries hold an ssh address.
    if [[ $server == *"_node_id" ]] || [[ $server == *"_name" ]] || [[ $server == *"_public" ]] || [[ $server == *"_ssh_port" ]] || [[ $server == *"_seeds" ]] || [[ $server == *"_collector" ]]; then
        continue
    fi

    node_id="${servers["$server"_node_id]}"
    ssh_port="${servers["$server"_ssh_port]:-22}"

    echo "#########################"
    echo "### Node $node_id. ###"
    echo "#########################"

    ssh -p $ssh_port "${servers[$server]}" "tail -n $num_lines /var/log/atm0s-sdn-node.log"
done
--------------------------------------------------------------------------------
/bin/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Network Graph with Cytoscape
5 |
6 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/bin/start_agent.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Start an agent (non-collector) node for local testing.
# Usage: ./start_agent.sh <seeds> <node_id> <udp_port>
cargo run -- --local-tags vpn --connect-tags vpn --seeds $1 --node-id $2 --udp-port $3
--------------------------------------------------------------------------------
/bin/start_collector.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Usage: ./start_collector.sh <node-id> <udp-port> <web-addr> [seeds]
# Runs this node as the visualization collector. If $4 (seeds) is provided,
# the node joins the existing network; otherwise it starts as the first node.
if [ -n "$4" ]; then
    # Seeds provided: join the existing network.
    cargo run -- --collector --local-tags vpn --connect-tags vpn --node-id "$1" --udp-port "$2" --web-addr "$3" --seeds "$4"
else
    # No seeds: bootstrap a new network.
    cargo run -- --collector --local-tags vpn --connect-tags vpn --node-id "$1" --udp-port "$2" --web-addr "$3"
fi
9 |
--------------------------------------------------------------------------------
/deny.toml:
--------------------------------------------------------------------------------
1 | [graph]
2 |
3 | targets = []
4 | all-features = false
5 | no-default-features = false
6 | [output]
7 | feature-depth = 1
8 |
9 | [advisories]
10 | ignore = []
11 |
12 | [licenses]
13 | allow = [
14 | "Apache-2.0",
15 | "BSD-2-Clause",
16 | "BSD-3-Clause",
17 | "ISC",
18 | "MIT",
19 | "WTFPL",
20 | "Unicode-3.0",
21 | "NCSA",
22 | ]
23 | confidence-threshold = 0.8
24 | exceptions = []
25 |
26 |
27 | [licenses.private]
28 | ignore = false
29 | registries = [
  #"https://sekretz.com/registry"
31 | ]
32 |
33 | [bans]
34 | multiple-versions = "warn"
35 | wildcards = "allow"
36 | highlight = "all"
37 | workspace-default-features = "allow"
38 | external-default-features = "allow"
39 | allow = []
40 | deny = []
41 |
42 | skip = []
43 | skip-tree = []
44 | [sources]
45 | unknown-registry = "warn"
46 | unknown-git = "warn"
47 | allow-registry = ["https://github.com/rust-lang/crates.io-index"]
48 | allow-git = []
49 |
50 | [sources.allow-org]
51 | github = [""]
52 | gitlab = [""]
53 | bitbucket = [""]
54 |
--------------------------------------------------------------------------------
/docs/imgs/flow.excalidraw.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/8xFF/atm0s-sdn/63ae52d3e14e1963c310c7872ca6e6cdd6013a5f/docs/imgs/flow.excalidraw.png
--------------------------------------------------------------------------------
/docs/imgs/visualization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/8xFF/atm0s-sdn/63ae52d3e14e1963c310c7872ca6e6cdd6013a5f/docs/imgs/visualization.png
--------------------------------------------------------------------------------
/examples/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | Cargo.lock
3 | .idea
4 | .vscode
--------------------------------------------------------------------------------
/examples/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | resolver = "2"
members = [
    "quic-tunnel",
    "whip-whep",
]
6 |
7 | [workspace.package]
8 | version = "0.1.0"
9 | edition = "2021"
10 | publish = false
11 |
12 | [workspace.dependencies]
13 | atm0s-sdn = { path = "../packages/runner" }
14 | tracing-subscriber = "0.3"
15 | signal-hook = "0.3"
16 | clap = { version = "4.4", features = ["derive", "env"] }
17 | tokio = { version = "1", features = ["full"] }
18 | log = "0.4"
19 |
--------------------------------------------------------------------------------
/examples/quic-tunnel/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "quic-tunnel"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [dependencies]
7 | log.workspace = true
8 | tracing-subscriber.workspace = true
9 | clap.workspace = true
10 | tokio.workspace = true
11 | atm0s-sdn.workspace = true
12 | signal-hook.workspace = true
13 | quinn = { version = "0.10", default-features = false, features = ["runtime-tokio", "log", "ring"] }
14 | quinn-plaintext = "0.2.0"
15 |
--------------------------------------------------------------------------------
/examples/quic-tunnel/README.md:
--------------------------------------------------------------------------------
1 | # Quic-tunnel
2 |
This sample implements a simple tunnel using the QUIC protocol. It is based on the virtual socket feature and the Quinn crate.
--------------------------------------------------------------------------------
/examples/quic-tunnel/src/sdn.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | sync::{
3 | atomic::{AtomicBool, Ordering},
4 | Arc,
5 | },
6 | time::Duration,
7 | };
8 |
9 | use atm0s_sdn::{
10 | features::{socket, FeaturesControl, FeaturesEvent},
11 | sans_io_runtime::backend::PollingBackend,
12 | services::visualization,
13 | NodeAddr, NodeId, SdnBuilder, SdnExtIn, SdnExtOut, SdnOwner,
14 | };
15 | use tokio::sync::mpsc::{Receiver, Sender};
16 |
17 | use crate::vnet::{NetworkPkt, OutEvent};
18 |
// Aliases for the SdnBuilder type parameters: only the visualization
// service's control/event types are used; the remaining parameters are unit.
type SC = visualization::Control;
type SE = visualization::Event;
type TC = ();
type TW = ();
23 |
/// Runs the SDN node event loop until SIGINT (plus a graceful-shutdown grace
/// period).
///
/// Bridges the sans-io SDN controller with the tokio side of the tunnel:
/// commands received on `rx` are forwarded into the controller as socket
/// feature controls, and `RecvFrom` events popped from the controller are
/// forwarded to `tx` as `NetworkPkt`s.
pub async fn run_sdn(node_id: NodeId, udp_port: u16, seeds: Vec, workers: usize, tx: Sender, mut rx: Receiver) {
    // SIGINT flips this flag; the loop then begins a graceful shutdown.
    let term = Arc::new(AtomicBool::new(false));
    signal_hook::flag::register(signal_hook::consts::SIGINT, Arc::clone(&term)).expect("Should register hook");
    let mut shutdown_wait = 0;
    let mut builder = SdnBuilder::<(), SC, SE, TC, TW>::new(node_id, udp_port, vec![]);

    // Advertise and connect to peers tagged "tunnel"; this node does not
    // collect visualization data.
    builder.set_manual_discovery(vec!["tunnel".to_string()], vec!["tunnel".to_string()]);
    builder.set_visualization_collector(false);

    for seed in seeds {
        builder.add_seed(seed);
    }

    let mut controller = builder.build::>(workers);
    while controller.process().is_some() {
        if term.load(Ordering::Relaxed) {
            // Allow up to 200 loop iterations (~200ms given the 1ms sleep
            // below) for a clean shutdown before force-breaking.
            if shutdown_wait == 200 {
                log::warn!("Force shutdown");
                break;
            }
            shutdown_wait += 1;
            controller.shutdown();
        }
        // Drain pending commands from the vnet side into the controller.
        while let Ok(c) = rx.try_recv() {
            match c {
                OutEvent::Bind(port) => {
                    controller.send_to(0, SdnExtIn::FeaturesControl((), FeaturesControl::Socket(socket::Control::Bind(port))));
                }
                OutEvent::Pkt(pkt) => {
                    let send = socket::Control::SendTo(pkt.local_port, pkt.remote, pkt.remote_port, pkt.data, pkt.meta);
                    controller.send_to(0, SdnExtIn::FeaturesControl((), FeaturesControl::Socket(send)));
                }
                OutEvent::Unbind(port) => {
                    controller.send_to(0, SdnExtIn::FeaturesControl((), FeaturesControl::Socket(socket::Control::Unbind(port))));
                }
            }
        }
        // Drain incoming socket events from the controller back to the vnet.
        while let Some(event) = controller.pop_event() {
            match event {
                SdnExtOut::FeaturesEvent(_, FeaturesEvent::Socket(socket::Event::RecvFrom(local_port, remote, remote_port, data, meta))) => {
                    // try_send: drop the packet rather than block the SDN loop.
                    if let Err(e) = tx.try_send(NetworkPkt {
                        local_port,
                        remote,
                        remote_port,
                        data,
                        meta,
                    }) {
                        log::error!("Failed to send to tx: {:?}", e);
                    }
                }
                _ => {}
            }
        }
        tokio::time::sleep(Duration::from_millis(1)).await;
    }
}
81 | }
82 |
--------------------------------------------------------------------------------
/examples/quic-tunnel/src/vnet/mod.rs:
--------------------------------------------------------------------------------
1 | use std::collections::{HashMap, VecDeque};
2 |
3 | use atm0s_sdn::{base::Buffer, NodeId};
4 | use tokio::{
5 | select,
6 | sync::mpsc::{channel, Receiver, Sender, UnboundedReceiver, UnboundedSender},
7 | };
8 |
9 | pub use self::socket::VirtualUdpSocket;
10 |
11 | mod socket;
12 |
/// Commands flowing from the virtual network toward the SDN runner.
#[derive(Debug)]
pub enum OutEvent {
    /// Bind a virtual UDP port.
    Bind(u16),
    /// Send a datagram over the SDN socket feature.
    Pkt(NetworkPkt),
    /// Release a previously bound virtual port.
    Unbind(u16),
}
19 |
/// A single virtual-UDP datagram routed through the SDN overlay.
#[derive(Debug)]
pub struct NetworkPkt {
    // Virtual port on this node (send source / receive destination).
    pub local_port: u16,
    // Peer node id.
    pub remote: NodeId,
    // Peer's virtual port.
    pub remote_port: u16,
    // Payload bytes.
    pub data: Buffer,
    // One-byte metadata; the QUIC socket shim uses it for the ECN codepoint (0 = none).
    pub meta: u8,
}
28 |
/// Multiplexes many `VirtualUdpSocket`s over one SDN command/event channel pair.
pub struct VirtualNetwork {
    node_id: NodeId,
    // Packets arriving from the SDN side, fanned out to per-port sockets.
    in_rx: Receiver,
    // Commands (bind/send/unbind) toward the SDN runner.
    out_tx: Sender,
    // Sockets report their port here on drop so it can be recycled.
    close_socket_tx: UnboundedSender,
    close_socket_rx: UnboundedReceiver,
    // Per-port sender used to deliver incoming packets to the owning socket.
    sockets: HashMap>,
    // Pool of free virtual ports.
    ports: VecDeque,
}
38 |
impl VirtualNetwork {
    /// Creates the network plus the channel endpoints the SDN runner uses:
    /// a sender for inbound packets and a receiver for outbound commands.
    pub fn new(node_id: NodeId) -> (Self, Sender, Receiver) {
        let (in_tx, in_rx) = tokio::sync::mpsc::channel(1000);
        let (out_tx, out_rx) = tokio::sync::mpsc::channel(1000);
        let (close_socket_tx, close_socket_rx) = tokio::sync::mpsc::unbounded_channel();

        (
            Self {
                node_id,
                in_rx,
                out_tx,
                close_socket_rx,
                close_socket_tx,
                sockets: HashMap::new(),
                // Pre-populate the free-port pool with ports 0..60000.
                ports: (0..60000).collect(),
            },
            in_tx,
            out_rx,
        )
    }

    /// Binds a virtual UDP socket. `port == 0` picks a free port from the
    /// pool; otherwise the requested port is claimed.
    ///
    /// Panics if the requested port is not free or the pool is exhausted.
    pub async fn udp_socket(&mut self, port: u16) -> VirtualUdpSocket {
        // Claim the port: remove it from the free pool.
        let port = if port > 0 {
            let index = self.ports.iter().position(|&x| x == port).expect("Should have port");
            self.ports.swap_remove_back(index);
            port
        } else {
            self.ports.pop_front().expect("Should have port")
        };
        self.out_tx.send(OutEvent::Bind(port)).await.expect("Should send bind");
        let (tx, rx) = channel(1000);
        self.sockets.insert(port, tx);
        VirtualUdpSocket::new(self.node_id, port, self.out_tx.clone(), rx, self.close_socket_tx.clone())
    }

    /// Pumps one event: either recycles a closed socket's port (sending
    /// Unbind upstream) or routes one inbound packet to the socket bound on
    /// its local port. Returns `None` when the inbound channel is closed.
    pub async fn recv(&mut self) -> Option<()> {
        select! {
            port = self.close_socket_rx.recv() => {
                let port = port.expect("Should have port");
                // NOTE(review): the entry in `sockets` is not removed here;
                // a later bind of the recycled port overwrites it — confirm intended.
                self.ports.push_back(port);
                self.out_tx.send(OutEvent::Unbind(port)).await.expect("Should send unbind");
                Some(())
            }
            pkt = self.in_rx.recv() => {
                let pkt = pkt?;
                let src = pkt.local_port;
                if let Some(socket_tx) = self.sockets.get(&src) {
                    // try_send: drop the packet if the socket's queue is full.
                    if let Err(e) = socket_tx.try_send(pkt) {
                        log::error!("Send to socket {} error {:?}", src, e);
                    }
                }
                Some(())
            }
        }
    }
}
96 |
--------------------------------------------------------------------------------
/examples/quic-tunnel/src/vnet/socket.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | fmt::Debug,
3 | io::IoSliceMut,
4 | net::{SocketAddr, SocketAddrV4},
5 | ops::DerefMut,
6 | sync::Mutex,
7 | task::{Context, Poll},
8 | };
9 |
10 | use quinn::{
11 | udp::{EcnCodepoint, RecvMeta, Transmit, UdpState},
12 | AsyncUdpSocket,
13 | };
14 | use tokio::sync::mpsc::{Receiver, Sender, UnboundedSender};
15 |
16 | use super::{NetworkPkt, OutEvent};
17 |
/// A quinn-compatible UDP socket backed by the SDN virtual network.
pub struct VirtualUdpSocket {
    node_id: u32,
    port: u16,
    // Synthetic IPv4 address: node id encoded as the IP, `port` as the UDP port.
    addr: SocketAddr,
    // Inbound packets; behind a Mutex because quinn polls through `&self`.
    rx: Mutex>,
    // Outbound events toward the virtual network.
    tx: Sender,
    // Notifies the vnet on drop so the port can be recycled.
    close_socket_tx: UnboundedSender,
}
26 |
impl VirtualUdpSocket {
    /// Wraps the given channels as a socket; the synthetic address maps the
    /// node id onto an IPv4 address and keeps the virtual port as the UDP port.
    pub fn new(node_id: u32, port: u16, tx: Sender, rx: Receiver, close_socket_tx: UnboundedSender) -> Self {
        Self {
            node_id,
            port,
            addr: SocketAddr::V4(SocketAddrV4::new(node_id.into(), port)),
            rx: Mutex::new(rx),
            tx,
            close_socket_tx,
        }
    }
}
39 |
40 | impl Debug for VirtualUdpSocket {
41 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
42 | f.debug_struct("VirtualUdpSocket").finish()
43 | }
44 | }
45 |
impl AsyncUdpSocket for VirtualUdpSocket {
    /// Forwards each outgoing datagram to the vnet as an `OutEvent::Pkt`.
    /// Only IPv4 destinations are routable (the node id is encoded in the
    /// address); other transmits are counted as sent and silently dropped.
    fn poll_send(&self, _state: &UdpState, _cx: &mut Context, transmits: &[Transmit]) -> Poll> {
        let mut sent = 0;
        for transmit in transmits {
            match transmit.destination {
                SocketAddr::V4(addr) => {
                    let pkt = NetworkPkt {
                        local_port: self.port,
                        // Recover the destination node id from the synthetic IPv4 address.
                        remote: u32::from_be_bytes(addr.ip().octets()),
                        remote_port: addr.port(),
                        data: transmit.contents.to_vec().into(),
                        // Carry the ECN codepoint in the one-byte meta field (0 = none).
                        meta: transmit.ecn.map(|x| x as u8).unwrap_or(0),
                    };
                    log::debug!("{} sending {} bytes to {}", self.addr, pkt.data.len(), addr);
                    // try_send: a full queue drops the packet (not counted as sent).
                    if self.tx.try_send(OutEvent::Pkt(pkt)).is_ok() {
                        sent += 1;
                    }
                }
                _ => {
                    // Non-IPv4 destinations cannot be represented; report as sent.
                    sent += 1;
                }
            }
        }
        std::task::Poll::Ready(Ok(sent))
    }

    /// Delivers at most one queued packet into `bufs[0]`/`meta[0]`.
    fn poll_recv(&self, cx: &mut Context, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta]) -> Poll> {
        let mut rx = self.rx.lock().expect("should lock rx");
        match rx.poll_recv(cx) {
            std::task::Poll::Pending => std::task::Poll::Pending,
            std::task::Poll::Ready(Some(pkt)) => {
                let len = pkt.data.len();
                if len <= bufs[0].len() {
                    // Rebuild the synthetic source address from node id + port.
                    let addr = SocketAddr::V4(SocketAddrV4::new(pkt.remote.into(), pkt.remote_port));
                    log::debug!("{} received {} bytes from {}", self.addr, len, addr);
                    bufs[0].deref_mut()[0..len].copy_from_slice(&pkt.data);
                    meta[0] = quinn::udp::RecvMeta {
                        addr,
                        len,
                        // Single datagram: stride equals the payload length.
                        stride: len,
                        ecn: if pkt.meta == 0 {
                            None
                        } else {
                            EcnCodepoint::from_bits(pkt.meta)
                        },
                        dst_ip: None,
                    };
                    std::task::Poll::Ready(Ok(1))
                } else {
                    // NOTE(review): the oversized packet is dropped and Pending is
                    // returned without arranging a wakeup; the task only resumes
                    // when another packet arrives — confirm this is intended.
                    log::warn!("Buffer too small for packet {} vs {}, dropping", len, bufs[0].len());
                    std::task::Poll::Pending
                }
            }
            std::task::Poll::Ready(None) => std::task::Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::ConnectionAborted, "Socket closed"))),
        }
    }

    /// The socket's synthetic local address (node id as IPv4, virtual port).
    fn local_addr(&self) -> std::io::Result {
        Ok(self.addr)
    }
}
107 |
108 | impl Drop for VirtualUdpSocket {
109 | fn drop(&mut self) {
110 | if let Err(e) = self.close_socket_tx.send(self.port) {
111 | log::error!("Failed to send close socket: {:?}", e);
112 | }
113 | }
114 | }
115 |
--------------------------------------------------------------------------------
/examples/whip-whep/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "whip-whep"
3 | version.workspace = true
4 | edition.workspace = true
5 | publish.workspace = true
6 |
7 | [dependencies]
8 | atm0s-sdn = { path = "../../packages/runner" }
9 | derive_more = "0.99.17"
10 | str0m = "0.5.0"
11 | tiny_http = "0.12.0"
12 | signal-hook = "0.3.17"
13 | env_logger = "0.11.3"
14 | log.workspace = true
15 | faster-stun = "1.0.2"
16 | clap.workspace = true
17 | serde = "1.0.197"
18 | bincode = "1.3.3"
19 | rand = "0.8.5"
20 | convert-enum = "0.1.0"
21 |
--------------------------------------------------------------------------------
/examples/whip-whep/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
9 |
10 |
--------------------------------------------------------------------------------
/examples/whip-whep/public/whep/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Whep
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/examples/whip-whep/public/whep/whep.demo.js:
--------------------------------------------------------------------------------
1 | import { WHEPClient } from "./whep.js"
2 |
window.start = async () => {
    console.log("Will start");

    // Stop any previous viewer before starting a new one
    // (mirrors the guard in whip.demo.js).
    if (window.whep_instance) {
        window.whep_instance.stop();
    }

    //Create peerconnection
    const pc = window.pc = new RTCPeerConnection();

    //Add recv only transceivers
    pc.addTransceiver("audio", { direction: 'recvonly' });
    pc.addTransceiver("video", { direction: 'recvonly' });

    //Render incoming tracks into the video element (looked up by id,
    //consistent with stop()).
    let stream = new MediaStream();
    document.getElementById("video").srcObject = stream;
    pc.ontrack = (event) => {
        stream.addTrack(event.track);
    }

    //Create whep client
    const whep = new WHEPClient();

    const url = "/whep/endpoint";
    const token = document.getElementById("room-id").value;

    //Start viewing
    whep.view(pc, url, token);

    window.whep_instance = whep;
}
29 |
window.stop = async () => {
    if (window.whep_instance) {
        window.whep_instance.stop();
        // Clear the handle so a repeated stop() is a no-op.
        window.whep_instance = null;
    }

    document.getElementById("video").srcObject = null;
}
--------------------------------------------------------------------------------
/examples/whip-whep/public/whip/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Whip
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/examples/whip-whep/public/whip/whip.demo.js:
--------------------------------------------------------------------------------
1 | import { WHIPClient } from "./whip.js"
2 |
window.start = async () => {
    console.log("Will start");

    // Tear down any previous publish session and camera stream.
    if (window.whip_instance) {
        window.whip_instance.stop();
    }
    if (window.stream_instance) {
        window.stream_instance.getTracks().forEach(track => track.stop());
    }

    // Capture microphone + camera and preview it locally.
    const mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true, video: true });
    document.getElementById("video").srcObject = mediaStream;

    // Create the peer connection and attach every captured track as send-only.
    const peerConnection = new RTCPeerConnection();
    mediaStream.getTracks().forEach((track) => {
        // Simulcast could be enabled here via sendEncodings.
        peerConnection.addTransceiver(track, {
            direction: "sendonly",
            streams: [mediaStream],
        });
    });

    // Publish through the WHIP endpoint using the room id as the token.
    const whip = new WHIPClient();
    const url = "/whip/endpoint";
    const token = document.getElementById("room-id").value;
    whip.publish(peerConnection, url, token);

    window.whip_instance = whip;
    window.stream_instance = mediaStream;
}
47 |
window.stop = async () => {
    if (window.whip_instance) {
        window.whip_instance.stop();
        // Clear the handle so a repeated stop() is a no-op.
        window.whip_instance = null;
    }

    if (window.stream_instance) {
        window.stream_instance.getTracks().forEach(track => track.stop());
        window.stream_instance = null;
    }

    document.getElementById("video").srcObject = null;
}
--------------------------------------------------------------------------------
/examples/whip-whep/src/http.rs:
--------------------------------------------------------------------------------
1 | use std::io::Read;
2 | use std::{collections::HashMap, fs::File, net::SocketAddr, path::Path, time::Duration};
3 | use tiny_http::{Header, Method, Request, Response, Server};
4 |
/// An HTTP request lifted out of tiny_http, keyed by `req_id` so the
/// matching response can be delivered later via `send_response`.
#[derive(Debug, Clone)]
pub struct HttpRequest {
    // Server-assigned id correlating this request with its response.
    pub req_id: u64,
    pub method: String,
    pub path: String,
    pub headers: HashMap,
    pub body: Vec,
}
13 |
14 | impl HttpRequest {
15 | pub fn http_auth(&self) -> String {
16 | if let Some(auth) = self.headers.get("Authorization") {
17 | auth.clone()
18 | } else if let Some(auth) = self.headers.get("authorization") {
19 | auth.clone()
20 | } else {
21 | "demo".to_string()
22 | }
23 | }
24 | }
25 |
/// Response payload to deliver for the pending request with the same `req_id`.
#[derive(Debug, Clone)]
pub struct HttpResponse {
    pub req_id: u64,
    // HTTP status code.
    pub status: u16,
    pub headers: HashMap,
    pub body: Vec,
}
33 |
/// Thin synchronous wrapper over tiny_http that parks pending requests so
/// responses can be produced asynchronously and matched back by request id.
pub struct SimpleHttpServer {
    // Monotonic id generator for incoming requests.
    req_id_seed: u64,
    server: Server,
    // Requests awaiting a response, keyed by request id.
    reqs: HashMap,
}
39 |
40 | impl SimpleHttpServer {
    /// Opens an HTTP server bound to 0.0.0.0:`port`.
    ///
    /// Panics if the port cannot be bound.
    pub fn new(port: u16) -> Self {
        Self {
            req_id_seed: 0,
            server: Server::http(SocketAddr::from(([0, 0, 0, 0], port))).expect("Should open http port"),
            reqs: HashMap::new(),
        }
    }
48 |
49 | pub fn send_response(&mut self, res: HttpResponse) {
50 | log::info!("sending response for request_id {}, status {}", res.req_id, res.status);
51 | let req = self.reqs.remove(&res.req_id).expect("Should have a request.");
52 | let mut response = Response::from_data(res.body).with_status_code(res.status);
53 | for (k, v) in res.headers {
54 | response.add_header(Header::from_bytes(k.as_bytes(), v.as_bytes()).unwrap());
55 | }
56 | response.add_header(Header::from_bytes("Access-Control-Allow-Origin", "*").unwrap());
57 | response.add_header(Header::from_bytes("Access-Control-Allow-Methods", "GET, POST, PATCH, DELETE, OPTIONS").unwrap());
58 | response.add_header(Header::from_bytes("Access-Control-Allow-Headers", "*").unwrap());
59 | response.add_header(Header::from_bytes("Access-Control-Allow-Credentials", "true").unwrap());
60 | req.respond(response).unwrap();
61 | }
62 |
63 | pub fn recv(&mut self, timeout: Duration) -> Result