├── .cargo └── config ├── .gitignore ├── .travis.yml ├── .travis └── rustfmt.toml ├── 3rdparty ├── dpdk-confs │ ├── common_linuxapp-16.04 │ ├── common_linuxapp-16.07 │ ├── common_linuxapp-16.07.container │ ├── common_linuxapp-16.07.vswitch │ ├── common_linuxapp-16.11 │ ├── common_linuxapp-16.11.container │ ├── common_linuxapp-16.11.vswitch │ ├── common_linuxapp-17.08 │ ├── common_linuxapp-17.08.container │ ├── common_linuxapp-2.1.0 │ ├── common_linuxapp-2.2.0 │ └── common_linuxapp-master ├── get-dpdk.sh ├── scripts │ ├── rustc.sh │ └── rustdoc.sh └── translate.py ├── Cargo.toml ├── LICENSE.md ├── README.md ├── build-container └── Dockerfile ├── build.sh ├── container ├── Dockerfile ├── build-containers.sh ├── run-container-bess.sh ├── run-container.sh ├── run-script.py ├── run-script.sh ├── sources.list └── test.cid ├── debugging └── rdtscp-test │ ├── Cargo.toml │ ├── asm │ ├── asm2 │ ├── core │ ├── rdtscp_test │ ├── src │ └── main.rs │ └── target │ └── rexplicit.ll ├── examples.sh ├── framework ├── .gitignore ├── Cargo.toml ├── build.rs ├── src │ ├── allocators │ │ ├── cache_aligned.rs │ │ └── mod.rs │ ├── common │ │ ├── errors.rs │ │ └── mod.rs │ ├── config │ │ ├── config_reader.rs │ │ ├── flag_reader.rs │ │ └── mod.rs │ ├── control │ │ ├── linux │ │ │ └── epoll.rs │ │ ├── mod.rs │ │ ├── sctp.rs │ │ └── tcp.rs │ ├── headers │ │ ├── ip.rs │ │ ├── mac.rs │ │ ├── mod.rs │ │ ├── null_header.rs │ │ ├── tcp.rs │ │ └── udp.rs │ ├── interface │ │ ├── dpdk.rs │ │ ├── mod.rs │ │ ├── packet.rs │ │ └── port │ │ │ ├── mod.rs │ │ │ ├── phy_port.rs │ │ │ └── virt_port.rs │ ├── lib.rs │ ├── native │ │ ├── libnuma.rs │ │ ├── mod.rs │ │ └── zcsi │ │ │ ├── mbuf.rs │ │ │ ├── mod.rs │ │ │ └── zcsi.rs │ ├── native_include │ │ ├── dpdk-headers.h │ │ └── mod.rs │ ├── operators │ │ ├── act.rs │ │ ├── add_metadata.rs │ │ ├── add_metadata_mut.rs │ │ ├── composition_batch.rs │ │ ├── deparsed_batch.rs │ │ ├── filter_batch.rs │ │ ├── group_by.rs │ │ ├── iterator.rs │ │ ├── macros.rs │ │ ├── 
map_batch.rs │ │ ├── merge_batch.rs │ │ ├── mod.rs │ │ ├── packet_batch.rs │ │ ├── parsed_batch.rs │ │ ├── receive_batch.rs │ │ ├── reset_parse.rs │ │ ├── restore_header.rs │ │ ├── send_batch.rs │ │ └── transform_batch.rs │ ├── queues │ │ ├── mod.rs │ │ └── mpsc_mbuf_queue.rs │ ├── scheduler │ │ ├── context.rs │ │ ├── embedded_scheduler.rs │ │ ├── mod.rs │ │ └── standalone_scheduler.rs │ ├── shared_state │ │ ├── directory.rs │ │ ├── mod.rs │ │ └── shared_vec.rs │ ├── state │ │ ├── cp_mergeable.rs │ │ ├── dp_mergeable.rs │ │ ├── mergeable.rs │ │ ├── mod.rs │ │ ├── reordered_buffer │ │ │ ├── mod.rs │ │ │ └── reordered_buffer.rs │ │ └── ring_buffer.rs │ └── utils │ │ ├── asm.rs │ │ ├── flow.rs │ │ └── mod.rs └── tests │ ├── address.rs │ ├── ring_buffer.rs │ └── tcp_window.rs ├── native ├── .clang-format ├── Makefile ├── ethpacket.c ├── fmt.sh ├── include │ ├── dpdk.h │ ├── mempool.h │ ├── pmd.h │ └── simd.h ├── init.c ├── mempool.c ├── pmd.c ├── ring.c ├── test │ ├── Makefile │ └── test.c └── utils.c ├── patches └── ovs-patch-c4623bb.patch ├── rustfmt.toml ├── scripts ├── bind-xl710.sh ├── check-examples.py ├── comparisons │ ├── kill-ovs-chain.py │ ├── start-bess-chain-vm.sh │ ├── start-bess-chain.sh │ ├── start-ovs-chain-vm.sh │ ├── start-ovs-chain.sh │ ├── start-ovs-test-vm.sh │ ├── start-ovs-test.sh │ ├── start-ovs-thruput-vm.sh │ ├── start-ovs-thruput.sh │ ├── start-vm-bess.sh │ ├── start-vm-writable.sh │ ├── start-vm.sh │ ├── vhchain.bess │ └── vpchain.bess ├── init.sh ├── kill-ovs-chain.sh ├── ovs-hairpin.sh ├── read-target.py ├── start-ovs-chain.sh └── tuning │ ├── energy.sh │ ├── pmqos-static.py │ └── read_cpu_dma_latency.py └── test ├── acl-fw ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── chain-test ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── config-test ├── Cargo.toml └── src │ └── main.rs ├── delay-test ├── .gitignore ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── embedded-scheduler-dependency-test ├── .gitignore ├── Cargo.toml └── src 
│ └── main.rs ├── embedded-scheduler-test ├── .gitignore ├── Cargo.toml └── src │ └── main.rs ├── framework-test ├── .gitignore ├── Cargo.toml └── src │ └── main.rs ├── lpm-embedded ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── lpm ├── Cargo.toml ├── out ├── src │ ├── main.rs │ └── nf.rs └── sudo ├── macswap ├── Cargo.toml ├── check.sh ├── data │ ├── expect.out │ └── http_lemmy.pcap └── src │ ├── main.rs │ └── nf.rs ├── maglev ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── nat ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── othertest ├── feature.rs └── test ├── packet_generation ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── packet_test ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── reset-parse ├── .gitignore ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── sctp-test ├── Cargo.toml ├── control-test │ ├── Cargo.toml │ ├── ctl │ │ ├── controller.py │ │ └── requirements.txt │ └── src │ │ ├── control.rs │ │ ├── main.rs │ │ └── nf.rs ├── ctl │ └── controller.py └── src │ ├── control.rs │ ├── main.rs │ └── nf.rs ├── shutdown-test ├── .gitignore ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── tcp_check ├── Cargo.toml └── src │ ├── main.rs │ └── nf.rs ├── tcp_payload ├── Cargo.toml ├── check.sh ├── data │ ├── expect.out │ └── http_lemmy.pcap └── src │ ├── main.rs │ └── nf.rs └── tcp_reconstruction ├── Cargo.toml └── src ├── main.rs └── nf.rs /.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | rustc="3rdparty/tools/bin/rustc.sh" 3 | rustdoc="3rdparty/tools/bin/rustdoc.sh" 4 | 5 | [term] 6 | color = 'auto' 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 
3 | 4 | # User-specific files 5 | *.suo 6 | *.user 7 | *.sln.docstates 8 | 9 | # Build results 10 | Cargo.lock 11 | /target/ 12 | target/ 13 | .make.dep 14 | *.tar.gz 15 | 3rdparty/dpdk 16 | /native/test/test 17 | /native/*.o 18 | /native/*.so 19 | 3rdparty/tools 20 | 3rdparty/rust 21 | 3rdparty/llvm 22 | 3rdparty/musl 23 | 24 | # Remove rustfmt backups 25 | *.rs.bk 26 | *.o 27 | *.so 28 | 29 | # Vim files 30 | *.vi 31 | *.swp 32 | 33 | # cscope 34 | cscope* 35 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | language: rust 3 | rust: 4 | - nightly 5 | dist: trusty 6 | matrix: 7 | include: 8 | - rust: nightly 9 | env: 10 | - SYSTEM_CARGO=1 11 | before_install: 12 | - sudo apt-get update -qq 13 | - sudo apt-get install -qq apt-utils build-essential pciutils linux-headers-`uname -r` python python3 libssl-dev 14 | - sudo apt-get install -qq libgnutls28 libgnutls-dev libcurl4-gnutls-dev cmake bash libpcap-dev libnuma-dev 15 | - ./build.sh deps 16 | services: 17 | - docker 18 | before_script: 19 | - sudo sysctl -w vm.nr_hugepages=256 20 | - sudo mkdir -p /mnt/huge 21 | - sudo mount -t hugetlbfs nodev /mnt/huge 22 | script: 23 | - ./build.sh check_examples 24 | - ./build.sh fmt_travis 25 | - ./build.sh build_container 26 | - ./build.sh ctr_test 27 | install: true 28 | -------------------------------------------------------------------------------- /.travis/rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 120 2 | reorder_imports = true 3 | report_fixme="Never" 4 | -------------------------------------------------------------------------------- /3rdparty/get-dpdk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Stop on any errors 3 | set -e 4 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 5 | 
DOWNLOAD_BASE="${1-$BASE_DIR}" 6 | echo Using "$DOWNLOAD_BASE" for downloads 7 | DPDK_VER=${DPDK_VER-"17.08"} 8 | MODE=download # or git 9 | DOWNLOAD_PATH="${DOWNLOAD_BASE}/dpdk.tar.gz" 10 | DPDK_RESULT="${BASE_DIR}/dpdk" 11 | CONFIG_FILE=${DPDK_CONFIG_FILE-"${BASE_DIR}/dpdk-confs/common_linuxapp-${DPDK_VER}"} 12 | CONFIG_PFX=${DPDK_CONFIG_PFX-""} 13 | echo "Using configuration ${CONFIG_FILE}${CONFIG_PFX}" 14 | 15 | if [ "$MODE" = "download" ]; then 16 | if [ ! -e "$DOWNLOAD_PATH" ]; then 17 | echo Fetching "http://dpdk.org/browse/dpdk/snapshot/dpdk-${DPDK_VER}.tar.gz" 18 | curl http://git.dpdk.org/dpdk/snapshot/dpdk-${DPDK_VER}.tar.gz -o "${DOWNLOAD_PATH}" 19 | fi 20 | if [ ! -d "${DPDK_RESULT}" ]; then 21 | mkdir -p ${DPDK_RESULT} 22 | fi 23 | tar zxvf "${DOWNLOAD_PATH}" -C "${DPDK_RESULT}" --strip-components=1 24 | else 25 | DPDK_REV="2e14846d15addd349a909176473e936f0cf79075" 26 | if [ ! -d "${DPDK_RESULT}" ]; then 27 | git clone git://dpdk.org/dpdk ${DPDK_RESULT} 28 | pushd ${DPDK_RESULT} 29 | git checkout $DPDK_REV 30 | popd 31 | fi 32 | fi 33 | 34 | cp "${CONFIG_FILE}${CONFIG_PFX}" "${DPDK_RESULT}/config/common_linuxapp" 35 | export RTE_TARGET=x86_64-native-linuxapp-gcc 36 | FLAGS="-g3 -Wno-error=maybe-uninitialized -fPIC" 37 | make config -C "${DPDK_RESULT}" T=x86_64-native-linuxapp-gcc \ 38 | EXTRA_CFLAGS="$FLAGS" 39 | PROCS="$(nproc)" 40 | make -j $PROCS -C "${DPDK_RESULT}" EXTRA_CFLAGS="$FLAGS" 41 | -------------------------------------------------------------------------------- /3rdparty/scripts/rustc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | if [ -e $BASE_DIR/rustc ]; then 5 | $BASE_DIR/rustc "$@" 6 | else 7 | #(>&2 echo "WARNING: Using system rustc") 8 | rustc "$@" 9 | fi 10 | -------------------------------------------------------------------------------- /3rdparty/scripts/rustdoc.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | if [ -e $BASE_DIR/rustdoc ]; then 5 | $BASE_DIR/rustc "$@" 6 | else 7 | echo "WARNING: Using system rustdoc" 1&>2 8 | rustdoc "$@" 9 | fi 10 | -------------------------------------------------------------------------------- /3rdparty/translate.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from clang.cindex import * 3 | 4 | def FindStruct(node, name): 5 | 6 | if node.kind is CursorKind.STRUCT_DECL and node.spelling == name: 7 | return node 8 | for c in node.get_children(): 9 | u = FindStruct(c, name) 10 | if u: 11 | return u 12 | return None 13 | 14 | def PrintTypes(scursor): 15 | offset = 0 16 | for c in scursor.get_children(): 17 | if c.kind is not CursorKind.FIELD_DECL: 18 | print c.kind 19 | continue 20 | var = ''.join(x for x in c.spelling.title() if x is not '_') 21 | if var == "Cacheline1": 22 | assert(offset < 64) 23 | offset = 64 24 | if c.type.get_size() == 0: 25 | continue 26 | type = c.type.spelling if c.type.kind is not TypeKind.POINTER else "IntPtr" 27 | print offset, var, type, c.type.get_size() 28 | offset += c.type.get_size() 29 | 30 | if __name__ == "__main__": 31 | f = sys.argv[1] 32 | index = Index.create() 33 | tu = index.parse(sys.argv[1], ["-DRTE_NEXT_ABI"]) 34 | cursor = FindStruct(tu.cursor, "rte_mbuf") 35 | print cursor.location.line 36 | PrintTypes(cursor) 37 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members=["framework", 3 | "test/framework-test", 4 | "test/delay-test", 5 | "test/macswap", 6 | "test/shutdown-test", 7 | "test/chain-test", 8 | "test/lpm", 9 | "test/lpm-embedded", 10 | "test/nat", 11 | "test/maglev", 12 | "test/packet_generation", 13 | 
"test/packet_test", 14 | "test/tcp_check", 15 | "test/tcp_payload", 16 | "test/sctp-test", 17 | "test/config-test", 18 | "test/reset-parse", 19 | "test/tcp_reconstruction", 20 | "test/acl-fw", 21 | "test/embedded-scheduler-test", 22 | "test/embedded-scheduler-dependency-test"] 23 | [profile.release] 24 | opt-level = 3 25 | lto = true 26 | rpath = true 27 | debug = true 28 | debug-assertions = false 29 | 30 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | ISC License 2 | 3 | Copyright (c) 2016, Aurojit Panda (UC Berkeley NetSys Lab) 4 | 5 | Permission to use, copy, modify, and/or distribute this software for any 6 | purpose with or without fee is hereby granted, provided that the above 7 | copyright notice and this permission notice appear in all copies. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
16 | -------------------------------------------------------------------------------- /build-container/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:artful 2 | MAINTAINER "Aurojit Panda " 3 | RUN apt-get -yq update && apt-get -yq install apt-utils 4 | RUN apt-get -yq update && apt-get -yq install build-essential \ 5 | vim-nox curl \ 6 | pciutils sudo git \ 7 | python python3 gosu 8 | RUN apt-get -yq update && apt-get -yq install libssl-dev \ 9 | libgnutls30 libgnutls-openssl-dev \ 10 | libcurl4-gnutls-dev cmake bash libpcap-dev libnuma-dev \ 11 | tcpdump clang-5.0 12 | RUN mkdir -p ~/.ssh && ssh-keyscan -t rsa github.com > ~/.ssh/known_hosts 13 | RUN curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain nightly -y 14 | ENV PATH /root/.cargo/bin:$PATH 15 | RUN rustup component add rustfmt-preview --toolchain=nightly 16 | RUN rustup update 17 | # RUN cargo install rustfmt-nightly || true 18 | CMD [/bin/bash] 19 | -------------------------------------------------------------------------------- /container/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:testing 2 | MAINTAINER "Aurojit Panda " 3 | ARG dpdk_file="common_linuxapp-16.07.container" 4 | #COPY container/sources.list /etc/apt/sources.list 5 | RUN apt-get -yq update && apt-get -yq install apt-utils 6 | RUN apt-get -yq update && apt-get -yq install build-essential \ 7 | vim-nox curl \ 8 | pciutils sudo git \ 9 | python python3 10 | RUN apt-get -yq update && apt-get -yq install libssl-dev \ 11 | libgnutls30 libgnutls-openssl-dev \ 12 | libcurl4-gnutls-dev cmake bash libpcap-dev libnuma-dev \ 13 | clang-5.0 libclang-dev 14 | # Fix the date at which we take Rust 15 | RUN mkdir -p ~/.ssh && ssh-keyscan -t rsa github.com > ~/.ssh/known_hosts 16 | ENV RTE_SDK=/opt/netbricks/3rdparty/dpdk 17 | ENV RTE_TARGET=build 18 | ENV RTE_ARCH=x86_64 19 | ENV NETBRICKS_ROOT=/opt/netbricks 20 | ENV 
DPDK_CONFIG_FILE="/opt/netbricks/3rdparty/dpdk-confs/$dpdk_file" 21 | ENV LD_LIBRARY_PATH="/opt/netbricks/native:/opt/netbricks/3rdparty/dpdk/build/lib" 22 | ENV DELAY_TEST_ROOT="/opt/netbricks/test/delay-test/target/release" 23 | ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt 24 | RUN mkdir -p /opt/netbricks 25 | COPY Cargo.toml /opt/netbricks 26 | COPY 3rdparty /opt/netbricks/3rdparty 27 | COPY framework /opt/netbricks/framework 28 | COPY native /opt/netbricks/native 29 | COPY patches /opt/netbricks/patches 30 | COPY scripts /opt/netbricks/scripts 31 | COPY test /opt/netbricks/test 32 | COPY .gitignore /opt/netbricks/.gitignore 33 | COPY LICENSE.md /opt/netbricks/LICENSE.md 34 | COPY README.md /opt/netbricks/README.md 35 | COPY build.sh /opt/netbricks/build.sh 36 | COPY examples.sh /opt/netbricks/examples.sh 37 | COPY rustfmt.toml /opt/netbricks/rustfmt.toml 38 | RUN curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain nightly -y 39 | ENV PATH /root/.cargo/bin:$PATH 40 | RUN /opt/netbricks/build.sh 41 | CMD /bin/bash 42 | -------------------------------------------------------------------------------- /container/build-containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker build -t e2d2/zcsi:0.5 -t e2d2/zcsi:latest --no-cache \ 3 | --cpuset-cpus="4-19" . 
4 | -------------------------------------------------------------------------------- /container/run-container-bess.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Arguments 3 | # 1 start or stop 4 | # 2 name 5 | # 3 delay 6 | # 4 master lcore 7 | # 5 receive core 8 | # 6 interface 9 | 10 | cmd=$1 11 | 12 | case $cmd in 13 | start) 14 | if [ "$#" -ne 6 ]; then 15 | echo "Incorrect arguments $#" 16 | echo "$0 start name delay mcore rcore iface" 17 | exit 1 18 | fi 19 | name=$2 20 | delay=$3 21 | mcore=$4 22 | rcore=$5 23 | iface=$6 24 | iface_array=($iface) 25 | docker run -d --privileged --cidfile="${name}.cid" \ 26 | --name=${name} \ 27 | --cpuset-cpus="${mcore},${rcore}" \ 28 | -e DELAY=$delay \ 29 | -e MCORE=$mcore \ 30 | -e RCORE=$rcore \ 31 | -e IFACE= \ 32 | -v /sys/bus/pci/drivers:/sys/bus/pci/drivers \ 33 | -v /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages \ 34 | -v /mnt/huge/:/mnt/huge/ \ 35 | -v /dev:/dev \ 36 | -v /sys/devices/system/node:/sys/devices/system/node \ 37 | -v /var/run:/var/run \ 38 | -v /tmp/sn_vports:/tmp/sn_vports e2d2/zcsi:0.2 39 | ;; 40 | stop) 41 | if [ "$#" -ne 2 ]; then 42 | echo "Incorrect arguments" 43 | exit 1 44 | fi 45 | name=$2 46 | if [ ! 
-e "${name}.cid" ]; then 47 | echo "Could not find container ${name}" 48 | exit 1 49 | fi 50 | docker kill `cat "${name}.cid"` 51 | rm ${name}.cid 52 | docker rm ${name} 53 | ;; 54 | esac 55 | -------------------------------------------------------------------------------- /container/run-container.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Arguments 3 | # 1 start or stop 4 | # 2 name 5 | # 3 delay 6 | # 4 master lcore 7 | # 5 receive core 8 | # 6 interface 9 | 10 | cmd=$1 11 | 12 | case $cmd in 13 | start) 14 | if [ "$#" -ne 6 ]; then 15 | echo "Incorrect arguments $#" 16 | echo "$0 start name delay mcore rcore iface" 17 | exit 1 18 | fi 19 | name=$2 20 | delay=$3 21 | mcore=$4 22 | rcore=$5 23 | iface=$6 24 | docker run -d --privileged --cidfile="${name}.cid" \ 25 | --name=${name} \ 26 | --cpuset-cpus="${mcore},${rcore}" \ 27 | -e DELAY=$delay \ 28 | -e MCORE=$mcore \ 29 | -e RCORE=$rcore \ 30 | -e IFACE="$iface" \ 31 | -v /sys/bus/pci/drivers:/sys/bus/pci/drivers \ 32 | -v /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages \ 33 | -v /mnt/huge/:/mnt/huge/ \ 34 | -v /dev:/dev \ 35 | -v /sys/devices/system/node:/sys/devices/system/node \ 36 | -v /var/run:/var/run \ 37 | -v /tmp/sn_vports:/tmp/sn_vports e2d2/zcsi:latest 38 | ;; 39 | stop) 40 | if [ "$#" -ne 2 ]; then 41 | echo "Incorrect arguments" 42 | exit 1 43 | fi 44 | name=$2 45 | if [ ! 
-e "${name}.cid" ]; then 46 | echo "Could not find container ${name}" 47 | exit 1 48 | fi 49 | docker kill `cat "${name}.cid"` 50 | rm ${name}.cid 51 | docker rm ${name} 52 | ;; 53 | esac 54 | -------------------------------------------------------------------------------- /container/run-script.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os 3 | import subprocess 4 | delay = os.environ['DELAY'] 5 | ifaces = os.environ['IFACE'] 6 | mcore = os.environ['MCORE'] 7 | rcore = os.environ['RCORE'] 8 | loc = os.environ['DELAY_TEST_ROOT'] 9 | cmd = ['%s/zcsi-delay'%loc, '-m', mcore, '--secondary', '-n', 'rte', '-d', \ 10 | delay] 11 | for iface in ifaces.strip().split(): 12 | cmd.append('-c') 13 | cmd.append(rcore) 14 | cmd.append('-v') 15 | cmd.append(iface) 16 | print "Going to run ", ' '.join(cmd) 17 | subprocess.check_call(cmd) 18 | #echo "Using intefaces" ${IFACE[@]} 19 | #echo "Master core" $MCORE 20 | #echo "Receiving core" $RCORE 21 | -------------------------------------------------------------------------------- /container/run-script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ZCSI_ROOT=/opt/e2d2 3 | echo "Delaying for " $DELAY 4 | echo "Using intefaces" ${IFACE[@]} 5 | echo "Master core" $MCORE 6 | echo "Receiving core" $RCORE 7 | IF=( "${IFACE[@]/#/-v }" ) 8 | CORES=( ) 9 | for i in "${!IFACE[@]}"; do 10 | CORES[$i]="-c $RCORE" 11 | done 12 | $DELAY_TEST_ROOT/zcsi-delay -m $MCORE ${IF[@]} ${CORES[@]} --secondary -n rte -d $DELAY 13 | -------------------------------------------------------------------------------- /container/sources.list: -------------------------------------------------------------------------------- 1 | #file generated by puppet 2 | # os 3 | deb http://ftp.us.debian.org/debian testing main contrib non-free 4 | deb-src http://ftp.us.debian.org/debian testing main contrib non-free 5 | 
-------------------------------------------------------------------------------- /container/test.cid: -------------------------------------------------------------------------------- 1 | 4d2d3d0ca1e6b028765990e1b42e8f046ad9a1d80aff3b62c58a2beebbef9930 -------------------------------------------------------------------------------- /debugging/rdtscp-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rdtscp-test" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | [profile.release] 8 | opt-level = 3 9 | rpath = true 10 | debug = true 11 | debug-assertions = false 12 | -------------------------------------------------------------------------------- /debugging/rdtscp-test/core: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NetSys/NetBricks/71dfb94beaeac107d7cd359985f9bd66fd223e1b/debugging/rdtscp-test/core -------------------------------------------------------------------------------- /debugging/rdtscp-test/rdtscp_test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NetSys/NetBricks/71dfb94beaeac107d7cd359985f9bd66fd223e1b/debugging/rdtscp-test/rdtscp_test -------------------------------------------------------------------------------- /debugging/rdtscp-test/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(asm)] 2 | #[inline] 3 | fn rdtscp_unsafe() -> u64 { 4 | let high: u32; 5 | let low: u32; 6 | let aux: u32; 7 | unsafe { 8 | asm!("rdtscp" 9 | : "={eax}" (low), "={edx}" (high) 10 | : 11 | : "ecx" 12 | : "volatile"); 13 | let ret = ((high as u64) << 32) | (low as u64); 14 | ret 15 | } 16 | } 17 | 18 | #[inline] 19 | fn rdtsc_unsafe() -> u64 { 20 | unsafe { 21 | let low: u32; 22 | let high: u32; 23 | asm!("rdtsc" 24 | : "={eax}" (low), "={edx}" (high) 25 | : 26 | : "rdx 
# Lists all the examples in NetBricks. This is used by the build script.
3 | export examples=( 4 | test/framework-test 5 | test/delay-test 6 | test/shutdown-test 7 | test/chain-test 8 | test/lpm 9 | test/lpm-embedded 10 | test/nat 11 | test/maglev 12 | test/tcp_check 13 | test/sctp-test 14 | test/config-test 15 | test/reset-parse 16 | test/tcp_reconstruction 17 | test/acl-fw 18 | test/packet_generation 19 | test/packet_test 20 | test/embedded-scheduler-test 21 | test/embedded-scheduler-dependency-test 22 | test/tcp_payload 23 | test/macswap 24 | ) 25 | 26 | -------------------------------------------------------------------------------- /framework/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 14 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 15 | Cargo.lock 16 | -------------------------------------------------------------------------------- /framework/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "e2d2" 3 | version = "0.2.0" 4 | authors = ["Aurojit Panda "] 5 | build = "build.rs" 6 | 7 | [lib] 8 | doctest = false 9 | 10 | [dependencies] 11 | libc = ">= 0.2.4" 12 | time = ">=0.1.0" 13 | getopts = "*" 14 | byteorder = "*" 15 | clippy = { version = "*", optional = true } 16 | fnv = "*" 17 | twox-hash = "*" 18 | regex = "*" 19 | lazy_static = "*" 20 | net2 = "*" 21 | # NIX restricts us to just unix for now, we can fix this if someone cares at a later point. 22 | nix = "*" 23 | # Figure out if we want this permanently or just for now. 
24 | rust-sctp = { git="https://github.com/netsys/rust-sctp", optional = true} 25 | toml = "*" 26 | # Hack for SHM 27 | uuid= { version = "*", features=["v4"] } 28 | error-chain = "*" 29 | tokio-core=">=0.1.8" 30 | futures=">=0.1.14" 31 | 32 | [features] 33 | default = [] 34 | performance = [] 35 | dev = ["clippy"] 36 | packet_offset = [] 37 | sctp = ["rust-sctp"] 38 | 39 | [build-dependencies] 40 | # Use Bindgen to generate DPDK structures. 41 | bindgen = ">=0.30.0" 42 | -------------------------------------------------------------------------------- /framework/build.rs: -------------------------------------------------------------------------------- 1 | extern crate bindgen; 2 | 3 | use std::env; 4 | use std::fs::File; 5 | use std::io::prelude::*; 6 | use std::path::Path; 7 | 8 | #[allow(dead_code)] 9 | fn parse_ld_archive(ar: &Path) -> Vec { 10 | let mut f = File::open(ar).unwrap(); 11 | let mut content = String::new(); 12 | f.read_to_string(&mut content).unwrap(); 13 | if "GROUP" == &content[0..5] { 14 | println!("Found group"); 15 | let open_idx = content.find("(").unwrap_or_else(|| content.len()); 16 | let remove_open = content[open_idx + 1..].trim(); 17 | let end_idx = remove_open.find(")").unwrap_or_else(|| remove_open.len()); 18 | let remaining = remove_open[..end_idx].trim(); 19 | println!("Remaining is {}", remaining); 20 | remaining 21 | .split_whitespace() 22 | .map(|s| { 23 | let end = s.len() - 2; 24 | String::from(&s[3..end]) 25 | }) 26 | .collect() 27 | } else { 28 | panic!("Could not find a group"); 29 | } 30 | } 31 | 32 | #[allow(dead_code)] 33 | fn write_external_link(libs: &Vec) { 34 | let out_dir = env::var("OUT_DIR").unwrap(); 35 | let dest = Path::new(&out_dir).join("linkage.rs"); 36 | let mut f = File::create(&dest).unwrap(); 37 | for l in libs { 38 | let link_str = format!("#[link(name=\"{}\", kind=\"static\")]", l); 39 | let overall = link_str + "\nextern \"C\" {}\n"; 40 | f.write_all(&overall.into_bytes()).unwrap(); 41 | } 42 | } 43 | 44 
| /// Cargo runs main in this file to get some additional settings (e.g., LD_LIBRARY_PATH). It reads the printed output 45 | /// looking for certain variables, see [here](http://doc.crates.io/build-script.html) for documentation. 46 | fn main() { 47 | // Get the directory where we are building. 48 | let cargo_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); 49 | let dpdk_build = Path::new(&cargo_dir) 50 | .parent() 51 | .unwrap() 52 | .join("3rdparty") 53 | .join("dpdk") 54 | .join("build"); 55 | 56 | let dpdk_libs = dpdk_build.clone().join("lib"); 57 | let native_path = Path::new(&cargo_dir) 58 | .parent() 59 | .unwrap() 60 | .join("target") 61 | .join("native"); 62 | //println!("DPDK {:?}", dpdk_libs.to_str()); 63 | // Use DPDK directory as -L 64 | println!( 65 | "cargo:rustc-link-search=native={}", 66 | dpdk_libs.to_str().unwrap() 67 | ); 68 | if dpdk_libs.join("libdpdk.so").exists() { 69 | println!("cargo:rustc-link-lib=dpdk"); 70 | } 71 | println!( 72 | "cargo:rustc-link-search=native={}", 73 | native_path.to_str().unwrap() 74 | ); 75 | let header_path = Path::new(&cargo_dir) 76 | .join("src") 77 | .join("native_include") 78 | .join("dpdk-headers.h"); 79 | let dpdk_include_path = dpdk_build.clone().join("include"); 80 | println!("Header path {:?}", header_path.to_str()); 81 | let bindings = bindgen::Builder::default() 82 | .header(header_path.to_str().unwrap()) 83 | .rust_target(bindgen::RustTarget::Nightly) 84 | .clang_args(vec!["-I", dpdk_include_path.to_str().unwrap()].iter()) 85 | .blacklist_type("max_align_t") // https://github.com/servo/rust-bindgen/issues/550 86 | .generate() 87 | .expect("Unable to generate DPDK bindings"); 88 | let out_dir = env::var("OUT_DIR").unwrap(); 89 | let dpdk_bindings = Path::new(&out_dir).join("dpdk_bindings.rs"); 90 | bindings 91 | .write_to_file(dpdk_bindings) 92 | .expect("Could not write bindings"); 93 | } 94 | -------------------------------------------------------------------------------- 
/framework/src/allocators/cache_aligned.rs: -------------------------------------------------------------------------------- 1 | use std::alloc::{self, Alloc, Global, Layout}; 2 | use std::fmt; 3 | use std::mem::size_of; 4 | use std::ops::{Deref, DerefMut}; 5 | use std::ptr::{self, Unique, NonNull}; 6 | 7 | const CACHE_LINE_SIZE: usize = 64; 8 | unsafe fn allocate_cache_line(size: usize) -> *mut u8 { 9 | alloc::Global.alloc_zeroed(Layout::from_size_align(size, CACHE_LINE_SIZE).unwrap()) 10 | .unwrap().as_ptr() as *mut u8 11 | } 12 | 13 | pub struct CacheAligned { 14 | ptr: Unique, 15 | } 16 | 17 | impl Drop for CacheAligned { 18 | fn drop(&mut self) { 19 | unsafe { 20 | alloc::Global.dealloc( 21 | NonNull::::new_unchecked(self.ptr.as_ptr() as *mut u8), 22 | Layout::from_size_align(size_of::(), CACHE_LINE_SIZE).unwrap(), 23 | ); 24 | } 25 | } 26 | } 27 | 28 | impl Deref for CacheAligned { 29 | type Target = T; 30 | fn deref(&self) -> &T { 31 | unsafe { self.ptr.as_ref() } 32 | } 33 | } 34 | 35 | impl DerefMut for CacheAligned { 36 | fn deref_mut(&mut self) -> &mut T { 37 | unsafe { self.ptr.as_mut() } 38 | } 39 | } 40 | 41 | impl CacheAligned { 42 | pub fn allocate(src: T) -> CacheAligned { 43 | unsafe { 44 | let alloc = allocate_cache_line(size_of::()) as *mut T; 45 | ptr::write(alloc, src); 46 | CacheAligned { 47 | ptr: Unique::new(alloc).unwrap(), 48 | } 49 | } 50 | } 51 | } 52 | 53 | impl Clone for CacheAligned 54 | where 55 | T: Clone, 56 | { 57 | fn clone(&self) -> CacheAligned { 58 | unsafe { 59 | let alloc = allocate_cache_line(size_of::()) as *mut T; 60 | ptr::copy(self.ptr.as_ptr() as *const T, alloc, 1); 61 | CacheAligned { 62 | ptr: Unique::new(alloc).unwrap(), 63 | } 64 | } 65 | } 66 | } 67 | 68 | impl fmt::Display for CacheAligned 69 | where 70 | T: fmt::Display, 71 | { 72 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 73 | T::fmt(&*self, f) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- 
/framework/src/allocators/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::cache_aligned::*; 2 | mod cache_aligned; 3 | -------------------------------------------------------------------------------- /framework/src/common/errors.rs: -------------------------------------------------------------------------------- 1 | error_chain! { 2 | errors { 3 | FailedAllocation { 4 | description("Failed to allocate memory") 5 | display("Failed to allocate memory") 6 | } 7 | FailedDeallocation { 8 | description("Failed to deallocate memory") 9 | display("Failed to deallocate memory") 10 | } 11 | FailedToInitializePort(port: i32) { 12 | description("Failed to initialize port") 13 | display("Failed to initialize port: {}", port) 14 | } 15 | BadQueue { 16 | description("Invalid queue request") 17 | display("Invalid queue request") 18 | } 19 | CannotSend { 20 | description("Cannot send data out port") 21 | display("Cannot send data out port") 22 | } 23 | BadDev(dev: String) { 24 | description("Cannot find device") 25 | display("Cannot find device: {}", dev) 26 | } 27 | BadVdev(vdev: String) { 28 | description("Bad vdev specification") 29 | display("Bad vdev specification: {}", vdev) 30 | } 31 | BadTxQueue(port: i32, queue: i32) { 32 | description("Bad TX queue") 33 | display("Bad TX queue {} for port {}", queue, port) 34 | } 35 | BadRxQueue(port: i32, queue: i32) { 36 | description("Bad RX queue") 37 | display("Bad RX queue {} for port {}", queue, port) 38 | } 39 | BadOffset(offset: usize) { 40 | description("Attempt to access bad packet offset") 41 | display("Attempt to access bad packet offset {}", offset) 42 | } 43 | 44 | MetadataTooLarge { 45 | description("Metadata is too large") 46 | display("Metadata is too large") 47 | } 48 | 49 | RingAllocationFailure { 50 | description("Could not allocate ring") 51 | display("Could not allocate ring") 52 | } 53 | 54 | InvalidRingSize(size: usize) { 55 | description("Bad ring size, 
must be power of 2") 56 | display("Bad ring size {}, must be a power of 2", size) 57 | } 58 | 59 | RingDuplicationFailure { 60 | description("Address of second copy of ring does not match expected address") 61 | display("Address of second copy of ring does not match expected address") 62 | } 63 | 64 | ConfigurationError(description: String) { 65 | description("Configuration error") 66 | display("Configuration error: {}", description) 67 | } 68 | 69 | NoRunningSchedulerOnCore(core: i32) { 70 | description("No scheduler running on core") 71 | display("No scheduler running on core {}", core) 72 | } 73 | } 74 | 75 | foreign_links { 76 | Io(::std::io::Error); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /framework/src/common/mod.rs: -------------------------------------------------------------------------------- 1 | mod errors; 2 | pub use self::errors::*; 3 | 4 | /// Null metadata associated with packets initially. 5 | pub struct EmptyMetadata; 6 | 7 | pub fn print_error(e: &Error) { 8 | println!("Error: {}", e); 9 | for e in e.iter().skip(1) { 10 | println!("Cause: {}", e); 11 | } 12 | if let Some(backtrace) = e.backtrace() { 13 | println!("Backtrace: {:?}", backtrace); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /framework/src/control/linux/epoll.rs: -------------------------------------------------------------------------------- 1 | use super::{Available, HUP, NONE, READ, WRITE}; 2 | use nix::sys::epoll::*; 3 | use std::default::Default; 4 | use std::os::unix::io::AsRawFd; 5 | use std::os::unix::io::RawFd; 6 | use std::slice; 7 | 8 | pub type Token = u64; 9 | 10 | pub struct PollHandle { 11 | epoll_fd: RawFd, 12 | } 13 | 14 | impl PollHandle { 15 | pub fn schedule_read(&self, file: &Fd, token: Token) { 16 | self.schedule_read_rawfd(file.as_raw_fd(), token); 17 | } 18 | 19 | pub fn schedule_read_rawfd(&self, fd: RawFd, token: Token) { 20 | let mut event = 
EpollEvent::new( 21 | EpollFlags::EPOLLIN | EpollFlags::EPOLLET | EpollFlags::EPOLLONESHOT, 22 | token, 23 | ); 24 | epoll_ctl(self.epoll_fd, EpollOp::EpollCtlMod, fd, &mut event).unwrap(); 25 | } 26 | 27 | pub fn schedule_write(&self, file: &Fd, token: Token) { 28 | self.schedule_write_rawfd(file.as_raw_fd(), token); 29 | } 30 | 31 | pub fn schedule_write_rawfd(&self, fd: RawFd, token: Token) { 32 | let mut event = EpollEvent::new( 33 | EpollFlags::EPOLLOUT | EpollFlags::EPOLLET | EpollFlags::EPOLLONESHOT, 34 | token, 35 | ); 36 | epoll_ctl(self.epoll_fd, EpollOp::EpollCtlMod, fd, &mut event).unwrap(); 37 | } 38 | 39 | /// This assumes file is already set to be non-blocking. This must also be called only the first time round. 40 | pub fn new_io_port(&self, file: &Fd, token: Token) { 41 | self.new_io_fd(file.as_raw_fd(), token); 42 | } 43 | 44 | pub fn new_io_fd(&self, fd: RawFd, token: Token) { 45 | let mut event = EpollEvent::new(EpollFlags::EPOLLET | EpollFlags::EPOLLONESHOT, token); 46 | epoll_ctl(self.epoll_fd, EpollOp::EpollCtlAdd, fd, &mut event).unwrap(); 47 | } 48 | } 49 | 50 | pub struct PollScheduler { 51 | epoll_fd: RawFd, 52 | ready_tokens: Vec, 53 | events: usize, 54 | } 55 | 56 | impl Default for PollScheduler { 57 | fn default() -> PollScheduler { 58 | PollScheduler::new() 59 | } 60 | } 61 | 62 | impl PollScheduler { 63 | pub fn new_poll_handle(&self) -> PollHandle { 64 | PollHandle { 65 | epoll_fd: self.epoll_fd, 66 | } 67 | } 68 | 69 | pub fn new() -> PollScheduler { 70 | PollScheduler { 71 | epoll_fd: epoll_create().unwrap(), 72 | ready_tokens: Vec::with_capacity(32), 73 | events: 0, 74 | } 75 | } 76 | 77 | #[inline] 78 | fn epoll_kind_to_available(&self, kind: &EpollFlags) -> Available { 79 | let mut available = NONE; 80 | if kind.contains(EpollFlags::EPOLLIN) { 81 | available |= READ 82 | }; 83 | if kind.contains(EpollFlags::EPOLLOUT) { 84 | available |= WRITE 85 | }; 86 | if kind.contains(EpollFlags::EPOLLHUP) || 
kind.contains(EpollFlags::EPOLLERR) { 87 | available |= HUP 88 | }; 89 | available 90 | } 91 | 92 | pub fn get_token_noblock(&mut self) -> Option<(Token, Available)> { 93 | if self.events > 0 { 94 | self.events -= 1; 95 | self.ready_tokens.pop() 96 | } else { 97 | let dest = 98 | unsafe { slice::from_raw_parts_mut(self.ready_tokens.as_mut_ptr(), self.ready_tokens.capacity()) }; 99 | self.events = epoll_wait(self.epoll_fd, dest, 0).unwrap(); 100 | unsafe { self.ready_tokens.set_len(self.events) }; 101 | self.ready_tokens.pop() 102 | }.map(|t| (t.data(), self.epoll_kind_to_available(&t.events()))) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /framework/src/control/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_os = "linux")] 2 | pub use self::epoll::*; 3 | 4 | #[cfg(target_os = "linux")] 5 | #[path = "linux/epoll.rs"] 6 | mod epoll; 7 | pub mod tcp; 8 | #[cfg(feature = "sctp")] 9 | pub mod sctp; 10 | 11 | use std::os::unix::io::RawFd; 12 | 13 | pub type Available = u64; 14 | 15 | pub const NONE: u64 = 0x0; 16 | pub const READ: u64 = 0x1; 17 | pub const WRITE: u64 = 0x2; 18 | pub const HUP: u64 = 0x4; 19 | 20 | pub struct IOScheduler { 21 | fd: RawFd, 22 | scheduler: PollHandle, 23 | token: Token, 24 | } 25 | 26 | impl IOScheduler { 27 | pub fn new(scheduler: PollHandle, fd: RawFd, token: Token) -> IOScheduler { 28 | scheduler.new_io_fd(fd, token); 29 | IOScheduler { 30 | fd: fd, 31 | scheduler: scheduler, 32 | token: token, 33 | } 34 | } 35 | 36 | pub fn schedule_read(&self) { 37 | self.scheduler.schedule_read_rawfd(self.fd, self.token); 38 | } 39 | 40 | pub fn schedule_write(&self) { 41 | self.scheduler.schedule_write_rawfd(self.fd, self.token); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /framework/src/headers/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub use self::ip::*; 2 | pub use self::mac::*; 3 | pub use self::null_header::*; 4 | pub use self::tcp::*; 5 | pub use self::udp::*; 6 | mod mac; 7 | mod ip; 8 | mod udp; 9 | mod tcp; 10 | mod null_header; 11 | 12 | /// A trait implemented by all headers, used for reading them from a mbuf. 13 | pub trait EndOffset: Send { 14 | type PreviousHeader: EndOffset; 15 | 16 | /// Offset returns the number of bytes to skip to get to the next header, relative to the start 17 | /// of the mbuf. 18 | fn offset(&self) -> usize; 19 | 20 | /// Returns the size of this header in bytes. 21 | fn size() -> usize; 22 | 23 | /// Returns the size of the payload in bytes. The hint is necessary for things like the L2 header which have no 24 | /// explicit length field. 25 | fn payload_size(&self, hint: usize) -> usize; 26 | 27 | fn check_correct(&self, prev: &Self::PreviousHeader) -> bool; 28 | } 29 | -------------------------------------------------------------------------------- /framework/src/headers/null_header.rs: -------------------------------------------------------------------------------- 1 | use super::EndOffset; 2 | use std::fmt; 3 | 4 | #[derive(Default)] 5 | #[repr(C, packed)] 6 | pub struct NullHeader; 7 | 8 | impl fmt::Display for NullHeader { 9 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 10 | write!(f, "null") 11 | } 12 | } 13 | 14 | impl EndOffset for NullHeader { 15 | type PreviousHeader = NullHeader; 16 | 17 | #[inline] 18 | fn offset(&self) -> usize { 19 | 0 20 | } 21 | #[inline] 22 | fn size() -> usize { 23 | 0 24 | } 25 | #[inline] 26 | fn payload_size(&self, hint: usize) -> usize { 27 | hint 28 | } 29 | 30 | #[inline] 31 | fn check_correct(&self, _: &NullHeader) -> bool { 32 | true 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /framework/src/headers/udp.rs: 
-------------------------------------------------------------------------------- 1 | use super::EndOffset; 2 | use headers::IpHeader; 3 | use std::default::Default; 4 | use std::fmt; 5 | 6 | /// UDP header using SSE 7 | #[derive(Default)] 8 | #[repr(C, packed)] 9 | pub struct UdpHeader { 10 | src_port: u16, 11 | dst_port: u16, 12 | len: u16, 13 | csum: u16, 14 | } 15 | 16 | impl fmt::Display for UdpHeader { 17 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 18 | write!( 19 | f, 20 | "src_port: {} dst_port: {} len: {} checksum: {}", 21 | self.src_port(), 22 | self.dst_port(), 23 | self.length(), 24 | self.checksum() 25 | ) 26 | } 27 | } 28 | 29 | impl EndOffset for UdpHeader { 30 | type PreviousHeader = IpHeader; 31 | #[inline] 32 | fn offset(&self) -> usize { 33 | 8 // 8 bytes 34 | } 35 | 36 | #[inline] 37 | fn size() -> usize { 38 | 8 39 | } 40 | 41 | #[inline] 42 | fn payload_size(&self, _: usize) -> usize { 43 | self.length() as usize - self.offset() 44 | } 45 | 46 | #[inline] 47 | fn check_correct(&self, _prev: &IpHeader) -> bool { 48 | true 49 | } 50 | } 51 | 52 | impl UdpHeader { 53 | #[inline] 54 | pub fn new() -> UdpHeader { 55 | Default::default() 56 | } 57 | 58 | #[inline] 59 | pub fn src_port(&self) -> u16 { 60 | u16::from_be(self.src_port) 61 | } 62 | 63 | #[inline] 64 | pub fn dst_port(&self) -> u16 { 65 | u16::from_be(self.dst_port) 66 | } 67 | 68 | #[inline] 69 | pub fn set_src_port(&mut self, port: u16) { 70 | self.src_port = u16::to_be(port); 71 | } 72 | 73 | #[inline] 74 | pub fn set_dst_port(&mut self, port: u16) { 75 | self.dst_port = u16::to_be(port); 76 | } 77 | 78 | #[inline] 79 | pub fn length(&self) -> u16 { 80 | u16::from_be(self.len) 81 | } 82 | 83 | #[inline] 84 | pub fn set_length(&mut self, len: u16) { 85 | self.len = u16::to_be(len) 86 | } 87 | 88 | #[inline] 89 | pub fn checksum(&self) -> u16 { 90 | u16::from_be(self.csum) 91 | } 92 | 93 | #[inline] 94 | pub fn set_checksum(&mut self, csum: u16) { 95 | self.csum = 
u16::to_be(csum); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /framework/src/interface/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::packet::*; 2 | pub use self::port::*; 3 | pub mod dpdk; 4 | mod port; 5 | mod packet; 6 | use common::*; 7 | use native::zcsi::MBuf; 8 | 9 | /// Generic trait for objects that can receive packets. 10 | pub trait PacketRx: Send { 11 | fn recv(&self, pkts: &mut [*mut MBuf]) -> Result; 12 | } 13 | 14 | /// Generic trait for objects that can send packets. 15 | pub trait PacketTx: Send { 16 | fn send(&self, pkts: &mut [*mut MBuf]) -> Result; 17 | } 18 | 19 | pub trait PacketRxTx: PacketRx + PacketTx {} 20 | -------------------------------------------------------------------------------- /framework/src/interface/port/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::phy_port::*; 2 | pub use self::virt_port::*; 3 | use allocators::*; 4 | use common::*; 5 | use interface::{PacketRx, PacketTx}; 6 | use native::zcsi::MBuf; 7 | use std::sync::atomic::AtomicUsize; 8 | mod phy_port; 9 | mod virt_port; 10 | 11 | /// Statistics for PMD port. 
12 | struct PortStats { 13 | pub stats: AtomicUsize, 14 | } 15 | 16 | impl PortStats { 17 | pub fn new() -> CacheAligned { 18 | CacheAligned::allocate(PortStats { 19 | stats: AtomicUsize::new(0), 20 | }) 21 | } 22 | } 23 | 24 | impl PacketRx for CacheAligned { 25 | #[inline] 26 | fn recv(&self, pkts: &mut [*mut MBuf]) -> Result { 27 | T::recv(&*self, pkts) 28 | } 29 | } 30 | 31 | impl PacketTx for CacheAligned { 32 | #[inline] 33 | fn send(&self, pkts: &mut [*mut MBuf]) -> Result { 34 | T::send(&*self, pkts) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /framework/src/interface/port/virt_port.rs: -------------------------------------------------------------------------------- 1 | use super::PortStats; 2 | use super::super::{PacketRx, PacketTx}; 3 | use allocators::*; 4 | use common::*; 5 | use native::zcsi::*; 6 | use std::fmt; 7 | use std::sync::Arc; 8 | use std::sync::atomic::Ordering; 9 | 10 | pub struct VirtualPort { 11 | stats_rx: Arc>, 12 | stats_tx: Arc>, 13 | } 14 | 15 | #[derive(Clone)] 16 | pub struct VirtualQueue { 17 | stats_rx: Arc>, 18 | stats_tx: Arc>, 19 | } 20 | 21 | impl fmt::Display for VirtualQueue { 22 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 23 | write!(f, "virtual queue") 24 | } 25 | } 26 | 27 | impl PacketTx for VirtualQueue { 28 | #[inline] 29 | fn send(&self, pkts: &mut [*mut MBuf]) -> Result { 30 | let len = pkts.len() as i32; 31 | let update = self.stats_tx.stats.load(Ordering::Relaxed) + len as usize; 32 | self.stats_tx.stats.store(update, Ordering::Relaxed); 33 | unsafe { 34 | mbuf_free_bulk(pkts.as_mut_ptr(), len); 35 | } 36 | Ok(len as u32) 37 | } 38 | } 39 | 40 | impl PacketRx for VirtualQueue { 41 | /// Send a batch of packets out this PortQueue. Note this method is internal to NetBricks (should not be directly 42 | /// called). 
43 | #[inline] 44 | fn recv(&self, pkts: &mut [*mut MBuf]) -> Result { 45 | let len = pkts.len() as i32; 46 | let status = unsafe { mbuf_alloc_bulk(pkts.as_mut_ptr(), 60, len) }; 47 | let alloced = if status == 0 { len } else { 0 }; 48 | let update = self.stats_rx.stats.load(Ordering::Relaxed) + alloced as usize; 49 | self.stats_rx.stats.store(update, Ordering::Relaxed); 50 | Ok(alloced as u32) 51 | } 52 | } 53 | 54 | impl VirtualPort { 55 | pub fn new(_queues: i32) -> Result> { 56 | Ok(Arc::new(VirtualPort { 57 | stats_rx: Arc::new(PortStats::new()), 58 | stats_tx: Arc::new(PortStats::new()), 59 | })) 60 | } 61 | 62 | pub fn new_virtual_queue(&self, _queue: i32) -> Result> { 63 | Ok(CacheAligned::allocate(VirtualQueue { 64 | stats_rx: self.stats_rx.clone(), 65 | stats_tx: self.stats_tx.clone(), 66 | })) 67 | } 68 | 69 | /// Get stats for an RX/TX queue pair. 70 | pub fn stats(&self) -> (usize, usize) { 71 | ( 72 | self.stats_rx.stats.load(Ordering::Relaxed), 73 | self.stats_tx.stats.load(Ordering::Relaxed), 74 | ) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /framework/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![recursion_limit = "1024"] 2 | #![feature(asm)] 3 | #![feature(log_syntax)] 4 | #![feature(box_syntax)] 5 | #![feature(specialization)] 6 | #![feature(slice_concat_ext)] 7 | #![feature(fnbox)] 8 | #![feature(alloc)] 9 | #![feature(heap_api)] 10 | #![feature(unique)] 11 | #![feature(const_fn)] 12 | // FIXME: Figure out if this is really the right thing here. 13 | #![feature(ptr_internals)] 14 | // Used for cache alignment. 15 | #![feature(allocator_api)] 16 | #![allow(unused_features)] 17 | #![feature(integer_atomics)] 18 | #![allow(unused_doc_comments)] 19 | #![cfg_attr(feature = "dev", allow(unstable_features))] 20 | // Need this since PMD port construction triggers too many arguments. 
#![cfg_attr(feature = "dev", allow(too_many_arguments))]
#![cfg_attr(feature = "dev", feature(plugin))]
#![cfg_attr(feature = "dev", plugin(clippy))]
#![cfg_attr(feature = "dev", deny(warnings))]
extern crate byteorder;
extern crate fnv;
#[macro_use]
extern crate lazy_static;
extern crate libc;
extern crate net2;
extern crate regex;
#[cfg(feature = "sctp")]
extern crate sctp;
extern crate twox_hash;
// TOML for scheduling configuration
extern crate toml;
// UUID for SHM naming
extern crate uuid;

// For cache aware allocation
extern crate alloc;

// Better error handling.
#[macro_use]
extern crate error_chain;

#[cfg(unix)]
extern crate nix;
// Private FFI layer; re-exported selectively through the public modules below.
#[allow(dead_code)]
mod native;
mod native_include;
pub mod allocators;
pub mod headers;
pub mod scheduler;
pub mod utils;
pub mod queues;
pub mod state;
pub mod operators;
pub mod interface;
pub mod common;
pub mod control;
pub mod shared_state;
pub mod config;

// ---------- framework/src/native/mod.rs ----------
// Bindings to the native support libraries (libnuma and the zcsi shim).
pub(crate) mod libnuma;
pub(crate) mod zcsi;

// ---------- framework/src/native/zcsi/mod.rs ----------
#[cfg_attr(feature = "dev", allow(module_inception))]
mod zcsi;
mod mbuf;
pub use self::mbuf::*;
pub use self::zcsi::*;

// ---------- framework/src/native/zcsi/zcsi.rs ----------
// Imports for the FFI declarations; the `extern "C"` block itself follows.
use super::MBuf;
use headers::MacAddress;
use std::os::raw::c_char;
// FFI surface of the native `zcsi` support library (see native/ in this repo).
#[link(name = "zcsi")]
extern "C" {
    // --- EAL / system initialization ---
    pub fn init_system_whitelisted(
        name: *const c_char,
        nlen: i32,
        core: i32,
        whitelist: *mut *const c_char,
        wlcount: i32,
        pool_size: u32,
        cache_size: u32,
        slots: u16,
    ) -> i32;
    pub fn init_thread(tid: i32, core: i32) -> i32;
    pub fn init_secondary(name: *const c_char, nlen: i32, core: i32, vdevs: *mut *const c_char, vdev_count: i32)
        -> i32;
    // --- PMD port management ---
    pub fn init_pmd_port(
        port: i32,
        rxqs: i32,
        txqs: i32,
        rx_cores: *const i32,
        tx_cores: *const i32,
        nrxd: i32,
        ntxd: i32,
        loopback: i32,
        tso: i32,
        csumoffload: i32,
    ) -> i32;
    pub fn free_pmd_port(port: i32) -> i32;
    // --- Packet I/O ---
    pub fn recv_pkts(port: i32, qid: i32, pkts: *mut *mut MBuf, len: i32) -> i32;
    pub fn send_pkts(port: i32, qid: i32, pkts: *mut *mut MBuf, len: i32) -> i32;
    pub fn num_pmd_ports() -> i32;
    pub fn rte_eth_macaddr_get(port: i32, address: *mut MacAddress);
    // --- Virtual / ring devices (BESS, OVS) ---
    pub fn init_bess_eth_ring(ifname: *const c_char, core: i32) -> i32;
    pub fn init_ovs_eth_ring(iface: i32, core: i32) -> i32;
    pub fn find_port_with_pci_address(pciaddr: *const c_char) -> i32;
    pub fn attach_pmd_device(dev: *const c_char) -> i32;
    // FIXME: Generic PMD info
    pub fn max_rxqs(port: i32) -> i32;
    pub fn max_txqs(port: i32) -> i32;
    // --- MBuf allocation ---
    pub fn mbuf_alloc() -> *mut MBuf;
    pub fn mbuf_free(buf: *mut MBuf);
    pub fn mbuf_alloc_bulk(array: *mut *mut MBuf, len: u16, cnt: i32) -> i32;
    pub fn mbuf_free_bulk(array: *mut *mut MBuf, cnt: i32) -> i32;
    // --- Hashing / checksums ---
    pub fn crc_hash_native(to_hash: *const u8, size: u32, iv: u32) -> u32;
    pub fn ipv4_cksum(payload: *const u8) -> u16;
}

// ---------- framework/src/native_include/dpdk-headers.h (C) ----------
// NOTE(review): the angle-bracketed include targets were lost in extraction
// (stripped like markup); restore the DPDK header names from the repository.
#include
#include
#include
#include
#include 6 | #include 7 | #include 8 | #include 9 | #include 10 | -------------------------------------------------------------------------------- /framework/src/native_include/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_upper_case_globals)] 2 | #![allow(non_camel_case_types)] 3 | #![allow(non_snake_case)] 4 | #![allow(dead_code)] 5 | 6 | include!(concat!(env!("OUT_DIR"), "/dpdk_bindings.rs")); 7 | -------------------------------------------------------------------------------- /framework/src/operators/act.rs: -------------------------------------------------------------------------------- 1 | use super::packet_batch::PacketBatch; 2 | use common::*; 3 | use interface::PacketTx; 4 | pub trait Act { 5 | /// Actually perform whatever needs to be done by this processing node. 6 | #[inline] 7 | fn act(&mut self); 8 | 9 | /// Notification indicating we are done processing the current batch of packets 10 | #[inline] 11 | fn done(&mut self); 12 | 13 | #[inline] 14 | fn send_q(&mut self, port: &PacketTx) -> Result; 15 | 16 | #[inline] 17 | fn capacity(&self) -> i32; 18 | 19 | #[inline] 20 | fn drop_packets(&mut self, idxes: &[usize]) -> Option; 21 | 22 | /// Remove all packets from the batch (without actually freeing them). 23 | #[inline] 24 | fn clear_packets(&mut self) { 25 | self.get_packet_batch().clear_packets(); 26 | } 27 | 28 | #[inline] 29 | fn get_packet_batch(&mut self) -> &mut PacketBatch; 30 | 31 | /// Get tasks that feed produce packets for this batch. We use this in the embedded scheduler. 
32 | #[inline] 33 | fn get_task_dependencies(&self) -> Vec; 34 | } 35 | -------------------------------------------------------------------------------- /framework/src/operators/add_metadata.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use interface::Packet; 7 | use interface::PacketTx; 8 | use std::marker::PhantomData; 9 | 10 | pub type MetadataFn = Box) -> M2 + Send>; 11 | 12 | pub struct AddMetadataBatch 13 | where 14 | M: Send + Sized, 15 | V: Batch + BatchIterator + Act, 16 | { 17 | parent: V, 18 | generator: MetadataFn, 19 | applied: bool, 20 | _phantom_m: PhantomData, 21 | } 22 | 23 | impl AddMetadataBatch 24 | where 25 | M: Send + Sized, 26 | V: Batch + BatchIterator + Act, 27 | { 28 | pub fn new(parent: V, generator: MetadataFn) -> AddMetadataBatch { 29 | AddMetadataBatch { 30 | parent: parent, 31 | generator: generator, 32 | applied: false, 33 | _phantom_m: PhantomData, 34 | } 35 | } 36 | } 37 | 38 | impl Batch for AddMetadataBatch 39 | where 40 | M: Send + Sized, 41 | V: Batch + BatchIterator + Act, 42 | { 43 | } 44 | 45 | impl BatchIterator for AddMetadataBatch 46 | where 47 | M: Send + Sized, 48 | V: Batch + BatchIterator + Act, 49 | { 50 | type Header = V::Header; 51 | type Metadata = M; 52 | 53 | #[inline] 54 | fn start(&mut self) -> usize { 55 | self.parent.start() 56 | } 57 | 58 | #[inline] 59 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 60 | self.parent.next_payload(idx).map(|p| PacketDescriptor { 61 | packet: p.packet.reinterpret_metadata(), 62 | }) 63 | } 64 | } 65 | 66 | impl Act for AddMetadataBatch 67 | where 68 | M: Send + Sized, 69 | V: Batch + BatchIterator + Act, 70 | { 71 | #[inline] 72 | fn act(&mut self) { 73 | if !self.applied { 74 | self.parent.act(); 75 | { 76 | let iter = PayloadEnumerator::::new(&mut self.parent); 77 | while let 
Some(ParsedDescriptor { mut packet, .. }) = iter.next(&mut self.parent) { 78 | let metadata = (self.generator)(&packet); 79 | packet.write_metadata(&metadata).unwrap(); // FIXME: WHat to do on error? 80 | } 81 | } 82 | self.applied = true; 83 | } 84 | } 85 | 86 | #[inline] 87 | fn done(&mut self) { 88 | self.applied = false; 89 | self.parent.done(); 90 | } 91 | 92 | #[inline] 93 | fn send_q(&mut self, port: &PacketTx) -> Result { 94 | self.parent.send_q(port) 95 | } 96 | 97 | #[inline] 98 | fn capacity(&self) -> i32 { 99 | self.parent.capacity() 100 | } 101 | 102 | #[inline] 103 | fn drop_packets(&mut self, idxes: &[usize]) -> Option { 104 | self.parent.drop_packets(idxes) 105 | } 106 | 107 | #[inline] 108 | fn clear_packets(&mut self) { 109 | self.parent.clear_packets() 110 | } 111 | 112 | #[inline] 113 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 114 | self.parent.get_packet_batch() 115 | } 116 | 117 | #[inline] 118 | fn get_task_dependencies(&self) -> Vec { 119 | self.parent.get_task_dependencies() 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /framework/src/operators/add_metadata_mut.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use interface::Packet; 7 | use interface::PacketTx; 8 | use std::marker::PhantomData; 9 | 10 | pub type MutableMetadataFn = Box) -> M2 + Send>; 11 | 12 | pub struct MutableAddMetadataBatch 13 | where 14 | M: Send + Sized, 15 | V: Batch + BatchIterator + Act, 16 | { 17 | parent: V, 18 | generator: MutableMetadataFn, 19 | applied: bool, 20 | _phantom_m: PhantomData, 21 | } 22 | 23 | impl MutableAddMetadataBatch 24 | where 25 | M: Send + Sized, 26 | V: Batch + BatchIterator + Act, 27 | { 28 | pub fn new(parent: V, generator: MutableMetadataFn) -> MutableAddMetadataBatch { 29 | 
MutableAddMetadataBatch { 30 | parent: parent, 31 | generator: generator, 32 | applied: false, 33 | _phantom_m: PhantomData, 34 | } 35 | } 36 | } 37 | 38 | impl Batch for MutableAddMetadataBatch 39 | where 40 | M: Send + Sized, 41 | V: Batch + BatchIterator + Act, 42 | { 43 | } 44 | 45 | impl BatchIterator for MutableAddMetadataBatch 46 | where 47 | M: Send + Sized, 48 | V: Batch + BatchIterator + Act, 49 | { 50 | type Header = V::Header; 51 | type Metadata = M; 52 | 53 | #[inline] 54 | fn start(&mut self) -> usize { 55 | self.parent.start() 56 | } 57 | 58 | #[inline] 59 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 60 | self.parent.next_payload(idx).map(|p| PacketDescriptor { 61 | packet: p.packet.reinterpret_metadata(), 62 | }) 63 | } 64 | } 65 | 66 | impl Act for MutableAddMetadataBatch 67 | where 68 | M: Send + Sized, 69 | V: Batch + BatchIterator + Act, 70 | { 71 | #[inline] 72 | fn act(&mut self) { 73 | if !self.applied { 74 | self.parent.act(); 75 | { 76 | let iter = PayloadEnumerator::::new(&mut self.parent); 77 | while let Some(ParsedDescriptor { mut packet, .. }) = iter.next(&mut self.parent) { 78 | let metadata = (self.generator)(&mut packet); 79 | packet.write_metadata(&metadata).unwrap(); // FIXME: WHat to do on error? 
80 | } 81 | } 82 | self.applied = true; 83 | } 84 | } 85 | 86 | #[inline] 87 | fn done(&mut self) { 88 | self.applied = false; 89 | self.parent.done(); 90 | } 91 | 92 | #[inline] 93 | fn send_q(&mut self, port: &PacketTx) -> Result { 94 | self.parent.send_q(port) 95 | } 96 | 97 | #[inline] 98 | fn capacity(&self) -> i32 { 99 | self.parent.capacity() 100 | } 101 | 102 | #[inline] 103 | fn drop_packets(&mut self, idxes: &[usize]) -> Option { 104 | self.parent.drop_packets(idxes) 105 | } 106 | 107 | #[inline] 108 | fn clear_packets(&mut self) { 109 | self.parent.clear_packets() 110 | } 111 | 112 | #[inline] 113 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 114 | self.parent.get_packet_batch() 115 | } 116 | 117 | #[inline] 118 | fn get_task_dependencies(&self) -> Vec { 119 | self.parent.get_task_dependencies() 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /framework/src/operators/composition_batch.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::{BatchIterator, PacketDescriptor}; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use headers::EndOffset; 7 | use headers::NullHeader; 8 | use interface::PacketTx; 9 | use scheduler::Executable; 10 | 11 | /// `CompositionBatch` allows multiple NFs to be combined. A composition batch resets the packet pointer so that each NF 12 | /// can treat packets as originating from the NF itself. 
// NOTE(review): generic parameters in this span (e.g. the trait-object type inside
// `Box>`, the `<T, V>` parameters and where-clause bounds below) were stripped
// during extraction; the surviving tokens are preserved as-is — restore the
// missing `<...>` text from the repository before compiling.
pub struct CompositionBatch {
    // Type-erased upstream operator; `reset()` in `new` rewinds the header stack
    // so downstream NFs see packets as starting from a NullHeader.
    parent: Box>,
}

impl CompositionBatch {
    pub fn new>(
        parent: V,
    ) -> CompositionBatch {
        CompositionBatch {
            parent: box parent.reset(),
        }
    }
}

impl Batch for CompositionBatch {}

impl BatchIterator for CompositionBatch {
    // A composition boundary exposes no parsed headers or metadata.
    type Header = NullHeader;
    type Metadata = EmptyMetadata;

    #[inline]
    fn start(&mut self) -> usize {
        self.parent.start()
    }

    #[inline]
    unsafe fn next_payload(&mut self, idx: usize) -> Option> {
        self.parent.next_payload(idx)
    }
}

/// Internal interface for packets.
impl Act for CompositionBatch {
    // Delegating Act implementation generated by the `act!` macro (macros.rs).
    act!{}
}

impl Executable for CompositionBatch {
    /// One scheduler tick: process the batch, then signal completion.
    #[inline]
    fn execute(&mut self) {
        self.act();
        self.done();
    }

    #[inline]
    fn dependencies(&mut self) -> Vec {
        self.get_task_dependencies()
    }
}

// ---------- framework/src/operators/deparsed_batch.rs ----------
// Operator that "un-parses" one header level: iteration continues at the
// previous header in the stack.
use super::Batch;
use super::act::Act;
use super::iterator::*;
use super::packet_batch::PacketBatch;
use common::*;
use headers::EndOffset;
use interface::*;

pub struct DeparsedBatch
where
    V: Batch + BatchIterator + Act,
{
    parent: V,
}

impl Act for DeparsedBatch
where
    V: Batch + BatchIterator + Act,
{
    act!{}
}

impl Batch for DeparsedBatch
where
    V: Batch + BatchIterator + Act,
{
}

impl DeparsedBatch
where
    V: Batch + BatchIterator + Act,
{
    #[inline]
    pub fn new(parent: V) -> DeparsedBatch {
        DeparsedBatch { parent: parent }
    }
}

impl BatchIterator for DeparsedBatch
where
    V: Batch + BatchIterator + Act,
{
    // Step one level up the header stack relative to the parent operator.
    type Header = <::Header as EndOffset>::PreviousHeader;
    type Metadata = ::Metadata;
    unsafe fn next_payload(&mut self, idx: usize) -> Option> {
        self.parent.next_payload(idx).map(|p| PacketDescriptor {
            packet: p.packet.deparse_header_stack().unwrap(),
        })
    }

    #[inline]
    fn start(&mut self) -> usize {
        self.parent.start()
    }
}

// ---------- framework/src/operators/filter_batch.rs ----------
// Operator that drops packets for which `filter` returns false.
use super::Batch;
use super::act::Act;
use super::iterator::*;
use super::packet_batch::PacketBatch;
use common::*;
use headers::EndOffset;
use interface::Packet;
use interface::PacketTx;

// Predicate deciding whether a packet is kept (true) or dropped (false).
pub type FilterFn = Box) -> bool + Send>;

pub struct FilterBatch
where
    T: EndOffset,
    V: Batch + BatchIterator
    + Act,
{
    parent: V,
    filter: FilterFn,
    // Cached parent capacity, reported by `capacity()` without re-querying.
    capacity: usize,
    // Scratch list of indices to drop, reused across `act` invocations.
    remove: Vec,
}

impl FilterBatch
where
    T: EndOffset,
    V: Batch + BatchIterator
    + Act,
{
    #[inline]
    pub fn new(parent: V, filter: FilterFn) -> FilterBatch {
        let capacity = parent.capacity() as usize;
        FilterBatch {
            parent: parent,
            filter: filter,
            capacity: capacity,
            remove: Vec::with_capacity(capacity),
        }
    }
}

batch_no_new!{FilterBatch}

impl Act for FilterBatch
where
    T: EndOffset,
    V: Batch + BatchIterator
+ Act, 46 | { 47 | #[inline] 48 | fn act(&mut self) { 49 | self.parent.act(); 50 | // Filter during the act 51 | let iter = PayloadEnumerator::::new(&mut self.parent); 52 | while let Some(ParsedDescriptor { 53 | mut packet, 54 | index: idx, 55 | }) = iter.next(&mut self.parent) 56 | { 57 | if !(self.filter)(&mut packet) { 58 | self.remove.push(idx) 59 | } 60 | } 61 | if !self.remove.is_empty() { 62 | self.parent 63 | .drop_packets(&self.remove[..]) 64 | .expect("Filtering was performed incorrectly"); 65 | } 66 | self.remove.clear(); 67 | } 68 | 69 | #[inline] 70 | fn done(&mut self) { 71 | self.parent.done(); 72 | } 73 | 74 | #[inline] 75 | fn send_q(&mut self, port: &PacketTx) -> Result { 76 | self.parent.send_q(port) 77 | } 78 | 79 | #[inline] 80 | fn capacity(&self) -> i32 { 81 | self.capacity as i32 82 | } 83 | 84 | #[inline] 85 | fn drop_packets(&mut self, idxes: &[usize]) -> Option { 86 | self.parent.drop_packets(idxes) 87 | } 88 | 89 | #[inline] 90 | fn clear_packets(&mut self) { 91 | self.parent.clear_packets() 92 | } 93 | 94 | #[inline] 95 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 96 | self.parent.get_packet_batch() 97 | } 98 | 99 | #[inline] 100 | fn get_task_dependencies(&self) -> Vec { 101 | self.parent.get_task_dependencies() 102 | } 103 | } 104 | 105 | impl BatchIterator for FilterBatch 106 | where 107 | T: EndOffset, 108 | V: Batch + BatchIterator
+ Act, 109 | { 110 | type Header = T; 111 | type Metadata = ::Metadata; 112 | 113 | #[inline] 114 | fn start(&mut self) -> usize { 115 | self.parent.start() 116 | } 117 | 118 | #[inline] 119 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 120 | self.parent.next_payload(idx) 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /framework/src/operators/group_by.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::ReceiveBatch; 3 | use super::RestoreHeader; 4 | use super::act::Act; 5 | use super::iterator::*; 6 | use headers::EndOffset; 7 | use interface::Packet; 8 | use queues::*; 9 | use scheduler::{Executable, Scheduler}; 10 | use std::collections::HashMap; 11 | use std::marker::PhantomData; 12 | 13 | pub type GroupFn = Box) -> usize + Send>; 14 | 15 | pub struct GroupBy 16 | where 17 | T: EndOffset + 'static, 18 | V: Batch + BatchIterator
+ Act + 'static, 19 | { 20 | _phantom_v: PhantomData, 21 | groups: usize, 22 | _phantom_t: PhantomData, 23 | consumers: HashMap>, 24 | task: usize, 25 | } 26 | 27 | struct GroupByProducer 28 | where 29 | T: EndOffset + 'static, 30 | V: Batch + BatchIterator
+ Act + 'static, 31 | { 32 | parent: V, 33 | producers: Vec, 34 | group_fn: GroupFn, 35 | } 36 | 37 | impl Executable for GroupByProducer 38 | where 39 | T: EndOffset + 'static, 40 | V: Batch + BatchIterator
+ Act + 'static, 41 | { 42 | #[inline] 43 | fn execute(&mut self) { 44 | self.parent.act(); // Let the parent get some packets. 45 | { 46 | let iter = PayloadEnumerator::::new(&mut self.parent); 47 | while let Some(ParsedDescriptor { mut packet, .. }) = iter.next(&mut self.parent) { 48 | let group = (self.group_fn)(&packet); 49 | packet.save_header_and_offset(); 50 | self.producers[group].enqueue_one(packet); 51 | } 52 | } 53 | self.parent.get_packet_batch().clear_packets(); 54 | self.parent.done(); 55 | } 56 | 57 | #[inline] 58 | fn dependencies(&mut self) -> Vec { 59 | self.parent.get_task_dependencies() 60 | } 61 | } 62 | 63 | #[cfg_attr(feature = "dev", allow(len_without_is_empty))] 64 | impl GroupBy 65 | where 66 | T: EndOffset + 'static, 67 | V: Batch + BatchIterator
+ Act + 'static, 68 | { 69 | pub fn new( 70 | parent: V, 71 | groups: usize, 72 | group_fn: GroupFn, 73 | sched: &mut S, 74 | ) -> GroupBy { 75 | let mut producers = Vec::with_capacity(groups); 76 | let mut consumers = HashMap::with_capacity(groups); 77 | for i in 0..groups { 78 | let (prod, consumer) = new_mpsc_queue_pair(); 79 | producers.push(prod); 80 | consumers.insert(i, consumer); 81 | } 82 | let task = sched 83 | .add_task(GroupByProducer { 84 | parent: parent, 85 | group_fn: group_fn, 86 | producers: producers, 87 | }) 88 | .unwrap(); 89 | GroupBy { 90 | _phantom_v: PhantomData, 91 | groups: groups, 92 | _phantom_t: PhantomData, 93 | consumers: consumers, 94 | task: task, 95 | } 96 | } 97 | 98 | pub fn len(&self) -> usize { 99 | self.groups 100 | } 101 | 102 | pub fn get_group(&mut self, group: usize) -> Option>> { 103 | match self.consumers.remove(&group) { 104 | Some(mut p) => { 105 | { 106 | p.get_packet_batch().add_parent_task(self.task) 107 | }; 108 | Some(RestoreHeader::new(p)) 109 | } 110 | None => None, 111 | } 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /framework/src/operators/macros.rs: -------------------------------------------------------------------------------- 1 | //macro_rules! batch { 2 | //($name : ident, [ $($parts: ident : $pty: ty),* ], [$($defid : ident : $val : expr),*]) => { 3 | //impl $name 4 | //where T: EndOffset, 5 | //V:Batch + BatchIterator + Act { 6 | //#[inline] 7 | //pub fn new($( $parts : $pty ),*) -> $name { 8 | //$name{ $( $parts: $parts ),*, $($defid : $val),* } 9 | //} 10 | //} 11 | //batch_no_new!{$name} 12 | //}; 13 | //($name: ident, [ $($parts: ident : $pty: ty),* ]) => { 14 | //batch!{$name, [$($parts:$pty),*], []} 15 | //} 16 | //} 17 | 18 | macro_rules! 
batch_no_new { 19 | ($name : ident) => { 20 | impl Batch for $name 21 | where T: EndOffset, 22 | V:Batch + BatchIterator + Act { 23 | } 24 | }; 25 | ($name: ident, [ $($parts: ident : $pty: ty),* ]) => { 26 | batch!{$name, [$($parts:$pty),*], []} 27 | } 28 | } 29 | 30 | macro_rules! act { 31 | () => { 32 | #[inline] 33 | fn act(&mut self) { 34 | self.parent.act(); 35 | } 36 | 37 | #[inline] 38 | fn done(&mut self) { 39 | self.parent.done(); 40 | } 41 | 42 | #[inline] 43 | fn send_q(&mut self, port: &PacketTx) -> Result { 44 | self.parent.send_q(port) 45 | } 46 | 47 | #[inline] 48 | fn capacity(&self) -> i32 { 49 | self.parent.capacity() 50 | } 51 | 52 | #[inline] 53 | fn drop_packets(&mut self, idxes: &[usize]) -> Option { 54 | self.parent.drop_packets(idxes) 55 | } 56 | 57 | #[inline] 58 | fn clear_packets(&mut self) { 59 | self.parent.clear_packets() 60 | } 61 | 62 | #[inline] 63 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 64 | self.parent.get_packet_batch() 65 | } 66 | 67 | #[inline] 68 | fn get_task_dependencies(&self) -> Vec { 69 | self.parent.get_task_dependencies() 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /framework/src/operators/map_batch.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use headers::EndOffset; 7 | use interface::Packet; 8 | use interface::PacketTx; 9 | use std::marker::PhantomData; 10 | 11 | pub type MapFn = Box) + Send>; 12 | 13 | pub struct MapBatch 14 | where 15 | T: EndOffset, 16 | V: Batch + BatchIterator
+ Act, 17 | { 18 | parent: V, 19 | transformer: MapFn, 20 | applied: bool, 21 | phantom_t: PhantomData, 22 | } 23 | 24 | impl MapBatch 25 | where 26 | T: EndOffset, 27 | V: Batch + BatchIterator
+ Act, 28 | { 29 | pub fn new(parent: V, transformer: MapFn) -> MapBatch { 30 | MapBatch { 31 | parent: parent, 32 | transformer: transformer, 33 | applied: false, 34 | phantom_t: PhantomData, 35 | } 36 | } 37 | } 38 | 39 | impl Batch for MapBatch 40 | where 41 | T: EndOffset, 42 | V: Batch + BatchIterator
+ Act, 43 | { 44 | } 45 | 46 | impl Act for MapBatch 47 | where 48 | T: EndOffset, 49 | V: Batch + BatchIterator
+ Act, 50 | { 51 | #[inline] 52 | fn act(&mut self) { 53 | if !self.applied { 54 | self.parent.act(); 55 | { 56 | let iter = PayloadEnumerator::::new(&mut self.parent); 57 | while let Some(ParsedDescriptor { packet, .. }) = iter.next(&mut self.parent) { 58 | (self.transformer)(&packet); 59 | } 60 | } 61 | self.applied = true; 62 | } 63 | } 64 | 65 | #[inline] 66 | fn done(&mut self) { 67 | self.applied = false; 68 | self.parent.done(); 69 | } 70 | 71 | #[inline] 72 | fn send_q(&mut self, port: &PacketTx) -> Result { 73 | self.parent.send_q(port) 74 | } 75 | 76 | #[inline] 77 | fn capacity(&self) -> i32 { 78 | self.parent.capacity() 79 | } 80 | 81 | #[inline] 82 | fn drop_packets(&mut self, idxes: &[usize]) -> Option { 83 | self.parent.drop_packets(idxes) 84 | } 85 | 86 | #[inline] 87 | fn clear_packets(&mut self) { 88 | self.parent.clear_packets() 89 | } 90 | 91 | #[inline] 92 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 93 | self.parent.get_packet_batch() 94 | } 95 | 96 | #[inline] 97 | fn get_task_dependencies(&self) -> Vec { 98 | self.parent.get_task_dependencies() 99 | } 100 | } 101 | 102 | impl BatchIterator for MapBatch 103 | where 104 | T: EndOffset, 105 | V: Batch + BatchIterator
+ Act, 106 | { 107 | type Header = T; 108 | type Metadata = ::Metadata; 109 | 110 | #[inline] 111 | fn start(&mut self) -> usize { 112 | self.parent.start() 113 | } 114 | 115 | #[inline] 116 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 117 | // self.parent.next_payload(idx).map(|p| {(self.transformer)(&p.packet); p}) 118 | self.parent.next_payload(idx) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /framework/src/operators/merge_batch.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::{BatchIterator, PacketDescriptor}; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use interface::PacketTx; 7 | use scheduler::Executable; 8 | use std::cmp; 9 | 10 | pub struct MergeBatch { 11 | parents: Vec, 12 | which: usize, 13 | } 14 | 15 | impl MergeBatch { 16 | pub fn new(parents: Vec) -> MergeBatch { 17 | MergeBatch { 18 | parents: parents, 19 | which: 0, 20 | } 21 | } 22 | } 23 | 24 | impl Batch for MergeBatch {} 25 | 26 | impl BatchIterator for MergeBatch { 27 | type Header = T::Header; 28 | type Metadata = T::Metadata; 29 | 30 | #[inline] 31 | fn start(&mut self) -> usize { 32 | self.parents[self.which].start() 33 | } 34 | 35 | #[inline] 36 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 37 | self.parents[self.which].next_payload(idx) 38 | } 39 | } 40 | 41 | /// Internal interface for packets. 
42 | impl Act for MergeBatch { 43 | #[inline] 44 | fn act(&mut self) { 45 | self.parents[self.which].act() 46 | } 47 | 48 | #[inline] 49 | fn done(&mut self) { 50 | self.parents[self.which].done(); 51 | let next = self.which + 1; 52 | if next == self.parents.len() { 53 | self.which = 0 54 | } else { 55 | self.which = next 56 | } 57 | } 58 | 59 | #[inline] 60 | fn send_q(&mut self, port: &PacketTx) -> Result { 61 | self.parents[self.which].send_q(port) 62 | } 63 | 64 | #[inline] 65 | fn capacity(&self) -> i32 { 66 | self.parents 67 | .iter() 68 | .fold(0, |acc, x| cmp::max(acc, x.capacity())) 69 | } 70 | 71 | #[inline] 72 | fn drop_packets(&mut self, idxes: &[usize]) -> Option { 73 | self.parents[self.which].drop_packets(idxes) 74 | } 75 | 76 | #[inline] 77 | fn clear_packets(&mut self) { 78 | self.parents[self.which].clear_packets() 79 | } 80 | 81 | #[inline] 82 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 83 | self.parents[self.which].get_packet_batch() 84 | } 85 | 86 | #[inline] 87 | fn get_task_dependencies(&self) -> Vec { 88 | let mut deps = Vec::with_capacity(self.parents.len()); // Might actually need to be larger, will get resized 89 | for parent in &self.parents { 90 | deps.extend(parent.get_task_dependencies().iter()) 91 | } 92 | // We need to eliminate duplicate tasks. Fortunately this is not called on the critical path so it is fine to do 93 | // it this way. 
94 | deps.sort(); 95 | deps.dedup(); 96 | deps 97 | } 98 | } 99 | 100 | impl Executable for MergeBatch { 101 | #[inline] 102 | fn execute(&mut self) { 103 | self.act(); 104 | self.done(); 105 | } 106 | 107 | #[inline] 108 | fn dependencies(&mut self) -> Vec { 109 | self.get_task_dependencies() 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /framework/src/operators/parsed_batch.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use headers::EndOffset; 7 | use interface::*; 8 | use std::marker::PhantomData; 9 | 10 | pub struct ParsedBatch 11 | where 12 | T: EndOffset, 13 | V: Batch + BatchIterator + Act, 14 | { 15 | parent: V, 16 | phantom: PhantomData, 17 | } 18 | 19 | impl Act for ParsedBatch 20 | where 21 | T: EndOffset, 22 | V: Batch + BatchIterator + Act, 23 | { 24 | act!{} 25 | } 26 | 27 | impl Batch for ParsedBatch 28 | where 29 | V: Batch + BatchIterator + Act, 30 | T: EndOffset, 31 | { 32 | } 33 | 34 | impl ParsedBatch 35 | where 36 | V: Batch + BatchIterator + Act, 37 | T: EndOffset, 38 | { 39 | #[inline] 40 | pub fn new(parent: V) -> ParsedBatch { 41 | ParsedBatch { 42 | parent: parent, 43 | phantom: PhantomData, 44 | } 45 | } 46 | } 47 | 48 | impl BatchIterator for ParsedBatch 49 | where 50 | V: Batch + BatchIterator + Act, 51 | T: EndOffset, 52 | { 53 | type Header = T; 54 | type Metadata = V::Metadata; 55 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 56 | self.parent.next_payload(idx).map(|p| PacketDescriptor { 57 | packet: p.packet.parse_header(), 58 | }) 59 | } 60 | 61 | #[inline] 62 | fn start(&mut self) -> usize { 63 | self.parent.start() 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /framework/src/operators/receive_batch.rs: 
-------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use headers::NullHeader; 7 | use interface::{PacketRx, PacketTx}; 8 | 9 | pub struct ReceiveBatch { 10 | parent: PacketBatch, 11 | queue: T, 12 | pub received: u64, 13 | } 14 | 15 | impl ReceiveBatch { 16 | pub fn new_with_parent(parent: PacketBatch, queue: T) -> ReceiveBatch { 17 | ReceiveBatch { 18 | parent: parent, 19 | queue: queue, 20 | received: 0, 21 | } 22 | } 23 | 24 | pub fn new(queue: T) -> ReceiveBatch { 25 | ReceiveBatch { 26 | parent: PacketBatch::new(32), 27 | queue: queue, 28 | received: 0, 29 | } 30 | } 31 | } 32 | 33 | impl Batch for ReceiveBatch {} 34 | 35 | impl BatchIterator for ReceiveBatch { 36 | type Header = NullHeader; 37 | type Metadata = EmptyMetadata; 38 | #[inline] 39 | fn start(&mut self) -> usize { 40 | self.parent.start() 41 | } 42 | 43 | #[inline] 44 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 45 | self.parent.next_payload(idx) 46 | } 47 | } 48 | 49 | /// Internal interface for packets. 
50 | impl Act for ReceiveBatch { 51 | #[inline] 52 | fn act(&mut self) { 53 | self.parent.act(); 54 | self.parent 55 | .recv(&self.queue) 56 | .and_then(|x| { 57 | self.received += x as u64; 58 | Ok(x) 59 | }) 60 | .expect("Receive failure"); 61 | } 62 | 63 | #[inline] 64 | fn done(&mut self) { 65 | // Free up memory 66 | self.parent.deallocate_batch().expect("Deallocation failed"); 67 | } 68 | 69 | #[inline] 70 | fn send_q(&mut self, port: &PacketTx) -> Result { 71 | self.parent.send_q(port) 72 | } 73 | 74 | #[inline] 75 | fn capacity(&self) -> i32 { 76 | self.parent.capacity() 77 | } 78 | 79 | #[inline] 80 | fn drop_packets(&mut self, idxes: &[usize]) -> Option { 81 | self.parent.drop_packets(idxes) 82 | } 83 | 84 | #[inline] 85 | fn clear_packets(&mut self) { 86 | self.parent.clear_packets() 87 | } 88 | 89 | #[inline] 90 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 91 | &mut self.parent 92 | } 93 | 94 | #[inline] 95 | fn get_task_dependencies(&self) -> Vec { 96 | self.parent.get_task_dependencies() 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /framework/src/operators/reset_parse.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use headers::NullHeader; 7 | use interface::PacketTx; 8 | 9 | pub struct ResetParsingBatch 10 | where 11 | V: Batch + BatchIterator + Act, 12 | { 13 | parent: V, 14 | } 15 | 16 | impl ResetParsingBatch 17 | where 18 | V: Batch + BatchIterator + Act, 19 | { 20 | pub fn new(parent: V) -> ResetParsingBatch { 21 | ResetParsingBatch { parent: parent } 22 | } 23 | } 24 | 25 | impl BatchIterator for ResetParsingBatch 26 | where 27 | V: Batch + BatchIterator + Act, 28 | { 29 | type Header = NullHeader; 30 | type Metadata = EmptyMetadata; 31 | #[inline] 32 | fn start(&mut self) -> usize { 33 | 
self.parent.start() 34 | } 35 | 36 | #[inline] 37 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 38 | match self.parent.next_payload(idx) { 39 | Some(PacketDescriptor { packet }) => Some(PacketDescriptor { 40 | packet: packet.reset(), 41 | }), 42 | None => None, 43 | } 44 | } 45 | } 46 | 47 | /// Internal interface for packets. 48 | impl Act for ResetParsingBatch 49 | where 50 | V: Batch + BatchIterator + Act, 51 | { 52 | act!{} 53 | } 54 | 55 | impl Batch for ResetParsingBatch 56 | where 57 | V: Batch + BatchIterator + Act, 58 | { 59 | } 60 | -------------------------------------------------------------------------------- /framework/src/operators/restore_header.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use headers::EndOffset; 7 | use interface::*; 8 | use std::marker::PhantomData; 9 | 10 | pub struct RestoreHeader 11 | where 12 | T: EndOffset + 'static, 13 | M: Sized + Send, 14 | V: Batch + BatchIterator + Act, 15 | { 16 | parent: V, 17 | _phantom_t: PhantomData, 18 | _phantom_m: PhantomData, 19 | } 20 | 21 | impl Act for RestoreHeader 22 | where 23 | T: EndOffset + 'static, 24 | M: Sized + Send, 25 | V: Batch + BatchIterator + Act, 26 | { 27 | act!{} 28 | } 29 | 30 | impl Batch for RestoreHeader 31 | where 32 | V: Batch + BatchIterator + Act, 33 | M: Sized + Send, 34 | T: EndOffset + 'static, 35 | { 36 | } 37 | 38 | impl RestoreHeader 39 | where 40 | V: Batch + BatchIterator + Act, 41 | M: Sized + Send, 42 | T: EndOffset + 'static, 43 | { 44 | #[inline] 45 | pub fn new(parent: V) -> RestoreHeader { 46 | RestoreHeader { 47 | parent: parent, 48 | _phantom_t: PhantomData, 49 | _phantom_m: PhantomData, 50 | } 51 | } 52 | } 53 | 54 | impl BatchIterator for RestoreHeader 55 | where 56 | V: Batch + BatchIterator + Act, 57 | M: Sized + Send, 58 | T: EndOffset + 'static, 59 | { 
60 | type Header = T; 61 | type Metadata = M; 62 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 63 | self.parent.next_payload(idx).map(|p| PacketDescriptor { 64 | packet: p.packet.restore_saved_header().unwrap(), 65 | }) 66 | } 67 | 68 | #[inline] 69 | fn start(&mut self) -> usize { 70 | self.parent.start() 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /framework/src/operators/send_batch.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use headers::NullHeader; 7 | use interface::PacketTx; 8 | use scheduler::Executable; 9 | 10 | pub struct SendBatch 11 | where 12 | Port: PacketTx, 13 | V: Batch + BatchIterator + Act, 14 | { 15 | port: Port, 16 | parent: V, 17 | pub sent: u64, 18 | } 19 | 20 | impl SendBatch 21 | where 22 | Port: PacketTx, 23 | V: Batch + BatchIterator + Act, 24 | { 25 | pub fn new(parent: V, port: Port) -> SendBatch { 26 | SendBatch { 27 | port: port, 28 | sent: 0, 29 | parent: parent, 30 | } 31 | } 32 | } 33 | 34 | impl Batch for SendBatch 35 | where 36 | Port: PacketTx, 37 | V: Batch + BatchIterator + Act, 38 | { 39 | } 40 | 41 | impl BatchIterator for SendBatch 42 | where 43 | Port: PacketTx, 44 | V: Batch + BatchIterator + Act, 45 | { 46 | type Header = NullHeader; 47 | type Metadata = EmptyMetadata; 48 | #[inline] 49 | fn start(&mut self) -> usize { 50 | panic!("Cannot iterate send batch") 51 | } 52 | 53 | #[inline] 54 | unsafe fn next_payload(&mut self, _: usize) -> Option> { 55 | panic!("Cannot iterate send batch") 56 | } 57 | } 58 | 59 | /// Internal interface for packets. 
60 | impl Act for SendBatch 61 | where 62 | Port: PacketTx, 63 | V: Batch + BatchIterator + Act, 64 | { 65 | #[inline] 66 | fn act(&mut self) { 67 | // First everything is applied 68 | self.parent.act(); 69 | self.parent 70 | .get_packet_batch() 71 | .send_q(&self.port) 72 | .and_then(|x| { 73 | self.sent += x as u64; 74 | Ok(x) 75 | }) 76 | .expect("Send failed"); 77 | self.parent.done(); 78 | } 79 | 80 | fn done(&mut self) {} 81 | 82 | fn send_q(&mut self, _: &PacketTx) -> Result { 83 | panic!("Cannot send a sent packet batch") 84 | } 85 | 86 | fn capacity(&self) -> i32 { 87 | self.parent.capacity() 88 | } 89 | 90 | #[inline] 91 | fn drop_packets(&mut self, _: &[usize]) -> Option { 92 | panic!("Cannot drop packets from a sent batch") 93 | } 94 | 95 | #[inline] 96 | fn clear_packets(&mut self) { 97 | panic!("Cannot clear packets from a sent batch") 98 | } 99 | 100 | #[inline] 101 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 102 | self.parent.get_packet_batch() 103 | } 104 | 105 | #[inline] 106 | fn get_task_dependencies(&self) -> Vec { 107 | self.parent.get_task_dependencies() 108 | } 109 | } 110 | 111 | impl Executable for SendBatch 112 | where 113 | Port: PacketTx, 114 | V: Batch + BatchIterator + Act, 115 | { 116 | #[inline] 117 | fn execute(&mut self) { 118 | self.act() 119 | } 120 | 121 | #[inline] 122 | fn dependencies(&mut self) -> Vec { 123 | self.get_task_dependencies() 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /framework/src/operators/transform_batch.rs: -------------------------------------------------------------------------------- 1 | use super::Batch; 2 | use super::act::Act; 3 | use super::iterator::*; 4 | use super::packet_batch::PacketBatch; 5 | use common::*; 6 | use headers::EndOffset; 7 | use interface::Packet; 8 | use interface::PacketTx; 9 | use std::marker::PhantomData; 10 | 11 | pub type TransformFn = Box) + Send>; 12 | 13 | pub struct TransformBatch 14 | where 15 | T: 
EndOffset, 16 | V: Batch + BatchIterator
+ Act, 17 | { 18 | parent: V, 19 | transformer: TransformFn, 20 | applied: bool, 21 | phantom_t: PhantomData, 22 | } 23 | 24 | impl TransformBatch 25 | where 26 | T: EndOffset, 27 | V: Batch + BatchIterator
+ Act, 28 | { 29 | pub fn new(parent: V, transformer: TransformFn) -> TransformBatch { 30 | TransformBatch { 31 | parent: parent, 32 | transformer: transformer, 33 | applied: false, 34 | phantom_t: PhantomData, 35 | } 36 | } 37 | } 38 | 39 | impl Batch for TransformBatch 40 | where 41 | T: EndOffset, 42 | V: Batch + BatchIterator
+ Act, 43 | { 44 | } 45 | 46 | impl BatchIterator for TransformBatch 47 | where 48 | T: EndOffset, 49 | V: Batch + BatchIterator
+ Act, 50 | { 51 | type Header = T; 52 | type Metadata = ::Metadata; 53 | #[inline] 54 | fn start(&mut self) -> usize { 55 | self.parent.start() 56 | } 57 | 58 | #[inline] 59 | unsafe fn next_payload(&mut self, idx: usize) -> Option> { 60 | self.parent.next_payload(idx) 61 | } 62 | } 63 | 64 | impl Act for TransformBatch 65 | where 66 | T: EndOffset, 67 | V: Batch + BatchIterator
+ Act, 68 | { 69 | #[inline] 70 | fn act(&mut self) { 71 | if !self.applied { 72 | self.parent.act(); 73 | { 74 | let iter = PayloadEnumerator::::new(&mut self.parent); 75 | while let Some(ParsedDescriptor { mut packet, .. }) = iter.next(&mut self.parent) { 76 | (self.transformer)(&mut packet); 77 | } 78 | } 79 | self.applied = true; 80 | } 81 | } 82 | 83 | #[inline] 84 | fn done(&mut self) { 85 | self.applied = false; 86 | self.parent.done(); 87 | } 88 | 89 | #[inline] 90 | fn send_q(&mut self, port: &PacketTx) -> Result { 91 | self.parent.send_q(port) 92 | } 93 | 94 | #[inline] 95 | fn capacity(&self) -> i32 { 96 | self.parent.capacity() 97 | } 98 | 99 | #[inline] 100 | fn drop_packets(&mut self, idxes: &[usize]) -> Option { 101 | self.parent.drop_packets(idxes) 102 | } 103 | 104 | #[inline] 105 | fn clear_packets(&mut self) { 106 | self.parent.clear_packets() 107 | } 108 | 109 | #[inline] 110 | fn get_packet_batch(&mut self) -> &mut PacketBatch { 111 | self.parent.get_packet_batch() 112 | } 113 | 114 | #[inline] 115 | fn get_task_dependencies(&self) -> Vec { 116 | self.parent.get_task_dependencies() 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /framework/src/queues/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::mpsc_mbuf_queue::*; 2 | 3 | mod mpsc_mbuf_queue; 4 | -------------------------------------------------------------------------------- /framework/src/scheduler/embedded_scheduler.rs: -------------------------------------------------------------------------------- 1 | use super::{Executable, Scheduler}; 2 | use common::*; 3 | use std::default::Default; 4 | 5 | /// Used to keep stats about each pipeline and eventually grant tokens, etc. 
6 | struct Runnable { 7 | pub task: Box, 8 | pub dependencies: Vec, 9 | } 10 | 11 | impl Runnable { 12 | pub fn from_task(mut task: T) -> Runnable { 13 | let deps = task.dependencies(); 14 | Runnable { 15 | task: box task, 16 | dependencies: deps, 17 | } 18 | } 19 | } 20 | 21 | /// This scheduler is designed to allow NetBricks to be embedded in other vswitches (e.g., Bess). As a result it neither 22 | /// does any of the resource accounting `Scheduler` attempts to do at the moment, nor does it have anything that just 23 | /// runs tasks in a loop. 24 | pub struct EmbeddedScheduler { 25 | /// The set of runnable items. Note we currently don't have a blocked queue. 26 | tasks: Vec, 27 | } 28 | 29 | const DEFAULT_TASKQ_SIZE: usize = 256; 30 | 31 | impl Default for EmbeddedScheduler { 32 | fn default() -> EmbeddedScheduler { 33 | EmbeddedScheduler::new() 34 | } 35 | } 36 | 37 | impl Scheduler for EmbeddedScheduler { 38 | /// Add a task, and return a handle allowing the task to be run. 39 | fn add_task(&mut self, task: T) -> Result { 40 | self.tasks.push(Runnable::from_task(task)); 41 | Ok(self.tasks.len()) 42 | } 43 | } 44 | 45 | impl EmbeddedScheduler { 46 | /// Create a new Bess scheduler. 47 | pub fn new() -> EmbeddedScheduler { 48 | EmbeddedScheduler { 49 | tasks: Vec::with_capacity(DEFAULT_TASKQ_SIZE), 50 | } 51 | } 52 | 53 | /// Run specified task. 
54 | pub fn exec_task(&mut self, task_id: usize) { 55 | { 56 | let len = self.tasks[task_id - 1].dependencies.len(); 57 | for dep in 0..len { 58 | let dep_task = self.tasks[task_id - 1].dependencies[dep]; 59 | self.exec_task(dep_task) 60 | } 61 | } 62 | self.tasks[task_id - 1].task.execute(); 63 | } 64 | 65 | fn display_dependencies_internal(&self, task_id: usize, depth: usize) { 66 | { 67 | let len = self.tasks[task_id - 1].dependencies.len(); 68 | for dep in 0..len { 69 | let dep_task = self.tasks[task_id - 1].dependencies[dep]; 70 | self.display_dependencies_internal(dep_task, depth + 1) 71 | } 72 | } 73 | println!("{} Task {}", depth, task_id); 74 | } 75 | 76 | /// For debugging purposes 77 | pub fn display_dependencies(&mut self, task_id: usize) { 78 | self.display_dependencies_internal(task_id, 0) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /framework/src/scheduler/mod.rs: -------------------------------------------------------------------------------- 1 | /// All projects involve building a thread pool. This is the task equivalent for the threadpool in `NetBricks`. 2 | /// Anything that implements Runnable can be polled by the scheduler. This thing can be a `Batch` (e.g., `SendBatch`) or 3 | /// something else (e.g., the `GroupBy` operator). Eventually this trait will have more stuff. 
4 | pub use self::context::*; 5 | pub use self::standalone_scheduler::*; 6 | use common::*; 7 | 8 | mod standalone_scheduler; 9 | pub mod embedded_scheduler; 10 | 11 | mod context; 12 | 13 | pub trait Executable { 14 | fn execute(&mut self); 15 | fn dependencies(&mut self) -> Vec; 16 | } 17 | 18 | impl Executable for F 19 | where 20 | F: FnMut(), 21 | { 22 | fn execute(&mut self) { 23 | (*self)() 24 | } 25 | 26 | fn dependencies(&mut self) -> Vec { 27 | vec![] 28 | } 29 | } 30 | 31 | pub trait Scheduler { 32 | fn add_task(&mut self, task: T) -> Result 33 | where 34 | Self: Sized; 35 | } 36 | -------------------------------------------------------------------------------- /framework/src/shared_state/directory.rs: -------------------------------------------------------------------------------- 1 | use super::{open_shared, SharedMemory}; 2 | use std::mem::size_of; 3 | use std::sync::atomic::*; 4 | use utils::PAGE_SIZE; 5 | /// A directory of shared structures. 6 | 7 | const MAX_LEN: usize = 256; // 255 byte names 8 | const DIRECTORY_PAGES: usize = 2; // Dedicate 2 pages to the directory. 9 | const BYTE_SIZE: usize = DIRECTORY_PAGES * PAGE_SIZE; 10 | 11 | /// Directory header for shared data. 12 | #[repr(packed, C)] 13 | pub struct DirectoryHeader { 14 | entries: AtomicUsize, 15 | // Used to signal that snapshotting is in progress. 
16 | current_version: AtomicUsize, 17 | committed_version: AtomicUsize, 18 | length: usize, 19 | } 20 | 21 | #[repr(packed, C)] 22 | pub struct DirectoryEntry { 23 | pub name: [u8; MAX_LEN], 24 | } 25 | 26 | pub struct Directory { 27 | head: *mut DirectoryHeader, 28 | data: *mut DirectoryEntry, 29 | // Need this to make sure memory is not dropped 30 | _shared_memory: SharedMemory, 31 | entry: usize, 32 | len: usize, 33 | } 34 | 35 | impl Directory { 36 | pub fn new(name: &str) -> Directory { 37 | unsafe { 38 | let shared = open_shared(name, BYTE_SIZE); 39 | let head = shared.mem as *mut DirectoryHeader; 40 | (*head).current_version.store(1, Ordering::SeqCst); 41 | let header_size = size_of::(); 42 | let entry_size = size_of::(); 43 | let entries = (BYTE_SIZE - header_size) / entry_size; 44 | let entry = (head.offset(1) as *mut u8) as *mut DirectoryEntry; 45 | (*head).length = entries; 46 | (*head).entries.store(0, Ordering::Release); 47 | (*head).committed_version.store(1, Ordering::SeqCst); 48 | Directory { 49 | head: head, 50 | data: entry, 51 | _shared_memory: shared, 52 | entry: 0, 53 | len: entries, 54 | } 55 | } 56 | } 57 | 58 | pub fn register_new_entry(&mut self, name: &str) -> Option { 59 | let entry = self.entry; 60 | if entry >= self.len || name.len() >= MAX_LEN { 61 | None 62 | } else { 63 | unsafe { 64 | let entry_ptr = self.data.offset(entry as isize); 65 | (*entry_ptr).name.copy_from_slice(name.as_bytes()); 66 | (*self.head).entries.store(entry, Ordering::Release); 67 | } 68 | self.entry += 1; 69 | Some(entry) 70 | } 71 | } 72 | 73 | #[inline] 74 | pub fn begin_snapshot(&mut self) { 75 | unsafe { 76 | (*self.head).current_version.fetch_add(1, Ordering::SeqCst); 77 | } 78 | } 79 | 80 | #[inline] 81 | pub fn end_snapshot(&mut self) { 82 | unsafe { 83 | let version = (*self.head).current_version.load(Ordering::Acquire); 84 | (*self.head) 85 | .committed_version 86 | .store(version, Ordering::Release); 87 | } 88 | } 89 | } 90 | 
-------------------------------------------------------------------------------- /framework/src/shared_state/mod.rs: -------------------------------------------------------------------------------- 1 | /// Shareable data structures. 2 | pub mod directory; 3 | pub use self::shared_vec::*; 4 | mod shared_vec; 5 | use libc::{self, c_void, close, ftruncate, mmap, munmap, shm_open, shm_unlink}; 6 | use std::ffi::CString; 7 | use std::io::Error; 8 | use std::ptr; 9 | use utils::PAGE_SIZE; 10 | 11 | struct SharedMemory { 12 | pub mem: *mut T, 13 | name: CString, 14 | size: usize, 15 | } 16 | 17 | impl Drop for SharedMemory { 18 | fn drop(&mut self) { 19 | unsafe { 20 | let size = self.size; 21 | let _ret = munmap(self.mem as *mut c_void, size); // Unmap pages. 22 | // Record munmap failure. 23 | let shm_ret = shm_unlink(self.name.as_ptr()); 24 | assert!(shm_ret == 0, "Could not unlink shared memory region"); 25 | } 26 | } 27 | } 28 | 29 | unsafe fn open_shared(name: &str, size: usize) -> SharedMemory { 30 | // Make sure size is page aligned 31 | assert!(size & !PAGE_SIZE == 0); 32 | let name = CString::new(name).unwrap(); 33 | let mut fd = shm_open( 34 | name.as_ptr(), 35 | libc::O_CREAT | libc::O_EXCL | libc::O_RDWR, 36 | 0o700, 37 | ); 38 | if fd == -1 { 39 | if let Some(e) = Error::last_os_error().raw_os_error() { 40 | if e == libc::EEXIST { 41 | shm_unlink(name.as_ptr()); 42 | fd = shm_open( 43 | name.as_ptr(), 44 | libc::O_CREAT | libc::O_EXCL | libc::O_RDWR, 45 | 0o700, 46 | ); 47 | } 48 | } 49 | }; 50 | assert!(fd >= 0, "Could not create shared memory segment"); 51 | let ftret = ftruncate(fd, size as i64); 52 | assert!(ftret == 0, "Could not truncate"); 53 | let address = mmap( 54 | ptr::null_mut(), 55 | size, 56 | libc::PROT_READ | libc::PROT_WRITE, 57 | libc::MAP_POPULATE | libc::MAP_PRIVATE, 58 | fd, 59 | 0, 60 | ); 61 | if address == libc::MAP_FAILED { 62 | let err_string = CString::new("mmap failed").unwrap(); 63 | libc::perror(err_string.as_ptr()); 64 | 
panic!("Could not mmap shared region"); 65 | } 66 | close(fd); 67 | SharedMemory { 68 | mem: address as *mut T, 69 | name: name, 70 | size: size, 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /framework/src/shared_state/shared_vec.rs: -------------------------------------------------------------------------------- 1 | use super::{open_shared, SharedMemory}; 2 | use std::borrow::Borrow; 3 | use std::hash::{Hash, Hasher}; 4 | use std::ops::{Index, IndexMut, Range, RangeFrom, RangeTo}; 5 | use utils::round_to_pages; 6 | 7 | #[allow(dead_code)] // FIXME: While WIP 8 | pub struct SharedVec { 9 | vec: Vec, 10 | shared: SharedMemory, 11 | modified: bool, 12 | } 13 | 14 | impl SharedVec { 15 | pub fn new_with_capacity(name: &str, capacity: usize) -> SharedVec { 16 | let capacity_pages = round_to_pages(capacity); 17 | unsafe { 18 | SharedVec { 19 | vec: Vec::with_capacity(capacity), 20 | shared: open_shared(name, capacity_pages), 21 | modified: false, 22 | } 23 | } 24 | } 25 | } 26 | 27 | impl Borrow<[T]> for SharedVec { 28 | fn borrow(&self) -> &[T] { 29 | self.vec.borrow() 30 | } 31 | } 32 | 33 | impl Hash for SharedVec { 34 | fn hash(&self, state: &mut H) 35 | where 36 | H: Hasher, 37 | { 38 | self.vec.hash(state) 39 | } 40 | } 41 | 42 | impl Index for SharedVec { 43 | type Output = T; 44 | fn index(&self, index: usize) -> &T { 45 | self.vec.index(index) 46 | } 47 | } 48 | 49 | impl Index> for SharedVec { 50 | type Output = [T]; 51 | fn index(&self, index: Range) -> &[T] { 52 | self.vec.index(index) 53 | } 54 | } 55 | 56 | impl Index> for SharedVec { 57 | type Output = [T]; 58 | fn index(&self, index: RangeTo) -> &[T] { 59 | self.vec.index(index) 60 | } 61 | } 62 | 63 | impl Index> for SharedVec { 64 | type Output = [T]; 65 | fn index(&self, index: RangeFrom) -> &[T] { 66 | self.vec.index(index) 67 | } 68 | } 69 | 70 | impl IndexMut for SharedVec { 71 | fn index_mut(&mut self, index: usize) -> &mut T { 72 | 
self.modified = true; 73 | self.vec.index_mut(index) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /framework/src/state/dp_mergeable.rs: -------------------------------------------------------------------------------- 1 | use fnv::FnvHasher; 2 | use std::collections::HashMap; 3 | use std::collections::hash_map::Iter; 4 | use std::hash::BuildHasherDefault; 5 | use std::ops::AddAssign; 6 | use utils::Flow; 7 | 8 | /// A generic store for associating some merge-able type with each flow. Note, the merge must be commutative, we do not 9 | /// guarantee ordering for things being merged. The merge function is implemented by implementing the 10 | /// [`AddAssign`](https://doc.rust-lang.org/std/ops/trait.AddAssign.html) trait and overriding the `add_assign` method 11 | /// there. We assume that the quantity stored here does not need to be accessed by the control plane and can only be 12 | /// accessed from the data plane. The `cache_size` should be tuned depending on whether gets or puts are the most common 13 | /// operation in this table. 14 | /// 15 | /// #[FIXME] 16 | /// Garbage collection. 17 | type FnvHash = BuildHasherDefault; 18 | const VEC_SIZE: usize = 1 << 24; 19 | #[derive(Clone)] 20 | pub struct DpMergeableStore + Default> { 21 | /// Contains the counts on the data path. 
22 | state: HashMap, 23 | cache: Vec<(Flow, T)>, 24 | cache_size: usize, 25 | } 26 | 27 | const CACHE_SIZE: usize = 1 << 14; 28 | impl + Default> DpMergeableStore { 29 | pub fn with_cache_and_size(cache: usize, size: usize) -> DpMergeableStore { 30 | DpMergeableStore { 31 | state: HashMap::with_capacity_and_hasher(size, Default::default()), 32 | cache: Vec::with_capacity(cache), 33 | cache_size: cache, 34 | } 35 | } 36 | 37 | pub fn new() -> DpMergeableStore { 38 | DpMergeableStore::with_cache_and_size(CACHE_SIZE, VEC_SIZE) 39 | } 40 | 41 | fn merge_cache(&mut self) { 42 | self.state.extend(self.cache.drain(0..)); 43 | } 44 | 45 | /// Change the value for the given `Flow`. 46 | #[inline] 47 | pub fn update(&mut self, flow: Flow, inc: T) { 48 | { 49 | self.cache.push((flow, inc)); 50 | } 51 | if self.cache.len() >= self.cache_size { 52 | self.merge_cache(); 53 | } 54 | } 55 | 56 | /// Remove an entry from the table. 57 | #[inline] 58 | pub fn remove(&mut self, flow: &Flow) -> T { 59 | self.merge_cache(); 60 | self.state.remove(flow).unwrap_or_else(Default::default) 61 | } 62 | 63 | /// Iterate over all the stored entries. This is a bit weird to do in the data plane. 64 | /// 65 | /// #[Warning] 66 | /// This might have severe performance penalties. 67 | pub fn iter(&mut self) -> Iter { 68 | self.merge_cache(); 69 | self.state.iter() 70 | } 71 | 72 | /// Length of the table. 
73 | pub fn len(&mut self) -> usize { 74 | self.merge_cache(); 75 | self.state.len() 76 | } 77 | 78 | /// Is table empty 79 | pub fn is_empty(&self) -> bool { 80 | self.state.is_empty() && self.cache.is_empty() 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /framework/src/state/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::cp_mergeable::*; 2 | pub use self::dp_mergeable::*; 3 | pub use self::mergeable::*; 4 | pub use self::reordered_buffer::*; 5 | pub use self::ring_buffer::*; 6 | mod dp_mergeable; 7 | mod cp_mergeable; 8 | mod mergeable; 9 | mod ring_buffer; 10 | pub mod reordered_buffer; 11 | -------------------------------------------------------------------------------- /framework/src/state/reordered_buffer/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg_attr(feature = "dev", allow(module_inception))] 2 | mod reordered_buffer; 3 | 4 | pub use self::reordered_buffer::*; 5 | -------------------------------------------------------------------------------- /framework/src/utils/asm.rs: -------------------------------------------------------------------------------- 1 | #[inline] 2 | pub fn cpuid() { 3 | unsafe { 4 | asm!("movl $$0x2, %eax":::"eax"); 5 | asm!("movl $$0x0, %ecx":::"ecx"); 6 | asm!("cpuid" 7 | : 8 | : 9 | : "rax rbx rcx rdx"); 10 | } 11 | } 12 | 13 | #[inline] 14 | pub fn rdtsc_unsafe() -> u64 { 15 | unsafe { 16 | let low: u32; 17 | let high: u32; 18 | asm!("rdtsc" 19 | : "={eax}" (low), "={edx}" (high) 20 | : 21 | : "rdx rax" 22 | : "volatile"); 23 | ((high as u64) << 32) | (low as u64) 24 | } 25 | } 26 | 27 | #[inline] 28 | pub fn rdtscp_unsafe() -> u64 { 29 | let high: u32; 30 | let low: u32; 31 | unsafe { 32 | asm!("rdtscp" 33 | : "={eax}" (low), "={edx}" (high) 34 | : 35 | : "ecx" 36 | : "volatile"); 37 | ((high as u64) << 32) | (low as u64) 38 | } 39 | } 40 | 41 | #[inline] 42 | pub 
fn pause() { 43 | unsafe { 44 | asm!("pause"::::"volatile"); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /framework/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::asm::*; 2 | pub use self::flow::*; 3 | mod flow; 4 | mod asm; 5 | 6 | pub const PAGE_SIZE: usize = 4096; // Page size in bytes, not using huge pages here. 7 | 8 | /// Round a given buffer to page size units. 9 | #[inline] 10 | pub fn round_to_pages(buffer_size: usize) -> usize { 11 | (buffer_size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1) 12 | } 13 | 14 | /// Round a 64-bit integer to its nearest power of 2. 15 | #[inline] 16 | pub fn round_to_power_of_2(mut size: usize) -> usize { 17 | size = size.wrapping_sub(1); 18 | size |= size >> 1; 19 | size |= size >> 2; 20 | size |= size >> 4; 21 | size |= size >> 8; 22 | size |= size >> 16; 23 | size |= size >> 32; 24 | size = size.wrapping_add(1); 25 | size 26 | } 27 | -------------------------------------------------------------------------------- /framework/tests/address.rs: -------------------------------------------------------------------------------- 1 | extern crate e2d2; 2 | use e2d2::utils::*; 3 | use std::net::Ipv4Addr; 4 | use std::str::FromStr; 5 | 6 | #[test] 7 | fn address_inline() { 8 | let pfx = Ipv4Prefix::new(u32::from(Ipv4Addr::from_str("192.168.0.0").unwrap()), 16); 9 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.0.1").unwrap()),)); 10 | assert!(pfx.in_range(u32::from_be(16820416))); 11 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.100.1").unwrap()),)); 12 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.2.1").unwrap()),)); 13 | assert!(!pfx.in_range(u32::from(Ipv4Addr::from_str("192.163.0.1").unwrap()),)); 14 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.0.0").unwrap()),)); 15 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.255.255").unwrap()),)); 16 | 
17 | let pfx = Ipv4Prefix::new(u32::from(Ipv4Addr::from_str("192.168.1.2").unwrap()), 32); 18 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.1.2").unwrap()),)); 19 | assert!(!pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.1.3").unwrap()),)); 20 | 21 | let pfx = Ipv4Prefix::new(u32::from(Ipv4Addr::from_str("0.0.0.0").unwrap()), 0); 22 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.1.2").unwrap()),)); 23 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("2.2.2.2").unwrap()),)); 24 | 25 | let pfx = Ipv4Prefix::new(u32::from(Ipv4Addr::from_str("0.0.0.0").unwrap()), 0); 26 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("192.168.1.2").unwrap()),)); 27 | assert!(pfx.in_range(u32::from(Ipv4Addr::from_str("2.2.2.2").unwrap()),)); 28 | } 29 | -------------------------------------------------------------------------------- /framework/tests/ring_buffer.rs: -------------------------------------------------------------------------------- 1 | extern crate e2d2; 2 | use e2d2::state::*; 3 | 4 | #[test] 5 | fn alloc_test() { 6 | let rb = RingBuffer::new(1).unwrap(); 7 | drop(rb); 8 | } 9 | 10 | #[test] 11 | fn write_at_offset_test() { 12 | let mut rb = RingBuffer::new(4096).unwrap(); 13 | let input = vec![1, 2, 3, 4]; 14 | rb.write_at_offset(4095, &input[..]); 15 | let mut output: Vec<_> = (0..input.len()).map(|_| 0).collect(); 16 | rb.read_from_offset(4095, &mut output[..]); 17 | for idx in 0..input.len() { 18 | assert_eq!(input[idx], output[idx]); 19 | } 20 | } 21 | 22 | #[test] 23 | fn read_write_tail_test() { 24 | let mut rb = RingBuffer::new(4096).unwrap(); 25 | let input: Vec<_> = (0..8192).map(|i| (i & 0xff) as u8).collect(); 26 | let written = rb.write_at_tail(&input[..]); 27 | assert_eq!(written, 4095); 28 | let mut output: Vec<_> = (0..8192).map(|_| 0).collect(); 29 | let read = rb.read_from_head(&mut output[..]); 30 | assert_eq!(read, written); 31 | for idx in 0..read { 32 | assert_eq!(input[idx], output[idx]); 33 | } 34 | } 35 | 
-------------------------------------------------------------------------------- /native/.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: google 2 | IndentWidth: 4 3 | 4 | AlignAfterOpenBracket: true 5 | AlignConsecutiveAssignments: true 6 | AllowShortBlocksOnASingleLine: false 7 | AllowShortCaseLabelsOnASingleLine: false 8 | AllowShortFunctionsOnASingleLine: Empty 9 | AllowShortIfStatementsOnASingleLine: false 10 | AllowShortLoopsOnASingleLine: false 11 | BinPackArguments: true 12 | BinPackParameters: true 13 | ColumnLimit: 100 14 | DerivePointerAlignment: true 15 | PenaltyExcessCharacter: 1 16 | SpaceBeforeParens: ControlStatements 17 | TabWidth: 4 18 | UseTab: Never 19 | -------------------------------------------------------------------------------- /native/Makefile: -------------------------------------------------------------------------------- 1 | CC = gcc 2 | 3 | RTE_SDK = $(abspath ../3rdparty/dpdk/) 4 | #ifndef RTE_SDK 5 | #$(error RTE_SDK is undefined) 6 | #endif 7 | 8 | ifndef RTE_TARGET 9 | RTE_TARGET="build" 10 | endif 11 | 12 | ifneq ($(wildcard $(RTE_SDK)/$(RTE_TARGET)*),) 13 | DPDK_INC_DIR = $(RTE_SDK)/$(RTE_TARGET)/include 14 | DPDK_LIB_DIR = $(RTE_SDK)/$(RTE_TARGET)/lib 15 | else 16 | DPDK_INC_DIR = $(RTE_SDK)/build/include 17 | DPDK_LIB_DIR = $(RTE_SDK)/build/lib 18 | endif 19 | 20 | LDFLAGS += -L$(DPDK_LIB_DIR) 21 | LIBS += -Wl,--whole-archive -ldpdk -Wl,--no-whole-archive -Wl,-rpath=$(DPDK_LIB_DIR) 22 | #LIBS += -ldpdk -Wl,-rpath=$(DPDK_LIB_DIR) 23 | LIBS += -lm -lpthread -ldl -lpcap -lnuma 24 | 25 | # change fpic to fPIC if something fails 26 | CFLAGS = -std=gnu99 -g3 -ggdb3 -O3 -Wall -Werror -m64 -march=native \ 27 | -Wno-unused-function -Wno-unused-but-set-variable \ 28 | -I$(DPDK_INC_DIR) -Iinclude/\ 29 | -D_GNU_SOURCE \ 30 | -fPIC 31 | 32 | SRCS = $(wildcard *.c) 33 | OBJS = $(SRCS:.c=.o) 34 | HEADERS = $(wildcard include/*.h) 35 | PROD = libzcsi.so 36 | PROD_STATIC = 
libzcsi.a 37 | 38 | DEPS = .make.dep 39 | 40 | # if multiple targets are specified, do them one by one */ 41 | ifneq ($(words $(MAKECMDGOALS)),1) 42 | 43 | .NOTPARALLEL: 44 | $(sort all $(MAKECMDGOALS)): 45 | @$(MAKE) --no-print-directory -f $(firstword $(MAKEFILE_LIST)) $@ 46 | 47 | else 48 | 49 | # parallel build by default 50 | CORES ?= $(shell nproc || echo 1) 51 | MAKEFLAGS += -j $(CORES) 52 | INSTALL_PATH = $(abspath ../target/native) 53 | 54 | .PHONY: all clean tags cscope all-static 55 | 56 | all: $(DEPS) $(PROD) 57 | 58 | all-static: $(DEPS) $(PROD_STATIC) 59 | 60 | install: $(DEPS) $(PROD) | $(INSTALL_PATH) 61 | cp $(PROD) $(INSTALL_PATH) 62 | 63 | $(INSTALL_PATH): 64 | mkdir -p $(INSTALL_PATH) 65 | 66 | 67 | $(DEPS): $(SRCS) $(HEADERS) 68 | @echo $(RTE_SDK) $(DPDK_INC_DIR) 69 | @$(CC) $(CFLAGS) -MM $(SRCS) | sed 's|\(.*\)\.o: \(.*\)\.c|\2.o: \2.c|' > $(DEPS); 70 | 71 | $(PROD_STATIC): $(OBJS) 72 | ar rcs $(PROD_STATIC) $(OBJS) 73 | 74 | $(PROD): $(OBJS) 75 | $(CC) -shared $(OBJS) -o $@ $(LDFLAGS) $(LIBS) 76 | 77 | -include $(DEPS) 78 | 79 | clean: 80 | rm -f $(DEPS) $(PROD) $(PROD_STATIC) *.o || true 81 | rm -f $(INSTALL_PATH)/$(PROD) || true 82 | rmdir $(INSTALL_PATH) || true 83 | 84 | tags: 85 | @ctags -R * 86 | 87 | cscope: 88 | @rm -f cscope.* 89 | @find . 
-name "*.c" -o -name "*.h" > cscope.files 90 | cscope -b -q -k 91 | @rm -f cscope.files 92 | endif 93 | -------------------------------------------------------------------------------- /native/ethpacket.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "mempool.h" 5 | 6 | void set_packet_data(mbuf_array_t array, int cnt, int offset, void* data, int size) { 7 | for (int i = 0; i < cnt; i++) { 8 | void* dest = rte_pktmbuf_mtod_offset(array[i], void*, offset); 9 | rte_memcpy(dest, data, size); 10 | } 11 | } 12 | 13 | void set_packet_data_at_offset(mbuf_array_t array, int* offsets, int cnt, void* data, int size) { 14 | for (int i = 0; i < cnt; i++) { 15 | void* dest = rte_pktmbuf_mtod_offset(array[i], void*, offsets[i]); 16 | rte_memcpy(dest, data, size); 17 | } 18 | } 19 | 20 | void set_ether_type(mbuf_array_t array, int cnt, uint16_t ether) { 21 | for (int i = 0; i < cnt; i++) { 22 | struct ether_hdr* hdr = rte_pktmbuf_mtod(array[i], struct ether_hdr*); 23 | hdr->ether_type = ether; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /native/fmt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 3 | for c_file in ${BASE_DIR}/*.c; do 4 | echo ${c_file} 5 | clang-format -style=file -i ${c_file} 6 | done 7 | 8 | for h_file in ${BASE_DIR}/include/*.h; do 9 | echo ${h_file} 10 | clang-format -style=file -i ${h_file} 11 | done 12 | -------------------------------------------------------------------------------- /native/include/dpdk.h: -------------------------------------------------------------------------------- 1 | #ifndef __DPDK_H__ 2 | #define __DPDK_H__ 3 | /* Call this from the main thread on ZCSI to initialize things */ 4 | int init_system(int core); 5 | 6 | /* Called from all secondary threads on ZCSI */ 7 | int 
init_thread(int tid, int core); 8 | #include "mempool.h" 9 | #endif 10 | -------------------------------------------------------------------------------- /native/include/mempool.h: -------------------------------------------------------------------------------- 1 | #ifndef __MEMPOOL_H__ 2 | #define __MEMPOOL_H__ 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | RTE_DECLARE_PER_LCORE(int, _mempool_core); 11 | 12 | typedef struct rte_mbuf* restrict* restrict mbuf_array_t; 13 | /* Called by system initialization */ 14 | int init_mempool_core(int core); 15 | int init_mempool(int master_core, unsigned int mempool_size, unsigned int mcache_size, unsigned short slots); 16 | int init_secondary_mempool(const char* mempool_name); 17 | int find_secondary_mempool(); 18 | struct rte_mbuf* mbuf_alloc(); 19 | void mbuf_free(struct rte_mbuf* buf); 20 | int mbuf_alloc_bulk(mbuf_array_t array, uint16_t len, int cnt); 21 | int mbuf_free_bulk(mbuf_array_t array, int cnt); 22 | struct rte_mempool* get_pframe_pool(int coreid, int sid); 23 | struct rte_mempool* get_mempool_for_core(int coreid); 24 | #endif 25 | -------------------------------------------------------------------------------- /native/include/pmd.h: -------------------------------------------------------------------------------- 1 | #ifndef __PMD_H__ 2 | #define __PMD_H__ 3 | int num_pmd_ports(); 4 | int get_pmd_ports(struct rte_eth_dev_info* info, int len); 5 | void enumerate_pmd_ports(); 6 | int init_pmd_port(int port, int rxqs, int txqs, int rxq_core[], int txq_core[], int nrxd, int ntxd, 7 | int loopback, int tso, int csumoffload); 8 | int free_pmd_port(int port); 9 | int recv_pkts(int port, int qid, mbuf_array_t pkts, int len); 10 | int send_pkts(int port, int qid, mbuf_array_t pkts, int len); 11 | #endif 12 | -------------------------------------------------------------------------------- /native/include/simd.h: 
-------------------------------------------------------------------------------- 1 | #ifndef _SIMD_H_ 2 | #define _SIMD_H_ 3 | 4 | #include 5 | 6 | #include 7 | 8 | #define __xmm_aligned __attribute__((aligned(16))) 9 | #define __ymm_aligned __attribute__((aligned(32))) 10 | #define __zmm_aligned __attribute__((aligned(64))) 11 | 12 | #if !__SSSE3__ 13 | #error CPU must be at least Core 2 or equivalent (SSSE3 required) 14 | #endif 15 | 16 | static inline void print_m128i(__m128i a) { 17 | uint32_t b[4] __xmm_aligned; 18 | 19 | *((__m128i *)b) = a; 20 | printf("%08x %08x %08x %08x\n", b[0], b[1], b[2], b[3]); 21 | } 22 | 23 | static inline __m128i gather_m128i(void *a, void *b) { 24 | #if 1 25 | /* faster (in a tight loop test. sometimes slower...) */ 26 | __m128i t = _mm_loadl_epi64((__m128i *)a); 27 | return (__m128i)_mm_loadh_pd((__m128d)t, (double *)b); 28 | #else 29 | return _mm_set_epi64x(*((uint64_t *)b), *((uint64_t *)a)); 30 | #endif 31 | } 32 | 33 | #if __AVX__ 34 | 35 | static inline void print_m256i(__m256i a) { 36 | uint32_t b[8] __ymm_aligned; 37 | 38 | *((__m256i *)b) = a; 39 | printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); 40 | } 41 | 42 | static inline __m256d concat_two_m128d(__m128d a, __m128d b) { 43 | #if 1 44 | /* faster */ 45 | return _mm256_insertf128_pd(_mm256_castpd128_pd256(a), b, 1); 46 | #else 47 | return _mm256_permute2f128_si256(_mm256_castsi128_si256(a), _mm256_castsi128_si256(b), (2 << 4) | 0); 48 | #endif 49 | } 50 | 51 | #endif /* __AVX__ */ 52 | 53 | #endif 54 | -------------------------------------------------------------------------------- /native/test/Makefile: -------------------------------------------------------------------------------- 1 | CC = gcc 2 | 3 | RTE_SDK = $(abspath ../../3rdparty/dpdk/) 4 | 5 | ifndef RTE_TARGET 6 | $(error RTE_TARGET is undefined) 7 | endif 8 | 9 | ifneq ($(wildcard $(RTE_SDK)/$(RTE_TARGET)*),) 10 | DPDK_INC_DIR = 
$(RTE_SDK)/$(RTE_TARGET)/include 11 | DPDK_LIB_DIR = $(RTE_SDK)/$(RTE_TARGET)/lib 12 | else 13 | DPDK_INC_DIR = $(RTE_SDK)/build/include 14 | DPDK_LIB_DIR = $(RTE_SDK)/build/lib 15 | endif 16 | ADAPTOR_DIR=$(abspath ../) 17 | DPDK_INCLUDE=$(ADAPTOR_DIR)/include 18 | DPDK_LIB=$(ADAPTOR_DIR) 19 | 20 | LDFLAGS += -L$(DPDK_LIB_DIR) -L$(DPDK_LIB) -Wl,-rpath=$(DPDK_LIB)\ 21 | -Wl,-rpath=$(DPDK_LIB_DIR) 22 | LIBS += -lm -lpthread -ldl -lpcap -ldpdk -lzcsi 23 | 24 | 25 | # change fpic to fPIC if something fails 26 | CFLAGS = -std=gnu99 -g3 -ggdb3 -O3 -Wall -Werror -m64 -march=native -Wno-unused-function -Wno-unused-but-set-variable \ 27 | -I$(DPDK_INC_DIR) \ 28 | -I$(DPDK_INCLUDE) \ 29 | -D_GNU_SOURCE 30 | 31 | SRCS = $(wildcard *.c) 32 | OBJS = $(SRCS:.c=.o) 33 | HEADERS = $(wildcard include/*.h) 34 | PROD = test 35 | 36 | DEPS = .make.dep 37 | 38 | # if multiple targets are specified, do them one by one */ 39 | ifneq ($(words $(MAKECMDGOALS)),1) 40 | 41 | .NOTPARALLEL: 42 | $(sort all $(MAKECMDGOALS)): 43 | @$(MAKE) --no-print-directory -f $(firstword $(MAKEFILE_LIST)) $@ 44 | 45 | else 46 | 47 | # parallel build by default 48 | CORES ?= $(shell nproc || echo 1) 49 | MAKEFLAGS += -j $(CORES) 50 | 51 | .PHONY: all clean tags cscope 52 | 53 | all: $(DEPS) $(PROD) 54 | 55 | $(DEPS): $(SRCS) $(HEADERS) 56 | @$(CC) $(CFLAGS) -MM $(SRCS) | sed 's|\(.*\)\.o: \(.*\)\.c|\2.o: \2.c|' > $(DEPS); 57 | 58 | $(PROD): $(OBJS) 59 | echo $(LDFLAGS) 60 | $(CC) $(OBJS) -o $@ $(LDFLAGS) $(LIBS) 61 | 62 | -include $(DEPS) 63 | 64 | clean: 65 | rm -f $(DEPS) $(PROD) *.o 66 | 67 | tags: 68 | @ctags -R * 69 | 70 | cscope: 71 | @rm -f cscope.* 72 | @find . 
-name "*.c" -o -name "*.h" > cscope.files 73 | cscope -b -q -k 74 | @rm -f cscope.files 75 | endif 76 | -------------------------------------------------------------------------------- /native/test/test.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | struct node { 12 | int tid; 13 | int core; 14 | int queue; 15 | }; 16 | 17 | int cursec() 18 | { 19 | struct timeval t; 20 | gettimeofday(&t, NULL); 21 | return t.tv_sec; 22 | } 23 | 24 | #define PORT_OUT 1 25 | #define PORT_IN 0 26 | void *thr(void* arg) 27 | { 28 | struct node* n = arg; 29 | struct rte_mbuf* restrict pkts[32]; 30 | int i; 31 | int q = n->queue; 32 | int start_sec = cursec(); 33 | int rcvd = 0; 34 | int sent = 0; 35 | init_thread(n->tid, n->core); 36 | if (q >= 20) { 37 | printf("Somehow, queue beyond 20\n"); 38 | } 39 | while(1) { 40 | /*int recv;*/ 41 | i = mbuf_alloc_bulk(pkts, 60, 32); 42 | if (i != 0) { 43 | printf("Error allocating packets %d\n", i); 44 | break; 45 | } else { 46 | int send, recv; 47 | 48 | /* Start setting MAC address */ 49 | for (i = 0; i < 32; i++) { 50 | struct ether_hdr* hdr = 51 | rte_pktmbuf_mtod(pkts[i], 52 | struct ether_hdr*); 53 | hdr->d_addr.addr_bytes[5] = (10 * q) + 1; 54 | hdr->s_addr.addr_bytes[5] = (10 * q) + 2; 55 | hdr->ether_type = rte_cpu_to_be_16(0x0800); 56 | /*rte_mbuf_sanity_check(pkts[i], 1);*/ 57 | } 58 | send = send_pkts(PORT_OUT, q, pkts, 32); 59 | for (i = send; i < 32; i++) { 60 | mbuf_free(pkts[i]); 61 | } 62 | recv = recv_pkts(PORT_IN, q, pkts, 32); 63 | rcvd += recv; 64 | sent += send; 65 | if (cursec() != start_sec) { 66 | printf("%d %d rx=%d tx=%d\n", n->core, 67 | (cursec() - start_sec), 68 | rcvd, 69 | sent); 70 | /*printf("recv_pkt\n");*/ 71 | /*rte_pktmbuf_dump(stdout, pkts[0], 16384);*/ 72 | start_sec = cursec(); 73 | rcvd = 0; 74 | sent = 0; 75 | } 76 | for (int i = 0; i < recv; 
i++) { 77 | mbuf_free(pkts[i]); 78 | } 79 | } 80 | } 81 | printf("Socket ID (%d) is %d. DONE\n", n->core, rte_socket_id()); 82 | return NULL; 83 | } 84 | 85 | void dump() { 86 | printf("pkt_len %lu\n", offsetof(struct rte_mbuf, pkt_len)); 87 | printf("sizeof(rte_eth_dev_info) %lu\n", sizeof(struct rte_eth_dev_info)); 88 | } 89 | 90 | #define THREADS 2 91 | int main (int argc, char* argv[]) { 92 | 93 | /*dump();*/ 94 | pthread_t thread[20]; 95 | struct node n[20]; 96 | int rxq_cores[20]; 97 | int txq_cores[20]; 98 | int ret = init_system(1); 99 | 100 | assert(ret == 0); 101 | rxq_cores[0] = 10; 102 | rxq_cores[1] = 11; 103 | txq_cores[0] = 10; 104 | txq_cores[1] = 11; 105 | 106 | /*for (int i = 0; i < 20; i++) {*/ 107 | /*rxq_cores[i] = i;*/ 108 | /*txq_cores[i] = i;*/ 109 | /*}*/ 110 | enumerate_pmd_ports(); 111 | ret = init_pmd_port(PORT_OUT, THREADS, THREADS, 112 | rxq_cores, txq_cores, 256, 256, 113 | PORT_OUT == PORT_IN, 0, 0); 114 | assert(ret == 0); 115 | if (PORT_IN != PORT_OUT) { 116 | ret = init_pmd_port(PORT_IN, THREADS, THREADS, rxq_cores, txq_cores, 128, 512, 0, 0, 0); 117 | assert(ret == 0); 118 | } 119 | n[0].tid = 10; 120 | n[0].core = 10; 121 | n[0].queue = 0; 122 | n[1].tid = 11; 123 | n[1].core = 11; 124 | n[1].queue = 1; 125 | /*thr(&n[0]);*/ 126 | pthread_create(&thread[0], NULL, &thr, &n[0]); 127 | pthread_create(&thread[1], NULL, &thr, &n[1]); 128 | /*for (int i = 0; i < THREADS; i++) {*/ 129 | /*n[i].tid = 64 - i;*/ 130 | /*n[i].core = i;*/ 131 | /*pthread_create(&thread[i],*/ 132 | /*NULL,*/ 133 | /*&thr,*/ 134 | /*&n[i]);*/ 135 | /*}*/ 136 | 137 | for (int i = 0; i < THREADS; i++) { 138 | pthread_join(thread[i], NULL); 139 | } 140 | free_pmd_port(PORT_OUT); 141 | return 0; 142 | } 143 | -------------------------------------------------------------------------------- /native/utils.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | // Make rte_hash_crc available 
to Rust. This adds some cost, will look into producing a pure Rust 6 | // version. 7 | uint32_t crc_hash_native(const void* data, uint32_t len, uint32_t initial) { 8 | return rte_hash_crc(data, len, initial); 9 | } 10 | 11 | uint16_t ipv4_cksum(const void* iphdr) { 12 | return rte_ipv4_cksum((const struct ipv4_hdr*)iphdr); 13 | } 14 | -------------------------------------------------------------------------------- /patches/ovs-patch-c4623bb.patch: -------------------------------------------------------------------------------- 1 | From bd343b970095b0d568245025ef10761efeb221d1 Mon Sep 17 00:00:00 2001 2 | From: apanda 3 | Date: Wed, 6 Apr 2016 10:08:21 -0700 4 | Subject: [PATCH] Changes for ZCSI evaluation 5 | 6 | - Do not build kernel module when building with ovs-dev 7 | - Support for mainline DPDK head. 8 | - Fix an uninitialized variable 9 | --- 10 | lib/netdev-dpdk.c | 16 ++++++++-------- 11 | lib/odp-util.c | 2 +- 12 | utilities/ovs-dev.py | 4 ++-- 13 | 3 files changed, 11 insertions(+), 11 deletions(-) 14 | 15 | diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c 16 | index e09b471..7fc44fb 100644 17 | --- a/lib/netdev-dpdk.c 18 | +++ b/lib/netdev-dpdk.c 19 | @@ -1740,31 +1740,31 @@ netdev_dpdk_get_features(const struct netdev *netdev_, 20 | link = dev->link; 21 | ovs_mutex_unlock(&dev->mutex); 22 | 23 | - if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) { 24 | + if (link.link_duplex == ETH_LINK_FULL_DUPLEX) { 25 | if (link.link_speed == ETH_LINK_SPEED_AUTONEG) { 26 | *current = NETDEV_F_AUTONEG; 27 | } 28 | } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) { 29 | - if (link.link_speed == ETH_LINK_SPEED_10) { 30 | + if (link.link_speed == ETH_LINK_SPEED_10M_HD) { 31 | *current = NETDEV_F_10MB_HD; 32 | } 33 | - if (link.link_speed == ETH_LINK_SPEED_100) { 34 | + if (link.link_speed == ETH_LINK_SPEED_100M_HD) { 35 | *current = NETDEV_F_100MB_HD; 36 | } 37 | - if (link.link_speed == ETH_LINK_SPEED_1000) { 38 | + if (link.link_speed == ETH_LINK_SPEED_1G) { 39 | 
*current = NETDEV_F_1GB_HD; 40 | } 41 | } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) { 42 | - if (link.link_speed == ETH_LINK_SPEED_10) { 43 | + if (link.link_speed == ETH_LINK_SPEED_10M) { 44 | *current = NETDEV_F_10MB_FD; 45 | } 46 | - if (link.link_speed == ETH_LINK_SPEED_100) { 47 | + if (link.link_speed == ETH_LINK_SPEED_100M) { 48 | *current = NETDEV_F_100MB_FD; 49 | } 50 | - if (link.link_speed == ETH_LINK_SPEED_1000) { 51 | + if (link.link_speed == ETH_LINK_SPEED_1G) { 52 | *current = NETDEV_F_1GB_FD; 53 | } 54 | - if (link.link_speed == ETH_LINK_SPEED_10000) { 55 | + if (link.link_speed == ETH_LINK_SPEED_10G) { 56 | *current = NETDEV_F_10GB_FD; 57 | } 58 | } 59 | diff --git a/lib/odp-util.c b/lib/odp-util.c 60 | index b4689cc..dc933c2 100644 61 | --- a/lib/odp-util.c 62 | +++ b/lib/odp-util.c 63 | @@ -2366,7 +2366,7 @@ format_odp_tun_vxlan_opt(const struct nlattr *attr, 64 | case OVS_VXLAN_EXT_GBP: { 65 | uint32_t key = nl_attr_get_u32(a); 66 | ovs_be16 id, id_mask; 67 | - uint8_t flags, flags_mask; 68 | + uint8_t flags = 0, flags_mask = 0; 69 | 70 | id = htons(key & 0xFFFF); 71 | flags = (key >> 16) & 0xFF; 72 | diff --git a/utilities/ovs-dev.py b/utilities/ovs-dev.py 73 | index c121706..b24ac57 100755 74 | --- a/utilities/ovs-dev.py 75 | +++ b/utilities/ovs-dev.py 76 | @@ -92,7 +92,7 @@ def conf(): 77 | if options.optimize is None: 78 | options.optimize = 0 79 | 80 | - cflags += " -O%s" % str(options.optimize) 81 | + cflags += " -O%s -march=native" % str(options.optimize) 82 | 83 | ENV["CFLAGS"] = cflags 84 | 85 | @@ -104,7 +104,7 @@ def conf(): 86 | pass # Directory exists. 
87 | 88 | os.chdir(BUILD_GCC) 89 | - _sh(*(configure + ["--with-linux=/lib/modules/%s/build" % uname()])) 90 | + _sh(*(configure)) 91 | 92 | try: 93 | _sh("clang --version", check=True) 94 | -- 95 | 2.8.0.rc3 96 | 97 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 120 2 | reorder_imports = true 3 | write_mode="Overwrite" 4 | report_fixme="Always" 5 | -------------------------------------------------------------------------------- /scripts/bind-xl710.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export LD_LIBRARY_PATH=/opt/e2d2/e2d2/3rdparty/dpdk/build/lib 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | DPDK_HOME=$BASE_DIR/../3rdparty/dpdk 5 | modprobe uio 6 | insmod $DPDK_HOME/build/kmod/igb_uio.ko 7 | $DPDK_HOME/tools/dpdk-devbind.py --status \ 8 | | grep XL710 \ 9 | | awk '{print $1}' \ 10 | | xargs \ 11 | $DPDK_HOME/tools/dpdk-devbind.py -b igb_uio 12 | -------------------------------------------------------------------------------- /scripts/check-examples.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | 4 | """ 5 | import sys 6 | import os 7 | def main(*directories): 8 | directory_set = set(directories) 9 | base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'test') 10 | print("Searching in %s"%(base_dir)) 11 | for (dirpath, dirnames, filenames) in os.walk(base_dir): 12 | for filename in filenames: 13 | if filename == "Cargo.toml": 14 | rest,dir=os.path.split(dirpath) 15 | root=os.path.split(rest)[1] 16 | test_dir=os.path.join(root, dir) 17 | if root == 'test' and test_dir not in directory_set: 18 | print("Found Cargo.toml in %s but not in build.sh"%(test_dir)) 19 | sys.exit(1) 20 | sys.exit(0) 21 | if __name__ == "__main__": 22 | if 
len(sys.argv) == 1: 23 | print("Usage: %s json"%sys.argv[0], file=sys.stderr) 24 | sys.exit(1) 25 | else: 26 | main(*sys.argv[1:]) 27 | -------------------------------------------------------------------------------- /scripts/comparisons/kill-ovs-chain.py: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | OVS_HOME="$BASE_DIR/../ovs" 5 | DPDK_LIB="$BASE_DIR/../dpdk/build/lib" 6 | export LD_LIBRARY_PATH="${DPDK_LIB}:${LD_LIBRARY_PATH}" 7 | running=$(docker ps -q) 8 | if [ ! -z "$running" ]; then 9 | echo "Killing and removing container" 10 | docker kill ${running} 11 | docker rm ${running} 12 | fi 13 | pushd $OVS_HOME 14 | $( $OVS_HOME/utilities/ovs-dev.py env ) 15 | $OVS_HOME/utilities/ovs-dev.py kill 16 | popd 17 | -------------------------------------------------------------------------------- /scripts/comparisons/start-bess-chain-vm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | # args 4 | # 1: Master core (4) 5 | # 2: Number of rings (1) 6 | 7 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 8 | BESS_HOME="$BASE_DIR/../bess" 9 | DPDK_LIB="$BASE_DIR/../dpdk/build/lib" 10 | export LD_LIBRARY_PATH="${DPDK_LIB}:${LD_LIBRARY_PATH}" 11 | 12 | INP_LCORE=${1-"4"} 13 | MASTER_LCORE=$((INP_LCORE - 1)) 14 | CORE0=$((MASTER_LCORE + 1)) 15 | CORE1=$((MASTER_LCORE + 2)) 16 | PHYNICS=1 17 | RINGS=${2-1} 18 | IFACE=${BENCH_IFACE-"07:00.0"} 19 | BESS_CORE0=${CORE0} BESS_CORE1=${CORE1} BESS_IFACE=${IFACE} BESS_CHAIN=${RINGS} ${BESS_HOME}/bin/bessctl daemon start \ 20 | -- run file ${BASE_DIR}/vhchain.bess 21 | 22 | start_vm () { 23 | MCORE=$1 24 | LCORE=$2 25 | VDEV=$3 26 | PORT=$4 27 | MONITOR_PORT=$5 28 | DISPLAY=$6 29 | echo "Using VDEV ${VDEV}" 30 | 31 | CORE_MASK=$(printf "0x%x" $((2**(MCORE) + 2**(LCORE)))) 32 | OUT_FILE=$(mktemp) 33 | echo "Core Mask" 
${CORE_MASK} 34 | sudo taskset ${CORE_MASK} qemu-system-x86_64 --enable-kvm --cpu host,migratable=off --smp 2,cores=2,threads=1,sockets=1 -snapshot -hda \ 35 | ${BASE_DIR}/../debian/debian-nb.img -m 2048M -object memory-backend-file,id=mem,size=2048M,mem-path=/dev/hugepages,share=on -numa \ 36 | node,memdev=mem -mem-prealloc -monitor telnet:127.0.0.1:${MONITOR_PORT},server,nowait -device e1000,netdev=user.0 -netdev \ 37 | user,id=user.0,hostfwd=tcp::${PORT}-:22 -vga std -serial file:${OUT_FILE} -daemonize -vnc :${DISPLAY},password \ 38 | -chardev socket,id=char0,path=/tmp/v${VDEV} -netdev type=vhost-user,id=v${VDEV},chardev=char0,vhostforce \ 39 | -device virtio-net-pci,mac=00:16:3d:22:33:57,netdev=v${VDEV} 40 | echo "Out file is ${OUT_FILE}" 41 | until [ -e ${OUT_FILE} ]; do 42 | sleep 0.1 43 | done 44 | until cat $OUT_FILE | grep "login:"; do 45 | sleep 0.1 46 | done 47 | echo "Booted" 48 | } 49 | 50 | start_app () { 51 | PORT=$1 52 | extra_args=$2 53 | ssh -p${PORT} -i ${BASE_DIR}/../debian/vm_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@localhost \ 54 | /opt/netbricks/scripts/bind-virtio.sh 55 | devs=$( ssh -p${PORT} -i ${BASE_DIR}/../debian/vm_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@localhost \ 56 | /opt/netbricks/3rdparty/dpdk/tools/dpdk-devbind.py --status \ 57 | | grep 'Virtio' \ 58 | | awk '{print $1}' ) 59 | echo "Active ports are $devs" 60 | ports_string="" 61 | for dev in $devs; do 62 | ports_string+="-p ${dev} -c 1" 63 | done 64 | ssh -p${PORT} -i ${BASE_DIR}/../debian/vm_key -o UserKnownHostsFile=/dev/null \ 65 | -o StrictHostKeyChecking=no root@localhost \ 66 | "nohup /opt/netbricks/build.sh run zcsi-chain -l 1 -m 0 ${ports_string} ${extra_args} &> /var/log/nb < /dev/null &" 67 | } 68 | BASE_CORE=$(( CORE1 + 1 )) 69 | BASE_VDEV=0 70 | BASE_PORT=5555 71 | BASE_MONITOR=1234 72 | BASE_DISPLAY=1 73 | start_vm ${BASE_CORE} $(( BASE_CORE + 1)) ${BASE_VDEV} ${BASE_PORT} ${BASE_MONITOR} ${BASE_DISPLAY} 74 
| if ((odd == 1)); then 75 | start_app ${BASE_PORT} "" 76 | else 77 | start_app ${BASE_PORT} "-j 1" 78 | fi 79 | 80 | for (( vm = 1; vm < ${RINGS}; vm++ )); do 81 | PORT=$(( BASE_PORT + vm )) 82 | CORE=$(( BASE_CORE + 2*vm )) 83 | start_vm ${CORE} $(( CORE + 1 )) $(( BASE_VDEV + vm )) $PORT $(( BASE_MONITOR + vm )) \ 84 | $(( BASE_DISPLAY + vm )) 85 | start_app ${PORT} 86 | done 87 | -------------------------------------------------------------------------------- /scripts/comparisons/start-bess-chain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | # args 4 | # 1: Master core (4) 5 | # 2: Number of rings (1) 6 | 7 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 8 | BESS_HOME="$BASE_DIR/../bess" 9 | DPDK_LIB="$BASE_DIR/../dpdk/build/lib" 10 | export LD_LIBRARY_PATH="${DPDK_LIB}:${LD_LIBRARY_PATH}" 11 | #HUGEPAGES_HOME=/opt/e2d2/libhugetlbfs 12 | 13 | INP_LCORE=${1-"4"} 14 | MASTER_LCORE=$((INP_LCORE - 1)) 15 | CORE0=$((MASTER_LCORE + 1)) 16 | CORE1=$((MASTER_LCORE + 2)) 17 | PHYNICS=1 18 | RINGS=${2-1} 19 | IFACE=${BENCH_IFACE-"07:00.0"} 20 | 21 | INP_LCORE=${1-"4"} 22 | MASTER_LCORE=$((INP_LCORE - 1)) 23 | CORE0=$((MASTER_LCORE + 1)) 24 | CORE1=$((MASTER_LCORE + 2)) 25 | PHYNICS=1 26 | RINGS=${2-1} 27 | IFACE=${BENCH_IFACE-"07:00.0"} 28 | ${BESS_HOME}/bin/bessd -a -k 29 | BESS_PID=$(cat /var/run/bessd.pid) 30 | echo "bessd pid is ${BESS_PID}" 31 | sleep 5 32 | BESS_CORE0=${CORE0} BESS_CORE1=${CORE1} BESS_IFACE=${IFACE} BESS_CHAIN=${RINGS} ${BESS_HOME}/bin/bessctl \ 33 | run file ${BASE_DIR}/vpchain.bess 34 | 35 | 36 | odd=$((RINGS%2)) 37 | echo "PMD mask=${PMD_MASK}" 38 | CONTAINER_CORE=$((CORE1 + 1)) # Cores here are 0 numbered 39 | if ((odd==1)); then 40 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CONTAINER_CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 41 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev 
-v \ 42 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run -v /tmp/sn_vports:/tmp/sn_vports \ 43 | netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 44 | --secondary -n "rte${BESS_PID}" -l 1 -m ${MASTER_LCORE} -c ${CONTAINER_CORE} -p bess:rte_ring0 45 | else 46 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CONTAINER_CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 47 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 48 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run -v /tmp/sn_vports:/tmp/sn_vports \ 49 | netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 50 | --secondary -n "rte${BESS_PID}" -l 1 -m ${MASTER_LCORE} -c ${CONTAINER_CORE} -p bess:rte_ring0 -j 1 51 | fi 52 | 53 | for (( ctr=1; ctr<$RINGS; ctr++ )); do 54 | CORE=$((CONTAINER_CORE + ctr)) 55 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 56 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 57 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run -v /tmp/sn_vports:/tmp/sn_vports \ 58 | netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 59 | --secondary -n "rte${BESS_PID}" -l 1 -m ${MASTER_LCORE} -c ${CORE} -p bess:rte_ring${ctr} 60 | done 61 | -------------------------------------------------------------------------------- /scripts/comparisons/start-ovs-chain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | # args 4 | # 1: Master core (4) 5 | # 2: Number of rings (1) 6 | 7 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 8 | OVS_HOME="$BASE_DIR/../ovs" 9 | DPDK_LIB="$BASE_DIR/../dpdk/build/lib" 10 | export LD_LIBRARY_PATH="${DPDK_LIB}:${LD_LIBRARY_PATH}" 11 | echo $OVS_HOME 12 | #HUGEPAGES_HOME=/opt/e2d2/libhugetlbfs 13 | 14 | 
INP_LCORE=${1-"4"} 15 | MASTER_LCORE=$((INP_LCORE - 1)) 16 | CORE0=$((MASTER_LCORE + 1)) 17 | CORE1=$((MASTER_LCORE + 2)) 18 | PMD_MASK=$(printf "0x%x" $((2**(CORE0 - 1) + 2**(CORE1 - 1)))) 19 | #PMD_MASK=${2-"0x60"} 20 | PHYNICS=1 21 | RINGS=${2-1} 22 | IFACE=${BENCH_IFACE-"07:00.0"} 23 | ${BASE_DIR}/kill-ovs-chain.py 24 | 25 | pushd $OVS_HOME 26 | $( $OVS_HOME/utilities/ovs-dev.py env ) 27 | #$OVS_HOME/utilities/ovs-dev.py kill 28 | $OVS_HOME/utilities/ovs-dev.py \ 29 | reset run --dpdk -c ${MASTER_LCORE} -n 4 -r 1 --socket-mem 1024,0 \ 30 | --file-prefix "ovs" -w ${IFACE} 31 | ovs-vsctl set Open . other_config:n-dpdk-rxqs=1 32 | ovs-vsctl add-br b -- set bridge b datapath_type=netdev 33 | ovs-vsctl set Open . other_config:pmd-cpu-mask="$PMD_MASK" 34 | ovs-vsctl set Open . other_config:n-handler-threads=1 35 | ovs-vsctl set Open . other_config:n-revalidator-threads=1 36 | ovs-vsctl set Open . other_config:max-idle=10000 37 | 38 | for (( pinterface=0; pinterface<$PHYNICS; pinterface++ )); do 39 | iface="dpdk${pinterface}" 40 | echo "Setting up physical interface ${iface}" 41 | ovs-vsctl add-port b ${iface} -- set Interface ${iface} type=dpdk 42 | done 43 | 44 | for (( rinterface=0; rinterface<$RINGS; rinterface++ )); do 45 | iface="dpdkr${rinterface}" 46 | echo "Setting up DPDK ring interface ${iface}" 47 | ovs-vsctl add-port b ${iface} -- set Interface ${iface} type=dpdkr 48 | done 49 | ovs-ofctl del-flows b 50 | 51 | ports=$((PHYNICS+RINGS)) 52 | for (( port=0; port<$((ports - 1)); port++ )); do 53 | src_port=$((port+1)) 54 | dst_port=$((port+2)) 55 | echo ovs-ofctl add-flow b in_port=${src_port},actions=output:${dst_port} 56 | ovs-ofctl add-flow b in_port=${src_port},actions=output:${dst_port} 57 | done 58 | ovs-ofctl add-flow b in_port=${ports},actions=output:1 59 | odd=$((RINGS%2)) 60 | echo "PMD mask=${PMD_MASK}" 61 | CONTAINER_CORE=$((CORE1 + 0)) # Cores here are 0 numbered 62 | if ((odd==1)); then 63 | sudo docker run -d --privileged 
--cpuset-cpus="${MASTER_LCORE},${CONTAINER_CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 64 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 65 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 66 | --secondary -n ovs -l 1 -m ${MASTER_LCORE} -c ${CONTAINER_CORE} -p ovs:0 67 | else 68 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CONTAINER_CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 69 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 70 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 71 | --secondary -n ovs -l 1 -m ${MASTER_LCORE} -c ${CONTAINER_CORE} -p ovs:0 -j 1 72 | fi 73 | 74 | for (( ctr=1; ctr<$RINGS; ctr++ )); do 75 | CORE=$((CONTAINER_CORE + ctr)) 76 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 77 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 78 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 79 | --secondary -n ovs -l 1 -m ${MASTER_LCORE} -c ${CORE} -p ovs:${ctr} 80 | done 81 | popd 82 | -------------------------------------------------------------------------------- /scripts/comparisons/start-ovs-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | # args 4 | # 1: Master core (5) 5 | # 2: PMD mask (0x60) 6 | # 4: Number of rings (1) 7 | 8 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 9 | OVS_HOME="$BASE_DIR/../ovs" 10 | DPDK_LIB="$BASE_DIR/../dpdk/build/lib" 11 | export LD_LIBRARY_PATH="${DPDK_LIB}:${LD_LIBRARY_PATH}" 12 | echo $OVS_HOME 13 | 
#HUGEPAGES_HOME=/opt/e2d2/libhugetlbfs 14 | 15 | INP_LCORE=${1-"4"} 16 | MASTER_LCORE=$((INP_LCORE - 1)) 17 | CORE0=$((MASTER_LCORE + 1)) 18 | CORE1=$((MASTER_LCORE + 2)) 19 | PMD_MASK=$(printf "0x%x" $((2**(CORE0 - 1) + 2**(CORE1 - 1)))) 20 | PHYNICS=1 21 | RINGS=${2-1} 22 | IFACE=${BENCH_IFACE-"07:00.0"} 23 | ${BASE_DIR}/kill-ovs-chain.py 24 | 25 | pushd $OVS_HOME 26 | $( $OVS_HOME/utilities/ovs-dev.py env ) 27 | #$OVS_HOME/utilities/ovs-dev.py kill 28 | $OVS_HOME/utilities/ovs-dev.py \ 29 | reset run --dpdk -c ${MASTER_LCORE} -n 4 -r 1 --socket-mem 1024,0 \ 30 | --file-prefix "ovs" -w ${IFACE} 31 | ovs-vsctl set Open . other_config:n-dpdk-rxqs=1 32 | ovs-vsctl add-br b -- set bridge b datapath_type=netdev 33 | ovs-vsctl set Open . other_config:pmd-cpu-mask="$PMD_MASK" 34 | ovs-vsctl set Open . other_config:n-handler-threads=1 35 | ovs-vsctl set Open . other_config:n-revalidator-threads=1 36 | ovs-vsctl set Open . other_config:max-idle=10000 37 | 38 | for (( pinterface=0; pinterface<$PHYNICS; pinterface++ )); do 39 | iface="dpdk${pinterface}" 40 | echo "Setting up physical interface ${iface}" 41 | ovs-vsctl add-port b ${iface} -- set Interface ${iface} type=dpdk 42 | done 43 | 44 | for (( rinterface=0; rinterface<$RINGS; rinterface++ )); do 45 | iface="dpdkr${rinterface}" 46 | echo "Setting up DPDK ring interface ${iface}" 47 | ovs-vsctl add-port b ${iface} -- set Interface ${iface} type=dpdkr 48 | done 49 | ovs-ofctl del-flows b 50 | 51 | ports=$((PHYNICS+RINGS)) 52 | for (( port=0; port<$((ports - 1)); port++ )); do 53 | src_port=$((port+1)) 54 | dst_port=$((port+2)) 55 | echo ovs-ofctl add-flow b in_port=${src_port},actions=output:${dst_port} 56 | ovs-ofctl add-flow b in_port=${src_port},actions=output:${dst_port} 57 | done 58 | ovs-ofctl add-flow b in_port=${ports},actions=output:1 59 | odd=$((RINGS%2)) 60 | echo "PMD mask=${PMD_MASK}" 61 | popd 62 | -------------------------------------------------------------------------------- 
/scripts/comparisons/start-ovs-thruput.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | # args 4 | # 1: Master core (4) 5 | # 2: Number of rings (1) 6 | 7 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 8 | OVS_HOME="$BASE_DIR/../ovs" 9 | DPDK_LIB="$BASE_DIR/../dpdk/build/lib" 10 | export LD_LIBRARY_PATH="${DPDK_LIB}:${LD_LIBRARY_PATH}" 11 | echo $OVS_HOME 12 | #HUGEPAGES_HOME=/opt/e2d2/libhugetlbfs 13 | 14 | INP_LCORE=${1-"4"} 15 | MASTER_LCORE=$((INP_LCORE - 1)) 16 | CORE0=$((MASTER_LCORE + 1)) 17 | CORE1=$((MASTER_LCORE + 2)) 18 | PMD_MASK=$(printf "0x%x" $((2**(CORE0 - 1) + 2**(CORE1 - 1)))) 19 | #PMD_MASK=${2-"0x60"} 20 | PHYNICS=4 21 | RINGS=4 22 | #IFACE=${BENCH_IFACE-"07:00.0"} 23 | ${BASE_DIR}/kill-ovs-chain.py 24 | 25 | pushd $OVS_HOME 26 | $( $OVS_HOME/utilities/ovs-dev.py env ) 27 | #$OVS_HOME/utilities/ovs-dev.py kill 28 | $OVS_HOME/utilities/ovs-dev.py \ 29 | reset run --dpdk -c ${MASTER_LCORE} -n 4 -r 1 --socket-mem 1024,0 \ 30 | --file-prefix "ovs" -w 07:00.0 -w 07:00.1 -w 07:00.2 -w 07:00.3 31 | ovs-vsctl set Open . other_config:n-dpdk-rxqs=1 32 | ovs-vsctl add-br b -- set bridge b datapath_type=netdev 33 | ovs-vsctl set Open . other_config:pmd-cpu-mask="$PMD_MASK" 34 | ovs-vsctl set Open . other_config:n-handler-threads=1 35 | ovs-vsctl set Open . other_config:n-revalidator-threads=1 36 | ovs-vsctl set Open . 
other_config:max-idle=10000 37 | 38 | for (( pinterface=0; pinterface<$PHYNICS; pinterface++ )); do 39 | iface="dpdk${pinterface}" 40 | echo "Setting up physical interface ${iface}" 41 | ovs-vsctl add-port b ${iface} -- set Interface ${iface} type=dpdk 42 | done 43 | 44 | for (( rinterface=0; rinterface<$RINGS; rinterface++ )); do 45 | iface="dpdkr${rinterface}" 46 | echo "Setting up DPDK ring interface ${iface}" 47 | ovs-vsctl add-port b ${iface} -- set Interface ${iface} type=dpdkr 48 | done 49 | ovs-ofctl del-flows b 50 | 51 | #ports=$((PHYNICS+RINGS)) 52 | #for (( port=0; port<$((ports - 1)); port++ )); do 53 | #src_port=$((port+1)) 54 | #dst_port=$((port+2)) 55 | #echo ovs-ofctl add-flow b in_port=${src_port},actions=output:${dst_port} 56 | #ovs-ofctl add-flow b in_port=${src_port},actions=output:${dst_port} 57 | #done 58 | port_string="" 59 | CONTAINER_CORE=$((CORE1 + 0)) # Cores here are 0 numbered 60 | for (( port=0; port <${PHYNICS}; port++)); do 61 | phy_port=$((port + 1)) 62 | virt_port=$((PHYNICS+port+1)) 63 | ovs-ofctl add-flow b in_port=${phy_port},actions=output:${virt_port} 64 | ovs-ofctl add-flow b in_port=${virt_port},actions=output:${phy_port} 65 | port_string+=" -p ovs:${port} -c ${CONTAINER_CORE}" 66 | done 67 | echo "PMD mask=${PMD_MASK}" 68 | echo "port_string=${port_string}" 69 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CONTAINER_CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 70 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 71 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 72 | --secondary -n ovs -l 1 -m ${MASTER_LCORE} ${port_string} 73 | popd 74 | -------------------------------------------------------------------------------- /scripts/comparisons/start-vm-bess.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o 
errexit 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | OVS_HOME=${BASE_DIR}/../ovs 5 | OUT_FILE=$(mktemp) 6 | CORE_MASK=$(printf "0x%x" $((2**(6-1) + 2**(7-1)))) 7 | sudo taskset ${CORE_MASK} qemu-system-x86_64 --enable-kvm --cpu host,migratable=off --smp 2,cores=2,threads=1,sockets=1 -snapshot -hda \ 8 | ${BASE_DIR}/../debian/debian-nb.img -m 8192M -object memory-backend-file,id=mem,size=8192M,mem-path=/dev/hugepages,share=on -numa \ 9 | node,memdev=mem -mem-prealloc -monitor telnet:127.0.0.1:1234,server,nowait -device e1000,netdev=user.0 -netdev \ 10 | user,id=user.0,hostfwd=tcp::5555-:22 -vga std -serial file:${OUT_FILE} -daemonize -vnc :2,password \ 11 | -chardev socket,id=char0,path=/tmp/v0 -netdev type=vhost-user,id=v0,chardev=char0,vhostforce \ 12 | -device virtio-net-pci,mac=00:16:3d:22:33:57,netdev=v0 13 | echo "Out file is ${OUT_FILE}" 14 | until [ -e ${OUT_FILE} ]; do 15 | sleep 0.1 16 | done 17 | until cat $OUT_FILE | grep "login:"; do 18 | sleep 0.1 19 | done 20 | echo "Booted" 21 | echo "ssh -p5555 -i ${BASE_DIR}/../debian/vm_key apanda@localhost" 22 | ssh -p5555 -i ${BASE_DIR}/../debian/vm_key root@localhost 23 | -------------------------------------------------------------------------------- /scripts/comparisons/start-vm-writable.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | OVS_HOME=${BASE_DIR}/../ovs 5 | OUT_FILE=$(mktemp) 6 | CORE_MASK=$(printf "0x%x" $((2**(6-1) + 2**(7-1)))) 7 | sudo taskset ${CORE_MASK} qemu-system-x86_64 --enable-kvm --cpu host,migratable=off --smp 2,cores=2,threads=1,sockets=1 -hda \ 8 | ${BASE_DIR}/../debian/debian-nb.img -m 8192M -object memory-backend-file,id=mem,size=8192M,mem-path=/dev/hugepages,share=on -numa \ 9 | node,memdev=mem -mem-prealloc -monitor telnet:127.0.0.1:1234,server,nowait -device e1000,netdev=user.0 -netdev \ 10 | 
user,id=user.0,hostfwd=tcp::5555-:22 -vga std -serial file:${OUT_FILE} -daemonize -vnc :2,password \ 11 | -chardev socket,id=char0,path=${OVS_HOME}/_run/run/v0 -netdev type=vhost-user,id=v0,chardev=char0,vhostforce \ 12 | -device virtio-net-pci,mac=00:16:3d:22:33:57,netdev=v0 13 | echo "Out file is ${OUT_FILE}" 14 | until [ -e ${OUT_FILE} ]; do 15 | sleep 0.1 16 | done 17 | until cat $OUT_FILE | grep "login:"; do 18 | sleep 0.1 19 | done 20 | echo "Booted" 21 | echo "ssh -p5555 -i ${BASE_DIR}/../debian/vm_key apanda@localhost" 22 | ssh -p5555 -i ${BASE_DIR}/../debian/vm_key root@localhost 23 | -------------------------------------------------------------------------------- /scripts/comparisons/start-vm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | OVS_HOME=${BASE_DIR}/../ovs 5 | OUT_FILE=$(mktemp) 6 | CORE_MASK=$(printf "0x%x" $((2**(6-1) + 2**(7-1)))) 7 | sudo taskset ${CORE_MASK} qemu-system-x86_64 --enable-kvm --cpu host,migratable=off --smp 2,cores=2,threads=1,sockets=1 -snapshot -hda \ 8 | ${BASE_DIR}/../debian/debian-nb.img -m 8192M -object memory-backend-file,id=mem,size=8192M,mem-path=/dev/hugepages,share=on -numa \ 9 | node,memdev=mem -mem-prealloc -monitor telnet:127.0.0.1:1234,server,nowait -device e1000,netdev=user.0 -netdev \ 10 | user,id=user.0,hostfwd=tcp::5555-:22 -vga std -serial file:${OUT_FILE} -daemonize -vnc :2,password \ 11 | -chardev socket,id=char0,path=${OVS_HOME}/_run/run/v0 -netdev type=vhost-user,id=v0,chardev=char0,vhostforce \ 12 | -device virtio-net-pci,mac=00:16:3d:22:33:57,netdev=v0 13 | echo "Out file is ${OUT_FILE}" 14 | until [ -e ${OUT_FILE} ]; do 15 | sleep 0.1 16 | done 17 | until cat $OUT_FILE | grep "login:"; do 18 | sleep 0.1 19 | done 20 | echo "Booted" 21 | echo "ssh -p5555 -i ${BASE_DIR}/../debian/vm_key apanda@localhost" 22 | ssh -p5555 -i ${BASE_DIR}/../debian/vm_key 
root@localhost 23 | -------------------------------------------------------------------------------- /scripts/comparisons/vhchain.bess: -------------------------------------------------------------------------------- 1 | core0 = int($BESS_CORE0!'0') 2 | core1 = int($BESS_CORE1!'1') 3 | 4 | bess.add_worker(0, core0) 5 | bess.add_worker(1, core1) 6 | 7 | iface = $BESS_IFACE!'02:00.0' 8 | 9 | chain_len = int($BESS_CHAIN!'1') 10 | print 'Using interface %s for chain of len %d cores %d %d'%(iface, chain_len, core0, core1) 11 | pp = PMDPort(pci=iface) 12 | prev_port = pp 13 | next_worker = 0 14 | 15 | for i in xrange(0, chain_len): 16 | name='eth_vhost%d'%i 17 | dir='/tmp/v%d'%i 18 | vdev_str='%s,iface=%s,queues=1'%(name, dir) 19 | vp = PMDPort(vdev=vdev_str) 20 | name='inc%d'%i 21 | PortInc(name=name, port=prev_port) -> PortOut(port=vp) 22 | bess.attach_task(name, 0, next_worker) 23 | #next_worker = (next_worker + 1) % 2 24 | prev_port = vp 25 | name='ppout' 26 | PortInc(name=name,port=prev_port) -> PortOut(port=pp) 27 | bess.attach_task(name, 0, 0) 28 | -------------------------------------------------------------------------------- /scripts/comparisons/vpchain.bess: -------------------------------------------------------------------------------- 1 | core0 = int($BESS_CORE0!'0') 2 | core1 = int($BESS_CORE1!'1') 3 | 4 | bess.add_worker(0, core0) 5 | bess.add_worker(1, core1) 6 | 7 | iface = $BESS_IFACE!'02:00.0' 8 | 9 | chain_len = int($BESS_CHAIN!'1') 10 | print 'Using interface %s for chain of len %d cores %d %d'%(iface, chain_len, core0, core1) 11 | pp = PMDPort(pci=iface) 12 | prev_port = pp 13 | next_worker = 0 14 | 15 | for i in xrange(0, chain_len): 16 | vp = RteRingVPort() 17 | name='inc%d'%i 18 | PortInc(name=name, port=prev_port) -> PortOut(port=vp) 19 | bess.attach_task(name, 0, next_worker) 20 | next_worker = (next_worker + 1) % 2 21 | prev_port = vp 22 | name='ppout' 23 | PortInc(name=name,port=prev_port) -> PortOut(port=pp) 24 | bess.attach_task(name, 0, 
0) 25 | -------------------------------------------------------------------------------- /scripts/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export LD_LIBRARY_PATH=/opt/e2d2/e2d2/3rdparty/dpdk/build/lib 3 | DPDK_HOME=/opt/e2d2/e2d2/3rdparty/dpdk 4 | modprobe uio 5 | insmod $DPDK_HOME/build/kmod/igb_uio.ko 6 | $DPDK_HOME/tools/dpdk_nic_bind.py -b igb_uio 02:00.{0,1} 7 | -------------------------------------------------------------------------------- /scripts/kill-ovs-chain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | OVS_HOME="$BASE_DIR/../ovs" 5 | DPDK_LIB="$BASE_DIR/../dpdk/build/lib" 6 | export LD_LIBRARY_PATH="${DPDK_LIB}:${LD_LIBRARY_PATH}" 7 | running=$(docker ps -q) 8 | if [ ! -z "$running" ]; then 9 | echo "Killing and removing container" 10 | docker kill ${running} 11 | docker rm ${running} 12 | fi 13 | pushd $OVS_HOME 14 | $( $OVS_HOME/utilities/ovs-dev.py env ) 15 | $OVS_HOME/utilities/ovs-dev.py kill 16 | popd 17 | -------------------------------------------------------------------------------- /scripts/ovs-hairpin.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | set -o errexit 4 | OVS_HOME=/opt/e2d2/ovs 5 | VM_HOME=/opt/e2d2/vm 6 | HUGEPAGES_HOME=/opt/e2d2/libhugetlbfs 7 | pushd $OVS_HOME 8 | export LD_LIBRARY_PATH=/opt/e2d2/dpdk/build/lib 9 | $( $OVS_HOME/utilities/ovs-dev.py env ) 10 | $OVS_HOME/utilities/ovs-dev.py kill 11 | LD_PRELOAD="$HUGEPAGES_HOME/obj64/libhugetlbfs.so" $OVS_HOME/utilities/ovs-dev.py reset run --dpdk \ 12 | -c 0x1 -n 4 -r 1 --socket-mem 1024,0 -w 07:00.0 -w 07:00.1 -w 07:00.2 -w 07:00.3 13 | ovs-vsctl set Open . other_config:n-dpdk-rxqs=1 14 | ovs-vsctl add-br b -- set bridge b datapath_type=netdev 15 | ovs-vsctl set Open . 
other_config:pmd-cpu-mask=0x30 16 | ovs-vsctl set Open . other_config:n-handler-threads=1 17 | ovs-vsctl set Open . other_config:n-revalidator-threads=1 18 | ovs-vsctl set Open . other_config:max-idle=10000 19 | ovs-vsctl add-port b dpdk0 -- set Interface dpdk0 type=dpdk 20 | ovs-ofctl del-flows b 21 | 22 | ovs-ofctl add-flow b in_port=1,actions=in_port 23 | popd 24 | -------------------------------------------------------------------------------- /scripts/read-target.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Take the output of Cargo.toml and print target name. 4 | """ 5 | import sys 6 | import json 7 | 8 | def main(inp): 9 | try: 10 | o = json.loads(inp) 11 | except: 12 | print("Failed to interpret JSON", file=sys.stderr) 13 | if 'targets' in o: 14 | for target in o['targets']: 15 | if 'kind' in target and (target['kind'] == 'bin' or 'bin' in target['kind']): 16 | print(target['name']) 17 | else: 18 | print("No kind found") 19 | 20 | if __name__=="__main__": 21 | if len(sys.argv) < 2: 22 | print("Usage: %s json"%sys.argv[0], file=sys.stderr) 23 | sys.exit(1) 24 | if len(sys.argv) == 2 and sys.argv[1] == '-': 25 | inp = sys.stdin.read() 26 | main(inp) 27 | else: 28 | main(' '.join(sys.argv[1:])) 29 | -------------------------------------------------------------------------------- /scripts/start-ovs-chain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | # args 4 | # 1: Master core (5) 5 | # 2: PMD mask (0x60) 6 | # 4: Number of rings (1) 7 | 8 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 9 | OVS_HOME="$BASE_DIR/../ovs" 10 | DPDK_LIB="$BASE_DIR/../dpdk/build/lib" 11 | export LD_LIBRARY_PATH="${DPDK_LIB}:${LD_LIBRARY_PATH}" 12 | echo $OVS_HOME 13 | #HUGEPAGES_HOME=/opt/e2d2/libhugetlbfs 14 | 15 | INP_LCORE=${1-"4"} 16 | MASTER_LCORE=$((INP_LCORE - 1)) 17 | CORE0=$((MASTER_LCORE + 1)) 18 | 
CORE1=$((MASTER_LCORE + 2)) 19 | PMD_MASK=$(printf "0x%x" $((2**(CORE0 - 1) + 2**(CORE1 - 1)))) 20 | #PMD_MASK=${2-"0x60"} 21 | PHYNICS=1 22 | RINGS=${2-1} 23 | ${BASE_DIR}/kill-ovs-chain.py 24 | 25 | pushd $OVS_HOME 26 | $( $OVS_HOME/utilities/ovs-dev.py env ) 27 | #$OVS_HOME/utilities/ovs-dev.py kill 28 | $OVS_HOME/utilities/ovs-dev.py \ 29 | reset run --dpdk -c ${MASTER_LCORE} -n 4 -r 1 --socket-mem 1024,0 \ 30 | --file-prefix "ovs" -w 07:00.0 -w 07:00.1 -w 07:00.2 -w 07:00.3 31 | ovs-vsctl set Open . other_config:n-dpdk-rxqs=1 32 | ovs-vsctl add-br b -- set bridge b datapath_type=netdev 33 | ovs-vsctl set Open . other_config:pmd-cpu-mask="$PMD_MASK" 34 | ovs-vsctl set Open . other_config:n-handler-threads=1 35 | ovs-vsctl set Open . other_config:n-revalidator-threads=1 36 | ovs-vsctl set Open . other_config:max-idle=10000 37 | 38 | for (( pinterface=0; pinterface<$PHYNICS; pinterface++ )); do 39 | iface="dpdk${pinterface}" 40 | echo "Setting up physical interface ${iface}" 41 | ovs-vsctl add-port b ${iface} -- set Interface ${iface} type=dpdk 42 | done 43 | 44 | for (( rinterface=0; rinterface<$RINGS; rinterface++ )); do 45 | iface="dpdkr${rinterface}" 46 | echo "Setting up DPDK ring interface ${iface}" 47 | ovs-vsctl add-port b ${iface} -- set Interface ${iface} type=dpdkr 48 | done 49 | ovs-ofctl del-flows b 50 | 51 | ports=$((PHYNICS+RINGS)) 52 | for (( port=0; port<$((ports - 1)); port++ )); do 53 | src_port=$((port+1)) 54 | dst_port=$((port+2)) 55 | echo ovs-ofctl add-flow b in_port=${src_port},actions=output:${dst_port} 56 | ovs-ofctl add-flow b in_port=${src_port},actions=output:${dst_port} 57 | done 58 | ovs-ofctl add-flow b in_port=${ports},actions=output:1 59 | odd=$((RINGS%2)) 60 | echo "PMD mask=${PMD_MASK}" 61 | CONTAINER_CORE=$((CORE1 + 0)) # Cores here are 0 numbered 62 | if ((odd==1)); then 63 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CONTAINER_CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 64 | 
/sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 65 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 66 | --secondary -n ovs -l 1 -m ${MASTER_LCORE} -c ${CONTAINER_CORE} -p ovs:0 67 | else 68 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CONTAINER_CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 69 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 70 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 71 | --secondary -n ovs -l 1 -m ${MASTER_LCORE} -c ${CONTAINER_CORE} -p ovs:0 -j 1 72 | fi 73 | 74 | for (( ctr=1; ctr<$RINGS; ctr++ )); do 75 | CORE=$((CONTAINER_CORE + ctr)) 76 | sudo docker run -d --privileged --cpuset-cpus="${MASTER_LCORE},${CORE}" -v /sys/bus/pci/drivers:/sys/bus/pci/drivers -v \ 77 | /sys/kernel/mm/hugepages:/sys/kernel/mm/hugepages -v /mnt/huge:/mnt/huge -v /dev:/dev -v \ 78 | /sys/devices/system/node:/sys/devices/system/node -v /var/run:/var/run netbricks:vswitch /opt/netbricks/build.sh run zcsi-chain \ 79 | --secondary -n ovs -l 1 -m ${MASTER_LCORE} -c ${CORE} -p ovs:${ctr} 80 | done 81 | popd 82 | -------------------------------------------------------------------------------- /scripts/tuning/energy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" 4 | sudo modprobe msr 5 | if [ -e ${BASE_DIR}/x86_energy_perf_policy ]; then 6 | sudo $BASE_DIR/x86_energy_perf_policy performance # Set ourselves to performance. 
7 | else 8 | sudo x86_energy_perf_policy performance 9 | fi 10 | sudo $BASE_DIR/pmqos-static.py cpu_dma_latency=0 # Tune Linux QoS to reduce DMA latency 11 | sudo wrmsr -a 0x620 0x3f3f # Turn off uncore frequency scaling and select max frequency 12 | -------------------------------------------------------------------------------- /scripts/tuning/read_cpu_dma_latency.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os 3 | import signal 4 | import struct 5 | import sys 6 | import time 7 | 8 | ALLOWED_INTERFACES = [ "cpu_dma_latency", "network_latency", "network_throughput" ] 9 | def read_pmqos(name): 10 | filename = "/dev/%s" % name 11 | old = open(filename) 12 | old_value = struct.unpack("i", old.read())[0] 13 | print "PMQOS value for %s is %d"%(name, old_value) 14 | if __name__=="__main__": 15 | if len(sys.argv) < 2: 16 | print "Must specify what to read" 17 | sys.exit(1) 18 | read = sys.argv[1] 19 | if read not in ALLOWED_INTERFACES: 20 | print "Cannot read %s"%read 21 | sys.exit(1) 22 | read_pmqos(read) 23 | -------------------------------------------------------------------------------- /test/acl-fw/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-aclfw" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | twox-hash = "*" 13 | 14 | [features] 15 | default = [] 16 | print = [] 17 | -------------------------------------------------------------------------------- /test/acl-fw/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | #![feature(asm)] 3 | extern crate e2d2; 4 | extern crate fnv; 5 | extern crate getopts; 6 | extern crate rand; 7 | extern crate time; 8 | use self::nf::*; 9 | 
use e2d2::allocators::CacheAligned; 10 | use e2d2::config::*; 11 | use e2d2::interface::*; 12 | use e2d2::operators::*; 13 | use e2d2::scheduler::*; 14 | use e2d2::utils::Ipv4Prefix; 15 | use std::env; 16 | use std::sync::Arc; 17 | use std::thread; 18 | use std::time::Duration; 19 | mod nf; 20 | 21 | const CONVERSION_FACTOR: f64 = 1000000000.; 22 | 23 | fn test(ports: Vec>, sched: &mut S) { 24 | for port in &ports { 25 | println!( 26 | "Receiving port {} rxq {} txq {}", 27 | port.port.mac_address(), 28 | port.rxq(), 29 | port.txq() 30 | ); 31 | } 32 | let acls = vec![ 33 | Acl { 34 | src_ip: Some(Ipv4Prefix::new(0, 0)), 35 | dst_ip: None, 36 | src_port: None, 37 | dst_port: None, 38 | established: None, 39 | drop: false, 40 | }, 41 | ]; 42 | let pipelines: Vec<_> = ports 43 | .iter() 44 | .map(|port| acl_match(ReceiveBatch::new(port.clone()), acls.clone()).send(port.clone())) 45 | .collect(); 46 | println!("Running {} pipelines", pipelines.len()); 47 | for pipeline in pipelines { 48 | sched.add_task(pipeline).unwrap(); 49 | } 50 | } 51 | 52 | fn main() { 53 | let args: Vec = env::args().collect(); 54 | 55 | let opts = basic_opts(); 56 | let matches = match opts.parse(&args[1..]) { 57 | Ok(m) => m, 58 | Err(f) => panic!(f.to_string()), 59 | }; 60 | let configuration = read_matches(&matches, &opts); 61 | 62 | let mut config = initialize_system(&configuration).unwrap(); 63 | config.start_schedulers(); 64 | 65 | config.add_pipeline_to_run(Arc::new(move |p, s: &mut StandaloneScheduler| test(p, s))); 66 | config.execute(); 67 | 68 | let mut pkts_so_far = (0, 0); 69 | let mut last_printed = 0.; 70 | const MAX_PRINT_INTERVAL: f64 = 30.; 71 | const PRINT_DELAY: f64 = 15.; 72 | let sleep_delay = (PRINT_DELAY / 2.) 
as u64; 73 | let mut start = time::precise_time_ns() as f64 / CONVERSION_FACTOR; 74 | let sleep_time = Duration::from_millis(sleep_delay); 75 | println!("0 OVERALL RX 0.00 TX 0.00 CYCLE_PER_DELAY 0 0 0"); 76 | loop { 77 | thread::sleep(sleep_time); // Sleep for a bit 78 | let now = time::precise_time_ns() as f64 / CONVERSION_FACTOR; 79 | if now - start > PRINT_DELAY { 80 | let mut rx = 0; 81 | let mut tx = 0; 82 | for port in config.ports.values() { 83 | for q in 0..port.rxqs() { 84 | let (rp, tp) = port.stats(q); 85 | rx += rp; 86 | tx += tp; 87 | } 88 | } 89 | let pkts = (rx, tx); 90 | let rx_pkts = pkts.0 - pkts_so_far.0; 91 | if rx_pkts > 0 || now - last_printed > MAX_PRINT_INTERVAL { 92 | println!( 93 | "{:.2} OVERALL RX {:.2} TX {:.2}", 94 | now - start, 95 | rx_pkts as f64 / (now - start), 96 | (pkts.1 - pkts_so_far.1) as f64 / (now - start) 97 | ); 98 | last_printed = now; 99 | start = now; 100 | pkts_so_far = pkts; 101 | } 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /test/acl-fw/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::headers::*; 2 | use e2d2::operators::*; 3 | use e2d2::utils::{Flow, Ipv4Prefix}; 4 | use fnv::FnvHasher; 5 | use std::collections::HashSet; 6 | use std::hash::BuildHasherDefault; 7 | 8 | type FnvHash = BuildHasherDefault; 9 | 10 | #[derive(Clone)] 11 | pub struct Acl { 12 | pub src_ip: Option, 13 | pub dst_ip: Option, 14 | pub src_port: Option, 15 | pub dst_port: Option, 16 | pub established: Option, 17 | // Related not done 18 | pub drop: bool, 19 | } 20 | 21 | impl Acl { 22 | pub fn matches(&self, flow: &Flow, connections: &HashSet) -> bool { 23 | if (self.src_ip.is_none() || self.src_ip.unwrap().in_range(flow.src_ip)) 24 | && (self.dst_ip.is_none() || self.dst_ip.unwrap().in_range(flow.dst_ip)) 25 | && (self.src_port.is_none() || flow.src_port == self.src_port.unwrap()) 26 | && (self.dst_port.is_none() || 
flow.dst_port == self.dst_port.unwrap()) 27 | { 28 | if let Some(established) = self.established { 29 | let rev_flow = flow.reverse_flow(); 30 | (connections.contains(flow) || connections.contains(&rev_flow)) == established 31 | } else { 32 | true 33 | } 34 | } else { 35 | false 36 | } 37 | } 38 | } 39 | 40 | pub fn acl_match>(parent: T, acls: Vec) -> CompositionBatch { 41 | let mut flow_cache = HashSet::::with_hasher(Default::default()); 42 | parent 43 | .parse::() 44 | .transform(box move |p| { 45 | p.get_mut_header().swap_addresses(); 46 | }) 47 | .parse::() 48 | .filter(box move |p| { 49 | let flow = p.get_header().flow().unwrap(); 50 | for acl in &acls { 51 | if acl.matches(&flow, &flow_cache) { 52 | if !acl.drop { 53 | flow_cache.insert(flow); 54 | } 55 | return !acl.drop; 56 | } 57 | } 58 | return false; 59 | }) 60 | .compose() 61 | } 62 | -------------------------------------------------------------------------------- /test/chain-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-chain" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/chain-test/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::common::EmptyMetadata; 2 | use e2d2::headers::*; 3 | use e2d2::operators::*; 4 | 5 | #[inline] 6 | pub fn chain_nf>(parent: T) -> CompositionBatch { 7 | parent 8 | .parse::() 9 | .transform(box move |pkt| { 10 | let hdr = pkt.get_mut_header(); 11 | hdr.swap_addresses(); 12 | }) 13 | .parse::() 14 | .transform(box |pkt| { 15 | let h = pkt.get_mut_header(); 16 | let ttl = h.ttl(); 17 | h.set_ttl(ttl - 1); 18 | }) 19 | 
.filter(box |pkt| { 20 | let h = pkt.get_header(); 21 | h.ttl() != 0 22 | }) 23 | .compose() 24 | } 25 | 26 | #[inline] 27 | pub fn chain>( 28 | parent: T, 29 | len: u32, 30 | pos: u32, 31 | ) -> CompositionBatch { 32 | let mut chained = chain_nf(parent); 33 | for _ in 1..len { 34 | chained = chain_nf(chained); 35 | } 36 | if len % 2 == 0 || pos % 2 == 1 { 37 | chained 38 | .parse::() 39 | .transform(box move |pkt| { 40 | let hdr = pkt.get_mut_header(); 41 | hdr.swap_addresses(); 42 | }) 43 | .compose() 44 | } else { 45 | chained 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /test/config-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-config" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/config-test/src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate e2d2; 2 | extern crate getopts; 3 | use e2d2::config::*; 4 | use getopts::Options; 5 | use std::env; 6 | use std::process; 7 | fn main() { 8 | let args: Vec = env::args().collect(); 9 | let program = args[0].clone(); 10 | let mut opts = Options::new(); 11 | opts.optflag("h", "help", "print this help menu"); 12 | opts.optopt("", "config", "Configuration file", "TOML file"); 13 | let matches = match opts.parse(&args[1..]) { 14 | Ok(m) => m, 15 | Err(f) => panic!(f.to_string()), 16 | }; 17 | 18 | if matches.opt_present("h") { 19 | print!("{}", opts.usage(&format!("Usage: {} [options]", program))); 20 | process::exit(0) 21 | } 22 | 23 | let cfg = matches 24 | .opt_str("config") 25 | .expect("No configuration supplied, 
rendering this meaningless"); 26 | let sched_cfg = read_configuration(&cfg[..]).expect("Could not parse configuration"); 27 | println!("Read configuration {}", sched_cfg) 28 | } 29 | -------------------------------------------------------------------------------- /test/delay-test/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 14 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 15 | Cargo.lock 16 | -------------------------------------------------------------------------------- /test/delay-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-delay" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/delay-test/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::headers::*; 2 | use e2d2::operators::*; 3 | 4 | #[inline] 5 | fn lat() { 6 | unsafe { 7 | asm!("nop" 8 | : 9 | : 10 | : 11 | : "volatile"); 12 | } 13 | } 14 | 15 | #[inline] 16 | fn delay_loop(delay: u64) { 17 | let mut d = 0; 18 | while d < delay { 19 | lat(); 20 | d += 1; 21 | } 22 | } 23 | 24 | pub fn delay>( 25 | parent: T, 26 | delay: u64, 27 | ) -> TransformBatch> { 28 | parent.parse::().transform(box move |pkt| { 29 | assert!(pkt.refcnt() == 1); 30 | let hdr = pkt.get_mut_header(); 31 | hdr.swap_addresses(); 32 | delay_loop(delay); 33 | }) 34 | } 35 | 
-------------------------------------------------------------------------------- /test/embedded-scheduler-dependency-test/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 14 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 15 | Cargo.lock 16 | -------------------------------------------------------------------------------- /test/embedded-scheduler-dependency-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "embedded-dep-test" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/embedded-scheduler-dependency-test/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | #![feature(asm)] 3 | extern crate e2d2; 4 | extern crate fnv; 5 | extern crate rand; 6 | extern crate time; 7 | use e2d2::scheduler::*; 8 | 9 | pub struct DepTask { 10 | id: String, 11 | deps: Vec, 12 | } 13 | 14 | impl Executable for DepTask { 15 | fn execute(&mut self) { 16 | println!("Task -- {}", self.id); 17 | } 18 | fn dependencies(&mut self) -> Vec { 19 | self.deps.clone() 20 | } 21 | } 22 | impl DepTask { 23 | pub fn new(parent: usize, id: &str) -> DepTask { 24 | DepTask { 25 | id: String::from(id), 26 | deps: vec![parent], 27 | } 28 | } 29 | } 30 | 31 | fn test_func(id: &str) { 32 | println!("Base Task -- {}", id); 33 | } 34 | 35 | fn main() { 36 
| let mut sched = embedded_scheduler::EmbeddedScheduler::new(); 37 | let handle0 = sched.add_task(|| test_func("task-0")).unwrap(); 38 | let other_handles = { 39 | let mut prev_handle = handle0; 40 | let mut nhandles: Vec<_> = (0..10).map(|_| 0).collect(); 41 | for i in 0..nhandles.capacity() { 42 | nhandles[i] = sched 43 | .add_task(DepTask::new(prev_handle, format!("id-{}", i).as_str())) 44 | .unwrap(); 45 | prev_handle = nhandles[i]; 46 | } 47 | nhandles 48 | }; 49 | let len = other_handles.len(); 50 | sched.exec_task(other_handles[len - 1]); 51 | } 52 | -------------------------------------------------------------------------------- /test/embedded-scheduler-test/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 14 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 15 | Cargo.lock 16 | -------------------------------------------------------------------------------- /test/embedded-scheduler-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "embedded-sched-test" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/embedded-scheduler-test/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | #![feature(asm)] 3 | extern crate e2d2; 4 | extern crate fnv; 5 | extern crate rand; 6 | extern crate time; 7 | use 
e2d2::scheduler::*; 8 | 9 | fn test_func(id: &str) { 10 | println!("Running function {}", id); 11 | } 12 | 13 | fn main() { 14 | let mut sched = embedded_scheduler::EmbeddedScheduler::new(); 15 | let handle0 = sched.add_task(|| test_func("task-0")).unwrap(); 16 | let handle1 = sched.add_task(|| test_func("task-1")).unwrap(); 17 | println!("Initialized"); 18 | sched.exec_task(handle1); 19 | sched.exec_task(handle0); 20 | } 21 | -------------------------------------------------------------------------------- /test/framework-test/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 14 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 15 | Cargo.lock 16 | -------------------------------------------------------------------------------- /test/framework-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-test" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | 12 | [features] 13 | default = [] 14 | print = [] 15 | -------------------------------------------------------------------------------- /test/lpm-embedded/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-lpm-embedded" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | 
-------------------------------------------------------------------------------- /test/lpm-embedded/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | extern crate e2d2; 3 | extern crate fnv; 4 | extern crate getopts; 5 | extern crate rand; 6 | extern crate time; 7 | use self::nf::*; 8 | use e2d2::config::*; 9 | use e2d2::interface::*; 10 | use e2d2::operators::*; 11 | use e2d2::scheduler::*; 12 | use std::process; 13 | mod nf; 14 | 15 | fn main() { 16 | let name = String::from("recv"); 17 | let configuration = NetbricksConfiguration::new_with_name(&name[..]); 18 | let configuration = NetbricksConfiguration { 19 | primary_core: 0, 20 | ..configuration 21 | }; 22 | match initialize_system(&configuration) { 23 | Ok(_) => { 24 | let port = VirtualPort::new(1).unwrap(); 25 | let mut sched = embedded_scheduler::EmbeddedScheduler::new(); 26 | let pipeline0 = lpm( 27 | ReceiveBatch::new(port.new_virtual_queue(1).unwrap()), 28 | &mut sched, 29 | ); 30 | let pipeline1 = lpm( 31 | ReceiveBatch::new(port.new_virtual_queue(1).unwrap()), 32 | &mut sched, 33 | ); 34 | let task = sched.add_task(merge(vec![pipeline0, pipeline1])).unwrap(); 35 | println!("Dependencies for task {}", task); 36 | sched.display_dependencies(task); 37 | } 38 | Err(ref e) => { 39 | println!("Error: {}", e); 40 | if let Some(backtrace) = e.backtrace() { 41 | println!("Backtrace: {:?}", backtrace); 42 | } 43 | process::exit(1); 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /test/lpm/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-lpm" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print 
= [] 16 | -------------------------------------------------------------------------------- /test/lpm/sudo: -------------------------------------------------------------------------------- 1 | 0 2 | -------------------------------------------------------------------------------- /test/macswap/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "macswap" 3 | version = "0.1.0" 4 | authors = ["Steven H. Wang "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/macswap/check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | TEST_NAME=macswap 3 | PORT_OPTIONS="dpdk:eth_pcap0,rx_pcap=data/http_lemmy.pcap,tx_pcap=/tmp/out.pcap" 4 | ../../build.sh run $TEST_NAME -p $PORT_OPTIONS -c 1 --dur 5 5 | 6 | tcpdump -ter /tmp/out.pcap | tee /dev/tty | diff - data/expect.out 7 | 8 | result=$? 
9 | echo ---- 10 | if [[ $result != 0 ]]; then 11 | echo FAIL 12 | exit $result 13 | else 14 | echo PASS 15 | fi 16 | -------------------------------------------------------------------------------- /test/macswap/data/http_lemmy.pcap: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NetSys/NetBricks/71dfb94beaeac107d7cd359985f9bd66fd223e1b/test/macswap/data/http_lemmy.pcap -------------------------------------------------------------------------------- /test/macswap/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | #![feature(asm)] 3 | extern crate e2d2; 4 | extern crate fnv; 5 | extern crate getopts; 6 | extern crate rand; 7 | extern crate time; 8 | use self::nf::*; 9 | use e2d2::config::{basic_opts, read_matches}; 10 | use e2d2::interface::*; 11 | use e2d2::operators::*; 12 | use e2d2::scheduler::*; 13 | use std::env; 14 | use std::fmt::Display; 15 | use std::process; 16 | use std::sync::Arc; 17 | use std::thread; 18 | use std::time::Duration; 19 | mod nf; 20 | 21 | fn test(ports: Vec, sched: &mut S) 22 | where 23 | T: PacketRx + PacketTx + Display + Clone + 'static, 24 | S: Scheduler + Sized, 25 | { 26 | for port in &ports { 27 | println!("Receiving port {}", port); 28 | } 29 | 30 | let pipelines: Vec<_> = ports 31 | .iter() 32 | .map(|port| macswap(ReceiveBatch::new(port.clone())).send(port.clone())) 33 | .collect(); 34 | println!("Running {} pipelines", pipelines.len()); 35 | for pipeline in pipelines { 36 | sched.add_task(pipeline).unwrap(); 37 | } 38 | } 39 | 40 | fn main() { 41 | let mut opts = basic_opts(); 42 | opts.optopt( 43 | "", 44 | "dur", 45 | "Test duration", 46 | "If this option is set to a nonzero value, then the \ 47 | test will exit after X seconds.", 48 | ); 49 | 50 | let args: Vec = env::args().collect(); 51 | let matches = match opts.parse(&args[1..]) { 52 | Ok(m) => m, 53 | Err(f) => 
panic!(f.to_string()), 54 | }; 55 | let mut configuration = read_matches(&matches, &opts); 56 | configuration.pool_size = 255; 57 | 58 | let test_duration: u64 = matches 59 | .opt_str("dur") 60 | .unwrap_or_else(|| String::from("0")) 61 | .parse() 62 | .expect("Could not parse test duration"); 63 | 64 | match initialize_system(&configuration) { 65 | Ok(mut context) => { 66 | context.start_schedulers(); 67 | 68 | context.add_pipeline_to_run(Arc::new(move |p, s: &mut StandaloneScheduler| test(p, s))); 69 | context.execute(); 70 | 71 | if test_duration != 0 { 72 | thread::sleep(Duration::from_secs(test_duration)); 73 | } else { 74 | loop { 75 | thread::sleep(Duration::from_secs(1)); 76 | } 77 | } 78 | } 79 | Err(ref e) => { 80 | println!("Error: {}", e); 81 | if let Some(backtrace) = e.backtrace() { 82 | println!("Backtrace: {:?}", backtrace); 83 | } 84 | process::exit(1); 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /test/macswap/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::headers::*; 2 | use e2d2::operators::*; 3 | 4 | pub fn macswap>( 5 | parent: T, 6 | ) -> TransformBatch> { 7 | parent.parse::().transform(box move |pkt| { 8 | assert!(pkt.refcnt() == 1); 9 | let hdr = pkt.get_mut_header(); 10 | hdr.swap_addresses(); 11 | }) 12 | } 13 | -------------------------------------------------------------------------------- /test/maglev/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-maglev" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | twox-hash = "*" 13 | 14 | [features] 15 | default = [] 16 | print = [] 17 | -------------------------------------------------------------------------------- 
/test/nat/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-nat" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/nat/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | extern crate e2d2; 3 | extern crate fnv; 4 | extern crate getopts; 5 | extern crate rand; 6 | extern crate time; 7 | use self::nf::*; 8 | use e2d2::config::{basic_opts, read_matches}; 9 | use e2d2::interface::*; 10 | use e2d2::operators::*; 11 | use e2d2::scheduler::*; 12 | use std::env; 13 | use std::fmt::Display; 14 | use std::net::Ipv4Addr; 15 | use std::process; 16 | use std::sync::Arc; 17 | use std::thread; 18 | use std::time::Duration; 19 | mod nf; 20 | 21 | const CONVERSION_FACTOR: f64 = 1000000000.; 22 | 23 | fn test(ports: Vec, sched: &mut S) 24 | where 25 | T: PacketRx + PacketTx + Display + Clone + 'static, 26 | S: Scheduler + Sized, 27 | { 28 | println!("Receiving started"); 29 | 30 | let mut pipelines: Vec<_> = ports 31 | .iter() 32 | .map(|port| { 33 | nat( 34 | ReceiveBatch::new(port.clone()), 35 | sched, 36 | &Ipv4Addr::new(10, 0, 0, 1), 37 | ).send(port.clone()) 38 | }) 39 | .collect(); 40 | println!("Running {} pipelines", pipelines.len()); 41 | if pipelines.len() > 1 { 42 | sched.add_task(merge(pipelines)).unwrap() 43 | } else { 44 | sched.add_task(pipelines.pop().unwrap()).unwrap() 45 | }; 46 | } 47 | 48 | fn main() { 49 | let opts = basic_opts(); 50 | 51 | let args: Vec = env::args().collect(); 52 | let matches = match opts.parse(&args[1..]) { 53 | Ok(m) => m, 54 | Err(f) => panic!(f.to_string()), 55 | }; 56 | let 
configuration = read_matches(&matches, &opts); 57 | 58 | match initialize_system(&configuration) { 59 | Ok(mut context) => { 60 | context.start_schedulers(); 61 | context.add_pipeline_to_run(Arc::new(move |p, s: &mut StandaloneScheduler| test(p, s))); 62 | context.execute(); 63 | 64 | let mut pkts_so_far = (0, 0); 65 | let mut start = time::precise_time_ns() as f64 / CONVERSION_FACTOR; 66 | let sleep_time = Duration::from_millis(500); 67 | loop { 68 | thread::sleep(sleep_time); // Sleep for a bit 69 | let now = time::precise_time_ns() as f64 / CONVERSION_FACTOR; 70 | if now - start > 1.0 { 71 | let mut rx = 0; 72 | let mut tx = 0; 73 | for port in context.ports.values() { 74 | for q in 0..port.rxqs() { 75 | let (rp, tp) = port.stats(q); 76 | rx += rp; 77 | tx += tp; 78 | } 79 | } 80 | let pkts = (rx, tx); 81 | println!( 82 | "{:.2} OVERALL RX {:.2} TX {:.2}", 83 | now - start, 84 | (pkts.0 - pkts_so_far.0) as f64 / (now - start), 85 | (pkts.1 - pkts_so_far.1) as f64 / (now - start) 86 | ); 87 | start = now; 88 | pkts_so_far = pkts; 89 | } 90 | } 91 | } 92 | Err(ref e) => { 93 | println!("Error: {}", e); 94 | if let Some(backtrace) = e.backtrace() { 95 | println!("Backtrace: {:?}", backtrace); 96 | } 97 | process::exit(1); 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /test/nat/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::headers::*; 2 | use e2d2::operators::*; 3 | use e2d2::scheduler::*; 4 | use e2d2::utils::*; 5 | use fnv::FnvHasher; 6 | use std::collections::HashMap; 7 | use std::convert::From; 8 | use std::hash::BuildHasherDefault; 9 | use std::net::Ipv4Addr; 10 | 11 | #[derive(Clone, Default)] 12 | struct Unit; 13 | #[derive(Clone, Copy, Default)] 14 | struct FlowUsed { 15 | pub flow: Flow, 16 | pub time: u64, 17 | pub used: bool, 18 | } 19 | 20 | type FnvHash = BuildHasherDefault; 21 | pub fn nat>( 22 | parent: T, 23 | _s: &mut Scheduler, 
24 | nat_ip: &Ipv4Addr, 25 | ) -> CompositionBatch { 26 | let ip = u32::from(*nat_ip); 27 | let mut port_hash = HashMap::::with_capacity_and_hasher(65536, Default::default()); 28 | let mut flow_vec: Vec = (MIN_PORT..65535).map(|_| Default::default()).collect(); 29 | let mut next_port = 1024; 30 | const MIN_PORT: u16 = 1024; 31 | const MAX_PORT: u16 = 65535; 32 | let pipeline = parent.parse::().transform(box move |pkt| { 33 | // let hdr = pkt.get_mut_header(); 34 | let payload = pkt.get_mut_payload(); 35 | if let Some(flow) = ipv4_extract_flow(payload) { 36 | let found = match port_hash.get(&flow) { 37 | Some(s) => { 38 | s.ipv4_stamp_flow(payload); 39 | true 40 | } 41 | None => false, 42 | }; 43 | if !found { 44 | if next_port < MAX_PORT { 45 | let assigned_port = next_port; //FIXME. 46 | next_port += 1; 47 | flow_vec[assigned_port as usize].flow = flow; 48 | flow_vec[assigned_port as usize].used = true; 49 | let mut outgoing_flow = flow.clone(); 50 | outgoing_flow.src_ip = ip; 51 | outgoing_flow.src_port = assigned_port; 52 | let rev_flow = outgoing_flow.reverse_flow(); 53 | 54 | port_hash.insert(flow, outgoing_flow); 55 | port_hash.insert(rev_flow, flow.reverse_flow()); 56 | 57 | outgoing_flow.ipv4_stamp_flow(payload); 58 | } 59 | } 60 | } 61 | }); 62 | pipeline.compose() 63 | } 64 | -------------------------------------------------------------------------------- /test/othertest/feature.rs: -------------------------------------------------------------------------------- 1 | #![feature(cfg_target_feature)] 2 | #![feature(box_syntax)] 3 | 4 | #[cfg(any(target_feature="avx"))] 5 | fn test_comp() { 6 | println!("Found avx") 7 | } 8 | 9 | #[cfg(not(any(target_feature="avx")))] 10 | fn test_comp() { 11 | println!("Did not find avx") 12 | } 13 | 14 | fn main() { 15 | //test_comp(); 16 | //let f = box |x| { x + 5 }; 17 | //println!("Value {}", f(22)); 18 | let x = vec![0, 1, 2, 3, 4]; 19 | let mut y = x.iter_mut().cycle(); 20 | for c in 1..20 { 21 | println!("c {} iter 
{}", c, y.next().expect("Inf")); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /test/othertest/test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NetSys/NetBricks/71dfb94beaeac107d7cd359985f9bd66fd223e1b/test/othertest/test -------------------------------------------------------------------------------- /test/packet_generation/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "packet-test" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/packet_generation/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::common::*; 2 | use e2d2::headers::*; 3 | use e2d2::interface::*; 4 | use e2d2::queues::*; 5 | use e2d2::scheduler::*; 6 | use std::net::Ipv4Addr; 7 | use std::str::FromStr; 8 | 9 | pub struct PacketCreator { 10 | mac: MacHeader, 11 | ip: IpHeader, 12 | producer: MpscProducer, 13 | } 14 | 15 | impl PacketCreator { 16 | pub fn new(producer: MpscProducer) -> PacketCreator { 17 | let mut mac = MacHeader::new(); 18 | mac.dst = MacAddress { 19 | addr: [0x68, 0x05, 0xca, 0x00, 0x00, 0xac], 20 | }; 21 | mac.src = MacAddress { 22 | addr: [0x68, 0x05, 0xca, 0x00, 0x00, 0x01], 23 | }; 24 | mac.set_etype(0x0800); 25 | let mut ip = IpHeader::new(); 26 | ip.set_src(u32::from(Ipv4Addr::from_str("10.0.0.1").unwrap())); 27 | ip.set_dst(u32::from(Ipv4Addr::from_str("10.0.0.5").unwrap())); 28 | ip.set_ttl(128); 29 | ip.set_version(4); 30 | ip.set_ihl(5); 31 | ip.set_length(20); 32 | PacketCreator { 33 | mac: mac, 34 | ip: ip, 35 | 
producer: producer, 36 | } 37 | } 38 | 39 | #[inline] 40 | fn initialize_packet(&self, pkt: Packet) -> Packet { 41 | pkt.push_header(&self.mac) 42 | .unwrap() 43 | .push_header(&self.ip) 44 | .unwrap() 45 | } 46 | 47 | #[inline] 48 | pub fn create_packet(&self) -> Packet { 49 | self.initialize_packet(new_packet().unwrap()) 50 | } 51 | } 52 | 53 | impl Executable for PacketCreator { 54 | fn execute(&mut self) { 55 | for _ in 0..16 { 56 | self.producer.enqueue_one(self.create_packet()); 57 | } 58 | } 59 | fn dependencies(&mut self) -> Vec { 60 | vec![] 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /test/packet_test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-pkt" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/packet_test/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::headers::*; 2 | use e2d2::operators::*; 3 | pub fn delay>(parent: T) -> TransformBatch { 4 | let mut m = MacHeader::new(); 5 | m.dst = MacAddress { 6 | addr: [0x68, 0x05, 0xca, 0x33, 0xff, 0x79], 7 | }; 8 | m.src = MacAddress { 9 | addr: [0x68, 0x05, 0xca, 0x33, 0xfd, 0xc8], 10 | }; 11 | m.set_etype(0x800); 12 | parent.transform(box move |pkt| { 13 | pkt.write_header(&m, 0).unwrap(); 14 | }) 15 | // parent.parse::() 16 | // .transform(box move |pkt| { 17 | // assert!(pkt.refcnt() == 1); 18 | // let mut hdr = pkt.get_mut_header(); 19 | // /let src = hdr.src; 20 | // hdr.src[2] += 1; 21 | // hdr.dst[1] += 1; 22 | // delay_loop(delay); 23 | // }) 24 | } 25 | 
--------------------------------------------------------------------------------
/test/reset-parse/.gitignore:
--------------------------------------------------------------------------------
# Compiled files
*.o
*.so
*.rlib
*.dll

# Executables
*.exe

# Generated by Cargo
/target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock
Cargo.lock
--------------------------------------------------------------------------------
/test/reset-parse/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "zcsi-reset"
version = "0.1.0"
authors = ["Aurojit Panda "]

[dependencies]
e2d2 = { path = "../../framework", features = ["performance"] }
time = ">=0.1.0"
getopts = "*"
rand = "0.3"
fnv = "*"

[features]
default = []
print = []
--------------------------------------------------------------------------------
/test/reset-parse/src/nf.rs:
--------------------------------------------------------------------------------
use e2d2::headers::*;
use e2d2::operators::*;

/// Emits a single `nop`; the unit of work for the busy-wait below.
#[inline]
fn lat() {
    unsafe {
        asm!("nop"
             :
             :
             :
             : "volatile");
    }
}

/// Busy-waits for `delay` iterations to emulate per-packet processing cost.
#[inline]
fn delay_loop(delay: u64) {
    let mut d = 0;
    while d < delay {
        lat();
        d += 1;
    }
}

/// Parses the Ethernet header, swaps source/destination MAC addresses, burns
/// `delay` busy-wait iterations per packet, then resets parsing and asserts
/// the refcount is still 1 on the way out.
///
/// NOTE(review): the `<...>` generic annotations were stripped by the dump
/// this file was recovered from; they have been reconstructed here and should
/// be confirmed against the upstream repository.
pub fn delay<T: 'static + Batch<Header = NullHeader>>(
    parent: T,
    delay: u64,
) -> MapBatch<NullHeader, ResetParsingBatch<TransformBatch<MacHeader, ParsedBatch<MacHeader, T>>>> {
    parent
        .parse::<MacHeader>()
        .transform(box move |pkt| {
            assert!(pkt.refcnt() == 1);
            let hdr = pkt.get_mut_header();
            hdr.swap_addresses();
            delay_loop(delay);
        })
        .reset()
        .map(box move |pkt| assert!(pkt.refcnt() == 1))
}
--------------------------------------------------------------------------------
/test/sctp-test/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "zcsi-sctp"
version = "0.1.0"
authors = ["Aurojit Panda "]

[dependencies]
e2d2 = { path = "../../framework", features = ["performance", "sctp"] }
time = ">=0.1.0"
getopts = "*"
rand = "0.3"
fnv = "*"
nix = "*"
# Figure out if we want this permanently or just for now.
rust-sctp = { git="https://github.com/netsys/rust-sctp" }

[features]
default = []
print = []
--------------------------------------------------------------------------------
/test/sctp-test/control-test/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "zcsi-ctl"
version = "0.1.0"
authors = ["Aurojit Panda "]

[dependencies]
e2d2 = { path = "../../framework", features = ["performance"] }
time = ">=0.1.0"
getopts = "*"
rand = "0.3"
fnv = "*"
nix = "*"

[features]
default = []
print = []
--------------------------------------------------------------------------------
/test/sctp-test/control-test/ctl/controller.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python2
import socket
import sctp
def test():
    tcp = sctp.sctpsocket_tcp(socket.AF_INET)
    tcp.connect(('127.0.0.1', 8001))
    tcp.sctp_send("Hello")
if __name__ == "__main__":
    test()

--------------------------------------------------------------------------------
/test/sctp-test/control-test/ctl/requirements.txt:
--------------------------------------------------------------------------------
pysctp
--------------------------------------------------------------------------------
/test/sctp-test/control-test/src/control.rs:
--------------------------------------------------------------------------------
use e2d2::control::tcp::*;
use std::net::*;
use std::io::Read;
use nix::errno;

/// Control messages have a fixed wire size: an 8-byte count followed by six
/// MAC-address octets (cf. ctl/controller.py: struct.pack('qBBBBBB', ...)).
const MSG_SIZE: usize = 14;

/// TCP control-plane agent: drains fixed-size control messages from the
/// connection, rescheduling itself for reads until the peer hangs up or a
/// fatal socket error occurs.
pub struct ControlListener {
    scheduler: TcpScheduler,
    stream: TcpStream,
    buffer: Vec<u8>,
    // Bytes of the current (possibly partial) message received so far.
    read_till: usize,
}
impl TcpControlAgent for ControlListener {
    fn new(address: SocketAddr, stream: TcpStream, scheduler: TcpScheduler) -> ControlListener {
        println!("New connection from {}", address);
        scheduler.schedule_read();
        ControlListener {
            scheduler: scheduler,
            stream: stream,
            buffer: vec![0; MSG_SIZE],
            read_till: 0,
        }
    }

    fn handle_read_ready(&mut self) -> bool {
        let mut schedule = true;
        // Keep reading until the socket would block (EAGAIN) or fails; any
        // non-EAGAIN error stops rescheduling and tears the agent down.
        while {
            let read_till = self.read_till;
            let r = self.stream.read(&mut self.buffer[read_till..]);
            match r {
                Ok(r) => {
                    if r > 0 {
                        // Fix: advance the partial-message cursor. The original
                        // left `read_till` at 0 after a short read, so a
                        // message split across reads was silently overwritten.
                        self.read_till = read_till + r;
                        if self.read_till == MSG_SIZE {
                            //println!("Complete message");
                            self.read_till = 0;
                        }
                    };
                    r > 0
                },
                Err(e) => {
                    if let Some(e) = e.raw_os_error() {
                        if errno::from_i32(e) != errno::Errno::EAGAIN {
                            schedule = false;
                        } else {
                            schedule = true;
                        }
                    } else {
                        schedule = false;
                    }
                    false
                },
            }
        } {
        }
        if schedule {
            self.scheduler.schedule_read();
        };
        schedule
    }

    fn handle_write_ready(&mut self) -> bool {
        panic!("No writes expected");
    }

    fn handle_hup(&mut self) -> bool {
        println!("Hanging up");
        false
    }
}
--------------------------------------------------------------------------------
/test/sctp-test/control-test/src/nf.rs:
--------------------------------------------------------------------------------
use e2d2::headers::*;
use e2d2::operators::*;

#[inline]
fn lat() {
    unsafe {
        asm!("nop"
             :
             :
             :
             : "volatile");
    }
}

#[inline]
fn delay_loop(delay: u64) {
    let mut d = 0;
    while d < delay {
        lat();
        d += 1;
    }
}

pub fn delay>(parent: T,
delay: u64) 26 | -> TransformBatch> { 27 | parent.parse::() 28 | .transform(box move |pkt| { 29 | assert!(pkt.refcnt() == 1); 30 | let hdr = pkt.get_mut_header(); 31 | hdr.swap_addresses(); 32 | delay_loop(delay); 33 | }) 34 | } 35 | -------------------------------------------------------------------------------- /test/sctp-test/ctl/controller.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import asyncio 3 | import struct 4 | async def connect_and_test(inter_message_gap): 5 | (reader, writer) = await asyncio.open_connection('127.0.0.1', 8001) 6 | print("Connected") 7 | times = int(300.0 / float(inter_message_gap)) 8 | for i in range(0, times): 9 | to_write = struct.pack('qBBBBBB', 2, 0x68, 0x05, 0xca, 0x33, 0xfd, 0xc9) 10 | writer.write(to_write) 11 | await writer.drain() 12 | await asyncio.sleep(inter_message_gap) 13 | 14 | if __name__ == "__main__": 15 | loop = asyncio.get_event_loop() 16 | loop.run_until_complete(connect_and_test(0.0001)) 17 | loop.close() 18 | -------------------------------------------------------------------------------- /test/sctp-test/src/control.rs: -------------------------------------------------------------------------------- 1 | use e2d2::control::IOScheduler; 2 | use e2d2::control::sctp::*; 3 | use nix::errno; 4 | use sctp::*; 5 | use std::net::SocketAddr; 6 | 7 | pub struct ControlListener { 8 | scheduler: IOScheduler, 9 | stream: SctpStream, 10 | buffer: Vec, 11 | } 12 | impl SctpControlAgent for ControlListener { 13 | fn new(address: SocketAddr, stream: SctpStream, scheduler: IOScheduler) -> ControlListener { 14 | println!("New connection from {}", address); 15 | scheduler.schedule_read(); 16 | ControlListener { 17 | scheduler: scheduler, 18 | stream: stream, 19 | buffer: (0..1024).map(|_| 0).collect(), 20 | } 21 | } 22 | 23 | fn handle_read_ready(&mut self) -> bool { 24 | let mut schedule = true; 25 | while { 26 | let read = self.stream.recvmsg(&mut 
self.buffer[..]); 27 | match read { 28 | Ok((size, stream)) => { 29 | println!("Received message on stream {} of size {}", stream, size); 30 | true 31 | } 32 | Err(e) => { 33 | if let Some(e) = e.raw_os_error() { 34 | if errno::from_i32(e) != errno::Errno::EAGAIN { 35 | schedule = false; 36 | } else { 37 | schedule = true; 38 | } 39 | } else { 40 | schedule = false; 41 | } 42 | false 43 | } 44 | } 45 | } {} 46 | if schedule { 47 | self.scheduler.schedule_read(); 48 | }; 49 | schedule 50 | } 51 | 52 | fn handle_write_ready(&mut self) -> bool { 53 | panic!("No writes expected"); 54 | } 55 | 56 | fn handle_hup(&mut self) -> bool { 57 | println!("Hanging up"); 58 | false 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /test/sctp-test/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::headers::*; 2 | use e2d2::operators::*; 3 | 4 | #[inline] 5 | fn lat() { 6 | unsafe { 7 | asm!("nop" 8 | : 9 | : 10 | : 11 | : "volatile"); 12 | } 13 | } 14 | 15 | #[inline] 16 | fn delay_loop(delay: u64) { 17 | let mut d = 0; 18 | while d < delay { 19 | lat(); 20 | d += 1; 21 | } 22 | } 23 | 24 | pub fn delay>( 25 | parent: T, 26 | delay: u64, 27 | ) -> TransformBatch> { 28 | parent.parse::().transform(box move |pkt| { 29 | assert!(pkt.refcnt() == 1); 30 | let hdr = pkt.get_mut_header(); 31 | hdr.swap_addresses(); 32 | delay_loop(delay); 33 | }) 34 | } 35 | -------------------------------------------------------------------------------- /test/shutdown-test/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 14 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 15 | Cargo.lock 16 | 
-------------------------------------------------------------------------------- /test/shutdown-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-shutdown" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/shutdown-test/src/nf.rs: -------------------------------------------------------------------------------- 1 | use e2d2::headers::*; 2 | use e2d2::operators::*; 3 | 4 | #[inline] 5 | fn lat() { 6 | unsafe { 7 | asm!("nop" 8 | : 9 | : 10 | : 11 | : "volatile"); 12 | } 13 | } 14 | 15 | #[inline] 16 | fn delay_loop(delay: u64) { 17 | let mut d = 0; 18 | while d < delay { 19 | lat(); 20 | d += 1; 21 | } 22 | } 23 | 24 | pub fn delay>( 25 | parent: T, 26 | delay: u64, 27 | ) -> TransformBatch> { 28 | parent.parse::().transform(box move |pkt| { 29 | assert!(pkt.refcnt() == 1); 30 | let hdr = pkt.get_mut_header(); 31 | hdr.swap_addresses(); 32 | delay_loop(delay); 33 | }) 34 | } 35 | -------------------------------------------------------------------------------- /test/tcp_check/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-tcp" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | 13 | [features] 14 | default = [] 15 | print = [] 16 | -------------------------------------------------------------------------------- /test/tcp_check/src/nf.rs: -------------------------------------------------------------------------------- 1 | use 
e2d2::headers::*; 2 | use e2d2::operators::*; 3 | 4 | #[inline] 5 | pub fn tcp_nf>(parent: T) -> CompositionBatch { 6 | parent 7 | .parse::() 8 | .map(box |pkt| { 9 | println!("hdr {}", pkt.get_header()); 10 | let payload = pkt.get_payload(); 11 | print!("Payload: "); 12 | for p in payload { 13 | print!("{:x} ", p); 14 | } 15 | println!(""); 16 | }) 17 | .parse::() 18 | .map(box |pkt| { 19 | let hdr = pkt.get_header(); 20 | let flow = hdr.flow().unwrap(); 21 | let payload = pkt.get_payload(); 22 | println!("hdr {} ihl {} offset {}", hdr, hdr.ihl(), hdr.offset()); 23 | println!( 24 | "payload: {:x} {:x} {:x} {:x}", 25 | payload[0], payload[1], payload[2], payload[3] 26 | ); 27 | let (src, dst) = (flow.src_port, flow.dst_port); 28 | println!("Src {} dst {}", src, dst); 29 | }) 30 | .parse::() 31 | .map(box |pkt| { 32 | println!("UDP header {}", pkt.get_header()); 33 | }) 34 | .compose() 35 | } 36 | -------------------------------------------------------------------------------- /test/tcp_payload/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "tcp_payload" 3 | version = "0.1.0" 4 | authors = ["Steven H. Wang "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | twox-hash = "*" 13 | 14 | [features] 15 | default = [] 16 | print = [] 17 | -------------------------------------------------------------------------------- /test/tcp_payload/check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | TEST_NAME=tcp_payload 3 | PORT_OPTIONS="dpdk:eth_pcap0,rx_pcap=data/http_lemmy.pcap,tx_pcap=/tmp/out.pcap" 4 | ../../build.sh run $TEST_NAME -p $PORT_OPTIONS -c 1\ 5 | |& tee /dev/tty | sed -n '1,/BEGIN TEST OUTPUT/!p' | diff - data/expect.out 6 | 7 | result=$? 
8 | echo ---- 9 | if [[ $result != 0 ]]; then 10 | echo FAIL 11 | exit $result 12 | else 13 | echo PASS 14 | fi 15 | -------------------------------------------------------------------------------- /test/tcp_payload/data/expect.out: -------------------------------------------------------------------------------- 1 | Starting scheduler on 1 2 | GET / HTTP/1.1 3 | Host: lemmykoopa.com 4 | User-Agent: curl/7.47.0 5 | Accept: */* 6 | 7 | 8 | HTTP/1.1 200 OK 9 | Date: Fri, 09 Jun 2017 16:40:07 GMT 10 | Content-Type: text/html 11 | Content-Length: 2719 12 | Connection: keep-alive 13 | Keep-Alive: timeout=30 14 | Server: Apache/2 15 | Last-Modified: Sat, 03 Jul 2010 20:22:32 GMT 16 | ETag: "a9f-48a8176991200" 17 | Cache-Control: max-age=3600 18 | Expires: Fri, 09 Jun 2017 17:39:34 GMT 19 | Accept-Ranges: bytes 20 | Age: 33 21 | 22 | 23 | 24 | 25 | 26 | 27 | Lemmy�s Land 28 | 29 | 30 | 31 |
33 |

34 | 35 |

Ha ha! I am Lemmy 36 | Koopa, and you have foolishly fallen right into my trap. But don't worry- 37 | there is nothing to fear in Lemmy's Land, unless the idea of 43 fun-filled 38 | sections fills you with terror. If you feel brave and want to have some 39 | fun, I dare you to click on me and advance into my trap. But if you feel 40 | the need to stick to some crazy schedule and don't want to have a good 41 | time, then run over to Mario and he will rescue you. 42 |

43 |

Click on me to go into my Land! I dare ya! 44 |

Ha! Not even Mario can help you! There is no way out, so you may as well click on me. 45 |

Thank you for 46 | being the 47 |
48 |
person to allow 49 | me to capture you. Enjoy!

50 | 51 |

Disclaimer: "Super" Mario 52 | and all related characters are property of Nintendo and I didn't make them. 53 | Clawdia, Susan, Bagels, and Playful were introduced by me, other original 54 | characters are property of their respective authors. Most content on this 55 | site is the original work of creative fans and should not be taken as factual 56 | depictions of the official source material. All submissions have been accredited 57 | to their author(s); submissions without a credit belong to me. If you find 58 | something that has been stolen from another site, please 59 | Email 60 | me so that I may remove it. Taking material from Lemmy's Land for anything 61 | other than personal use is prohibited except by permission of the author. 62 | Lemmy's Land is copyright unto to me, all rights and lefts are reserved. 63 | Straights are still available. 64 | 65 | 66 | 67 | packet received for untracked flow did not have SYN flag, skipping. 68 | -------------------------------------------------------------------------------- /test/tcp_payload/data/http_lemmy.pcap: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NetSys/NetBricks/71dfb94beaeac107d7cd359985f9bd66fd223e1b/test/tcp_payload/data/http_lemmy.pcap -------------------------------------------------------------------------------- /test/tcp_payload/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | #![feature(asm)] 3 | extern crate e2d2; 4 | extern crate fnv; 5 | extern crate getopts; 6 | extern crate rand; 7 | extern crate time; 8 | use self::nf::*; 9 | use e2d2::allocators::CacheAligned; 10 | use e2d2::config::*; 11 | use e2d2::interface::*; 12 | use e2d2::operators::*; 13 | use e2d2::scheduler::*; 14 | use std::env; 15 | use std::sync::Arc; 16 | use std::thread::sleep; 17 | use std::time::Duration; 18 | mod nf; 19 | 20 | fn test(ports: Vec>, sched: &mut S) { 21 | let 
pipelines: Vec<_> = ports 22 | .iter() 23 | .map(|port| reconstruction(ReceiveBatch::new(port.clone()), sched).send(port.clone())) 24 | .collect(); 25 | for pipeline in pipelines { 26 | sched.add_task(pipeline).unwrap(); 27 | } 28 | } 29 | 30 | fn main() { 31 | let opts = basic_opts(); 32 | 33 | let args: Vec = env::args().collect(); 34 | let matches = match opts.parse(&args[1..]) { 35 | Ok(m) => m, 36 | Err(f) => panic!(f.to_string()), 37 | }; 38 | let mut configuration = read_matches(&matches, &opts); 39 | configuration.pool_size = 256; // Travis allows 512 hugepages, but reliably continguously produces 256. 40 | 41 | let mut config = initialize_system(&configuration).unwrap(); 42 | config.start_schedulers(); 43 | 44 | config.add_pipeline_to_run(Arc::new(move |p, s: &mut StandaloneScheduler| test(p, s))); 45 | println!("BEGIN TEST OUTPUT"); 46 | config.execute(); 47 | 48 | sleep(Duration::from_secs(10)); 49 | } 50 | -------------------------------------------------------------------------------- /test/tcp_reconstruction/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zcsi-tcprecon" 3 | version = "0.1.0" 4 | authors = ["Aurojit Panda "] 5 | 6 | [dependencies] 7 | e2d2 = { path = "../../framework", features = ["performance"] } 8 | time = ">=0.1.0" 9 | getopts = "*" 10 | rand = "0.3" 11 | fnv = "*" 12 | twox-hash = "*" 13 | 14 | [features] 15 | default = [] 16 | print = [] 17 | -------------------------------------------------------------------------------- /test/tcp_reconstruction/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(box_syntax)] 2 | #![feature(asm)] 3 | extern crate e2d2; 4 | extern crate fnv; 5 | extern crate getopts; 6 | extern crate rand; 7 | extern crate time; 8 | use self::nf::*; 9 | use e2d2::allocators::CacheAligned; 10 | use e2d2::config::*; 11 | use e2d2::interface::*; 12 | use e2d2::operators::*; 13 | use 
e2d2::scheduler::*; 14 | use std::env; 15 | use std::sync::Arc; 16 | use std::thread; 17 | use std::time::Duration; 18 | mod nf; 19 | 20 | const CONVERSION_FACTOR: f64 = 1000000000.; 21 | 22 | fn test(ports: Vec>, sched: &mut S) { 23 | for port in &ports { 24 | println!( 25 | "Receiving port {} rxq {} txq {}", 26 | port.port.mac_address(), 27 | port.rxq(), 28 | port.txq() 29 | ); 30 | } 31 | 32 | let pipelines: Vec<_> = ports 33 | .iter() 34 | .map(|port| reconstruction(ReceiveBatch::new(port.clone()), sched).send(port.clone())) 35 | .collect(); 36 | println!("Running {} pipelines", pipelines.len()); 37 | for pipeline in pipelines { 38 | sched.add_task(pipeline).unwrap(); 39 | } 40 | } 41 | 42 | fn main() { 43 | let opts = basic_opts(); 44 | 45 | let args: Vec = env::args().collect(); 46 | let matches = match opts.parse(&args[1..]) { 47 | Ok(m) => m, 48 | Err(f) => panic!(f.to_string()), 49 | }; 50 | let configuration = read_matches(&matches, &opts); 51 | 52 | let mut config = initialize_system(&configuration).unwrap(); 53 | config.start_schedulers(); 54 | 55 | config.add_pipeline_to_run(Arc::new(move |p, s: &mut StandaloneScheduler| test(p, s))); 56 | config.execute(); 57 | 58 | let mut pkts_so_far = (0, 0); 59 | let mut last_printed = 0.; 60 | const MAX_PRINT_INTERVAL: f64 = 30.; 61 | const PRINT_DELAY: f64 = 15.; 62 | let sleep_delay = (PRINT_DELAY / 2.) 
as u64; 63 | let mut start = time::precise_time_ns() as f64 / CONVERSION_FACTOR; 64 | let sleep_time = Duration::from_millis(sleep_delay); 65 | println!("0 OVERALL RX 0.00 TX 0.00 CYCLE_PER_DELAY 0 0 0"); 66 | loop { 67 | thread::sleep(sleep_time); // Sleep for a bit 68 | let now = time::precise_time_ns() as f64 / CONVERSION_FACTOR; 69 | if now - start > PRINT_DELAY { 70 | let mut rx = 0; 71 | let mut tx = 0; 72 | for port in config.ports.values() { 73 | for q in 0..port.rxqs() { 74 | let (rp, tp) = port.stats(q); 75 | rx += rp; 76 | tx += tp; 77 | } 78 | } 79 | let pkts = (rx, tx); 80 | let rx_pkts = pkts.0 - pkts_so_far.0; 81 | if rx_pkts > 0 || now - last_printed > MAX_PRINT_INTERVAL { 82 | println!( 83 | "{:.2} OVERALL RX {:.2} TX {:.2}", 84 | now - start, 85 | rx_pkts as f64 / (now - start), 86 | (pkts.1 - pkts_so_far.1) as f64 / (now - start) 87 | ); 88 | last_printed = now; 89 | start = now; 90 | pkts_so_far = pkts; 91 | } 92 | } 93 | } 94 | } 95 | --------------------------------------------------------------------------------