├── .dockerignore ├── python ├── scanner │ ├── __init__.py │ └── engine │ │ └── __init__.py └── scannerpy │ ├── job.py │ ├── io.py │ ├── build_flags.py │ ├── partitioner.py │ └── util.py ├── .gdbinit ├── examples ├── tutorials │ ├── resize_op │ │ ├── .gitignore │ │ ├── resize.proto │ │ ├── Makefile │ │ └── CMakeLists.txt │ ├── workerpy.py │ ├── masterpy.py │ ├── 09_defining_cpp_sources.py │ ├── 10_defining_cpp_sinks.py │ ├── 07_profiling.py │ ├── 08_defining_cpp_ops.py │ ├── 03_sampling.py │ └── README.md ├── apps │ ├── open-reid-feature-extraction │ │ ├── .gitignore │ │ ├── Dockerfile │ │ └── Readme.md │ ├── object_detection_tensorflow │ │ └── README.md │ ├── gcp_kubernetes │ │ ├── config.toml │ │ ├── master.py │ │ ├── Dockerfile.master │ │ ├── Dockerfile.worker │ │ ├── worker.py │ │ ├── deploy.sh │ │ ├── start_cluster.sh │ │ ├── master.yml.template │ │ ├── worker.yml.template │ │ └── example.py │ ├── aws_kubernetes │ │ ├── Dockerfile.master │ │ ├── Dockerfile.worker │ │ ├── master.py │ │ ├── scanner-config.yaml.template │ │ ├── worker.py │ │ ├── sync-dirs.sh │ │ ├── delete_eks_cluster.sh │ │ ├── spawn_staging_machine.sh │ │ ├── kubeconfig.template │ │ ├── master.yml.template │ │ ├── worker.yml.template │ │ ├── example.py │ │ ├── build_and_deploy.sh │ │ └── scale_eks_workers.sh │ ├── detectron │ │ ├── Dockerfile │ │ ├── README.md │ │ └── main.py │ ├── walkthroughs │ │ └── grayscale_conversion.py │ └── pose_detection │ │ └── main.py ├── how-tos │ ├── halide │ │ ├── resize.proto │ │ ├── CMakeLists.txt │ │ └── resize_op.cpp │ ├── python_kernel │ │ └── python.py │ └── caffe │ │ └── resnet.py └── util.py ├── docs ├── guide │ ├── installation.rst │ ├── extensions.rst │ ├── tutorial.rst │ ├── kubernetes.rst │ └── quickstart.rst ├── overview.rst ├── api │ └── scanner.rst ├── scanner_logo.png ├── _static │ ├── trace_viz.jpg │ ├── trace_worker.jpg │ ├── face_detection.jpg │ ├── pose_detection.jpg │ ├── object_detection.jpg │ ├── trace_processes.jpg │ ├── trace_worker_expand.jpg │ ├── grayscale_conversion.jpg │ └── tvnews_host_narcissism.jpg ├── _templates │ └── guidetoc.html ├── index.rst ├── Makefile ├── api.rst ├── publications.rst ├── guide.rst └── overview_h.html ├── scripts ├── doxypypy-filter.sh ├── startup_node.sh ├── travis-test.sh ├── travis-build.sh └── travis-publish.sh ├── tests ├── pytest.ini ├── conftest.py ├── test_ops.proto ├── spawn_worker.py ├── CMakeLists.txt └── videos.h ├── scanner ├── video │ ├── intel │ │ ├── CMakeLists.txt │ │ └── intel_video_decoder.h │ ├── nvidia │ │ └── CMakeLists.txt │ ├── software │ │ ├── CMakeLists.txt │ │ ├── software_video_decoder.h │ │ └── software_video_encoder.h │ ├── CMakeLists.txt │ ├── video_decoder.h │ └── video_encoder.h ├── CMakeLists.txt ├── api │ ├── CMakeLists.txt │ ├── enumerator.cpp │ ├── source.cpp │ └── sink.cpp ├── util │ ├── halide_context.h │ ├── halide_context.cpp │ ├── bbox.h │ ├── jsoncpp.h │ ├── types.h │ ├── glog.h │ ├── glog.cpp │ ├── CMakeLists.txt │ ├── image.h │ ├── queue.h │ ├── cuda.h │ ├── halide.h │ ├── profiler.inl │ ├── ffmpeg.h │ ├── fs.h │ ├── profiler.h │ └── common.cpp ├── engine │ ├── build_flags.in.cpp │ ├── source_args.proto │ ├── sample_kernel.h │ ├── CMakeLists.txt │ ├── slice_op.cpp │ ├── sample_op.cpp │ ├── unslice_op.cpp │ ├── space_op.cpp │ ├── python_kernel.h │ ├── op_registry.h │ ├── sampler_registry.h │ ├── sink_registry.h │ ├── source_registry.h │ ├── enumerator_registry.h │ ├── enumerator_registry.cpp │ ├── kernel_registry.h │ ├── column_enumerator.h │ ├── ingest.h │ ├── kernel_registry.cpp │ ├── 
enumerator_factory.h │ ├── table_meta_cache.h │ ├── source_registry.cpp │ ├── op_registry.cpp │ ├── source_factory.h │ ├── video_index_entry.h │ ├── column_source.h │ ├── column_sink.h │ ├── load_worker.h │ └── sink_registry.cpp ├── sink_args.proto ├── types.proto ├── source_args.proto ├── doxygen.md └── sampler_args.proto ├── setup.cfg ├── .travis ├── travisci_rsa.enc ├── travisci_rsa_brew.enc ├── travisci_rsa_brew.pub └── travisci_rsa.pub ├── thirdparty └── resources │ ├── cuda │ ├── libnvcuvid.so.367.48 │ └── libnvcuvid.so.387.26 │ └── mkl │ └── silent.cfg ├── docker ├── ubuntu16.04 │ ├── requirements.txt │ ├── Dockerfile.gpu9 │ ├── Dockerfile.gpu10 │ ├── Dockerfile.gpu8 │ ├── Dockerfile.cpu │ └── Dockerfile.base ├── docker-compose.yml ├── Dockerfile.scanner └── build-all-base.sh ├── .scanner.example.toml ├── .gitignore ├── deps_openvino.sh ├── cmake └── Modules │ ├── FindNVCUVID.cmake │ ├── FindHwang.cmake │ ├── FindHalide.cmake │ ├── FindEigen.cmake │ ├── FindStruck.cmake │ ├── FindGipuma.cmake │ ├── FindTinyToml.cmake │ ├── FindFolly.cmake │ ├── FindProxygen.cmake │ ├── FindGlog.cmake │ ├── FindGoogleTest.cmake │ ├── FindGFlags.cmake │ ├── FindIconv.cmake │ └── FindGRPC.cmake └── google.md /.dockerignore: -------------------------------------------------------------------------------- 1 | .gitignore -------------------------------------------------------------------------------- /python/scanner/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/scanner/engine/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gdbinit: -------------------------------------------------------------------------------- 1 | handle SIG40 nostop noprint 2 | -------------------------------------------------------------------------------- /examples/tutorials/resize_op/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | -------------------------------------------------------------------------------- /docs/guide/installation.rst: -------------------------------------------------------------------------------- 1 | .. _installation: 2 | 3 | -------------------------------------------------------------------------------- /docs/overview.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | :file: overview_h.html -------------------------------------------------------------------------------- /scripts/doxypypy-filter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | doxypypy -a -c $1 3 | -------------------------------------------------------------------------------- /tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | gpu: test requires GPU to run. -------------------------------------------------------------------------------- /docs/api/scanner.rst: -------------------------------------------------------------------------------- 1 | Scanner C++ API 2 | =============== 3 | 4 | .. 
toctree:: 5 | -------------------------------------------------------------------------------- /docs/scanner_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/scanner_logo.png -------------------------------------------------------------------------------- /examples/apps/open-reid-feature-extraction/.gitignore: -------------------------------------------------------------------------------- 1 | model_best.pth.tar 2 | *.mp4 3 | reid_features.npy -------------------------------------------------------------------------------- /scanner/video/intel/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(video_intel OBJECT 2 | intel_video_decoder.cpp) 3 | -------------------------------------------------------------------------------- /scanner/video/nvidia/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(video_nvidia OBJECT 2 | nvidia_video_decoder.cpp) 3 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | 4 | [tool:pytest] 5 | addopts = -vvs 6 | testpaths = tests 7 | -------------------------------------------------------------------------------- /.travis/travisci_rsa.enc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/.travis/travisci_rsa.enc -------------------------------------------------------------------------------- /docs/_static/trace_viz.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/trace_viz.jpg -------------------------------------------------------------------------------- /examples/apps/object_detection_tensorflow/README.md: -------------------------------------------------------------------------------- 1 | sudo apt-get install python3-tk 2 | pip3 install pillow 3 | -------------------------------------------------------------------------------- /.travis/travisci_rsa_brew.enc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/.travis/travisci_rsa_brew.enc -------------------------------------------------------------------------------- /docs/_static/trace_worker.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/trace_worker.jpg -------------------------------------------------------------------------------- /docs/_static/face_detection.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/face_detection.jpg -------------------------------------------------------------------------------- /docs/_static/pose_detection.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/pose_detection.jpg -------------------------------------------------------------------------------- /docs/_static/object_detection.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/object_detection.jpg -------------------------------------------------------------------------------- /docs/_static/trace_processes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/trace_processes.jpg -------------------------------------------------------------------------------- /docs/_static/trace_worker_expand.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/trace_worker_expand.jpg -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/config.toml: -------------------------------------------------------------------------------- 1 | [storage] 2 | type = "gcs" 3 | bucket = "k8s-test-buck" 4 | db_path = "test/scanner_db" 5 | -------------------------------------------------------------------------------- /docs/_static/grayscale_conversion.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/grayscale_conversion.jpg -------------------------------------------------------------------------------- /docs/_static/tvnews_host_narcissism.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/docs/_static/tvnews_host_narcissism.jpg -------------------------------------------------------------------------------- /examples/how-tos/halide/resize.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message ResizeArgs { 4 | int32 width = 1; 5 | int32 height = 2; 6 | } 7 | -------------------------------------------------------------------------------- /scanner/video/software/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library(video_software OBJECT 2 | software_video_decoder.cpp 3 | software_video_encoder.cpp) 4 | -------------------------------------------------------------------------------- /examples/tutorials/resize_op/resize.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message MyResizeArgs { 4 | int32 width = 1; 5 | int32 height = 2; 6 | } 7 | -------------------------------------------------------------------------------- /examples/tutorials/workerpy.py: -------------------------------------------------------------------------------- 1 | from scannerpy import Client 2 | sc = Client() 3 | 4 | sc.start_worker() 5 | 6 | import time 7 | time.sleep(1000) 8 | -------------------------------------------------------------------------------- /thirdparty/resources/cuda/libnvcuvid.so.367.48: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scanner-research/scanner/HEAD/thirdparty/resources/cuda/libnvcuvid.so.367.48 -------------------------------------------------------------------------------- /thirdparty/resources/cuda/libnvcuvid.so.387.26: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/scanner-research/scanner/HEAD/thirdparty/resources/cuda/libnvcuvid.so.387.26 -------------------------------------------------------------------------------- /examples/tutorials/masterpy.py: -------------------------------------------------------------------------------- 1 | from scannerpy import Client 2 | sc = Client(workers=['ocean.pdl.local.cmu.edu:15559', 'crissy.pdl.local.cmu.edu:15559']) 3 | -------------------------------------------------------------------------------- /examples/tutorials/09_defining_cpp_sources.py: -------------------------------------------------------------------------------- 1 | # This tutorial is still being written. Send an email to apoms@cs.cmu.edu if you are interested in this feature. 2 | -------------------------------------------------------------------------------- /examples/tutorials/10_defining_cpp_sinks.py: -------------------------------------------------------------------------------- 1 | # This tutorial is still being written. Send an email to apoms@cs.cmu.edu if you are interested in this feature. 2 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | def pytest_addoption(parser): 4 | parser.addoption('--runslow', action='store_true', 5 | help='Run slow tests') 6 | -------------------------------------------------------------------------------- /scanner/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SCANNER_OBJS) 2 | add_subdirectory(api) 3 | add_subdirectory(engine) 4 | add_subdirectory(video) 5 | add_subdirectory(util) 6 | set(SCANNER_OBJS ${SCANNER_OBJS} PARENT_SCOPE) 7 | -------------------------------------------------------------------------------- /scanner/api/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SOURCE_FILES 2 | frame.cpp 3 | kernel.cpp 4 | op.cpp 5 | source.cpp 6 | sink.cpp 7 | enumerator.cpp 8 | database.cpp) 9 | 10 | add_library(api OBJECT 11 | ${SOURCE_FILES}) 12 | -------------------------------------------------------------------------------- /docs/_templates/guidetoc.html: -------------------------------------------------------------------------------- 1 |
2 | 8 |
9 | -------------------------------------------------------------------------------- /docs/guide/extensions.rst: -------------------------------------------------------------------------------- 1 | .. _extensions: 2 | 3 | Extensions 4 | ========== 5 | 6 | Scanner supports extending the system in four ways: 7 | 8 | - Ops: 9 | 10 | - Sources: 11 | 12 | - Sinks 13 | 14 | - Storage backends: 15 | -------------------------------------------------------------------------------- /scanner/util/halide_context.h: -------------------------------------------------------------------------------- 1 | #include "scanner/util/cuda.h" 2 | 3 | #pragma once 4 | 5 | namespace Halide { 6 | namespace Runtime { 7 | namespace Internal { 8 | namespace Cuda { 9 | extern CUcontext context; 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/Dockerfile.master: -------------------------------------------------------------------------------- 1 | FROM scannerresearch/scanner:cpu-latest 2 | WORKDIR /app 3 | 4 | COPY master.py . 5 | 6 | ENV LD_LIBRARY_PATH /usr/local/lib/python3.5/dist-packages/scannerpy:$LD_LIBRARY_PATH 7 | CMD python3 master.py 8 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/Dockerfile.worker: -------------------------------------------------------------------------------- 1 | FROM scannerresearch/scanner:cpu-latest 2 | WORKDIR /app 3 | 4 | COPY worker.py . 5 | 6 | ENV LD_LIBRARY_PATH /usr/local/lib/python3.5/dist-packages/scannerpy:$LD_LIBRARY_PATH 7 | CMD python3 worker.py 8 | -------------------------------------------------------------------------------- /scanner/util/halide_context.cpp: -------------------------------------------------------------------------------- 1 | #include "scanner/util/cuda.h" 2 | 3 | #ifdef HAVE_CUDA 4 | namespace Halide { 5 | namespace Runtime { 6 | namespace Internal { 7 | namespace Cuda { 8 | CUcontext context = 0; 9 | } 10 | } 11 | } 12 | } 13 | #endif 14 | -------------------------------------------------------------------------------- /scanner/engine/build_flags.in.cpp: -------------------------------------------------------------------------------- 1 | #include <iostream> 2 | int main() { 3 | std::cout << "@dirs@" << std::endl; 4 | #ifdef HAVE_CUDA 5 | std::cout << "-DHAVE_CUDA" << std::endl; 6 | #else 7 | std::cout << std::endl; 8 | #endif 9 | return 0; 10 | } 11 | -------------------------------------------------------------------------------- /scanner/sink_args.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package scanner.proto; 4 | 5 | message ColumnSinkArgs { 6 | // For creating storehouse adapter 7 | string storage_type = 1; 8 | string bucket = 2; 9 | string region = 3; 10 | string endpoint = 4; 11 | } 12 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/master.py: -------------------------------------------------------------------------------- 1 | import scannerpy 2 | import os 3 | 4 | if __name__ == "__main__": 5 | scannerpy.start_master(port='8080', 6 | block=True, 7 | watchdog=False, 8 | no_workers_timeout=180) 9 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/master.py: -------------------------------------------------------------------------------- 1 | import scannerpy 2 | import os 3 | 4 | if __name__ == 
"__main__": 5 | scannerpy.start_master(port='8080', 6 | block=True, 7 | watchdog=False, 8 | no_workers_timeout=180) 9 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/Dockerfile.master: -------------------------------------------------------------------------------- 1 | FROM scannerresearch/scanner:cpu-latest 2 | WORKDIR /app 3 | 4 | COPY config.toml /root/.scanner/config.toml 5 | COPY master.py . 6 | 7 | ENV LD_LIBRARY_PATH /usr/local/lib/python3.5/dist-packages/scannerpy:$LD_LIBRARY_PATH 8 | CMD python3 master.py 9 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/Dockerfile.worker: -------------------------------------------------------------------------------- 1 | FROM scannerresearch/scanner:cpu-latest 2 | WORKDIR /app 3 | 4 | COPY config.toml /root/.scanner/config.toml 5 | COPY worker.py . 6 | 7 | ENV LD_LIBRARY_PATH /usr/local/lib/python3.5/dist-packages/scannerpy:$LD_LIBRARY_PATH 8 | CMD python3 worker.py 9 | -------------------------------------------------------------------------------- /python/scannerpy/job.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class Job(object): 4 | """ 5 | A specification of a table to produce as output of a bulk job. 6 | """ 7 | def __init__(self, op_args): 8 | self._op_args = op_args 9 | 10 | def op_args(self): 11 | return self._op_args 12 | -------------------------------------------------------------------------------- /tests/test_ops.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message BlurArgs { 4 | int32 kernel_size = 1; 5 | float sigma = 2; 6 | } 7 | 8 | message ResizeArgs { 9 | int32 width = 1; 10 | int32 height = 2; 11 | bool min = 3; 12 | bool preserve_aspect = 4; 13 | string interpolation = 5; 14 | } 15 | -------------------------------------------------------------------------------- /docs/guide/tutorial.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial: 2 | 3 | Tutorials 4 | ========= 5 | 6 | Scanner provides a set of tutorials that provide step-by-step examples of many 7 | of the basic features provided by Scanner. These tutorials can be found 8 | `here `__. 
9 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/scanner-config.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | data: 4 | config.toml: | 5 | [storage] 6 | type = "s3" 7 | bucket = 8 | db_path = "scanner_dbs/" 9 | region = "us-west-2" 10 | endpoint = "s3.us-west-2.amazonaws.com" 11 | 12 | metadata: 13 | name: scanner-configmap 14 | -------------------------------------------------------------------------------- /examples/apps/open-reid-feature-extraction/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scannerresearch/scanner:gpu-9.1-cudnn7 2 | WORKDIR /opt/openreid 3 | 4 | RUN cd /opt/openreid && \ 5 | git clone https://github.com/Cysu/open-reid.git && \ 6 | cd open-reid && \ 7 | pip3 install http://download.pytorch.org/whl/cu91/torch-0.4.0-cp35-cp35m-linux_x86_64.whl && \ 8 | python3 setup.py install 9 | -------------------------------------------------------------------------------- /scanner/util/bbox.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "scanner/util/common.h" 4 | 5 | namespace scanner { 6 | 7 | std::vector<BoundingBox> best_nms(const std::vector<BoundingBox>& boxes, 8 | f32 overlap); 9 | 10 | std::vector<BoundingBox> average_nms(const std::vector<BoundingBox>& boxes, 11 | f32 overlap); 12 | } 13 | -------------------------------------------------------------------------------- /docker/ubuntu16.04/requirements.txt: -------------------------------------------------------------------------------- 1 | ipython==5.3.0 2 | numpy==1.12.0 3 | protobuf==3.2.0 4 | toml==0.9.2 5 | youtube-dl 6 | scipy==0.18.1 7 | scikit-learn==0.18.1 8 | scikit-image==0.12.3 9 | matplotlib==2.0.0 10 | seaborn==0.7.1 11 | grpcio==1.7.0 12 | doxypypy==0.8.8.6 13 | pytest==3.0.6 14 | twine==1.8.1 15 | ipaddress==1.0.18 16 | plotly==2.0.6 17 | jupyter==1.0.0 18 | pandas==0.19.0 -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/worker.py: -------------------------------------------------------------------------------- 1 | import scannerpy 2 | import os 3 | 4 | if __name__ == "__main__": 5 | scannerpy.start_worker('{}:{}'.format( 6 | os.environ['SCANNER_MASTER_SERVICE_HOST'], 7 | os.environ['SCANNER_MASTER_SERVICE_PORT']), 8 | block=True, 9 | watchdog=False, 10 | port=5002) 11 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/worker.py: -------------------------------------------------------------------------------- 1 | import scannerpy 2 | import os 3 | 4 | if __name__ == "__main__": 5 | scannerpy.start_worker('{}:{}'.format( 6 | os.environ['SCANNER_MASTER_SERVICE_HOST'], 7 | os.environ['SCANNER_MASTER_SERVICE_PORT']), 8 | block=True, 9 | watchdog=False, 10 | port=5002) 11 | -------------------------------------------------------------------------------- /.scanner.example.toml: -------------------------------------------------------------------------------- 1 | scanner_path = "/opt/scanner" 2 | 3 | [storage] 4 | type = "posix" 5 | db_path = "/opt/scanner-db" 6 | 7 | # [storage] 8 | # type = "gcs" 9 | # key_path = "/path/to/gcs.key" 10 | # cert_path = "/path/to/gcs.cert" 11 | # bucket = "gcs-bucket" 12 | 13 | [network] 14 | master = "localhost" 15 | master_port = "5001" 16 | worker_port = "5002" 17 | 
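# The commented-out section below is an illustrative addition (not part of the
# original example config): an S3-backed storage block mirroring the config.toml
# embedded in examples/apps/aws_kubernetes/scanner-config.yaml.template. The
# bucket, region, and endpoint values are placeholders.
# [storage]
# type = "s3"
# bucket = "my-scanner-bucket"
# db_path = "scanner_db"
# region = "us-west-2"
# endpoint = "s3.us-west-2.amazonaws.com"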
-------------------------------------------------------------------------------- /.travis/travisci_rsa_brew.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw2maYHWQ6cdBi9UJ2Dzv4okW/bw0pmZAKAFZvXOjZ8rRcrhgu8pK289t42Hwso9dsCROJxxVegDTZeOh8jyuQqKziBXu1MqN/3+KuRWJfw3QGwOQNOuwPkBOEn+TZpkgiyz2GMDfvy1WseWxgKzadY2C/R4lHafnmc4Dr/6k2uNST3s73CAyHlGpORItlqZ+oc/IX74qz27PsjMjZsjarCEHDb3UWlsG95D1ymMWrwXUpx6/azyuAJEiWJp/UtdfL8oJVec9vXqK9pzI6IykLdFm0y72FYXtMwBDARugDGpqTpO1xmba0kSAoMe5Ip24nbNHB6gIHL8dnAD9qrbzF apoms@AlexandersMBP2.lan 2 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PROJECT=$(gcloud config get-value project) 4 | 5 | docker build -f Dockerfile.master -t gcr.io/$PROJECT/scanner-master:cpu . 6 | docker build -f Dockerfile.worker -t gcr.io/$PROJECT/scanner-worker:cpu . 7 | 8 | gcloud docker -- push gcr.io/$PROJECT/scanner-master:cpu 9 | gcloud docker -- push gcr.io/$PROJECT/scanner-worker:cpu 10 | 11 | kubectl delete deploy --all 12 | kubectl create -f master.yml 13 | kubectl create -f worker.yml 14 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. scanner documentation master file, created by 2 | sphinx-quickstart on Sun Nov 26 19:06:21 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. raw:: html 7 | :file: index_h.html 8 | 9 | .. toctree:: 10 | :hidden: 11 | 12 | guide/quickstart 13 | guide/getting-started 14 | guide/walkthrough 15 | guide/graphs 16 | guide/ops 17 | guide/stored-streams 18 | guide/profiling 19 | guide/kubernetes 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Warning: this is also used as the .dockerignore, and the matching patterns are 2 | # similar but **not quite** the same. If you add a file here, make sure that 3 | # the Docker build still works. 
4 | 5 | dependencies.txt 6 | nvidia-docker-compose.yml 7 | .cache 8 | build 9 | **/*.pyc 10 | **/*.trace 11 | thirdparty 12 | !thirdparty/resources 13 | !thirdparty/CMakeLists.txt 14 | python/*.egg-info 15 | docker/**/deps.sh 16 | docker/**/thirdparty 17 | **/*.mp4 18 | **/*.mkv 19 | docker/*.log 20 | 21 | python/scannerpy/include 22 | dist -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/sync-dirs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/bash 2 | 3 | DEFAULT_SERVER=localhost 4 | DEFAULT_PORT=8022 5 | 6 | LOCAL_DIR=$1 7 | REMOTE_DIR=$2 8 | REMOTE_SERVER=${3:-$DEFAULT_SERVER} 9 | SSH_KEY=${4} 10 | 11 | CMD="rsync -avz -e \"ssh -i $SSH_KEY\" \ 12 | --exclude build \ 13 | --exclude .git* \ 14 | --exclude \#* \ 15 | -r $LOCAL_DIR/ \ 16 | $REMOTE_SERVER:$REMOTE_DIR" 17 | eval $CMD 18 | 19 | # inotifywait, linux 20 | while fswatch -r $LOCAL_DIR/* -1; do 21 | eval $CMD || break; 22 | done 23 | -------------------------------------------------------------------------------- /scanner/types.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package scanner.proto; 4 | 5 | message Frame { 6 | int64 buffer = 1; 7 | int32 width = 2; 8 | int32 height = 3; 9 | } 10 | 11 | message BoundingBox { 12 | float x1 = 1; 13 | float y1 = 2; 14 | float x2 = 3; 15 | float y2 = 4; 16 | float score = 5; 17 | int32 track_id = 6; 18 | double track_score = 7; 19 | int32 label = 8; 20 | } 21 | 22 | message Point { 23 | float x = 1; 24 | float y = 2; 25 | float score = 3; 26 | } 27 | 28 | message ImageEncoderArgs { 29 | string format = 1; 30 | } 31 | -------------------------------------------------------------------------------- /docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | services: 3 | gpu: 4 | image: scannerresearch/scannertools:gpu-9.1-cudnn7-latest 5 | command: bash -c "cd /opt/scanner && jupyter notebook --allow-root --ip=0.0.0.0 --port=8888" 6 | ports: 7 | - "8888:8888" 8 | volumes: 9 | - .:/app 10 | runtime: nvidia 11 | cpu: 12 | image: scannerresearch/scannertools:cpu-latest 13 | command: bash -c "cd /opt/scanner && jupyter notebook --allow-root --ip=0.0.0.0 --port=8888" 14 | ports: 15 | - "8888:8888" 16 | volumes: 17 | - .:/app 18 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/start_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ZONE=us-west1-b 4 | 5 | gcloud container clusters create example-cluster \ 6 | --zone "$ZONE" \ 7 | --machine-type "n1-standard-2" \ 8 | --num-nodes 1 9 | 10 | gcloud container clusters get-credentials example-cluster --zone "$ZONE" 11 | 12 | gcloud container node-pools create workers \ 13 | --zone "$ZONE" \ 14 | --cluster example-cluster \ 15 | --machine-type "n1-standard-2" \ 16 | --num-nodes 1 \ 17 | --enable-autoscaling \ 18 | --min-nodes 0 \ 19 | --max-nodes 5 \ 20 | --preemptible 21 | -------------------------------------------------------------------------------- /scanner/engine/source_args.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package scanner.proto; 4 | 5 | message LoadSample { 6 | int32 table_id = 1; 7 | int32 column_id = 2; 8 | repeated int64 input_row_ids = 3 [packed=true]; 9 | } 10 | 11 | 
message ColumnEnumeratorArgs { 12 | int32 table_id = 1; 13 | int32 column_id = 2; 14 | } 15 | 16 | message ColumnSourceArgs { 17 | // For creating storehouse adapter 18 | string storage_type = 1; 19 | string bucket = 2; 20 | string region = 3; 21 | string endpoint = 4; 22 | } 23 | 24 | message ColumnElementArgs { 25 | int32 table_id = 1; 26 | int32 column_id = 2; 27 | int64 row_id = 3; 28 | } 29 | -------------------------------------------------------------------------------- /deps_openvino.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$OSTYPE" == "linux-gnu" ]]; then 4 | OPENVINO_REQ_PKGS=( 5 | libusb-1.0-0-dev 6 | libgstreamer1.0-0 7 | gstreamer1.0-plugins-base 8 | gstreamer1.0-plugins-good 9 | gstreamer1.0-plugins-bad 10 | ) 11 | apt update 12 | apt install -y ${OPENVINO_REQ_PKGS[@]} 13 | # ... 14 | elif [[ "$OSTYPE" == "darwin"* ]]; then 15 | echo "Support for Scanner with OpenVINO in Mac OSX is not available at this time." 16 | exit 1 17 | # Mac OSX 18 | else 19 | # Unknown. 20 | echo "Unknown OSTYPE: $OSTYPE. Exiting." 21 | exit 1 22 | fi 23 | 24 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/delete_eks_cluster.sh: -------------------------------------------------------------------------------- 1 | programname=$0 2 | 3 | function usage { 4 | echo "usage: $programname name" 5 | echo " name name of the cluster" 6 | exit 1 7 | } 8 | 9 | if [ $# == 0 ]; then 10 | usage 11 | fi 12 | 13 | NAME=$1 14 | 15 | CLUSTER_NAME=$NAME 16 | ROLE_ARN=arn:aws:iam::459065735846:role/eksServiceRole 17 | 18 | ### 1. Delete worker nodes 19 | aws cloudformation delete-stack --stack-name $CLUSTER_NAME-workers 20 | 21 | ### 2. Delete kubectl config for connecting to cluster 22 | rm ~/.kube/config-$CLUSTER_NAME 23 | 24 | ### 3. 
Delete the EKS cluster 25 | 26 | aws eks delete-cluster --name $CLUSTER_NAME 27 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/spawn_staging_machine.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | AMI= 4 | 5 | INSTANCE_ID=$( 6 | aws ec2 run-instances \ 7 | --image-id $AMI \ 8 | --security-group-ids sg-a6558ed8 \ 9 | --count 1 \ 10 | --instance-type m5.12xlarge \ 11 | --block-device-mappings "[{\"DeviceName\": \"/dev/sda1\",\"Ebs\":{\"VolumeSize\":128}}]" \ 12 | --key-name ec2-key \ 13 | --query 'Instances[0].InstanceId' \ 14 | --output text) 15 | 16 | TEMP=$(aws ec2 describe-instances \ 17 | --instance-ids $INSTANCE_ID \ 18 | --query 'Reservations[0].Instances[0].PublicIpAddress' \ 19 | --output text) 20 | 21 | echo $TEMP 22 | -------------------------------------------------------------------------------- /scanner/source_args.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package scanner.proto; 4 | 5 | message LoadSample { 6 | int32 table_id = 1; 7 | int32 column_id = 2; 8 | repeated int64 input_row_ids = 3 [packed=true]; 9 | } 10 | 11 | message ColumnEnumeratorArgs { 12 | string table_name = 1; 13 | string column_name = 2; 14 | } 15 | 16 | message ColumnSourceArgs { 17 | // For creating storehouse adapter 18 | string storage_type = 1; 19 | string bucket = 2; 20 | string region = 3; 21 | string endpoint = 4; 22 | // Performance flags 23 | int32 load_sparsity_threshold = 5; 24 | } 25 | 26 | message ColumnElementArgs { 27 | int32 table_id = 1; 28 | int32 column_id = 2; 29 | int64 row_id = 3; 30 | } 31 | -------------------------------------------------------------------------------- /scanner/engine/sample_kernel.h: -------------------------------------------------------------------------------- 1 | #include "scanner/api/kernel.h" 2 | #include "scanner/api/op.h" 3 | #include "scanner/util/memory.h" 4 | #include "scanner/metadata.pb.h" 5 | 6 | #include 7 | #include 8 | 9 | namespace scanner { 10 | 11 | class SampleKernel : public Kernel { 12 | public: 13 | SampleKernel(const KernelConfig& config, const std::string& kernel_str, 14 | const std::string& pickled_config); 15 | 16 | ~SampleKernel(); 17 | 18 | void execute(const BatchedElements& input_columns, 19 | BatchedElements& output_columns) override; 20 | 21 | private: 22 | KernelConfig config_; 23 | DeviceHandle device_; 24 | }; 25 | 26 | } 27 | -------------------------------------------------------------------------------- /.travis/travisci_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDQVJh7T9r1y2R8a3Ml0K59D7Wj4tVUGKmPC594YGQ9l6GLwE/BGp5pfVkyyBWS/GEy9oVdygaGPVKIpY2L4diygAaVRxp6jBiP3lFXygFsOh7lRShmdQwlCduDPoMQCRjcrYyod8Exk0argEdYPr3FfpY1bl5s0rjjBYsbiVhO7g5LfILi/V5s07sLQUUCmmhR67M2ljBeXF6DtvVbpy9tBoucefVFKz6UILJTnMuSzAMXl5HvbXCng+Rdv8T/mN/tGwMFzP7q5YOFZxtxg0pYkRgBQO4rk5LUMH8wNvMGnoifnf72wK8Na4V+ZnW4BiHLrXxvYBFCvAg5ndqfra7nBuGArD+O7debVjMzpKA4Ok/kbOICDKZfG85J93R6YeKB4gTE+i55RKy80CxjGsW3pwLTVqFEUO73dwIrEBc4+i5L6iRHYTx3TducOEbQWI9DklQykYYMPD1QGC9jn0RT/pA/NUXb7qu7lVxST49guNm+6/zfBWz1em1C503FQAKv5UFM0x089Z/TvOWNS9T4z+yv6Irby0GInMMeBM1UZs1Uwn00eiz375KWG+Cel3cYQD9aTqhS+7hwYuAvi35Ng09IWv7EFCX1EbQlKE5EuLO5ZChusAr5GlljCyeCZQoTwPs4j644JFeYd8mPIZqJ8vRIPjEascUfPJa3oxCbeQ== wcrichto@cs.stanford.edu 2 | 
-------------------------------------------------------------------------------- /scanner/util/jsoncpp.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include 19 | -------------------------------------------------------------------------------- /scanner/util/types.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | namespace scanner {} 19 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/kubeconfig.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | server: 5 | certificate-authority-data: 6 | name: kubernetes 7 | contexts: 8 | - context: 9 | cluster: kubernetes 10 | user: aws 11 | name: aws 12 | current-context: aws 13 | kind: Config 14 | preferences: {} 15 | users: 16 | - name: aws 17 | user: 18 | exec: 19 | apiVersion: client.authentication.k8s.io/v1alpha1 20 | command: heptio-authenticator-aws 21 | args: 22 | - "token" 23 | - "-i" 24 | - "" 25 | # - "-r" 26 | # - "" 27 | # env: 28 | # - name: AWS_PROFILE 29 | # value: "" 30 | -------------------------------------------------------------------------------- /docs/guide/kubernetes.rst: -------------------------------------------------------------------------------- 1 | .. _kubernetes: 2 | 3 | Kubernetes Integration 4 | ====================== 5 | 6 | The easiest way to scale out using Scanner is via `kubernetes `__. 7 | Kubernetes is a tool for automating the deployment and management of a cluster of 8 | machines that run `containers `__ 9 | (a container is akin to a light-weight VM image). 10 | 11 | We provide a step-by-step example of getting Scanner up and running on your own 12 | kubernetes cluster using either `Google Cloud Platform `__ 13 | or `Amazon Web Services `__. 
14 | -------------------------------------------------------------------------------- /tests/spawn_worker.py: -------------------------------------------------------------------------------- 1 | from scannerpy import protobufs, Config, start_worker 2 | import time 3 | import grpc 4 | import sys 5 | 6 | c = Config(None) 7 | 8 | import scanner.metadata_pb2 as metadata_types 9 | import scanner.engine.rpc_pb2 as rpc_types 10 | import scanner.types_pb2 as misc_types 11 | import scannerpy._python as bindings 12 | 13 | con = Config(config_path='/tmp/config_test') 14 | 15 | master_address = str(con.master_address) + ':' + str(con.master_port) 16 | port = int(sys.argv[1]) 17 | 18 | params = bindings.default_machine_params() 19 | mp = protobufs.MachineParameters() 20 | mp.ParseFromString(params) 21 | del mp.gpu_ids[:] 22 | params = mp.SerializeToString() 23 | 24 | start_worker(master_address, machine_params=params, config=con, block=True, 25 | port=port, 26 | watchdog=False) 27 | -------------------------------------------------------------------------------- /scanner/doxygen.md: -------------------------------------------------------------------------------- 1 | API Overview {#mainpage} 2 | ========= 3 | 4 | This is the auto-generated documentation for Scanner's C++ API. You should look here if you are implementing a C++ kernel and want to see, for example, what functions a kernel is expected to implement. 5 | 6 | * Custom kernels: [scanner::BaseKernel](@ref scanner::BaseKernel) and [scanner::Kernel](@ref scanner::Kernel) 7 | * Stream elements: [scanner::Element](@ref scanner::Element) and [scanner::Frame](@ref scanner::Frame) 8 | * Custom sources: [scanner::Source](@ref scanner::Source) and [scanner::Enumerator](@ref scanner::Enumerator) 9 | * Custom sinks: [scanner::Sink](@ref scanner::Sink) 10 | * Profiling: [scanner::Profiler](@ref scanner::Profiler) 11 | 12 | System internals are not well documented; please read the source code or contact the authors if you have questions about that. 
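For orientation, here is a minimal kernel skeleton. This is an illustrative sketch modeled on `scanner/engine/sample_kernel.h`; the class name and body are placeholders rather than part of the real API surface.

```cpp
#include "scanner/api/kernel.h"
#include "scanner/api/op.h"

namespace scanner {

// Sketch of the interface a C++ kernel is expected to implement.
class MyKernel : public Kernel {
 public:
  explicit MyKernel(const KernelConfig& config) : Kernel(config) {}

  // Receives a batch of input elements and must append one output
  // element per input row to output_columns.
  void execute(const BatchedElements& input_columns,
               BatchedElements& output_columns) override {
    // Per-element processing goes here.
  }
};

}  // namespace scanner
```

See `examples/tutorials/resize_op` for a complete, buildable kernel, including how ops and kernels are registered with the system.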
13 | -------------------------------------------------------------------------------- /examples/how-tos/python_kernel/python.py: -------------------------------------------------------------------------------- 1 | import scannerpy 2 | import os 3 | import struct 4 | 5 | from scannerpy import Database, Job, FrameType, DeviceType, Kernel 6 | from typing import Tuple 7 | 8 | @scannerpy.register_python_op() 9 | class MyOpKernel(Kernel): 10 | def __init__(self, config, protobufs): 11 | self.protobufs = protobufs 12 | 13 | def close(self): 14 | pass 15 | 16 | def execute(self, frame: FrameType) -> bytes: 17 | return struct.pack('=q', 9000) 18 | 19 | 20 | with Database() as db: 21 | frame = db.sources.FrameColumn() 22 | test = db.ops.MyOp(frame=frame) 23 | output = db.sinks.Column(columns={'test': test}) 24 | 25 | job = Job(op_args={ 26 | frame: db.table('example').column('frame'), 27 | output: 'example_py' 28 | }) 29 | db.run(output=output, jobs=[job]) 30 | -------------------------------------------------------------------------------- /cmake/Modules/FindNVCUVID.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find NVCUVID 2 | # 3 | # The following variables are optionally searched for defaults 4 | # NVCUVID_DIR: Base directory where NVCUVID can be found 5 | # 6 | # The following are set after configuration is done: 7 | # NVCUVID_FOUND 8 | # NVCUVID_LIBRARY 9 | 10 | include(FindPackageHandleStandardArgs) 11 | 12 | set(NVCUVID_ROOT_DIR "" CACHE PATH "Folder contains NVCUVID") 13 | 14 | if (NOT "$ENV{NVCUVID_DIR}" STREQUAL "") 15 | set(NVCUVID_ROOT_DIR 16 | $ENV{NVCUVID_DIR} CACHE PATH "Folder contains NVCUVID" FORCE) 17 | endif() 18 | 19 | find_library(NVCUVID_LIBRARY nvcuvid 20 | PATHS 21 | ${NVCUVID_ROOT_DIR}/lib 22 | /usr/local/cuda/lib64) 23 | 24 | find_package_handle_standard_args(NVCUVID DEFAULT_MSG NVCUVID_LIBRARY) 25 | 26 | if(NVCUVID_FOUND) 27 | set(NVCUVID_LIBRARIES ${NVCUVID_LIBRARY}) 28 | endif() 29 | -------------------------------------------------------------------------------- /scanner/engine/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | get_property(dirs DIRECTORY ${CMAKE_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES) 2 | configure_file(build_flags.in.cpp build_flags.cpp) 3 | 4 | add_executable(build_flags build_flags.cpp) 5 | 6 | set(SOURCE_FILES 7 | runtime.cpp 8 | master.cpp 9 | worker.cpp 10 | ingest.cpp 11 | video_index_entry.cpp 12 | load_worker.cpp 13 | evaluate_worker.cpp 14 | save_worker.cpp 15 | sampler.cpp 16 | dag_analysis.cpp 17 | metadata.cpp 18 | kernel_registry.cpp 19 | op_registry.cpp 20 | source_registry.cpp 21 | sink_registry.cpp 22 | column_source.cpp 23 | column_enumerator.cpp 24 | column_sink.cpp 25 | enumerator_registry.cpp 26 | table_meta_cache.cpp 27 | python_kernel.cpp 28 | sample_op.cpp 29 | space_op.cpp 30 | slice_op.cpp 31 | unslice_op.cpp) 32 | 33 | add_library(engine OBJECT 34 | ${SOURCE_FILES}) 35 | -------------------------------------------------------------------------------- /tests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(OpenCV COMPONENTS core imgproc highgui video) 2 | 3 | if (OpenCV_FOUND) 4 | include(${CMAKE_SOURCE_DIR}/cmake/Util/Op.cmake) 5 | build_op(LIB_NAME scanner_tests CPP_SRCS test_ops.cpp PROTO_SRC test_ops.proto NO_FLAGS ON) 6 | 7 | target_link_libraries(scanner_tests PUBLIC ${OpenCV_LIBRARIES} scanner) 8 | target_include_directories(scanner_tests PUBLIC 
${OpenCV_INCLUDE_DIR}) 9 | 10 | add_test( 11 | NAME PythonTests 12 | COMMAND pytest ${CMAKE_CURRENT_SOURCE_DIR} -x -vv) 13 | else() 14 | message("Scanner tests are disabled. OpenCV not found which is required for building the tests.") 15 | endif() 16 | 17 | if (NO_FFMPEG STREQUAL "false") 18 | add_executable(FfmpegTest ffmpeg_test.cpp) 19 | target_link_libraries(FfmpegTest ${GTEST_LIBRARIES} ${GTEST_LIB_MAIN} scanner) 20 | add_test(FfmpegTests FfmpegTest) 21 | endif() 22 | -------------------------------------------------------------------------------- /docker/ubuntu16.04/Dockerfile.gpu9: -------------------------------------------------------------------------------- 1 | # Scanner base GPU image for Ubuntu 16.04 CUDA 9.X 2 | 3 | ARG base_tag 4 | FROM ${base_tag} 5 | MAINTAINER Will Crichton "wcrichto@cs.stanford.edu" 6 | ARG cores=1 7 | 8 | ADD thirdparty/resources/cuda/libnvcuvid.so.387.26 /usr/lib/x86_64-linux-gnu/libnvcuvid.so 9 | RUN ln -s /usr/local/cuda/targets/x86_64-linux/lib/stubs/libcuda.so \ 10 | /usr/local/cuda/targets/x86_64-linux/lib/stubs/libcuda.so.1 11 | ENV CUDA_LIB_PATH /usr/local/cuda/lib64/stubs 12 | 13 | RUN bash ./deps.sh --root-install --install-all --prefix /usr/local --use-gpu && \ 14 | rm -rf /opt/scanner-base 15 | 16 | ENV LD_LIBRARY_PATH /usr/local/intel/mkl/lib:${LD_LIBRARY_PATH} 17 | ENV PYTHONPATH /usr/local/python:${PYTHONPATH} 18 | ENV PYTHONPATH /usr/local/lib/python3.5/site-packages:${PYTHONPATH} 19 | ENV PYTHONPATH /usr/local/lib/python3.5/dist-packages:${PYTHONPATH} 20 | 21 | WORKDIR / 22 | -------------------------------------------------------------------------------- /scanner/util/glog.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "glog/logging.h" 19 | 20 | #include 21 | #include 22 | #include 23 | 24 | namespace scanner { 25 | 26 | void init_glog(const char* program_name); 27 | 28 | } 29 | -------------------------------------------------------------------------------- /docker/ubuntu16.04/Dockerfile.gpu10: -------------------------------------------------------------------------------- 1 | # Scanner base GPU image for Ubuntu 16.04 CUDA 10.X 2 | 3 | ARG base_tag 4 | FROM ${base_tag} 5 | MAINTAINER Will Crichton "wcrichto@cs.stanford.edu" 6 | ARG cores=1 7 | 8 | ADD thirdparty/resources/cuda/libnvcuvid.so.387.26 /usr/lib/x86_64-linux-gnu/libnvcuvid.so 9 | RUN ln -s /usr/local/cuda/targets/x86_64-linux/lib/stubs/libcuda.so \ 10 | /usr/local/cuda/targets/x86_64-linux/lib/stubs/libcuda.so.1 11 | ENV CUDA_LIB_PATH /usr/local/cuda/lib64/stubs 12 | 13 | RUN bash ./deps.sh --root-install --install-all --prefix /usr/local --use-gpu && \ 14 | rm -rf /opt/scanner-base 15 | 16 | ENV LD_LIBRARY_PATH /usr/local/intel/mkl/lib:${LD_LIBRARY_PATH} 17 | ENV PYTHONPATH /usr/local/python:${PYTHONPATH} 18 | ENV PYTHONPATH /usr/local/lib/python3.5/site-packages:${PYTHONPATH} 19 | ENV PYTHONPATH /usr/local/lib/python3.5/dist-packages:${PYTHONPATH} 20 | 21 | WORKDIR / 22 | -------------------------------------------------------------------------------- /docker/ubuntu16.04/Dockerfile.gpu8: -------------------------------------------------------------------------------- 1 | # Scanner base GPU image for Ubuntu 16.04 CUDA 8.0 2 | 3 | ARG base_tag 4 | FROM ${base_tag} 5 | MAINTAINER Will Crichton "wcrichto@cs.stanford.edu" 6 | ARG cores=1 7 | 8 | ADD thirdparty/resources/cuda/libnvcuvid.so.367.48 /usr/lib/x86_64-linux-gnu/libnvcuvid.so 9 | RUN ln -s /usr/local/cuda-8.0/targets/x86_64-linux/lib/stubs/libcuda.so \ 10 | /usr/local/cuda-8.0/targets/x86_64-linux/lib/stubs/libcuda.so.1 11 | ENV CUDA_LIB_PATH /usr/local/cuda/lib64/stubs 12 | 13 | RUN bash ./deps.sh --root-install --install-all --prefix /usr/local --use-gpu && \ 14 | rm -rf /opt/scanner-base 15 | 16 | ENV LD_LIBRARY_PATH /usr/local/intel/mkl/lib:${LD_LIBRARY_PATH} 17 | ENV PYTHONPATH /usr/local/python:${PYTHONPATH} 18 | ENV PYTHONPATH /usr/local/lib/python3.5/site-packages:${PYTHONPATH} 19 | ENV PYTHONPATH /usr/local/lib/python3.5/dist-packages:${PYTHONPATH} 20 | 21 | WORKDIR / 22 | -------------------------------------------------------------------------------- /python/scannerpy/io.py: -------------------------------------------------------------------------------- 1 | from scannerpy.common import ScannerException 2 | from scannerpy.storage import StoredStream 3 | 4 | class IOGenerator: 5 | def __init__(self, sc): 6 | self._sc = sc 7 | 8 | def Input(self, streams): 9 | if not isinstance(streams, list) or not isinstance(streams[0], StoredStream): 10 | raise ScannerException("io.Input must take a list of streams as input") 11 | 12 | example = streams[0] 13 | source = example.storage().source(self._sc, streams) 14 | source._streams = streams 15 | return source 16 | 17 | def Output(self, op, streams): 18 | if not isinstance(streams, list) or not isinstance(streams[0], StoredStream): 19 | raise ScannerException("io.Output must take a list of streams as input") 20 | 21 | example = streams[0] 22 | sink = example.storage().sink(self._sc, op, streams) 23 | sink._streams = streams 24 | return sink 25 | -------------------------------------------------------------------------------- /scanner/sampler_args.proto: 
-------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package scanner.proto; 4 | 5 | // Sampler args 6 | message StridedSamplerArgs { 7 | int64 stride = 1; 8 | } 9 | 10 | message StridedRangeSamplerArgs { 11 | int64 stride = 1; 12 | repeated int64 starts = 2; 13 | repeated int64 ends = 3; 14 | } 15 | 16 | message GatherSamplerArgs { 17 | repeated int64 rows = 1 [packed=true]; 18 | } 19 | 20 | 21 | message SpaceNullSamplerArgs { 22 | int64 spacing = 1; 23 | } 24 | 25 | message SpaceRepeatSamplerArgs { 26 | int64 spacing = 1; 27 | } 28 | 29 | message StridedPartitionerArgs { 30 | int64 stride = 1; 31 | int64 group_size = 2; 32 | } 33 | 34 | message StridedRangePartitionerArgs { 35 | int64 stride = 1; 36 | repeated int64 starts = 2; 37 | repeated int64 ends = 3; 38 | } 39 | 40 | message GatherPartitionerArgs { 41 | message GatherList { 42 | repeated int64 rows = 1 [packed=true]; 43 | } 44 | 45 | repeated GatherList groups = 1; 46 | } 47 | -------------------------------------------------------------------------------- /cmake/Modules/FindHwang.cmake: -------------------------------------------------------------------------------- 1 | # FindHwang.cmake 2 | 3 | set(HWANG_ROOT_DIR "" CACHE PATH "Folder contains Hwang") 4 | 5 | if (NOT "$ENV{Hwang_DIR}" STREQUAL "") 6 | set(HWANG_ROOT_DIR $ENV{Hwang_DIR} CACHE PATH "Folder contains Hwang" 7 | FORCE) 8 | elseif (Hwang_DIR) 9 | set(HWANG_ROOT_DIR ${Hwang_DIR} CACHE PATH "Folder contains Hwang" 10 | FORCE) 11 | endif() 12 | 13 | find_library(HWANG_LIBRARIES 14 | NAMES hwang 15 | HINTS ${HWANG_ROOT_DIR}/lib 16 | ) 17 | 18 | find_path(HWANG_INCLUDE_DIR 19 | NAMES hwang/common.h 20 | HINTS ${HWANG_ROOT_DIR}/include 21 | ) 22 | 23 | include(FindPackageHandleStandardArgs) 24 | find_package_handle_standard_args(Hwang DEFAULT_MSG 25 | HWANG_LIBRARIES 26 | HWANG_INCLUDE_DIR 27 | ) 28 | 29 | set(HWANG_LIBRARY ${HWANG_LIBRARIES}) 30 | set(HWANG_INCLUDE_DIRS ${HWANG_INCLUDE_DIR}) 31 | 32 | mark_as_advanced( 33 | HWANG_ROOT_DIR 34 | HWANG_LIBRARY 35 | HWANG_LIBRARIES 36 | HWANG_INCLUDE_DIR 37 | HWANG_INCLUDE_DIRS 38 | ) 39 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = scanner 8 | SOURCEDIR = . 9 | BUILDDIR = ../build/docs 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | 22 | serve: autobuild notifywait 23 | 24 | autobuild: 25 | sphinx-autobuild "$(SOURCEDIR)" $(BUILDDIR)/html -b html $(SPHINXOPTS) $(O) -j `nproc` -p 4567 -H 0.0.0.0 26 | 27 | notifywait: 28 | inotifywait -e close_write,moved_to,create -mr . 
--format '%f' | while read FILE; do echo $${FILE}; done 29 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============= 3 | 4 | Scanner has three main components to its API: 5 | 6 | * The :ref:`Scanner Python API `, for defining/executing computation graphs and Python kernels 7 | * The :ref:`Scannertools API `, a standard library of pre-made kernels 8 | * The `Scanner C++ API `_, for defining C++ kernels 9 | 10 | scannerpy - the main scanner API 11 | -------------------------------- 12 | 13 | * :any:`scannerpy.client`: entrypoint for running computation graphs, similar to TensorFlow Session 14 | * :any:`scannerpy.kernel`: defining custom Python kernels 15 | * :any:`scannerpy.storage`: defining custom inputs/outputs to Scanner graphs 16 | * :any:`scannerpy.kube`: Kubernetes API 17 | * :any:`scannerpy.profiler`: handle to profiling data output by Scanner 18 | 19 | scannertools - the Scanner standard library 20 | ------------------------------------------- 21 | 22 | .. toctree:: 23 | :maxdepth: 3 24 | 25 | api/scannertools 26 | -------------------------------------------------------------------------------- /scanner/util/glog.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "glog/logging.h" 17 | 18 | #include 19 | 20 | namespace scanner { 21 | namespace { 22 | std::atomic_flag glog_initialized; 23 | } 24 | 25 | void init_glog(const char* prog_name) { 26 | if (!glog_initialized.test_and_set()) { 27 | google::InitGoogleLogging(prog_name); 28 | } 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /examples/tutorials/resize_op/Makefile: -------------------------------------------------------------------------------- 1 | # Scanner outputs build flags that add Scanner to your library and include paths 2 | # as well as cover common flags for building a shared library. 3 | SCANNER_CFLAGS = `python3 -c "import scannerpy.build_flags as b; b.print_compile_flags()"` 4 | SCANNER_LD_FLAGS = `python3 -c "import scannerpy.build_flags as b; b.print_link_flags()"` 5 | 6 | # Change this to wherever you installed deps.sh to 7 | INSTALL_DIR=../../../thirdparty/install 8 | 9 | all: build/libresize_op.so 10 | 11 | clean: 12 | rm -rf build/* 13 | 14 | # protoc generates the Python and C++ bindings for the ResizeArgs class. 
15 | build/resize.pb.cc: resize.proto 16 | protoc resize.proto --python_out=build --cpp_out=build 17 | 18 | # g++ builds the op library 19 | build/libresize_op.so: build/resize.pb.cc resize_op.cpp 20 | g++ -shared -fPIC $^ -o $@ ${SCANNER_CFLAGS} ${SCANNER_LD_FLAGS} \ 21 | -I ${INSTALL_DIR}/include -I build \ 22 | -L ${INSTALL_DIR}/lib -lprotobuf -lglog -lopencv_core -lopencv_imgproc 23 | 24 | .PHONY: clean 25 | -------------------------------------------------------------------------------- /cmake/Modules/FindHalide.cmake: -------------------------------------------------------------------------------- 1 | # FindHalide.cmake 2 | # ... shamelessly based on FindJeMalloc.cmake 3 | 4 | set(HALIDE_ROOT_DIR "" CACHE PATH "Folder contains Halide") 5 | 6 | if (NOT "$ENV{Halide_DIR}" STREQUAL "") 7 | set(HALIDE_ROOT_DIR $ENV{Halide_DIR} CACHE PATH "Folder contains Halide" 8 | FORCE) 9 | elseif (Halide_DIR) 10 | set(HALIDE_ROOT_DIR ${Halide_DIR} CACHE PATH "Folder contains Halide" 11 | FORCE) 12 | endif() 13 | 14 | find_library(HALIDE_LIBRARIES 15 | NAMES Halide 16 | HINTS ${HALIDE_ROOT_DIR}/lib 17 | ) 18 | 19 | find_path(HALIDE_INCLUDE_DIR 20 | NAMES Halide.h HalideRuntime.h 21 | HINTS ${HALIDE_ROOT_DIR}/include 22 | ) 23 | 24 | include(FindPackageHandleStandardArgs) 25 | find_package_handle_standard_args(Halide DEFAULT_MSG 26 | HALIDE_LIBRARIES 27 | HALIDE_INCLUDE_DIR 28 | ) 29 | 30 | set(HALIDE_LIBRARY ${HALIDE_LIBRARIES}) 31 | set(HALIDE_INCLUDE_DIRS ${HALIDE_INCLUDE_DIR}) 32 | 33 | mark_as_advanced( 34 | HALIDE_ROOT_DIR 35 | HALIDE_LIBRARY 36 | HALIDE_LIBRARIES 37 | HALIDE_INCLUDE_DIR 38 | HALIDE_INCLUDE_DIRS 39 | ) 40 | -------------------------------------------------------------------------------- /docker/Dockerfile.scanner: -------------------------------------------------------------------------------- 1 | ARG tag=gpu 2 | FROM scannerresearch/scanner-base:ubuntu16.04-${tag} 3 | MAINTAINER Will Crichton "wcrichto@cs.stanford.edu" 4 | ARG cores=1 5 | ARG gpu=ON 6 | ARG deps_opt='' 7 | 8 | ADD . /opt/scanner 9 | WORKDIR /opt/scanner 10 | ENV Caffe_DIR /usr/local 11 | ENV LD_LIBRARY_PATH \ 12 | "/usr/lib/x86_64-linux-gnu:/usr/local/cuda/lib64:$LD_LIBRARY_PATH:/usr/local/cuda/lib64/stubs" 13 | ENV PKG_CONFIG_PATH "/usr/local/lib/pkgconfig:$PKG_CONFIG_PATH" 14 | RUN cd /opt/scanner && \ 15 | (if [ "${gpu}" = "ON" ]; then \ 16 | bash deps.sh -g --install-none --prefix /usr/local ${deps_opt}; \ 17 | else \ 18 | bash deps.sh -ng --install-none --prefix /usr/local ${deps_opt}; \ 19 | fi) && \ 20 | mkdir build && cd build && \ 21 | cmake -D BUILD_TESTS=ON \ 22 | -D BUILD_CUDA=${gpu} \ 23 | -D CMAKE_BUILD_TYPE=RelWithDebinfo \ 24 | .. && \ 25 | cd .. 
&& \ 26 | (yes | pip3 uninstall grpcio protobuf) && \ 27 | bash ./build.sh && \ 28 | ldconfig 29 | 30 | ENV LC_ALL C.UTF-8 31 | ENV LANG C.UTF-8 32 | -------------------------------------------------------------------------------- /cmake/Modules/FindEigen.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Eigen 2 | # 3 | # The following variables are optionally searched for defaults 4 | # EIGEN_ROOT_DIR: Base directory where all Eigen components are found 5 | # 6 | # The following are set after configuration is done: 7 | # EIGEN_FOUND 8 | # EIGEN_INCLUDE_DIRS 9 | 10 | include(FindPackageHandleStandardArgs) 11 | 12 | set(EIGEN_ROOT_DIR "" CACHE PATH "Folder contains Eigen") 13 | 14 | if (NOT "$ENV{Eigen_DIR}" STREQUAL "") 15 | set(EIGEN_ROOT_DIR $ENV{Eigen_DIR} CACHE PATH "Folder contains Eigen" FORCE) 16 | endif() 17 | 18 | # We are testing only a couple of files in the include directories 19 | if(WIN32) 20 | find_path(EIGEN_INCLUDE_DIR Eigen/Core 21 | PATHS ${EIGEN_ROOT_DIR}/src/windows 22 | PATH_SUFFIXES eigen3) 23 | else() 24 | find_path(EIGEN_INCLUDE_DIR Eigen/Core 25 | PATHS ${EIGEN_ROOT_DIR}/include 26 | PATH_SUFFIXES eigen3) 27 | 28 | endif() 29 | 30 | find_package_handle_standard_args(EIGEN DEFAULT_MSG EIGEN_INCLUDE_DIR) 31 | 32 | if(EIGEN_FOUND) 33 | set(EIGEN_INCLUDE_DIRS ${EIGEN_INCLUDE_DIR}) 34 | endif() 35 | -------------------------------------------------------------------------------- /examples/tutorials/resize_op/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # To build your custom op, you can either use our CMake convenience methods 2 | # or do it the slightly harder way in normal Make (see the Makefile). 3 | 4 | cmake_minimum_required(VERSION 3.2.0 FATAL_ERROR) 5 | 6 | execute_process( 7 | OUTPUT_VARIABLE SCANNER_CMAKE_PATH 8 | COMMAND python3 -c "import scannerpy.build_flags as b; b.print_cmake()") 9 | include(${SCANNER_CMAKE_PATH}) 10 | 11 | # build_op will create a shared library called lib${LIB_NAME}.so that builds 12 | # from all of the CPP_SRCS. You can also optionally specify a PROTO_SRC that 13 | # points to a Protobuf file and will generate the C++ and Python bindings. 14 | build_op( 15 | LIB_NAME resize_op 16 | CPP_SRCS resize_op.cpp 17 | PROTO_SRC resize.proto) 18 | 19 | # The library specified in build_op is a normal CMake target, so you can use all 20 | # the normal CMake functions with it. 
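# For example, the next few lines pull in OpenCV with find_package() and link it against the target. # As a further sketch (not required by this tutorial; RESIZE_OP_EXAMPLE is a made-up macro), you could likewise add your own compile definitions: # target_compile_definitions(resize_op PRIVATE RESIZE_OP_EXAMPLE=1)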
21 | find_package(OpenCV REQUIRED COMPONENTS core imgproc) 22 | target_include_directories(resize_op PUBLIC ${OpenCV_INCLUDE_DIRS}) 23 | target_link_libraries(resize_op PUBLIC ${OpenCV_LIBRARIES}) 24 | -------------------------------------------------------------------------------- /scanner/engine/slice_op.cpp: -------------------------------------------------------------------------------- 1 | #include "scanner/api/kernel.h" 2 | #include "scanner/api/op.h" 3 | #include "scanner/util/memory.h" 4 | 5 | namespace scanner { 6 | 7 | // Dummy Kernel 8 | class SliceKernel : public BatchedKernel { 9 | public: 10 | SliceKernel(const KernelConfig& config) 11 | : BatchedKernel(config) {} 12 | 13 | void execute(const BatchedElements& input_columns, 14 | BatchedElements& output_columns) override { 15 | // No implementation 16 | } 17 | }; 18 | 19 | 20 | // Reserve Op name as builtin 21 | REGISTER_OP(Slice).input("col").output("out"); 22 | 23 | REGISTER_KERNEL(Slice, SliceKernel).device(DeviceType::CPU).num_devices(1); 24 | 25 | REGISTER_KERNEL(Slice, SliceKernel).device(DeviceType::GPU).num_devices(1); 26 | 27 | 28 | REGISTER_OP(SliceFrame).frame_input("col").frame_output("out"); 29 | 30 | REGISTER_KERNEL(SliceFrame, SliceKernel) 31 | .device(DeviceType::CPU) 32 | .batch() 33 | .num_devices(1); 34 | 35 | REGISTER_KERNEL(SliceFrame, SliceKernel) 36 | .device(DeviceType::GPU) 37 | .batch() 38 | .num_devices(1); 39 | 40 | } 41 | -------------------------------------------------------------------------------- /scanner/video/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SOURCE_FILES 2 | h264_byte_stream_index_creator.cpp 3 | decoder_automata.cpp 4 | video_decoder.cpp 5 | video_encoder.cpp) 6 | 7 | if (BUILD_CUDA) 8 | add_definitions(-DHAVE_NVIDIA_VIDEO_HARDWARE) 9 | list(APPEND SOURCE_FILES 10 | nvidia/nvidia_video_decoder.cpp) 11 | endif() 12 | 13 | if (MFX_FOUND) 14 | add_definitions(-DHAVE_INTEL_VIDEO_HARDWARE) 15 | list(APPEND SOURCE_FILES 16 | intel/intel_video_decoder.cpp) 17 | endif() 18 | 19 | if (NO_FFMPEG STREQUAL "false") 20 | list(APPEND SOURCE_FILES 21 | software/software_video_decoder.cpp 22 | software/software_video_encoder.cpp) 23 | endif() 24 | 25 | add_library(video OBJECT 26 | ${SOURCE_FILES}) 27 | 28 | set_source_files_properties(${PROTO_SRCS} ${GRPC_PROTO_SRCS} PROPERTIES 29 | GENERATED TRUE) 30 | 31 | # TODO(apoms): fix since change in decoder API 32 | # add_executable(DecoderAutomataTest decoder_automata_test.cpp) 33 | # target_link_libraries(DecoderAutomataTest 34 | # ${GTEST_LIBRARIES} ${GTEST_LIB_MAIN} 35 | # scanner) 36 | # add_test(DecoderAutomataTest DecoderAutomataTest) 37 | -------------------------------------------------------------------------------- /scanner/engine/sample_op.cpp: -------------------------------------------------------------------------------- 1 | #include "scanner/api/kernel.h" 2 | #include "scanner/api/op.h" 3 | #include "scanner/util/memory.h" 4 | 5 | namespace scanner { 6 | 7 | // Dummy Kernel 8 | class SampleKernel : public BatchedKernel { 9 | public: 10 | SampleKernel(const KernelConfig& config) 11 | : BatchedKernel(config) {} 12 | 13 | void execute(const BatchedElements& input_columns, 14 | BatchedElements& output_columns) override { 15 | // No implementation 16 | } 17 | }; 18 | 19 | 20 | // Reserve Op name as builtin 21 | REGISTER_OP(Sample).input("col").output("out"); 22 | 23 | REGISTER_KERNEL(Sample, SampleKernel).device(DeviceType::CPU).num_devices(1); 24 | 25 | REGISTER_KERNEL(Sample, 
SampleKernel).device(DeviceType::GPU).num_devices(1); 26 | 27 | 28 | REGISTER_OP(SampleFrame).frame_input("col").frame_output("out"); 29 | 30 | REGISTER_KERNEL(SampleFrame, SampleKernel) 31 | .device(DeviceType::CPU) 32 | .batch() 33 | .num_devices(1); 34 | 35 | REGISTER_KERNEL(SampleFrame, SampleKernel) 36 | .device(DeviceType::GPU) 37 | .batch() 38 | .num_devices(1); 39 | 40 | } 41 | -------------------------------------------------------------------------------- /scanner/engine/unslice_op.cpp: -------------------------------------------------------------------------------- 1 | #include "scanner/api/kernel.h" 2 | #include "scanner/api/op.h" 3 | #include "scanner/util/memory.h" 4 | 5 | namespace scanner { 6 | 7 | // Dummy Kernel 8 | class UnsliceKernel : public BatchedKernel { 9 | public: 10 | UnsliceKernel(const KernelConfig& config) 11 | : BatchedKernel(config) {} 12 | 13 | void execute(const BatchedElements& input_columns, 14 | BatchedElements& output_columns) override { 15 | // No implementation 16 | } 17 | }; 18 | 19 | 20 | // Reserve Op name as builtin 21 | REGISTER_OP(Unslice).input("col").output("out"); 22 | 23 | REGISTER_KERNEL(Unslice, UnsliceKernel).device(DeviceType::CPU).num_devices(1); 24 | 25 | REGISTER_KERNEL(Unslice, UnsliceKernel).device(DeviceType::GPU).num_devices(1); 26 | 27 | 28 | REGISTER_OP(UnsliceFrame).frame_input("col").frame_output("out"); 29 | 30 | REGISTER_KERNEL(UnsliceFrame, UnsliceKernel) 31 | .device(DeviceType::CPU) 32 | .batch() 33 | .num_devices(1); 34 | 35 | REGISTER_KERNEL(UnsliceFrame, UnsliceKernel) 36 | .device(DeviceType::GPU) 37 | .batch() 38 | .num_devices(1); 39 | 40 | } 41 | -------------------------------------------------------------------------------- /examples/how-tos/halide/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.2.0 FATAL_ERROR) 2 | 3 | # Uncomment the line below and change the Scanner path to the repo you cloned: 4 | # set(SCANNER_PATH /path/to/scanner) 5 | if(NOT SCANNER_PATH) 6 | message(FATAL_ERROR "You need to update the SCANNER_PATH in halide/CMakeLists.txt first.") 7 | endif() 8 | include(${SCANNER_PATH}/cmake/Util/Op.cmake) 9 | 10 | find_package(Halide REQUIRED) 11 | include(${SCANNER_PATH}/cmake/Util/HalideGenerator.cmake) 12 | 13 | halide_add_generator(halide_resize.generator SRCS halide_resize.cpp) 14 | halide_add_aot_library(halide_resize 15 | GENERATOR_TARGET halide_resize.generator 16 | GENERATOR_ARGS target=cuda) 17 | 18 | build_op( 19 | LIB_NAME resize 20 | CPP_SRCS resize_op.cpp 21 | PROTO_SRC resize.proto) 22 | target_compile_definitions(resize PUBLIC -DHAVE_CUDA) 23 | 24 | add_dependencies(resize halide_resize.exec_generator) 25 | target_include_directories(resize PUBLIC "${HALIDE_INCLUDE_DIR}") 26 | target_include_directories(resize PUBLIC "${PROJECT_BINARY_DIR}/generator_genfiles") 27 | halide_add_aot_library_dependency(resize halide_resize) 28 | -------------------------------------------------------------------------------- /scanner/engine/space_op.cpp: -------------------------------------------------------------------------------- 1 | #include "scanner/api/kernel.h" 2 | #include "scanner/api/op.h" 3 | #include "scanner/util/memory.h" 4 | 5 | namespace scanner { 6 | 7 | // Dummy Kernel 8 | class SpaceKernel : public BatchedKernel { 9 | public: 10 | SpaceKernel(const KernelConfig& config) 11 | : BatchedKernel(config) {} 12 | 13 | void execute(const BatchedElements& input_columns, 14 | BatchedElements& output_columns) 
override { 15 | // No implementation 16 | } 17 | }; 18 | 19 | 20 | // Reserve Op name as builtin 21 | REGISTER_OP(Space).input("col").output("out"); 22 | 23 | REGISTER_KERNEL(Space, SpaceKernel) 24 | .device(DeviceType::CPU) 25 | .batch() 26 | .num_devices(1); 27 | 28 | REGISTER_KERNEL(Space, SpaceKernel) 29 | .device(DeviceType::GPU) 30 | .batch() 31 | .num_devices(1); 32 | 33 | REGISTER_OP(SpaceFrame).frame_input("col").frame_output("out"); 34 | 35 | REGISTER_KERNEL(SpaceFrame, SpaceKernel) 36 | .device(DeviceType::CPU) 37 | .batch() 38 | .num_devices(1); 39 | 40 | REGISTER_KERNEL(SpaceFrame, SpaceKernel) 41 | .device(DeviceType::GPU) 42 | .batch() 43 | .num_devices(1); 44 | } 45 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/master.yml.template: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: scanner-master 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | app: scanner-master 11 | spec: 12 | containers: 13 | - name: scanner-master 14 | image: gcr.io/YOUR_PROJECT_ID/scanner-master:cpu 15 | imagePullPolicy: Always 16 | env: 17 | - name: AWS_ACCESS_KEY_ID 18 | valueFrom: 19 | secretKeyRef: 20 | name: aws-storage-key 21 | key: AWS_ACCESS_KEY_ID 22 | - name: AWS_SECRET_ACCESS_KEY 23 | valueFrom: 24 | secretKeyRef: 25 | name: aws-storage-key 26 | key: AWS_SECRET_ACCESS_KEY 27 | - name: GLOG_logtostderr 28 | value: '1' 29 | - name: GLOG_minloglevel 30 | value: '0' 31 | - name: GLOG_v 32 | value: '2' 33 | ports: 34 | - containerPort: 8080 35 | nodeSelector: 36 | cloud.google.com/gke-nodepool: default-pool 37 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/worker.yml.template: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: scanner-worker 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: scanner-worker 11 | spec: 12 | containers: 13 | - name: scanner-worker 14 | image: gcr.io/YOUR_PROJECT_ID/scanner-worker:cpu 15 | imagePullPolicy: Always 16 | env: 17 | - name: AWS_ACCESS_KEY_ID 18 | valueFrom: 19 | secretKeyRef: 20 | name: aws-storage-key 21 | key: AWS_ACCESS_KEY_ID 22 | - name: AWS_SECRET_ACCESS_KEY 23 | valueFrom: 24 | secretKeyRef: 25 | name: aws-storage-key 26 | key: AWS_SECRET_ACCESS_KEY 27 | - name: GLOG_logtostderr 28 | value: '1' 29 | - name: GLOG_minloglevel 30 | value: '0' 31 | - name: GLOG_v 32 | value: '2' 33 | resources: 34 | requests: 35 | cpu: 1.1 36 | nodeSelector: 37 | cloud.google.com/gke-nodepool: workers 38 | -------------------------------------------------------------------------------- /python/scannerpy/build_flags.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import sys 3 | 4 | SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) 5 | 6 | def get_grpc_version(): 7 | with open(os.path.abspath(os.path.join(SCRIPT_DIR, 'cmake', 'grpc_version.txt')), 'r') as f: 8 | return f.read() 9 | 10 | def get_include(): 11 | return os.path.abspath(os.path.join(SCRIPT_DIR, 'include')) 12 | 13 | def print_include(): 14 | sys.stdout.write(get_include()) 15 | 16 | def get_lib(): 17 | return os.path.abspath(os.path.join(SCRIPT_DIR, 'lib')) 18 | 19 | def print_lib(): 20 | sys.stdout.write(get_lib()) 21 | 22 | def get_cmake(): 23 | return
os.path.abspath(os.path.join(SCRIPT_DIR, 'cmake', 'Op.cmake')) 24 | 25 | def print_cmake(): 26 | sys.stdout.write(get_cmake()) 27 | 28 | def get_compile_flags(): 29 | return ( 30 | '-std=c++14 -I{include}'.format( 31 | include=get_include())) 32 | 33 | def get_link_flags(): 34 | return ( 35 | '-L{libdir} -lscanner'.format( 36 | libdir=get_lib())) 37 | 38 | def print_compile_flags(): 39 | sys.stdout.write(get_compile_flags()) 40 | 41 | def print_link_flags(): 42 | sys.stdout.write(get_link_flags()) 43 | -------------------------------------------------------------------------------- /scanner/engine/python_kernel.h: -------------------------------------------------------------------------------- 1 | #include "scanner/api/kernel.h" 2 | #include "scanner/api/op.h" 3 | #include "scanner/util/memory.h" 4 | #include "scanner/metadata.pb.h" 5 | 6 | namespace scanner { 7 | 8 | class PythonKernel : public StenciledBatchedKernel { 9 | public: 10 | PythonKernel(const KernelConfig& config, 11 | const std::string& op_name, 12 | const std::string& kernel_code, 13 | const bool can_batch, 14 | const bool can_stencil); 15 | 16 | ~PythonKernel(); 17 | 18 | void new_stream(const std::vector<u8>& args) override; 19 | 20 | void execute(const StenciledBatchedElements& input_columns, 21 | BatchedElements& output_columns) override; 22 | 23 | void reset() override; 24 | 25 | void fetch_resources(proto::Result* result) override; 26 | 27 | void setup_with_resources(proto::Result* result) override; 28 | 29 | private: 30 | KernelConfig config_; 31 | DeviceHandle device_; 32 | bool can_batch_; 33 | bool can_stencil_; 34 | std::string op_name_; 35 | std::string process_name_; 36 | std::string send_pipe_name_; 37 | std::string recv_pipe_name_; 38 | std::string kernel_name_; 39 | }; 40 | 41 | } 42 | -------------------------------------------------------------------------------- /cmake/Modules/FindStruck.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Struck 2 | # 3 | # The following variables are optionally searched for defaults 4 | # STRUCK_ROOT_DIR: Base directory where all Struck components are found 5 | # 6 | # The following are set after configuration is done: 7 | # STRUCK_FOUND 8 | # STRUCK_INCLUDE_DIRS 9 | # STRUCK_LIBRARIES 10 | # STRUCK_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(STRUCK_ROOT_DIR "" CACHE PATH "Folder contains Struck") 15 | 16 | if (NOT "$ENV{Struck_DIR}" STREQUAL "") 17 | set(STRUCK_ROOT_DIR $ENV{Struck_DIR}) 18 | endif() 19 | 20 | # We are testing only a couple of files in the include directories 21 | if(WIN32) 22 | find_path(STRUCK_INCLUDE_DIR struck/tracker.h 23 | PATHS ${STRUCK_ROOT_DIR}/src/windows) 24 | else() 25 | find_path(STRUCK_INCLUDE_DIR struck/tracker.h 26 | PATHS ${STRUCK_ROOT_DIR}/include) 27 | endif() 28 | 29 | find_library(STRUCK_LIBRARY struck PATHS ${STRUCK_ROOT_DIR}/lib) 30 | 31 | find_package_handle_standard_args(STRUCK DEFAULT_MSG 32 | STRUCK_INCLUDE_DIR STRUCK_LIBRARY) 33 | 34 | if(STRUCK_FOUND) 35 | set(STRUCK_INCLUDE_DIRS ${STRUCK_INCLUDE_DIR}) 36 | set(STRUCK_LIBRARIES ${STRUCK_LIBRARY}) 37 | endif() 38 | -------------------------------------------------------------------------------- /scanner/util/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in
compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | set(SOURCE_FILES 16 | common.cpp 17 | memory.cpp 18 | profiler.cpp 19 | fs.cpp 20 | bbox.cpp 21 | glog.cpp 22 | lodepng.cpp 23 | image_encoder.cpp) 24 | 25 | if (NO_FFMPEG STREQUAL "false") 26 | list(APPEND SOURCE_FILES 27 | ffmpeg.cpp) 28 | endif() 29 | 30 | add_library(util OBJECT 31 | ${SOURCE_FILES}) 32 | 33 | if (BUILD_CUDA) 34 | cuda_add_library(util_cuda 35 | image.cu) 36 | install(TARGETS util_cuda 37 | EXPORT ScannerTarget 38 | ARCHIVE DESTINATION lib 39 | LIBRARY DESTINATION lib) 40 | endif() 41 | -------------------------------------------------------------------------------- /cmake/Modules/FindGipuma.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Gipuma 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GIPUMA_ROOT_DIR: Base directory where all Gipuma components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GIPUMA_FOUND 8 | # GIPUMA_INCLUDE_DIRS 9 | 10 | include(FindPackageHandleStandardArgs) 11 | 12 | set(GIPUMA_ROOT_DIR "" CACHE PATH "Folder contains Gipuma") 13 | 14 | if (NOT "$ENV{Gipuma_DIR}" STREQUAL "") 15 | set(GIPUMA_ROOT_DIR $ENV{Gipuma_DIR} CACHE PATH "Folder contains Gipuma" FORCE) 16 | endif() 17 | 18 | # We are testing only a couple of files in the include directories 19 | if(WIN32) 20 | find_path(GIPUMA_INCLUDE_DIR gipuma.h 21 | PATHS ${GIPUMA_ROOT_DIR}/src/windows 22 | PATH_SUFFIXES gipuma) 23 | else() 24 | find_path(GIPUMA_INCLUDE_DIR gipuma.h 25 | PATHS ${GIPUMA_ROOT_DIR}/include 26 | PATH_SUFFIXES gipuma) 27 | 28 | endif() 29 | 30 | find_library(GIPUMA_LIBRARY gipuma PATHS ${GIPUMA_ROOT_DIR}/lib) 31 | 32 | find_package_handle_standard_args(GIPUMA DEFAULT_MSG GIPUMA_INCLUDE_DIR 33 | GIPUMA_LIBRARY) 34 | 35 | if(GIPUMA_FOUND) 36 | set(GIPUMA_INCLUDE_DIRS ${GIPUMA_INCLUDE_DIR}) 37 | set(GIPUMA_LIBRARIES ${GIPUMA_LIBRARY}) 38 | endif() 39 | -------------------------------------------------------------------------------- /examples/util.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | try: 4 | import requests 5 | except ImportError: 6 | print( 7 | 'You need to install requests to run this. 
Try running:\npip install requests' 8 | ) 9 | exit() 10 | 11 | VID_URL = "https://storage.googleapis.com/scanner-data/public/sample-clip.mp4" 12 | VID_PATH = '/tmp/example.mp4' 13 | 14 | IMG_PATH = '/tmp/example.mp4' 15 | 16 | 17 | def download_video(): 18 | if not os.path.isfile(VID_PATH): 19 | with open(VID_PATH, 'wb') as f: 20 | resp = requests.get(VID_URL, stream=True) 21 | assert resp.ok 22 | for block in resp.iter_content(1024): 23 | f.write(block) 24 | f.flush() 25 | return VID_PATH 26 | 27 | 28 | def download_images(): 29 | img_template = ( 30 | 'https://storage.googleapis.com/scanner-data/public/sample-frame-{:d}.jpg') 31 | output_template = 'sample-frame-{:d}.jpg' 32 | 33 | for i in range(1, 4): 34 | with open(output_template.format(i), 'wb') as f: 35 | resp = requests.get(img_template.format(i), stream=True) 36 | assert resp.ok 37 | for block in resp.iter_content(1024): 38 | f.write(block) 39 | f.flush() 40 | -------------------------------------------------------------------------------- /scanner/engine/op_registry.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/op.h" 19 | #include "scanner/engine/op_info.h" 20 | 21 | #include "scanner/util/common.h" 22 | 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class OpRegistry { 29 | public: 30 | Result add_op(const std::string& name, OpInfo* info); 31 | 32 | OpInfo* get_op_info(const std::string& name) const; 33 | 34 | bool has_op(const std::string& name) const; 35 | 36 | private: 37 | std::map ops_; 38 | }; 39 | 40 | OpRegistry* get_op_registry(); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /scanner/engine/sampler_registry.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/op.h" 19 | #include "scanner/engine/op_info.h" 20 | 21 | #include "scanner/util/common.h" 22 | 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class SamplerRegistry { 29 | public: 30 | void add_sampler(const std::string& name, Sampler* info); 31 | 32 | OpInfo* get_op_info(const std::string& name) const; 33 | 34 | bool has_op(const std::string& name) const; 35 | 36 | private: 37 | std::map ops_; 38 | }; 39 | 40 | OpRegistry* get_op_registry(); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /scanner/engine/sink_registry.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/sink.h" 19 | #include "scanner/engine/sink_factory.h" 20 | 21 | #include "scanner/util/common.h" 22 | 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class SinkRegistry { 29 | public: 30 | Result add_sink(const std::string& name, SinkFactory* factory); 31 | 32 | bool has_sink(const std::string& name); 33 | 34 | SinkFactory* get_sink(const std::string& name); 35 | 36 | private: 37 | std::map factories_; 38 | }; 39 | 40 | SinkRegistry* get_sink_registry(); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /scanner/engine/source_registry.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/source.h" 19 | #include "scanner/engine/source_factory.h" 20 | 21 | #include "scanner/util/common.h" 22 | 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class SourceRegistry { 29 | public: 30 | Result add_source(const std::string& name, SourceFactory* factory); 31 | 32 | bool has_source(const std::string& name); 33 | 34 | SourceFactory* get_source(const std::string& name); 35 | 36 | private: 37 | std::map factories_; 38 | }; 39 | 40 | SourceRegistry* get_source_registry(); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /cmake/Modules/FindTinyToml.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find tinytoml library 2 | # 3 | # The following variables are optionally searched for defaults 4 | # TINYTOML_ROOT_DIR: Base directory where all Storage components are found 5 | # 6 | # The following are set after configuration is done: 7 | # TINYTOML_FOUND 8 | # TINYTOML_INCLUDE_DIRS 9 | 10 | include(FindPackageHandleStandardArgs) 11 | 12 | set(TINYTOML_ROOT_DIR "" CACHE PATH "Folder contains TinyToml") 13 | 14 | if (NOT "$ENV{TinyToml_DIR}" STREQUAL "") 15 | set(TINYTOML_ROOT_DIR $ENV{TinyToml_DIR} CACHE PATH "Folder contains TinyToml" 16 | FORCE) 17 | elseif(TinyToml_DIR) 18 | set(TINYTOML_ROOT_DIR ${TinyToml_DIR} CACHE PATH "Folder contains TinyToml" 19 | FORCE) 20 | endif() 21 | 22 | # We are testing only a couple of files in the include directories 23 | if(WIN32) 24 | find_path(TINYTOML_INCLUDE_DIR toml/toml.h 25 | PATHS ${TINYTOML_ROOT_DIR}/src/windows) 26 | else() 27 | find_path(TINYTOML_INCLUDE_DIR toml/toml.h 28 | PATHS 29 | ${TINYTOML_ROOT_DIR}/include 30 | ${CMAKE_SOURCE_DIR}/thirdparty/build/bin/tinytoml/include) 31 | endif() 32 | 33 | find_package_handle_standard_args(TINYTOML DEFAULT_MSG 34 | TINYTOML_INCLUDE_DIR) 35 | 36 | if(TINYTOML_FOUND) 37 | set(TINYTOML_INCLUDE_DIRS ${TINYTOML_INCLUDE_DIR}) 38 | endif() 39 | -------------------------------------------------------------------------------- /docs/publications.rst: -------------------------------------------------------------------------------- 1 | Publications 2 | ===== 3 | 4 | Scanner is an active research project, part of a collaboration between Carnegie 5 | Mellon and Stanford University. Please contact 6 | `Alex Poms `_ and 7 | `Will Crichton `_ with questions. 8 | 9 | 10 | Paper citation 11 | -------------- 12 | Scanner was published at SIGGRAPH 2018 as `"Scanner: Efficient Video Analysis at Scale `__ by Poms, Crichton, Hanrahan, and Fatahalian. If you use Scanner in your research, we'd appreciate it if you cite the paper. 13 | 14 | .. code-block:: bibtex 15 | 16 | @article{Poms:2018:Scanner, 17 | author = {Poms, Alex and Crichton, Will and Hanrahan, Pat and Fatahalian, Kayvon}, 18 | title = {Scanner: Efficient Video Analysis at Scale}, 19 | journal = {ACM Trans. 
Graph.}, 20 | issue_date = {August 2018}, 21 | volume = {37}, 22 | number = {4}, 23 | month = jul, 24 | year = {2018}, 25 | issn = {0730-0301}, 26 | pages = {138:1--138:13}, 27 | articleno = {138}, 28 | numpages = {13}, 29 | url = {http://doi.acm.org/10.1145/3197517.3201394}, 30 | doi = {10.1145/3197517.3201394}, 31 | acmid = {3201394}, 32 | publisher = {ACM}, 33 | address = {New York, NY, USA}, 34 | } 35 | -------------------------------------------------------------------------------- /scripts/startup_node.sh: -------------------------------------------------------------------------------- 1 | if [ -z "$1" ] 2 | then 3 | echo "Usage: startup_node.sh " 4 | exit 5 | fi 6 | 7 | num_gpus=$2 8 | if [ -z "$2" ] 9 | then 10 | num_gpus=1 11 | echo "num_gpus not specified. Defaulting to 1" 12 | fi 13 | 14 | gcloud compute --project "visualdb-1046" disks create "hackinstance-$1" --size "20" --zone "us-east1-d" --source-snapshot "hacksnapshot" --type "pd-standard" 15 | gcloud beta compute --project "visualdb-1046" instances create "hackinstance-$1" --zone "us-east1-d" --machine-type "n1-standard-4" --network "default" --metadata "ssh-keys=ubuntu:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXJ3JrrWKc0TAM5KBXYmuTVAG06DyA8F1hHbqUULCNp767bDNN1dTF9zTo+ZDWdCuHm49XWrpRK552G8U0A55HvBEjOj4eEUSuAibd0uDAYMZr3dJNTzXNU/KfgnbJYGbRboBk3fu47D4bhKPmjX5ZDsSN++BuUYpf1bH829invPBzlGeBb/QRe3Jk9DMK/swIqFc4j6PWeOItj4/1flXFFruR/bT0p2/MIxTTAMAWlhHRYqhtia1YYMbfdv38eqZMH1GY+n7GQJTuKBDvz0qPxCus86xaE4vCawD+iQJFuD8XxppsHbc1+oCAmi5AtbUeHXjXirN95itMBi7S2evd ubuntu,node_id=$1" --maintenance-policy "TERMINATE" --service-account "50518136478-compute@developer.gserviceaccount.com" --scopes "https://www.googleapis.com/auth/cloud-platform" --accelerator type=nvidia-tesla-k80,count=$num_gpus --tags "http-server","https-server" --disk "name=hackinstance-$1,device-name=hackinstance-$1,mode=rw,boot=yes,auto-delete=yes" 16 | 17 | -------------------------------------------------------------------------------- /cmake/Modules/FindFolly.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Folly 2 | # 3 | # The following variables are optionally searched for defaults 4 | # FOLLY_ROOT_DIR: Base directory where all folly components are found 5 | # 6 | # The following are set after configuration is done: 7 | # FOLLY_FOUND 8 | # FOLLY_INCLUDE_DIRS 9 | # FOLLY_LIBRARIES 10 | # FOLLY_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(FOLLY_ROOT_DIR "" CACHE PATH "Folder contains Folly") 15 | 16 | if (NOT "$ENV{Folly_DIR}" STREQUAL "") 17 | set(FOLLY_ROOT_DIR $ENV{Folly_DIR}) 18 | endif() 19 | 20 | # We are testing only a couple of files in the include directories 21 | if(WIN32) 22 | find_path(FOLLY_INCLUDE_DIR folly/FBVector.h 23 | PATHS ${FOLLY_ROOT_DIR}/src/windows) 24 | else() 25 | find_path(FOLLY_INCLUDE_DIR folly/FBVector.h 26 | PATHS ${FOLLY_ROOT_DIR}/include) 27 | endif() 28 | 29 | find_library(FOLLY_LIBRARY folly 30 | PATHS ${FOLLY_ROOT_DIR}/lib) 31 | find_library(FOLLY_BENCHMARK_LIBRARY follybenchmark 32 | PATHS ${FOLLY_ROOT_DIR}/lib) 33 | 34 | find_package_handle_standard_args(FOLLY DEFAULT_MSG 35 | FOLLY_INCLUDE_DIR FOLLY_LIBRARY) 36 | 37 | if(FOLLY_FOUND) 38 | set(FOLLY_INCLUDE_DIRS ${FOLLY_INCLUDE_DIR}) 39 | set(FOLLY_LIBRARIES 40 | ${FOLLY_LIBRARY} 41 | ${FOLLY_BENCHMARK_LIBRARY}) 42 | endif() 43 | -------------------------------------------------------------------------------- /docker/ubuntu16.04/Dockerfile.cpu: 
-------------------------------------------------------------------------------- 1 | # Scanner base CPU image for Ubuntu 16.04 2 | 3 | ARG base_tag 4 | FROM ${base_tag} 5 | MAINTAINER Will Crichton "wcrichto@cs.stanford.edu" 6 | ARG cores=1 7 | 8 | RUN bash ./deps.sh --root-install --install-all --without-openvino --prefix /usr/local && \ 9 | rm -rf /opt/scanner-base 10 | 11 | ENV PYTHONPATH /usr/local/python:${PYTHONPATH} 12 | ENV PYTHONPATH /usr/local/lib/python3.5/site-packages:${PYTHONPATH} 13 | ENV PYTHONPATH /usr/local/lib/python3.5/dist-packages:${PYTHONPATH} 14 | 15 | #ENV INTEL_OPENVINO_DIR /usr/local/intel/openvino_2019.3.376 16 | #ENV INTEL_CVSDK_DIR $INTEL_OPENVINO_DIR 17 | #ENV InferenceEngine_DIR $INTEL_OPENVINO_DIR/deployment_tools/inference_engine/share 18 | #ENV IE_PLUGINS_PATH $INTEL_OPENVINO_DIR/deployment_tools/inference_engine/lib/intel64 19 | #ENV HDDL_INSTALL_DIR $INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/hddl 20 | ENV Caffe_DIR /usr/local 21 | #ENV LD_LIBRARY_PATH \ 22 | # "$HDDL_INSTALL_DIR/lib:$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/gna/lib:$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/mkltiny_lnx/lib:$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/tbb/lib:$IE_PLUGINS_PATH:$LD_LIBRARY_PATH" 23 | #ENV PYTHONPATH /usr/local/intel/openvino_2019.3.376/python/python3.5:$PYTHONPATH 24 | 25 | WORKDIR / 26 | -------------------------------------------------------------------------------- /scanner/engine/enumerator_registry.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/enumerator.h" 19 | #include "scanner/engine/enumerator_factory.h" 20 | 21 | #include "scanner/util/common.h" 22 | 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class EnumeratorRegistry { 29 | public: 30 | void add_enumerator(const std::string& name, EnumeratorFactory* factory); 31 | 32 | bool has_enumerator(const std::string& name); 33 | 34 | EnumeratorFactory* get_enumerator(const std::string& name); 35 | 36 | private: 37 | std::map factories_; 38 | }; 39 | 40 | EnumeratorRegistry* get_enumerator_registry(); 41 | 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/master.yml.template: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: scanner-master 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | app: scanner-master 11 | spec: 12 | containers: 13 | - name: scanner-master 14 | image: 15 | imagePullPolicy: Always 16 | env: 17 | - name: AWS_ACCESS_KEY_ID 18 | valueFrom: 19 | secretKeyRef: 20 | name: aws-storage-key 21 | key: AWS_ACCESS_KEY_ID 22 | - name: AWS_SECRET_ACCESS_KEY 23 | valueFrom: 24 | secretKeyRef: 25 | name: aws-storage-key 26 | key: AWS_SECRET_ACCESS_KEY 27 | - name: GLOG_logtostderr 28 | value: '1' 29 | - name: GLOG_minloglevel 30 | value: '0' 31 | - name: GLOG_v 32 | value: '2' 33 | resources: 34 | requests: 35 | cpu: 32.0 36 | ports: 37 | - containerPort: 8080 38 | volumeMounts: 39 | - name: scanner-config 40 | mountPath: /root/.scanner/ 41 | volumes: 42 | - name: scanner-config 43 | configMap: 44 | name: scanner-configmap 45 | -------------------------------------------------------------------------------- /scanner/util/image.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include 19 | 20 | #ifdef HAVE_CUDA 21 | #include 22 | #endif 23 | 24 | namespace scanner { 25 | 26 | using u8 = uint8_t; 27 | 28 | class InputFormat; 29 | 30 | #ifdef HAVE_CUDA 31 | cudaError_t convertNV12toRGBA(const u8* in, size_t in_pitch, u8* out, 32 | size_t out_pitch, int width, int height, 33 | cudaStream_t stream); 34 | 35 | cudaError_t convertRGBInterleavedToPlanar(const u8* in, size_t in_pitch, 36 | u8* out, size_t out_pitch, int width, 37 | int height, cudaStream_t stream); 38 | #endif 39 | } 40 | -------------------------------------------------------------------------------- /scanner/api/enumerator.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 
5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/api/enumerator.h" 17 | #include "scanner/engine/enumerator_factory.h" 18 | #include "scanner/engine/enumerator_registry.h" 19 | #include "scanner/util/memory.h" 20 | 21 | namespace scanner { 22 | namespace internal { 23 | 24 | EnumeratorRegistration::EnumeratorRegistration( 25 | const EnumeratorBuilder& builder) { 26 | const std::string& name = builder.name_; 27 | EnumeratorConstructor constructor = builder.constructor_; 28 | internal::EnumeratorFactory* factory = new internal::EnumeratorFactory( 29 | name, builder.protobuf_name_, constructor); 30 | internal::EnumeratorRegistry* registry = internal::get_enumerator_registry(); 31 | registry->add_enumerator(name, factory); 32 | } 33 | 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /scanner/engine/enumerator_registry.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/engine/enumerator_registry.h" 17 | 18 | namespace scanner { 19 | namespace internal { 20 | 21 | void EnumeratorRegistry::add_enumerator(const std::string& name, 22 | EnumeratorFactory* factory) { 23 | factories_.insert({name, factory}); 24 | } 25 | 26 | bool EnumeratorRegistry::has_enumerator(const std::string& name) { 27 | return factories_.count(name) > 0; 28 | } 29 | 30 | EnumeratorFactory* EnumeratorRegistry::get_enumerator(const std::string& name) { 31 | return factories_.at(name); 32 | } 33 | 34 | EnumeratorRegistry* get_enumerator_registry() { 35 | static EnumeratorRegistry* registry = new EnumeratorRegistry; 36 | return registry; 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /docs/guide/quickstart.rst: -------------------------------------------------------------------------------- 1 | .. _quickstart: 2 | 3 | Quickstart 4 | ========== 5 | 6 | If you want to try Scanner out on one of your own videos as quickly as possible, install `Docker `__ (if you have a GPU and you're running on Linux, you can also install `nvidia-docker `__ which provides GPU support inside Docker containers). Then run: 7 | 8 | .. 
code-block:: bash 9 | 10 | pip3 install --upgrade docker-compose 11 | wget https://raw.githubusercontent.com/scanner-research/scanner/master/docker/docker-compose.yml 12 | docker-compose pull cpu 13 | docker-compose run --service-ports cpu /bin/bash 14 | 15 | This will install docker-compose (a utility for managing docker containers), pull down the docker compose file for Scanner, and then attempt to start a container with Scanner installed. **NOTE:** The first time you run this code, it may require downloading a ~5GB Docker container for Scanner. Now you can run any of the example applications on your video: 16 | 17 | .. code-block:: bash 18 | 19 | cd /opt/scanner/examples/apps/walkthroughs 20 | wget https://storage.googleapis.com/scanner-data/public/sample-clip.mp4 21 | python3 grayscale_conversion.py 22 | 23 | For more information about installation options, check out :ref:`getting-started`. 24 | 25 | To learn more about how Scanner works, check out the walkthroughs :ref:`walkthrough`. 26 | -------------------------------------------------------------------------------- /examples/apps/detectron/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scannerresearch/scanner:gpu-9.1-cudnn7 2 | WORKDIR /opt/detectron 3 | 4 | RUN cd /opt/detectron && \ 5 | git clone --recursive https://github.com/pytorch/pytorch.git && \ 6 | cd pytorch && \ 7 | git submodule update --init && \ 8 | mkdir build && cd build && \ 9 | cmake .. -DCMAKE_DISABLE_FIND_PACKAGE_Eigen3=TRUE \ 10 | -DBUILD_CUSTOM_PROTOBUF=OFF \ 11 | -DBUILD_TEST=OFF \ 12 | -DPYTHON_INCLUDE_DIR=/usr/include/python3.5/ \ 13 | -DPYTHON_EXECUTABLE=/usr/bin/python3 \ 14 | -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.5m.so \ 15 | -DBUILD_CUSTOM_PROTOBUF=OFF && \ 16 | make install -j 17 | ENV PYTHONPATH /usr/local/lib/python3/dist-packages:${PYTHONPATH} 18 | 19 | RUN cd /opt/detectron && \ 20 | git clone https://github.com/cocodataset/cocoapi.git && \ 21 | cd cocoapi/PythonAPI && \ 22 | python3 setup.py build_ext install 23 | 24 | RUN pip3 install -y pyyaml urllib2 matplotlib 25 | 26 | RUN cd /opt/detectron && \ 27 | git clone https://github.com/facebookresearch/detectron && \ 28 | git fetch origin pull/110/head:py3 && \ 29 | git checkout py3 && \ 30 | ./python2_to_python3_conversion_automated.sh && \ 31 | sed -i '83s/ref_md5sum/ref_md5sum.decode("utf-8")/' ../lib/utils/io.py && \ 32 | cd detectron/lib && \ 33 | python3 setup.py develop --user 34 | -------------------------------------------------------------------------------- /thirdparty/resources/mkl/silent.cfg: -------------------------------------------------------------------------------- 1 | # Patterns used to check silent configuration file 2 | # 3 | # anythingpat - any string 4 | # filepat - the file location pattern (/file/location/to/license.lic) 5 | # lspat - the license server address pattern (0123@hostname) 6 | # snpat - the serial number pattern (ABCD-01234567) 7 | 8 | # Accept EULA, valid values are: {accept, decline} 9 | ACCEPT_EULA=accept 10 | 11 | # Optional error behavior, valid values are: {yes, no} 12 | CONTINUE_WITH_OPTIONAL_ERROR=yes 13 | 14 | # Install location, valid values are: {/opt/intel, filepat} 15 | PSET_INSTALL_DIR=/opt/intel 16 | 17 | # Continue with overwrite of existing installation directory, valid values are: {yes, no} 18 | CONTINUE_WITH_INSTALLDIR_OVERWRITE=yes 19 | 20 | # List of components to install, valid values are: {ALL, DEFAULTS, anythingpat} 21 | COMPONENTS=DEFAULTS 22 | 23 | # Installation 
mode, valid values are: {install, repair, uninstall} 24 | PSET_MODE=install 25 | 26 | # Directory for non-RPM database, valid values are: {filepat} 27 | #NONRPM_DB_DIR=filepat 28 | 29 | # Path to the cluster description file, valid values are: {filepat} 30 | #CLUSTER_INSTALL_MACHINES_FILE=filepat 31 | 32 | # Perform validation of digital signatures of RPM files, valid values are: {yes, no} 33 | SIGNING_ENABLED=yes 34 | 35 | # Select target architecture of your applications, valid values are: {IA32, INTEL64, ALL} 36 | ARCH_SELECTED=ALL 37 | 38 | -------------------------------------------------------------------------------- /scanner/engine/kernel_registry.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/kernel.h" 19 | #include "scanner/engine/kernel_factory.h" 20 | 21 | #include "scanner/util/common.h" 22 | 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class KernelRegistry { 29 | public: 30 | void add_kernel(const std::string& name, KernelFactory* factory); 31 | 32 | bool has_kernel(const std::string& name, DeviceType device_type); 33 | 34 | KernelFactory* get_kernel(const std::string& name, DeviceType device_type); 35 | 36 | protected: 37 | static std::string factory_name(const std::string& name, DeviceType type); 38 | 39 | private: 40 | std::map factories_; 41 | }; 42 | 43 | KernelRegistry* get_kernel_registry(); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/worker.yml.template: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: scanner-worker 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | app: scanner-worker 11 | spec: 12 | containers: 13 | - name: scanner-worker 14 | image: 15 | imagePullPolicy: Always 16 | securityContext: 17 | privileged: true 18 | capabilities: 19 | add: 20 | - SYS_ADMIN 21 | env: 22 | - name: AWS_ACCESS_KEY_ID 23 | valueFrom: 24 | secretKeyRef: 25 | name: aws-storage-key 26 | key: AWS_ACCESS_KEY_ID 27 | - name: AWS_SECRET_ACCESS_KEY 28 | valueFrom: 29 | secretKeyRef: 30 | name: aws-storage-key 31 | key: AWS_SECRET_ACCESS_KEY 32 | - name: GLOG_logtostderr 33 | value: '1' 34 | - name: GLOG_minloglevel 35 | value: '0' 36 | - name: GLOG_v 37 | value: '2' 38 | resources: 39 | requests: 40 | cpu: 35.0 41 | volumeMounts: 42 | - name: scanner-config 43 | mountPath: /root/.scanner/ 44 | volumes: 45 | - name: scanner-config 46 | configMap: 47 | name: scanner-configmap 48 | -------------------------------------------------------------------------------- /examples/apps/gcp_kubernetes/example.py: -------------------------------------------------------------------------------- 1 | import scannerpy as sp 2 | 
import scannertools.imgproc 3 | 4 | import numpy as np 5 | import cv2 6 | import sys 7 | import os.path 8 | import subprocess as sub 9 | 10 | print('Finding master IP...') 11 | ip = sub.check_output( 12 | ''' 13 | kubectl get pods -l 'app=scanner-master' -o json | \ 14 | jq '.items[0].spec.nodeName' -r | \ 15 | xargs -I {} kubectl get nodes/{} -o json | \ 16 | jq '.status.addresses[] | select(.type == "ExternalIP") | .address' -r 17 | ''', 18 | shell=True).strip().decode('utf-8') 19 | 20 | port = sub.check_output( 21 | ''' 22 | kubectl get svc/scanner-master -o json | \ 23 | jq '.spec.ports[0].nodePort' -r 24 | ''', 25 | shell=True).strip().decode('utf-8') 26 | 27 | master = '{}:{}'.format(ip, port) 28 | 29 | print('Connecting to Scanner database...') 30 | sc = sp.Client(master=master, start_cluster=False, config_path='./config.toml') 31 | 32 | print('Running Scanner job...') 33 | example_video_path = 'sample.mp4' 34 | video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path) 35 | 36 | print(sc.summarize()) 37 | 38 | frames = sc.io.Input([video_stream]) 39 | r_frame = sc.ops.Resize(frame=frames, width=[320], height=[240]) 40 | output_stream = sp.NamedVideoStream(sc, 'example_frame') 41 | output_op = sc.io.Output(r_frame, [output_stream]) 42 | 43 | job_id = sc.run(output_op, sp.PerfParams.estimate()) 44 | 45 | output_stream.save_mp4('resized_video') 46 | 47 | print('Complete!') 48 | -------------------------------------------------------------------------------- /examples/apps/walkthroughs/grayscale_conversion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scannerpy as sp 3 | import scannertools.imgproc 4 | 5 | @sp.register_python_op() 6 | def CloneChannels(config, frame: sp.FrameType) -> sp.FrameType: 7 | return np.dstack([frame for _ in range(config.args['replications'])]) 8 | 9 | def main(): 10 | sc = sp.Client() 11 | 12 | # Create a stored stream to represent the input video 13 | input_stream = sp.NamedVideoStream(sc, 'sample-clip', path='sample-clip.mp4') 14 | 15 | # Define a Computation Graph 16 | frames = sc.io.Input([input_stream]) 17 | sampled_frames = sc.streams.Stride(frames, [2]) # Select every other frame 18 | resized_frames = sc.ops.Resize(frame=sampled_frames, width=[640], height=[480]) # Resize input frame 19 | grayscale_frames = sc.ops.ConvertColor(frame=resized_frames, conversion=['COLOR_RGB2GRAY']) 20 | grayscale3_frames = sc.ops.CloneChannels(frame=grayscale_frames, replications=3) 21 | 22 | # Create a stored stream to represent the output video 23 | output_stream = sp.NamedVideoStream(sc, 'sample-grayscale') 24 | output = sc.io.Output(grayscale3_frames, [output_stream]) 25 | 26 | # Execute the computation graph 27 | sc.run(output, sp.PerfParams.manual(50, 250)) 28 | 29 | # Save the resized video as an mp4 file 30 | output_stream.save_mp4('sample-grayscale') 31 | 32 | input_stream.delete(sc) 33 | output_stream.delete(sc) 34 | 35 | 36 | if __name__ == "__main__": 37 | main() 38 | -------------------------------------------------------------------------------- /examples/apps/detectron/README.md: -------------------------------------------------------------------------------- 1 | Detectron on Scanner 2 | ==================== 3 | 4 | Pre-requisites: 5 | --------------- 6 | 7 | 1. Install Caffe2. If you install from source, make sure to build with: 8 | 9 | ```bash 10 | cmake ..
\ 11 | -DCMAKE_DISABLE_FIND_PACKAGE_Eigen3=TRUE \ 12 | -DBUILD_CUSTOM_PROTOBUF=OFF \ 13 | -DBUILD_TEST=OFF \ 14 | -DPYTHON_INCLUDE_DIR=/usr/include/python3.5/ \ 15 | -DPYTHON_EXECUTABLE=/usr/bin/python3 \ 16 | -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.5m.so 17 | ``` 18 | 19 | 2. Install Detectron. 20 | 21 | 3. Build Scanner without Caffe Ops (Caffe and Caffe2 can not be in the same process): 22 | 23 | ``` 24 | cmake .. -DBUILD_CAFFE_OPS=OFF 25 | ``` 26 | 27 | Example Usage: 28 | -------------- 29 | 30 | ```bash 31 | DETECTRON_PATH=... 32 | 33 | CONFIG_PATH=$DETECTRON_PATH/configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml 34 | WEIGHTS_PATH='https://s3-us-west-2.amazonaws.com/detectron/35861858/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml.02_32_51.SgT4y1cO/output/train/coco_2014_train:coco_2014_valminusminival/generalized_rcnn/model_final.pkl' 35 | 36 | python3 main.py --weights-path $WEIGHTS_PATH --config-path $CONFIG_PATH --video-path example.mp4 37 | ``` 38 | 39 | This will output a video named `example_detected.mp4' overlaid with the network 40 | detections. 41 | 42 | .. note: 43 | 44 | Caffe2 currently crashes when the program is cleaning up. You might see an 45 | error related to CUDA at the end of execution. This is expected. 46 | -------------------------------------------------------------------------------- /examples/how-tos/caffe/resnet.py: -------------------------------------------------------------------------------- 1 | from scannerpy import Database, DeviceType, Job 2 | from scannerpy.stdlib import NetDescriptor 3 | import numpy as np 4 | import cv2 5 | import struct 6 | import sys 7 | import os 8 | sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..') 9 | import util 10 | 11 | video_path = util.download_video() if len(sys.argv) <= 1 else sys.argv[1] 12 | print('Performing classification on video {}'.format(video_path)) 13 | video_name = os.path.splitext(os.path.basename(video_path))[0] 14 | 15 | with Database() as db: 16 | [input_table], _ = db.ingest_videos( 17 | [(video_name, video_path)], force=True) 18 | 19 | descriptor = NetDescriptor.from_file(db, 'nets/resnet.toml') 20 | 21 | batch_size = 48 22 | frame = db.sources.FrameColumn() 23 | caffe_input = db.ops.CaffeInput( 24 | frame = frame, 25 | net_descriptor = descriptor.as_proto(), 26 | batch_size = batch_size, 27 | device=DeviceType.GPU) 28 | caffe_output = db.ops.Caffe( 29 | caffe_frame = caffe_input, 30 | net_descriptor = descriptor.as_proto(), 31 | batch_size = batch_size, 32 | batch = batch_size, 33 | device=DeviceType.GPU) 34 | output = db.sinks.Column(columns={'softmax': caffe_output}) 35 | 36 | job = Job(op_args={ 37 | frame: input_table.column('frame'), 38 | output: input_table.name() + '_classification' 39 | }) 40 | 41 | [output] = db.run(output=output, jobs=[job], pipeline_instances_per_node=1) 42 | -------------------------------------------------------------------------------- /cmake/Modules/FindProxygen.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Proxygen 2 | # 3 | # The following variables are optionally searched for defaults 4 | # PROXYGEN_ROOT_DIR: Base directory where all proxygen components are found 5 | # 6 | # The following are set after configuration is done: 7 | # PROXYGEN_FOUND 8 | # PROXYGEN_INCLUDE_DIRS 9 | # PROXYGEN_LIBRARIES 10 | # PROXYGEN_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(PROXYGEN_ROOT_DIR "" CACHE PATH "Folder contains Proxygen") 15 | 16 | if 
(NOT "$ENV{Proxygen_DIR}" STREQUAL "") 17 | set(PROXYGEN_ROOT_DIR $ENV{proxygen_DIR}) 18 | endif() 19 | 20 | # We are testing only a couple of files in the include directories 21 | if(WIN32) 22 | find_path(PROXYGEN_INCLUDE_DIR proxygen/lib/http/HTTPHeaders.h 23 | PATHS ${PROXYGEN_ROOT_DIR}/src/windows) 24 | else() 25 | find_path(PROXYGEN_INCLUDE_DIR proxygen/lib/http/HTTPHeaders.h 26 | PATHS ${PROXYGEN_ROOT_DIR}/include) 27 | endif() 28 | 29 | find_library(PROXYGEN_LIBRARY proxygenlib 30 | PATHS ${PROXYGEN_ROOT_DIR}/lib) 31 | find_library(PROXYGEN_HTTP_SERVER_LIBRARY proxygenhttpserver 32 | PATHS ${PROXYGEN_ROOT_DIR}/lib) 33 | find_library(PROXYGEN_CURL_LIBRARY proxygencurl 34 | PATHS ${PROXYGEN_ROOT_DIR}/lib) 35 | 36 | find_package_handle_standard_args(PROXYGEN DEFAULT_MSG 37 | PROXYGEN_INCLUDE_DIR PROXYGEN_LIBRARY) 38 | 39 | if(PROXYGEN_FOUND) 40 | set(PROXYGEN_INCLUDE_DIRS ${PROXYGEN_INCLUDE_DIR}) 41 | set(PROXYGEN_LIBRARIES 42 | ${PROXYGEN_LIBRARY} 43 | ${PROXYGEN_HTTP_SERVER_LIBRARY}) 44 | endif() 45 | -------------------------------------------------------------------------------- /cmake/Modules/FindGlog.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Glog 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GLOG_ROOT_DIR: Base directory where all GLOG components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GLOG_FOUND 8 | # GLOG_INCLUDE_DIRS 9 | # GLOG_LIBRARIES 10 | 11 | include(FindPackageHandleStandardArgs) 12 | 13 | set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog") 14 | 15 | if (NOT "$ENV{Glog_DIR}" STREQUAL "") 16 | set(GLOG_ROOT_DIR $ENV{Glog_DIR}) 17 | endif() 18 | 19 | if(WIN32) 20 | find_path(GLOG_INCLUDE_DIR glog/logging.h 21 | HINTS ${GLOG_ROOT_DIR}/src/windows) 22 | else() 23 | find_path(GLOG_INCLUDE_DIR glog/logging.h 24 | HINTS ${GLOG_ROOT_DIR}/include) 25 | endif() 26 | 27 | if(MSVC) 28 | find_library(GLOG_LIBRARY_RELEASE libglog_static 29 | HINTS ${GLOG_ROOT_DIR} 30 | PATH_SUFFIXES Release) 31 | 32 | find_library(GLOG_LIBRARY_DEBUG libglog_static 33 | HINTS ${GLOG_ROOT_DIR} 34 | PATH_SUFFIXES Debug) 35 | 36 | set(GLOG_LIBRARY optimized ${GLOG_LIBRARY_RELEASE} debug ${GLOG_LIBRARY_DEBUG}) 37 | else() 38 | find_library(GLOG_LIBRARY glog 39 | HINTS ${GLOG_ROOT_DIR} 40 | PATH_SUFFIXES 41 | lib 42 | lib64) 43 | endif() 44 | 45 | find_package_handle_standard_args(GLOG DEFAULT_MSG 46 | GLOG_INCLUDE_DIR GLOG_LIBRARY) 47 | 48 | if(GLOG_FOUND) 49 | set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR}) 50 | set(GLOG_LIBRARIES ${GLOG_LIBRARY}) 51 | endif() 52 | -------------------------------------------------------------------------------- /scanner/engine/column_enumerator.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #include "scanner/api/enumerator.h" 17 | 18 | #include "storehouse/storage_backend.h" 19 | #include "scanner/engine/video_index_entry.h" 20 | #include "scanner/engine/table_meta_cache.h" 21 | 22 | #include 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class ColumnEnumerator : public Enumerator { 29 | public: 30 | ColumnEnumerator(const EnumeratorConfig& config); 31 | 32 | i64 total_elements() override; 33 | 34 | ElementArgs element_args_at(i64 element_idx) override; 35 | 36 | void set_table_meta(TableMetaCache* cache); 37 | 38 | private: 39 | Result valid_; 40 | std::string table_name_; 41 | std::string column_name_; 42 | TableMetaCache* table_metadata_; // Caching table metadata 43 | i32 table_id_; 44 | i32 column_id_; 45 | 46 | i64 total_rows_; 47 | }; 48 | 49 | } 50 | } // namespace scanner 51 | -------------------------------------------------------------------------------- /scanner/engine/ingest.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/database.h" 19 | #include "scanner/util/common.h" 20 | 21 | #include "storehouse/storage_backend.h" 22 | #include "storehouse/storage_config.h" 23 | 24 | #include 25 | 26 | namespace scanner { 27 | namespace internal { 28 | 29 | Result ingest_videos(storehouse::StorageConfig* storage_config, 30 | const std::string& db_path, 31 | const std::vector& table_names, 32 | const std::vector& paths, 33 | bool inplace, 34 | std::vector& failed_videos); 35 | 36 | // void ingest_images(storehouse::StorageConfig *storage_config, 37 | // const std::string &db_path, const std::string &table_name, 38 | // const std::vector &paths); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /cmake/Modules/FindGoogleTest.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find googletest library 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GOOGLETEST_ROOT_DIR: Base directory where all Storage components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GOOGLETEST_FOUND 8 | # GOOGLETEST_INCLUDE_DIRS 9 | # GOOGLETEST_LIBRARIES 10 | # GOOGLETEST_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GOOGLETEST_ROOT_DIR "" CACHE PATH "Folder contains GoogleTest") 15 | 16 | if (NOT "$ENV{Googletest_DIR}" STREQUAL "") 17 | set(GOOGLETEST_ROOT_DIR $ENV{GoogleTest_DIR}) 18 | endif() 19 | 20 | # We are testing only a couple of files in the include directories 21 | if(WIN32) 22 | find_path(GOOGLETEST_INCLUDE_DIR gtest/gtest.h 23 | PATHS ${GOOGLETEST_ROOT_DIR}/src/windows) 24 | else() 25 | find_path(GOOGLETEST_INCLUDE_DIR gtest/gtest.h 26 | PATHS 27 | ${GOOGLETEST_ROOT_DIR}/include 28 | 
${CMAKE_SOURCE_DIR}/thirdparty/install/include) 29 | endif() 30 | 31 | find_library(GOOGLETEST_LIBRARY gtest 32 | PATHS 33 | ${GOOGLETEST_ROOT_DIR}/lib 34 | ${CMAKE_SOURCE_DIR}/thirdparty/install/lib) 35 | 36 | find_library(GOOGLETEST_MAIN gtest_main 37 | PATHS 38 | ${GOOGLETEST_ROOT_DIR}/lib 39 | ${CMAKE_SOURCE_DIR}/thirdparty/install/lib) 40 | 41 | find_package_handle_standard_args(GOOGLETEST DEFAULT_MSG 42 | GOOGLETEST_INCLUDE_DIR GOOGLETEST_LIBRARY) 43 | 44 | if(GOOGLETEST_FOUND) 45 | set(GOOGLETEST_INCLUDE_DIRS ${GOOGLETEST_INCLUDE_DIR}) 46 | set(GOOGLETEST_LIBRARIES ${GOOGLETEST_LIBRARY}) 47 | endif() 48 | -------------------------------------------------------------------------------- /docs/guide.rst: -------------------------------------------------------------------------------- 1 | Guide 2 | ===== 3 | 4 | The guide is designed to get you quickly started using Scanner for your application, and then gradually expose you to more of the advanced concepts in Scanner. 5 | 6 | The guide is organized into the following sections: 7 | 8 | - :ref:`quickstart`: run a simple Scanner program in as few steps as possible. 9 | - :ref:`getting-started`: get Scanner set up on your machine and ready for writing your own applications. 10 | - :ref:`walkthrough`: step-by-step walkthrough of a Scanner application. 11 | - :ref:`graphs`: describes computation graphs, which are how applications are represented in Scanner. 12 | - :ref:`ops`: describes the properties of ops (operations). Ops are the nodes in computation graphs that process data. 13 | - :ref:`stored-streams`: describes stored streams, which represent streams of data that Scanner can read and write to. 14 | - :ref:`profiling`: describes how to profile Scanner jobs and tweak parameters to improve performance. 15 | - :ref:`kubernetes`: run Scanner in the cloud with Kubernetes. 16 | 17 | Support 18 | ------- 19 | If you're having trouble using Scanner, the best way to get help is to join the Scanner Research slack channel. Send an email to wcrichto@cs.stanford.edu with the subject line "Slack Access" to join our Slack channel. 20 | 21 | If you'd like to talk with the core team directly, contact Alex Poms (https://www.cs.cmu.edu/~apoms/) or Will Crichton (http://willcrichton.net/). 22 | 23 | If you want to request a new feature or file a bug report, please do so using the `GitHub page `__. 24 | -------------------------------------------------------------------------------- /scanner/util/queue.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #include "scanner/util/blockingconcurrentqueue.h" 24 | 25 | namespace scanner { 26 | 27 | using namespace moodycamel; 28 | 29 | template 30 | class Queue : public BlockingConcurrentQueue { 31 | public: 32 | Queue(size_t size=8) : BlockingConcurrentQueue(size) {} 33 | 34 | inline void clear() { 35 | T t; 36 | while (BlockingConcurrentQueue::try_dequeue(t)) {} 37 | } 38 | 39 | inline size_t size() { 40 | return BlockingConcurrentQueue::size_approx(); 41 | } 42 | 43 | inline void push(T item) { 44 | bool success = BlockingConcurrentQueue::enqueue(item); 45 | LOG_IF(FATAL, !success) << "Queue push failed"; 46 | } 47 | 48 | inline void pop(T& item) { 49 | BlockingConcurrentQueue::wait_dequeue(item); 50 | } 51 | }; 52 | 53 | } 54 | -------------------------------------------------------------------------------- /cmake/Modules/FindGFlags.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find GFLAGS 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GFLAGS_FOUND 8 | # GFLAGS_INCLUDE_DIRS 9 | # GFLAGS_LIBRARIES 10 | # GFLAGS_LIBRARYRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags") 15 | 16 | if (NOT "$ENV{GFlags_DIR}" STREQUAL "") 17 | set(GFLAGS_ROOT_DIR $ENV{GFlags_DIR}) 18 | endif() 19 | 20 | # We are testing only a couple of files in the include directories 21 | if(WIN32) 22 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h 23 | HINTS ${GFLAGS_ROOT_DIR}/src/windows) 24 | else() 25 | find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h 26 | HINTS ${GFLAGS_ROOT_DIR}/include) 27 | endif() 28 | 29 | if(MSVC) 30 | find_library(GFLAGS_LIBRARY_RELEASE 31 | NAMES libgflags 32 | HINTS ${GFLAGS_ROOT_DIR} 33 | PATH_SUFFIXES Release) 34 | 35 | find_library(GFLAGS_LIBRARY_DEBUG 36 | NAMES libgflags-debug 37 | HINTS ${GFLAGS_ROOT_DIR} 38 | PATH_SUFFIXES Debug) 39 | 40 | set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG}) 41 | else() 42 | find_library(GFLAGS_LIBRARY gflags 43 | HINTS ${GFLAGS_ROOT_DIR}/lib) 44 | endif() 45 | 46 | find_package_handle_standard_args(GFLAGS DEFAULT_MSG 47 | GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY) 48 | 49 | 50 | if(GFLAGS_FOUND) 51 | set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR}) 52 | set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY}) 53 | endif() 54 | -------------------------------------------------------------------------------- /scanner/engine/kernel_registry.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #include "scanner/engine/kernel_registry.h" 17 | 18 | namespace scanner { 19 | namespace internal { 20 | 21 | void KernelRegistry::add_kernel(const std::string& name, 22 | KernelFactory* factory) { 23 | DeviceType type = factory->get_device_type(); 24 | factories_.insert({factory_name(name, type), factory}); 25 | } 26 | 27 | bool KernelRegistry::has_kernel(const std::string& name, DeviceType type) { 28 | return factories_.count(factory_name(name, type)) > 0; 29 | } 30 | 31 | KernelFactory* KernelRegistry::get_kernel(const std::string& name, 32 | DeviceType type) { 33 | return factories_.at(factory_name(name, type)); 34 | } 35 | 36 | std::string KernelRegistry::factory_name(const std::string& name, 37 | DeviceType type) { 38 | return name + ((type == DeviceType::CPU) ? "_cpu" : "_gpu"); 39 | } 40 | 41 | KernelRegistry* get_kernel_registry() { 42 | static KernelRegistry* registry = new KernelRegistry; 43 | return registry; 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /cmake/Modules/FindIconv.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Iconv 2 | # Once done this will define 3 | # 4 | # ICONV_FOUND - system has Iconv 5 | # ICONV_INCLUDE_DIR - the Iconv include directory 6 | # ICONV_LIBRARIES - Link these to use Iconv 7 | # ICONV_SECOND_ARGUMENT_IS_CONST - the second argument for iconv() is const 8 | # 9 | include(CheckCXXSourceCompiles) 10 | 11 | IF (ICONV_INCLUDE_DIR AND ICONV_LIBRARIES) 12 | # Already in cache, be silent 13 | SET(ICONV_FIND_QUIETLY TRUE) 14 | ENDIF (ICONV_INCLUDE_DIR AND ICONV_LIBRARIES) 15 | 16 | FIND_PATH(ICONV_INCLUDE_DIR iconv.h) 17 | 18 | FIND_LIBRARY(ICONV_LIBRARIES NAMES iconv libiconv libiconv-2 c) 19 | 20 | IF(ICONV_INCLUDE_DIR AND ICONV_LIBRARIES) 21 | SET(ICONV_FOUND TRUE) 22 | ENDIF(ICONV_INCLUDE_DIR AND ICONV_LIBRARIES) 23 | 24 | set(CMAKE_REQUIRED_INCLUDES ${ICONV_INCLUDE_DIR}) 25 | set(CMAKE_REQUIRED_LIBRARIES ${ICONV_LIBRARIES}) 26 | IF(ICONV_FOUND) 27 | check_cxx_source_compiles(" 28 | #include 29 | int main(){ 30 | iconv_t conv = 0; 31 | const char* in = 0; 32 | size_t ilen = 0; 33 | char* out = 0; 34 | size_t olen = 0; 35 | iconv(conv, &in, &ilen, &out, &olen); 36 | return 0; 37 | } 38 | " ICONV_SECOND_ARGUMENT_IS_CONST ) 39 | ENDIF(ICONV_FOUND) 40 | set(CMAKE_REQUIRED_INCLUDES) 41 | set(CMAKE_REQUIRED_LIBRARIES) 42 | 43 | IF(ICONV_FOUND) 44 | IF(NOT ICONV_FIND_QUIETLY) 45 | MESSAGE(STATUS "Found Iconv: ${ICONV_LIBRARIES}") 46 | ENDIF(NOT ICONV_FIND_QUIETLY) 47 | ELSE(ICONV_FOUND) 48 | IF(Iconv_FIND_REQUIRED) 49 | MESSAGE(FATAL_ERROR "Could not find Iconv") 50 | ENDIF(Iconv_FIND_REQUIRED) 51 | ENDIF(ICONV_FOUND) 52 | 53 | MARK_AS_ADVANCED( 54 | ICONV_INCLUDE_DIR 55 | ICONV_LIBRARIES 56 | ICONV_SECOND_ARGUMENT_IS_CONST 57 | ) 58 | -------------------------------------------------------------------------------- /scanner/engine/enumerator_factory.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 
5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/enumerator.h" 19 | #include "scanner/util/common.h" 20 | 21 | #include 22 | 23 | namespace scanner { 24 | namespace internal { 25 | 26 | /** 27 | * @brief Interface for constructing enumerators at runtime. 28 | */ 29 | class EnumeratorFactory { 30 | public: 31 | EnumeratorFactory(const std::string& enumerator_name, 32 | const std::string& protobuf_name, 33 | EnumeratorConstructor constructor) 34 | : name_(enumerator_name), 35 | protobuf_name_(protobuf_name), 36 | constructor_(constructor) {} 37 | 38 | const std::string& get_name() const { return name_; } 39 | 40 | const std::string& protobuf_name() const { return protobuf_name_; } 41 | 42 | /* @brief Constructs an enumerator to be used for enumerating elements of data. 43 | */ 44 | Enumerator* new_instance(const EnumeratorConfig& config) { 45 | return constructor_(config); 46 | } 47 | 48 | private: 49 | std::string name_; 50 | std::string protobuf_name_; 51 | EnumeratorConstructor constructor_; 52 | }; 53 | 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /examples/tutorials/07_profiling.py: -------------------------------------------------------------------------------- 1 | import scannerpy as sp 2 | import scannertools.imgproc 3 | 4 | import sys 5 | import os.path 6 | sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..') 7 | import util 8 | 9 | ################################################################################ 10 | # This tutorial shows how to look at profiling information for your job. # 11 | ################################################################################ 12 | 13 | def main(): 14 | sc = sp.Client() 15 | 16 | example_video_path = util.download_video() 17 | video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path) 18 | frames = sc.io.Input([video_stream]) 19 | 20 | resized_frames = sc.ops.Resize(frame=frames, width=[640], height=[480]) 21 | 22 | output_stream = sp.NamedVideoStream(sc, 'example_profile') 23 | output = sc.io.Output(resized_frames, [output_stream]) 24 | 25 | job_id = sc.run(output, sp.PerfParams.estimate()) 26 | 27 | # The profile contains information about how long different parts of your 28 | # computation take to run. We use Google Chrome's trace format, which you 29 | # can view by going to chrome://tracing in Chrome and clicking "load" in 30 | # the top left. 31 | profile = sc.get_profile(job_id) 32 | profile.write_trace('resize-graph.trace') 33 | print('Wrote trace file to "resize-graph.trace".') 34 | 35 | # Each row corresponds to a different part of the system, e.g. the thread 36 | # loading bytes from disk or the thread running your kernels. If you have 37 | # multiple pipelines or multiple nodes, you will see many of these evaluate 38 | # threads.
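    # (Illustrative sanity check; not required for profiling.) Stored streams
    # can be inspected after the job finishes, and since no sampling was
    # applied here the resized output should have one frame per input frame:
    print('Input frames: {}, output frames: {}'.format(
        video_stream.len(), output_stream.len()))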
39 | 40 | video_stream.delete(sc) 41 | output_stream.delete(sc) 42 | 43 | 44 | if __name__ == "__main__": 45 | main() 46 | -------------------------------------------------------------------------------- /scanner/engine/table_meta_cache.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/engine/metadata.h" 19 | 20 | #include 21 | #include 22 | 23 | namespace scanner { 24 | namespace internal { 25 | 26 | class TableMetaCache { 27 | public: 28 | TableMetaCache(storehouse::StorageBackend* storage, 29 | const DatabaseMetadata& meta); 30 | 31 | const TableMetadata& at(const std::string& table_name) const; 32 | 33 | const TableMetadata& at(i32 table_id) const; 34 | 35 | bool exists(const std::string& table_name) const; 36 | 37 | bool exists(i32 table_id) const; 38 | 39 | bool has(const std::string& table_name) const; 40 | 41 | void update(const TableMetadata& meta); 42 | 43 | void prefetch(const std::vector& table_names); 44 | 45 | void write_megafile(); 46 | 47 | private: 48 | void memoized_read(const std::string& table_name) const; 49 | 50 | void memoized_read(i32 table_id) const; 51 | 52 | storehouse::StorageBackend* storage_; 53 | const DatabaseMetadata& meta_; 54 | mutable std::mutex lock_; 55 | mutable std::unordered_map cache_; 56 | }; 57 | 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /scanner/api/source.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #include "scanner/api/source.h" 17 | #include "scanner/engine/source_factory.h" 18 | #include "scanner/engine/source_registry.h" 19 | #include "scanner/util/memory.h" 20 | 21 | namespace scanner { 22 | namespace internal { 23 | 24 | SourceRegistration::SourceRegistration(const SourceBuilder& builder) { 25 | const std::string& name = builder.name_; 26 | std::vector output_columns; 27 | i32 i = 0; 28 | for (auto& name_type : builder.output_columns_) { 29 | Column col; 30 | col.set_id(i++); 31 | col.set_name(std::get<0>(name_type)); 32 | col.set_type(std::get<1>(name_type)); 33 | output_columns.push_back(col); 34 | } 35 | SourceConstructor constructor = builder.constructor_; 36 | internal::SourceFactory* factory = new internal::SourceFactory( 37 | name, output_columns, builder.protobuf_name_, constructor); 38 | internal::SourceRegistry* registry = internal::get_source_registry(); 39 | Result result = registry->add_source(name, factory); 40 | if (!result.success()) { 41 | LOG(WARNING) << "Failed to register source " << name << ": " << result.msg(); 42 | } 43 | } 44 | 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /scanner/engine/source_registry.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/engine/source_registry.h" 17 | 18 | namespace scanner { 19 | namespace internal { 20 | 21 | Result SourceRegistry::add_source(const std::string& name, 22 | SourceFactory* factory) { 23 | Result result; 24 | result.set_success(true); 25 | if (factories_.count(name) > 0) { 26 | RESULT_ERROR(&result, "Attempted to re-register Source %s", name.c_str()); 27 | return result; 28 | } 29 | if (factory->output_columns().empty()) { 30 | RESULT_ERROR(&result, 31 | "Attempted to register op %s with empty output columns", 32 | name.c_str()); 33 | return result; 34 | } 35 | 36 | factories_.insert({name, factory}); 37 | 38 | return result; 39 | } 40 | 41 | bool SourceRegistry::has_source(const std::string& name) { 42 | return factories_.count(name) > 0; 43 | } 44 | 45 | SourceFactory* SourceRegistry::get_source(const std::string& name) { 46 | return factories_.at(name); 47 | } 48 | 49 | SourceRegistry* get_source_registry() { 50 | static SourceRegistry* registry = new SourceRegistry; 51 | return registry; 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /scanner/util/cuda.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 
5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #ifdef HAVE_CUDA 19 | #include 20 | #include 21 | #endif 22 | 23 | #include 24 | 25 | #ifdef HAVE_CUDA 26 | #define CUDA_PROTECT(s) (s); 27 | #else 28 | #define CUDA_PROTECT(s) \ 29 | { LOG(FATAL) << "Cuda not enabled."; } 30 | #endif 31 | 32 | #ifdef HAVE_CUDA 33 | 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | 40 | #define CU_CHECK(ans) \ 41 | { cuAssert((ans), __FILE__, __LINE__); } 42 | 43 | inline void cuAssert(cudaError_t code, const char* file, int line) { 44 | if (code != cudaSuccess) { 45 | LOG(FATAL) << "GPUassert: " << cudaGetErrorString(code) << " " << file 46 | << " " << line; 47 | } 48 | } 49 | 50 | #define CUD_CHECK(ans) \ 51 | { cudAssert((ans), __FILE__, __LINE__); } 52 | 53 | inline void cudAssert(CUresult code, const char* file, int line) { 54 | if (code != CUDA_SUCCESS) { 55 | const char* err_str; 56 | cuGetErrorString(code, &err_str); 57 | LOG(FATAL) << "GPUassert: " << err_str << " " << file << " " << line; 58 | } 59 | } 60 | 61 | #endif 62 | -------------------------------------------------------------------------------- /examples/apps/open-reid-feature-extraction/Readme.md: -------------------------------------------------------------------------------- 1 | Open-ReID on Scanner 2 | ==================== 3 | 4 | # Steps: 5 | ## Build the image 6 | - Build a docker image using the Dockerfile in this folder. 7 | ``` 8 | nvidia-docker build -t scanner-openreid . 9 | ``` 10 | 11 | ## Download the pre-trained model for Open-ReID 12 | ### Option 1: Download the pre-trained model on VIPeR dataset 13 | Link: [model_best.pth.tar](https://drive.google.com/open?id=1LDiX4AyuJbhPzVPJF6BOOjuOaN81F6IZ) 14 | 15 | ### Option 2: Train a model using the original Open-ReID repository. 16 | Link: Follow the instruction on the [Open-ReID repository](https://github.com/Cysu/open-reid) 17 | 18 | ## Run a new docker container 19 | - In this folder, run the following command to create a new docker container and map the current directory to the same path so that we can access the `model_best.pth.tar` from inside the container. 20 | ``` 21 | docker run --runtime=nvidia -it -v $(pwd):/opt/scanner/examples/apps/open-reid-feature-extraction/ scanner-openreid /bin/bash 22 | ``` 23 | 24 | ## Run the example 25 | - Change directory to the `open-reid-feature-extraction` example 26 | ``` 27 | cd /opt/scanner/examples/apps/open-reid-feature-extraction/ 28 | ``` 29 | - Get an example video 30 | ``` 31 | wget https://storage.googleapis.com/scanner-data/public/sample-clip.mp4 32 | ``` 33 | - Run the `extract_features.py` to extract the Open-ReID for every frame. In practice, you should perform the human detection on each frame. Then, provide the frame and the bounding boxes for each person into the Open-ReID kernel. We want to extract the Open-ReID feature for each bounding boxes. In this example, we will skip the human detection part. 
34 | ``` 35 | python3 extract_features.py sample-clip.mp4 model_best.pth.tar 36 | ``` 37 | - The results are saved in the `reid_features.npy` which can be loaded using `np.load("reid_features.npy")` -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/example.py: -------------------------------------------------------------------------------- 1 | from scannerpy import Database, DeviceType, Job 2 | from scannerpy.stdlib import readers 3 | 4 | import numpy as np 5 | import cv2 6 | import sys 7 | import os.path 8 | import subprocess as sp 9 | 10 | print('Finding master IP...') 11 | ip = sp.check_output( 12 | ''' 13 | kubectl get services scanner-master --output json | jq -r '.status.loadBalancer.ingress[0].hostname' 14 | ''', 15 | shell=True).strip().decode('utf-8') 16 | 17 | port = sp.check_output( 18 | ''' 19 | kubectl get svc/scanner-master -o json | \ 20 | jq '.spec.ports[0].port' -r 21 | ''', 22 | shell=True).strip().decode('utf-8') 23 | 24 | master = '{}:{}'.format(ip, port) 25 | print('Master ip: {:s}'.format(master)) 26 | 27 | with open('config.toml', 'w') as f: 28 | config_text = sp.check_output( 29 | ''' 30 | kubectl get configmaps scanner-configmap -o json | \ 31 | jq '.data["config.toml"]' -r 32 | ''', 33 | shell=True).strip().decode('utf-8') 34 | f.write(config_text) 35 | 36 | print('Connecting to Scanner database...') 37 | db = Database( 38 | master=master, 39 | start_cluster=False, 40 | config_path='./config.toml', 41 | grpc_timeout=120) 42 | 43 | print('Running Scanner job...') 44 | example_video_path = 'sample.mp4' 45 | [input_table], _ = db.ingest_videos( 46 | [('example', example_video_path)], force=True, inplace=True) 47 | 48 | print(db.summarize()) 49 | 50 | frame = db.sources.FrameColumn() 51 | r_frame = db.ops.Resize(frame=frame, width=320, height=240) 52 | output_op = db.sinks.Column(columns={'frame': r_frame}) 53 | job = Job(op_args={ 54 | frame: db.table('example').column('frame'), 55 | output_op: 'example_frame' 56 | }) 57 | 58 | output_tables = db.run(output=output_op, jobs=[job], force=True) 59 | 60 | output_tables[0].column('frame').save_mp4('resized_video') 61 | 62 | print('Complete!') 63 | -------------------------------------------------------------------------------- /scanner/util/halide.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "scanner/api/kernel.h" 4 | #include "scanner/util/common.h" 5 | 6 | #include "HalideRuntime.h" 7 | 8 | #ifdef HAVE_CUDA 9 | #include "HalideRuntimeCuda.h" 10 | #include "scanner/util/halide_context.h" 11 | #endif 12 | 13 | namespace scanner { 14 | 15 | void setup_halide_frame_buf(buffer_t& halide_buf, FrameInfo& frame_info) { 16 | // Halide has the input format x * stride[0] + y * stride[1] + c * stride[2] 17 | halide_buf.stride[0] = 3; 18 | halide_buf.stride[1] = frame_info.width() * 3; 19 | halide_buf.stride[2] = 1; 20 | halide_buf.extent[0] = frame_info.width(); 21 | halide_buf.extent[1] = frame_info.height(); 22 | halide_buf.extent[2] = 3; 23 | halide_buf.elem_size = 1; 24 | } 25 | 26 | void set_halide_buf_ptr(const DeviceHandle& device, buffer_t& halide_buf, 27 | u8* buf, size_t size) { 28 | if (device.type == DeviceType::GPU) { 29 | CUDA_PROTECT({ 30 | halide_buf.dev = (uintptr_t) nullptr; 31 | 32 | // "You likely want to set the dev_dirty flag for correctness. 
(It will 33 | // not matter if all the code runs on the GPU.)" 34 | halide_buf.dev_dirty = true; 35 | 36 | i32 err = 37 | halide_cuda_wrap_device_ptr(nullptr, &halide_buf, (uintptr_t)buf); 38 | LOG_IF(FATAL, err != 0) << "Halide wrap device ptr failed"; 39 | 40 | // "You'll need to set the host field of the buffer_t structs to 41 | // something other than nullptr as that is used to indicate bounds query 42 | // calls" - Zalman Stern 43 | halide_buf.host = (u8*)0xdeadbeef; 44 | }); 45 | } else { 46 | halide_buf.host = buf; 47 | } 48 | } 49 | 50 | void unset_halide_buf_ptr(const DeviceHandle& device, buffer_t& halide_buf) { 51 | if (device.type == DeviceType::GPU) { 52 | CUDA_PROTECT({ halide_cuda_detach_device_ptr(nullptr, &halide_buf); }); 53 | } 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /scanner/util/profiler.inl: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/util/profiler.h" 17 | 18 | namespace scanner { 19 | 20 | /////////////////////////////////////////////////////////////////////////////// 21 | /// Profiler 22 | inline void Profiler::add_interval( 23 | const std::string& key, 24 | timepoint_t start, 25 | timepoint_t end, 26 | ProfilerLevel level) 27 | { 28 | if (level < PROFILER_LEVEL) { return; } 29 | spin_lock(); 30 | records_.emplace_back(TaskRecord{ 31 | key, 32 | std::chrono::duration_cast( 33 | start - base_time_).count(), 34 | std::chrono::duration_cast( 35 | end - base_time_).count()}); 36 | unlock(); 37 | } 38 | 39 | inline void Profiler::increment(const std::string& key, int64_t value) { 40 | spin_lock(); 41 | counters_[key] += value; 42 | unlock(); 43 | } 44 | 45 | inline void Profiler::reset(timepoint_t base_time) { 46 | spin_lock(); 47 | base_time_ = base_time; 48 | records_.clear(); 49 | counters_.clear(); 50 | unlock(); 51 | } 52 | 53 | inline void Profiler::spin_lock() { 54 | while (lock_.test_and_set(std::memory_order_acquire)); 55 | } 56 | 57 | inline void Profiler::unlock() { 58 | lock_.clear(std::memory_order_release); 59 | } 60 | 61 | } 62 | -------------------------------------------------------------------------------- /scanner/engine/op_registry.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/engine/op_registry.h" 17 | 18 | namespace scanner { 19 | namespace internal { 20 | 21 | Result OpRegistry::add_op(const std::string& name, OpInfo* info) { 22 | Result result; 23 | result.set_success(true); 24 | if (ops_.count(name) > 0) { 25 | RESULT_ERROR(&result, "Attempted to re-register op %s", name.c_str()); 26 | return result; 27 | } 28 | if (info->input_columns().empty() && !info->variadic_inputs()) { 29 | RESULT_ERROR(&result, 30 | "Attempted to register op %s with empty input columns", 31 | name.c_str()); 32 | return result; 33 | } 34 | if (info->output_columns().empty()) { 35 | RESULT_ERROR(&result, 36 | "Attempted to register op %s with empty output columns", 37 | name.c_str()); 38 | return result; 39 | } 40 | ops_.insert({name, info}); 41 | return result; 42 | } 43 | 44 | OpInfo* OpRegistry::get_op_info(const std::string& name) const { 45 | return ops_.at(name); 46 | } 47 | 48 | bool OpRegistry::has_op(const std::string& name) const { 49 | return ops_.count(name) > 0; 50 | } 51 | 52 | OpRegistry* get_op_registry() { 53 | static OpRegistry* registry = new OpRegistry; 54 | return registry; 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /scanner/engine/source_factory.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/source.h" 19 | #include "scanner/util/common.h" 20 | 21 | #include 22 | 23 | namespace scanner { 24 | 25 | namespace internal { 26 | 27 | /** 28 | * @brief Interface for constructing Sources at runtime. 
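 *
 * A SourceFactory records the Source's registered name, the columns it
 * produces, and the protobuf message type used for its arguments;
 * new_instance() invokes the registered constructor with a SourceConfig
 * to build a Source object.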
29 | */ 30 | class SourceFactory { 31 | public: 32 | SourceFactory(const std::string& name, 33 | const std::vector& output_columns, 34 | const std::string& protobuf_name, 35 | SourceConstructor constructor) 36 | : name_(name), 37 | output_columns_(output_columns), 38 | protobuf_name_(protobuf_name), 39 | constructor_(constructor) {} 40 | 41 | const std::string& get_name() const { return name_; } 42 | 43 | const std::vector& output_columns() const { return output_columns_; } 44 | 45 | const std::string& protobuf_name() const { return protobuf_name_; } 46 | 47 | /* @brief Constructs a source to be used for reading elements 48 | */ 49 | Source* new_instance(const SourceConfig& config) { 50 | return constructor_(config); 51 | } 52 | 53 | private: 54 | std::string name_; 55 | std::vector output_columns_; 56 | std::string protobuf_name_; 57 | SourceConstructor constructor_; 58 | }; 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /examples/tutorials/08_defining_cpp_ops.py: -------------------------------------------------------------------------------- 1 | import scannerpy as sp 2 | 3 | import sys 4 | import os.path 5 | sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..') 6 | import util 7 | 8 | ################################################################################ 9 | # This tutorial shows how to write and use your own C++ custom op. # 10 | ################################################################################ 11 | 12 | def main(): 13 | # Look at resize_op/resize_op.cpp to start this tutorial. 14 | 15 | sc = sp.Client() 16 | 17 | cwd = os.path.dirname(os.path.abspath(__file__)) 18 | if not os.path.isfile(os.path.join(cwd, 'resize_op/build/libresize_op.so')): 19 | print( 20 | 'You need to build the custom op first: \n' 21 | '$ pushd {}/resize_op; mkdir build && cd build; cmake ..; make; popd'. 22 | format(cwd)) 23 | exit() 24 | 25 | # To load a custom op into the Scanner runtime, we use db.load_op to open the 26 | # shared library we compiled. If the op takes arguments, it also optionally 27 | # takes a path to the generated python file for the arg protobuf. 28 | sc.load_op( 29 | os.path.join(cwd, 'resize_op/build/libresize_op.so'), 30 | os.path.join(cwd, 'resize_op/build/resize_pb2.py')) 31 | 32 | example_video_path = util.download_video() 33 | video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path) 34 | frames = sc.io.Input([video_stream]) 35 | 36 | # Then we use our op just like in the other examples. 37 | resized_frames = sc.ops.MyResize(frame=frames, width=200, height=300) 38 | 39 | output_stream = sp.NamedVideoStream(sc, 'example_resized') 40 | output = sc.io.Output(resized_frames, [output_stream]) 41 | 42 | sc.run(output, sp.PerfParams.estimate()) 43 | 44 | video_stream.delete(sc) 45 | output_stream.delete(sc) 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /tests/videos.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 
5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/util/fs.h" 17 | #include "scanner/video/decoder_automata.h" 18 | 19 | #include 20 | 21 | namespace scanner { 22 | struct TestVideoInfo { 23 | TestVideoInfo(i32 w, i32 h, const std::string& u, const std::string& m) 24 | : width(w), height(h), data_url(u), metadata_url(m) {} 25 | 26 | i32 width; 27 | i32 height; 28 | std::string data_url; 29 | std::string metadata_url; 30 | }; 31 | 32 | const TestVideoInfo short_video( 33 | 640, 480, 34 | "https://storage.googleapis.com/scanner-data/test/short_video.h264", 35 | "https://storage.googleapis.com/scanner-data/test/short_video_meta.bin"); 36 | 37 | const TestVideoInfo long_video( 38 | 640, 480, 39 | "https://storage.googleapis.com/scanner-data/test/long_video.h264", 40 | "https://storage.googleapis.com/scanner-data/test/long_video_meta.bin"); 41 | 42 | inline std::string download_video(const TestVideoInfo& info) { 43 | std::string local_video_path; 44 | temp_file(local_video_path); 45 | download(info.data_url, local_video_path); 46 | return local_video_path; 47 | } 48 | 49 | inline std::string download_video_meta(const TestVideoInfo& info) { 50 | std::string local_path; 51 | temp_file(local_path); 52 | download(info.metadata_url, local_path); 53 | return local_path; 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /cmake/Modules/FindGRPC.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find grpc library 2 | # 3 | # The following variables are optionally searched for defaults 4 | # GRPC_DIR: Base directory where all components are found 5 | # 6 | # The following are set after configuration is done: 7 | # GRPC_FOUND 8 | # GRPC_INCLUDE_DIRS 9 | # GRPC_LIBRARIES 10 | # GRPC_LIBRARY_DIRS 11 | 12 | include(FindPackageHandleStandardArgs) 13 | 14 | set(GRPC_ROOT_DIR "" CACHE PATH "Folder contains GRPC") 15 | 16 | if (NOT "$ENV{GRPC_DIR}" STREQUAL "") 17 | set(GRPC_DIR $ENV{GRPC_DIR}) 18 | endif() 19 | 20 | # We are testing only a couple of files in the include directories 21 | if(WIN32) 22 | find_path(GRPC_INCLUDE_DIR grpc/grpc.h 23 | PATHS ${GRPC_ROOT_DIR}/src/windows) 24 | else() 25 | find_path(GRPC_INCLUDE_DIR grpc/grpc.h 26 | PATHS 27 | ${GRPC_DIR}/include) 28 | endif() 29 | 30 | find_library(GRPCPP_UNSECURE_LIBRARY grpc++_unsecure 31 | PATHS 32 | ${GRPC_DIR}/lib) 33 | 34 | find_library(GRPC_LIBRARY grpc 35 | PATHS 36 | ${GRPC_DIR}/lib) 37 | 38 | find_library(GPR_LIBRARY gpr 39 | PATHS 40 | ${GRPC_DIR}/lib) 41 | 42 | find_package_handle_standard_args(GRPC DEFAULT_MSG 43 | GRPC_INCLUDE_DIR GRPC_LIBRARY) 44 | 45 | # Get GRPC version info 46 | set(GRPC_VERSION_PROG 47 | "#include 48 | 49 | int main(void) { 50 | std::cout << grpc::Version(); 51 | }") 52 | execute_process(COMMAND "echo" "${GRPC_VERSION_PROG}" OUTPUT_FILE "/tmp/test.cpp") 53 | set(EX ${CMAKE_CXX_FLAGS}) 54 | separate_arguments(EX) 55 | if(APPLE) 56 | set(EX ${EX} "-isysroot" ${CMAKE_OSX_SYSROOT}) 57 | endif() 58 | execute_process(COMMAND "${CMAKE_CXX_COMPILER}" "/tmp/test.cpp" 59 | "-I${GRPC_INCLUDE_DIR}" 
"${GRPCPP_UNSECURE_LIBRARY}" ${EX} "-o" "/tmp/test.out") 60 | execute_process(COMMAND "/tmp/test.out" OUTPUT_VARIABLE GRPC_VERSION) 61 | 62 | if(GRPC_FOUND) 63 | set(GRPC_INCLUDE_DIRS ${GRPC_INCLUDE_DIR}) 64 | set(GRPC_LIBRARIES ${GRPCPP_UNSECURE_LIBRARY} ${GRPC_LIBRARY} ${GPR_LIBRARY}) 65 | endif() 66 | -------------------------------------------------------------------------------- /scanner/engine/video_index_entry.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/engine/metadata.h" 19 | #include "scanner/engine/runtime.h" 20 | #include "scanner/util/common.h" 21 | 22 | #include "storehouse/storage_backend.h" 23 | 24 | namespace scanner { 25 | namespace internal { 26 | 27 | struct VideoIndexEntry { 28 | std::unique_ptr open_file() const; 29 | 30 | storehouse::StorageBackend* storage; 31 | std::string path; 32 | bool inplace; 33 | i32 table_id; 34 | i32 column_id; 35 | i32 item_id; 36 | i32 width; 37 | i32 height; 38 | i32 channels; 39 | FrameType frame_type; 40 | proto::VideoDescriptor::VideoCodecType codec_type; 41 | u64 file_size; 42 | i32 num_encoded_videos; 43 | std::vector frames_per_video; 44 | std::vector keyframes_per_video; 45 | std::vector size_per_video; 46 | 47 | std::vector keyframe_indices; 48 | std::vector sample_offsets; 49 | std::vector sample_sizes; 50 | std::vector metadata; 51 | }; 52 | 53 | VideoIndexEntry read_video_index(storehouse::StorageBackend *storage, 54 | i32 table_id, i32 column_id, i32 item_id); 55 | 56 | VideoIndexEntry read_video_index(storehouse::StorageBackend *storage, 57 | const VideoMetadata& video_meta); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /scripts/travis-test.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/bin/bash 3 | 4 | # Writing output (bell) keeps travis from timing out 5 | # https://github.com/travis-ci/travis-ci/issues/7961 6 | function bell() { 7 | while true; do 8 | echo -e "\a" 9 | sleep 60 10 | done 11 | } 12 | bell & 13 | 14 | set -e 15 | 16 | # The Travis VM isn't big enough to hold two Docker images of Scanner, 17 | # so we have to push and delete the CPU image before building the GPU one. 18 | 19 | if [[ "$TRAVIS_BRANCH" = "master" ]]; then 20 | TRAVIS_TAG="latest" 21 | fi 22 | 23 | test_docker() { 24 | INSTALL_SCANNERTOOLS="pushd /tmp && \ 25 | git clone https://github.com/scanner-research/scannertools -b master && \ 26 | cd scannertools/scannertools_infra && pip3 install . && \ 27 | cd ../scannertools && pip3 install -v -e . 
&& popd" 28 | 29 | if [[ "$TEST_TYPE" = "cpp" ]]; then 30 | TEST_COMMAND="cd /opt/scanner/build && CTEST_OUTPUT_ON_FAILURE=1 make test ARGS='-V -E PythonTests'" 31 | elif [[ "$TEST_TYPE" = "tutorials" ]]; then 32 | TEST_COMMAND="$INSTALL_SCANNERTOOLS && cd /opt/scanner/ && python3 setup.py test --addopts '-k test_tutorial'" 33 | elif [[ "$TEST_TYPE" = "integration" ]]; then 34 | TEST_COMMAND="cd /opt/scanner/ && python3 setup.py test --addopts '-k \\\"not test_tutorial\\\"'" 35 | fi 36 | # We add -local to make sure it doesn't run the remote image if the build fails. 37 | docker pull $DOCKER_TEST_REPO:$1-$TRAVIS_BUILD_NUMBER 38 | docker run $DOCKER_TEST_REPO:$1-$TRAVIS_BUILD_NUMBER /bin/bash \ 39 | -c "adduser --disabled-password --gecos \"\" user && pip3 uninstall -y grpcio protobuf; chmod -R 777 /opt/scanner && su -c \"cd /opt/scanner/dist && (yes | pip3 install --user *) && $TEST_COMMAND\" user" 40 | docker rm $(docker ps -a -f status=exited -q) 41 | docker rmi -f $DOCKER_REPO:$1-$TRAVIS_BUILD_NUMBER 42 | } 43 | 44 | yes | docker login -u="$DOCKER_USER" -p="$DOCKER_PASS" 45 | 46 | test_docker $BUILD_TYPE 47 | 48 | exit $? 49 | -------------------------------------------------------------------------------- /scanner/engine/column_source.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #include "scanner/api/source.h" 17 | 18 | #include "storehouse/storage_backend.h" 19 | #include "scanner/engine/video_index_entry.h" 20 | #include "scanner/engine/table_meta_cache.h" 21 | 22 | #include 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class ColumnSource : public Source { 29 | public: 30 | ColumnSource(const SourceConfig& config); 31 | 32 | void read(const std::vector& element_args, 33 | std::vector& output_columns) override; 34 | 35 | void get_video_column_information( 36 | proto::VideoDescriptor::VideoCodecType& encoding_type, 37 | bool& inplace_video); 38 | 39 | void set_table_meta(TableMetaCache* cache); 40 | 41 | private: 42 | Result valid_; 43 | i32 load_sparsity_threshold_; 44 | TableMetaCache* table_metadata_; // Caching table metadata 45 | std::unique_ptr 46 | storage_; // Set up a distinct storage backend for each IO thread 47 | 48 | // To amortize opening files 49 | i32 last_table_id_ = -1; 50 | std::map, VideoIndexEntry> index_; 51 | 52 | // Video Column Information 53 | proto::VideoDescriptor::VideoCodecType codec_type_; 54 | bool inplace_video_; 55 | }; 56 | 57 | } 58 | } // namespace scanner 59 | -------------------------------------------------------------------------------- /scanner/util/ffmpeg.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License.
14 | */ 15 | 16 | #pragma once 17 | 18 | #ifdef HAVE_FFMPEG 19 | 20 | #include "scanner/util/common.h" 21 | #include "storehouse/storage_backend.h" 22 | 23 | extern "C" { 24 | #include "libavcodec/avcodec.h" 25 | #include "libavfilter/avfilter.h" 26 | #include "libavformat/avformat.h" 27 | #include "libavformat/avio.h" 28 | #include "libavutil/error.h" 29 | #include "libavutil/opt.h" 30 | #include "libavutil/pixdesc.h" 31 | #include "libswscale/swscale.h" 32 | } 33 | 34 | using storehouse::RandomReadFile; 35 | 36 | namespace scanner { 37 | 38 | struct FFStorehouseState { 39 | std::unique_ptr file = nullptr; 40 | u64 size = 0; // total file size 41 | u64 pos = 0; 42 | 43 | u64 buffer_start = 0; 44 | u64 buffer_end = 0; 45 | std::vector buffer; 46 | }; 47 | 48 | bool ffmpeg_storehouse_state_init(FFStorehouseState* file_state, 49 | storehouse::StorageBackend* storage, 50 | const std::string& path, 51 | std::string& error_message); 52 | 53 | // For custom AVIOContext that loads from memory 54 | i32 ffmpeg_storehouse_read_packet(void* opaque, u8* buf, i32 buf_size); 55 | 56 | i64 ffmpeg_storehouse_seek(void* opaque, i64 offset, i32 whence); 57 | 58 | } // namespace scanner 59 | 60 | #endif 61 | -------------------------------------------------------------------------------- /scanner/api/sink.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #include "scanner/api/sink.h" 17 | #include "scanner/engine/sink_factory.h" 18 | #include "scanner/engine/sink_registry.h" 19 | 20 | namespace scanner { 21 | namespace internal { 22 | 23 | SinkRegistration::SinkRegistration(const SinkBuilder& builder) { 24 | const std::string& name = builder.name_; 25 | const bool variadic_inputs = builder.variadic_inputs_; 26 | std::vector input_columns; 27 | size_t i = 0; 28 | for (auto& name_type : builder.input_columns_) { 29 | Column col; 30 | col.set_id(i++); 31 | col.set_name(std::get<0>(name_type)); 32 | col.set_type(std::get<1>(name_type)); 33 | input_columns.push_back(col); 34 | } 35 | bool per_element_output = builder.per_element_output_; 36 | bool entire_stream_output = builder.entire_stream_output_; 37 | 38 | SinkConstructor constructor = builder.constructor_; 39 | internal::SinkFactory* factory = new internal::SinkFactory( 40 | name, variadic_inputs, input_columns, per_element_output, 41 | entire_stream_output, builder.protobuf_name_, 42 | builder.stream_protobuf_name_, constructor); 43 | SinkRegistry* registry = get_sink_registry(); 44 | Result result = registry->add_sink(name, factory); 45 | if (!result.success()) { 46 | LOG(WARNING) << "Failed to register sink " << name << ": " << result.msg(); 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /scripts/travis-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Writing output (bell) keeps travis from timing out 4 | # https://github.com/travis-ci/travis-ci/issues/7961 5 | function bell() { 6 | while true; do 7 | echo -e "\a" 8 | sleep 60 9 | done 10 | } 11 | bell & 12 | 13 | set -e 14 | 15 | # The Travis VM isn't big enough to hold two Docker images of Scanner, 16 | # so we have to push and delete the CPU image before building the GPU one. 17 | 18 | if [[ "$TRAVIS_BRANCH" = "master" ]]; then 19 | TRAVIS_TAG="latest" 20 | fi 21 | 22 | if [[ ("$TRAVIS_BRANCH" = "master" || "$TRAVIS_BRANCH" = "$TRAVIS_TAG") && \ 23 | "$TRAVIS_PULL_REQUEST" = "false" ]]; then 24 | PUSH=0 25 | else 26 | PUSH=1 27 | fi 28 | 29 | build_docker() { 30 | # We add -local to make sure it doesn't run the remote image if the build fails. 31 | if [ "$1" = "cpu" ]; then 32 | docker build -t $DOCKER_REPO:$1-local . \ 33 | --build-arg gpu=OFF --build-arg tag=cpu --build-arg deps_opt='' \ 34 | -f docker/Dockerfile.scanner 35 | else 36 | # Parse gpu build type 37 | local TAG=$1 38 | docker build -t $DOCKER_REPO:$1-local . \ 39 | --build-arg gpu=ON \ 40 | --build-arg tag=$TAG \ 41 | --build-arg deps_opt='-g' \ 42 | -f docker/Dockerfile.scanner 43 | fi 44 | 45 | echo "$TRAVIS_BUILD_STAGE_NAME" 46 | if [ "$TRAVIS_BUILD_STAGE_NAME" = "Test build" ]; then 47 | docker tag $DOCKER_REPO:$1-local $DOCKER_TEST_REPO:$1-$TRAVIS_BUILD_NUMBER 48 | docker push $DOCKER_TEST_REPO:$1-$TRAVIS_BUILD_NUMBER 49 | docker rmi -f $DOCKER_TEST_REPO:$1-$TRAVIS_BUILD_NUMBER 50 | elif [ $PUSH -eq 0 ]; then 51 | docker tag $DOCKER_REPO:$1-local $DOCKER_REPO:$1-$TRAVIS_TAG 52 | docker push $DOCKER_REPO:$1-$TRAVIS_TAG 53 | docker rmi -f $DOCKER_REPO:$1-$TRAVIS_TAG 54 | fi 55 | } 56 | 57 | yes | docker login -u="$DOCKER_USER" -p="$DOCKER_PASS" 58 | build_docker $BUILD_TYPE 59 | 60 | exit $? 
61 | -------------------------------------------------------------------------------- /python/scannerpy/partitioner.py: -------------------------------------------------------------------------------- 1 | from scannerpy.common import * 2 | from scannerpy.protobufs import protobufs 3 | 4 | DEFAULT_GROUP_SIZE = 250 5 | 6 | class TaskPartitioner: 7 | """ 8 | Utility for specifying how to partition the output domain of a job into 9 | tasks. 10 | """ 11 | 12 | def __init__(self, sc): 13 | self._sc = sc 14 | 15 | def all(self, group_size=DEFAULT_GROUP_SIZE): 16 | return self.strided(1, group_size=group_size) 17 | 18 | def strided(self, stride, group_size=DEFAULT_GROUP_SIZE): 19 | args = protobufs.StridedPartitionerArgs() 20 | args.stride = stride 21 | args.group_size = group_size 22 | sampling_args = protobufs.SamplingArgs() 23 | sampling_args.sampling_function = 'Strided' 24 | sampling_args.sampling_args = args.SerializeToString() 25 | return sampling_args 26 | 27 | def range(self, start, end): 28 | return self.ranges([(start, end)]) 29 | 30 | def ranges(self, intervals): 31 | return self.strided_ranges(intervals, 1) 32 | 33 | def gather(self, groups): 34 | args = protobufs.GatherSamplerArgs() 35 | for rows in groups: 36 | gather_group = args.groups_add() 37 | gather_group.rows[:] = rows 38 | sampling_args = protobufs.SamplingArgs() 39 | sampling_args.sampling_function = 'Gather' 40 | sampling_args.sampling_args = args.SerializeToString() 41 | return sampling_args 42 | 43 | def strided_range(self, start, end, stride): 44 | return self.strided_ranges([(start, end)], stride) 45 | 46 | def strided_ranges(self, intervals, stride): 47 | args = protobufs.StridedRangePartitionerArgs() 48 | args.stride = stride 49 | for start, end in intervals: 50 | args.starts.append(start) 51 | args.ends.append(end) 52 | sampling_args = protobufs.SamplingArgs() 53 | sampling_args.sampling_function = 'StridedRange' 54 | sampling_args.sampling_args = args.SerializeToString() 55 | return sampling_args 56 | -------------------------------------------------------------------------------- /scanner/video/video_decoder.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/kernel.h" 19 | #include "scanner/engine/metadata.h" 20 | #include "scanner/util/common.h" 21 | #include "scanner/util/profiler.h" 22 | 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | class InputFormat; 29 | 30 | enum class VideoDecoderType { 31 | NVIDIA, 32 | INTEL, 33 | SOFTWARE, 34 | }; 35 | 36 | /////////////////////////////////////////////////////////////////////////////// 37 | /// VideoDecoder 38 | class VideoDecoder { 39 | public: 40 | static std::vector get_supported_decoder_types(); 41 | 42 | static bool has_decoder_type(VideoDecoderType type); 43 | 44 | static VideoDecoder* make_from_config(DeviceHandle device_handle, 45 | i32 num_devices, VideoDecoderType type); 46 | 47 | virtual ~VideoDecoder(){}; 48 | 49 | virtual void configure(const FrameInfo& metadata) = 0; 50 | 51 | virtual bool feed(const u8* encoded_buffer, size_t encoded_size, 52 | bool discontinuity = false) = 0; 53 | 54 | virtual bool discard_frame() = 0; 55 | 56 | virtual bool get_frame(u8* decoded_buffer, size_t decoded_size) = 0; 57 | 58 | virtual int decoded_frames_buffered() = 0; 59 | 60 | virtual void wait_until_frames_copied() = 0; 61 | 62 | void set_profiler(Profiler* profiler); 63 | 64 | protected: 65 | Profiler* profiler_ = nullptr; 66 | }; 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /examples/tutorials/03_sampling.py: -------------------------------------------------------------------------------- 1 | import scannerpy as sp 2 | import scannertools.imgproc 3 | 4 | import sys 5 | import os.path 6 | sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..') 7 | import util 8 | 9 | ################################################################################ 10 | # This tutorial shows how to select different frames of a video to process. # 11 | ################################################################################ 12 | 13 | def main(): 14 | sc = sp.Client() 15 | 16 | example_video_path = util.download_video() 17 | video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path) 18 | 19 | frames = sc.io.Input([video_stream]) 20 | 21 | # You can tell Scanner which frames of the video (or which rows of a video 22 | # table) you want to sample. Here, we indicate that we want to stride 23 | # the frame column by 4 (select every 4th frame) 24 | strided_frames = sc.streams.Stride(frames, [4]) 25 | 26 | # We process the sampled frame same as before. 27 | hists = sc.ops.Histogram(frame=strided_frames) 28 | 29 | hist_stream = sp.NamedVideoStream(sc, 'example_hist_strided') 30 | output = sc.io.Output(hists, [hist_stream]) 31 | 32 | sc.run(output, sp.PerfParams.estimate()) 33 | 34 | # Loop over the column's rows. Each row is a tuple of the frame number and 35 | # value for that row. 36 | video_hists = hist_stream.load() 37 | num_rows = 0 38 | for frame_hists in video_hists: 39 | assert len(frame_hists) == 3 40 | assert frame_hists[0].shape[0] == 16 41 | num_rows += 1 42 | assert num_rows == round(video_stream.len() / 4) 43 | 44 | video_stream.delete(sc) 45 | hist_stream.delete(sc) 46 | 47 | # Here's some examples of other sampling modes: 48 | 49 | # Range takes a specific subset of a video. Here, it runs over all frames 50 | # from 0 to 100 51 | sc.streams.Range(frames, [(0, 100)]) 52 | 53 | # Gather takes an arbitrary list of frames from a video. 
54 | sc.streams.Gather(frames, [[10, 17, 32]]) 55 | 56 | if __name__ == "__main__": 57 | main() 58 | -------------------------------------------------------------------------------- /python/scannerpy/util.py: -------------------------------------------------------------------------------- 1 | from .config import mkdir_p 2 | import os 3 | import urllib.request, urllib.error, urllib.parse 4 | import errno 5 | import tarfile 6 | from contextlib import contextmanager 7 | import tempfile 8 | 9 | 10 | def temp_directory(): 11 | path = os.path.expanduser('~/.scanner/resources') 12 | mkdir_p(path) 13 | return path 14 | 15 | 16 | def download_temp_file(url, local_path=None, untar=False): 17 | if local_path is None: 18 | local_path = url.rsplit('/', 1)[-1] 19 | local_path = os.path.join(temp_directory(), local_path) 20 | mkdir_p(os.path.dirname(local_path)) 21 | if not os.path.isfile(local_path): 22 | print('Downloading {:s} to {:s}...'.format(url, local_path)) 23 | f = urllib.request.urlopen(url) 24 | with open(local_path, 'wb') as local_f: 25 | local_f.write(f.read()) 26 | 27 | if untar: 28 | with tarfile.open(local_path) as tar_f: 29 | tar_f.extractall(temp_directory()) 30 | if untar: 31 | return temp_directory() 32 | else: 33 | return local_path 34 | 35 | 36 | def default(d, k, v): 37 | if k not in d: 38 | return v() if callable(v) else v 39 | return d[k] 40 | 41 | 42 | @contextmanager 43 | def sample_video(delete=True): 44 | try: 45 | import requests 46 | except ImportError: 47 | print( 48 | 'You need to install requests to run this. Try running:\npip3 install requests' 49 | ) 50 | exit() 51 | 52 | url = "https://storage.googleapis.com/scanner-data/public/sample-clip.mp4" 53 | 54 | if delete: 55 | f = tempfile.NamedTemporaryFile(suffix='.mp4') 56 | else: 57 | sample_path = '/tmp/sample_video.mp4' 58 | if os.path.isfile(sample_path): 59 | yield sample_path 60 | return 61 | 62 | f = open(sample_path, 'wb') 63 | 64 | with f as f: 65 | resp = requests.get(url, stream=True) 66 | assert resp.ok 67 | for block in resp.iter_content(1024): 68 | f.write(block) 69 | f.flush() 70 | yield f.name 71 | -------------------------------------------------------------------------------- /scanner/util/fs.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | namespace scanner { 28 | 29 | /////////////////////////////////////////////////////////////////////////////// 30 | /// Path utils 31 | inline std::string dirname_s(const std::string& path) { 32 | char* path_copy = strdup(path.c_str()); 33 | char* dir = dirname(path_copy); 34 | return std::string(dir); 35 | } 36 | 37 | inline std::string basename_s(const std::string& path) { 38 | char* path_copy = strdup(path.c_str()); 39 | char* base = basename(path_copy); 40 | return std::string(base); 41 | } 42 | 43 | int mkdir_p(const char* path, mode_t mode); 44 | 45 | void temp_file(FILE** file, std::string& name); 46 | 47 | void temp_file(std::string& name); 48 | 49 | void temp_dir(std::string& name); 50 | 51 | std::string done_file_path(const std::string& path); 52 | 53 | void download(const std::string& url, const std::string& local_path); 54 | 55 | std::string download_temp(const std::string& url); 56 | 57 | void delete_file(const std::string& path); 58 | 59 | std::vector read_entire_file(const std::string& file_name); 60 | 61 | // Cached files 62 | void cached_dir(const std::string& name, std::string& full_path); 63 | 64 | void download_if_uncached(const std::string& url, 65 | const std::string& cache_path); 66 | 67 | 68 | } 69 | -------------------------------------------------------------------------------- /examples/tutorials/README.md: -------------------------------------------------------------------------------- 1 | The following tutorials progressively walk through the features of Scanner 2 | using commented example code. For each tutorial, you should run the code first 3 | to see the output. For example, for the first tutorial: 4 | 5 | ```bash 6 | python3 00_basic.py 7 | ``` 8 | 9 | ## Summary 10 | 11 | 1. [00_basic.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/00_basic.py): Basic rundown of a full Scanner app 12 | 2. [01_defining_python_ops.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/01_defining_python_ops.py): How to define new Ops in Python 13 | 3. [02_op_attributes.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/02_op_attributes.py): Describes the different properties of Ops (such as batching, stencling, state) 14 | 4. [03_sampling.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/03_sampling.py): Introduces sampling operations for sparsely processing frames 15 | 5. [04_slicing.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/04_slicing.py): Introduces slicing operations for limiting temporal dependencies 16 | 6. [05_sources_sinks.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/05_sources_sinks.py): Introduces Sources and Sinks for reading from locations other than the database 17 | 7. [06_compression.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/06_compression.py): How to control the compression of output video streams and export them to mp4 files 18 | 8. [07_profiling.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/07_profiling.py): Introduces the profiling infrastructure 19 | 9. [08_defining_cpp_ops.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/08_defining_cpp_ops.py): How to define new Ops in C++ 20 | 10. 
[09_defining_cpp_sources.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/09_defining_cpp_sources.py): How to define new Sources in C++ 21 | 11. [10_defining_cpp_sinks.py](https://github.com/scanner-research/scanner/blob/master/examples/tutorials/10_defining_cpp_sinks.py): How to define new Sinks in C++ 22 | -------------------------------------------------------------------------------- /docker/build-all-base.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | NO_CACHE=false 5 | CORES=$(nproc) 6 | 7 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 8 | 9 | for dir in $DIR/*/ 10 | do 11 | base=`basename ${dir%*/}` 12 | 13 | cp $DIR/../deps.sh $dir/deps.sh 14 | cp $DIR/../deps_openvino.sh $dir/deps_openvino.sh 15 | 16 | rm -rf $dir/thirdparty 17 | mkdir -p $dir/thirdparty 18 | cp -r $DIR/../thirdparty/resources $dir/thirdparty/ 19 | 20 | function build { 21 | local TYPE=$1 22 | local TAG=$2 23 | local BASE_TAG=$3 24 | 25 | docker build \ 26 | --pull \ 27 | --build-arg cores=$CORES \ 28 | --build-arg base_tag=$BASE_TAG \ 29 | --no-cache=$NO_CACHE \ 30 | -t scannerresearch/scanner-base:$TAG \ 31 | -f $dir/Dockerfile.$TYPE \ 32 | $dir 2>&1 > ${TAG}-output.log \ 33 | && rm ${TAG}-output.log \ 34 | || { echo "Building $TAG failed! Check $TAG-output.log."; } 35 | } 36 | 37 | function build_chain { 38 | local TYPE=$1 39 | local TAG=$2 40 | local BASE_TAG=$3 41 | 42 | build base "$TAG-base" $BASE_TAG && \ 43 | push "$TAG-base" && \ 44 | build $TYPE $TAG "scannerresearch/scanner-base:$TAG-base" 45 | } 46 | 47 | function push { 48 | docker push scannerresearch/scanner-base:$1 49 | } 50 | 51 | function build_push_gpu { 52 | local CUDA_MAJOR_VERSION=$1 53 | local CUDA_VERSION=$2 54 | local CUDNN_VERSION=$3 55 | local BASE_TAG=nvidia/cuda:${CUDA_VERSION}-${CUDNN_VERSION}-devel-ubuntu16.04 56 | local TAG=$base-gpu-$CUDA_VERSION-$CUDNN_VERSION 57 | 58 | build_chain gpu${CUDA_MAJOR_VERSION} $TAG $BASE_TAG && \ 59 | push $TAG 60 | } 61 | 62 | 63 | base_tag=scannerresearch/scanner-base:$base 64 | 65 | # Build cpu with ubuntu:16.04 66 | build_chain cpu $base-cpu ubuntu:16.04 & 67 | 68 | # GPU 69 | build_push_gpu 9 9.0 cudnn7 & 70 | build_push_gpu 9 9.1 cudnn7 & 71 | build_push_gpu 10 10.0 cudnn7 & 72 | build_push_gpu 10 10.1 cudnn7 & 73 | 74 | wait $(jobs -p) 75 | 76 | push $base-cpu 77 | done 78 | -------------------------------------------------------------------------------- /scanner/video/video_encoder.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/kernel.h" 19 | #include "scanner/engine/metadata.h" 20 | #include "scanner/util/common.h" 21 | #include "scanner/util/profiler.h" 22 | 23 | #include 24 | 25 | namespace scanner { 26 | namespace internal { 27 | 28 | enum class VideoEncoderType { 29 | NVIDIA, 30 | INTEL, 31 | SOFTWARE, 32 | }; 33 | 34 | struct EncodeOptions { 35 | i32 quality = -1; 36 | i64 bitrate = -1; 37 | i64 keyframe_distance = -1; 38 | }; 39 | 40 | /////////////////////////////////////////////////////////////////////////////// 41 | /// VideoEncoder 42 | class VideoEncoder { 43 | public: 44 | static std::vector get_supported_encoder_types(); 45 | 46 | static bool has_encoder_type(VideoEncoderType type); 47 | 48 | static VideoEncoder* make_from_config(DeviceHandle device_handle, 49 | i32 num_devices, VideoEncoderType type); 50 | 51 | virtual ~VideoEncoder(){}; 52 | 53 | virtual void configure(const FrameInfo& metadata, 54 | const EncodeOptions& opts) = 0; 55 | 56 | virtual bool feed(const u8* frame_buffer, size_t frame_size) = 0; 57 | 58 | virtual bool flush() = 0; 59 | 60 | virtual bool get_packet(u8* decoded_buffer, size_t decoded_size, 61 | size_t& actual_packet_size) = 0; 62 | 63 | virtual int decoded_packets_buffered() = 0; 64 | 65 | virtual void wait_until_packets_copied() = 0; 66 | 67 | void set_profiler(Profiler* profiler); 68 | 69 | protected: 70 | Profiler* profiler_ = nullptr; 71 | }; 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /scanner/video/intel/intel_video_decoder.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/eval/evaluator.h" 19 | #include "scanner/video/video_decoder.h" 20 | 21 | extern "C" { 22 | #include "libavcodec/avcodec.h" 23 | #include "libavfilter/avfilter.h" 24 | #include "libavformat/avformat.h" 25 | #include "libavformat/avio.h" 26 | #include "libavutil/error.h" 27 | #include "libavutil/opt.h" 28 | #include "libavutil/pixdesc.h" 29 | #include "libswscale/swscale.h" 30 | } 31 | 32 | #include 33 | #include 34 | 35 | namespace scanner { 36 | 37 | /////////////////////////////////////////////////////////////////////////////// 38 | /// IntelVideoDecoder 39 | class IntelVideoDecoder : public VideoDecoder { 40 | public: 41 | IntelVideoDecoder(int device_id, DeviceType output_type); 42 | 43 | ~IntelVideoDecoder(); 44 | 45 | void configure(const InputFormat& metadata) override; 46 | 47 | bool feed(const u8* encoded_buffer, size_t encoded_size, 48 | bool discontinuity = false) override; 49 | 50 | bool discard_frame() override; 51 | 52 | bool get_frame(u8* decoded_buffer, size_t decoded_size) override; 53 | 54 | int decoded_frames_buffered() override; 55 | 56 | void wait_until_frames_copied() override; 57 | 58 | private: 59 | int device_id_; 60 | DeviceType output_type_; 61 | AVPacket packet_; 62 | AVCodec* codec_; 63 | AVCodecContext* cc_; 64 | 65 | InputFormat metadata_; 66 | std::vector conversion_buffer_; 67 | bool reset_context_; 68 | SwsContext* sws_context_; 69 | 70 | std::vector frame_pool_; 71 | std::deque decoded_frame_queue_; 72 | }; 73 | } 74 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/build_and_deploy.sh: -------------------------------------------------------------------------------- 1 | cd ~/capture 2 | 3 | ### 1. Check if container repo exists 4 | aws ecr describe-repositories --repository-names scanner 5 | REG_EXISTS=$? 6 | if [ $REG_EXISTS -ne 0 ]; then 7 | # Create container repo 8 | aws ecr create-repository --repository-name scanner 9 | fi 10 | 11 | # Get container repo URI 12 | REPO_URI=$(aws ecr describe-repositories --repository-names scanner | jq -r '.repositories[0].repositoryUri') 13 | echo $REPO_URI 14 | 15 | ### 2. Build master and worker docker images 16 | docker pull scannerresearch/scanner:cpu-latest 17 | 18 | docker build -t $REPO_URI:scanner-master . \ 19 | -f Dockerfile.master 20 | 21 | docker build -t $REPO_URI:scanner-worker . \ 22 | -f Dockerfile.worker 23 | 24 | aws configure set default.region us-west-2 25 | 26 | # Provides an auth token to enable pushing to container repo 27 | LOGIN_CMD=$(aws ecr get-login --no-include-email) 28 | eval $LOGIN_CMD 29 | 30 | # Push master and worker images 31 | docker push $REPO_URI:scanner-master 32 | docker push $REPO_URI:scanner-worker 33 | 34 | ### 2. 
Deploy master and worker services 35 | 36 | # Create secret for sharing AWS credentials with instances 37 | kubectl create secret generic aws-storage-key \ 38 | --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ 39 | --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY 40 | 41 | # Replace REPO_NAME with the location of the docker image 42 | sed "s||$REPO_URI:scanner-master|g" master.yml.template > master.yml 43 | sed "s||$REPO_URI:scanner-worker|g" worker.yml.template > worker.yml 44 | 45 | # Record existing replicas for worker so we can scale the service after deleting 46 | REPLICAS=$(kubectl get deployments scanner-worker -o json | jq '.spec.replicas' -r) 47 | 48 | # Delete and then redeploy the master and worker services 49 | kubectl delete deploy --all 50 | kubectl create -f master.yml 51 | kubectl create -f worker.yml 52 | 53 | # If there was an existing service, scale the new one back up to the same size 54 | if [[ "$REPLICAS" ]]; then 55 | kubectl scale deployment/scanner-worker --replicas=$REPLICAS 56 | fi 57 | 58 | ### 3. Expose the master port for the workers to connect to 59 | kubectl expose -f master.yml --type=LoadBalancer --target-port=8080 60 | -------------------------------------------------------------------------------- /examples/apps/aws_kubernetes/scale_eks_workers.sh: -------------------------------------------------------------------------------- 1 | programname=$0 2 | 3 | function usage { 4 | echo "usage: $programname name nodes" 5 | echo " name name to give the cluster" 6 | echo " nodes number of machines to scale to" 7 | exit 1 8 | } 9 | 10 | if [ $# != 2 ]; then 11 | usage 12 | fi 13 | 14 | NAME=$1 15 | NODES=$(($2 + 1)) 16 | 17 | CLUSTER_NAME=$NAME 18 | 19 | # 1. Get VPC info 20 | VPC_STACK_NAME=eks-vpc 21 | 22 | # Get VPC ID 23 | VPC_ID=$(aws cloudformation describe-stacks --stack-name $VPC_STACK_NAME \ 24 | | jq -r '.Stacks[0].Outputs[] | select(.OutputKey=="VpcId") | .OutputValue') 25 | 26 | # Get security group ids 27 | SECURITY_GROUP_IDS=$(aws cloudformation describe-stacks --stack-name $VPC_STACK_NAME \ 28 | | jq -r '.Stacks[0].Outputs[] | select(.OutputKey=="SecurityGroups") | .OutputValue') 29 | 30 | # Get subnet outputs 31 | SUBNET_IDS=$(aws cloudformation describe-stacks --stack-name $VPC_STACK_NAME \ 32 | | jq -r '.Stacks[0].Outputs[] | select(.OutputKey=="SubnetIds") | .OutputValue') 33 | 34 | # 2. Change the autoscaling group to spawn more nodes 35 | aws cloudformation update-stack --stack-name $CLUSTER_NAME-workers \ 36 | --use-previous-template \ 37 | --capabilities CAPABILITY_IAM \ 38 | --parameters \ 39 | ParameterKey=ClusterName,ParameterValue=$CLUSTER_NAME \ 40 | ParameterKey=ClusterControlPlaneSecurityGroup,ParameterValue=$SECURITY_GROUP_IDS \ 41 | ParameterKey=NodeGroupName,ParameterValue=$CLUSTER_NAME-workers-node-group \ 42 | ParameterKey=NodeAutoScalingGroupMinSize,ParameterValue=1 \ 43 | ParameterKey=NodeAutoScalingGroupMaxSize,ParameterValue=$NODES \ 44 | ParameterKey=NodeInstanceType,ParameterValue=c4.8xlarge \ 45 | ParameterKey=NodeImageId,ParameterValue=ami-73a6e20b \ 46 | ParameterKey=KeyName,ParameterValue=devenv-key \ 47 | ParameterKey=VpcId,ParameterValue=$VPC_ID \ 48 | ParameterKey=Subnets,ParameterValue=\"$SUBNET_IDS\" 49 | 50 | echo "Waiting for EKS worker node group to be updated... (may take a while)" 51 | aws cloudformation wait stack-update-complete --stack-name $CLUSTER_NAME-workers 52 | echo "EKS worker node group updated." 53 | 54 | # 2. 
Tell kubernetes to start up more pods 55 | kubectl scale deployment/scanner-worker --replicas=$(($NODES - 1)) 56 | 57 | -------------------------------------------------------------------------------- /examples/apps/pose_detection/main.py: -------------------------------------------------------------------------------- 1 | import scannerpy 2 | from scannerpy import Database, DeviceType, Job, FrameType 3 | from scannerpy.stdlib import NetDescriptor, readers 4 | import math 5 | import os 6 | import subprocess 7 | import cv2 8 | import sys 9 | import os.path 10 | sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../..') 11 | import util 12 | 13 | from typing import Tuple 14 | 15 | @scannerpy.register_python_op(name='PoseDraw') 16 | def pose_draw(self, frame: FrameType, frame_poses: bytes) -> FrameType: 17 | for pose in readers.poses(frame_poses, self.protobufs): 18 | pose.draw(frame) 19 | return frame 20 | 21 | if len(sys.argv) <= 1: 22 | print('Usage: main.py ') 23 | exit(1) 24 | 25 | movie_path = sys.argv[1] 26 | print('Detecting poses in video {}'.format(movie_path)) 27 | movie_name = os.path.splitext(os.path.basename(movie_path))[0] 28 | 29 | db = Database() 30 | video_path = movie_path 31 | if not db.has_table(movie_name): 32 | print('Ingesting video into Scanner ...') 33 | db.ingest_videos([(movie_name, video_path)], force=True) 34 | input_table = db.table(movie_name) 35 | 36 | sampler = db.streams.All 37 | sampler_args = {} 38 | 39 | if db.has_gpu(): 40 | print('Using GPUs') 41 | device = DeviceType.GPU 42 | pipeline_instances = -1 43 | scales = 3 44 | else: 45 | print('Using CPUs') 46 | device = DeviceType.CPU 47 | pipeline_instances = 1 48 | scales = 1 49 | 50 | frame = db.sources.FrameColumn() 51 | poses_out = db.ops.OpenPose( 52 | frame=frame, 53 | pose_num_scales=scales, 54 | pose_scale_gap=0.33, 55 | device=device) 56 | drawn_frame = db.ops.PoseDraw(frame=frame, frame_poses=poses_out) 57 | sampled_frames = sampler(drawn_frame) 58 | output = db.sinks.Column(columns={'frame': sampled_frames}) 59 | job = Job( 60 | op_args={ 61 | frame: input_table.column('frame'), 62 | sampled_frames: sampler_args, 63 | output: movie_name + '_drawn_poses', 64 | }) 65 | [drawn_poses_table] = db.run(output=output, jobs=[job], work_packet_size=8, io_packet_size=64, 66 | pipeline_instances_per_node=pipeline_instances, 67 | force=True) 68 | 69 | print('Writing output video...') 70 | drawn_poses_table.column('frame').save_mp4('{:s}_poses'.format(movie_name)) 71 | -------------------------------------------------------------------------------- /scripts/travis-publish.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Exit if this is not the master branch 6 | if ! 
[ "$TRAVIS_BRANCH" = "master" -a "$TRAVIS_PULL_REQUEST" = "false" -a "$BUILD_TYPE" = "cpu" ]; then 7 | exit 0 8 | fi 9 | 10 | # Commit docs 11 | REPO_PATH=git@github.com:scanner-research/scanner.git 12 | HTML_PATH=build/docs/html 13 | COMMIT_USER="Documentation Builder" 14 | COMMIT_EMAIL="wcrichto@cs.stanford.edu" 15 | CHANGESET=$(git rev-parse --verify HEAD) 16 | 17 | # Install python package for autodoc 18 | docker run $DOCKER_REPO:cpu-local /bin/bash -c " 19 | cd /opt/scanner 20 | git config --global user.name \"${COMMIT_USER}\" 21 | git config --global user.email \"${COMMIT_EMAIL}\" 22 | 23 | # Unencrypt ssh key 24 | mkdir -p ~/.ssh/ 25 | chmod 0700 ~/.ssh/ 26 | 27 | openssl aes-256-cbc -K $encrypted_519f11e8a6d4_key -iv $encrypted_519f11e8a6d4_iv -in .travis/travisci_rsa.enc -out .travis/travisci_rsa -d 28 | chmod 0600 .travis/travisci_rsa 29 | cp .travis/travisci_rsa ~/.ssh/id_rsa 30 | cp .travis/travisci_rsa.pub ~/.ssh/id_rsa.pub 31 | chmod 0744 ~/.ssh/id_rsa.pub 32 | ls -lah .travis 33 | ls -lah ~/.ssh/ 34 | 35 | eval \`ssh-agent -s\` 36 | ssh-add 37 | rm -fr ~/.ssh/known_hosts 38 | ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 39 | 40 | pip3 install twine 41 | pip3 install Sphinx sphinx-autodoc-typehints 42 | apt-get update && apt-get install -y doxygen 43 | 44 | rm -rf ${HTML_PATH} 45 | mkdir -p ${HTML_PATH} 46 | git clone -b gh-pages ${REPO_PATH} --single-branch ${HTML_PATH} 47 | 48 | cd ${HTML_PATH} 49 | cp CNAME /tmp 50 | git rm -rf . 51 | cd - 52 | 53 | cd docs 54 | make html 55 | cd - 56 | 57 | cd build 58 | cmake -D BUILD_DOCS=ON .. 59 | make doxygen 60 | cd - 61 | cp -r build/doxygen/html ${HTML_PATH}/cpp 62 | 63 | cd ${HTML_PATH} 64 | cp /tmp/CNAME . 65 | git add . 66 | git commit -m \"Automated documentation build for changeset ${CHANGESET}.\" 67 | git push origin gh-pages 68 | cd - 69 | " 70 | 71 | # Tell Scannertools to rebuild 72 | request_body='{ 73 | "request": { 74 | "branch":"master" 75 | }}' 76 | 77 | curl -s -X POST \ 78 | -H "Content-Type: application/json" \ 79 | -H "Accept: application/json" \ 80 | -H "Travis-API-Version: 3" \ 81 | -H "Authorization: token $TRAVIS_TOKEN" \ 82 | -d "$request_body" \ 83 | "https://api.travis-ci.org/repo/scanner-research%2Fscannertools/requests" 84 | -------------------------------------------------------------------------------- /google.md: -------------------------------------------------------------------------------- 1 | # Getting started with Google Cloud 2 | 3 | This guide will walk you through setting up Scanner on Google Cloud. You will need to have a Google account. 4 | 5 | ## 1. Install the Cloud SDK 6 | 7 | On your local machine (laptop/desktop), follow the instructions here to install Google's Cloud SDK: [https://cloud.google.com/sdk/downloads](https://cloud.google.com/sdk/downloads) 8 | 9 | ## 2. Create a project 10 | 11 | If you do not already have a project created, pick a project ID for your application, e.g. `my-scanner-project`. Then run: 12 | ```bash 13 | gcloud projects create 14 | ``` 15 | 16 | ## 3. Make a bucket 17 | 18 | You will need to store your videos in Google Cloud Storage. Cloud Storage is organized into independent buckets (like top-level directories). Pick a name for your bucket, e.g. `scanner-data`, and run: 19 | ```bash 20 | gsutil mb gs://scanner-data 21 | ``` 22 | 23 | ## 4. Enable S3 interoperability 24 | 25 | We use an S3 API to access GCS (for good reasons), so you need to explicitly enable this feature. 
Go here: [https://console.cloud.google.com/storage/settings](https://console.cloud.google.com/storage/settings) 26 | 27 | Click *Enable interoperability access* and then click *Create a new key*. Into your local shell, run: 28 | ```bash 29 | export AWS_ACCESS_KEY_ID= 30 | export AWS_SECRET_ACCESS_KEY= 31 | ``` 32 | 33 | I would recommend putting these in your shell's `.*rc` file as well. 34 | 35 | ## 5. Set up your Scanner config 36 | 37 | Change the storage heading in your `~/.scanner.toml` to use GCS: 38 | ```toml 39 | [storage] 40 | type = "gcs" 41 | bucket = "" 42 | db_path = "scanner_db" 43 | ``` 44 | 45 | ## 6. Upload your videos into your bucket 46 | 47 | You can copy videos onto GCS like this: 48 | ```bash 49 | gsutil cp example.mp4 gs://scanner-data/videos/ 50 | ``` 51 | 52 | ## 7. You're done! 53 | 54 | Now, whenever you want to specify an ingest path, it does not need a leading slash and should not include the bucket name. For example, with the config above, the following is a valid ingest path: 55 | ``` 56 | videos/example.mp4 57 | ``` 58 | 59 | If you want to use Google Cloud to scale computation instead of just storage, take a look at our Kubernetes adapter: [https://github.com/scanner-research/scanner-kube](https://github.com/scanner-research/scanner-kube) 60 | -------------------------------------------------------------------------------- /docs/overview_h.html: -------------------------------------------------------------------------------- 1 |
Videos as Streams

Videos are first-class citizens in Scanner and are represented as streams of frames, even if the video is stored in a compressed format like h.264. Scanner supports arbitrary sparse gather operations from these streams, so your application only needs to read (and possibly decode) exactly the frames it needs.

Video processing as Streams and Ops

Applications are expressed in Scanner as dataflow graphs. Dataflow graphs are composed of streams, which may be videos or other sequences of data, and ops, stateful functions that process elements from streams.
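A minimal sketch of such a graph in the Python API, mirroring 03_sampling.py from examples/tutorials above. The stream names, the Stride sampler, and the Histogram op (registered by scannertools) are taken from that tutorial; the video path here is a stand-in, not a file shipped with the repository.

```python
import scannerpy as sp
import scannertools.imgproc  # registers the Histogram op used below

sc = sp.Client()

# Streams: a named video stream is the graph input, another named stream
# collects the per-frame results ('example.mp4' is a stand-in path).
video = sp.NamedVideoStream(sc, 'example', path='example.mp4')
hist_stream = sp.NamedVideoStream(sc, 'example_hist')

# Ops: sparsely gather every 4th frame, then compute a color histogram per frame.
frames = sc.io.Input([video])
sampled = sc.streams.Stride(frames, [4])
hists = sc.ops.Histogram(frame=sampled)
output = sc.io.Output(hists, [hist_stream])

# Execute the graph and read back the results.
sc.run(output, sp.PerfParams.estimate())
for frame_hist in hist_stream.load():
    pass  # one histogram triple (three 16-bin channels) per sampled frame
```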
Parallelizing within a single machine

Given a dataflow graph, Scanner manages all the details of executing your application on machines with both CPUs and GPUs. This includes reading the data efficiently from local or networked storage, decoding video frames if necessary, scheduling ops to both CPUs and GPUs, handling data movement between ops, and writing data out to an external storage system.
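A sketch of how an application surfaces these scheduling choices, using the older Database API in the same way as examples/apps/pose_detection/main.py above. The parameter names and values (work_packet_size, io_packet_size, pipeline_instances_per_node) come from that example and are illustrative, not prescriptive; the 'example' table is assumed to have been ingested already.

```python
from scannerpy import Database, Job, DeviceType

db = Database()

frame = db.sources.FrameColumn()
# Ops can be pinned to CPU or GPU; Scanner replicates the pipeline across the
# devices that are actually available on the machine.
device = DeviceType.GPU if db.has_gpu() else DeviceType.CPU
hists = db.ops.Histogram(frame=frame, device=device)
output = db.sinks.Column(columns={'histogram': hists})

job = Job(op_args={frame: db.table('example').column('frame'),
                   output: 'example_hist'})

db.run(output=output, jobs=[job],
       work_packet_size=8,               # rows handed to an op instance at a time
       io_packet_size=64,                # rows read/written per storage request
       pipeline_instances_per_node=-1)   # -1 lets Scanner choose, as in the pose_detection app
```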
Scaling to hundreds of machines in the cloud

Scanner's dataflow graphs are designed to be split up and distributed across machines, so the same graph that runs on a single machine can be deployed against a cluster of workers in the cloud.
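A sketch of what attaching to such a cluster can look like with the Database API. The master address, port, and constructor arguments below are assumptions modeled on the Kubernetes apps under examples/apps/ (which expose the Scanner master as a load-balanced service on port 8080), not a verbatim excerpt from those apps.

```python
from scannerpy import Database

# Hypothetical address of a Scanner master exposed by the Kubernetes service
# defined in examples/apps/gcp_kubernetes or examples/apps/aws_kubernetes.
MASTER = 'scanner-master.example.com:8080'

# Assumed constructor arguments: attach to an already-running master rather
# than starting a local cluster; graph construction and db.run() are then
# written exactly as in the single-machine example above.
db = Database(master=MASTER, start_cluster=False)
```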
63 | -------------------------------------------------------------------------------- /scanner/engine/column_sink.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/api/sink.h" 17 | 18 | #include "storehouse/storage_backend.h" 19 | #include "scanner/engine/video_index_entry.h" 20 | #include "scanner/engine/table_meta_cache.h" 21 | #include "scanner/util/thread_pool.h" 22 | 23 | #include 24 | #include 25 | 26 | namespace scanner { 27 | namespace internal { 28 | 29 | class ColumnSink : public Sink { 30 | public: 31 | ColumnSink(const SinkConfig& config); 32 | 33 | ~ColumnSink(); 34 | 35 | void new_stream(const std::vector& args) override; 36 | 37 | void write(const BatchedElements& input_columns) override; 38 | 39 | void new_task(i32 table_id, i32 task_id, 40 | std::vector column_types); 41 | 42 | void finished(); 43 | 44 | void provide_column_info( 45 | const std::vector& compressed, 46 | const std::vector& frame_info); 47 | 48 | private: 49 | Result valid_; 50 | ThreadPool thread_pool_; 51 | // Setup a distinct storage backend for each IO thread 52 | std::unique_ptr storage_; 53 | // Files to write io packets to 54 | std::vector> output_; 55 | std::vector> output_metadata_; 56 | std::vector video_metadata_; 57 | 58 | std::vector column_types_; 59 | std::vector compressed_; 60 | std::vector frame_info_; 61 | 62 | // Continuation state 63 | bool first_item_; 64 | bool needs_configure_; 65 | bool needs_reset_; 66 | 67 | i64 current_work_item_; 68 | i64 current_row_; 69 | i64 total_work_items_; 70 | }; 71 | 72 | } 73 | } // namespace scanner 74 | -------------------------------------------------------------------------------- /scanner/engine/load_worker.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/engine/runtime.h" 19 | #include "scanner/engine/source_factory.h" 20 | #include "scanner/engine/table_meta_cache.h" 21 | #include "scanner/util/common.h" 22 | #include "scanner/util/queue.h" 23 | #include "scanner/util/thread_pool.h" 24 | #include "scanner/api/source.h" 25 | #include "scanner/api/enumerator.h" 26 | 27 | namespace scanner { 28 | namespace internal { 29 | 30 | struct LoadWorkerArgs { 31 | // Uniform arguments 32 | i32 node_id; 33 | TableMetaCache& table_meta; 34 | // Per worker arguments 35 | int worker_id; 36 | storehouse::StorageConfig* storage_config; 37 | Profiler& profiler; 38 | proto::Result& result; 39 | i32 io_packet_size; 40 | i32 work_packet_size; 41 | std::vector source_factories; 42 | std::vector source_configs; 43 | }; 44 | 45 | class LoadWorker { 46 | public: 47 | LoadWorker(const LoadWorkerArgs& args); 48 | 49 | void feed(LoadWorkEntry& input_entry); 50 | 51 | bool yield(i32 item_size, EvalWorkEntry& output_entry); 52 | 53 | bool done(); 54 | 55 | private: 56 | const i32 node_id_; 57 | const i32 worker_id_; 58 | Profiler& profiler_; 59 | i32 io_packet_size_; 60 | i32 work_packet_size_; 61 | i32 num_columns_; 62 | std::vector source_configs_; 63 | std::vector> 64 | sources_; // Provides the implementation for reading 65 | // data under the specified data sources 66 | std::vector source_names_; 67 | ThreadPool thread_pool_; 68 | 69 | // Continuation state 70 | bool first_item_; 71 | bool needs_configure_; 72 | bool needs_reset_; 73 | LoadWorkEntry entry_; 74 | i64 current_row_; 75 | i64 total_rows_; 76 | }; 77 | 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /examples/apps/detectron/main.py: -------------------------------------------------------------------------------- 1 | from scannerpy import Database, Job, FrameType, DeviceType 2 | import os 3 | import sys 4 | import math 5 | import argparse 6 | from tqdm import tqdm 7 | 8 | import scannerpy 9 | import detectron_kernels 10 | from typing import Tuple, Sequence 11 | 12 | 13 | if __name__ == '__main__': 14 | p = argparse.ArgumentParser() 15 | p.add_argument( 16 | '--weights-path', 17 | type=str, 18 | required=True, 19 | help=('Path to the detectron model weights file. 
' 20 | 'Can be a URL, in which case it will be cached after ' 21 | 'downloading.')) 22 | p.add_argument( 23 | '--config-path', 24 | type=str, 25 | required=True, 26 | help=('Path to the detectron model config yaml file.')) 27 | p.add_argument( 28 | '--video-path', 29 | type=str, 30 | required=True, 31 | help=('Path to video to process.')) 32 | 33 | args = p.parse_args() 34 | 35 | weights_path = args.weights_path 36 | config_path = args.config_path 37 | movie_path = args.video_path 38 | 39 | print('Detecting objects in movie {}'.format(movie_path)) 40 | movie_name = os.path.splitext(os.path.basename(movie_path))[0] 41 | 42 | db = Database() 43 | [input_table], failed = db.ingest_videos( 44 | [('example', movie_path)], force=True) 45 | 46 | frame = db.sources.FrameColumn() 47 | strided_frame = db.streams.Range(frame, 0, 60) 48 | 49 | # Call the newly created object detect op 50 | cls_boxes, cls_segms, cls_keyps = db.ops.Detectron( 51 | frame=strided_frame, 52 | config_path=config_path, 53 | weights_path=weights_path, 54 | device=DeviceType.GPU) 55 | 56 | objdet_frame = db.ops.DetectronVizualize( 57 | frame=strided_frame, 58 | cls_boxes=cls_boxes, 59 | cls_segms=cls_segms, 60 | cls_keyps=cls_keyps) 61 | 62 | output_op = db.sinks.Column(columns={'frame': objdet_frame}) 63 | job = Job( 64 | op_args={ 65 | frame: db.table('example').column('frame'), 66 | output_op: 'example_obj_detect', 67 | }) 68 | [out_table] = db.run( 69 | output=output_op, 70 | jobs=[job], 71 | force=True, 72 | pipeline_instances_per_node=1) 73 | 74 | out_table.column('frame').save_mp4('{:s}_detected'.format(movie_name)) 75 | print('Successfully generated {:s}_detected.mp4'.format(movie_name)) 76 | -------------------------------------------------------------------------------- /scanner/engine/sink_registry.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/engine/sink_registry.h" 17 | 18 | namespace scanner { 19 | namespace internal { 20 | 21 | Result SinkRegistry::add_sink(const std::string& name, 22 | SinkFactory* factory) { 23 | Result result; 24 | result.set_success(true); 25 | if (factories_.count(name) > 0) { 26 | RESULT_ERROR(&result, "Attempted to re-register Sink %s", name.c_str()); 27 | return result; 28 | } 29 | if (factory->input_columns().empty() && !factory->variadic_inputs()) { 30 | RESULT_ERROR(&result, 31 | "Attempted to register Sink %s with empty input columns", 32 | name.c_str()); 33 | return result; 34 | } 35 | 36 | if (factory->per_element_output() && factory->entire_stream_output()) { 37 | RESULT_ERROR(&result, 38 | "Attempted to register Sink %s with both per-element and " 39 | "entire stream output. 
Specify only one.", 40 | name.c_str()); 41 | return result; 42 | } 43 | 44 | if (!factory->per_element_output() && !factory->entire_stream_output()) { 45 | RESULT_ERROR(&result, 46 | "Attempted to register Sink %s with neither per-element or " 47 | "entire stream output. One must be specified.", 48 | name.c_str()); 49 | return result; 50 | } 51 | 52 | factories_.insert({name, factory}); 53 | 54 | return result; 55 | } 56 | 57 | bool SinkRegistry::has_sink(const std::string& name) { 58 | return factories_.count(name) > 0; 59 | } 60 | 61 | SinkFactory* SinkRegistry::get_sink(const std::string& name) { 62 | return factories_.at(name); 63 | } 64 | 65 | SinkRegistry* get_sink_registry() { 66 | static SinkRegistry* registry = new SinkRegistry; 67 | return registry; 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /scanner/video/software/software_video_decoder.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/kernel.h" 19 | #include "scanner/util/queue.h" 20 | #include "scanner/video/video_decoder.h" 21 | 22 | extern "C" { 23 | #include "libavcodec/avcodec.h" 24 | #include "libavfilter/avfilter.h" 25 | #include "libavformat/avformat.h" 26 | #include "libavformat/avio.h" 27 | #include "libavutil/error.h" 28 | #include "libavutil/opt.h" 29 | #include "libavutil/pixdesc.h" 30 | #include "libswscale/swscale.h" 31 | } 32 | 33 | #include 34 | #include 35 | #include 36 | 37 | namespace scanner { 38 | namespace internal { 39 | 40 | /////////////////////////////////////////////////////////////////////////////// 41 | /// SoftwareVideoDecoder 42 | class SoftwareVideoDecoder : public VideoDecoder { 43 | public: 44 | SoftwareVideoDecoder(i32 device_id, DeviceType output_type, i32 thread_count); 45 | 46 | ~SoftwareVideoDecoder(); 47 | 48 | void configure(const FrameInfo& metadata) override; 49 | 50 | bool feed(const u8* encoded_buffer, size_t encoded_size, 51 | bool discontinuity = false) override; 52 | 53 | bool discard_frame() override; 54 | 55 | bool get_frame(u8* decoded_buffer, size_t decoded_size) override; 56 | 57 | int decoded_frames_buffered() override; 58 | 59 | void wait_until_frames_copied() override; 60 | 61 | private: 62 | void feed_packet(bool flush); 63 | 64 | int device_id_; 65 | DeviceType output_type_; 66 | AVPacket packet_; 67 | AVCodec* codec_; 68 | AVCodecContext* cc_; 69 | 70 | FrameInfo metadata_; 71 | i32 frame_width_; 72 | i32 frame_height_; 73 | std::vector conversion_buffer_; 74 | bool reset_context_; 75 | SwsContext* sws_context_; 76 | 77 | Queue frame_pool_; 78 | Queue decoded_frame_queue_; 79 | }; 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /scanner/video/software/software_video_encoder.h: 
-------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/api/kernel.h" 19 | #include "scanner/util/queue.h" 20 | #include "scanner/video/video_encoder.h" 21 | 22 | extern "C" { 23 | #include "libavcodec/avcodec.h" 24 | #include "libavfilter/avfilter.h" 25 | #include "libavformat/avformat.h" 26 | #include "libavformat/avio.h" 27 | #include "libavutil/error.h" 28 | #include "libavutil/opt.h" 29 | #include "libavutil/pixdesc.h" 30 | #include "libswscale/swscale.h" 31 | } 32 | 33 | #include 34 | #include 35 | #include 36 | 37 | namespace scanner { 38 | namespace internal { 39 | 40 | /////////////////////////////////////////////////////////////////////////////// 41 | /// SoftwareVideoEncoder 42 | class SoftwareVideoEncoder : public VideoEncoder { 43 | public: 44 | SoftwareVideoEncoder(i32 device_id, DeviceType output_type, i32 thread_count); 45 | 46 | ~SoftwareVideoEncoder(); 47 | 48 | void configure(const FrameInfo& metadata, const EncodeOptions& opts) override; 49 | 50 | bool feed(const u8* frame_buffer, size_t frame_size) override; 51 | 52 | bool flush() override; 53 | 54 | bool get_packet(u8* packet_buffer, size_t packet_size, 55 | size_t& actual_packet_size) override; 56 | 57 | int decoded_packets_buffered() override; 58 | 59 | void wait_until_packets_copied() override; 60 | 61 | private: 62 | void feed_frame(bool flush); 63 | 64 | int device_id_; 65 | DeviceType output_type_; 66 | AVCodec* codec_; 67 | AVCodecContext* cc_; 68 | AVBitStreamFilterContext* annexb_; 69 | 70 | FrameInfo metadata_; 71 | i32 frame_width_; 72 | i32 frame_height_; 73 | SwsContext* sws_context_; 74 | bool was_reset_; 75 | 76 | i32 frame_id_; 77 | AVFrame* frame_; 78 | std::deque ready_packet_queue_; 79 | }; 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /scanner/util/profiler.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University, NVIDIA Corporation 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 
14 | */ 15 | 16 | #pragma once 17 | 18 | #include "scanner/util/util.h" 19 | 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | namespace storehouse { 27 | class WriteFile; 28 | } 29 | 30 | namespace scanner { 31 | 32 | enum ProfilerLevel { 33 | Debug = 0, 34 | Info = 1, 35 | Important = 2 36 | }; 37 | 38 | extern ProfilerLevel PROFILER_LEVEL; 39 | 40 | class Profiler { 41 | public: 42 | Profiler(timepoint_t base_time); 43 | 44 | Profiler(const Profiler& other); 45 | 46 | void add_interval(const std::string& key, timepoint_t start, timepoint_t end, ProfilerLevel level=ProfilerLevel::Info); 47 | 48 | void increment(const std::string& key, int64_t value); 49 | 50 | void reset(timepoint_t base_time); 51 | 52 | struct TaskRecord { 53 | std::string key; 54 | int64_t start; 55 | int64_t end; 56 | }; 57 | 58 | const std::vector& get_records() const; 59 | 60 | const std::map& get_counters() const; 61 | 62 | protected: 63 | void spin_lock(); 64 | void unlock(); 65 | 66 | timepoint_t base_time_; 67 | std::atomic_flag lock_; 68 | std::vector records_; 69 | std::map counters_; 70 | }; 71 | 72 | class ProfileBlock { 73 | public: 74 | ProfileBlock(Profiler* profiler, std::string label); 75 | ~ProfileBlock(); 76 | protected: 77 | Profiler* profiler_; 78 | std::string label_; 79 | timepoint_t start_; 80 | }; 81 | 82 | void write_profiler_to_file(storehouse::WriteFile* file, int64_t node, 83 | std::string type_name, std::string tag, 84 | int64_t worker_num, const Profiler& profiler); 85 | 86 | } // namespace scanner 87 | 88 | #include "scanner/util/profiler.inl" 89 | -------------------------------------------------------------------------------- /docker/ubuntu16.04/Dockerfile.base: -------------------------------------------------------------------------------- 1 | # Scanner base image for Ubuntu 16.04 2 | 3 | ARG base_tag 4 | FROM ${base_tag} 5 | MAINTAINER Fait Poms "fpoms@cs.stanford.edu" 6 | ARG cores=1 7 | ARG cpu_only=OFF 8 | 9 | 10 | # Apt-installable dependencies 11 | RUN apt-get update 12 | RUN apt-get install -y cmake wget software-properties-common 13 | RUN apt-get update 14 | RUN add-apt-repository ppa:deadsnakes/ppa 15 | RUN apt-get update && apt-get upgrade -y && \ 16 | apt-get install -y software-properties-common && \ 17 | add-apt-repository -y ppa:git-core/ppa && \ 18 | apt-get update && \ 19 | apt-get install -y \ 20 | build-essential \ 21 | git libgtk2.0-dev pkg-config unzip llvm-5.0-dev clang-5.0 libc++-dev \ 22 | libgflags-dev libgtest-dev libssl-dev libcurl3-dev liblzma-dev \ 23 | libeigen3-dev libgoogle-glog-dev libatlas-base-dev libsuitesparse-dev \ 24 | libgflags-dev libx264-dev libopenjpeg-dev libxvidcore-dev \ 25 | libpng-dev libjpeg-dev libbz2-dev python-pip wget \ 26 | libleveldb-dev libsnappy-dev libhdf5-serial-dev liblmdb-dev python-dev \ 27 | python-tk autoconf autogen libtool libtbb-dev libopenblas-dev \ 28 | liblapacke-dev swig yasm python3.6 python3.6-dev python3.6-venv python3-pip cpio \ 29 | automake libass-dev \ 30 | libfreetype6-dev libsdl2-dev libtheora-dev libtool \ 31 | libva-dev libvdpau-dev libvorbis-dev libxcb1-dev libxcb-shm0-dev \ 32 | libxcb-xfixes0-dev mercurial texinfo zlib1g-dev curl libcap-dev \ 33 | libgnutls-dev libpq-dev postgresql libx265-dev 34 | 35 | RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.5 1 36 | RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.6 2 37 | 38 | RUN apt-get install -y --no-install-recommends libboost-all-dev 39 | 40 | # Non-apt-installable 
dependencies 41 | ENV deps /deps 42 | WORKDIR ${deps} 43 | 44 | # CMake (we use 3.7 because >3.8 has issues building OpenCV due to http_proxy) 45 | RUN wget "https://cmake.org/files/v3.12/cmake-3.12.2.tar.gz" && \ 46 | tar -xf cmake-3.12.2.tar.gz && cd ${deps}/cmake-3.12.2 && \ 47 | ./bootstrap --parallel=${cores} -- -DCMAKE_USE_OPENSSL=ON && \ 48 | make -j${cores} && \ 49 | make install && \ 50 | rm -rf ${deps}/cmake-3.12.2.tar.gz ${deps}/cmake-3.12.2 51 | 52 | # Python dependencies 53 | WORKDIR /opt/scanner-base 54 | ADD . . 55 | RUN pip3 install numpy==1.12.0 Cython && pip3 install -r requirements.txt 56 | 57 | ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video 58 | -------------------------------------------------------------------------------- /scanner/util/common.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Carnegie Mellon University 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | #include "scanner/util/common.h" 17 | 18 | namespace scanner { 19 | 20 | std::ostream& operator<<(std::ostream& os, DeviceHandle const& handle) { 21 | std::string name; 22 | if (handle.type == DeviceType::CPU) { 23 | name = "CPU"; 24 | } else if (handle.type == DeviceType::GPU) { 25 | name = "GPU"; 26 | } else { 27 | LOG(FATAL) << "Invalid device type"; 28 | } 29 | return os << "{" << name << ", " << handle.id << "}"; 30 | } 31 | 32 | StridedInterval::StridedInterval(i32 start, i32 end, i32 stride) 33 | : start(start), end(end), stride(stride) {} 34 | 35 | StridedInterval::StridedInterval(const Interval& i) 36 | : start(i.start), end(i.end), stride(1) {} 37 | 38 | bool string_to_image_encoding_type(const std::string& s, 39 | ImageEncodingType& type) { 40 | bool success = true; 41 | if (s == "png" || s == "PNG") { 42 | type = ImageEncodingType::PNG; 43 | } else if (s == "jpeg" || s == "JPEG" || s == "jpg" || s == "JPG") { 44 | type = ImageEncodingType::JPEG; 45 | } else if (s == "bmp" || s == "BMP") { 46 | type = ImageEncodingType::BMP; 47 | } else if (s == "raw" || s == "RAW") { 48 | type = ImageEncodingType::RAW; 49 | } else { 50 | success = false; 51 | } 52 | return success; 53 | } 54 | 55 | std::string image_encoding_type_to_string(ImageEncodingType t) { 56 | std::string s; 57 | switch (t) { 58 | case ImageEncodingType::JPEG: 59 | s = "jpeg"; 60 | break; 61 | case ImageEncodingType::PNG: 62 | s = "png"; 63 | break; 64 | case ImageEncodingType::BMP: 65 | s = "bmp"; 66 | break; 67 | case ImageEncodingType::RAW: 68 | s = "raw"; 69 | break; 70 | default: 71 | assert(false); 72 | } 73 | return s; 74 | } 75 | 76 | i32 NUM_CUDA_STREAMS = 32; // Number of cuda streams for image processing 77 | } 78 | -------------------------------------------------------------------------------- /examples/how-tos/halide/resize_op.cpp: -------------------------------------------------------------------------------- 1 | #include "halide_resize/halide_resize.h" 2 | #include "resize.pb.h" 3 | #include 
"scanner/api/kernel.h" 4 | #include "scanner/api/op.h" 5 | #include "scanner/util/halide.h" 6 | #include "scanner/util/memory.h" 7 | 8 | class ResizeKernel : public scanner::VideoKernel { 9 | public: 10 | ResizeKernel(const scanner::Kernel::Config& config) 11 | : scanner::VideoKernel(config), device_(config.devices[0]) { 12 | ResizeArgs args; 13 | args.ParseFromArray(config.args.data(), config.args.size()); 14 | width_ = args.width(); 15 | height_ = args.height(); 16 | } 17 | 18 | void execute(const scanner::BatchedElements& input_columns, 19 | scanner::BatchedElements& output_columns) override { 20 | int input_count = input_columns[0].rows.size(); 21 | 22 | // This must be called at the top of the execute method in any VideoKernel. 23 | // See the VideoKernel for the implementation check_frame_info. 24 | check_frame_info(device_, input_columns[1]); 25 | 26 | size_t output_size = width_ * height_ * 3; 27 | unsigned char* output_block = scanner::new_block_buffer( 28 | device_, output_size * input_count, input_count); 29 | 30 | for (int i = 0; i < input_count; ++i) { 31 | buffer_t input_halide_buf = {0}; 32 | scanner::setup_halide_frame_buf(input_halide_buf, frame_info_); 33 | scanner::set_halide_buf_ptr(device_, input_halide_buf, 34 | input_columns[0].rows[i].buffer, 35 | input_columns[0].rows[i].size); 36 | 37 | buffer_t output_halide_buf = {0}; 38 | scanner::setup_halide_frame_buf(output_halide_buf, frame_info_); 39 | scanner::set_halide_buf_ptr(device_, output_halide_buf, 40 | output_block + i * output_size, output_size); 41 | 42 | int error = halide_resize(&input_halide_buf, frame_info_.width(), 43 | frame_info_.height(), width_, height_, 44 | &output_halide_buf); 45 | LOG_IF(FATAL, error != 0) << "Halide error " << error; 46 | 47 | scanner::unset_halide_buf_ptr(device_, input_halide_buf); 48 | scanner::unset_halide_buf_ptr(device_, output_halide_buf); 49 | } 50 | } 51 | 52 | private: 53 | scanner::DeviceHandle device_; 54 | int width_; 55 | int height_; 56 | }; 57 | 58 | REGISTER_OP(Resize).outputs({"frame"}); 59 | 60 | REGISTER_KERNEL(Resize, ResizeKernel) 61 | .device(scanner::DeviceType::GPU) 62 | .num_devices(1); 63 | --------------------------------------------------------------------------------