├── .github
└── workflows
│ └── deploy.yml
├── .gitignore
├── .travis.yml
├── LICENSE
├── Makefile
├── README.md
├── _config.yml
├── _figures
├── DSRS_V1.png
├── HPC_V2_Multi.png
├── VTune-01.png
├── dars_01.png
├── dbrs_01.png
├── dlrs-fig-1.png
├── dlrs-fig-2.png
├── dlrs-fig-3.png
├── dlrs-fig-4.png
├── dlrs-fig-5.png
├── dlrs-transformers-1.png
├── dlrs_single_2.png
├── hpcrs_single_node.png
├── mers-fig-1.png
├── mers-fig-2.png
├── mers_01.png
└── stacks_logo.png
├── _static
└── css
│ └── custom.css
├── _templates
├── breadcrumbs.html
└── layout.html
├── conf.py
├── dlrs
├── README.md
├── authors.txt
├── bert-performance.rst
├── deprecated
│ ├── clearlinux
│ │ ├── README.md
│ │ ├── ml-compiler
│ │ │ ├── Dockerfile
│ │ │ ├── README.md
│ │ │ └── scripts
│ │ │ │ ├── install_dl_frontends.sh
│ │ │ │ ├── install_mkl.sh
│ │ │ │ └── install_tvm.sh
│ │ ├── pytorch
│ │ │ ├── mkl
│ │ │ │ ├── Dockerfile
│ │ │ │ ├── README.md
│ │ │ │ ├── licenses
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── clear_LICENSE
│ │ │ │ │ ├── horovod_LICENSE
│ │ │ │ │ ├── miniconda_LICENSE
│ │ │ │ │ ├── mkl_LICENSE
│ │ │ │ │ └── pytorch_LICENSE
│ │ │ │ └── scripts
│ │ │ │ │ ├── deps
│ │ │ │ │ ├── conda.deps
│ │ │ │ │ └── pip.deps
│ │ │ │ │ ├── generate_defaults.py
│ │ │ │ │ ├── install_addons.sh
│ │ │ │ │ ├── install_conda.sh
│ │ │ │ │ ├── install_pytorch.sh
│ │ │ │ │ ├── install_torch_deps.sh
│ │ │ │ │ ├── install_torchvision.sh
│ │ │ │ │ ├── install_utils.sh
│ │ │ │ │ ├── mkl_env.sh
│ │ │ │ │ └── torch_utils.sh
│ │ │ └── oss
│ │ │ │ ├── Dockerfile
│ │ │ │ ├── README.md
│ │ │ │ └── licenses
│ │ │ │ ├── README.md
│ │ │ │ ├── clear_LICENSE
│ │ │ │ ├── horovod_LICENSE
│ │ │ │ └── pytorch_LICENSE
│ │ ├── releasenote.md
│ │ ├── tensorflow
│ │ │ ├── mkl
│ │ │ │ ├── Dockerfile.builder
│ │ │ │ ├── Dockerfile.clr_ds
│ │ │ │ ├── Dockerfile.dlrs
│ │ │ │ ├── Dockerfile.openvino
│ │ │ │ ├── Dockerfile.tf
│ │ │ │ ├── Makefile
│ │ │ │ ├── README.md
│ │ │ │ ├── config.make
│ │ │ │ ├── licenses
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── clear_LICENSE
│ │ │ │ │ ├── horovod_LICENSE
│ │ │ │ │ ├── miniconda_LICENSE
│ │ │ │ │ └── mkl_LICENSE
│ │ │ │ └── scripts
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── build_openvino_ie.sh
│ │ │ │ │ ├── check_avx512.sh
│ │ │ │ │ ├── install_tensorflow.sh
│ │ │ │ │ ├── serve.sh
│ │ │ │ │ └── set_env.sh
│ │ │ └── oss
│ │ │ │ ├── Dockerfile
│ │ │ │ ├── README.md
│ │ │ │ └── licenses
│ │ │ │ ├── README.md
│ │ │ │ ├── clear_LICENSE
│ │ │ │ └── horovod_LICENSE
│ │ └── tensorflow_2
│ │ │ └── mkl
│ │ │ ├── Dockerfile.builder
│ │ │ ├── Dockerfile.clr_ds
│ │ │ ├── Dockerfile.dlrs
│ │ │ ├── Dockerfile.openvino
│ │ │ ├── Dockerfile.tf
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── config.make
│ │ │ ├── licenses
│ │ │ ├── README.md
│ │ │ ├── clear_LICENSE
│ │ │ ├── horovod_LICENSE
│ │ │ ├── miniconda_LICENSE
│ │ │ └── mkl_LICENSE
│ │ │ └── scripts
│ │ │ ├── README.md
│ │ │ ├── build_openvino_ie.sh
│ │ │ ├── check_avx512.sh
│ │ │ ├── install_addons.sh
│ │ │ ├── install_tensorflow_2.0.sh
│ │ │ ├── serve.sh
│ │ │ └── set_env.sh
│ └── ubuntu
│ │ ├── README.md
│ │ ├── pytorch
│ │ ├── .dockerignore
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── licenses
│ │ │ ├── LICENSE_MIT
│ │ │ ├── third-party-programs-ipp.txt
│ │ │ ├── third-party-programs-openmp.txt
│ │ │ ├── third-party-programs-safestring.txt
│ │ │ ├── third-party-programs_oneapi_DNN.txt
│ │ │ ├── third-party-programs_onemkl.txt
│ │ │ ├── third-party-programs_pytorch.txt
│ │ │ └── third-party-programs_tbb.txt
│ │ └── scripts
│ │ │ ├── generate_defaults.py
│ │ │ ├── install_addons.sh
│ │ │ └── mkl_env.sh
│ │ ├── tensorflow
│ │ ├── .dockerignore
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── licenses
│ │ │ ├── LICENSE_MIT
│ │ │ ├── third-party-programs-ipp.txt
│ │ │ ├── third-party-programs-openmp.txt
│ │ │ ├── third-party-programs-safestring.txt
│ │ │ ├── third-party-programs_oneapi_DNN.txt
│ │ │ ├── third-party-programs_onemkl.txt
│ │ │ ├── third-party-programs_tbb.txt
│ │ │ └── third-party-programs_tensorflow.txt
│ │ ├── patches
│ │ │ ├── ade_gcc9_tmp_fix.patch
│ │ │ ├── findCaller_fix.patch
│ │ │ ├── openvino_gcc9_fix.patch
│ │ │ └── python3.8_fix_for_tf1.15.patch
│ │ └── scripts
│ │ │ ├── Readme.md
│ │ │ ├── build_openvino_ie.sh
│ │ │ ├── check_avx512.sh
│ │ │ ├── install_tensorflow.sh
│ │ │ ├── serve.sh
│ │ │ └── set_env.sh
│ │ └── tensorflow_2
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── licenses
│ │ ├── LICENSE_MIT
│ │ ├── third-party-programs-ipp.txt
│ │ ├── third-party-programs-openmp.txt
│ │ ├── third-party-programs-safestring.txt
│ │ ├── third-party-programs_oneapi_DNN.txt
│ │ ├── third-party-programs_onemkl.txt
│ │ ├── third-party-programs_tbb.txt
│ │ └── third-party-programs_tensorflow.txt
│ │ ├── patches
│ │ ├── ade_gcc9_tmp_fix.patch
│ │ └── openvino_gcc9_fix.patch
│ │ └── scripts
│ │ ├── Readme.md
│ │ ├── build_openvino_ie.sh
│ │ ├── check_avx512.sh
│ │ ├── install_addons.sh
│ │ ├── install_tensorflow_2.sh
│ │ ├── serve.sh
│ │ └── set_env.sh
├── dlrs.rst
├── index.rst
├── ml-compiler
│ ├── Dockerfile
│ ├── Makefile
│ ├── README.md
│ ├── config.make
│ ├── licenses
│ │ ├── sources.txt
│ │ └── third-party-programs.txt
│ └── scripts
│ │ ├── install_dl_frontends.sh
│ │ ├── install_onednn.sh
│ │ └── install_tvm.sh
├── pytorch
│ ├── .dockerignore
│ ├── Dockerfile.centos
│ ├── Dockerfile.ubuntu
│ ├── README.md
│ ├── common
│ │ ├── frameworks.txt
│ │ └── requirements.txt
│ ├── dataloader.patch_v1.8.0-rc2
│ ├── licenses
│ │ ├── sources.txt
│ │ └── third-party-programs.txt
│ └── scripts
│ │ ├── cleanup.sh
│ │ ├── generate_defaults.py
│ │ ├── install_addons.sh
│ │ ├── mkl_env.sh
│ │ └── torch_utils.sh
├── serving
│ ├── Dockerfile
│ ├── Makefile
│ ├── README.md
│ ├── config.make
│ ├── licenses
│ │ ├── sources.txt
│ │ └── third-party-programs.txt
│ └── scripts
│ │ └── cleanup.sh
├── tensorflow
│ ├── Dockerfile.centos
│ ├── Dockerfile.ubuntu
│ ├── README.md
│ ├── licenses
│ │ ├── sources.txt
│ │ └── third-party-programs.txt
│ ├── patches
│ │ ├── openvino
│ │ │ ├── ade_gcc9_tmp_fix.patch
│ │ │ └── openvino_gcc9_fix.patch
│ │ └── tf1
│ │ │ ├── findCaller_fix.patch
│ │ │ └── python3.8_fix_for_tf1.15.patch
│ └── scripts
│ │ ├── build_openvino_centos.sh
│ │ ├── build_openvino_ubuntu.sh
│ │ ├── cleanup.sh
│ │ ├── get_openvino.sh
│ │ ├── install_ovms.sh
│ │ ├── install_py_packages.sh
│ │ ├── install_tf1_centos.sh
│ │ ├── install_tf1_ubuntu.sh
│ │ ├── install_tf2_centos.sh
│ │ └── install_tf2_ubuntu.sh
└── terms_of_use.md
├── dsrs
├── README.md
├── index.rst
├── memcached
│ ├── Dockerfile
│ ├── README.md
│ ├── licenses
│ │ └── third-party-programs_dsrs.txt
│ └── scripts
│ │ ├── docker-entrypoint.sh
│ │ └── docker-healthcheck
├── redis
│ ├── Dockerfile
│ ├── README.md
│ ├── licenses
│ │ └── third-party-programs_dsrs.txt
│ └── scripts
│ │ ├── docker-entrypoint.sh
│ │ ├── docker-healthcheck
│ │ └── redis.conf
└── terms_of_use.md
├── hpcrs
├── Dockerfile
├── Makefile
├── NEWS.md
├── README.md
├── authors.txt
├── components
│ ├── config_vars.sh
│ ├── horovod
│ │ └── install.sh
│ ├── libfabrics
│ │ └── install.sh
│ ├── licenses
│ │ ├── third-party-programs.txt
│ │ ├── third-party-programs_v2.txt
│ │ └── third-party-programs_v3.txt
│ ├── omnipath
│ │ ├── Readme.md
│ │ └── drivers.sh
│ ├── oneapi
│ │ ├── add_repo_centos.sh
│ │ ├── components.sh
│ │ ├── deprecated
│ │ │ ├── common.sh
│ │ │ ├── dpcpp.sh
│ │ │ ├── oneccl.sh
│ │ │ └── onetbb.sh
│ │ ├── dgpu.sh
│ │ └── pytorch.sh
│ ├── pytorch
│ │ └── install.sh
│ ├── sources.sh
│ ├── spack
│ │ └── install.sh
│ └── utils.sh
├── d2s
│ ├── .gitignore
│ ├── README.md
│ ├── d2s.py
│ ├── setup.py
│ └── tests
│ │ └── d2s_tests.py
├── docs
│ ├── FAQ.md
│ └── hpcrs_tutorial.md
├── index.rst
├── licenses
│ ├── clear_pkg_license.txt
│ ├── third-party-programs.txt
│ ├── third-party-programs_v2.txt
│ └── third-party-programs_v3.txt
└── terms_of_use.md
├── index.rst
├── mers
├── AUTHORS.md
├── BUGS.md
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSES.md
├── NEWS.md
├── README.md
├── deprecated
│ └── clearlinux
│ │ ├── Dockerfile
│ │ ├── INSTALL.md
│ │ ├── aom-patches
│ │ └── stacks-mers-v2-include-aom.diff
│ │ ├── ffmpeg-patches
│ │ ├── 0026-return-NULL-after-breaking-while.patch
│ │ └── CVE-2019-15942.patch
│ │ ├── opencv-patches
│ │ └── CVE-2019-5064.patch
│ │ ├── scripts
│ │ ├── docker-healthcheck
│ │ └── entrypoint.sh
│ │ └── svt-hevc-patches
│ │ └── 0001-include-pbutils-as-gst-plugin-depedency.patch
├── index.rst
├── mers.rst
├── releasenotes.md
├── terms_of_use.md
└── ubuntu
│ ├── Dockerfile
│ ├── INSTALL.md
│ ├── aom-patches
│ └── stacks_mers-v3-include-aom.diff
│ ├── ffmpeg-patches
│ └── 0061-avcodec-add-av1-VAAPI-decoder.patch
│ ├── scripts
│ ├── docker-healthcheck
│ └── entrypoint.sh
│ └── tpp
│ ├── level-zero-tpp.txt
│ ├── opencl-tpp.txt
│ ├── openvino-third-party-programs.txt
│ └── third-party-programs.txt
├── oneContainer
└── index.rst
├── perf.rst
├── processmd.py
├── requirements.txt
└── whitepapers
└── index.rst
/.github/workflows/deploy.yml:
--------------------------------------------------------------------------------
1 | name: Deploy
2 | on:
3 | push:
4 | branches:
5 | - master
6 |
7 | jobs:
8 | build:
9 |
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v1
14 | - name: Install dependencies
15 | run: |
16 | export PATH="$HOME/.local/bin:$PATH"
17 | sudo apt-get install -y python3-setuptools
18 | pip3 install --user -r requirements.txt
19 | - name: Build the docs
20 | run: |
21 | export PATH="$HOME/.local/bin:$PATH"
22 | make html
23 | touch _build/html/.nojekyll
24 | - name: Deploy the docs
25 | run: |
26 | mv _build/html $HOME/output
27 | cd $HOME/output
28 | git init
29 | git config --global user.name "${GITHUB_ACTOR}"
30 | git config --global user.email "${GITHUB_ACTOR}@github.com"
31 | git add .
32 | git commit -m "latest html output"
33 | git push -f https://${GITHUB_ACTOR}:${{secrets.ACCESS_TOKEN}}@github.com/intel/stacks.git HEAD:gh-pages
34 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | _build/
2 | .DS_Store
3 | .vscode/
4 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 |
3 | services:
4 | - docker
5 |
6 | env:
7 | #- IMAGE_NAME=stacks_dlrs_mkl DIR=dlrs/clearlinux/tensorflow/mkl/
8 | - IMAGE_NAME=stacks_dlrs_oss DIR=dlrs/clearlinux/tensorflow/oss/
9 |
10 | script:
11 | - docker build --build-arg clear_ver="30650" -t $IMAGE_NAME -f $DIR/Dockerfile $DIR
12 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | DISCONTINUATION OF PROJECT
2 |
3 | This project will no longer be maintained by Intel.
4 |
5 | Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project.
6 |
7 | Intel no longer accepts patches to this project.
8 |
9 | If you have an ongoing need to use this project, are interested in independently developing it, or would like to maintain patches for the open source software community, please create your own fork of this project.
10 |
11 | Contact: webadmin@linux.intel.com
12 | # Stacks containers have been deprecated; please switch to oneAPI-based containers. You can find oneAPI containers at this link: [https://hub.docker.com/u/intel](https://hub.docker.com/u/intel)
13 |
14 | ## System Stacks for Linux* OS
15 |
16 |
17 |
18 |
19 | The System Stacks for Linux OS are a set of production ready, containerized reference architectures with integrated, highly-performant, open source components optimized for 2nd generation Intel® Xeon® Scalable Processors. Highly-tuned and built for cloud native environments, the stacks help developers to prototype quickly by reducing complexity associated with integrating multiple software components, while still giving users the flexibility to customize their solutions.
20 |
21 | This open source community release is part of an effort to ensure developers have easy access to the features and functionality of Intel Platforms.
22 |
23 | For info on end-to-end use cases using the stacks, please refer to the [stacks-usecase](https://github.com/intel/stacks-usecase) repository.
24 |
25 | ### Contributing
26 |
27 | We'd love to accept your patches, if you have improvements to stacks, send us your pull requests or if you find any issues, raise an issue. Contributions can be anything from documentation updates to optimizations!
28 |
29 |
30 | ### Security Issues
31 |
32 | Security issues can be reported to Intel's security incident response team via
33 | [https://intel.com/security](https://intel.com/security).
34 |
35 |
36 | ### Mailing List
37 |
38 | See our public [mailing list](https://lists.01.org/mailman/listinfo/stacks) page for details on how to contact us. You should only subscribe to the Stacks mailing lists using an email address that you don't mind being public.
39 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_config.yml
--------------------------------------------------------------------------------
/_figures/DSRS_V1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/DSRS_V1.png
--------------------------------------------------------------------------------
/_figures/HPC_V2_Multi.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/HPC_V2_Multi.png
--------------------------------------------------------------------------------
/_figures/VTune-01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/VTune-01.png
--------------------------------------------------------------------------------
/_figures/dars_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dars_01.png
--------------------------------------------------------------------------------
/_figures/dbrs_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dbrs_01.png
--------------------------------------------------------------------------------
/_figures/dlrs-fig-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dlrs-fig-1.png
--------------------------------------------------------------------------------
/_figures/dlrs-fig-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dlrs-fig-2.png
--------------------------------------------------------------------------------
/_figures/dlrs-fig-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dlrs-fig-3.png
--------------------------------------------------------------------------------
/_figures/dlrs-fig-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dlrs-fig-4.png
--------------------------------------------------------------------------------
/_figures/dlrs-fig-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dlrs-fig-5.png
--------------------------------------------------------------------------------
/_figures/dlrs-transformers-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dlrs-transformers-1.png
--------------------------------------------------------------------------------
/_figures/dlrs_single_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/dlrs_single_2.png
--------------------------------------------------------------------------------
/_figures/hpcrs_single_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/hpcrs_single_node.png
--------------------------------------------------------------------------------
/_figures/mers-fig-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/mers-fig-1.png
--------------------------------------------------------------------------------
/_figures/mers-fig-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/mers-fig-2.png
--------------------------------------------------------------------------------
/_figures/mers_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/mers_01.png
--------------------------------------------------------------------------------
/_figures/stacks_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/_figures/stacks_logo.png
--------------------------------------------------------------------------------
/_static/css/custom.css:
--------------------------------------------------------------------------------
1 | .wy-nav-content {
2 | max-width: none;
3 | }
--------------------------------------------------------------------------------
/_templates/breadcrumbs.html:
--------------------------------------------------------------------------------
1 | {%- extends "sphinx_rtd_theme/breadcrumbs.html" %}
2 |
3 | {% block breadcrumbs_aside %}
4 | {% endblock %}
5 |
--------------------------------------------------------------------------------
/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 | {% block extrahead %}
3 |
4 | {% endblock %}
--------------------------------------------------------------------------------
/dlrs/README.md:
--------------------------------------------------------------------------------
1 | ## Deep Learning Reference Stack
2 |
3 |
4 | The Deep Learning Reference Stack is an integrated, highly-performant open source stack optimized for Intel® Xeon® Scalable platforms.
5 |
6 |
7 |
8 |
9 | Highly-tuned and built for cloud native environments, the release of DLRS enables developers to quickly prototype by reducing complexity associated with integrating multiple software components, while still giving users the flexibility to customize their solutions.
10 |
11 | The stack includes highly tuned software components across the operating system (Ubuntu* or Centos*), deep learning framework (TensorFlow*, PyTorch*), deep learning libraries ([Intel® oneAPI Deep Neural Network Library (oneDNN)](https://01.org/dnnl)) and other software components. This open source community release is part of an effort to ensure AI developers have easy access to all features and functionality of Intel platforms. To offer more flexibility, there are multiple versions of the Deep Learning Reference Stack.
12 |
13 | ### For Ubuntu based images:
14 |
15 | * [TensorFlow 1.15.3 with Intel® oneDNN primitives, Intel® DL Boost](https://hub.docker.com/r/sysstacks/dlrs-tensorflow-ubuntu)
16 | * [TensorFlow 2.4.0 with Intel® oneDNN primitives, Intel® DL Boost](https://hub.docker.com/r/sysstacks/dlrs-tensorflow2-ubuntu)
17 | * [PyTorch 1.8 with oneDNN primitives, Intel® DL Boost](https://hub.docker.com/r/sysstacks/dlrs-pytorch-ubuntu)
18 | * [TensorFlow Serving 2.4.1 and OpenVINO™ v2021.2 GPU accelerated serving solution for Deep Learning models](https://hub.docker.com/repository/docker/sysstacks/dlrs-serving-ubuntu)
19 | * [Tensor Virtual Machine or TVM Deep Learning Compiler Image with oneDNN primitives](https://hub.docker.com/r/sysstacks/dlrs-ml-compiler-ubuntu)
20 |
21 | ### For Centos based images:
22 |
23 | * [TensorFlow 1.15.3 with Intel® oneDNN primitives, Intel® DL Boost](https://hub.docker.com/r/sysstacks/dlrs-tensorflow-centos)
24 | * [TensorFlow 2.4.0 with Intel® oneDNN primitives, Intel® DL Boost](https://hub.docker.com/r/sysstacks/dlrs-tensorflow2-centos)
25 | * [PyTorch 1.8 with oneDNN primitives, Intel® DL Boost](https://hub.docker.com/r/sysstacks/dlrs-pytorch-centos)
26 |
27 | Please see the tags tab in dockerhub to find the versions listed above, valid tags for version 0.9 of DLRS are `latest` (default), `v0.9.0` and `v0.9.1` for PyTorch-based images.
28 |
29 | ### Sources
30 |
31 | Source code for packages that are part of the solution can be found in the image: https://hub.docker.com/r/sysstacks/dlrs-sources
32 |
33 |
34 |
--------------------------------------------------------------------------------
/dlrs/authors.txt:
--------------------------------------------------------------------------------
1 | Lamego, Jose
2 | Meados, Cord
3 | Plascencia, Daniela
4 | Rascon Garcia, Eduardo I
5 | Robles Gutierrez, Jesus Hector
6 | Unnikrishnan Nair, Rahul
7 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/README.md:
--------------------------------------------------------------------------------
1 | # Deep Learning Reference Stack containers based on Clear Linux OS
2 |
3 | We created the Deep Learning Reference Stack to help AI developers deliver the best experience on Intel® Architecture. This stack reduces complexity common with deep learning software components, provides flexibility for customized solutions, and enables you to quickly prototype and deploy Deep Learning workloads.
4 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/ml-compiler/Dockerfile:
--------------------------------------------------------------------------------
1 | #---------------------------------------------------------------------
2 | # TVM ML Compiler on Clear Linux
3 | #---------------------------------------------------------------------
4 | ARG clear_ver
5 | FROM clearlinux/stacks-clearlinux:$clear_ver
6 | LABEL maintainer=otc-swstacks@intel.com
7 |
8 | # update os and add required bundles
9 | RUN swupd bundle-add git wget \
10 | python3-basic llvm c-basic \
11 | && rm -rf /var/lib/swupd/*
12 |
13 | # library linking fix
14 | RUN cd /usr/lib64 \
15 | && ln -sf libstdc++.so.6.0.2* libstdc++.so.6 \
16 | && ln -sf libzstd.so.1.4.* libzstd.so.1
17 |
18 | # build mkl, install dl libs and tvm
19 | COPY ./scripts/ /scripts
20 | RUN ./scripts/install_mkl.sh \
21 | && ./scripts/install_tvm.sh
22 |
23 | # this will install popular dl libs like TensorFlow,
24 | # uncomment this line, if you need the frameworks to be prebuilt
25 | # (fixme): add a Readme informing user about this
26 | #RUN ./scripts/install_dl_frontends.sh
27 | # init
28 | WORKDIR /workspace
29 | RUN echo "export LD_LIBRARY_PATH=/usr/local/lib:/usr/lib64:/usr/lib:/usr/lib32" >> /.bashrc \
30 | && echo "export PYTHONPATH=/tvm/python:/tvm/topi/python:/tvm/nnvm/python/:/tvm/vta/python" >> /.bashrc \
31 | && chmod -R a+w /workspace
32 |
33 | # init
34 | SHELL ["/bin/bash", "-c"]
35 |
36 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/ml-compiler/README.md:
--------------------------------------------------------------------------------
1 | # System Stacks Deep Learning Compiler
2 |
3 | [](https://microbadger.com/images/sysstacks/dlrs-ml-compiler-clearlinux "Get your own image badge on microbadger.com")
4 | [](https://microbadger.com/images/sysstacks/dlrs-ml-compiler-clearlinux "Get your own version badge on microbadger.com")
5 |
6 | Stacks Deep Learning Compiler based on TVM on Clear Linux.
7 |
8 |
9 | ### Building Locally
10 |
11 | Default build args in Docker are on: https://docs.docker.com/engine/reference/builder/#arg
12 |
13 | >NOTE: This command is for locally building this image alone.
14 |
15 | ```
16 | docker build --no-cache --build-arg clear_ver="32690" -t clearlinux/stacks-tvm .
17 | ```
18 |
19 | To install front-end deep learning libraries, use:
20 |
21 | ```bash
22 | ./scripts/install_dl_frontends.sh
23 | ```
24 |
25 | This will install TensorFlow, PyTorch, TorchVision and ONNX.
26 |
27 | ### Build ARGs
28 |
29 | * `clear_ver` specifies the latest validated Clearlinux version for this DLRS Dockerfile.
30 | >NOTE: Changing this version may result in errors, if you want to upgrade the OS version, you should use `swupd_args` instead.
31 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/ml-compiler/scripts/install_dl_frontends.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2020 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | set -e
18 | set -u
19 | set -o pipefail
20 |
21 | run() {
22 | echo "=============================================================="
23 | printf "$(date) -- %s"
24 | printf "%s\n" "$@"
25 | echo "=============================================================="
26 | }
27 |
28 | install_dl_libs() {
29 | pip --no-cache-dir install \
30 | torch==1.4.0 \
31 | torchvision==0.5.0 \
32 | onnx future \
33 | cython scipy Image Pillow \
34 | && rm -rf /tmp/* \
35 | && find /usr/lib/ -follow -type f -name '*.pyc' -delete \
36 | && find /usr/lib/ -follow -type f -name '*.js.map' -delete
37 | }
38 |
39 | begin="$(date +%s)"
40 | run "install DL frameworks" && install_dl_libs
41 | finish="$(date +%s)"
42 | runtime=$(((finish-begin)/60))
43 | run "Done in : $runtime minute(s)"
44 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/ml-compiler/scripts/install_mkl.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2020 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | set -e
18 | set -u
19 | set -o pipefail
20 |
21 | export MKL_VERSION=mklml_lnx_2019.0.5.20190502
22 | export MKLDNN=v0.19
23 | export MKL_ROOT=/usr/local/lib/mkl
24 | export CFLAGS="-O3 -falign-functions=32 -fno-lto -fno-math-errno -fno-semantic-interposition -fno-trapping-math "
25 | export CXXFLAGS="-O3 -falign-functions=32 -fno-lto -fno-math-errno -fno-semantic-interposition -fno-trapping-math "
26 | export FCFLAGS="$CFLAGS "
27 | export FFLAGS="$CFLAGS "
28 | export CFLAGS="$CFLAGS -march=skylake-avx512 -m64 -pipe"
29 | export CXXFLAGS="$CXXFLAGS -march=skylake-avx512 -m64 -pipe"
30 | export GCC_IGNORE_WERROR=1
31 |
32 | run()
33 | {
34 | echo "=============================================================="
35 | printf "$(date) -- %s"
36 | printf "%s\n" "$@"
37 | echo "=============================================================="
38 | }
39 |
40 | install_mkl()
41 | {
42 | if [ ! -d ${MKL_ROOT} ]; then
43 | mkdir -p ${MKL_ROOT} \
44 | && wget https://github.com/intel/mkl-dnn/releases/download/"$MKLDNN"/"$MKL_VERSION".tgz \
45 | && tar -xzf "$MKL_VERSION".tgz -C ${MKL_ROOT} \
46 | && mv "${MKL_ROOT:?}"/"${MKL_VERSION:?}"/* ${MKL_ROOT} \
47 | && rm -rf "${MKL_ROOT:?}"/"${MKL_VERSION:?}" \
48 | && rm "${MKL_VERSION}.tgz"
49 | fi
50 | echo "MKL libs are in directory: ${MKL_ROOT}"
51 | }
52 |
53 | start="$(date +%s)"
54 | run "Install mkl" && install_mkl
55 | end="$(date +%s)"
56 | runtime=$(((end-start)/60))
57 | run "Done in : $runtime minute(s)"
58 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/Dockerfile:
--------------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Builder stage for MKL version of Pytorch and Torchvision on ClearLinux OS
#---------------------------------------------------------------------------
ARG clear_ver
FROM clearlinux/stacks-clearlinux:$clear_ver as builder

# Install SW packages
RUN swupd bundle-add wget \
    openssh-server devpkg-openmpi devpkg-llvm git which python3-basic \
    && swupd clean && rm -rf /var/lib/swupd/*

# Install Pytorch build requirements
# NOTE: WORKDIR is relative; with the default working dir "/" it resolves
# to /buildir, matching the absolute /buildir paths used below and in
# torch_utils.sh (which clones pytorch/vision and creates /torch-wheels).
WORKDIR buildir
COPY scripts/torch_utils.sh .
RUN ./torch_utils.sh

# Pytorch build
# Optimized flags for the build fleet; USE_FBGEMM=0 disables FBGEMM
# (see the matching comment in install_pytorch.sh: build breaks with it on).
ENV GCC_IGNORE_WERROR=1 \
    CFLAGS="$CFLAGS -O3 -mfma -march=skylake-avx512 -mtune=cascadelake" \
    CXXFLAGS="$CXXFLAGS -O3 -mfma -march=skylake-avx512 -mtune=cascadelake" \
    USE_FBGEMM=0

# Build the pytorch wheel into /torch-wheels and install it so the
# torchvision build below can import torch.
RUN cd /buildir/pytorch \
    && python setup.py bdist_wheel -d /torch-wheels && python setup.py install

# Torchvision build
ENV GCC_IGNORE_WERROR=1 \
    CFLAGS="$CFLAGS -O3 -mfma -mtune=skylake-avx512" \
    CXXFLAGS="$CXXFLAGS -O3 -mfma -mtune=skylake-avx512"

RUN cd /buildir/vision && python setup.py bdist_wheel -d /torch-wheels

#--------------------------------------------------------------------
# Pytorch CPU on ClearLinux
#--------------------------------------------------------------------
# Final (runtime) stage: only the built wheels are carried over from the
# builder stage; build toolchains stay behind to keep the image small.
ARG clear_ver
FROM clearlinux/stacks-clearlinux:$clear_ver
LABEL maintainer=otc-swstacks@intel.com

# Set workspace
WORKDIR /workspace
COPY scripts/ ./scripts

# Install SW packages
COPY --from=builder /torch-wheels torch-wheels/
RUN swupd bundle-add \
    python3-basic openssh-server openmpi git sysadmin-basic \
    devpkg-openmpi devpkg-gperftools devpkg-opencv \
    && rm -rf /var/lib/swupd/* \
    && ln -s /usr/lib64/libtcmalloc.so /usr/lib/libtcmalloc.so

# Install torch wheels from builder stage
RUN ldconfig \
    && pip --no-cache-dir install torch-wheels/* \
    && rm -rf torch-wheels/

# Install runtime dependencies, utils and add-ons
RUN pip --no-cache-dir install \
    psutil jupyterhub==1.1.0 \
    seldon-core==1.0.2 \
    mkl mkl-include
RUN ldconfig && pip --no-cache-dir install \
    typing-extensions horovod==0.19.0 \
    && ./scripts/install_addons.sh

# Generate defaults
# BASH_ENV makes non-interactive bash source the profile (and thereby the
# mkl_env.sh settings appended below) in every RUN/exec shell.
ENV BASH_ENV /usr/share/defaults/etc/profile
RUN cp ./scripts/generate_defaults.py /workspace && rm -rf ./scripts \
    && python generate_defaults.py --generate \
    && cat mkl_env.sh >> /etc/profile \
    && chmod -R a+w /workspace

# Init
SHELL ["/bin/bash", "-c"]
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/README.md:
--------------------------------------------------------------------------------
1 | # Deep Learning Reference Stack with Pytorch and Intel® oneAPI Deep Neural Network Library (oneDNN)
2 |
3 | [](https://microbadger.com/images/sysstacks/dlrs-pytorch-clearlinux:v0.6.0 "Get your own image badge on microbadger.com")
4 | [](https://microbadger.com/images/sysstacks/dlrs-pytorch-clearlinux:v0.6.0 "Get your own version badge on microbadger.com")
5 |
6 | ### Building Locally
7 |
8 | Default build args in Docker are documented at: https://docs.docker.com/engine/reference/builder/#arg
9 |
10 | >NOTE: This command is for locally building this image alone.
11 |
12 | ```
13 | docker build --no-cache --build-arg clear_ver="32690" -t clearlinux/stacks-pytorch-mkl .
14 | ```
15 |
16 | ### Build ARGs
17 |
18 | * `clear_ver` specifies the latest validated Clearlinux version for this DLRS Dockerfile.
19 | >NOTE: Changing this version may result in errors. If you want to upgrade the OS version, use `swupd_args` instead.
20 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/licenses/README.md:
--------------------------------------------------------------------------------
1 | # Additional details on licenses
2 |
3 | As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc., from the base distribution, along with any direct or indirect dependencies of the primary software being contained). As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within.
4 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/deps/conda.deps:
--------------------------------------------------------------------------------
1 | # This file may be used to create an environment using:
2 | # $ conda create --name --file
3 | # platform: linux-64
4 | _libgcc_mutex=0.1=main
5 | asn1crypto=1.2.0=py37_0
6 | blas=1.0=mkl
7 | bzip2=1.0.8=h7b6447c_0
8 | ca-certificates=2019.10.16=0
9 | certifi=2019.9.11=py37_0
10 | cffi=1.13.1=py37h2e261b9_0
11 | chardet=3.0.4=py37_1003
12 | cmake=3.14.0=h52cb24c_0
13 | conda=4.7.12=py37_0
14 | conda-package-handling=1.6.0=py37h7b6447c_0
15 | cryptography=2.8=py37h1ba5d50_0
16 | expat=2.2.6=he6710b0_0
17 | idna=2.8=py37_0
18 | intel-openmp=2019.5=281
19 | krb5=1.16.1=h173b8e3_7
20 | libcurl=7.65.3=h20c2e04_0
21 | libedit=3.1.20181209=hc058e9b_0
22 | libffi=3.2.1=hd88cf55_4
23 | libgcc-ng=9.1.0=hdf63c60_0
24 | libgfortran-ng=7.3.0=hdf63c60_0
25 | libssh2=1.8.2=h1ba5d50_0
26 | libstdcxx-ng=9.1.0=hdf63c60_0
27 | mkl=2019.5=281
28 | mkl-include=2019.5=281
29 | mkl-service=2.3.0=py37he904b0f_0
30 | mkl_fft=1.0.14=py37ha843d7b_0
31 | mkl_random=1.1.0=py37hd6b4f25_0
32 | ncurses=6.1=he6710b0_1
33 | ninja=1.9.0=py37hfd86e86_0
34 | numpy=1.17.2=py37haad9e8e_0
35 | numpy-base=1.17.2=py37hde5b4d6_0
36 | openssl=1.1.1=h7b6447c_0
37 | pip=19.3.1=py37_0
38 | pycosat=0.6.3=py37h14c3975_0
39 | pycparser=2.19=py37_0
40 | pyopenssl=19.0.0=py37_0
41 | pysocks=1.7.1=py37_0
42 | python=3.7.4=h265db76_1
43 | pyyaml=5.1.2=py37h7b6447c_0
44 | readline=7.0=h7b6447c_5
45 | requests=2.22.0=py37_0
46 | rhash=1.3.8=h1ba5d50_0
47 | ruamel_yaml=0.15.46=py37h14c3975_0
48 | setuptools=41.6.0=py37_0
49 | six=1.12.0=py37_0
50 | sqlite=3.30.0=h7b6447c_0
51 | tk=8.6.8=hbc83047_0
52 | tqdm=4.36.1=py_0
53 | typing=3.6.4=py37_0
54 | urllib3=1.24.2=py37_0
55 | wheel=0.33.6=py37_0
56 | xz=5.2.4=h14c3975_4
57 | yaml=0.1.7=had09818_2
58 | zlib=1.2.11=h7b6447c_3
59 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/deps/pip.deps:
--------------------------------------------------------------------------------
1 | asn1crypto==1.2.0
2 | certifi==2019.9.11
3 | cffi==1.13.1
4 | chardet==3.0.4
5 | conda==4.7.12
6 | conda-package-handling==1.6.0
7 | cryptography==2.8
8 | idna==2.8
9 | mkl-fft==1.0.14
10 | mkl-random==1.1.0
11 | mkl-service==2.3.0
12 | numpy==1.17.2
13 | pycosat==0.6.3
14 | pycparser==2.19
15 | pyOpenSSL==19.0.0
16 | PySocks==1.7.1
17 | PyYAML==5.1.2
18 | requests==2.22.0
19 | ruamel-yaml==0.15.46
20 | six==1.12.0
21 | tqdm==4.36.1
22 | typing==3.6.4
23 | urllib3==1.24.2
24 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/install_addons.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2020 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

# OS-level dependency needed to build pillow/pillow-simd from source.
swupd bundle-add devpkg-libjpeg-turbo
swupd clean && rm -rf /var/lib/swupd/*

# Add-on frameworks installed one at a time (with --no-deps) at the end,
# after their dependencies have been satisfied by the bulk install below.
addons=( pytorch-lightning flair )
echo "=================installing pkg dependencies=================="
pip install --no-cache-dir -U \
    numpy pillow \
    scikit-image \
    twine \
    "pandas" \
    "test-tube" \
    "fastprogress" \
    beautifulsoup4 \
    numexpr \
    packaging \
    requests \
    "python-dateutil>=2.6.1" \
    "gensim>=3.4.0" \
    "pytest>=5.3.2" \
    "tqdm>=4.26.0" \
    "segtok>=1.5.7" \
    "matplotlib>=2.2.3" \
    mpld3==0.3 \
    "scikit-learn>=0.21.3" \
    "sqlitedict>=1.6.0" \
    "deprecated>=1.2.4" \
    "hyperopt>=0.1.1" \
    "transformers>=2.3.0" \
    "bpemb>=0.2.9" \
    regex \
    tabulate \
    langdetect \
    scipy

# --ignore-installed forces a reinstall over any distro-provided pyyaml.
pip install --no-cache-dir --ignore-installed pyyaml==5.3.1
# pillow-simd replaces the plain pillow above; compiled with AVX2 enabled.
CC="cc -mavx2" pip install --no-cache-dir --force-reinstall "pillow-simd==7.0.0.post3"

# Install each add-on without pulling dependencies; fail the build with a
# clear message on the first one that does not install.
for pkg in "${addons[@]}"
do
  echo "=================get and install $pkg======================="
  pip install --no-deps --no-cache-dir "$pkg" || { echo "failed installing $pkg"; exit 1; }
  echo "==================done======================================"
done
exit 0
65 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/install_conda.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

# Install Miniconda into /opt/conda (skipped when already present).
export PYTHON_VERSION=3.7
export MINICONDA_VERSION=latest
export PATH=/opt/conda/bin:$PATH
echo "=================get and install conda========================"
if [ ! -d "/opt/conda" ]; then
  # -b: batch (no prompts); -p: install prefix.
  wget "https://repo.continuum.io/miniconda/Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh" -O /tmp/miniconda.sh
  chmod +x /tmp/miniconda.sh \
    && /tmp/miniconda.sh -b -p /opt/conda \
    && rm -rf /tmp/*
fi
# Fix: original closing banner was garbled ("done= ="); emit a clean one.
echo "==================done=========================================="
32 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/install_pytorch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -o pipefail

# NOTE: `set -u` is deliberately absent here: CFLAGS/CXXFLAGS may be unset
# in the environment and are expanded below.
export PATH=/opt/conda/bin/:$PATH
export GCC_IGNORE_WERROR=1
# build servers are skylake-avx512; would be built for the arch with `native`
export CFLAGS="$CFLAGS -O3 -mfma -mtune=skylake-avx512 -march=native"
export CXXFLAGS="$CXXFLAGS -O3 -mfma -mtune=skylake-avx512 -march=native"
export GIT_BRANCH=v1.3.1
export CMAKE_PREFIX_PATH=/opt/conda
# build failing when FBGEMM is enabled, disabling until fix is found
export USE_FBGEMM=0
# linker fix
[ -f /opt/conda/compiler_compat/ld ] && mv /opt/conda/compiler_compat/ld /opt/conda/compiler_compat/ld.org
echo "=================get pytorch================================="
# Clone only when missing so re-runs are idempotent.
if [ ! -d ./pytorch/ ]; then
  git clone --recurse-submodules -j15 https://github.com/pytorch/pytorch.git \
  && cd pytorch && git checkout $GIT_BRANCH \
  && cd ..
fi
echo "=================build and install pytorch with MKL============="
# onnxifi_op sources removed before building — reason not stated here;
# presumably a build workaround (NOTE(review): confirm).
rm -rf pytorch/caffe2/opt/onnxifi_op.*
cd ./pytorch/
/opt/conda/bin/pip --no-cache-dir install pyyaml
# Workaround for https://github.com/pytorch/pytorch/issues/26555
sed -i 's#^ ${CMAKE_CURRENT_SOURCE_DIR}/tensor_iterator_test.cpp##g' aten/src/ATen/test/CMakeLists.txt \
&& python setup.py build && python setup.py install \
&& cd / && rm -rf /scripts/pytorch \
&& find /opt/conda/ -follow -type f -name '*.js.map' -delete \
&& find /opt/conda/ -follow -type f -name '*.pyc' -delete
echo "======================done======================================"
48 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/install_torch_deps.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

export PATH=/opt/conda/bin:$PATH
echo "=================install pytorch from conda====================="
# Fixes: use the full `--add` flag (the original `--ad` only worked via
# argparse prefix abbreviation) and pass `-y` so `conda install` never
# prompts inside a non-interactive docker build.
conda config --env --add channels pytorch \
  && conda config --env --add channels anaconda \
  && conda install -y --file ./deps/conda.deps \
  && conda clean -ay \
  && find /opt/conda/ -follow -type f -name '*.js.map' -delete \
  && find /opt/conda/ -follow -type f -name '*.pyc' -delete \
  && rm -rf ./deps/conda.deps \
  && rm -rf /tmp/*
echo "==================done=========================================="
32 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/install_torchvision.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -o pipefail

export PATH=/opt/conda/bin/:$PATH
export GCC_IGNORE_WERROR=1
export CFLAGS="$CFLAGS -O3 -mfma -mtune=skylake-avx512"
# Fix: original had `-mtune-skylake-avx512` (missing `=`), which GCC
# rejects as an unrecognized option and aborts the C++ build.
export CXXFLAGS="$CXXFLAGS -O3 -mfma -mtune=skylake-avx512"
export GIT_HASH=v1.1.0
export CMAKE_PREFIX_PATH=/opt/conda
# linker fix
[ -f /opt/conda/compiler_compat/ld ] && mv /opt/conda/compiler_compat/ld /opt/conda/compiler_compat/ld.org
echo "=================build and install torchvision=================="
# Fix: actually check out the pinned GIT_HASH tag; the original exported
# it but never used it, so whatever master happened to be was built.
git clone https://github.com/pytorch/vision.git \
&& cd vision && git checkout "$GIT_HASH" \
&& python setup.py build && python setup.py install \
&& cd / && rm -rf /scripts/vision \
&& find /opt/conda/ -follow -type f -name '*.js.map' -delete \
&& find /opt/conda/ -follow -type f -name '*.pyc' -delete
echo "======================done======================================"
35 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/install_utils.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

export PATH=/opt/conda/bin:$PATH
echo "=================install utilities============================="
# Install the pinned pip deps plus runtime utilities, then:
#  - restore conda's compat linker (moved to ld.org by the build scripts),
#  - shadow conda's libtinfo so the system copy is used,
#  - delete sourcemaps/bytecode and caches to shrink the image,
#  - register the ipython kernel for notebook use.
pip --no-cache-dir install -r ./deps/pip.deps \
&& pip --no-cache-dir install typing-extensions horovod opencv-python==4.1.0.25 \
&& mv /opt/conda/compiler_compat/ld.org /opt/conda/compiler_compat/ld \
&& mv /opt/conda/lib/libtinfo.so.6 /opt/conda/lib/libtinfo.so.6.org \
&& find /opt/conda/ -follow -type f -name '*.js.map' -delete \
&& find /opt/conda/ -follow -type f -name '*.pyc' -delete \
&& python -m ipykernel install --user \
&& npm cache clean --force \
&& rm -rf /tmp/* \
&& rm ./deps/pip.deps
echo "==================done=========================================="
34 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/mkl_env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) 2020 Intel Corporation
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | #
# Default threading configuration for MKL/OpenMP workloads; this file is
# appended to /etc/profile in the image so every login shell inherits it.
# Fix: the variable was misspelled OMP_NUM_THEADS, so the OpenMP runtime
# never saw the intended thread cap.
export OMP_NUM_THREADS=10
export KMP_BLOCKTIME=2
export KMP_AFFINITY=granularity=fine,verbose,compact,1,0
export INTRA_OP_PARALLELISM_THREADS=10
export INTER_OP_PARALLELISM_THREADS=1
21 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/mkl/scripts/torch_utils.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

export GIT_BRANCH=v1.4.0 # Pytorch branch
export GIT_TAG=v0.5.0 # Torchvision tag

echo "=================install torch dependencies============================="
pip --no-cache-dir install \
    cmake numpy \
    ninja pyyaml \
    mkl mkl-include \
    setuptools cffi \
    typing wheel
echo "==================done=========================================="

echo "=================clone pytorch============================="
# Clone the pinned branch, build the wheel into /torch-wheels and install
# it so the torchvision build can import torch.
# Fix: mkdir -p so a pre-existing /torch-wheels does not abort the script
# (plain mkdir fails under set -e on re-runs).
git clone --recurse-submodules -j15 https://github.com/pytorch/pytorch.git /buildir/pytorch --branch ${GIT_BRANCH} \
&& cd /buildir/pytorch && mkdir -p /torch-wheels \
&& rm -rf caffe2/opt/onnxifi_op.* \
&& sed -i 's#^ ${CMAKE_CURRENT_SOURCE_DIR}/tensor_iterator_test.cpp##g' aten/src/ATen/test/CMakeLists.txt \
&& python setup.py bdist_wheel -d /torch-wheels && python setup.py install
echo "==================done=========================================="

# Fix: banner said "torchvison".
echo "=================clone torchvision============================="
git clone https://github.com/pytorch/vision.git /buildir/vision \
&& cd /buildir/vision && git checkout ${GIT_TAG}
echo "==================done=========================================="

# Workaround for missing libs
ldconfig
48 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/oss/Dockerfile:
--------------------------------------------------------------------------------
# OSS version of Pytorch on Clear OS
# Single-stage image: pytorch comes from the distro bundle
# (machine-learning-pytorch) rather than being built from source.
ARG clear_ver
FROM clearlinux/stacks-clearlinux:$clear_ver
LABEL maintainer=otc-swstacks@intel.com

# update os and install pytorch
RUN swupd bundle-add devpkg-openmpi \
    devpkg-libpng desktop-gnomelibs \
    openssh-server \
    sysadmin-basic \
    devpkg-gperftools \
    machine-learning-pytorch \
    git user-basic-dev \
    && ln -s /usr/lib64/libtcmalloc.so /usr/lib/libtcmalloc.so

# install additional python packages for vision, horovod and notebook
RUN pip --no-cache-dir install torchvision
RUN pip --no-cache-dir install ipython ipykernel jupyter && \
    python -m ipykernel.kernelspec
# HOROVOD_WITH_TORCH=1 makes the horovod build fail loudly if its torch
# extension cannot be compiled instead of silently skipping it.
RUN HOROVOD_WITH_TORCH=1 pip install --no-cache-dir horovod

# setup onnx and helper packages for caffe2
RUN pip install --no-cache-dir \
    future hypothesis protobuf onnx networkx opencv-python seldon-core

# clean up and init
WORKDIR /workspace
RUN chmod -R a+w /workspace
CMD /bin/bash
30 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/oss/README.md:
--------------------------------------------------------------------------------
1 | # Deep Learning Reference Stack with Pytorch and OpenBLAS
2 |
3 | [](https://microbadger.com/images/sysstacks/dlrs-pytorch-clearlinux:v0.6.0-oss "Get your own image badge on microbadger.com")
4 | [](https://microbadger.com/images/sysstacks/dlrs-pytorch-clearlinux:v0.6.0-oss "Get your own version badge on microbadger.com")
5 |
6 | ### Building Locally
7 |
8 | Default build args in Docker are documented at: https://docs.docker.com/engine/reference/builder/#arg
9 |
10 | >NOTE: This command is for locally building this image alone.
11 |
12 | ```
13 | docker build --no-cache --build-arg clear_ver="32690" -t clearlinux/stacks-pytorch-oss .
14 | ```
15 |
16 | ### Build ARGs
17 |
18 | * `clear_ver` specifies the latest validated Clearlinux version for this DLRS Dockerfile.
19 | >NOTE: Changing this version may result in errors. If you want to upgrade the OS version, use `swupd_args` instead.
20 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/pytorch/oss/licenses/README.md:
--------------------------------------------------------------------------------
1 | # Additional details on licenses
2 |
3 | As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc., from the base distribution, along with any direct or indirect dependencies of the primary software being contained). As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within.
4 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/Dockerfile.builder:
--------------------------------------------------------------------------------
#---------------------------------------------------------------------
# Base instance to build MKL based images on Clear Linux
#---------------------------------------------------------------------
ARG clear_ver
FROM clearlinux/stacks-clearlinux:$clear_ver as base
LABEL maintainer=otc-swstacks@intel.com

# update os and add required bundles
RUN swupd bundle-add git curl wget \
    java-basic sysadmin-basic package-utils \
    devpkg-zlib go-basic devpkg-tbb

# fix for stdlib not found issue
RUN ln -sf /usr/lib64/libstdc++.so /usr/lib64/libstdc++.so.6

COPY ./scripts/ /scripts
# Install bazelisk (a bazel version manager) via `go get`.
# NOTE: the `export PATH` only affects this single RUN's shell and does
# not persist into later layers; the symlink into /usr/bin is what makes
# `bazel` available to downstream images.
RUN go get github.com/bazelbuild/bazelisk \
    && export PATH=$PATH:/go/bin/ \
    && ln -s /go/bin/bazelisk /usr/bin/bazel
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/Dockerfile.clr_ds:
--------------------------------------------------------------------------------
#---------------------------------------------------------------------
# DLRS downstream builder
#---------------------------------------------------------------------
ARG clear_ver
FROM clearlinux/stacks-clearlinux:$clear_ver
LABEL maintainer=otc-swstacks@intel.com
ARG NUMACTL_VERSION=2.0.12

# update os and add required bundles
# numactl is built from source (no Clear Linux bundle is used for it);
# the trailing symlinks work around library-name lookups:
#  - libstdc++.so.6 / libzstd.so.1 sonames resolved to the installed libs,
#  - libtcmalloc.so exposed under /usr/lib for preloading.
RUN swupd bundle-add devpkg-openmpi devpkg-libX11 git openssh-server c-basic nodejs-basic curl python3-basic devpkg-gperftools \
    && curl -fSsL -O https://github.com/numactl/numactl/releases/download/v${NUMACTL_VERSION}/numactl-${NUMACTL_VERSION}.tar.gz \
    && tar xf numactl-${NUMACTL_VERSION}.tar.gz \
    && cd numactl-${NUMACTL_VERSION} \
    && ./configure \
    && make \
    && make install \
    && rm -rf /numactl-${NUMACTL_VERSION}* \
    && rm -rf /var/lib/swupd/* \
    && ln -sf /usr/lib64/libstdc++.so /usr/lib64/libstdc++.so.6 \
    && ln -sf /usr/lib64/libzstd.so.1.4.* /usr/lib64/libzstd.so.1 \
    && ln -s /usr/lib64/libtcmalloc.so /usr/lib/libtcmalloc.so
22 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/Dockerfile.dlrs:
--------------------------------------------------------------------------------
# Final DLRS assembly: pulls the TF wheels from the tensorflow builder
# image and the OpenVINO inference engine from the serving builder image.
FROM stacks-tf-mkl as tf_builder
FROM stacks-openvino-omp as serving_builder
#---------------------------------------------------------------------
# DLRS downstream container
#---------------------------------------------------------------------
FROM clearlinux-dlrs-builder as base
LABEL maintainer=otc-swstacks@intel.com

ARG HOROVOD_VERSION=0.19.1
ARG MODEL_SERVER_TAG=v2020.1

# TF wheels and the OpenVINO runtime libraries/headers from the builders.
COPY --from=tf_builder /tmp/tf/ /tmp/tf/
COPY --from=serving_builder /dldt/bin/intel64/Release/lib/ /usr/local/lib/inference-engine/

COPY --from=serving_builder /dldt/inference-engine/include/*.h* /usr/local/include/inference-engine/include/
COPY --from=serving_builder /dldt/inference-engine/include/cpp/ /usr/local/include/inference-engine/include/cpp/
COPY --from=serving_builder /dldt/inference-engine/include/details /usr/local/include/inference-engine/include/details
COPY --from=serving_builder /dldt/inference-engine/include/multi-device /usr/local/include/inference-engine/include/multi-device


# install tensorflow, ntlk, jupyterhub, opencv, seldon-core and horovod
# The avx512 wheel variant is installed; the staged wheels are removed after.
RUN pip --no-cache-dir install /tmp/tf/avx512/tensorflow*.whl \
    && rm -rf /tmp/tf
RUN pip --no-cache-dir install horovod==${HOROVOD_VERSION}
RUN npm install -g configurable-http-proxy \
    && pip --no-cache-dir install common seldon-core jupyterhub \
    && pip --no-cache-dir install notebook protobuf \
    && pip --no-cache-dir install numpy tensorflow-serving-api==1.15.0 google-cloud-storage boto3 jsonschema falcon cheroot \
    && pip --no-cache-dir install grpcio defusedxml==0.5.0 grpcio-tools test-generator==0.1.1 \
    && npm cache clean --force \
    && find /usr/lib/ -follow -type f -name '*.pyc' -delete \
    && find /usr/lib/ -follow -type f -name '*.js.map' -delete


# install openvino inference engine
# init
# Non-interactive bash shells will source /etc/profile (via BASH_ENV),
# picking up the library/python paths exported below.
ENV BASH_ENV /usr/share/defaults/etc/profile
RUN echo "export LD_LIBRARY_PATH=/usr/local/lib/inference-engine:/usr/local/lib" >> /etc/profile \
    && echo "export PYTHONPATH=/usr/local/lib/inference-engine/python_api/python3.8:/usr/local/lib/inference-engine/python_api/python3.8/openvino/inference_engine/" >> /etc/profile

# init ie serving
# Clone the pinned model-server tag, record version metadata, install the
# ie_serving package, then drop the clone.
# NOTE(review): `git checkout ${MODEL_SERVER_TAG}` after a `--depth 1 -b`
# clone of the same tag looks redundant — confirm before removing.
WORKDIR /ie_serving_py
RUN git clone --depth 1 -b ${MODEL_SERVER_TAG} https://github.com/IntelAI/OpenVINO-model-server.git model_server \
    && cd model_server && git checkout ${MODEL_SERVER_TAG} && cd .. \
    && cp ./model_server/setup.py /ie_serving_py \
    && echo "OpenVINO Model Server version: ${MODEL_SERVER_TAG}" > /ie_serving_py/version \
    && echo "Git commit: `cd ./model_server; git rev-parse HEAD; cd ..`" >> /ie_serving_py/version \
    && echo "OpenVINO version: ${MODEL_SERVER_TAG} src" >> /ie_serving_py/version \
    && echo "# OpenVINO built with: https://github.com/opencv/dldt.git" >> /ie_serving_py/version \
    && cp -r ./model_server/ie_serving /ie_serving_py/ie_serving \
    && cd /ie_serving_py && python setup.py install \
    && rm -rf model_server

WORKDIR /workspace
COPY /scripts/ /workspace/scripts/
RUN chmod -R a+w /workspace
SHELL ["/bin/bash", "-c"]
58 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/Dockerfile.openvino:
--------------------------------------------------------------------------------
#---------------------------------------------------------------------
# Base instance to build MKL based Tensorflow on Clear Linux
#---------------------------------------------------------------------
# Builds the OpenVINO inference engine on top of the shared MKL builder
# image (which provides /scripts and the toolchain).
FROM stacks-mkl-builder as base
LABEL maintainer=otc-swstacks@intel.com

# soname shim so binaries linked against libzstd.so.1 resolve.
RUN cd /usr/lib64/ && ln -sf libzstd.so.1.4.* libzstd.so.1
RUN swupd clean \
    && swupd bundle-add devpkg-opencv devpkg-llvm

RUN ./scripts/build_openvino_ie.sh
12 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/Dockerfile.tf:
--------------------------------------------------------------------------------
#---------------------------------------------------------------------
# Base instance to build MKL based Tensorflow on Clear Linux
#---------------------------------------------------------------------
# Thin layer over the shared MKL builder image: just runs the TF build
# script that the builder copied into /scripts.
FROM stacks-mkl-builder
LABEL maintainer=otc-swstacks@intel.com

RUN ./scripts/install_tensorflow.sh
8 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/Makefile:
--------------------------------------------------------------------------------
# Build orchestration for the MKL DLRS images. Image/name variables come
# from config.make and are exported so recursive $(MAKE) calls see them.
include config.make
export BUILDER_DOCKER_IMAGE_MKL
export BUILDER_DOCKER_NAME_MKL
# tensorflow builder
export TF_DOCKER_IMAGE_MKL
export TF_DOCKER_NAME_MKL
# openvino builder
export SERVER_DOCKER_IMAGE_MKL
export SERVER_DOCKER_NAME_MKL
# clr downstream builder
export CLR_DOWNSTREAM_DOCKER_IMAGE_MKL
export CLR_DOWNSTREAM_DOCKER_NAME_MKL
# downstream builder
export DOWNSTREAM_DOCKER_IMAGE_MKL
export DOWNSTREAM_DOCKER_NAME_MKL

# Stage ordering: base builder + downstream base first, then the tf and
# openvino builds that layer on them, then the final downstream image.
all .DEFAULT:
	$(MAKE) builder clr_ds
	$(MAKE) tf openvino
	$(MAKE) downstream

# Disable aixprt target
# $(MAKE) aixprt

builder:
	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(BUILDER_DOCKER_FILE) -t $(BUILDER_DOCKER_IMAGE_MKL) $(DOCKER_WS)

tf:
	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(TF_DOCKER_FILE) -t $(TF_DOCKER_IMAGE_MKL) $(DOCKER_WS)

openvino:
	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(SERVER_DOCKER_FILE) -t $(SERVER_DOCKER_IMAGE_MKL) $(DOCKER_WS)

clr_ds:
	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(CLR_DOWNSTREAM_DOCKER_FILE) -t $(CLR_DOWNSTREAM_DOCKER_IMAGE_MKL) $(DOCKER_WS)

downstream:
	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(DOWNSTREAM_DOCKER_FILE) -t $(DOWNSTREAM_DOCKER_IMAGE_MKL) $(DOCKER_WS)

aixprt:
	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(DOCKER_FILE_AIXPRT) -t $(DOCKER_IMAGE_MKL_AIXPRT) $(DOCKER_WS)

# Run the shared test harness against the built image.
check:
	OS=$(OS) \
	VERSION=$(VERSION) \
	VERSION_DIR=$(VERSION_DIR) \
	DL_LIB=$(DL_LIB) \
	DOCKER_IMAGE=$(DOCKER_IMAGE_MKL) \
	DOCKER_NAME=$(DOCKER_NAME_MKL) \
	RUN_BENCH=$(RUN_BENCH) $(MAKE) -C ../../../tests/check

clean:
	docker rmi $(DOCKER_IMAGE_MKL)

# NOTE(review): DEFAULT_TARGETS is not defined in this file — presumably
# it comes from config.make; verify it expands to the target list.
.PHONY: $(DEFAULT_TARGETS) aixprt
56 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/config.make:
--------------------------------------------------------------------------------
1 | # DLaaS / Clear Linux related variables
2 | include ../../config.make
3 |
4 | # DLaaS MKL / Clear Linux related variables
5 | OS=clearlinux
6 | VERSION=mkl
7 | DL_LIB=tensorflow
8 | SERVER_LIB=openvino-omp
9 | DOWNSTREAM=dlrs
10 |
11 | # docker related variables ('?=' so the environment/CI can override)
12 | BUILDER_DOCKER_NAME_MKL?=stacks-$(VERSION)-builder
13 | TF_DOCKER_NAME_MKL?=stacks-tf-$(VERSION)
14 | SERVER_DOCKER_NAME_MKL?=stacks-$(SERVER_LIB)
15 | CLR_DOWNSTREAM_DOCKER_NAME_MKL?=$(OS)-$(DOWNSTREAM)-builder
16 | DOWNSTREAM_DOCKER_NAME_MKL?=stacks-$(DL_LIB)-$(VERSION)
17 | DOCKER_NAME_MKL_AIXPRT?=stacks-tensorflow-$(VERSION)-omp-aixprt
18 |
19 | # append the postfix random name if present so we create random names for
20 | # docker images and containers
21 | ifdef DOCKER_RANDOM_POSTFIX
22 | # Fix: the builder name used '?=' here, which is a no-op because the
23 | # variable is already set above, so the random postfix was never applied
24 | # to it; use ':=' like the other names.
25 | BUILDER_DOCKER_NAME_MKL:=$(BUILDER_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
26 | TF_DOCKER_NAME_MKL:=$(TF_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
27 | SERVER_DOCKER_NAME_MKL:=$(SERVER_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
28 | CLR_DOWNSTREAM_DOCKER_NAME_MKL:=$(CLR_DOWNSTREAM_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
29 | DOWNSTREAM_DOCKER_NAME_MKL:=$(DOWNSTREAM_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
30 | endif
31 |
32 | # image tags default to the (possibly postfixed) container names
33 | BUILDER_DOCKER_IMAGE_MKL?=$(BUILDER_DOCKER_NAME_MKL)
34 | TF_DOCKER_IMAGE_MKL?=$(TF_DOCKER_NAME_MKL)
35 | SERVER_DOCKER_IMAGE_MKL?=$(SERVER_DOCKER_NAME_MKL)
36 | CLR_DOWNSTREAM_DOCKER_IMAGE_MKL?=$(CLR_DOWNSTREAM_DOCKER_NAME_MKL)
37 | DOWNSTREAM_DOCKER_IMAGE_MKL?=$(DOWNSTREAM_DOCKER_NAME_MKL)
38 | DOCKER_IMAGE_MKL_AIXPRT?=$(DOCKER_NAME_MKL_AIXPRT)
39 |
40 | DOCKER_SHELL?=bash
41 | BUILDER_DOCKER_FILE?=../$(VERSION)/Dockerfile.builder
42 | TF_DOCKER_FILE?=../$(VERSION)/Dockerfile.tf
43 | SERVER_DOCKER_FILE?=../$(VERSION)/Dockerfile.openvino
44 | CLR_DOWNSTREAM_DOCKER_FILE?=../$(VERSION)/Dockerfile.clr_ds
45 | DOWNSTREAM_DOCKER_FILE?=../$(VERSION)/Dockerfile.dlrs
46 | DOCKER_FILE_AIXPRT?=../$(VERSION)/Dockerfile.aixprt
47 |
48 | VERSION_DIR=$(shell pwd)
45 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/licenses/README.md:
--------------------------------------------------------------------------------
1 | # Additional details on licenses
2 |
3 | As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc from the base distribution, along with any direct or indirect dependencies of the primary software being contained). As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within.
4 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/scripts/build_openvino_ie.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | #    http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | # Build the OpenVINO (dldt) Inference Engine from source with MKL-DNN,
18 | # OpenMP threading and the Python bridge enabled.
19 | set -e
20 | set -u
21 | set -o pipefail
22 |
23 | # Skylake/AVX2 tuned optimization flags for all compilers
24 | export CFLAGS="-O3 "
25 | export CXXFLAGS="-O3 "
26 | export FCFLAGS="$CFLAGS "
27 | export FFLAGS="$CFLAGS "
28 | export CFLAGS="$CFLAGS -mfma -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -msse4.2 -msse4.1 -mlzcnt -mavx -mavx2 -march=skylake -mtune=skylake-avx512 -m64"
29 | export CXXFLAGS="$CXXFLAGS -mfma -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -msse4.2 -msse4.1 -mlzcnt -mavx -mavx2 -march=skylake -mtune=skylake-avx512 -m64"
30 | export GCC_IGNORE_WERROR=1
31 |
32 | # pinned dldt release commit
33 | #export GIT_HASH=0ef928 # 2019_R1.0.1
34 | #export GIT_HASH=ba6e22b # 2019_R2
35 | #export GIT_HASH=1c794d9 # 2019_R3
36 | #export GIT_HASH=fe3f978 # 2019_R3.1
37 | export GIT_HASH=b2140c0 # 2020.1
38 | export N_JOBS=$(grep -c ^processor /proc/cpuinfo)
39 |
40 | echo "=================get dldt================================="
41 | if [ ! -d ./dldt/ ]; then
42 | git clone --recursive -j"$N_JOBS" https://github.com/opencv/dldt.git &&\
43 | cd dldt && git checkout -b v2020.1 $GIT_HASH && cd ..
44 | fi
45 | echo "=================config and build inference engine=================="
46 | # install inference engine python bridge dependency
47 | pip install opencv-python numpy cython progress
48 | cd ./dldt/
49 | # Fix: OpenCV_DIR was passed without the -D prefix, so cmake treated it
50 | # as a (nonexistent) path argument instead of a cache entry.
51 | CMAKE_ARGS="-DOpenCV_DIR=/usr/lib64/cmake/opencv4 -DENABLE_MKL_DNN=ON -DTHREADING=OMP -DENABLE_GNA=ON -DENABLE_CLDNN=ON -DENABLE_OPENCV=OFF -DENABLE_MYRIAD=OFF -DENABLE_VPU=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=$(command -v python) -DPYTHON_LIBRARY=/usr/lib64/libpython3.8.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.8"
52 | mkdir -p ./build &&\
53 | cd ./build
54 | IE_BUILD_DIR=$(pwd)
55 | # CMAKE_ARGS intentionally unquoted: each -D flag must be a separate word
56 | cmake $CMAKE_ARGS ..
57 | make -j"$N_JOBS"
58 | #echo "=================config and build IE bridges========================="
59 | #CMAKE_ARGS="-DInferenceEngine_DIR=$IE_BUILD_DIR
60 | #-DPYTHON_EXECUTABLE=$(command -v python)
61 | #-DPYTHON_LIBRARY=/usr/lib64/libpython3.7m.so
62 | #-DPYTHON_INCLUDE_DIR=/usr/include/python3.7m"
63 | #cd "$IE_BUILD_DIR"/../ie_bridges/python && mkdir -p build && cd build
64 | #cmake $CMAKE_ARGS ..
65 | #make -j"$N_JOBS"
66 | echo "===================================================================="
67 | PYTHON_MODULE="inference-engine/bin/intel64/Release"
68 | echo "Inference Engine build directory is: $IE_BUILD_DIR"
69 | echo "IE bridges build directory is: $PYTHON_MODULE"
70 | echo "===================================================================="
64 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/scripts/check_avx512.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | #    http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | # Verify the host CPU exposes all AVX-512 extensions DLRS needs.
18 | set -e
19 | set -u
20 | set -o pipefail
21 |
22 | # run MSG -- print MSG framed by '=' rules, prefixed with a timestamp.
23 | run()
24 | {
25 | line_length=$(echo "$@" | awk '{print length}')
26 | printf "%$((line_length+35))s\n" |tr " " "="
27 | # Fix: original used "$(date) -- %s" as the *format* string with no
28 | # argument supplied for %s (and a '%' in the date would be interpreted).
29 | printf "%s -- " "$(date)"
30 | printf "%s\n" "$@"
31 | printf "%$((line_length+35))s\n" |tr " " "="
32 | }
33 | # AVX-512 feature flags required by DLRS
34 | reqd_xtns=(avx512cd avx512bw avx512dq avx512f avx512vl)
35 | # Fix: '|| true' keeps set -e/pipefail from silently aborting on hosts
36 | # with no avx512 flags at all (grep exits 1 when nothing matches).
37 | cpuxtns=$(lscpu | grep -i "avx512" || true)
38 | for i in "${reqd_xtns[@]}"
39 | do
40 | if [[ ! $cpuxtns =~ $i ]]
41 | then
42 | run "[Error] : Intel® AVX-512 extensions required by DLRS not available :: ($i)"
43 | # Fix: exit non-zero so callers (e.g. docker build) see the failure;
44 | # the original bare 'exit' returned 0 and masked the error.
45 | exit 1
46 | fi
47 | done
48 | run "[Done]: Success, the platform supports AVX-512 instructions"
40 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/mkl/scripts/serve.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # start the model server: exec the container CMD from the serving directory.
3 | # Fail fast if the directory is missing instead of exec-ing from the wrong cwd.
4 | cd /ie_serving_py || exit 1
5 | exec "$@"
5 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/oss/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG clear_ver
2 | FROM clearlinux/stacks-clearlinux:$clear_ver
3 |
4 | LABEL maintainer=otc-swstacks@intel.com
5 |
6 | # OS bundles (tensorflow + tooling); symlink tcmalloc so consumers that
7 | # look under /usr/lib find it as well as under /usr/lib64
8 | RUN swupd bundle-add curl sysadmin-basic devpkg-gperftools \
9 | git machine-learning-tensorflow devpkg-opencv \
10 | && ln -s /usr/lib64/libtcmalloc.so /usr/lib/libtcmalloc.so
11 |
12 | # install additional python packages for ipython, seldon-core and jupyter notebook
13 | RUN pip --no-cache-dir install ipython ipykernel matplotlib jupyter seldon-core && \
14 | python -m ipykernel.kernelspec
15 |
16 | # Fix: exec form instead of shell-form CMD 'bash', which ran the literal
17 | # quoted word through an extra /bin/sh -c wrapper.
18 | CMD ["bash"]
15 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/oss/README.md:
--------------------------------------------------------------------------------
1 | ## Deep Learning Reference Stack with Tensorflow and Optimized Eigen
2 |
3 | [](https://microbadger.com/images/sysstacks/dlrs-tensorflow-clearlinux:v0.6.0-oss "Get your own image badge on microbadger.com")
4 | [](https://microbadger.com/images/sysstacks/dlrs-tensorflow-clearlinux:v0.6.0-oss "Get your own version badge on microbadger.com")
5 |
6 | ### Building Locally
7 |
8 | Default build args in Docker are documented at: https://docs.docker.com/engine/reference/builder/#arg
9 |
10 | >NOTE: This command is for locally building this image alone.
11 |
12 | ```
13 | docker build --no-cache --build-arg clear_ver="32690" -t dlrs-tensorflow-clearlinux:v0.6.0-oss .
14 | ```
15 |
16 | ### Build ARGs
17 |
18 | * `clear_ver` specifies the latest validated Clearlinux version for this DLRS Dockerfile.
19 | >NOTE: Changing this version may result in errors. If you want to upgrade the OS version, use `swupd_args` instead.
20 |
21 | * `swupd_args` specifies [swupd update](https://github.com/clearlinux/swupd-client/blob/master/docs/swupd.1.rst#options) flags passed to the update during build.
22 |
23 | >NOTE: An empty `swupd_args` will default to 32690. Consider this when building as an OS upgrade won't be performed. If you'd like to upgrade the OS version, you can either do it manually inside a running container or add `swupd_args=""` to the build command. The latest validated version is 32690, using a different one might result in unexpected errors.
24 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow/oss/licenses/README.md:
--------------------------------------------------------------------------------
1 | # Additional details on licenses
2 |
3 | As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc from the base distribution, along with any direct or indirect dependencies of the primary software being contained). As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within.
4 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/Dockerfile.builder:
--------------------------------------------------------------------------------
1 | #---------------------------------------------------------------------
2 | # Base instance to build MKL based images on Clear Linux
3 | #---------------------------------------------------------------------
4 | ARG clear_ver
5 | FROM clearlinux/stacks-clearlinux:$clear_ver as base
6 | LABEL maintainer=otc-swstacks@intel.com
7 |
8 | # update os and add required bundles (toolchains shared by all builders)
9 | RUN swupd bundle-add git curl wget \
10 | java-basic sysadmin-basic package-utils \
11 | devpkg-zlib go-basic devpkg-tbb
12 |
13 | # fix for stdlib not found issue
14 | RUN ln -sf /usr/lib64/libstdc++.so /usr/lib64/libstdc++.so.6
15 |
16 | # build scripts consumed by the downstream builder stages
17 | COPY ./scripts/ /scripts
18 |
19 | # install bazelisk (fetches the bazel version TF requires) and expose it
20 | # as 'bazel' on PATH
21 | RUN go get github.com/bazelbuild/bazelisk \
22 | && export PATH=$PATH:/go/bin/ \
23 | && ln -s /go/bin/bazelisk /usr/bin/bazel
21 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/Dockerfile.clr_ds:
--------------------------------------------------------------------------------
1 | #---------------------------------------------------------------------
2 | # DLRS downstream builder
3 | #---------------------------------------------------------------------
4 | ARG clear_ver
5 | FROM clearlinux/stacks-clearlinux:$clear_ver
6 | LABEL maintainer=otc-swstacks@intel.com
7 | # numactl is built from source at this pinned release
8 | ARG NUMACTL_VERSION=2.0.12
9 |
10 | # update os and add required bundles; build/install numactl from source,
11 | # clean the swupd cache, then add soname symlinks (libstdc++/libzstd
12 | # lookups) and expose tcmalloc under /usr/lib as well as /usr/lib64
13 | RUN swupd bundle-add devpkg-openmpi devpkg-libX11 git openssh-server c-basic nodejs-basic curl python3-basic devpkg-gperftools \
14 | && curl -fSsL -O https://github.com/numactl/numactl/releases/download/v${NUMACTL_VERSION}/numactl-${NUMACTL_VERSION}.tar.gz \
15 | && tar xf numactl-${NUMACTL_VERSION}.tar.gz \
16 | && cd numactl-${NUMACTL_VERSION} \
17 | && ./configure \
18 | && make \
19 | && make install \
20 | && rm -rf /numactl-${NUMACTL_VERSION}* \
21 | && rm -rf /var/lib/swupd/* \
22 | && ln -sf /usr/lib64/libstdc++.so /usr/lib64/libstdc++.so.6 \
23 | && ln -sf /usr/lib64/libzstd.so.1.4.* /usr/lib64/libzstd.so.1 \
24 | && ln -s /usr/lib64/libtcmalloc.so /usr/lib/libtcmalloc.so
22 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/Dockerfile.openvino:
--------------------------------------------------------------------------------
1 | #---------------------------------------------------------------------
2 | # Builder stage: OpenVINO Inference Engine on Clear Linux, on top of the
3 | # MKL builder image (header previously said "Tensorflow" in error)
4 | #---------------------------------------------------------------------
5 | FROM stacks-mkl-builder as base
6 | LABEL maintainer=otc-swstacks@intel.com
7 |
8 | # libzstd soname workaround needed by the toolchain in this base
9 | RUN cd /usr/lib64/ && ln -sf libzstd.so.1.4.* libzstd.so.1
10 | RUN swupd clean \
11 | && swupd bundle-add devpkg-opencv devpkg-llvm
12 |
13 | # build the OpenVINO inference engine (script COPY'd in Dockerfile.builder)
14 | RUN ./scripts/build_openvino_ie.sh
12 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/Dockerfile.tf:
--------------------------------------------------------------------------------
1 | #---------------------------------------------------------------------
2 | # Base instance to build MKL based Tensorflow 2.0 on Clear Linux
3 | #---------------------------------------------------------------------
4 | FROM stacks-mkl-builder
5 | LABEL maintainer=otc-swstacks@intel.com
6 |
7 | # Build TensorFlow 2.0 from source against MKL.
8 | # NOTE(review): the relative path assumes the builder image's working
9 | # directory contains scripts/ (COPY'd in Dockerfile.builder) -- verify.
10 | RUN ./scripts/install_tensorflow_2.0.sh
8 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/Makefile:
--------------------------------------------------------------------------------
1 | # Build orchestration for the MKL-based DLRS TensorFlow 2 images.
2 | # All image/container names come from config.make and are exported so
3 | # recursive $(MAKE) invocations and the check suite see the same values.
4 | include config.make
5 | export BUILDER_DOCKER_IMAGE_MKL
6 | export BUILDER_DOCKER_NAME_MKL
7 | # tensorflow builder
8 | export TF_DOCKER_IMAGE_MKL
9 | export TF_DOCKER_NAME_MKL
10 | # openvino builder
11 | export SERVER_DOCKER_IMAGE_MKL
12 | export SERVER_DOCKER_NAME_MKL
13 | # clr downstream builder
14 | export CLR_DOWNSTREAM_DOCKER_IMAGE_MKL
15 | export CLR_DOWNSTREAM_DOCKER_NAME_MKL
16 | # downstream builder
17 | export DOWNSTREAM_DOCKER_IMAGE_MKL
18 | export DOWNSTREAM_DOCKER_NAME_MKL
19 |
20 | # Default: base builder and downstream base first, then the tensorflow
21 | # and openvino builders, and finally the combined downstream image.
22 | all .DEFAULT:
23 | 	$(MAKE) builder clr_ds
24 | 	$(MAKE) tf
25 | 	$(MAKE) openvino
26 | 	$(MAKE) downstream
27 |
28 | # Disable aixprt target
29 | # $(MAKE) aixprt
30 |
31 | # base image carrying the shared build toolchain
32 | builder:
33 | 	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(BUILDER_DOCKER_FILE) -t $(BUILDER_DOCKER_IMAGE_MKL) $(DOCKER_WS)
34 |
35 | # tensorflow 2 compiled against MKL
36 | tf:
37 | 	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(TF_DOCKER_FILE) -t $(TF_DOCKER_IMAGE_MKL) $(DOCKER_WS)
38 |
39 | # openvino inference engine builder
40 | openvino:
41 | 	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(SERVER_DOCKER_FILE) -t $(SERVER_DOCKER_IMAGE_MKL) $(DOCKER_WS)
42 |
43 | # clear linux downstream base image
44 | clr_ds:
45 | 	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(CLR_DOWNSTREAM_DOCKER_FILE) -t $(CLR_DOWNSTREAM_DOCKER_IMAGE_MKL) $(DOCKER_WS)
46 |
47 | # final DLRS image assembled from the builder stages
48 | downstream:
49 | 	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(DOWNSTREAM_DOCKER_FILE) -t $(DOWNSTREAM_DOCKER_IMAGE_MKL) $(DOCKER_WS)
50 |
51 | aixprt:
52 | 	$(DOCKER_BUILD_CMD) --build-arg clear_ver="${CLR_VER}" -f $(DOCKER_FILE_AIXPRT) -t $(DOCKER_IMAGE_MKL_AIXPRT) $(DOCKER_WS)
53 |
54 | # NOTE(review): DOCKER_IMAGE_MKL / DOCKER_NAME_MKL are not defined in the
55 | # local config.make -- presumably set by ../../config.make; verify.
56 | check:
57 | 	OS=$(OS) \
58 | 	VERSION=$(VERSION) \
59 | 	VERSION_DIR=$(VERSION_DIR) \
60 | 	DL_LIB=$(DL_LIB) \
61 | 	DOCKER_IMAGE=$(DOCKER_IMAGE_MKL) \
62 | 	DOCKER_NAME=$(DOCKER_NAME_MKL) \
63 | 	RUN_BENCH=$(RUN_BENCH) $(MAKE) -C ../../../tests/check
64 |
65 | clean:
66 | 	docker rmi $(DOCKER_IMAGE_MKL)
67 |
68 | .PHONY: $(DEFAULT_TARGETS) aixprt
57 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/README.md:
--------------------------------------------------------------------------------
1 | # Deep Learning Reference Stack with TensorFlow 2.0 and Intel® oneAPI Deep Neural Network Library (oneDNN)
2 |
3 | [](https://microbadger.com/images/sysstacks/dlrs-tensorflow2-clearlinux:v0.6.0 "Get your own image badge on microbadger.com")
4 | [](https://microbadger.com/images/sysstacks/dlrs-tensorflow2-clearlinux:v0.6.0 "Get your own version badge on microbadger.com")
5 |
6 | ## Building Locally
7 |
8 | Default build args in Docker are documented at: https://docs.docker.com/engine/reference/builder/#arg
9 |
10 | This directory contains a set of Dockerfiles that serve as "builders". Each of them build a certain component of the stack and are used inside Dockerfile.dlrs, which is the Dockerfile that actually builds `stacks-dlrs_2-mkl`. Please note all of these Dockerfiles are necessary to build the full stack.
11 |
12 | To locally build stacks-dlrs_2-mkl, run the following:
13 |
14 | ```
15 | make
16 | ```
17 |
18 | The above command will create four "builder" images and a `stacks-dlrs_2-mkl` one, please use the latter.
19 |
20 | ## Build ARGs
21 |
22 | * `clear_ver` specifies the latest validated Clearlinux version for this DLRS Dockerfile.
23 | >NOTE: Changing this version may result in errors. If you want to upgrade the OS version, use `swupd_args` instead.
24 | * `swupd_args` specifies [swupd update](https://github.com/clearlinux/swupd-client/blob/master/docs/swupd.1.rst#options) flags passed to the update during build.
25 |
26 | >NOTE: An empty `swupd_args` will default to 32690. Consider this when building as an OS upgrade won't be performed. If you'd like to upgrade the OS version, you can either do it manually inside a running container or add `swupd_args=""` to the build command. The latest validated version is 32690, using a different one might result in unexpected errors.
27 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/config.make:
--------------------------------------------------------------------------------
1 | # DLaaS / Clear Linux related variables
2 | include ../../config.make
3 |
4 | # DLaaS MKL / Clear Linux related variables
5 | OS=clearlinux
6 | VERSION=mkl
7 | DL_LIB=tensorflow_2
8 | SERVER_LIB=openvino-tbb
9 | DOWNSTREAM=dlrs_2
10 |
11 | # docker related variables ('?=' so the environment/CI can override)
12 | BUILDER_DOCKER_NAME_MKL?=stacks-$(VERSION)-builder
13 | TF_DOCKER_NAME_MKL?=stacks-tf_2-$(VERSION)
14 | SERVER_DOCKER_NAME_MKL?=stacks-$(SERVER_LIB)
15 | CLR_DOWNSTREAM_DOCKER_NAME_MKL?=$(OS)-$(DOWNSTREAM)-builder
16 | DOWNSTREAM_DOCKER_NAME_MKL?=stacks-$(DL_LIB)-$(VERSION)
17 | DOCKER_NAME_MKL_AIXPRT?=stacks-tensorflow-$(VERSION)-tbb-aixprt
18 |
19 | # append the postfix random name if present so we create random names for
20 | # docker images and containers
21 | ifdef DOCKER_RANDOM_POSTFIX
22 | # Fix: the builder name used '?=' here, which is a no-op because the
23 | # variable is already set above, so the random postfix was never applied
24 | # to it; use ':=' like the other names.
25 | BUILDER_DOCKER_NAME_MKL:=$(BUILDER_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
26 | TF_DOCKER_NAME_MKL:=$(TF_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
27 | SERVER_DOCKER_NAME_MKL:=$(SERVER_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
28 | CLR_DOWNSTREAM_DOCKER_NAME_MKL:=$(CLR_DOWNSTREAM_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
29 | DOWNSTREAM_DOCKER_NAME_MKL:=$(DOWNSTREAM_DOCKER_NAME_MKL)-$(DOCKER_RANDOM_POSTFIX)
30 | endif
31 |
32 | # image tags default to the (possibly postfixed) container names
33 | BUILDER_DOCKER_IMAGE_MKL?=$(BUILDER_DOCKER_NAME_MKL)
34 | TF_DOCKER_IMAGE_MKL?=$(TF_DOCKER_NAME_MKL)
35 | SERVER_DOCKER_IMAGE_MKL?=$(SERVER_DOCKER_NAME_MKL)
36 | CLR_DOWNSTREAM_DOCKER_IMAGE_MKL?=$(CLR_DOWNSTREAM_DOCKER_NAME_MKL)
37 | DOWNSTREAM_DOCKER_IMAGE_MKL?=$(DOWNSTREAM_DOCKER_NAME_MKL)
38 | DOCKER_IMAGE_MKL_AIXPRT?=$(DOCKER_NAME_MKL_AIXPRT)
39 |
40 | DOCKER_SHELL?=bash
41 | BUILDER_DOCKER_FILE?=../$(VERSION)/Dockerfile.builder
42 | TF_DOCKER_FILE?=../$(VERSION)/Dockerfile.tf
43 | SERVER_DOCKER_FILE?=../$(VERSION)/Dockerfile.openvino
44 | CLR_DOWNSTREAM_DOCKER_FILE?=../$(VERSION)/Dockerfile.clr_ds
45 | DOWNSTREAM_DOCKER_FILE?=../$(VERSION)/Dockerfile.dlrs
46 | DOCKER_FILE_AIXPRT?=../$(VERSION)/Dockerfile.aixprt
47 |
48 | VERSION_DIR=$(shell pwd)
45 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/licenses/README.md:
--------------------------------------------------------------------------------
1 | # Additional details on licenses
2 |
3 | As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc from the base distribution, along with any direct or indirect dependencies of the primary software being contained). As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within.
4 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/scripts/build_openvino_ie.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | #    http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | # Build the OpenVINO (dldt) Inference Engine from source with MKL-DNN,
18 | # TBB threading and the Python bridge enabled.
19 | set -e
20 | set -u
21 | set -o pipefail
22 |
23 | # Skylake/AVX2 tuned optimization flags for all compilers
24 | export CFLAGS="-O3 "
25 | export CXXFLAGS="-O3 "
26 | export FCFLAGS="$CFLAGS "
27 | export FFLAGS="$CFLAGS "
28 | export CFLAGS="$CFLAGS -mfma -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -msse4.2 -msse4.1 -mlzcnt -mavx -mavx2 -march=skylake -mtune=skylake-avx512 -m64"
29 | export CXXFLAGS="$CXXFLAGS -mfma -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -msse4.2 -msse4.1 -mlzcnt -mavx -mavx2 -march=skylake -mtune=skylake-avx512 -m64"
30 | export GCC_IGNORE_WERROR=1
31 |
32 | # pinned dldt release commit
33 | #export GIT_HASH=0ef928 # 2019_R1.0.1
34 | #export GIT_HASH=ba6e22b # 2019_R2
35 | #export GIT_HASH=1c794d9 # 2019_R3
36 | #export GIT_HASH=fe3f978 # 2019_R3.1
37 | export GIT_HASH=b2140c0 # 2020.1
38 | export N_JOBS=$(grep -c ^processor /proc/cpuinfo)
39 |
40 | echo "=================get dldt================================="
41 | if [ ! -d ./dldt/ ]; then
42 | git clone --recursive -j"$N_JOBS" https://github.com/opencv/dldt.git &&\
43 | cd dldt && git checkout -b v2020.1 $GIT_HASH && cd ..
44 | fi
45 | echo "=================config and build inference engine=================="
46 | # install inference engine python bridge dependency
47 | pip install opencv-python numpy cython progress
48 | cd ./dldt/
49 | # Fix: OpenCV_DIR was passed without the -D prefix, so cmake treated it
50 | # as a (nonexistent) path argument instead of a cache entry.
51 | CMAKE_ARGS="-DOpenCV_DIR=/usr/lib64/cmake/opencv4 -DENABLE_OPENCV=OFF -DENABLE_MKL_DNN=ON -DTHREADING=TBB -DENABLE_GNA=ON -DENABLE_CLDNN=ON -DENABLE_MYRIAD=OFF -DENABLE_VPU=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=$(command -v python) -DPYTHON_LIBRARY=/usr/lib64/libpython3.8.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.8"
52 | mkdir -p ./build &&\
53 | cd ./build
54 | IE_BUILD_DIR=$(pwd)
55 | # CMAKE_ARGS intentionally unquoted: each -D flag must be a separate word
56 | cmake $CMAKE_ARGS ..
57 | make -j"$N_JOBS"
58 | echo "===================================================================="
59 | PYTHON_MODULE="inference-engine/bin/intel64/Release"
60 | echo "Inference Engine build directory is: $IE_BUILD_DIR"
61 | echo "IE bridges build directory is: $PYTHON_MODULE"
62 | echo "===================================================================="
56 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/scripts/check_avx512.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | #    http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | # Verify the host CPU exposes all AVX-512 extensions DLRS needs.
18 | set -e
19 | set -u
20 | set -o pipefail
21 |
22 | # run MSG -- print MSG framed by '=' rules, prefixed with a timestamp.
23 | run()
24 | {
25 | line_length=$(echo "$@" | awk '{print length}')
26 | printf "%$((line_length+35))s\n" |tr " " "="
27 | # Fix: original used "$(date) -- %s" as the *format* string with no
28 | # argument supplied for %s (and a '%' in the date would be interpreted).
29 | printf "%s -- " "$(date)"
30 | printf "%s\n" "$@"
31 | printf "%$((line_length+35))s\n" |tr " " "="
32 | }
33 | # AVX-512 feature flags required by DLRS
34 | reqd_xtns=(avx512cd avx512bw avx512dq avx512f avx512vl)
35 | # Fix: '|| true' keeps set -e/pipefail from silently aborting on hosts
36 | # with no avx512 flags at all (grep exits 1 when nothing matches).
37 | cpuxtns=$(lscpu | grep -i "avx512" || true)
38 | for i in "${reqd_xtns[@]}"
39 | do
40 | if [[ ! $cpuxtns =~ $i ]]
41 | then
42 | run "[Error] : Intel® AVX-512 extensions required by DLRS not available :: ($i)"
43 | # Fix: exit non-zero so callers (e.g. docker build) see the failure;
44 | # the original bare 'exit' returned 0 and masked the error.
45 | exit 1
46 | fi
47 | done
48 | run "[Done]: Success, the platform supports AVX-512 instructions"
40 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/scripts/install_addons.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | #    http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | # Install the DLRS python add-on packages on top of the base stack.
18 | set -e
19 | set -u
20 | set -o pipefail
21 |
22 | # Fix: 'scikit-learn' is the real PyPI project; the old 'sklearn' name is
23 | # a deprecated dummy alias and new installs of it are rejected by PyPI.
24 | addons=( tensorflow-datasets scikit-learn transformers )
25 | echo "=================installing pkg dependencies=================="
26 |
27 | # pillow need libjpeg
28 | swupd clean && swupd bundle-add devpkg-libjpeg-turbo
29 | CC="cc -mavx2" pip install --no-cache-dir --force-reinstall pillow-simd
30 |
31 | for pkg in "${addons[@]}"
32 | do
33 | echo "=================get and install $pkg======================="
34 | pip install --no-cache-dir "$pkg" || { echo "failed installing $pkg"; exit 1; }
35 | echo "==================done======================================"
36 | done
37 | exit 0
36 |
--------------------------------------------------------------------------------
/dlrs/deprecated/clearlinux/tensorflow_2/mkl/scripts/serve.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # start the model server: exec the container CMD from the serving directory.
3 | # Fail fast if the directory is missing instead of exec-ing from the wrong cwd.
4 | cd /ie_serving_py || exit 1
5 | exec "$@"
5 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/README.md:
--------------------------------------------------------------------------------
1 | # Deep Learning Reference Stack containers based on Ubuntu*
2 |
3 | We created the Deep Learning Reference Stack to help AI developers deliver the best experience on Intel® Architecture. This stack reduces complexity common with deep learning software components, provides flexibility for customized solutions, and enables you to quickly prototype and deploy Deep Learning workloads.
4 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/pytorch/.dockerignore:
--------------------------------------------------------------------------------
1 | Makefile
2 | config.make
3 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/pytorch/README.md:
--------------------------------------------------------------------------------
1 | ## Deep Learning Reference Stack with Pytorch and Intel® MKL-DNN
2 |
3 | ### Building Locally
4 |
5 | Default build args in Docker are on: https://docs.docker.com/engine/reference/builder/#arg
6 |
7 | >NOTE: This command is for locally building this image alone.
8 |
9 | ```
10 | docker build --no-cache --build-arg ubuntu_ver="20.04" -t dlrs-pytorch-ubuntu:v0.6.1 .
11 | ```
12 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/pytorch/licenses/LICENSE_MIT:
--------------------------------------------------------------------------------
1 | Copyright 2020 Intel Corporation
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
9 | End license text.
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/pytorch/licenses/third-party-programs-safestring.txt:
--------------------------------------------------------------------------------
1 | This file is the "third-party-programs.txt" for Intel Open Source Technology Center Safe String Library. This library is based from the Cisco CSafelib. The copyright and license terms are listed below.
2 | MIT License
3 |
4 | Copyright (c) 2014-2018 Intel Corporation
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
24 | ================================================================================
25 |
26 | Copyright (C) 2012, 2013 Cisco Systems
27 | All rights reserved.
28 |
29 | Permission is hereby granted, free of charge, to any person
30 | obtaining a copy of this software and associated documentation
31 | files (the "Software"), to deal in the Software without
32 | restriction, including without limitation the rights to use,
33 | copy, modify, merge, publish, distribute, sublicense, and/or
34 | sell copies of the Software, and to permit persons to whom the
35 | Software is furnished to do so, subject to the following
36 | conditions:
37 |
38 | The above copyright notice and this permission notice shall be
39 | included in all copies or substantial portions of the Software.
40 |
41 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
42 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
43 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
44 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
45 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
46 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
47 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
48 | OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/pytorch/scripts/generate_defaults.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | """ Helper script that generates a file with sane defaults that can be sourced when using MKL_DNN optimized DLRS stack.
18 | We recommend you fine tune the exported env variables based on the workload. More details can be found at:
19 | https://github.com/IntelAI/models/blob/master/docs/general/tensorflow_serving/GeneralBestPractices.md.
20 | To get further details, try --verbose."""
21 |
22 | import os
23 | import argparse
24 | import subprocess
25 | import sys
26 |
27 | import psutil
28 |
# Command-line interface for this helper; the module docstring doubles as
# the --help description.  Both options are simple on/off flags.
parser = argparse.ArgumentParser(description=__doc__)
for short_flag, long_flag, help_text in (
    ("-v", "--verbose", "detailed info on the variables being set"),
    ("-g", "--generate", "generate 'mkl_env.sh' file with default settings for MKL DNN"),
):
    parser.add_argument(short_flag, long_flag, action="store_true", help=help_text)
args = parser.parse_args()
44 |
45 |
def main():
    """Compute recommended MKL-DNN/OpenMP tuning defaults and report them.

    Socket count is read from /proc/cpuinfo (Linux only); the physical core
    count comes from psutil.  With --verbose each variable is described on
    stdout; with --generate the values are appended as ``export`` lines to
    ``mkl_env.sh`` in the current directory.
    """
    # Number of CPU packages = count of unique "physical id" entries.
    sockets = int(
        subprocess.check_output(
            'cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l', shell=True
        )
    )
    physical_cores = psutil.cpu_count(logical=False)
    # Renamed from `vars` so the builtin is not shadowed.
    env_vars = {
        "OMP_NUM_THREADS": {
            "value": physical_cores,
            "help": "Number of OpenMP threads",
        },
        "KMP_BLOCKTIME": {
            "value": 1,
            "help": "Thread waits until set ms after execution.",
        },
        "KMP_AFFINITY": {
            "value": "granularity=fine,verbose,compact,1,0",
            "help": "OpenMP threads bound to single thread context compactly",
        },
        "INTRA_OP_PARALLELISM_THREADS": {
            "value": physical_cores,
            "help": "scheme for individual op",
        },
        "INTER_OP_PARALLELISM_THREADS": {
            # fixed typo: was "parllelizing"
            "value": sockets,
            "help": "parallelizing scheme for independent ops",
        },
    }
    if args.verbose:
        print(
            (
                "variables that can be used to fine tune performance,\n"
                "use '-g' or '--generate' to generate a file with these variables\n"
            )
        )
        for name, meta in env_vars.items():
            print("variable: {}, description: {}".format(name, meta["help"]))
    if args.generate:
        # NOTE(review): the message claims /workspace/mkl_env.sh but the file
        # is written relative to the CWD — confirm the container always runs
        # this from /workspace.
        print("Generating default env vars for MKL and OpenMP, stored in /workspace/mkl_env.sh ")
        # Open the file once; the original re-opened it (append mode) for
        # every variable and never closed the handles.
        with open("mkl_env.sh", "a") as env_file:
            for name, meta in env_vars.items():
                print("export {}={}".format(name, str(meta["value"])), file=env_file)


if __name__ == "__main__":
    main()
94 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/pytorch/scripts/install_addons.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install DLRS PyTorch add-on frameworks (pytorch-lightning, flair) plus
# their combined dependency set.
# Abort on any error, on use of an unset variable, or on a failed pipe stage.
set -e
set -u
set -o pipefail

# Add-on frameworks installed one-by-one (dependency-free) at the end.
addons=( pytorch-lightning flair )
echo "=================installing pkg dependencies=================="
# Combined dependency set for the add-ons, installed up front so that the
# add-ons themselves can later be installed with --no-deps (keeps their
# resolvers from pulling in unpinned extras).
pip install --no-cache-dir -U \
    numpy \
    scikit-image \
    twine \
    "pandas" \
    "test-tube" \
    "fastprogress" \
    beautifulsoup4 \
    numexpr \
    packaging \
    pyyaml \
    requests \
    "python-dateutil>=2.6.1" \
    "gensim>=3.4.0" \
    "pytest>=5.3.2" \
    "tqdm>=4.26.0" \
    "segtok>=1.5.7" \
    "matplotlib>=2.2.3" \
    mpld3==0.3 \
    "scikit-learn>=0.21.3" \
    "sqlitedict>=1.6.0" \
    "deprecated>=1.2.4" \
    "hyperopt>=0.1.1" \
    "transformers>=2.3.0" \
    "bpemb>=0.2.9" \
    regex \
    tabulate \
    langdetect \
    scipy

# Build pillow-simd with AVX2 enabled, then force-reinstall stock pillow
# metadata on top without deps.
# NOTE(review): the second install appears to overwrite pillow-simd's files
# with plain pillow 7.0.0 — confirm this ordering is intentional.
CC="cc -mavx2" pip install --no-cache-dir --force-reinstall "pillow-simd==7.0.0.post3"
pip install --no-deps --no-cache-dir --force-reinstall "pillow==7.0.0"

# Install each add-on without dependencies (already satisfied above).
for pkg in "${addons[@]}"
do
  echo "=================get and install $pkg======================="
  pip install --no-deps --no-cache-dir "$pkg" || { echo "failed installing $pkg"; exit 1; }
  echo "==================done======================================"
done
exit 0
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/pytorch/scripts/mkl_env.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Static default MKL-DNN / OpenMP tuning values; source this file before
# running a workload.  Values assume ~10 physical cores on one socket —
# fine-tune them per workload.
# Fixed: was OMP_NUM_THEADS (typo), so OpenMP never saw the setting.
export OMP_NUM_THREADS=10
export KMP_BLOCKTIME=2
export KMP_AFFINITY=granularity=fine,verbose,compact,1,0
export INTRA_OP_PARALLELISM_THREADS=10
export INTER_OP_PARALLELISM_THREADS=1
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/.dockerignore:
--------------------------------------------------------------------------------
1 | Makefile
2 | config.make
3 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/README.md:
--------------------------------------------------------------------------------
1 | ## Deep Learning Reference Stack with TensorFlow and Intel® MKL-DNN
2 |
3 |
4 | ### Building Locally
5 |
6 | Default build args in Docker are documented at: https://docs.docker.com/engine/reference/builder/#arg
7 |
8 | >NOTE: This command is for locally building this image alone.
9 |
10 | ```
11 | docker build --no-cache --build-arg ubuntu_ver="20.04" -t dlrs-tensorflow-ubuntu:v0.6.1 .
12 | ```
13 |
14 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/licenses/LICENSE_MIT:
--------------------------------------------------------------------------------
1 | Copyright 2020 Intel Corporation
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
9 | End license text.
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/licenses/third-party-programs-safestring.txt:
--------------------------------------------------------------------------------
1 | This file is the "third-party-programs.txt" for Intel Open Source Technology Center Safe String Library. This library is based from the Cisco CSafelib. The copyright and license terms are listed below.
2 | MIT License
3 |
4 | Copyright (c) 2014-2018 Intel Corporation
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
24 | ================================================================================
25 |
26 | Copyright (C) 2012, 2013 Cisco Systems
27 | All rights reserved.
28 |
29 | Permission is hereby granted, free of charge, to any person
30 | obtaining a copy of this software and associated documentation
31 | files (the "Software"), to deal in the Software without
32 | restriction, including without limitation the rights to use,
33 | copy, modify, merge, publish, distribute, sublicense, and/or
34 | sell copies of the Software, and to permit persons to whom the
35 | Software is furnished to do so, subject to the following
36 | conditions:
37 |
38 | The above copyright notice and this permission notice shall be
39 | included in all copies or substantial portions of the Software.
40 |
41 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
42 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
43 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
44 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
45 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
46 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
47 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
48 | OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/patches/ade_gcc9_tmp_fix.patch:
--------------------------------------------------------------------------------
1 | From 4365541d2605d9f04c66a3e0ee48f81bbc54b136 Mon Sep 17 00:00:00 2001
2 | From: Cavus Mustafa
3 | Date: Wed, 18 Mar 2020 01:46:51 -0700
4 | Subject: [PATCH] Temporary gcc 9 fix for ade
5 |
6 | ---
7 | sources/ade/CMakeLists.txt | 2 +-
8 | 1 file changed, 1 insertion(+), 1 deletion(-)
9 |
10 | diff --git a/sources/ade/CMakeLists.txt b/sources/ade/CMakeLists.txt
11 | index 2d1dd20..33c3934 100644
12 | --- a/sources/ade/CMakeLists.txt
13 | +++ b/sources/ade/CMakeLists.txt
14 | @@ -11,7 +11,7 @@ file( GLOB_RECURSE sources source/*.cpp )
15 | file( GLOB_RECURSE include *.hpp )
16 |
17 | if (CMAKE_CXX_COMPILER_ID STREQUAL GNU)
18 | - set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wall -Wextra -Wconversion -Wshadow -Wno-error=cpp -Wformat -Wformat-security" )
19 | + set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wall -Wextra -Wconversion -Wshadow -Wno-error=cpp -Wformat -Wformat-security -Wno-error=redundant-move -Wno-error=pessimizing-move" )
20 | endif()
21 |
22 | add_library( ${PROJECT_NAME} STATIC ${include} ${sources} )
23 | --
24 | 2.17.1
25 |
26 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/patches/findCaller_fix.patch:
--------------------------------------------------------------------------------
1 | From 8c81191eba907a618d085031fe01483ec166bb84 Mon Sep 17 00:00:00 2001
2 | From: Cavus Mustafa
3 | Date: Fri, 13 Mar 2020 15:59:49 -0700
4 | Subject: [PATCH] Fix for findCaller() error
5 |
6 | ---
7 | tensorflow/python/platform/tf_logging.py | 2 +-
8 | 1 file changed, 1 insertion(+), 1 deletion(-)
9 |
10 | diff --git a/tensorflow/python/platform/tf_logging.py b/tensorflow/python/platform/tf_logging.py
11 | index 86a4957c..f6142462 100644
12 | --- a/tensorflow/python/platform/tf_logging.py
13 | +++ b/tensorflow/python/platform/tf_logging.py
14 | @@ -60,7 +60,7 @@ def _get_caller(offset=3):
15 |
16 | # The definition of `findCaller` changed in Python 3.2
17 | if _sys.version_info.major >= 3 and _sys.version_info.minor >= 2:
18 | - def _logger_find_caller(stack_info=False): # pylint: disable=g-wrong-blank-lines
19 | + def _logger_find_caller(stack_info=False, stacklevel=1): # pylint: disable=g-wrong-blank-lines
20 | code, frame = _get_caller(4)
21 | sinfo = None
22 | if stack_info:
23 | --
24 | 2.17.1
25 |
26 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/patches/openvino_gcc9_fix.patch:
--------------------------------------------------------------------------------
1 | From d734ecafcffcc9384128d19ccce1d49fed028ba4 Mon Sep 17 00:00:00 2001
2 | From: Cavus Mustafa
3 | Date: Tue, 17 Mar 2020 23:48:23 -0700
4 | Subject: [PATCH 1/2] Temporary fix for gcc 9
5 |
6 | ---
7 | CMakeLists.txt | 2 ++
8 | 1 file changed, 2 insertions(+)
9 |
10 | diff --git a/CMakeLists.txt b/CMakeLists.txt
11 | index e48cee57..d91b06fa 100644
12 | --- a/CMakeLists.txt
13 | +++ b/CMakeLists.txt
14 | @@ -41,6 +41,8 @@ message (STATUS "CMAKE_BUILD_TYPE ...................... " ${CMAKE_BUILD_TYPE})
15 | file(REMOVE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
16 | file(REMOVE "${CMAKE_BINARY_DIR}/targets.cmake")
17 |
18 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations")
19 | +
20 | function(build_ngraph)
21 | if(NOT ENABLE_NGRAPH)
22 | return()
23 | --
24 | 2.17.1
25 |
26 |
27 | From a6211bfa8f0daed7fb92f8fc8ddee6acec0f4bf7 Mon Sep 17 00:00:00 2001
28 | From: Cavus Mustafa
29 | Date: Wed, 18 Mar 2020 02:00:57 -0700
30 | Subject: [PATCH 2/2] mkldnn and cldnn fix for gcc 9
31 |
32 | ---
33 | inference-engine/thirdparty/clDNN/CMakeLists.txt | 3 +++
34 | inference-engine/thirdparty/mkldnn.cmake | 3 ++-
35 | 2 files changed, 5 insertions(+), 1 deletion(-)
36 |
37 | diff --git a/inference-engine/thirdparty/clDNN/CMakeLists.txt b/inference-engine/thirdparty/clDNN/CMakeLists.txt
38 | index b08c2744..5f8681d4 100644
39 | --- a/inference-engine/thirdparty/clDNN/CMakeLists.txt
40 | +++ b/inference-engine/thirdparty/clDNN/CMakeLists.txt
41 | @@ -99,6 +99,9 @@ set(CLDNN_BUILD__DEFAULT_OUT_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/build/out")
42 | # Prefix for all targets in internal pass.
43 | set(CLDNN_BUILD__PROJ_NAME_PREFIX "")
44 |
45 | +# Temporary fix for gcc9
46 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move -Wno-error=deprecated-copy")
47 | +
48 | # Single/multi-configuration generator helpers.
49 | if(CMAKE_CFG_INTDIR STREQUAL ".")
50 | set(CLDNN__TARGET_CFG_VAR "${CMAKE_BUILD_TYPE}")
51 | diff --git a/inference-engine/thirdparty/mkldnn.cmake b/inference-engine/thirdparty/mkldnn.cmake
52 | index 6d026de5..abaaa1b6 100644
53 | --- a/inference-engine/thirdparty/mkldnn.cmake
54 | +++ b/inference-engine/thirdparty/mkldnn.cmake
55 | @@ -142,4 +142,5 @@ endif()
56 | endif()
57 | ## enable jit_gemm from mlk-dnn
58 |
59 | -target_link_libraries(${TARGET} PRIVATE ${${TARGET}_LINKER_LIBS})
60 | \ No newline at end of file
61 | +target_link_libraries(${TARGET} PRIVATE ${${TARGET}_LINKER_LIBS})
62 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-stringop-overflow")
63 | --
64 | 2.17.1
65 |
66 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/scripts/Readme.md:
--------------------------------------------------------------------------------
1 | ## Deep Learning Reference Stack
2 |
3 | The Deep Learning Reference Stack is an integrated, highly-performant open source stack optimized for Intel® Xeon® Scalable platforms.
4 | This open source community release is part of an effort to ensure AI developers have easy access to all features and functionality of Intel platforms.
5 | Highly-tuned and built for cloud native environments, the release enables developers to quickly prototype by reducing complexity associated with integrating
6 | multiple software components, while still giving users the flexibility to customize their solutions.
7 |
8 | ### Sanity checks
9 |
10 | DLRS requires availability of AVX512 instructions on your machine, you can test if your platform has these instructions by running the script:
11 |
12 | ```bash
13 | /bin/bash ./check_avx512.sh
14 | ```
15 | if the platform is supported you will get a message:
16 |
17 | ```bash
18 | ==============================================================================================
19 | Fri 12 Jul 2019 02:53:47 PM PDT -- [Done]: Success, the platform supports AVX-512 instructions
20 | ==============================================================================================
21 | ```
22 |
23 | ### Tweaking performance
24 |
25 | We have also added a script to set a number of environment variables which can be tweaked based on the workload to optimize performance. You can source these variables using:
26 |
27 | Topologies supported
28 |
29 | - Vision models (ResNet50 and Inception)
30 | - ResNet101
31 | - Wide and Deep Models
32 | - Language
33 | - Default (if the workload is a custom model)
34 |
35 | ```bash
36 | source ./set_env.sh
37 |
38 | Set default runtime params when using Intel® DLRS stack.
39 | We recommend you fine tune the exported env variables based on the workload
40 | More details can be found at: https://github.com/IntelAI/models/blob/master/docs/general/tensorflow_serving/GeneralBestPractices.md
41 |
42 | Supported models
43 | - vision
44 | - resnet101
45 | - langauge
46 | - wide_deep
47 | - default
48 |
49 | What type of model are you trying to run? vision
50 | ===========================================================================
51 | Fri 12 Jul 2019 02:56:36 PM PDT -- Setting default params for vision models
52 | ===========================================================================
53 | ==================================================
54 | Fri 12 Jul 2019 02:56:36 PM PDT -- Parameters Set
55 | ==================================================
56 | BATCH_SIZE :: 128
57 | DATA_LAYOUT :: NCHW
58 | OMP_NUM_THREADS :: 10
59 | KMP_BLOCKTIME :: 2
60 | KMP_AFFINITY :: granularity=fine,verbose,compact,1,0
61 | INTER_OP_PARALLELISM_THREADS :: 1
62 | INTRA_OP_PARALLELISM_THREADS :: 10
63 | TENSORFLOW_INTRA_OP_PARALLELISM :: 10
64 | TENSORFLOW_INTER_OP_PARALLELISM :: 1
65 | ```
66 |
67 | This is only a helper script, for you to get started, we recommend you fine tune the exported environment variables based on your workload.
68 | More details can be found at General best practices [page](https://github.com/IntelAI/models/blob/master/docs/general/tensorflow_serving/GeneralBestPractices.md).
69 |
70 | ### Mailing List
71 |
72 | See our public [mailing list](https://lists.01.org/postorius/lists/stacks.lists.01.org/) page for details on how to contact us. You should only subscribe to the Stacks mailing lists using an email address that you don't mind being public.
73 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/scripts/build_openvino_ie.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Clone, patch, and build the OpenVINO (dldt) Inference Engine plus its
# Python bridges at a pinned release revision.
set -e
set -u
set -o pipefail

# Base optimization level; Skylake-class ISA flags are appended below so
# every sub-build targets the same AVX2-capable hardware.
export CFLAGS="-O3 "
export CXXFLAGS="-O3 "
export FCFLAGS="$CFLAGS "
export FFLAGS="$CFLAGS "
export CFLAGS="$CFLAGS -mfma -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -msse4.2 -msse4.1 -mlzcnt -mavx -mavx2 -march=skylake -mtune=skylake-avx512 -m64"
export CXXFLAGS="$CXXFLAGS -mfma -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -msse4.2 -msse4.1 -mlzcnt -mavx -mavx2 -march=skylake -mtune=skylake-avx512 -m64"
# Keep warnings from failing the pinned sources under newer GCCs.
export GCC_IGNORE_WERROR=1

# Pinned dldt revision (2020.1); earlier release hashes kept for reference.
#export GIT_HASH=0ef928 # 2019_R1.0.1
#export GIT_HASH=ba6e22b # 2019_R2
#export GIT_HASH=1c794d9 # 2019_R3
#export GIT_HASH=fe3f978 # 2019_R3.1
export GIT_HASH=b2140c0 # 2020.1
export N_JOBS=$(grep -c ^processor /proc/cpuinfo)

echo "=================get dldt================================="
# Clone once; skipped when a previous run already left ./dldt in place.
if [ ! -d ./dldt/ ]; then
  git clone -j"$N_JOBS" https://github.com/opencv/dldt.git &&\
  cd dldt && git checkout -b v2020.1 $GIT_HASH &&\
  git submodule update --init --recursive && cd ..
fi
echo "=================config and build inference engine=================="
# install inference engine python bridge dependency
pip install opencv-python numpy cython progress
cd ./dldt/

# git am requires a configured identity; placeholder values are enough here.
git config --global user.email "example@example.com"
git config --global user.name "example@example.com"
# Apply gcc9 compatibility patches to the ade submodule and the main tree.
cd inference-engine/thirdparty/ade
git am /patches/ade_gcc9_tmp_fix.patch
cd ../../../
git am /patches/openvino_gcc9_fix.patch

echo "=================config and build IE bridges========================="
# CPU (MKL-DNN + TBB), GNA and clDNN plugins with Python bindings for 3.8;
# Myriad/VPU targets disabled.
CMAKE_ARGS="OpenCV_DIR=/usr/lib64/cmake/opencv4 -DENABLE_OPENCV=OFF -DENABLE_MKL_DNN=ON -DTHREADING=TBB -DENABLE_GNA=ON -DENABLE_CLDNN=ON -DENABLE_MYRIAD=OFF -DENABLE_VPU=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=$(command -v python) -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.8.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.8"
mkdir -p ./build &&\
cd ./build
IE_BUILD_DIR=$(pwd)
cmake $CMAKE_ARGS ..
make -j"$N_JOBS"
echo "===================================================================="
PYTHON_MODULE="inference-engine/bin/intel64/Release"
echo "Inference Engine build directory is: $IE_BUILD_DIR"
echo "IE bridges build directory is: $PYTHON_MODULE"
echo "===================================================================="
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/scripts/check_avx512.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Verify that the host CPU exposes every AVX-512 extension DLRS requires.
set -e
set -u
set -o pipefail

# Print a timestamped message framed by '=' rules sized to the message.
run()
{
    line_length=$(echo "$@" | awk '{print length}')
    printf "%$((line_length+35))s\n" | tr " " "="
    # Pass the date as an argument rather than embedding it in the format
    # string, so '%' characters in the date output cannot be misinterpreted.
    printf "%s -- " "$(date)"
    printf "%s\n" "$@"
    printf "%$((line_length+35))s\n" | tr " " "="
}

# AVX-512 subsets required by DLRS.
reqd_xtns=(avx512cd avx512bw avx512dq avx512f avx512vl)
# '|| true' keeps set -e/pipefail from killing the script before the error
# message when the CPU reports no avx512 flags at all (grep exits 1).
cpuxtns=$(lscpu | grep -i "avx512" || true)
for i in "${reqd_xtns[@]}"
do
    if [[ ! $cpuxtns =~ $i ]]
    then
        run "[Error] : Intel® AVX-512 extensions required by DLRS not available :: ($i)"
        # Exit non-zero so callers can detect the failure (bare `exit`
        # previously reported success).
        exit 1
    fi
done
run "[Done]: Success, the platform supports AVX-512 instructions"
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow/scripts/serve.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Container entry point: run the given command from the model-server
# directory.  Guard the cd so a missing /ie_serving_py fails fast instead
# of exec'ing the server from the wrong working directory.
cd /ie_serving_py || exit 1
exec "$@"
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/README.md:
--------------------------------------------------------------------------------
1 | ## Deep Learning Reference Stack with TensorFlow and Intel® MKL-DNN
2 |
3 |
4 | ### Building Locally
5 |
6 | Default build args in Docker are documented at: https://docs.docker.com/engine/reference/builder/#arg
7 |
8 | >NOTE: This command is for locally building this image alone.
9 |
10 | ```
11 | docker build --no-cache --build-arg ubuntu_ver="20.04" -t dlrs-tensorflow2-ubuntu:v0.6.1 .
12 | ```
13 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/licenses/LICENSE_MIT:
--------------------------------------------------------------------------------
1 | Copyright 2020 Intel Corporation
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
9 | End license text.
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/licenses/third-party-programs-safestring.txt:
--------------------------------------------------------------------------------
1 | This file is the "third-party-programs.txt" for Intel Open Source Technology Center Safe String Library. This library is based from the Cisco CSafelib. The copyright and license terms are listed below.
2 | MIT License
3 |
4 | Copyright (c) 2014-2018 Intel Corporation
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
24 | ================================================================================
25 |
26 | Copyright (C) 2012, 2013 Cisco Systems
27 | All rights reserved.
28 |
29 | Permission is hereby granted, free of charge, to any person
30 | obtaining a copy of this software and associated documentation
31 | files (the "Software"), to deal in the Software without
32 | restriction, including without limitation the rights to use,
33 | copy, modify, merge, publish, distribute, sublicense, and/or
34 | sell copies of the Software, and to permit persons to whom the
35 | Software is furnished to do so, subject to the following
36 | conditions:
37 |
38 | The above copyright notice and this permission notice shall be
39 | included in all copies or substantial portions of the Software.
40 |
41 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
42 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
43 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
44 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
45 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
46 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
47 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
48 | OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/patches/ade_gcc9_tmp_fix.patch:
--------------------------------------------------------------------------------
1 | From 4365541d2605d9f04c66a3e0ee48f81bbc54b136 Mon Sep 17 00:00:00 2001
2 | From: Cavus Mustafa
3 | Date: Wed, 18 Mar 2020 01:46:51 -0700
4 | Subject: [PATCH] Temporary gcc 9 fix for ade
5 |
6 | ---
7 | sources/ade/CMakeLists.txt | 2 +-
8 | 1 file changed, 1 insertion(+), 1 deletion(-)
9 |
10 | diff --git a/sources/ade/CMakeLists.txt b/sources/ade/CMakeLists.txt
11 | index 2d1dd20..33c3934 100644
12 | --- a/sources/ade/CMakeLists.txt
13 | +++ b/sources/ade/CMakeLists.txt
14 | @@ -11,7 +11,7 @@ file( GLOB_RECURSE sources source/*.cpp )
15 | file( GLOB_RECURSE include *.hpp )
16 |
17 | if (CMAKE_CXX_COMPILER_ID STREQUAL GNU)
18 | - set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wall -Wextra -Wconversion -Wshadow -Wno-error=cpp -Wformat -Wformat-security" )
19 | + set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wall -Wextra -Wconversion -Wshadow -Wno-error=cpp -Wformat -Wformat-security -Wno-error=redundant-move -Wno-error=pessimizing-move" )
20 | endif()
21 |
22 | add_library( ${PROJECT_NAME} STATIC ${include} ${sources} )
23 | --
24 | 2.17.1
25 |
26 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/patches/openvino_gcc9_fix.patch:
--------------------------------------------------------------------------------
1 | From d734ecafcffcc9384128d19ccce1d49fed028ba4 Mon Sep 17 00:00:00 2001
2 | From: Cavus Mustafa
3 | Date: Tue, 17 Mar 2020 23:48:23 -0700
4 | Subject: [PATCH 1/2] Temporary fix for gcc 9
5 |
6 | ---
7 | CMakeLists.txt | 2 ++
8 | 1 file changed, 2 insertions(+)
9 |
10 | diff --git a/CMakeLists.txt b/CMakeLists.txt
11 | index e48cee57..d91b06fa 100644
12 | --- a/CMakeLists.txt
13 | +++ b/CMakeLists.txt
14 | @@ -41,6 +41,8 @@ message (STATUS "CMAKE_BUILD_TYPE ...................... " ${CMAKE_BUILD_TYPE})
15 | file(REMOVE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
16 | file(REMOVE "${CMAKE_BINARY_DIR}/targets.cmake")
17 |
18 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations")
19 | +
20 | function(build_ngraph)
21 | if(NOT ENABLE_NGRAPH)
22 | return()
23 | --
24 | 2.17.1
25 |
26 |
27 | From a6211bfa8f0daed7fb92f8fc8ddee6acec0f4bf7 Mon Sep 17 00:00:00 2001
28 | From: Cavus Mustafa
29 | Date: Wed, 18 Mar 2020 02:00:57 -0700
30 | Subject: [PATCH 2/2] mkldnn and cldnn fix for gcc 9
31 |
32 | ---
33 | inference-engine/thirdparty/clDNN/CMakeLists.txt | 3 +++
34 | inference-engine/thirdparty/mkldnn.cmake | 3 ++-
35 | 2 files changed, 5 insertions(+), 1 deletion(-)
36 |
37 | diff --git a/inference-engine/thirdparty/clDNN/CMakeLists.txt b/inference-engine/thirdparty/clDNN/CMakeLists.txt
38 | index b08c2744..5f8681d4 100644
39 | --- a/inference-engine/thirdparty/clDNN/CMakeLists.txt
40 | +++ b/inference-engine/thirdparty/clDNN/CMakeLists.txt
41 | @@ -99,6 +99,9 @@ set(CLDNN_BUILD__DEFAULT_OUT_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/build/out")
42 | # Prefix for all targets in internal pass.
43 | set(CLDNN_BUILD__PROJ_NAME_PREFIX "")
44 |
45 | +# Temporary fix for gcc9
46 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move -Wno-error=deprecated-copy")
47 | +
48 | # Single/multi-configuration generator helpers.
49 | if(CMAKE_CFG_INTDIR STREQUAL ".")
50 | set(CLDNN__TARGET_CFG_VAR "${CMAKE_BUILD_TYPE}")
51 | diff --git a/inference-engine/thirdparty/mkldnn.cmake b/inference-engine/thirdparty/mkldnn.cmake
52 | index 6d026de5..abaaa1b6 100644
53 | --- a/inference-engine/thirdparty/mkldnn.cmake
54 | +++ b/inference-engine/thirdparty/mkldnn.cmake
55 | @@ -142,4 +142,5 @@ endif()
56 | endif()
57 | ## enable jit_gemm from mlk-dnn
58 |
59 | -target_link_libraries(${TARGET} PRIVATE ${${TARGET}_LINKER_LIBS})
60 | \ No newline at end of file
61 | +target_link_libraries(${TARGET} PRIVATE ${${TARGET}_LINKER_LIBS})
62 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-stringop-overflow")
63 | --
64 | 2.17.1
65 |
66 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/scripts/Readme.md:
--------------------------------------------------------------------------------
1 | ## Deep Learning Reference Stack
2 |
The Deep Learning Reference Stack is an integrated, highly-performant open source stack optimized for Intel® Xeon® Scalable platforms.
4 | This open source community release is part of an effort to ensure AI developers have easy access to all features and functionality of Intel platforms.
5 | Highly-tuned and built for cloud native environments, the release enables developers to quickly prototype by reducing complexity associated with integrating
6 | multiple software components, while still giving users the flexibility to customize their solutions.
7 |
8 | ### Sanity checks
9 |
10 | DLRS requires availability of AVX512 instructions on your machine, you can test if your platform has these instructions by running the script:
11 |
12 | ```bash
13 | /bin/bash ./check_avx512.sh
14 | ```
15 | if the platform is supported you will get a message:
16 |
17 | ```bash
18 | ==============================================================================================
19 | Fri 12 Jul 2019 02:53:47 PM PDT -- [Done]: Success, the platform supports AVX-512 instructions
20 | ==============================================================================================
21 | ```
22 |
23 | ### Tweaking performance
24 |
25 | We have also added a script to set a number of environment variables which can be tweaked based on the workload to optimize performance. You can source these variables using:
26 |
27 | Topologies supported
28 |
29 | - Vision models (ResNet50 and Inception)
30 | - ResNet101
31 | - Wide and Deep Models
32 | - Language
33 | - Default (if the workload is a custom model)
34 |
35 | ```bash
36 | source ./set_env.sh
37 |
38 | Set default runtime params when using Intel® DLRS stack.
39 | We recommend you fine tune the exported env variables based on the workload
40 | More details can be found at: https://github.com/IntelAI/models/blob/master/docs/general/tensorflow_serving/GeneralBestPractices.md
41 |
42 | Supported models
43 | - vision
44 | - resnet101
45 | - langauge
46 | - wide_deep
47 | - default
48 |
49 | What type of model are you trying to run? vision
50 | ===========================================================================
51 | Fri 12 Jul 2019 02:56:36 PM PDT -- Setting default params for vision models
52 | ===========================================================================
53 | ==================================================
54 | Fri 12 Jul 2019 02:56:36 PM PDT -- Parameters Set
55 | ==================================================
56 | BATCH_SIZE :: 128
57 | DATA_LAYOUT :: NCHW
58 | OMP_NUM_THREADS :: 10
59 | KMP_BLOCKTIME :: 2
60 | KMP_AFFINITY :: granularity=fine,verbose,compact,1,0
61 | INTER_OP_PARALLELISM_THREADS :: 1
62 | INTRA_OP_PARALLELISM_THREADS :: 10
63 | TENSORFLOW_INTRA_OP_PARALLELISM :: 10
64 | TENSORFLOW_INTER_OP_PARALLELISM :: 1
65 | ```
66 |
This is only a helper script to get you started; we recommend you fine tune the exported environment variables based on your workload.
68 | More details can be found at General best practices [page](https://github.com/IntelAI/models/blob/master/docs/general/tensorflow_serving/GeneralBestPractices.md).
69 |
70 | ### Mailing List
71 |
72 | See our public [mailing list](https://lists.01.org/postorius/lists/stacks.lists.01.org/) page for details on how to contact us. You should only subscribe to the Stacks mailing lists using an email address that you don't mind being public.
73 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/scripts/build_openvino_ie.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

# Base optimization level; ISA-specific flags appended below target
# Skylake with AVX-512 tuning.
export CFLAGS="-O3 "
export CXXFLAGS="-O3 "
export FCFLAGS="$CFLAGS "
export FFLAGS="$CFLAGS "
export CFLAGS="$CFLAGS -mfma -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -msse4.2 -msse4.1 -mlzcnt -mavx -mavx2 -march=skylake -mtune=skylake-avx512 -m64"
export CXXFLAGS="$CXXFLAGS -mfma -msse -msse2 -msse3 -mssse3 -mcx16 -msahf -mmovbe -msse4.2 -msse4.1 -mlzcnt -mavx -mavx2 -march=skylake -mtune=skylake-avx512 -m64"
# Keep warnings from failing the build (upstream CMake files use -Werror).
export GCC_IGNORE_WERROR=1

# Pinned dldt (OpenVINO) commit; hashes of earlier releases kept for reference.
#export GIT_HASH=0ef928 # 2019_R1.0.1
#export GIT_HASH=ba6e22b # 2019_R2
#export GIT_HASH=1c794d9 # 2019_R3
#export GIT_HASH=fe3f978 # 2019_R3.1
export GIT_HASH=b2140c0 # 2020.1
# Parallel build jobs = number of logical CPUs.
export N_JOBS=$(grep -c ^processor /proc/cpuinfo)

echo "=================get dldt================================="
# Clone only when absent so cached/incremental rebuilds are possible.
if [ ! -d ./dldt/ ]; then
    git clone -j"$N_JOBS" https://github.com/opencv/dldt.git &&\
    cd dldt && git checkout -b v2020.1 $GIT_HASH &&\
    git submodule update --init --recursive && cd ..
fi
echo "=================config and build inference engine=================="
# install inference engine python bridge dependency
pip install opencv-python numpy cython progress
cd ./dldt/

# `git am` below needs a committer identity to record the patch commits.
git config --global user.email "example@example.com"
git config --global user.name "example@example.com"
cd inference-engine/thirdparty/ade
git am /patches/ade_gcc9_tmp_fix.patch
cd ../../../
git am /patches/openvino_gcc9_fix.patch

echo "=================config and build IE bridges========================="
CMAKE_ARGS="OpenCV_DIR=/usr/lib64/cmake/opencv4 -DENABLE_OPENCV=OFF -DENABLE_MKL_DNN=ON -DTHREADING=TBB -DENABLE_GNA=ON -DENABLE_CLDNN=ON -DENABLE_MYRIAD=OFF -DENABLE_VPU=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=$(command -v python) -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.8.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.8"
mkdir -p ./build &&\
cd ./build
IE_BUILD_DIR=$(pwd)
cmake $CMAKE_ARGS ..
make -j"$N_JOBS"
echo "===================================================================="
PYTHON_MODULE="inference-engine/bin/intel64/Release"
echo "Inference Engine build directory is: $IE_BUILD_DIR"
echo "IE bridges build directory is: $PYTHON_MODULE"
echo "===================================================================="
66 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/scripts/check_avx512.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

# Print a message framed by '=' rules, prefixed with a timestamp.
run()
{
    line_length=$(echo "$@" | awk '{print length}')
    printf "%$((line_length+35))s\n" |tr " " "="
    # Pass the date and message as printf ARGUMENTS, never as the format
    # string, so a stray '%' cannot be parsed as a format directive.
    printf '%s -- %s\n' "$(date)" "$*"
    printf "%$((line_length+35))s\n" |tr " " "="
}

# AVX-512 feature flags DLRS requires.
reqd_xtns=(avx512cd avx512bw avx512dq avx512f avx512vl)
# '|| true': on a host with no AVX-512 at all, grep exits 1 and, under
# set -e/pipefail, would kill the script before the error message prints.
cpuxtns=$(lscpu | grep -i "avx512" || true)
for i in "${reqd_xtns[@]}"
do
    if [[ ! $cpuxtns =~ $i ]]
    then
        run "[Error] : Intel® AVX-512 extensions required by DLRS not available :: ($i)"
        # Non-zero status so callers/CI can detect the unsupported platform.
        exit 1
    fi
done
run "[Done]: Success, the platform supports AVX-512 instructions"
40 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/scripts/install_addons.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

# Headers needed to build pillow-simd from source.
apt-get -y install libjpeg-dev zlib1g-dev

# Add-on Python packages installed without their transitive dependencies.
addons=( tensorflow-datasets sklearn transformers )

echo "=================installing pkg dependencies=================="

# pillow need libjpeg
CC="cc -mavx2" pip install --no-cache-dir --force-reinstall pillow-simd

for pkg in "${addons[@]}"; do
    echo "=================get and install $pkg======================="
    if ! pip install --no-deps --no-cache-dir "$pkg"; then
        echo "failed installing $pkg"
        exit 1
    fi
    echo "==================done======================================"
done

exit 0
35 |
--------------------------------------------------------------------------------
/dlrs/deprecated/ubuntu/tensorflow_2/scripts/serve.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# start the model server
#
# Abort if the serving directory is missing instead of exec'ing the
# server from whatever the current working directory happens to be.
cd /ie_serving_py || exit 1
exec "$@"
5 |
--------------------------------------------------------------------------------
/dlrs/index.rst:
--------------------------------------------------------------------------------
1 | .. _dlrs:
2 |
3 |
4 | Deep Learning Reference Stack
5 | #############################
6 | The Deep Learning Reference Stack is an integrated, highly-performant open source stack optimized for Intel® Xeon® Scalable platforms.
7 |
8 |
9 | .. figure:: ../_figures/dlrs_single_2.png
10 | :scale: 80%
11 | :alt: Deep Learning Reference Stack
12 |
13 | Highly-tuned and built for cloud native environments, the release of DLRS enables developers to quickly prototype by reducing complexity associated with integrating multiple software components, while still giving users the flexibility to customize their solutions.
14 |
The stack includes highly tuned software components across the operating system (Ubuntu* or Centos*), deep learning framework (TensorFlow*, PyTorch*), deep learning libraries (`Intel® oneAPI Deep Neural Network Library (oneDNN) <https://01.org/dnnl>`_) and other software components. This open source community release is part of an effort to ensure AI developers have easy access to all features and functionality of Intel platforms. To offer more flexibility, there are multiple versions of the Deep Learning Reference Stack.
16 |
17 | DLRS Release Announcement and Performance Reports
18 | *************************************************
19 |
20 |
21 | * `DLRS V0.9.0`_ release announcement.
22 | * `DLRS V0.8.0`_ release announcement.
23 | * `DLRS V0.7.0`_ release announcement.
24 | * `DLRS V0.6.0`_ release announcement.
25 | * `DLRS V0.5.0`_ release announcement.
26 |
27 |
28 |
29 | DLRS Guides
30 | ***********
31 |
32 | .. toctree::
33 | :maxdepth: 1
34 |
35 | dlrs.rst
36 | bert-performance.rst
37 | dlrs-inference.rst
38 |
39 | DLRS Releases
40 | *************
41 |
42 | DLRS with TensorFlow*
43 | =====================
44 |
45 | .. toctree::
46 | :maxdepth: 1
47 |
48 | tensorflow/README.md
49 |
50 | DLRS with TensorFlow Serving*
51 | =============================
52 |
53 | .. toctree::
54 | :maxdepth: 1
55 |
56 | serving/README.md
57 |
58 |
59 | DLRS with PyTorch*
60 | ==================
61 |
62 | .. toctree::
63 | :maxdepth: 1
64 |
65 | pytorch/README.md
66 |
67 | DLRS ML Compiler
68 | ================
69 |
70 | .. toctree::
71 | :maxdepth: 1
72 |
73 | ml-compiler/README.md
74 |
75 |
76 |
77 |
78 |
79 | .. _DLRS V0.5.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v5-0-now-available.html
80 |
81 | .. _DLRS V0.6.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v6-0-now-available.html
82 |
83 | .. _DLRS V0.7.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v7-0-now-available.html
84 |
85 | .. _DLRS V0.8.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v8-0-now-available.html
86 |
87 | .. _DLRS V0.9.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v9-0-now-available.html
88 |
--------------------------------------------------------------------------------
/dlrs/ml-compiler/Dockerfile:
--------------------------------------------------------------------------------
#---------------------------------------------------------------------
# TVM ML Compiler on Ubuntu 20.04 Linux
#---------------------------------------------------------------------
ARG ubuntu_ver=20.04
FROM ubuntu:$ubuntu_ver as tvm_build
LABEL maintainer=otc-swstacks@intel.com

# Non-interactive apt so the build never blocks on prompts.
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
git python3-pip wget

# Make `python`/`pip` resolve to the python3 toolchain.
# NOTE(review): hard-codes python3.8 (Ubuntu 20.04's default); must be
# updated if the base image changes -- confirm.
RUN ln -s /usr/bin/python3.8 /usr/bin/python \
&& ln -s /usr/bin/pip3 /usr/bin/pip

# setup onednn and tvm
COPY ./scripts/ /scripts
RUN ./scripts/install_onednn.sh
# Install the build tool chain, build TVM, then remove the tool chain in
# the same layer so only runtime deps (llvm-runtime, libgomp1) remain.
RUN apt-get install -y llvm-dev cmake \
build-essential wget git gcc python3-dev \
&& ./scripts/install_tvm.sh \
&& apt-get remove -y llvm-dev cmake wget gcc python3-dev \
build-essential wget && apt-get -y autoremove \
&& apt-get install --no-install-recommends -y llvm-runtime libgomp1 \
&& rm -r /scripts \
&& rm -rf /var/lib/apt/lists/*

WORKDIR /workspace

# init
RUN echo "alias python=python3" >> ~/.bashrc \
&& echo "alias pip=pip3" >> ~/.bashrc
COPY ./licenses /workspace/licenses
# Liveness probe: the Python interpreter must start.
HEALTHCHECK --interval=5m --timeout=3s \
CMD python -c "import sys" || exit 1
SHELL ["/bin/bash", "-c"]
37 |
38 |
--------------------------------------------------------------------------------
/dlrs/ml-compiler/Makefile:
--------------------------------------------------------------------------------
include config.make

export DOCKER_BUILDKIT=1

# Default target: build the image tagged <stack>-<os>:<version>.
all .DEFAULT:
	$(DOCKER_BUILD_CMD) -f $(DOCKER_FILE) -t $(STACK_NAME)-$(OS):$(STACK_VER) $(DOCKER_WS)


check:
	OS=$(OS) \
	DOCKER_IMAGE=$(STACK_NAME) \
	VERSION=$(STACK_VER)


# Remove the image with the same tag `all` builds; a bare $(STACK_NAME)
# tag is never created, so `docker rmi $(STACK_NAME)` always failed.
clean:
	docker rmi $(STACK_NAME)-$(OS):$(STACK_VER)

.PHONY: $(DEFAULT_TARGETS)
19 |
--------------------------------------------------------------------------------
/dlrs/ml-compiler/README.md:
--------------------------------------------------------------------------------
1 | ## Stacks Deep Learning Compiler
2 |
3 | Early release of stacks deep learning compiler based on TVM for System Stacks
4 |
Tensor Virtual Machine or TVM compiler image is a layered deep learning graph
compiler that is optimized for CPUs and has runtime bindings for Python.
7 |
8 | Front end libraries can interface with the compiler, loading pretrained models,
9 | to be compiled and deployed using TVM. There are 2 levels of optimizing phases
10 | in TVM, the first one is when a model is loaded, where graph level optimizations
11 | such as layer fusion and layout transformation are attempted. The next phase
12 | includes operator level optimization and code generation including a specialized operator
13 | generation using an intelligent scheduler, please refer to the [paper](https://arxiv.org/pdf/1802.04799.pdf)
14 | for more details.
15 |
16 | ### Frontends
17 |
18 | To install front-end deep learning libraries, use:
19 |
20 | ```bash
21 | ./scripts/install_dl_frontends.sh
22 | ```
23 |
24 | This will install Pytorch, TorchVision and ONNX.
25 |
26 | ### Smoke tests
27 |
28 | Run some core unit and functional tests:
29 |
30 | ```bash
31 | cd ./tvm/tests
32 | docker run -it -v`pwd`:/workspace dlrs-ml-compiler
33 | ./workspace/run_tests.sh
34 | ```
35 |
36 |
--------------------------------------------------------------------------------
/dlrs/ml-compiler/config.make:
--------------------------------------------------------------------------------
1 | include ../config.make
2 | OS=ubuntu
3 | UB_VER=20.04
4 | DOCKER_FILE=Dockerfile
5 |
6 | TVM_VERSION=v0.6
7 | STACK_NAME=dlrs-ml-compiler
8 |
9 | CACHE_REG=10.219.105.126:5000
10 |
--------------------------------------------------------------------------------
/dlrs/ml-compiler/scripts/install_dl_frontends.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2020 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
set -e
set -u
set -o pipefail

# Log a framed, timestamped message.
run() {
    echo "=============================================================="
    # Keep $(date) out of the printf FORMAT string: a stray '%' in the
    # locale's date output would be parsed as a format directive.
    printf '%s -- %s\n' "$(date)" "$*"
    echo "=============================================================="
}

# Install PyTorch/TorchVision/ONNX front ends plus helper libs, then trim
# pip caches and compiled artifacts to keep the image small.
install_dl_libs() {
    pip --no-cache-dir install \
        torch \
        torchvision \
        onnx future \
        cython scipy Image Pillow \
        && rm -rf /tmp/* \
        && find /usr/lib/ -follow -type f -name '*.pyc' -delete \
        && find /usr/lib/ -follow -type f -name '*.js.map' -delete
}

begin="$(date +%s)"
run "install DL frameworks" && install_dl_libs
finish="$(date +%s)"
runtime=$(((finish-begin)/60))
run "Done in : $runtime minute(s)"
44 |
--------------------------------------------------------------------------------
/dlrs/ml-compiler/scripts/install_onednn.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright (c) 2020 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
# Fail fast on any error so a bad download or unpack cannot produce a
# silently broken image (matches the other DLRS build scripts).
set -e
set -u
set -o pipefail

DNNL_VER=1.6
URL=https://github.com/oneapi-src/oneDNN/releases/download/v$DNNL_VER

# Fetch and unpack the prebuilt oneDNN (CPU, GNU OpenMP) release tarball.
wget $URL/dnnl_lnx_$DNNL_VER.0_cpu_gomp.tgz -O /tmp/dnnl.tgz \
    && cd /tmp/ && tar -xzf dnnl.tgz \
    && mv dnnl_lnx_* dnnl/

# copy libs and headers
mv /tmp/dnnl/lib/libdnnl* /usr/local/lib/ \
    && mv /tmp/dnnl/include/* /usr/local/include/ \
    && rm -rf /tmp/* \
    && ldconfig && cd /
30 |
--------------------------------------------------------------------------------
/dlrs/pytorch/.dockerignore:
--------------------------------------------------------------------------------
1 | Makefile
2 | config.make
3 | tests
4 |
--------------------------------------------------------------------------------
/dlrs/pytorch/README.md:
--------------------------------------------------------------------------------
1 | # Deep Learning Reference Stack with Pytorch and Intel® oneAPI Deep Neural Network Library (oneDNN)
2 |
3 | ### Building Locally
4 |
5 | We have created a set of Dockerfiles that allow you to build DLRS Pytorch with different configurations. You will be able to pick the OS (Ubuntu or Centos), then pick the flavour, either `core`, which is a leaner image with just the framework, and `full` which includes the framework and extra features. Please refer to the table below to see all features and configurations.
6 |
7 | | | Framework | Features |
8 | |------|-----------|----------|
9 | | Core | X | |
10 | | Full | X | X |
11 | | | | |
12 |
13 | Framework:
14 |
15 | * PyTorch* 1.8
16 | * Torchvision
17 |
18 | Features:
19 |
20 | * Transformers
21 | * Flair*
22 | * Horovod
23 | * Seldon core
24 | * Pytorch lightning
25 |
26 | > NOTE: We recommend you enable [Docker Buildkit](https://docs.docker.com/develop/develop-images/build_enhancements/) to have concurrent dependency resolution and automatic garbage collection. Docker Buildkit has been integrated in Docker since 18.06, if you have an older version, please ignore this note.
27 |
28 | #### Enable Docker Buildkit (see note above)
29 |
30 | ```bash
31 | export DOCKER_BUILDKIT=1
32 | ```
33 |
34 | #### Building Ubuntu based DLRS
35 |
36 | Core:
37 |
38 | ```
39 | docker build -t dlrs-pytorch-ubuntu:v0.9.1 -f Dockerfile.ubuntu --target ubuntu-core .
40 | ```
41 | Full:
42 |
43 | ```
44 | docker build -t dlrs-pytorch-ubuntu:v0.9.1 -f Dockerfile.ubuntu --target ubuntu-full .
45 | ```
46 |
47 | #### Building Centos based DLRS
48 |
49 | Core:
50 |
51 | ```
docker build -t dlrs-pytorch-centos:v0.9.1 -f Dockerfile.centos --target centos-core .
53 | ```
54 | Full:
55 |
56 | ```
57 | docker build -t dlrs-pytorch-centos:v0.9.1 -f Dockerfile.centos --target centos-full .
58 | ```
59 |
--------------------------------------------------------------------------------
/dlrs/pytorch/common/frameworks.txt:
--------------------------------------------------------------------------------
1 | jupyterlab
2 | seldon-core==1.5.1
3 | opencv-python==4.5.1.48
4 | transformers==4.2.*
5 |
--------------------------------------------------------------------------------
/dlrs/pytorch/common/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | ninja
3 | pyyaml
4 | mkl==2021.1.1
5 | mkl-include==2021.1.1
6 | setuptools
7 | cmake
8 | cffi
9 | psutil
10 | dataclasses
11 | typing-extensions
12 |
--------------------------------------------------------------------------------
/dlrs/pytorch/scripts/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Trim build-time packages from the image: mark runtime dependencies as
# manually installed (so autoremove keeps them), then purge tool chains
# and desktop/font cruft pulled in during the build.

# Packages that must survive autoremove. Declared as one element per
# package (the old single-string form only worked via unquoted
# word-splitting, which breaks if IFS is changed).
KEEP_LIST=(libgl1 libopenmpi-dev libgomp1
    python3-certifi python3-chardet python3-distro
    python3-idna python3-requests python3-six python3-urllib3)

# Build-only packages to drop. NOTE(review): "fortan" looks like a typo
# (gfortran?) -- kept as-is since removal failures are tolerated below;
# confirm the intended package name.
REMOVE_LIST=(automake build-essential cpp-8 gcc-8 m4 adwaita-icon-theme
    fontconfig fontconfig-config fonts-dejavu-core gsettings-desktop-schemas
    gtk-update-icon-cache hicolor-icon-theme humanity-icon-theme
    libcairo-gobject2 libfontconfig1 libfreetype6 libpango-1.0-0
    libpangocairo-1.0-0 libpangoft2-1.0-0 libpixman-1-0 libpng16-16
    librest-0.7-0 libthai-data libthai0 libtiff5 libwayland-client0
    libwayland-cursor0 cmake autotools protobuf-compiler
    gcc g++ vim wget curl fortan autoconf make git)

for keep_pkg in "${KEEP_LIST[@]}"; do
    apt-mark manual "$keep_pkg"
done

# Best-effort removal: a package that is absent (or already removed) must
# not abort the cleanup of the rest.
for pkg in "${REMOVE_LIST[@]}"; do
    apt-get remove -y "$pkg" > /dev/null || echo "moving on"
done

apt-get autoclean -y && apt-get autoremove -y
23 |
--------------------------------------------------------------------------------
/dlrs/pytorch/scripts/generate_defaults.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (c) 2019 Intel Corporation
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | """ Helper script that generates a file with sane defaults that can be sourced when using MKL_DNN optimized DLRS stack.
18 | We recommend you fine tune the exported env variables based on the workload. More details can be found at:
19 | https://github.com/IntelAI/models/blob/master/docs/general/tensorflow_serving/GeneralBestPractices.md.
20 | To get further details, try --verbose."""
21 |
22 | import os
23 | import argparse
24 | import subprocess
25 | import sys
26 |
27 | import psutil
28 |
# Command-line interface; the module docstring doubles as the --help text.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "-v",
    "--verbose",
    action="store_true",
    help="detailed info on the variables being set",
)
parser.add_argument(
    "-g",
    "--generate",
    action="store_true",
    help="generate 'mkl_env.sh' file with default settings for MKL DNN",
    required=False,
)
# NOTE: parsed at import time -- importing this module consumes sys.argv.
args = parser.parse_args()
44 |
45 |
def main():
    """Print (and optionally persist) recommended MKL-DNN/OpenMP env vars.

    Socket count is read from /proc/cpuinfo (Linux-only) and the physical
    core count from psutil; the suggested defaults follow Intel's MKL-DNN
    tuning guidance.  With --verbose the variables are described on stdout;
    with --generate they are appended as `export` lines to mkl_env.sh in
    the current working directory.
    """
    # Number of physical CPU sockets: count distinct "physical id" entries.
    sockets = int(
        subprocess.check_output(
            'cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l', shell=True
        )
    )
    physical_cores = psutil.cpu_count(logical=False)
    # Renamed from `vars` to avoid shadowing the builtin of the same name.
    env_vars = {
        "OMP_NUM_THREADS": {
            "value": physical_cores,
            "help": "Number of OpenMP threads",
        },
        "KMP_BLOCKTIME": {
            "value": 1,
            "help": "Thread waits until set ms after execution.",
        },
        "KMP_AFFINITY": {
            "value": "granularity=fine,verbose,compact,1,0",
            "help": "OpenMP threads bound to single thread context compactly",
        },
        "INTRA_OP_PARALLELISM_THREADS": {
            "value": physical_cores,
            "help": "scheme for individual op",
        },
        "INTER_OP_PARALLELISM_THREADS": {
            # typo fix: help text previously read "parllelizing"
            "value": sockets,
            "help": "parallelizing scheme for independent ops",
        },
    }
    if args.verbose:
        print(
            (
                "variables that can be used to fine tune performance,\n"
                "use '-g' or '--generate' to generate a file with these variables\n"
            )
        )
        for var, val in env_vars.items():
            print("variable: {}, description: {}".format(var, val["help"]))
    if args.generate:
        print("Generating default env vars for MKL and OpenMP, stored in /workspace/mkl_env.sh ")
        # Open the output file once; the original re-opened it in append
        # mode for every variable, leaking file handles.
        with open("mkl_env.sh", "a") as env_file:
            for var, val in env_vars.items():
                print("export {}={}".format(var, val["value"]), file=env_file)


if __name__ == "__main__":
    main()
94 |
--------------------------------------------------------------------------------
/dlrs/pytorch/scripts/install_addons.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install PyTorch ecosystem add-ons and their (filtered) dependencies.
set -e
set -u
set -o pipefail

# Add-on packages; installed with --no-deps below so pip cannot pull in a
# stock torch that would replace the stack's own build.
addons=( "pytorch-lightning==1.1.5" "flair==0.7.*" "onnx==1.8.0" )
echo "=================installing pkg dependencies=================="
# Merge the upstream requirement lists for the pinned lightning and flair
# releases, then delete the torch, tqdm and transformers pins -- presumably
# these are provided (or deliberately overridden) by the image; verify
# against the base image contents.
wget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/1.1.5/requirements.txt -O requirements.txt \
&& wget https://raw.githubusercontent.com/flairNLP/flair/v0.7/requirements.txt -O - >> requirements.txt \
&& sed -i '/torch/d' requirements.txt \
&& sed -i '/tqdm/d' requirements.txt \
&& sed -i '/transformers>=3.5.0,<=3.5.1/d' requirements.txt \
&& pip install --no-cache-dir \
-r requirements.txt

# pillow-simd is built from source with AVX2 enabled via the CC override.
CC="cc -mavx2" pip install --no-cache-dir --force-reinstall "pillow-simd==7.0.*"

# Install each add-on individually so a failure names the offending package.
for pkg in "${addons[@]}"
do
echo "=================get and install $pkg======================="
pip install --no-deps --no-cache-dir "$pkg" || { echo "failed installing $pkg"; exit 1; }
echo "==================done======================================"
done
exit 0
--------------------------------------------------------------------------------
/dlrs/pytorch/scripts/mkl_env.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Default MKL-DNN / OpenMP tuning knobs for the stack; source this file
# before launching a workload and tune the values to your host.
# Fix: was "OMP_NUM_THEADS" (missing R), which the OpenMP runtime ignores.
export OMP_NUM_THREADS=10
export KMP_BLOCKTIME=2
export KMP_AFFINITY=granularity=fine,verbose,compact,1,0
export INTRA_OP_PARALLELISM_THREADS=10
export INTER_OP_PARALLELISM_THREADS=1
--------------------------------------------------------------------------------
/dlrs/pytorch/scripts/torch_utils.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Set compiler flags for the requested CPU generation, then fetch the
# pytorch and torchvision sources ready to build.
set -e
set -o pipefail

# First argument selects the target uarch; anything other than "skylake"
# builds for skylake-avx512, tuned for cascadelake, with FBGEMM (int8) on.
if [[ "$1" == "skylake" ]];
then
ARCH=skylake
TUNE=skylake
FBGEMM=0
else
ARCH=skylake-avx512
TUNE=cascadelake
FBGEMM=1
fi

export TCH_TAG=v1.8.1 # Pytorch branch
export VISION_TAG=v0.9.1 # Torchvision tag
export CFLAGS="-O3 -mfma -mtune=$TUNE -march=$ARCH"
export CXXFLAGS="-O3 -mfma -mtune=$TUNE -march=$ARCH"
export USE_FBGEMM=$FBGEMM
export GCC_IGNORE_WERROR=1

echo "torch version: $TCH_TAG"
echo "torchvision version: $VISION_TAG"
echo "building for: $ARCH"
echo "tuning for: $TUNE"
echo "fbgemm for int8: $FBGEMM"

echo "=================clone pytorch============================="
# -p: do not fail under `set -e` if the directory already exists (re-runs).
mkdir -p /torch-wheels
# Checkout the pinned tag, apply the local dataloader patch, and drop the
# tensor_iterator test from the build.
git clone https://github.com/pytorch/pytorch.git /buildir/pytorch \
&& cd /buildir/pytorch && git checkout ${TCH_TAG} \
&& python setup.py clean \
&& git submodule sync && git submodule update --init --recursive \
&& git apply /buildir/dataloader.patch_v1.8.0-rc2 \
&& sed -i 's#^ ${CMAKE_CURRENT_SOURCE_DIR}/tensor_iterator_test.cpp##g' aten/src/ATen/test/CMakeLists.txt
echo "==================done=========================================="

echo "=================clone torchvision============================="
git clone -b ${VISION_TAG} https://github.com/pytorch/vision.git /buildir/vision
echo "==================done=========================================="
57 |
--------------------------------------------------------------------------------
/dlrs/serving/Makefile:
--------------------------------------------------------------------------------
include config.make

# Enable BuildKit for concurrent dependency resolution during docker build.
export DOCKER_BUILDKIT=1


# Default target: build the serving image on an Ubuntu base.
all:
	OS=ubuntu $(MAKE) serving

# Build the dlrs-serving image; STACK_NAME/STACK_VER come from config.make.
serving:
	docker build -t $(STACK_NAME)-$(OS):$(STACK_VER) -f Dockerfile .
11 |
--------------------------------------------------------------------------------
/dlrs/serving/README.md:
--------------------------------------------------------------------------------
1 | # Build instructions
2 |
3 | ```bash
4 | make all
5 | ```
6 |
--------------------------------------------------------------------------------
/dlrs/serving/config.make:
--------------------------------------------------------------------------------
include ../config.make

# Image name and tag for the DLRS serving variant.
STACK_VER=serving
STACK_NAME=dlrs-serving
5 |
--------------------------------------------------------------------------------
/dlrs/serving/scripts/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Shrink the final image: remove build-time toolchain and desktop/font
# packages.  `|| echo` tolerates packages that are already absent.
# Fixes: "fortan" -> "fortran" (matches the tensorflow cleanup script) and
# removed the duplicate "cmake" entry.

for pkg in automake build-essential cpp-8 gcc-8 m4 adwaita-icon-theme \
fontconfig fontconfig-config fonts-dejavu-core gsettings-desktop-schemas \
gtk-update-icon-cache hicolor-icon-theme humanity-icon-theme \
libcairo-gobject2 libfontconfig1 libfreetype6 libpango-1.0-0 \
libpangocairo-1.0-0 libpangoft2-1.0-0 libpixman-1-0 libpng16-16 \
librest-0.7-0 libthai-data libthai0 libtiff5 libwayland-client0 \
libwayland-cursor0 cmake autotools \
gcc g++ vim wget curl fortran autoconf make git
do apt-get remove -y $pkg > /dev/null || echo "moving on"
done
apt-get autoclean -y && apt-get autoremove -y
14 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Deep Learning Reference Stack with Tensorflow and Intel® oneAPI Deep Neural Network Library (oneDNN)
3 |
4 | ### Building Locally
5 |
6 | We have created a set of Dockerfiles that allow you to build DLRS Tensorflow with different configurations. You will be able to pick the OS (Ubuntu or Centos), then pick the flavour, either `core`, which is a leaner image with just the framework, and `full` which includes the framework and extra features. Please refer to the table below to see all features and configurations.
7 |
8 | | | Framework | Features |
9 | |------|-----------|----------|
10 | | Core | X | |
11 | | Full | X | X |
12 | | | | |
13 |
14 | Framework:
15 |
16 | * TensorFlow* 1.15.3 or
17 | * TensorFlow* 2.4.0
18 |
19 | Features:
20 |
21 | * Transformers
22 | * Horovod
23 | * Seldon core 1.2.0
24 |
25 | > NOTE: We recommend you enable [Docker Buildkit](https://docs.docker.com/develop/develop-images/build_enhancements/) to have concurrent dependency resolution and automatic garbage collection. Docker Buildkit has been integrated in Docker since 18.06, if you have an older version, please ignore this note.
26 |
27 | #### Enable Docker Buildkit (see note above)
28 |
29 | ```bash
30 | export DOCKER_BUILDKIT=1
31 | ```
32 |
33 | ### Building Ubuntu based DLRS
34 |
35 | Tensorflow 1.15.*
36 |
37 | Core:
38 |
39 | ```
docker build --build-arg tf_ver=tf1 -t dlrs-tensorflow-ubuntu:v0.9.0 -f Dockerfile.ubuntu --target tf_core .
41 | ```
42 |
43 | Full:
44 |
45 | ```
docker build --build-arg tf_ver=tf1 -t dlrs-tensorflow-ubuntu:v0.9.0 -f Dockerfile.ubuntu --target tf_full .
47 | ```
48 |
49 | Tensorflow 2.4.0
50 |
51 | Core:
52 |
53 | ```
docker build --build-arg tf_ver=tf2 -t dlrs-tensorflow2-ubuntu:v0.9.0 -f Dockerfile.ubuntu --target tf_core .
55 | ```
56 |
57 | Full:
58 |
59 | ```
docker build --build-arg tf_ver=tf2 -t dlrs-tensorflow2-ubuntu:v0.9.0 -f Dockerfile.ubuntu --target tf_full .
61 | ```
62 |
63 | ### Building Centos based DLRS
64 |
65 | Tensorflow 1.15.0
66 |
67 | Core:
68 |
69 | ```
docker build --build-arg tf_ver=tf1 -t dlrs-tensorflow-centos:v0.9.0 -f Dockerfile.centos --target tf_core .
71 | ```
72 |
73 | Full:
74 |
75 | ```
docker build --build-arg tf_ver=tf1 -t dlrs-tensorflow-centos:v0.9.0 -f Dockerfile.centos --target tf_full .
77 | ```
78 |
79 | Tensorflow 2.4.0
80 |
81 | Core:
82 |
83 | ```
docker build --build-arg tf_ver=tf2 -t dlrs-tensorflow2-centos:v0.9.0 -f Dockerfile.centos --target tf_core .
85 | ```
86 |
87 | Full:
88 |
89 | ```
docker build --build-arg tf_ver=tf2 -t dlrs-tensorflow2-centos:v0.9.0 -f Dockerfile.centos --target tf_full .
91 | ```
92 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/patches/openvino/ade_gcc9_tmp_fix.patch:
--------------------------------------------------------------------------------
1 | From 4cfc5890e3e808de696dc354d0a892c183e5b38e Mon Sep 17 00:00:00 2001
2 | From: Cavus Mustafa
3 | Date: Wed, 19 Aug 2020 15:28:15 -0700
4 | Subject: [PATCH] Temporary gcc 9 fix for ade
5 |
6 | Signed-off-by: Daniela Plascencia
7 | ---
8 | sources/ade/CMakeLists.txt | 3 ++-
9 | 1 file changed, 2 insertions(+), 1 deletion(-)
10 |
11 | diff --git a/sources/ade/CMakeLists.txt b/sources/ade/CMakeLists.txt
12 | index 2d1dd20..4a74a29 100644
13 | --- a/sources/ade/CMakeLists.txt
14 | +++ b/sources/ade/CMakeLists.txt
15 | @@ -11,7 +11,8 @@ file( GLOB_RECURSE sources source/*.cpp )
16 | file( GLOB_RECURSE include *.hpp )
17 |
18 | if (CMAKE_CXX_COMPILER_ID STREQUAL GNU)
19 | - set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wall -Wextra -Wconversion -Wshadow -Wno-error=cpp -Wformat -Wformat-security" )
20 | +
21 | + set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wall -Wextra -Wconversion -Wshadow -Wno-error=cpp -Wformat -Wformat-security -Wno-error=redundant-move -Wno-error=pessimizing-move" )
22 | endif()
23 |
24 | add_library( ${PROJECT_NAME} STATIC ${include} ${sources} )
25 | --
26 | 2.28.0
27 |
28 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/patches/openvino/openvino_gcc9_fix.patch:
--------------------------------------------------------------------------------
1 | From 354838c0d5ee851c74c6ce1127f63f52effed290 Mon Sep 17 00:00:00 2001
2 | From: Cavus Mustafa
3 | Date: Wed, 19 Aug 2020 15:23:23 -0700
4 | Subject: [PATCH 1/2] Temporary fix for gcc 9
5 |
6 | Signed-off-by: Daniela Plascencia
7 | ---
8 | CMakeLists.txt | 2 ++
9 | 1 file changed, 2 insertions(+)
10 |
11 | diff --git a/CMakeLists.txt b/CMakeLists.txt
12 | index 902d3289..ccb5f346 100644
13 | --- a/CMakeLists.txt
14 | +++ b/CMakeLists.txt
15 | @@ -46,6 +46,8 @@ message (STATUS "CMAKE_BUILD_TYPE ...................... " ${CMAKE_BUILD_TYPE})
16 | file(REMOVE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
17 | file(REMOVE "${CMAKE_BINARY_DIR}/targets.cmake")
18 |
19 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations")
20 | +
21 | function(build_ngraph)
22 | function(ngraph_set option value)
23 | if(NOT DEFINED ${option})
24 | --
25 | 2.28.0
26 |
27 | From eefc8005cd66a08a66e15d0d3753ebe2eb5a2a24 Mon Sep 17 00:00:00 2001
28 | From: Cavus Mustafa
29 | Date: Wed, 19 Aug 2020 15:23:59 -0700
30 | Subject: [PATCH 2/2] mkldnn and cldnn fix for gcc 9
31 |
32 | Signed-off-by: Daniela Plascencia
33 | ---
34 | inference-engine/thirdparty/clDNN/CMakeLists.txt | 3 +++
35 | inference-engine/thirdparty/mkldnn.cmake | 3 ++-
36 | 2 files changed, 5 insertions(+), 1 deletion(-)
37 |
38 | diff --git a/inference-engine/thirdparty/clDNN/CMakeLists.txt b/inference-engine/thirdparty/clDNN/CMakeLists.txt
39 | index 4b444eca..587fefc2 100644
40 | --- a/inference-engine/thirdparty/clDNN/CMakeLists.txt
41 | +++ b/inference-engine/thirdparty/clDNN/CMakeLists.txt
42 | @@ -102,6 +102,9 @@ set(CLDNN_BUILD__DEFAULT_OUT_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/build/out")
43 | # Prefix for all targets in internal pass.
44 | set(CLDNN_BUILD__PROJ_NAME_PREFIX "")
45 |
46 | +# Temporary fix for gcc9
47 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move -Wno-error=deprecated-copy")
48 | +
49 | # Single/multi-configuration generator helpers.
50 | if(CMAKE_CFG_INTDIR STREQUAL ".")
51 | set(CLDNN__TARGET_CFG_VAR "${CMAKE_BUILD_TYPE}")
52 | diff --git a/inference-engine/thirdparty/mkldnn.cmake b/inference-engine/thirdparty/mkldnn.cmake
53 | index baabd04a..022afeb4 100644
54 | --- a/inference-engine/thirdparty/mkldnn.cmake
55 | +++ b/inference-engine/thirdparty/mkldnn.cmake
56 | @@ -144,4 +144,5 @@ endif()
57 |
58 | add_definitions(-DMKLDNN_ENABLE_CONCURRENT_EXEC)
59 |
60 | -target_link_libraries(${TARGET} PRIVATE ${${TARGET}_LINKER_LIBS})
61 | \ No newline at end of file
62 | +target_link_libraries(${TARGET} PRIVATE ${${TARGET}_LINKER_LIBS})
63 | +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-stringop-overflow")
64 | --
65 | 2.28.0
66 |
67 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/patches/tf1/findCaller_fix.patch:
--------------------------------------------------------------------------------
1 | From 8c81191eba907a618d085031fe01483ec166bb84 Mon Sep 17 00:00:00 2001
2 | From: Cavus Mustafa
3 | Date: Fri, 13 Mar 2020 15:59:49 -0700
4 | Subject: [PATCH] Fix for findCaller() error
5 |
6 | ---
7 | tensorflow/python/platform/tf_logging.py | 2 +-
8 | 1 file changed, 1 insertion(+), 1 deletion(-)
9 |
10 | diff --git a/tensorflow/python/platform/tf_logging.py b/tensorflow/python/platform/tf_logging.py
11 | index 86a4957c..f6142462 100644
12 | --- a/tensorflow/python/platform/tf_logging.py
13 | +++ b/tensorflow/python/platform/tf_logging.py
14 | @@ -60,7 +60,7 @@ def _get_caller(offset=3):
15 |
16 | # The definition of `findCaller` changed in Python 3.2
17 | if _sys.version_info.major >= 3 and _sys.version_info.minor >= 2:
18 | - def _logger_find_caller(stack_info=False): # pylint: disable=g-wrong-blank-lines
19 | + def _logger_find_caller(stack_info=False, stacklevel=1): # pylint: disable=g-wrong-blank-lines
20 | code, frame = _get_caller(4)
21 | sinfo = None
22 | if stack_info:
23 | --
24 | 2.17.1
25 |
26 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/scripts/build_openvino_centos.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build OpenVINO (dldt) and its Python IE bridges on CentOS.
set -e
set -u
set -o pipefail

# Parallel build jobs = logical CPU count.
export N_JOBS=$(grep -c ^processor /proc/cpuinfo)

export CFLAGS="-O3 "
export CXXFLAGS="-O3 "
export FCFLAGS="$CFLAGS "
export FFLAGS="$CFLAGS "
export CFLAGS="$CFLAGS -mfma -mcx16 -msahf -mmovbe -mlzcnt -mavx -mtune=skylake-avx512 -m64"
export CXXFLAGS="$CXXFLAGS -mfma -mcx16 -msahf -mmovbe -mlzcnt -mtune=skylake-avx512 -m64"
export GCC_IGNORE_WERROR=1
# Fix: the first flag was missing its leading '-' ("Wno-error=..."), which a
# compiler would treat as an input file name rather than an option.
export CMAKE_CXX_FLAGS="-Wno-error=deprecated-declarations -Wno-error=redundant-move -Wno-error=pessimizing-move -Wno-error=unused-function -Wno-error=parentheses"

echo "================= applying patches ========================="
# Placeholder identity so `git apply`/commit operations don't abort.
git config --global user.email "example@example.com"
git config --global user.name "example@example.com"

echo "================= config and build IE bridges ========================="
mkdir -p /dldt/build && cd /dldt/build
IE_BUILD_DIR=$(pwd)
CMAKE_ARGS="-DTREAT_WARNING_AS_ERROR=OFF"
# Fix: OpenCV_DIR needs the -D prefix to be passed as a cmake cache entry.
CMAKE_ARGS="$CMAKE_ARGS -DOpenCV_DIR=/usr/lib/x86_64-linux-gnu/cmake/opencv4/ -DENABLE_OPENCV=OFF"
CMAKE_ARGS="$CMAKE_ARGS -DENABLE_MKL_DNN=ON -DTHREADING=TBB -DENABLE_GNA=ON -DENABLE_CLDNN=ON -DENABLE_MYRIAD=OFF -DENABLE_VPU=OFF"
CMAKE_ARGS="$CMAKE_ARGS -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=$(command -v python) -DPYTHON_LIBRARY=/usr/lib64/libpython3.6m.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m"
cmake $CMAKE_ARGS ..
make -j "$N_JOBS"

echo "===================================================================="
PYTHON_MODULE="inference-engine/bin/intel64/Release"
echo "Inference Engine build directory is: $IE_BUILD_DIR"
echo "IE bridges build directory is: $PYTHON_MODULE"
echo "===================================================================="
51 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/scripts/build_openvino_ubuntu.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build OpenVINO (dldt) and its Python IE bridges on Ubuntu.
set -e
set -u
set -o pipefail

# Parallel build jobs = logical CPU count.
export N_JOBS=$(grep -c ^processor /proc/cpuinfo)

export CFLAGS="-O3 "
export CXXFLAGS="-O3 "
export FCFLAGS="$CFLAGS "
export FFLAGS="$CFLAGS "
export CFLAGS="$CFLAGS -mfma -mcx16 -msahf -mmovbe -mlzcnt -mavx -mtune=skylake-avx512 -m64"
export CXXFLAGS="$CXXFLAGS -mfma -mcx16 -msahf -mmovbe -mlzcnt -mtune=skylake-avx512 -m64"
export GCC_IGNORE_WERROR=1
# Fix: the first flag was missing its leading '-' ("Wno-error=..."), which a
# compiler would treat as an input file name rather than an option.
export CMAKE_CXX_FLAGS="-Wno-error=deprecated-declarations -Wno-error=redundant-move -Wno-error=pessimizing-move -Wno-error=unused-function -Wno-error=parentheses"

echo "================= applying patches ========================="
# Placeholder identity so `git apply`/commit operations don't abort.
git config --global user.email "example@example.com"
git config --global user.name "example@example.com"

echo "================= config and build IE bridges ========================="
mkdir -p /dldt/build && cd /dldt/build
IE_BUILD_DIR=$(pwd)
CMAKE_ARGS="-DTREAT_WARNING_AS_ERROR=OFF"
# Fix: OpenCV_DIR needs the -D prefix to be passed as a cmake cache entry.
CMAKE_ARGS="$CMAKE_ARGS -DOpenCV_DIR=/usr/lib/x86_64-linux-gnu/cmake/opencv4/ -DENABLE_OPENCV=OFF"
CMAKE_ARGS="$CMAKE_ARGS -DENABLE_MKL_DNN=ON -DTHREADING=TBB -DENABLE_GNA=ON -DENABLE_CLDNN=ON -DENABLE_MYRIAD=OFF -DENABLE_VPU=OFF"
CMAKE_ARGS="$CMAKE_ARGS -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=$(command -v python) -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.8.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.8"
cmake $CMAKE_ARGS ..
make -j "$N_JOBS"

echo "===================================================================="
PYTHON_MODULE="inference-engine/bin/intel64/Release"
echo "Inference Engine build directory is: $IE_BUILD_DIR"
echo "IE bridges build directory is: $PYTHON_MODULE"
echo "===================================================================="
51 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/scripts/cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Shrink the final image: mark runtime libraries as manually installed so
# autoremove keeps them, then purge build-time toolchain/desktop packages.
# Fix: both lists were declared as one-element arrays holding a single
# space-separated string and relied on unquoted word-splitting; they are now
# proper multi-element arrays with quoted expansions.

# Runtime dependencies that autoremove must NOT take away.
KEEP_LIST=(libgl1 libopenmpi-dev openmpi-bin libgomp1 libpython3.8)
# Build-only packages safe to purge once the wheels are built.
REMOVE_LIST=(automake build-essential cpp-8 gcc-8 m4 adwaita-icon-theme
fontconfig fontconfig-config fonts-dejavu-core gsettings-desktop-schemas
gtk-update-icon-cache hicolor-icon-theme humanity-icon-theme
libcairo-gobject2 libfontconfig1 libfreetype6 libpango-1.0-0
libpangocairo-1.0-0 libpangoft2-1.0-0 libpixman-1-0 libpng16-16
librest-0.7-0 libthai-data libthai0 libtiff5 libwayland-client0
libwayland-cursor0 cmake autotools protobuf-compiler
gcc g++ vim wget curl fortran autoconf make git python3-dev)

for keep_pkg in "${KEEP_LIST[@]}"; do
apt-mark manual "$keep_pkg"
done

for pkg in "${REMOVE_LIST[@]}";
do apt-get remove -y "$pkg" > /dev/null || echo "moving on"
done
apt-get autoclean -y && apt-get autoremove -y
21 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/scripts/get_openvino.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Fetch OpenVINO (dldt) sources at a pinned release commit and install the
# Python packages needed for its build.
set -e
set -u
set -o pipefail
export N_JOBS=$(grep -c ^processor /proc/cpuinfo)

# Pinned openvino commits per release; only the active one is exported.
#export GIT_HASH=0ef928 # 2019_R1.0.1
#export GIT_HASH=ba6e22b # 2019_R2
#export GIT_HASH=1c794d9 # 2019_R3
#export GIT_HASH=fe3f978 # 2019_R3.1
#export GIT_HASH=b2140c0 # 2020.1
#export GIT_HASH=2fe9b15 # 2020.3
export GIT_HASH=f557dca # 2021.1

echo "================= get dldt ================================="
# Clone only if an earlier layer has not already populated /dldt.
if [ ! -d /dldt/ ]; then
git clone -j "$N_JOBS" https://github.com/openvinotoolkit/openvino.git /dldt && \
cd /dldt && git checkout -b v2021.1 $GIT_HASH &&\
git submodule update --init --recursive && cd ..
fi
echo "================= install pip dependencies =================="
pip install --upgrade pip
pip install numpy cython progress opencv-python
39 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/scripts/install_ovms.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Fetch, record and install the OpenVINO Model Server (OVMS) at a pinned tag,
# then remove the source checkout.
set -e
set -u
set -o pipefail

MODEL_SERVER_TAG=v2020.3
OVMS_PATH='/ie_serving_py/model_server'

# ---- Get OVMS ----
git clone --depth 1 -b ${MODEL_SERVER_TAG} https://github.com/openvinotoolkit/model_server.git ${OVMS_PATH}
cd ${OVMS_PATH} && git checkout ${MODEL_SERVER_TAG} && cd ..

# ---- OVMS setup ----
# Record the exact versions/commit in /ie_serving_py/version for traceability.
echo "OpenVINO Model Server version: ${MODEL_SERVER_TAG}" > /ie_serving_py/version
echo "Git commit: `cd ./model_server; git rev-parse HEAD; cd ..`" >> /ie_serving_py/version
echo "OpenVINO version: ${MODEL_SERVER_TAG} src" >> /ie_serving_py/version
echo "# OpenVINO built with: https://github.com/opencv/dldt.git" >> /ie_serving_py/version

cp -r ./model_server/ie_serving /ie_serving_py/ie_serving
# NOTE(review): deletes lines 29-32 of the upstream setup.py -- presumably
# dependency pins that conflict with this image; verify against the tagged
# setup.py before bumping MODEL_SERVER_TAG.
sed -i 29,32d ./model_server/setup.py
cp ./model_server/setup.py /ie_serving_py

# ---- OVMS install ----
cd /ie_serving_py && python3 setup.py install
rm -rf ${OVMS_PATH}
41 |
--------------------------------------------------------------------------------
/dlrs/tensorflow/scripts/install_py_packages.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install Python packages for the DLRS TensorFlow image; $1 selects the
# tensorflow-serving-api line ("tf2" -> 2.4.*, anything else -> 1.15.*).
# NOTE(review): with `set -u`, running this script without an argument
# aborts with "unbound variable" -- confirm callers always pass tf1/tf2.
set -e
set -u
set -o pipefail

if [[ "$1" == "tf2" ]]; then
TFS_API_VER='2.4.*'
else
TFS_API_VER='1.15.*'
fi

# Installed with --no-deps below to avoid pulling in a conflicting tensorflow.
addons=("tensorflow-datasets==4.2.*" "transformers==4.2.*")

echo "=================installing frameworks=================="
pip install --force jupyterlab
pip install --no-cache-dir -r /workspace/pypkgs.txt
pip install --no-cache-dir --no-deps tensorflow-serving-api==${TFS_API_VER}

echo "=================installing pkg dependencies=================="

# pillow need libjpeg
# Built from source with AVX2 enabled via the CC override.
CC="cc -mavx2" pip install --no-cache-dir --force-reinstall "Pillow-SIMD==7.0.0.post3"

# Install each add-on individually so a failure names the offending package.
for pkg in "${addons[@]}"
do
echo "=================get and install $pkg======================="
pip install --no-deps --no-cache-dir "$pkg" || { echo "failed installing $pkg"; exit 1; }
echo "==================done======================================"
done
exit 0
46 |
--------------------------------------------------------------------------------
/dsrs/README.md:
--------------------------------------------------------------------------------
1 | # Data Services Reference Stack (DSRS) Overview
2 |
3 | The Data Services Reference Stack consists of two images:
4 |
5 | - Redis* optimized image featuring support for Intel® Optane™ DC persistent memory
6 | - Memcached* optimized image featuring support for Intel® Optane™ DC persistent memory
7 |
8 | Documentation about how to build and use the images is provided in the base directory for each image.
9 |
--------------------------------------------------------------------------------
/dsrs/index.rst:
--------------------------------------------------------------------------------
1 | .. _dsrs:
2 |
3 |
4 | Data Services Reference Stack
5 | #############################
The System Stacks containers have been deprecated; please switch to oneAPI-based containers, which are available at https://hub.docker.com/u/intel
7 | The Data Services Reference Stack (DSRS) combines the best-in-class database and data analytics management apps. Featuring Intel® Optane™ DC persistent memory, DSRS helps you to store and process large amounts of data at unprecedented speeds using a distributed processing framework.
8 |
9 | .. figure:: ../_figures/DSRS_V1.png
10 | :scale: 80%
11 | :alt: Data Services Reference Stack
12 |
13 | To offer more flexibility, there are multiple versions of the Data Services Reference Stack. Each version includes instructions for using, building and configuring the stack to help get the most out of it.
14 |
15 | Overview
16 | ********
17 |
18 | .. toctree::
19 | :maxdepth: 1
20 |
21 | README.md
22 | terms_of_use.md
23 |
24 |
25 | memcached* versions
26 | ===================
27 |
28 | .. toctree::
29 | :maxdepth: 1
30 |
31 | memcached/README.md
32 |
33 | Redis* versions
34 | ===============
35 |
36 | .. toctree::
37 | :maxdepth: 1
38 |
39 | redis/README.md
40 |
--------------------------------------------------------------------------------
/dsrs/memcached/Dockerfile:
--------------------------------------------------------------------------------
# Stage 1: build memcached 1.6.9 from source into /usr/local/memcached.
FROM centos:8 as memcached-centos-builder
RUN dnf update -y && \
    dnf install gcc make perl libevent-devel curl -y && \
    curl -LO https://www.memcached.org/files/memcached-1.6.9.tar.gz && \
    tar -zxvf memcached-1.6.9.tar.gz && \
    cd memcached-1.6.9 && \
    ./configure --prefix=/usr/local/memcached && make && make test && make install

# Stage 2: runtime image -- only libevent plus the built binaries are kept.
FROM centos:8 AS memcached-centos
LABEL maintainer="otc-swstacks@intel.com"

# Append the memcached install prefix so the binary is found on PATH.
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/memcached/bin

COPY scripts/docker-entrypoint.sh /usr/local/bin/
COPY scripts/docker-healthcheck /usr/local/bin/

# Run as a non-root user.
RUN dnf update -y && \
    dnf install libevent -y && \
    useradd memcached-user

COPY --from=memcached-centos-builder /usr/local/memcached /usr/local/memcached/
COPY licenses/ /home/memcached-user/licenses/

HEALTHCHECK --interval=15s CMD ["docker-healthcheck"]
USER memcached-user
ENTRYPOINT ["docker-entrypoint.sh"]
28 |
--------------------------------------------------------------------------------
/dsrs/memcached/licenses/third-party-programs_dsrs.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/dsrs/memcached/licenses/third-party-programs_dsrs.txt
--------------------------------------------------------------------------------
/dsrs/memcached/scripts/docker-entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Entrypoint: sanity-check the optional PMEM mount, then exec memcached.
set -ex

# When a PMEM device is mounted at /mnt/pmem0, prove it is writable by
# creating and removing a scratch file; otherwise just note its absence.
if [ -d /mnt/pmem0 ]; then
    echo "Verifying Pmem is writable"
    echo "Hello World" > /mnt/pmem0/pmem_test
    rm /mnt/pmem0/pmem_test
else
    echo "No PMEM devices are attached to the container on /mnt/pmem0, memcached will operate without PMEM support"
fi

# Forward any container arguments to memcached; with no arguments this is
# exactly `exec memcached`. exec replaces this shell so memcached runs as
# PID 1 and receives container signals directly.
exec memcached "$@"
19 |
--------------------------------------------------------------------------------
/dsrs/memcached/scripts/docker-healthcheck:
--------------------------------------------------------------------------------
#!/bin/bash
# Healthcheck: exit 0 when a memcached process is running, 1 otherwise.
set -eo pipefail

# Fix: `ps -C` already selects processes by command name, so the previous
# `| grep "memcached" | awk '/memcached/...'` pipeline was redundant. Use the
# same `ps -C <name> -o pid=` form as the redis healthcheck for consistency;
# `-o pid=` prints only the matching PIDs (empty output when none match).
pid=$(ps -C memcached -o pid=)

if [[ -n "$pid" ]]; then
    exit 0
fi

exit 1
11 |
--------------------------------------------------------------------------------
/dsrs/redis/Dockerfile:
--------------------------------------------------------------------------------
# Stage 1: build jemalloc 4.5.0 and pmem-redis (redis with persistent-memory
# support, built with USE_NVM=yes).
# (Fix: use uppercase AS consistently; this stage used lowercase "as" while
# the runtime stage below used "AS".)
FROM centos:8 AS redis-centos-builder

ENV REDIS_PMEMD="/tmp/redis"
ENV CFLAGS=" -O3 -falign-functions=32 -fno-lto -fno-math-errno -fno-semantic-interposition -fno-trapping-math "

# NOTE(review): curl is not in the install list below; it is presumably
# provided by the centos:8 base image -- confirm if the base ever changes.
RUN dnf update -y && \
    dnf install gcc make automake autoconf libtool git bzip2 ndctl-devel daxctl-devel numactl-devel -y && \
    curl -LO https://github.com/jemalloc/jemalloc/releases/download/4.5.0/jemalloc-4.5.0.tar.bz2 && \
    tar -xvf jemalloc-4.5.0.tar.bz2 && \
    cd jemalloc-4.5.0 && \
    ./configure --disable-initial-exec-tls && \
    make && make install && \
    git clone https://github.com/pmem/pmem-redis $REDIS_PMEMD && \
    cd $REDIS_PMEMD && \
    git submodule init && git submodule update && \
    make USE_NVM=yes install && \
    cp /usr/lib64/libnuma.so* /usr/local/lib64/


# Stage 2: runtime image carrying only the built binaries, libraries,
# entrypoint/healthcheck scripts and the redis configuration.
FROM centos:8 AS redis-centos
LABEL maintainer="otc-swstacks@intel.com"

ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

COPY scripts/docker-entrypoint.sh scripts/docker-healthcheck /usr/local/bin/
COPY scripts/redis.conf /etc/redis/redis.conf
COPY --from=redis-centos-builder /usr/local/bin/ /usr/local/bin/
COPY --from=redis-centos-builder /usr/local/lib/ /usr/local/lib/
COPY --from=redis-centos-builder /usr/local/lib64/ /usr/local/lib64/

# Register /usr/local/lib64 with the dynamic linker and set up the
# unprivileged runtime user with a writable data directory.
RUN echo "/usr/local/lib64" | tee -a /etc/ld.so.conf && \
    ldconfig && \
    chmod 755 /var/cache/ldconfig/ && \
    useradd redis-user && \
    mkdir -p /var/lib/redis && \
    mkdir -p /etc/redis && \
    chown redis-user:redis-user /var/lib/redis/

COPY licenses/ /home/redis-user/licenses/

HEALTHCHECK --interval=15s CMD ["docker-healthcheck"]
USER redis-user
ENTRYPOINT ["docker-entrypoint.sh"]
--------------------------------------------------------------------------------
/dsrs/redis/licenses/third-party-programs_dsrs.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/dsrs/redis/licenses/third-party-programs_dsrs.txt
--------------------------------------------------------------------------------
/dsrs/redis/scripts/docker-entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Entrypoint: sanity-check the optional PMEM mount, then exec redis-server.
set -ex

if [ ! -d /mnt/pmem0 ]
then
    # Fix: this message previously said "memcached" -- a copy-paste from the
    # memcached entrypoint; this container runs redis.
    echo "No PMEM devices are attached to the container on /mnt/pmem0, redis will operate without PMEM support"
else
    # Prove the PMEM mount is writable by creating and removing a scratch file.
    echo "Verifying Pmem is writable"
    echo "Hello World" > /mnt/pmem0/pmem_test
    rm /mnt/pmem0/pmem_test
fi

# Forward any container arguments to redis-server after the config file;
# exec makes redis-server PID 1 so it receives container signals directly.
if [ $# -eq 0 ]; then
    exec redis-server /etc/redis/redis.conf
else
    set -- redis-server /etc/redis/redis.conf "$@"
    exec "$@"
fi
--------------------------------------------------------------------------------
/dsrs/redis/scripts/docker-healthcheck:
--------------------------------------------------------------------------------
#!/bin/bash
# Healthcheck: succeed when any redis-related process is running
# (server, cli, benchmark or sentinel), fail otherwise.

for proc in redis-server redis-cli redis-benchmark redis-sentinel; do
    # `ps -C <name> -o pid=` prints only matching PIDs (empty when none).
    if [[ -n "$(ps -C "$proc" -o pid=)" ]]; then
        exit 0
    fi
done

exit 1
13 |
--------------------------------------------------------------------------------
/dsrs/redis/scripts/redis.conf:
--------------------------------------------------------------------------------
# Minimal redis configuration for the DSRS container image.
# Run in the foreground so the container entrypoint stays attached.
daemonize no
# Standard redis port.
port 6379
# NOTE(review): protected-mode off accepts remote connections without auth;
# acceptable only on a trusted network -- confirm the deployment context.
protected-mode no
# Working directory (created and chowned to redis-user in the Dockerfile).
dir /var/lib/redis
--------------------------------------------------------------------------------
/hpcrs/Makefile:
--------------------------------------------------------------------------------
# Fix: declare the targets phony -- neither produces a file named "build" or
# "run", so a stray file with either name would otherwise mask the target.
.PHONY: build run

build:
	echo "build hpc container with icc and pytorch"
	DOCKER_BUILDKIT=1 docker build -t hpcrs-centos -f ./Dockerfile .

run:
	echo "run hpc container with icc and pytorch"
	docker run -it hpcrs-centos
8 |
--------------------------------------------------------------------------------
/hpcrs/NEWS.md:
--------------------------------------------------------------------------------
1 | # Release notes for HPCRS
2 |
3 | ## Release `v0.3.0` :
4 |
5 | Latest release of the High Performance Compute Reference Stack (HPCRS). This
6 | release features the `sysstacks/hpcrs-centos` Docker image available on
7 | DockerHub*.
8 |
9 | - Components included:
10 | - CentOS
11 | - PyTorch*
12 | - Horovod*
13 | - OpenMP*
14 | - Intel® MPI Library
15 | - Intel® C++ Compiler
16 | - Intel® Fortran Compiler
17 | - Intel® Math Kernel Library
18 | - Intel® Data Parallel C++
19 | - Intel® oneAPI Collective Communications Library
20 |   - Intel® oneAPI Threading Building Blocks
21 | - libfabric
22 | - Spack
23 |
24 | - Versions:
25 | - CentOS -> `8`
26 | - PyTorch -> `1.7.1`
27 | - Horovod -> `0.21.3`
28 | - OpenMP -> `4.5`
29 | - Intel® MPI Library -> `2021.1`
30 | - Intel® C++ Compiler -> `2021.1.2`
31 | - Intel® Fortran Compiler -> `2021.1.2`
32 | - Intel® Math Kernel Library -> `2020.0.0`
33 | - Intel® oneAPI Data Parallel C++ Compiler -> `2021.1.2`
34 | - Intel® oneAPI Collective Communications Library -> `2021.1.1`
35 |   - Intel® oneAPI Threading Building Blocks -> `2021.1.1`
36 | - libfabric -> `v1.11.0`
37 | - Spack -> `0.15.4`
38 |
39 |
40 | *Intel and the Intel logo are trademarks of Intel Corporation or its
41 | subsidiaries.*
42 |
43 | *\*Other names and brands may be claimed as the property of others*
44 |
--------------------------------------------------------------------------------
/hpcrs/authors.txt:
--------------------------------------------------------------------------------
1 | Authors
2 |
3 | Bhendigeri, Swetha
4 | Lamego, Jose A
5 | Meados, Cord <cord.meados@intel.com>
6 | Rascon Garcia, Eduardo I
7 | Sethi, Puneet
8 | Unnikrishnan Nair, Rahul
9 |
--------------------------------------------------------------------------------
/hpcrs/components/config_vars.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Configuration file for HPCRS: version info and compiler tuning flags.

# version info
# NOTE(review): "HPRS" is presumably a typo for "HPCRS", but the variable name
# is kept -- other scripts may source it by this name.
HPRS_VERSION=0.2.0-beta
INSTALL_ROOT=/opt/intel

# Skylake-AVX512 optimization flags, appended to any flags already set.
export CFLAGS="$CFLAGS -O3 -mfma -march=skylake-avx512 -mtune=skylake-avx512"
# Fix: FCFLAGS previously extended $CFLAGS (a copy-paste error that duplicated
# the C flags into the Fortran flags); it must extend the existing $FCFLAGS.
export FCFLAGS="$FCFLAGS -O3 -mfma -march=skylake-avx512 -mtune=skylake-avx512"
export CXXFLAGS="$CXXFLAGS -O3 -mfma -march=skylake-avx512 -mtune=skylake-avx512"
13 |
--------------------------------------------------------------------------------
/hpcrs/components/horovod/install.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Build and install Horovod with only the PyTorch frontend enabled.

# Clear oneCCL environment so the Horovod build does not pick it up.
unset CCL_ROOT CCL_CONFIGURATION CCL_ATL_TRANSPORT

# Build prerequisites (independent of each other; order is not significant).
pip install --no-cache-dir psutil wheel
dnf install -y cmake python3-devel

# Skip MXNet, MPI and TensorFlow support at build time.
HOROVOD_WITH_PYTORCH=1 \
HOROVOD_WITHOUT_MXNET=1 \
HOROVOD_WITHOUT_MPI=1 \
HOROVOD_WITHOUT_TENSORFLOW=1 \
    pip install --no-cache-dir horovod
9 |
--------------------------------------------------------------------------------
/hpcrs/components/libfabrics/install.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# NOTE: the from-source libfabric install below is intentionally disabled
# (every command is commented out); it is kept for reference in case a
# PSM2-enabled libfabric ever needs to be rebuilt inside the image.

#dnf install -y wget bzip2

# install libfabrics with psm2 support
#wget https://github.com/ofiwg/libfabric/releases/download/v1.11.1/libfabric-1.11.1.tar.bz2 \
#    && tar xjf libfabric-1.11.1.tar.bz2 \
#    && cd libfabric-1.11.1 \
#    && ./configure --enable-psm2=yes --prefix=/usr \
#    && make && make install && cd .. \
#    && rm -rf libfabric* \
#    && echo "libfabrics installed successfully"
13 |
--------------------------------------------------------------------------------
/hpcrs/components/licenses/third-party-programs_v2.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/hpcrs/components/licenses/third-party-programs_v2.txt
--------------------------------------------------------------------------------
/hpcrs/components/omnipath/Readme.md:
--------------------------------------------------------------------------------
1 | ## Omnipath drivers and deps
2 |
3 | drivers.sh adds the powertools repo and installs all Omnipath drivers
4 |
--------------------------------------------------------------------------------
/hpcrs/components/omnipath/drivers.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Install Intel Omni-Path (OPA) drivers and their dependencies on CentOS 8.

set -e

# add powertools repo
# EPEL plus the powertools repo supply the -devel and tooling packages that
# the OPA stack below depends on.
dnf -y install \
    dnf-plugins-core \
    https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm \
    && dnf config-manager --set-enabled powertools


# install opa drivers
# RDMA/verbs stack, PSM2 libraries, OPA fabric management tools, plus the
# build/test utilities required by the fabric management scripts.
dnf install -y \
    libibverbs-devel numactl-devel \
    libibmad-devel libibumad-devel \
    librdmacm-devel ca-certificates \
    libxml2-devel libibverbs libibmad \
    numactl libibumad librdmacm \
    libpfm.i686 ibacm libpsm2 libpsm2-devel \
    libpsm2-compat opa-basic-tools opa-fastfabric \
    opa-fm opa-address-resolution rdma-core \
    perl qperf perftest elfutils-libelf-devel \
    libstdc++-devel gcc-gfortran atlas tcl papi \
    expect tcsh sysfsutils bc rpm-build redhat-rpm-config \
    kernel-devel which iproute net-tools libhfi1 \
    && dnf clean all
27 |
--------------------------------------------------------------------------------
/hpcrs/components/oneapi/add_repo_centos.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Register the Intel graphics and oneAPI dnf repositories on CentOS 8.

set -e


# install GPGPU drivers
# https://dgpu-docs.intel.com/installation-guides/redhat/redhat-8.2.html
#dnf install -y intel-opencl intel-media intel-mediasdk level-zero intel-level-zero-gpu
cat > /etc/yum.repos.d/intel-graphics.repo << EOF
[intel-graphics]
name=Intel Graphics Drivers Repository
baseurl=https://repositories.intel.com/graphics/rhel/8.2
enabled=1
gpgcheck=0
repo_gpgcheck=0
EOF

# Add intel YUM (DNF) repository and install intel basekit
# https://software.intel.com/content/www/us/en/develop/articles/installing-intel-oneapi-toolkits-via-yum.html#instruct
cat > /etc/yum.repos.d/oneAPI.repo << EOF
[oneAPI]
name=Intel(R) oneAPI repository
baseurl=https://yum.repos.intel.com/oneapi
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
EOF
29 |
--------------------------------------------------------------------------------
/hpcrs/components/oneapi/components.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # Common components
6 | dnf install -y \
7 | intel-basekit-getting-started \
8 | intel-oneapi-dev-utilities \
9 | intel-oneapi-common-licensing \
10 | intel-oneapi-common-vars
11 | # DPCPP
12 | dnf install -y \
13 | intel-oneapi-dpcpp-cpp-compiler \
14 | intel-oneapi-dpcpp-debugger \
15 | intel-oneapi-libdpstd-devel
16 | # Install ifort (Intel Fortran)
17 | dnf install -y \
18 | intel-oneapi-compiler-fortran
19 | # Install icc (intel c++ compiler classic)
20 | dnf install -y \
21 | intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic
22 | # mkl and mpi libs
23 | dnf install -y \
24 | intel-oneapi-mkl-devel intel-oneapi-mkl
25 | # oneCCL
26 | dnf install -y \
27 | intel-oneapi-ccl \
28 | intel-oneapi-ccl-devel
29 | # oneTBB
30 | dnf install -y intel-oneapi-tbb-devel
31 |
--------------------------------------------------------------------------------
/hpcrs/components/oneapi/deprecated/common.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# (deprecated) Install the shared oneAPI base components.

common_pkgs="intel-basekit-getting-started intel-oneapi-dev-utilities intel-oneapi-common-licensing intel-oneapi-common-vars"
dnf install -y $common_pkgs
8 |
--------------------------------------------------------------------------------
/hpcrs/components/oneapi/deprecated/dpcpp.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# (deprecated) Install the Intel oneAPI DPC++ compiler toolchain.

set -e


dnf install -y intel-oneapi-dpcpp-cpp-compiler intel-oneapi-dpcpp-debugger intel-oneapi-libdpstd-devel
10 |
--------------------------------------------------------------------------------
/hpcrs/components/oneapi/deprecated/oneccl.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# (deprecated) Install the oneAPI Collective Communications Library.

dnf install -y \
    intel-oneapi-ccl \
    intel-oneapi-ccl-devel
4 |
--------------------------------------------------------------------------------
/hpcrs/components/oneapi/deprecated/onetbb.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# (deprecated) Install oneAPI Threading Building Blocks development files.

dnf install --assumeyes intel-oneapi-tbb-devel
5 |
--------------------------------------------------------------------------------
/hpcrs/components/oneapi/dgpu.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Install Intel discrete-GPU runtime components (OpenCL and Level Zero).

dnf install --assumeyes \
    intel-opencl \
    intel-level-zero-gpu \
    level-zero \
    level-zero-devel
5 |
--------------------------------------------------------------------------------
/hpcrs/components/oneapi/pytorch.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Placeholder: the oneAPI pytorch package install below is intentionally
# disabled; PyTorch is installed via pip (see components/pytorch/install.sh).

# pytorch
#dnf install -y \
#    intel-oneapi-pytorch
6 |
--------------------------------------------------------------------------------
/hpcrs/components/pytorch/install.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Install the CPU-only PyTorch and torchvision wheels pinned for HPCRS.

pip install --no-cache-dir \
    -f https://download.pytorch.org/whl/torch_stable.html \
    torch==1.7.1+cpu torchvision==0.8.2+cpu
6 |
--------------------------------------------------------------------------------
/hpcrs/components/sources.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Source all compiler/runtime environments needed inside the HPCRS image.

INSTALL_ROOT=/opt/intel
# source icc and ifort
source "/etc/profile.d/config_vars.sh"
# source $INSTALL_ROOT/bin/compilervars.sh $ARCH

# source oneapi components
source $INSTALL_ROOT/oneapi/setvars.sh

# DPC++ toolchain location and search paths. NOTE: LD_LIBRARY_PATH is
# exported twice -- the longer export below appends to the value set here,
# so both sets of paths remain on the search path.
export DPCPP_ROOT=/opt/intel/oneapi/compiler/latest/linux/
export LD_LIBRARY_PATH=${DPCPP_ROOT}/lib:${DPCPP_ROOT}/compiler/lib/intel64_lin:${LD_LIBRARY_PATH}
export INTELOCLSDKROOT=${DPCPP_ROOT}
export PATH=${DPCPP_ROOT}/bin:$PATH
# Library/include paths for the compiler, debugger, TBB and oneCCL.
export LD_LIBRARY_PATH=/opt/intel/oneapi/compiler/latest/linux/lib:/opt/intel/oneapi/compiler/latest/linux/lib/x64:/opt/intel/oneapi/compiler/latest/linux/lib/emu:/opt/intel/oneapi/compiler/latest/linux/compiler/lib/intel64_lin:/opt/intel/oneapi/compiler/latest/linux/compiler/lib:/opt/intel/oneapi/debugger/latest/dep/lib:/opt/intel/oneapi/debugger/latest/libipt/intel64/lib:/opt/intel/oneapi/debugger/latest/gdb/intel64/lib:/opt/intel/oneapi/tbb/latest/env/../lib/intel64/gcc4.8:/opt/intel/oneapi/ccl/latest/lib/cpu_gpu_dpcpp:$LD_LIBRARY_PATH
export CPATH=/opt/intel/oneapi/compiler/latest/linux/include:/opt/intel/oneapi/dev-utilities/latest/include:/opt/intel/oneapi/tbb/latest/env/../include:/opt/intel/oneapi/ccl/latest/include/cpu_gpu_dpcpp:$CPATH
18 |
--------------------------------------------------------------------------------
/hpcrs/components/spack/install.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Spack recipe builder for HPCRS: install the pinned Spack release under
# /opt/spack.

set -e

# deps
dnf install -y wget bzip2

SPACK_VER=0.15.4

# Fetch and unpack the release tarball, drop the version suffix from the
# directory name, and remove the tarball. `set -e` aborts on any failure,
# so the explicit && chain is unnecessary.
cd /opt/
wget https://github.com/spack/spack/releases/download/v$SPACK_VER/spack-$SPACK_VER.tar.gz
tar -xvf spack-$SPACK_VER.tar.gz
mv spack-$SPACK_VER spack
rm spack-$SPACK_VER.tar.gz
17 |
18 |
--------------------------------------------------------------------------------
/hpcrs/components/utils.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Shared helper functions for the HPCRS build scripts.

set -e

# Append a message to $LOGFILE, creating the file on first use.
# Fix: the previous `test ! -f "$LOGFILE" && touch "$LOGFILE"` returned
# non-zero when the file already existed; as the function's exit status that
# aborts the whole script under `set -e`.
function log() {
    if [ ! -f "$LOGFILE" ]; then
        touch "$LOGFILE"
    fi
    echo "Log message $1" >> "$LOGFILE"
}

# download from $1 to $2
function download() {
    mkdir -p "$2"
    wget -P "$2" "$1" || { echo "download failed"; exit 1; }
}

# cleanup temp directories (skips regular files, matching the original
# `! -f` guard).
# Fix: `test ! -f "$1" && rm -rf "$1"` returned non-zero for regular files,
# which aborts the caller under `set -e`.
function cleanup() {
    if [ ! -f "$1" ]; then
        rm -rf "$1"
    fi
}

# extract from compressed tar to a directory
function extract() {
    mkdir -p "$2"
    tar -xvf "$1" -C "$2"
}
32 |
--------------------------------------------------------------------------------
/hpcrs/d2s/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
134 | # pytype static type analyzer
135 | .pytype/
136 |
137 | # Cython debug symbols
138 | cython_debug/
139 |
--------------------------------------------------------------------------------
/hpcrs/d2s/setup.py:
--------------------------------------------------------------------------------
"""
d2s
--------
A tool to convert docker images to singularity images

"""

from distutils.core import setup

setup(
    name="d2s",
    version="0.2.0",
    description="A tool to convert docker images to singularity images",
    author="swstacks",
    license="Apache license 2.0",
    classifiers=[
        "Programming Language :: Python",
        # Fix: use the official trove classifier spelling; the previous
        # "License :: OSI Approved :: Apache license 2.0" is not a valid
        # classifier and is rejected by PyPI validation.
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        # NOTE(review): "Topic :: Containers" is not a registered trove
        # classifier either -- kept as-is pending a decision on a replacement.
        "Topic :: Containers",
    ],
    # Fix: py_modules takes dotted module names, not filesystem paths;
    # "d2s/d2s" would break on platforms where the path separator differs
    # and is not a valid module name.
    py_modules=["d2s.d2s"],
)
24 |
--------------------------------------------------------------------------------
/hpcrs/d2s/tests/d2s_tests.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import subprocess
3 | import os.path
4 |
5 | from d2s import d2s
6 |
class TestScript(unittest.TestCase):
    """Integration tests for the d2s docker-to-singularity helper.

    These tests require a local docker daemon with the
    ``clearlinux/stacks-dlrs-oss`` image pulled (see the ``__main__`` guard)
    and singularity installed on the host.
    """

    def test_list_docker_images(self):
        print("\n" + self._testMethodName)
        docker_list = d2s.list_docker_images()
        # assertIn reports the container contents on failure, unlike
        # assertTrue(x in y) which only reports "False is not true".
        self.assertIn('clearlinux/stacks-dlrs-oss', docker_list)

    def test_tabular_images(self):
        print("\n" + self._testMethodName)
        img_dict = d2s.tabular_images()
        self.assertIn('clearlinux/stacks-dlrs-oss', img_dict.values())

    def test_check_singularity_version(self):
        print("\n" + self._testMethodName)
        # Passes as long as the call does not raise (the former
        # assertTrue(True) was a no-op and has been removed).
        d2s.check_singularity_version()

    def test_get_sing_image_name(self):
        print("\n" + self._testMethodName)
        sing_name = d2s.get_sing_image_name('clearlinux/stacks-dlrs-oss:latest')
        self.assertEqual(sing_name, 'clearlinux_stacks-dlrs-oss_latest')

    def test_convert_to_singularity(self):
        print("\n" + self._testMethodName)
        d2s.convert_to_singularity('clearlinux/stacks-dlrs-oss')
        self.assertTrue(os.path.exists('clearlinux_stacks-dlrs-oss'))
33 |
if __name__ == '__main__':
    # Pull the reference image the tests rely on, then run the suite.
    subprocess.call(['docker','pull','clearlinux/stacks-dlrs-oss'])
    unittest.main()
37 |
38 |
--------------------------------------------------------------------------------
/hpcrs/docs/FAQ.md:
--------------------------------------------------------------------------------
1 | # HPCRS Frequently Asked Questions (FAQ)
2 |
3 | 1. My workload is getting killed with signal 9 and/or signal 7 errors, what’s causing this?
4 | * The default shared memory provided to Docker* containers is 64MB. This is
5 | not enough for many applications. Increase the shared memory to 4GB by
6 | adding the `--shm-size=4G` option to your `docker run` command.
7 |
8 | See the [Docker run documentation about runtime constraints on
9 | resources](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources)
10 | for more information.
11 |
12 | 1. Where are the components located in the Docker* image?
13 | * Intel software components are installed into the `/opt/intel` directory and
14 | are sourced using `/etc/profile.d`. For more information, refer to the
15 | documentation for each component:
16 | * Intel® MPI: https://software.intel.com/content/www/us/en/develop/documentation/mpi-developer-guide-linux/top/introduction/introducing-intel-mpi-library.html
17 | * Intel® MKL: https://software.intel.com/content/www/us/en/develop/documentation/mkl-linux-developer-guide/top.html
18 | * Intel® Parallel Studio: https://software.intel.com/content/www/us/en/develop/documentation/get-started-with-parallel-studio-xe/top.html
19 | * Intel® C++ https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top.html
20 | * Intel® Fortran https://software.intel.com/content/www/us/en/develop/documentation/fortran-compiler-developer-guide-and-reference/top.html
21 |
22 | 1. I am getting a licensing error similar to the one below when running `icc` or
23 |    `ifort`. What is wrong?
24 |
25 | * Intel® C++ and Intel® Fortran requires a valid license file to use. Place
26 | your license file under `/opt/intel/licenses/`. See the
27 | [README](../README.md) for more information.
28 |
29 | ```bash
30 | Error: A license for Comp-CL could not be obtained. (-1,359,2).
31 |
32 | Is your license file in the right location and readable?
33 | The location of your license file should be specified via
34 | the $INTEL_LICENSE_FILE environment variable.
35 |
36 | License file(s) used were (in this order):
37 | ** 1. /opt/intel/compilers_and_libraries_2020.1.217/linux/licenses
38 | ** 2. /opt/intel/licenses/*.lic
39 | ** 3. //intel/licenses
40 | ** 4. /opt/intel/compilers_and_libraries_2020.1.217/linux/bin/intel64/../../Licenses
41 | ** 5. /Licenses
42 | ** 6. /intel/licenses
43 | ** 7. /Users/Shared/Library/Application Support/Intel/Licenses
44 | ** 8. /opt/intel/compilers_and_libraries_2020.1.217/linux/bin/intel64/*.lic
45 |
46 | Please refer http://software.intel.com/sites/support/ for more information..
47 |
48 | icc: error #10052: could not checkout FLEXlm license
49 | ```
50 |
51 |
52 | *\*Other names and brands may be claimed as the property of others*
--------------------------------------------------------------------------------
/hpcrs/index.rst:
--------------------------------------------------------------------------------
1 | .. _hpcrs:
2 |
3 |
4 | High Performance Computing Reference Stack
5 | ##########################################
6 | The sysstacks containers have been deprecated; please switch to the oneAPI-based containers, available at https://hub.docker.com/u/intel
7 | The High Performance Computing Reference Stack (HPCRS) meets the needs of deploying HPC and AI workloads on the same system. This software solution reduces the complexities associated with integrating software components for High Performance Computing (HPC) workloads.
8 |
9 | .. figure:: ../_figures/HPC_V2_Multi.png
10 | :scale: 90%
11 | :alt: High Performance Computing Reference Stack
12 |
13 |
14 |
15 | Overview
16 | ********
17 |
18 | .. toctree::
19 | :maxdepth: 1
20 |
21 | README.md
22 | NEWS.md
23 | terms_of_use.md
24 |
25 | Guides
26 | ******
27 |
28 | .. toctree::
29 | :maxdepth: 1
30 |
31 | d2s/README.md
32 | docs/FAQ.md
33 | docs/hpcrs_tutorial.md
34 |
--------------------------------------------------------------------------------
/hpcrs/licenses/clear_pkg_license.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/hpcrs/licenses/clear_pkg_license.txt
--------------------------------------------------------------------------------
/hpcrs/licenses/third-party-programs_v2.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intel/stacks/0fd68acdbf116b9277fb9bf6799eafbdb5a4390a/hpcrs/licenses/third-party-programs_v2.txt
--------------------------------------------------------------------------------
/index.rst:
--------------------------------------------------------------------------------
1 | .. System Stacks for Linux* OS documentation master file, created by
2 | sphinx-quickstart on Thu Apr 16 13:10:17 2020.
3 |
4 |
5 | System Stacks for Linux* OS Documentation
6 | #########################################
7 |
8 |
9 | These open source community releases are part of an effort to ensure developers have easy access to the features and functionality of Intel Platforms. More information about the stacks is available on the `Intel® oneContainer Portal`_ .
10 |
11 |
12 | :ref:`dlrs`
13 | ************************************
14 | The Deep Learning Reference Stack is an integrated, highly-performant open source stack optimized for Intel® Xeon® Scalable platforms.
15 |
16 | :ref:`dsrs`
17 | ************************************
18 | The Data Services Reference Stack (DSRS) combines the best-in-class database and data analytics management apps. Featuring Intel® Optane™ DC persistent memory, DSRS helps you to store and process large amounts of data at unprecedented speeds using a distributed processing framework.
19 |
20 | :ref:`mers`
21 | ****************************
22 | The Media Reference Stack is designed to accelerate offline and live media processing, analytics, and inference recommendations for real-world use cases such as smart city applications, immersive media enhancement, video surveillance, and product placement.
23 |
24 | :ref:`hpcrs`
25 | **************************************************
26 | The High Performance Computing Reference Stack (HPCRS) meets the needs of deploying HPC and AI workloads on the same system. This software solution reduces the complexities associated with integrating software components for High Performance Computing (HPC) workloads.
27 |
28 | :ref:`oneContainer`
29 | **********************
30 |
31 | A collection of resources to make developing and deploying containers simpler.
32 |
33 | :ref:`whitepapers`
34 | *************************
35 | Covering a range of topics from optimizing for performance to combining stacks to create a real world medical solution, the white papers provide analysis of the how and why the stacks can be used.
36 |
37 | :ref:`perf`
38 | ***********
39 | Find benchmark and performance information about the System Stacks releases.
40 |
41 | Real World Use Cases
42 | ********************
43 | `This repository`_ offers example use cases to explore the possibilities enabled by the System Stacks.
44 |
45 | Source Code
46 | ***********
47 | Find the `source code`_ for the System Stacks for Linux OS in this GitHub repository.
48 |
49 |
50 | .. _Intel® oneContainer Portal: https://software.intel.com/containers
51 |
52 | .. _This repository: https://github.com/intel/stacks-usecase
53 |
54 | .. _source code: https://github.com/intel/stacks
55 |
56 |
57 |
58 | .. toctree::
59 | :hidden:
60 |
61 | README.md
62 | dlrs/index
63 | dsrs/index
64 | mers/index
65 | hpcrs/index
66 | oneContainer/index
67 | whitepapers/index
68 | perf.rst
69 | Real World Use Cases
70 | Project GitHub repository
71 |
--------------------------------------------------------------------------------
/mers/AUTHORS.md:
--------------------------------------------------------------------------------
1 | # Media Reference Stack Authors
2 |
3 | * Luis Ponce, [@luisfponce](https://github.com/luisfponce)
4 | * Hugo Soto, [@hugosoto](https://github.com/hugosoto)
5 | * Puneet Sethi, [@puneetse](https://github.com/puneetse)
6 | * Sergio Perez, [@sperezglz](https://github.com/sperezglz)
7 | * David Esparza, [@dborquez](https://github.com/dborquez)
8 | * Leonardo Sandoval, [@lsandov1](https://github.com/lsandov1)
9 |
10 | Thanks to all who have contributed!
11 |
--------------------------------------------------------------------------------
/mers/BUGS.md:
--------------------------------------------------------------------------------
1 | # Known Issues
2 |
3 | ## MeRS `v0.4.0`
4 |
5 | * No Known Issues
6 | ## MeRS `v0.3.0`
7 |
8 | * No Known Issues
9 |
10 | ## MeRS `v0.2.0`
11 |
12 | vaapi sink:
13 |
14 | * Vaapisink is causing SIGSEGV when vaapi transcode required through this sink, i.e. gst-launch-1.0 filesrc location=relax.jpg ! jpegparse ! vaapijpegdec ! imagefreeze ! vaapisink
15 |
16 | The fix is at [libva 2.7.0](https://github.com/intel/libva/releases/tag/2.7.0), which became available at the release moment of the MeRS v0.2.0 series; hence, this component bump will be included in the next release.
17 |
18 | gst-vaapi:
19 |
20 | * The gst-vaapi*dec elements crash when fake-sinking on headless environments, i.e. vaapih264dec ! fakesink.
21 |
22 | The patch became available at the release moment of this version; hence, this fix will be included in the next minor release. The workaround is to
23 | use the vaapisink element. The issue is being tracked upstream at https://gitlab.freedesktop.org/gstreamer/gstreamer-vaapi/-/issues/247
24 |
25 | ## MeRS `v0.1.0`
26 |
27 |
--------------------------------------------------------------------------------
/mers/deprecated/clearlinux/INSTALL.md:
--------------------------------------------------------------------------------
1 | # Media Reference Stack - Clear Linux* OS
2 | [](https://microbadger.com/images/sysstacks/mers-clearlinux "Get your own image badge on microbadger.com")
3 |
4 | ## Building Locally
5 |
6 | The Dockerfiles for all Intel System Stacks container images are available at
7 | [stacks repository](https://github.com/intel/stacks). These can be used to
8 | build and modify the media image at [MeRS](https://github.com/intel/stacks/tree/master/mers/deprecated/clearlinux)
9 |
10 | ```bash
11 | docker build --no-cache -t sysstacks/stacks-mers-clearlinux .
12 | ```
13 |
14 | > **Note:**
15 | Default build args in Docker are documented at: https://docs.docker.com/engine/reference/builder/#arg
16 |
17 | ## Pulling from Docker Hub
18 |
19 | In case you want to pull the image directly instead of building it, official
20 | images are located on Docker Hub and can be pulled with the following command:
21 |
22 | ```bash
23 | docker pull sysstacks/stacks-mers-clearlinux
24 | ```
25 |
26 | > **Note:**
27 | The pulled image's Clear Linux version may differ from an image built
28 | locally because the OS is a rolling distribution. Most
29 | probably, the locally built image will have a greater version than the one
30 | pulled from Docker Hub, the former not tested for that particular OS version.
31 |
32 | ## Running the Media Container
33 |
34 | Once you have the Media Reference Stack image, run it with
35 |
36 | ```bash
37 | docker run -it sysstacks/stacks-mers-clearlinux
38 | ```
39 |
40 | > **Note:**
41 | Since Clear Linux OS is a stateless system, avoid modifying the
42 | files under the `/usr` directory instead use `/etc` otherwise the software
43 | updater may overwrite `/usr` files.
44 |
45 | ## Run examples
46 |
47 | For transcoding and video analytics examples and pipelines, please see the
48 | official documentation at: https://docs.01.org/clearlinux/latest/guides/stacks/mers.html#using-the-mers-container-image
49 |
--------------------------------------------------------------------------------
/mers/deprecated/clearlinux/aom-patches/stacks-mers-v2-include-aom.diff:
--------------------------------------------------------------------------------
1 | diff --git stacks/mers/clearlinux/Dockerfile stacks/mers/clearlinux/Dockerfile
2 | index 930273b..698157e 100644
3 | --- clearlinux/Dockerfile
4 | +++ clearlinux/Dockerfile
5 | @@ -37,6 +37,20 @@ RUN \
6 | ninja install && \
7 | DESTDIR=/home/build ninja install
8 |
9 | +# AOM_DOT=OS
10 | +ARG AOM_VER=9d68e635c0de52ac4a56ab296f0ff2f45ea00fba
11 | +ARG AOM_REPO=https://aomedia.googlesource.com/aom
12 | +
13 | +RUN \
14 | + git clone ${AOM_REPO} && \
15 | + mkdir aom/aom_build && \
16 | + cd aom/aom_build && \
17 | + git checkout ${AOM_VER} && \
18 | + cmake -DBUILD_SHARED_LIBS=ON -DENABLE_NASM=ON -DENABLE_TESTS=OFF -DENABLE_DOCS=OFF -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_INSTALL_LIBDIR=lib64 .. && \
19 | + make -j $(nproc) && \
20 | + make install DESTDIR=/home/build && \
21 | + make install
22 | +
23 | # X264_DOT=OS
24 | ARG X264_VER=1771b556ee45207f8711744ccbd5d42a3949b14c
25 | ARG X264_REPO=https://code.videolan.org/videolan/x264.git
26 | @@ -51,6 +65,7 @@ RUN \
27 | make install
28 |
29 |
30 | +
31 | # SVT_HEVC_DOT=OS
32 | ARG SVT_HEVC_VER=v1.4.3
33 | ARG SVT_HEVC_REPO=https://github.com/OpenVisualCloud/SVT-HEVC
34 | @@ -204,7 +219,7 @@ ARG MERS_ENABLE_ENCODERS=libsvt_hevc,libsvt_av1,libx264,hevc_qsv,h264_qsv,henv_v
35 | # FFMPEG_LIBDAV1D_DEC_DOT=FFMPEG,DAV1D;color=red
36 | # FFMPEG_AAC_DEC_DOT=FFMPEG;color=red
37 | # FFMPEG_MP3_DEC_DOT=FFMPEG;color=red
38 | -ARG MERS_ENABLE_DECODERS=h264,hevc,hevc_qsv,h264_qsv,libdav1d,aac,mp3
39 | +ARG MERS_ENABLE_DECODERS=h264,hevc,libaom_av1,hevc_qsv,h264_qsv,libdav1d,aac,mp3
40 |
41 | # FFMPEG_MP4_MUXER_DOT=FFMPEG;color=red
42 | # FFMPEG_HLS_MUXER_DOT=FFMPEG;color=red
43 | @@ -226,7 +241,7 @@ ARG MERS_ENABLE_DEMUXERS=rtsp,dash,mpegts,avi,webm
44 | # FFMPEG_H264_PARSER_DOT=FFMPEG;color=red
45 | ARG MERS_ENABLE_PARSERS=h264
46 |
47 | -ARG MERS_ENABLES="--enable-libsvthevc --enable-libsvtav1 --enable-nonfree --enable-gpl --enable-libx264 --enable-libdav1d "
48 | +ARG MERS_ENABLES="--enable-libsvthevc --enable-libsvtav1 --enable-nonfree --enable-gpl --enable-libx264 --enable-libdav1d --enable-libaom "
49 | ARG MERS_OTHERS="--enable-ffprobe"
50 |
51 |
--------------------------------------------------------------------------------
/mers/deprecated/clearlinux/ffmpeg-patches/0026-return-NULL-after-breaking-while.patch:
--------------------------------------------------------------------------------
1 | From 89d585c1855ee338b6b1a059b3b3b3ce2c10ed28 Mon Sep 17 00:00:00 2001
2 | From: Leonardo Sandoval
3 | Date: Mon, 27 Jan 2020 09:50:52 -0800
4 | Subject: [PATCH] return NULL after breaking while
5 |
6 | ---
7 | fftools/ffmpeg.c | 2 +-
8 | 1 file changed, 1 insertion(+), 1 deletion(-)
9 |
10 | diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
11 | index d8d6dd5..2660429 100755
12 | --- a/fftools/ffmpeg.c
13 | +++ b/fftools/ffmpeg.c
14 | @@ -2447,7 +2447,7 @@ static void *filter_pipeline(void *arg)
15 | break;
16 | }
17 | }
18 | - return;
19 | + return NULL;
20 | }
21 | #endif
22 | static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
23 | --
24 | 2.7.4
25 |
26 |
--------------------------------------------------------------------------------
/mers/deprecated/clearlinux/ffmpeg-patches/CVE-2019-15942.patch:
--------------------------------------------------------------------------------
1 | From: Carl Eugen Hoyos
2 | Date: Sun, 25 Aug 2019 14:09:40 +0000 (+0200)
3 | Subject: lavc/x264: Also clean-up libx264rgb after init errors.
4 | X-Git-Url: http://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff_plain/434588596fef6bd2cef17f8c9c2979a010153edd
5 |
6 | lavc/x264: Also clean-up libx264rgb after init errors.
7 |
8 | Missed in c180f0f6
9 | ---
10 |
11 | diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c
12 | index a99c1f3..86e3530 100644
13 | --- a/libavcodec/libx264.c
14 | +++ b/libavcodec/libx264.c
15 | @@ -1195,6 +1195,7 @@ AVCodec ff_libx264rgb_encoder = {
16 | .priv_class = &rgbclass,
17 | .defaults = x264_defaults,
18 | .pix_fmts = pix_fmts_8bit_rgb,
19 | + .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
20 | .wrapper_name = "libx264",
21 | };
22 | #endif
23 |
--------------------------------------------------------------------------------
/mers/deprecated/clearlinux/scripts/docker-healthcheck:
--------------------------------------------------------------------------------
#!/bin/bash

# Container healthcheck: healthy (exit 0) iff at least one bash process is
# currently running inside the container, unhealthy (exit 1) otherwise.
if [[ -n "$(ps -C bash -o pid=)" ]]; then
    exit 0
else
    exit 1
fi
--------------------------------------------------------------------------------
/mers/deprecated/clearlinux/scripts/entrypoint.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
# Entrypoint for the MeRS (Clear Linux) container image.
# $0 is the script name; $1 is the sub-command, remaining args pass through.
CMD=$1

case "$CMD" in
  "help" )
    # Display what user will see as help
    echo -e 'Execute docker run -it to execute extra arguments.\nE.g. docker run -it "ffmpeg -help && gst-launch-1.0 --version"'
  ;;
  "start" )
    # we can modify files here, using ENV variables passed in
    # "docker create" command. It can't be done during build process.
    echo -e "Use help to get more available options. E.g. docker run help\n"
    exec /bin/bash
  ;;

  * )
    # Run custom command. Thanks to this line we can still use
    # "docker run this-image /bin/bash" and it will work, or any other command.
    # ${@:2} is a bashism and fails under plain sh (bad substitution);
    # use POSIX shift + "$@" instead. $CMD stays unquoted so a single
    # quoted command string is still word-split into a command line.
    shift
    exec $CMD "$@"
  ;;
esac
--------------------------------------------------------------------------------
/mers/deprecated/clearlinux/svt-hevc-patches/0001-include-pbutils-as-gst-plugin-depedency.patch:
--------------------------------------------------------------------------------
1 | From 392f99e95d8bdcfe332f9624b2288ea22d6cfd33 Mon Sep 17 00:00:00 2001
2 | From: Leonardo Sandoval
3 | Date: Thu, 10 Oct 2019 13:07:27 -0700
4 | Subject: [PATCH] include pbutils as gst-plugin depedency
5 |
6 | ---
7 | gstreamer-plugin/CMakeLists.txt | 3 +++
8 | 1 file changed, 3 insertions(+)
9 |
10 | diff --git a/gstreamer-plugin/CMakeLists.txt b/gstreamer-plugin/CMakeLists.txt
11 | index 5ac413f..223e4ef 100644
12 | --- a/gstreamer-plugin/CMakeLists.txt
13 | +++ b/gstreamer-plugin/CMakeLists.txt
14 | @@ -10,10 +10,12 @@ find_package(PkgConfig REQUIRED)
15 | pkg_check_modules(GSTREAMER REQUIRED gstreamer-1.0>=1.8)
16 | pkg_check_modules(GSTREAMER_BASE REQUIRED gstreamer-base-1.0>=1.8)
17 | pkg_check_modules(GSTREAMER_VIDEO REQUIRED gstreamer-video-1.0>=1.8)
18 | +pkg_check_modules(GSTREAMER_PBUTILS REQUIRED gstreamer-pbutils-1.0>=1.8)
19 | pkg_check_modules(SVT_HEVC REQUIRED SvtHevcEnc>=1.3)
20 | include_directories(${GSTREAMER_INCLUDE_DIRS}
21 | ${GSTREAMER_BASE_INCLUDE_DIRS}
22 | ${GSTREAMER_VIDEO_INCLUDE_DIRS}
23 | + ${GSTREAMER_PBUTILS_INCLUDE_DIRS}
24 | ${SVT_HEVC_INCLUDE_DIRS})
25 |
26 | set(flags_to_test
27 | @@ -64,6 +66,7 @@ target_link_libraries(gstsvthevcenc
28 | ${GSTREAMER_LIBRARIES}
29 | ${GSTREAMER_BASE_LIBRARIES}
30 | ${GSTREAMER_VIDEO_LIBRARIES}
31 | + ${GSTREAMER_PBUTILS_LIBRARIES}
32 | ${SVT_HEVC_LIBRARIES})
33 |
34 | if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
35 | --
36 | 2.7.4
37 |
38 |
--------------------------------------------------------------------------------
/mers/index.rst:
--------------------------------------------------------------------------------
1 | .. _mers:
2 |
3 | Media Reference Stack
4 | #####################
5 |
6 | Maximize performance of Video on Demand to live broadcasting with container images tuned for hardware acceleration for Intel Platforms. The Media Reference Stack is designed to accelerate offline and live media processing, analytics, and inference recommendations for real-world use cases such as smart city applications, immersive media enhancement, video surveillance, and product placement.
7 |
8 |
9 | .. figure:: ../_figures/mers_01.png
10 | :scale: 100%
11 | :alt: Media Reference Stack
12 |
13 | Overview
14 | ********
15 |
16 | .. toctree::
17 | :maxdepth: 1
18 |
19 | README.md
20 | releasenotes.md
21 | terms_of_use.md
22 | CONTRIBUTING.md
23 | AUTHORS.md
24 | LICENSES.md
25 |
26 | Guide
27 | *****
28 |
29 | .. toctree::
30 | :maxdepth: 1
31 |
32 | mers.rst
33 |
34 |
35 | Ubuntu\* Releases
36 | *****************
37 |
38 | .. toctree::
39 | :maxdepth: 1
40 |
41 | ubuntu/INSTALL.md
42 | NEWS.md
43 | BUGS.md
44 | CHANGELOG.md
45 |
46 | Deprecated Releases
47 | *******************
48 |
49 | .. toctree::
50 | :maxdepth: 1
51 |
52 | deprecated/clearlinux/INSTALL.md
53 |
--------------------------------------------------------------------------------
/mers/ubuntu/INSTALL.md:
--------------------------------------------------------------------------------
1 | # Media Reference Stack - Ubuntu*
2 |
3 | The Media Reference Stack (MeRS) is a highly optimized software stack for Intel® Architecture Processors (the CPU) and Intel® Processor Graphics (the GPU) to enable media prioritized transcode and analytics workloads, such as smart city applications, immersive media enhancement, video
4 | surveillance, and product placement.
5 |
6 | ## Building container image
7 |
8 | The Dockerfiles for all Intel System Stacks container images are available at
9 | [stacks repository](https://github.com/intel/stacks). These can be used to
10 | build and modify the media image at [MeRS](https://github.com/intel/stacks/tree/master/mers/ubuntu)
11 |
12 | ```bash
13 | docker build --no-cache -t sysstacks/mers-ubuntu .
14 | ```
15 |
16 | > **Note:**
17 | Default build args in Docker are documented at: https://docs.docker.com/engine/reference/builder/#arg
18 |
19 | ## Getting MeRS pre-built image
20 |
21 | In case you want to pull the image directly instead of building it, official
22 | images are located on Docker Hub and can be pulled with the following command:
23 |
24 | ```bash
25 | docker pull sysstacks/mers-ubuntu
26 | ```
27 |
28 | ## Running the Media Container
29 |
30 | Once you have the Media Reference Stack image, run it with
31 |
32 | ```bash
33 | docker run -it --rm \
34 | sysstacks/mers-ubuntu
35 | ```
36 |
37 | ## Run examples
38 |
39 | For transcoding and video analytics examples and pipelines, please see the
40 | official documentation on the Intel® oneContainer Portal at the [Get Started Guide](https://software.intel.com/content/www/us/en/develop/articles/containers/media-reference-stack-on-ubuntu.html?wapkw=mers)
41 |
42 |
43 | ## Reporting Security Issues
44 |
45 | If you have discovered potential security vulnerability in an Intel product,
46 | please contact the iPSIRT at secure@intel.com.
47 |
48 | It is important to include the following details:
49 |
50 | * The products and versions affected
51 | * Detailed description of the vulnerability
52 | * Information on known exploits
53 |
54 | Vulnerability information is extremely sensitive. The iPSIRT strongly recommends
55 | that all security vulnerability reports sent to Intel be encrypted using the
56 | iPSIRT PGP key. The PGP key is available here:
57 | https://www.intel.com/content/www/us/en/security-center/pgp-public-key.html
58 |
59 | Software to encrypt messages may be obtained from:
60 |
61 | * PGP Corporation
62 | * GnuPG
63 |
64 | For more information on how Intel works to resolve security issues, see:
65 | [Vulnerability handling guidelines](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html)
66 |
67 | ## LEGAL NOTICE
68 |
69 | By accessing, downloading or using this software and any required dependent software (the “Software Package”), you agree to the terms and conditions of the software license agreements for the Software Package, which may also include notices, disclaimers, or license terms for third party software included with the Software Package. Please refer to (https://github.com/intel/stacks/blob/master/mers/LICENSES.md).
70 |
71 | Intel and the Intel logo are trademarks of Intel Corporation or its subsidiaries
72 |
73 | *Other names and brands may be claimed as the property of others.*
74 |
--------------------------------------------------------------------------------
/mers/ubuntu/aom-patches/stacks_mers-v3-include-aom.diff:
--------------------------------------------------------------------------------
1 | diff --git a/Dockerfile b/Dockerfile
2 | index 33cba21..100e19c 100644
3 | --- a/Dockerfile
4 | +++ b/Dockerfile
5 | @@ -218,6 +218,19 @@ RUN mkdir -p /usr/local/include/mfx && \
6 | mkdir -p /usr/local/lib/x86_64-linux-gnu/pkgconfig && \
7 | cp -a /home/build//usr/local/lib/pkgconfig/*mfx*.pc /usr/local/lib/x86_64-linux-gnu/pkgconfig/
8 |
9 | +ARG AOM_VER=9d68e635c0de52ac4a56ab296f0ff2f45ea00fba
10 | +ARG AOM_REPO=https://aomedia.googlesource.com/aom
11 | +
12 | +RUN \
13 | + git clone ${AOM_REPO} && \
14 | + mkdir aom/aom_build && \
15 | + cd aom/aom_build && \
16 | + git checkout ${AOM_VER} && \
17 | + cmake -DBUILD_SHARED_LIBS=ON -DENABLE_NASM=ON -DENABLE_TESTS=OFF -DENABLE_DOCS=OFF -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_INSTALL_LIBDIR=lib/x86_64-linux-gnu .. && \
18 | + make -j $(nproc) && \
19 | + make install DESTDIR=/home/build && \
20 | + make install
21 | +
22 | # FFMPEG_DOT=OS;color=red,style=dashed
23 | ARG FFMPEG_VER=7800cc6e82068c6dfb5af53817f03dfda794c568
24 | ARG FFMPEG_REPO=https://github.com/FFmpeg/FFmpeg.git
25 | @@ -240,7 +253,7 @@ ARG MERS_ENABLE_ENCODERS=libsvt_hevc,libsvt_av1,libx264,hevc_qsv,h264_qsv,henv_v
26 | # FFMPEG_LIBDAV1D_DEC_DOT=FFMPEG,DAV1D;color=red
27 | # FFMPEG_AAC_DEC_DOT=FFMPEG;color=red
28 | # FFMPEG_MP3_DEC_DOT=FFMPEG;color=red
29 | -ARG MERS_ENABLE_DECODERS=h264,hevc,hevc_qsv,h264_qsv,libdav1d,aac,mp3
30 | +ARG MERS_ENABLE_DECODERS=h264,hevc,libaom_av1,hevc_qsv,h264_qsv,libdav1d,aac,mp3
31 |
32 | # FFMPEG_MP4_MUXER_DOT=FFMPEG;color=red
33 | # FFMPEG_HLS_MUXER_DOT=FFMPEG;color=red
34 | @@ -262,7 +275,7 @@ ARG MERS_ENABLE_DEMUXERS=rtsp,dash,mpegts,avi,webm
35 | # FFMPEG_H264_PARSER_DOT=FFMPEG;color=red
36 | ARG MERS_ENABLE_PARSERS=h264
37 |
38 | -ARG MERS_ENABLES="--enable-libsvthevc --enable-libsvtav1 --enable-nonfree --enable-gpl --enable-libx264 --enable-libdav1d "
39 | +ARG MERS_ENABLES="--enable-libsvthevc --enable-libsvtav1 --enable-nonfree --enable-gpl --enable-libx264 --enable-libdav1d --enable-libaom"
40 | ARG MERS_OTHERS="--enable-ffprobe"
41 |
42 | RUN apt-get update && apt-get install -y -q --no-install-recommends libass-dev libfreetype6-dev libsdl2-dev libxcb1-dev libxcb-shm0-dev libxcb-xfixes0-dev zlib1g-dev libssl-dev libvdpau-dev librtmp-dev libv4l-dev libvorbis-dev libvpx-dev
43 |
--------------------------------------------------------------------------------
/mers/ubuntu/scripts/docker-healthcheck:
--------------------------------------------------------------------------------
#!/bin/bash

# Container healthcheck: healthy (exit 0) iff at least one bash process is
# currently running inside the container, unhealthy (exit 1) otherwise.
if [[ -n "$(ps -C bash -o pid=)" ]]; then
    exit 0
else
    exit 1
fi
--------------------------------------------------------------------------------
/mers/ubuntu/scripts/entrypoint.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
# Entrypoint for the MeRS (Ubuntu) container image.
# $0 is the script name; $1 is the sub-command, remaining args pass through.

case "$1" in
  "help" )
    # Display what user will see as help
    echo "Execute docker run -it to execute extra arguments.
E.g. docker run -it ffmpeg -help && gst-launch-1.0 --version"
  ;;
  "start" )
    # we can modify files here, using ENV variables passed in
    # "docker create" command. It can't be done during build process.
    echo "Use help to get more available options. E.g. docker run help"
    exec /bin/bash
  ;;

  * )
    # Run custom command. Thanks to this line we can still use
    # "docker run this-image /bin/bash" and it will work, or any other command.
    exec "$@"
  ;;
esac
24 |
--------------------------------------------------------------------------------
/oneContainer/index.rst:
--------------------------------------------------------------------------------
1 | .. _onecontainer:
2 |
3 | oneContainer Resources
4 | ######################
5 |
6 |
7 |
8 | .. toctree::
9 | :maxdepth: 1
10 |
11 |
12 | oneContainer API
13 | ****************
14 | OneContainer-API is a platform to enable unified APIs for containerized services and backends in multiple segments like AI, Database, and Media.
15 |
16 | `oneContainer API`
17 |
18 | oneContainer Templates
19 | **********************
20 | This project contains a collection of component build & install m4 templates which can be used as ingredients of Docker images. You will find template definitions for key Intel software and popular frameworks.
21 |
22 | GitHub Repo
23 |
24 | oneContainer Cloud Tool
25 | ***********************
26 | The oneContainer-cloud-tool utility helps to deploy containers to public cloud services. The tool enables a user to map a service to specific hardware and machine image of choice.
27 |
28 | GitHub Repo
29 |
--------------------------------------------------------------------------------
/perf.rst:
--------------------------------------------------------------------------------
1 | .. _perf:
2 |
3 | Performance and Benchmarks
4 | ##########################
5 |
6 | Each stack is fine tuned for Intel® architecture (IA) to provide significant advantages in performance. Here you'll find the performance benchmark results and release announcements for different releases.
7 |
8 |
9 |
10 | .. figure:: _figures/stacks_logo.png
11 | :scale: 80%
12 | :alt: System Stacks for Linux OS logo
13 |
14 |
15 | .. toctree::
16 | :hidden:
17 |
18 |
19 | Deep Learning Reference Stack
20 | *****************************
21 |
22 | * `DLRS V0.9.0`_ Performance and Gains
23 | * `DLRS V0.8.0`_ Performance and Gains
24 | * `DLRS V0.7.0`_ Performance and Gains
25 | * `DLRS V0.6.0`_ Performance and Gains
26 | * `DLRS V0.5.0`_ Performance and Gains
27 |
28 |
29 | High Performance Computing Reference Stack
30 | ******************************************
31 |
32 | * `HPCRS V0.3.0`_ Performance and Gains
33 |
34 | Data Services Reference Stack
35 | *****************************
36 |
37 | * `DSRS V0.2.0`_ Performance and Gains
38 |
39 |
40 | Media Reference Stack
41 | *********************
42 |
43 | * `MeRS V0.4.0`_ Performance and Gains
44 | * `MeRS V0.3.0`_ Performance and Gains
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 | .. _DLRS V0.5.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v5-0-now-available.html
53 |
54 | .. _DLRS V0.6.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v6-0-now-available.html
55 |
56 | .. _DLRS V0.7.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v7-0-now-available.html
57 |
58 | .. _DLRS V0.8.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v8-0-now-available.html
59 |
60 | .. _DLRS V0.9.0: https://software.intel.com/content/www/us/en/develop/articles/deep-learning-reference-stack-v9-0-now-available.html
61 |
62 | .. _HPCRS V0.3.0: https://software.intel.com/content/www/us/en/develop/articles/high-performance-computing-reference-stack-v3-0.html
63 |
64 | .. _MeRS V0.3.0: https://software.intel.com/content/www/us/en/develop/articles/media-reference-stack-v3-0-now-available.html
65 |
66 | .. _MeRS V0.4.0: https://software.intel.com/content/www/us/en/develop/articles/media-reference-stack-v4-0-now-available.html
67 |
68 | .. _DSRS V0.2.0: https://software.intel.com/content/www/us/en/develop/articles/data-service-reference-stack-v2-now-available.html
69 |
70 |
--------------------------------------------------------------------------------
/processmd.py:
--------------------------------------------------------------------------------
import os
import os.path
from pathlib import Path
from shutil import copyfile  # NOTE(review): only needed by the disabled copy step below

# Extension of the files we collect and the flat output directory they
# would be copied into.
fileExtension = ".md"
files = []          # flattened target filenames, accumulated across calls
buildDir = "_build"


def getFiles():
    """Walk the current tree and record every Markdown file in ``files``.

    Each file is recorded under a flattened name ``<parentdir>-<name>.md``
    (top-level files keep their original name) so files from different
    directories cannot collide inside the flat build directory.  Each
    matching source path is printed for visibility.

    Returns the module-level ``files`` list for convenience.
    """
    # Make sure the output directory exists before any copy step runs.
    Path(buildDir).mkdir(parents=True, exist_ok=True)
    for dirpath, _dirnames, filenames in os.walk("."):
        # Skip anything already inside the build directory (hoisted out of
        # the per-file loop; the original re-checked this for every file).
        if buildDir in dirpath:
            continue
        for filename in filenames:
            if not filename.endswith(fileExtension):
                continue
            lastDirName = os.path.split(dirpath)[1]
            if lastDirName == ".":
                newFileName = filename  # top-level file: keep its name
            else:
                newFileName = lastDirName + "-" + filename
            print(os.path.join(dirpath, filename))
            # NOTE(review): the actual copy into buildDir is intentionally
            # disabled; re-enable with copyfile(...) when the build needs it.
            files.append(newFileName)
    return files


getFiles()
28 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 |
3 | sphinx_rtd_theme
4 |
5 | recommonmark
6 |
7 | sphinx-markdown-tables
8 |
9 | sphinx-tabs
10 |
--------------------------------------------------------------------------------