├── .all-contributorsrc
├── .github
└── workflows
│ └── ci.yml
├── .gitignore
├── CHANGELOG.md
├── Dockerfile
├── LICENSE
├── README.md
├── code_runner.Dockerfile
├── engine
├── __init__.py
├── api.py
├── code_runner.py
├── docker_util.py
├── glue_code.jl
├── glue_code.js
├── logging.ini
├── run_c.py
├── run_jl.py
├── run_js.py
├── run_py.py
└── util.py
├── requirements.txt
├── run_lovelace_engine.bash
├── setup.cfg
└── tests
├── conftest.py
├── dummy_solutions
├── chaos_84.js
├── infinite_loop.py
└── memory_explosion.py
├── helpers.py
├── test_c_solutions.py
├── test_dummy_solutions.py
├── test_javascript_solutions.py
├── test_julia_solutions.py
└── test_python_solutions.py
/.all-contributorsrc:
--------------------------------------------------------------------------------
1 | {
2 | "files": [
3 | "README.md"
4 | ],
5 | "imageSize": 100,
6 | "commit": false,
7 | "contributors": [
8 | {
9 | "login": "ali-ramadhan",
10 | "name": "Ali Ramadhan",
11 | "avatar_url": "https://avatars.githubusercontent.com/u/20099589?v=4",
12 | "profile": "http://aliramadhan.me",
13 | "contributions": [
14 | "code",
15 | "infra",
16 | "test"
17 | ]
18 | },
19 | {
20 | "login": "basimr",
21 | "name": "br",
22 | "avatar_url": "https://avatars.githubusercontent.com/u/9298270?v=4",
23 | "profile": "https://github.com/basimr",
24 | "contributions": [
25 | "code",
26 | "ideas",
27 | "test"
28 | ]
29 | },
30 | {
31 | "login": "benallan",
32 | "name": "benallan",
33 | "avatar_url": "https://avatars.githubusercontent.com/u/12690582?v=4",
34 | "profile": "https://github.com/benallan",
35 | "contributions": [
36 | "code",
37 | "ideas",
38 | "test"
39 | ]
40 | },
41 | {
42 | "login": "BinaryFissionGames",
43 | "name": "Brandon Johnson",
44 | "avatar_url": "https://avatars.githubusercontent.com/u/10042942?v=4",
45 | "profile": "https://github.com/BinaryFissionGames",
46 | "contributions": [
47 | "code",
48 | "bug"
49 | ]
50 | }
51 | ],
52 | "contributorsPerLine": 7,
53 | "projectName": "lovelace-engine",
54 | "projectOwner": "project-lovelace",
55 | "repoType": "github",
56 | "repoHost": "https://github.com",
57 | "skipCi": true
58 | }
59 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: tests
2 |
3 | on:
4 | - push
5 | - pull_request
6 |
7 | jobs:
8 | test:
9 | runs-on: ubuntu-latest
10 | strategy:
11 | matrix:
12 | python-version: [3.7, 3.8, 3.9]
13 |
14 | steps:
15 | - uses: actions/checkout@v2
16 |
17 | - name: Set up Python ${{ matrix.python-version }}
18 | uses: actions/setup-python@v2
19 | with:
20 | python-version: ${{ matrix.python-version }}
21 |
22 | - name: Install Python dependencies
23 | run: |
24 | pip install --upgrade pip
25 | pip install -r requirements.txt
26 |
27 | - name: Build code runner container
28 | run: docker build -f code_runner.Dockerfile .
29 |
30 | # We need BuildKit to use secrets: https://docs.docker.com/develop/develop-images/build_enhancements/#to-enable-buildkit-builds
31 | - name: Build Docker container
32 | run: |
33 | echo ${{ secrets.LOVELACE_GITHUB_TOKEN }} > token.txt
34 | DOCKER_BUILDKIT=1 docker build -t lovelace-engine --secret id=token,src=token.txt .
35 |
36 | - name: Run Docker container
37 | run: |
38 | docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 14714:14714 lovelace-engine
39 | docker ps -a
40 |
41 | - name: Clone lovelace-problems
42 | run: |
43 | git clone https://github.com/project-lovelace/lovelace-problems.git
44 | ln -s lovelace-problems/problems/ problems
45 |
46 | - name: Clone lovelace-solutions
47 | run: |
48 | git clone https://${{ secrets.LOVELACE_GITHUB_TOKEN }}@github.com/project-lovelace/lovelace-solutions.git
49 | ln -s lovelace-solutions/python/ solutions
50 |
51 | - name: Wait for Docker container to spin up
52 | run: sleep 300
53 |
54 | - name: Run tests
55 | run: pytest --capture=no --verbose tests/
56 | env:
57 | LOVELACE_SOLUTIONS_DIR: ./lovelace-solutions/
58 | LOVELACE_PROBLEMS_DIR: ./lovelace-problems/
59 |
60 | deploy:
61 | name: Push Docker image to Docker Hub
62 | runs-on: ubuntu-latest
63 | needs: test
64 | if: github.ref == 'refs/heads/main'
65 | steps:
66 | - uses: actions/checkout@v2
67 |
68 | - name: Set up Python 3.7
69 | uses: actions/setup-python@v2
70 | with:
71 | python-version: 3.7
72 |
73 | - name: Install Python dependencies
74 | run: |
75 | pip install --upgrade pip
76 | pip install -r requirements.txt
77 |
78 | - name: Build Docker container
79 | run: |
80 | echo ${{ secrets.LOVELACE_GITHUB_TOKEN }} > token.txt
81 | DOCKER_BUILDKIT=1 docker build -t lovelace-engine --secret id=token,src=token.txt .
82 |
83 | - name: Log in to DockerHub
84 | run: echo ${{ secrets.DOCKER_PASSWORD }} | docker login --username ${{ secrets.DOCKER_USERNAME }} --password-stdin
85 |
86 | - name: docker images
87 | run: docker images
88 |
89 | - name: Push to DockerHub
90 | run: |
91 | docker tag lovelace-engine projectlovelace/lovelace-engine
92 | docker push projectlovelace/lovelace-engine
93 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Byte-compiled / optimized / DLL files
3 | __pycache__/
4 | *.py[cod]
5 | *$py.class
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | env/
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # IPython Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # dotenv
80 | .env
81 |
82 | # virtualenv
83 | .venv/
84 | venv/
85 | ENV/
86 | env/
87 |
88 | # Spyder project settings
89 | .spyderproject
90 |
91 | # Rope project settings
92 | .ropeproject
93 |
94 | # Sensitive problem files
95 | problems
96 | tutorials
97 | resources
98 |
99 | # IDE files
100 | .idea
101 |
102 | # Symlinks and other repos
103 | lovelace-solutions
104 | lovelace-problems
105 | solutions
106 | problems
107 |
108 | # Secrets
109 | token.txt
110 |
111 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## [v3.2.0](https://github.com/project-lovelace/lovelace-engine/tree/v3.2.0) (2021-04-11)
4 |
5 | [Full Changelog](https://github.com/project-lovelace/lovelace-engine/compare/v3.1.0...v3.2.0)
6 |
7 | **Fixed bugs:**
8 |
9 | - Fix issue where python processes run forever [\#23](https://github.com/project-lovelace/lovelace-engine/issues/23)
10 |
11 | **Merged pull requests:**
12 |
13 | - Bump urllib3 from 1.26.3 to 1.26.4 [\#80](https://github.com/project-lovelace/lovelace-engine/pull/80) ([dependabot[bot]](https://github.com/apps/dependabot))
14 | - Modify code runner Dockerfile to support ARM architecture [\#79](https://github.com/project-lovelace/lovelace-engine/pull/79) ([benallan](https://github.com/benallan))
15 | - No need to filter solutions in tests anymore [\#78](https://github.com/project-lovelace/lovelace-engine/pull/78) ([ali-ramadhan](https://github.com/ali-ramadhan))
16 |
17 | ## [v3.1.0](https://github.com/project-lovelace/lovelace-engine/tree/v3.1.0) (2021-03-27)
18 |
19 | [Full Changelog](https://github.com/project-lovelace/lovelace-engine/compare/v3.0.0...v3.1.0)
20 |
21 | **Merged pull requests:**
22 |
23 | - Run CI tests with Python 3.9 [\#77](https://github.com/project-lovelace/lovelace-engine/pull/77) ([ali-ramadhan](https://github.com/ali-ramadhan))
24 |
25 | ## [v3.0.0](https://github.com/project-lovelace/lovelace-engine/tree/v3.0.0) (2021-03-24)
26 |
27 | [Full Changelog](https://github.com/project-lovelace/lovelace-engine/compare/v2.0...v3.0.0)
28 |
29 | **Fixed bugs:**
30 |
31 | - Engine stalls when submitted code returns None. [\#19](https://github.com/project-lovelace/lovelace-engine/issues/19)
32 |
33 | **Closed issues:**
34 |
35 | - Switch from running code in LXC to Docker? [\#59](https://github.com/project-lovelace/lovelace-engine/issues/59)
36 | - Build script for lovelace-image [\#57](https://github.com/project-lovelace/lovelace-engine/issues/57)
37 | - Multiple gunicorn workers [\#52](https://github.com/project-lovelace/lovelace-engine/issues/52)
38 | - Turn into a real Python package? [\#51](https://github.com/project-lovelace/lovelace-engine/issues/51)
39 | - Tiny optimization: lxc file pull all output pickles at once [\#47](https://github.com/project-lovelace/lovelace-engine/issues/47)
40 | - Dynamic resources must all have different names [\#44](https://github.com/project-lovelace/lovelace-engine/issues/44)
41 | - Document setting up lovelace-engine as a systemd service [\#38](https://github.com/project-lovelace/lovelace-engine/issues/38)
42 | - Document symbolic links used [\#32](https://github.com/project-lovelace/lovelace-engine/issues/32)
43 | - Improve Julia runner performance [\#24](https://github.com/project-lovelace/lovelace-engine/issues/24)
44 | - Add support for C and C++ [\#20](https://github.com/project-lovelace/lovelace-engine/issues/20)
45 | - Engine must run users’ programs in parallel [\#12](https://github.com/project-lovelace/lovelace-engine/issues/12)
46 |
47 | **Merged pull requests:**
48 |
49 | - Bump urllib3 from 1.26.2 to 1.26.3 [\#76](https://github.com/project-lovelace/lovelace-engine/pull/76) ([dependabot[bot]](https://github.com/apps/dependabot))
50 | - Verify solutions from the engine [\#75](https://github.com/project-lovelace/lovelace-engine/pull/75) ([ali-ramadhan](https://github.com/ali-ramadhan))
51 | - Include input and user/expected output in test case details [\#74](https://github.com/project-lovelace/lovelace-engine/pull/74) ([ali-ramadhan](https://github.com/ali-ramadhan))
52 | - Delete Makefile [\#73](https://github.com/project-lovelace/lovelace-engine/pull/73) ([ali-ramadhan](https://github.com/ali-ramadhan))
53 | - Upgrade code runner to use Julia 1.5.3 [\#71](https://github.com/project-lovelace/lovelace-engine/pull/71) ([ali-ramadhan](https://github.com/ali-ramadhan))
54 | - Remove DockerHub badge [\#70](https://github.com/project-lovelace/lovelace-engine/pull/70) ([ali-ramadhan](https://github.com/ali-ramadhan))
55 | - Use secrets to deal with private solutions repo [\#69](https://github.com/project-lovelace/lovelace-engine/pull/69) ([ali-ramadhan](https://github.com/ali-ramadhan))
56 | - Nuke all remnants of LXC/LXD! [\#68](https://github.com/project-lovelace/lovelace-engine/pull/68) ([ali-ramadhan](https://github.com/ali-ramadhan))
57 | - Switch to GitHub Actions CI [\#67](https://github.com/project-lovelace/lovelace-engine/pull/67) ([ali-ramadhan](https://github.com/ali-ramadhan))
58 | - Cleanup [\#66](https://github.com/project-lovelace/lovelace-engine/pull/66) ([ali-ramadhan](https://github.com/ali-ramadhan))
59 | - Add Travis and Docker Hub badges [\#65](https://github.com/project-lovelace/lovelace-engine/pull/65) ([ali-ramadhan](https://github.com/ali-ramadhan))
60 | - Use docker containers instead of lxc/lxd [\#64](https://github.com/project-lovelace/lovelace-engine/pull/64) ([benallan](https://github.com/benallan))
61 | - Refactor engine tests to use pytest [\#63](https://github.com/project-lovelace/lovelace-engine/pull/63) ([benallan](https://github.com/benallan))
62 | - Faster julia runner [\#62](https://github.com/project-lovelace/lovelace-engine/pull/62) ([ali-ramadhan](https://github.com/ali-ramadhan))
63 | - Add test case to verify\_user\_solution call [\#60](https://github.com/project-lovelace/lovelace-engine/pull/60) ([benallan](https://github.com/benallan))
64 | - Bash script to build lovelace lxc image from scratch [\#58](https://github.com/project-lovelace/lovelace-engine/pull/58) ([ali-ramadhan](https://github.com/ali-ramadhan))
65 | - Dockerizing the engine [\#54](https://github.com/project-lovelace/lovelace-engine/pull/54) ([ali-ramadhan](https://github.com/ali-ramadhan))
66 | - C support [\#53](https://github.com/project-lovelace/lovelace-engine/pull/53) ([ali-ramadhan](https://github.com/ali-ramadhan))
67 |
68 | ## [v2.0](https://github.com/project-lovelace/lovelace-engine/tree/v2.0) (2019-07-22)
69 |
70 | [Full Changelog](https://github.com/project-lovelace/lovelace-engine/compare/v1.1...v2.0)
71 |
72 | **Fixed bugs:**
73 |
74 | - I think engine forgets to delete dynamic resources [\#36](https://github.com/project-lovelace/lovelace-engine/issues/36)
75 |
76 | **Closed issues:**
77 |
78 | - Merge the different runners? [\#46](https://github.com/project-lovelace/lovelace-engine/issues/46)
79 | - `make test` should glob for \*.py files [\#34](https://github.com/project-lovelace/lovelace-engine/issues/34)
80 | - Do not disable existing loggers when using logging.ini to configure a logger [\#42](https://github.com/project-lovelace/lovelace-engine/issues/42)
81 | - Performance benchmarking [\#28](https://github.com/project-lovelace/lovelace-engine/issues/28)
82 | - Execute test cases in batches [\#25](https://github.com/project-lovelace/lovelace-engine/issues/25)
83 | - Engine should store temporary files in /tmp [\#22](https://github.com/project-lovelace/lovelace-engine/issues/22)
84 |
85 | **Merged pull requests:**
86 |
87 | - Actually do something if user returns None [\#49](https://github.com/project-lovelace/lovelace-engine/pull/49) ([ali-ramadhan](https://github.com/ali-ramadhan))
88 | - Merge runners into a single CodeRunner [\#48](https://github.com/project-lovelace/lovelace-engine/pull/48) ([ali-ramadhan](https://github.com/ali-ramadhan))
89 | - Faster engine that executes test cases all at once [\#45](https://github.com/project-lovelace/lovelace-engine/pull/45) ([ali-ramadhan](https://github.com/ali-ramadhan))
90 | - Show user an error when no code is provided [\#43](https://github.com/project-lovelace/lovelace-engine/pull/43) ([ali-ramadhan](https://github.com/ali-ramadhan))
91 | - Cleanup on\_post and simple\_lxd [\#41](https://github.com/project-lovelace/lovelace-engine/pull/41) ([ali-ramadhan](https://github.com/ali-ramadhan))
92 | - Better errors [\#39](https://github.com/project-lovelace/lovelace-engine/pull/39) ([ali-ramadhan](https://github.com/ali-ramadhan))
93 | - Bypass TEST\_CASE\_TYPE\_ENUM global const for problem modules [\#37](https://github.com/project-lovelace/lovelace-engine/pull/37) ([ali-ramadhan](https://github.com/ali-ramadhan))
94 | - Properly glob files to feed into engine [\#35](https://github.com/project-lovelace/lovelace-engine/pull/35) ([ali-ramadhan](https://github.com/ali-ramadhan))
95 | - Time tests for performance benchmarking [\#31](https://github.com/project-lovelace/lovelace-engine/pull/31) ([ali-ramadhan](https://github.com/ali-ramadhan))
96 | - Select between dev and production paths [\#29](https://github.com/project-lovelace/lovelace-engine/pull/29) ([ali-ramadhan](https://github.com/ali-ramadhan))
97 | - Julia support [\#18](https://github.com/project-lovelace/lovelace-engine/pull/18) ([ali-ramadhan](https://github.com/ali-ramadhan))
98 |
99 | ## [v1.1](https://github.com/project-lovelace/lovelace-engine/tree/v1.1) (2019-01-04)
100 |
101 | [Full Changelog](https://github.com/project-lovelace/lovelace-engine/compare/v1.0...v1.1)
102 |
103 | **Closed issues:**
104 |
105 | - Add support for Julia [\#13](https://github.com/project-lovelace/lovelace-engine/issues/13)
106 | - Augment the Python Runner with the ability to use either Python 2 or 3 [\#10](https://github.com/project-lovelace/lovelace-engine/issues/10)
107 | - Set up automated deployment of the Engine [\#6](https://github.com/project-lovelace/lovelace-engine/issues/6)
108 |
109 | ## [v1.0](https://github.com/project-lovelace/lovelace-engine/tree/v1.0) (2018-11-26)
110 |
111 | [Full Changelog](https://github.com/project-lovelace/lovelace-engine/compare/a61bdfecd254ddc1134e193a6ef4a7c3e314f2d8...v1.0)
112 |
113 | **Closed issues:**
114 |
115 | - Add support for Fortran [\#14](https://github.com/project-lovelace/lovelace-engine/issues/14)
116 | - Engine must run users' code securely [\#11](https://github.com/project-lovelace/lovelace-engine/issues/11)
117 | - Setup a custom global logger. [\#8](https://github.com/project-lovelace/lovelace-engine/issues/8)
118 | - Accurate reporting of CPU time and RAM usage by the engine. [\#7](https://github.com/project-lovelace/lovelace-engine/issues/7)
119 | - As a user, I want my submissions to undergo static Python code analysis. [\#5](https://github.com/project-lovelace/lovelace-engine/issues/5)
120 | - As a developer, I would like to have a Makefile to automate setting up my development environment [\#4](https://github.com/project-lovelace/lovelace-engine/issues/4)
121 | - Allow engine clients to send code for execution [\#3](https://github.com/project-lovelace/lovelace-engine/issues/3)
122 | - Decide: what kind of format should we use for inputs and outputs? [\#2](https://github.com/project-lovelace/lovelace-engine/issues/2)
123 | - Decide: should we expect submitted code to read input from a file, command line, or both? [\#1](https://github.com/project-lovelace/lovelace-engine/issues/1)
124 |
125 | **Merged pull requests:**
126 |
127 | - Alpha2018 [\#17](https://github.com/project-lovelace/lovelace-engine/pull/17) ([ali-ramadhan](https://github.com/ali-ramadhan))
128 | - Secure code execution via lxc [\#16](https://github.com/project-lovelace/lovelace-engine/pull/16) ([ali-ramadhan](https://github.com/ali-ramadhan))
129 | - Small fixes to get the Engine to run in production [\#15](https://github.com/project-lovelace/lovelace-engine/pull/15) ([basimr](https://github.com/basimr))
130 | - Automated virtual env setup for development with a Makefile [\#9](https://github.com/project-lovelace/lovelace-engine/pull/9) ([basimr](https://github.com/basimr))
131 |
132 |
133 |
134 | \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
135 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax = docker/dockerfile:1.0-experimental
2 |
3 | FROM python:3.7
4 | LABEL maintainer="project.ada.lovelace@gmail.com"
5 |
6 | WORKDIR /engine/
7 |
8 | # Install dependencies
9 | RUN apt-get update \
10 | && apt-get install -y --no-install-recommends \
11 | docker.io \
12 | && rm -rf /var/lib/apt/lists/*
13 |
14 | # Install Python dependencies
15 | RUN pip install --upgrade pip
16 | COPY requirements.txt /engine/requirements.txt
17 | RUN pip install -r requirements.txt
18 |
19 | COPY . /engine/
20 |
21 | RUN git clone https://github.com/project-lovelace/lovelace-problems.git /lovelace-problems/
22 | RUN ln -s /lovelace-problems/problems/ problems
23 | RUN ln -s /lovelace-problems/resources/ resources
24 |
25 | RUN --mount=type=secret,id=token git clone https://`cat /run/secrets/token`@github.com/project-lovelace/lovelace-solutions.git /lovelace-solutions/
26 | RUN ln -s /lovelace-solutions/python/ solutions
27 | RUN ln -s /lovelace-solutions/python/ /lovelace-problems/problems/solutions
28 |
29 | EXPOSE 14714
30 |
31 | # https://pythonspeed.com/articles/gunicorn-in-docker/
32 | # https://docs.gunicorn.org/en/stable/faq.html#how-do-i-avoid-gunicorn-excessively-blocking-in-os-fchmod
33 | CMD gunicorn --worker-tmp-dir /dev/shm --workers 1 --log-level debug --timeout 600 --preload --reload --bind 0.0.0.0:14714 engine.api:app
34 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Project Lovelace
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Project Lovelace engine
2 |
3 | [](#contributors-)
4 |
5 |
6 | [](https://github.com/project-lovelace/lovelace-engine/actions/workflows/ci.yml)
7 |
8 | Project Lovelace's automated tester of code submissions for computational science education.
9 |
10 | ## Contributors ✨
11 |
12 | Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
13 |
14 |
15 |
16 |
17 |
25 |
26 |
27 |
28 |
29 |
30 |
31 | This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
32 |
--------------------------------------------------------------------------------
/code_runner.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7-slim
2 |
3 | WORKDIR /root
4 |
5 | COPY ./requirements.txt /root/requirements.txt
6 |
7 | ENV PYTHONIOENCODING=utf-8
8 | ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'
9 |
10 | RUN apt-get update &&\
11 | apt-get install -y build-essential curl wget nodejs gnupg
12 |
13 | RUN pip install --upgrade pip &&\
14 | pip install -r requirements.txt
15 |
16 | # Install Julia using the Jill installer script to make sure we get the proper version for this platform
17 | ENV PATH="/usr/local/bin:${PATH}"
18 | RUN pip install jill
19 | RUN jill install 1.5.3 --upstream Official --confirm
20 | RUN julia -e 'import Pkg; Pkg.add("JSON");'
21 |
22 | CMD ["tail", "-f", "/dev/null"]
23 |
--------------------------------------------------------------------------------
/engine/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-lovelace/lovelace-engine/cb4703fb0859a1bfc67f28cb0e08b5e749d23cda/engine/__init__.py
--------------------------------------------------------------------------------
/engine/api.py:
--------------------------------------------------------------------------------
1 | import atexit
2 | import base64
3 | import datetime
4 | import importlib
5 | import json
6 | import logging
7 | import os
8 | import shutil
9 | import subprocess
10 | import traceback
11 | import urllib
12 |
13 | import falcon
14 |
15 | import engine.util as util
16 |
17 | from engine.code_runner import CodeRunner, FilePushError, FilePullError, EngineExecutionError
18 | from engine.docker_util import (
19 | docker_init,
20 | docker_file_push,
21 | docker_file_pull,
22 | create_docker_container,
23 | remove_docker_container,
24 | )
25 |
26 |
27 | log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logging.ini")
28 | logging.config.fileConfig(log_file_path, disable_existing_loggers=False)
29 | logger = logging.getLogger(__name__)
30 |
31 | cwd = os.path.dirname(os.path.abspath(__file__))
32 | os.chdir(cwd)
33 |
34 |
35 | class SubmitResource:
36 |     def __init__(self):  # One long-lived container per worker process, reused for all submissions.
37 |         self.pid = os.getpid()  # worker PID, folded into the container name below
38 |         # self.container_image = "lovelace-image"
39 |         container_name = "lovelace-{:d}-{:s}".format(  # e.g. "lovelace-1234-20210411-153000"
40 |             self.pid, datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
41 |         )
42 |
43 |         # Start a container to use for all submissions
44 |         # TODO this container_name might not be unique!
45 |         self.container_id, self.container_name = create_docker_container(name=container_name)
46 |         logger.debug(
47 |             "Docker container id: {}; name: {}".format(self.container_id, self.container_name)
48 |         )
49 |
50 |         atexit.register(remove_docker_container, self.container_id)  # best-effort cleanup on interpreter exit
51 |
52 | def on_post(self, req, resp):
53 | payload = req.media
54 |
55 | code = payload["code"]
56 | language = payload["language"]
57 |
58 | if not code:
59 | resp_dict = {"error": "No code provided!"}
60 | resp.status = falcon.HTTP_400
61 | resp.set_header("Access-Control-Allow-Origin", "*")
62 | resp.body = json.dumps(resp_dict)
63 | return
64 |
65 | code_filename = write_code_to_file(code, language)
66 |
67 | try:
68 | # Fetch problem ID and load the correct problem module.
69 | problem_name = payload["problem"].replace("-", "_")
70 | problem_module = "problems.{:s}".format(problem_name)
71 |
72 | logger.debug(
73 | "Importing problem_name={:s} problem_module={:s}...".format(
74 | problem_name, problem_module
75 | )
76 | )
77 |
78 | problems = importlib.import_module("problems")
79 | problem = importlib.import_module(problem_module)
80 | except Exception:
81 | explanation = (
82 | "Could not import module {:s}. "
83 | "Returning HTTP 400 Bad Request due to possibly invalid JSON.".format(
84 | problem_module
85 | )
86 | )
87 | add_error_to_response(
88 | resp, explanation, traceback.format_exc(), falcon.HTTP_400, code_filename
89 | )
90 | return
91 |
92 | function_name = problem.FUNCTION_NAME
93 | problem_dir = problem_name
94 |
95 | # Copy static resources into engine directory and push them into the Linux container.
96 | static_resources = []
97 | for resource_file_name in problem.STATIC_RESOURCES:
98 | from_path = os.path.join(cwd, "..", "resources", problem_dir, resource_file_name)
99 | to_path = os.path.join(cwd, resource_file_name)
100 |
101 | logger.debug("Copying static resource from {:s} to {:s}".format(from_path, to_path))
102 |
103 | try:
104 | shutil.copyfile(from_path, to_path)
105 | except Exception:
106 | explanation = "Engine failed to copy a static resource. Returning falcon HTTP 500."
107 | add_error_to_response(
108 | resp, explanation, traceback.format_exc(), falcon.HTTP_500, code_filename
109 | )
110 | return
111 |
112 | static_resources.append(to_path)
113 |
114 | container_path = "/root/{:}".format(resource_file_name)
115 | logger.debug(
116 | "Pushing static resource to container {:}{:}".format(
117 | self.container_id, container_path
118 | )
119 | )
120 | _ = docker_file_push(self.container_id, from_path, container_path)
121 |
122 | if not problem.STATIC_RESOURCES:
123 | logger.debug("No static resources to push")
124 |
125 | logger.info("Generating test cases...")
126 | test_cases = []
127 |
128 | try:
129 | for i, test_type in enumerate(problem.TestCaseType):
130 | for j in range(test_type.multiplicity):
131 | logger.debug(
132 | "Generating test case {:d}: {:s} ({:d}/{:d})...".format(
133 | len(test_cases) + 1, str(test_type), j + 1, test_type.multiplicity
134 | )
135 | )
136 | test_cases.append(problem.generate_test_case(test_type))
137 | except Exception:
138 | explanation = "Engine failed to generate a test case. Returning falcon HTTP 500."
139 | add_error_to_response(
140 | resp, explanation, traceback.format_exc(), falcon.HTTP_500, code_filename
141 | )
142 | return
143 |
144 | # Copy over all the dynamic resources generated by the test cases.
145 | dynamic_resources = []
146 | for i, tc in enumerate(test_cases):
147 | if "DYNAMIC_RESOURCES" in tc.input:
148 | for dynamic_resource_filename in tc.input["DYNAMIC_RESOURCES"]:
149 | resource_path = os.path.join(
150 | cwd, "..", "resources", problem_dir, dynamic_resource_filename
151 | )
152 | destination_path = os.path.join(cwd, dynamic_resource_filename)
153 |
154 | logger.debug(
155 | "Copying test case resource from {:s} to {:s}...".format(
156 | resource_path, destination_path
157 | )
158 | )
159 |
160 | shutil.copyfile(resource_path, destination_path)
161 |
162 | dynamic_resources.append(resource_path)
163 | dynamic_resources.append(destination_path)
164 |
165 | container_path = "/root/{:}".format(dynamic_resource_filename)
166 | logger.debug(
167 | "Pushing dynamic resource to container {:}{:}".format(
168 | self.container_id, container_path
169 | )
170 | )
171 | _ = docker_file_push(self.container_id, resource_path, container_path)
172 |
173 | if not dynamic_resources:
174 | logger.debug("No dynamic resources to push")
175 |
176 | runner = CodeRunner(language)
177 |
178 | input_tuples = [tc.input_tuple() for tc in test_cases]
179 | output_tuples = [tc.output_tuple() for tc in test_cases]
180 | try:
181 | user_outputs, p_infos = runner.run(
182 | self.container_name, code_filename, function_name, input_tuples, output_tuples
183 | )
184 | except (FilePushError, FilePullError):
185 | explanation = "File could not be pushed to or pulled from docker container. Returning falcon HTTP 500."
186 | add_error_to_response(
187 | resp, explanation, traceback.format_exc(), falcon.HTTP_500, code_filename
188 | )
189 | return
190 |
191 | except EngineExecutionError:
192 | explanation = (
193 | "Return code from executing user code in docker container is nonzero. "
194 | "Returning falcon HTTP 400."
195 | )
196 | add_error_to_response(
197 | resp, explanation, traceback.format_exc(), falcon.HTTP_400, code_filename
198 | )
199 | return
200 |
201 | # Pull any user generated files.
202 | files_pulled = False
203 | for i, tc in enumerate(test_cases):
204 | if "USER_GENERATED_FILES" in tc.output:
205 | for user_generated_filename in tc.output["USER_GENERATED_FILES"]:
206 | container_filepath = "/root/{:s}".format(user_generated_filename)
207 |
208 | logger.debug(
209 | "Pulling user generated file from container {:s}{:s}".format(
210 | self.container_name, container_filepath
211 | )
212 | )
213 |
214 | _ = docker_file_pull(
215 | self.container_id, container_filepath, user_generated_filename
216 | )
217 | files_pulled = True
218 |
219 | if not files_pulled:
220 | logger.debug("No user generated files to pull")
221 |
222 | n_cases = len(test_cases)
223 | n_passes = 0 # Number of test cases passed.
224 | test_case_details = (
225 | []
226 | ) # List of dicts each containing the details of a particular test case.
227 |
228 | # Verify that user outputs are all correct (i.e. check whether each test case passes or fails).
229 | for input_tuple, user_output, p_info, tc in zip(
230 | input_tuples, user_outputs, p_infos, test_cases
231 | ):
232 |
233 | if isinstance(user_output, list):
234 | # user_output is a list. This could be a multiple-return, or a legitimate list return.
235 |                 # Here we will disambiguate depending on the output variables the problem requires
236 | if len(problem.OUTPUT_VARS) == 1:
237 |                     # Only one variable should be returned; thus, this is a "list return"
238 | user_output = (user_output,)
239 | else:
240 | # More than one variable should be returned, so this is a multiple return
241 | user_output = tuple(user_output)
242 |
243 | if user_output[0] is None:
244 | logger.debug(
245 | "Looks like user's function returned None: output={:}".format(user_output)
246 | )
247 | passed = False
248 | expected_output = "Your function returned None. It shouldn't do that."
249 | else:
250 | try:
251 | user_test_case = problem.ProblemTestCase(None, problem.INPUT_VARS, input_tuple, problem.OUTPUT_VARS, user_output)
252 | passed, correct_test_case = problems.test_case.test_case_solution_correct(tc, user_test_case, problem.ATOL, problem.RTOL)
253 | expected_output = correct_test_case.output_tuple()
254 | except Exception:
255 | explanation = "Internal engine error during user test case verification. Returning falcon HTTP 500."
256 | add_error_to_response(
257 | resp, explanation, traceback.format_exc(), falcon.HTTP_500, code_filename
258 | )
259 | return
260 |
261 | if passed:
262 | n_passes += 1
263 |
264 | test_case_details.append(
265 | {
266 | "testCaseType": tc.test_type.test_name,
267 | "input": input_tuple,
268 | "output": user_output,
269 | "expected": expected_output,
270 | "inputString": str(input_tuple),
271 | "outputString": str(user_output),
272 | "expectedString": str(expected_output),
273 | "passed": passed,
274 | "processInfo": p_info,
275 | }
276 | )
277 |
278 | if "DYNAMIC_RESOURCES" in tc.input:
279 | for dynamic_resource_path in dynamic_resources:
280 | logger.debug("Deleting dynamic resource: {:s}".format(dynamic_resource_path))
281 | util.delete_file(dynamic_resource_path)
282 |
283 | logger.info("Passed %d/%d test cases.", n_passes, n_cases)
284 |
285 | resp_dict = {
286 | "success": True if n_passes == n_cases else False,
287 | "numTestCases": n_cases,
288 | "numTestCasesPassed": n_passes,
289 | "testCaseDetails": test_case_details,
290 | }
291 |
292 | resp.status = falcon.HTTP_200
293 | resp.set_header("Access-Control-Allow-Origin", "*")
294 | resp.body = json.dumps(resp_dict)
295 |
296 | util.delete_file(code_filename)
297 | logger.debug("User code file deleted: {:s}".format(code_filename))
298 |
299 | for file_path in static_resources:
300 | logging.debug("Deleting static resource {:s}".format(file_path))
301 | util.delete_file(file_path)
302 |
303 |
def parse_payload(http_request):
    """
    Read and decode the JSON payload from a falcon HTTP request body.

    :param http_request: falcon Request object whose body stream contains UTF-8 encoded JSON.
    :return: the decoded JSON payload (typically a dict).
    :raises falcon.HTTPError: with status 400 if the body cannot be read or is not valid JSON.
    """
    try:
        raw_payload_data = http_request.stream.read().decode("utf-8")
    except Exception as ex:
        logger.error("Bad request, reason unknown. Returning 400.")
        # Bug fix: BaseException has no `message` attribute in Python 3, so the old
        # `ex.message` raised AttributeError instead of the intended HTTP 400.
        raise falcon.HTTPError(falcon.HTTP_400, "Error", str(ex))

    try:
        json_payload = json.loads(raw_payload_data)
    except ValueError:
        logger.error("Received invalid JSON: {:}".format(raw_payload_data))
        logger.error("Returning 400 error.")
        raise falcon.HTTPError(falcon.HTTP_400, "Invalid JSON", "Could not decode request body.")

    return json_payload
319 |
320 |
def write_code_to_file(code, language):
    """
    Write code into a file with the appropriate file extension.

    :param code: a base64 encoded string representing the user's submitted source code
    :param language: the code's programming language ("python", "javascript", "julia", or "c")
    :return: the name of the file containing the user's code
    :raises ValueError: if the language is not one of the supported languages
    """
    decoded_code = str(base64.b64decode(code), "utf-8")

    extensions = {"python": ".py", "javascript": ".js", "julia": ".jl", "c": ".c"}
    if language not in extensions:
        # Fail fast with a clear message instead of passing extension=None downstream,
        # which previously produced a confusing error far from the actual cause.
        raise ValueError("Unsupported language: {:}".format(language))

    code_filename = util.write_str_to_file(decoded_code, extensions[language])

    logger.debug("User code saved in: {:s}".format(code_filename))

    return code_filename
336 |
337 |
def add_error_to_response(resp, explanation, tb, falcon_http_error_code, code_filename):
    """
    Modify the falcon HTTP response object with an error to be shown to the user. Also deletes the
    user's code as the engine cannot run it.

    :param resp: The falcon HTTP response object to be modified.
    :param explanation: A human-friendly explanation of the error.
    :param tb: Traceback string.
    :param falcon_http_error_code: Falcon HTTP error code to return.
    :param code_filename: Filepath to user code to be deleted.
    :return: nothing
    """
    logger.error(explanation)
    logger.error(tb)
    util.delete_file(code_filename)

    # Removed the unused `url_friendly_tb = urllib.parse.quote(tb)` local: it was computed
    # but never referenced anywhere in this function.

    DISCOURSE_LINK = 'https://discourse.projectlovelace.net/'
    EMAIL_LINK = (
        'ada@mg.projectlovelace.net'
    )

    NOTICE = (
        "A stacktrace should appear below with more information about this error which might help\n"
        "you debug your code. But if it's not your code then it might be our fault :( If this is a\n"
        "website error and you have the time, we'd really appreciate it if you could report this\n"
        "on Discourse (" + DISCOURSE_LINK + ") or via email (" + EMAIL_LINK + ").\n"
        "All the information is embedded in the email link so all you have to do is press send.\n"
        "Thanks so much!"
    )

    error_message = "{:s}\n\n{:s}\n\nError: {:}".format(explanation, NOTICE, tb)
    resp_dict = {"error": error_message}

    resp.status = falcon_http_error_code
    resp.set_header("Access-Control-Allow-Origin", "*")
    resp.body = json.dumps(resp_dict)
    return
379 |
380 |
# Build the docker image for code-test containers before the app serves any requests.
docker_init()

# WSGI entry point: a falcon application exposing a single /submit endpoint.
app = falcon.API()
app.add_route("/submit", SubmitResource())

# Last-resort handler: log any otherwise-unhandled exception with its traceback.
app.add_error_handler(Exception, lambda ex, req, resp, params: logger.exception(ex))
385 |
--------------------------------------------------------------------------------
/engine/code_runner.py:
--------------------------------------------------------------------------------
1 | import fileinput
2 | import json
3 | import logging
4 | import pickle
5 | import shutil
6 | import subprocess
7 | from subprocess import CalledProcessError
8 | from abc import ABCMeta, abstractmethod
9 |
10 | import docker
11 | import numpy as np
12 |
13 | import engine.util as util
14 | from engine.docker_util import docker_file_push, docker_file_pull, docker_execute
15 |
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 |
class FilePushError(Exception):
    """Raised when a required file cannot be copied into the docker container."""

    def __init__(self, message=""):
        # Default message allows `raise FilePushError()` (as CodeRunner.run does after a
        # failed push) without a TypeError from the missing positional argument.
        super().__init__(message)
23 |
24 |
class FilePullError(Exception):
    """Raised when a file cannot be copied out of the docker container."""

    def __init__(self, message):
        Exception.__init__(self, message)
28 |
29 |
class EngineExecutionError(Exception):
    """Raised when user code exits with a nonzero return code inside the container."""

    def __init__(self, message):
        Exception.__init__(self, message)
33 |
34 |
class EngineTimeoutError(Exception):
    """Raised when user code exceeds the execution time limit."""

    def __init__(self, message):
        Exception.__init__(self, message)
38 |
39 |
class AbstractRunner(metaclass=ABCMeta):
    """Abstract interface for objects that run user-submitted code against test-case inputs."""

    @abstractmethod
    def run(self, container_name, filename, function_name, input_tuples, output_tuples):
        """Execute `function_name` from `filename` once per input tuple and return the outputs."""
44 |
45 |
# In case we need to serialize a numpy.ndarray to JSON.
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as (nested) plain lists."""

    def default(self, obj):
        # Anything that is not an ndarray falls through to the base implementation,
        # which raises TypeError for unsupported types.
        if not isinstance(obj, np.ndarray):
            return super().default(obj)
        return obj.tolist()
52 |
53 |
class CodeRunner(AbstractRunner):
    """Runs user code for a given language inside a docker container.

    The runner serializes the test-case inputs to a scratch file, pushes the user's code
    plus a language-specific boilerplate run script into the container, executes the
    script, and pulls back one output pickle per test case.
    """

    def __init__(self, language):
        self.util_files = []        # Extra helper files to push alongside the user code.
        self.file_type = "pickle"   # Serialization format used for the input tuples.
        self.push_correct_output = False  # Whether correct outputs must be pushed too.

        if language == "python":
            self.run_script_filename = "run_py.py"
        elif language == "javascript":
            self.run_script_filename = "run_js.py"
        elif language == "julia":
            self.run_script_filename = "run_jl.py"
            self.file_type = "json"  # The Julia runner reads its inputs as JSON, not pickle.
        elif language == "c":
            self.run_script_filename = "run_c.py"
            # The C runner needs the correct outputs to infer argument/return ctypes.
            self.push_correct_output = True
        else:
            raise ValueError("CodeRunner does not support language={:}".format(language))

    @staticmethod
    def _delete_files(filenames):
        """Delete the given local scratch files (best effort, one by one)."""
        for fn in filenames:
            util.delete_file(fn)

    def run(self, container_id, code_filename, function_name, input_tuples, correct_output_tuples):
        """
        Execute the user's code in the container, once per input tuple.

        :param container_id: id of the docker container to run the code in.
        :param code_filename: local filename of the user's submitted code.
        :param function_name: name of the user-defined function to call.
        :param input_tuples: list of input tuples, one per test case.
        :param correct_output_tuples: expected outputs (pushed only for C submissions).
        :return: (user_outputs, process_infos) lists with one entry per test case.
        :raises FilePushError: if a required file cannot be copied into the container.
        :raises FilePullError: if an output file cannot be copied out of the container.
        :raises EngineTimeoutError: if the user's code exceeds the execution time limit.
        :raises EngineExecutionError: if the run script exits with a nonzero return code
            or the docker API fails during execution.
        """
        logger.info("Running {:s} with {:d} inputs...".format(code_filename, len(input_tuples)))

        run_id = code_filename.split(".")[0]

        # Serialize all the input tuples into one file (format depends on the language).
        if self.file_type == "pickle":
            input_file = "{:s}.input.pickle".format(run_id)
            with open(input_file, mode="wb") as f:
                logger.debug("Pickling input tuples in {:s}...".format(input_file))
                pickle.dump(input_tuples, file=f, protocol=pickle.HIGHEST_PROTOCOL)
        elif self.file_type == "json":
            input_file = "{:s}.input.json".format(run_id)
            with open(input_file, mode="w") as f:
                logger.debug("Pickling input tuples in {:s}...".format(input_file))
                json.dump(input_tuples, f, cls=NumpyEncoder)

        # Copy the relevant boilerplate run script into the current working directory.
        runner_file = "{:s}.run.py".format(run_id)
        shutil.copy(self.run_script_filename, runner_file)

        # Replace "$FUNCTION_NAME" in the run script with the actual function name to call
        # (as defined in the problem module).
        logger.debug("Replacing $FUNCTION_NAME->{:s} in {:s}...".format(function_name, runner_file))
        with fileinput.FileInput(runner_file, inplace=True) as f:
            for line in f:
                print(line.replace("$FUNCTION_NAME", function_name), end="")

        # Push all the files we need into the Linux container.
        required_files = [code_filename, runner_file, input_file]
        if self.push_correct_output:
            correct_output_pickle = "{:s}.correct.pickle".format(run_id)
            with open(correct_output_pickle, mode="wb") as f:
                logger.debug(
                    "Pickling correct output tuples in {:s}...".format(correct_output_pickle)
                )
                pickle.dump(correct_output_tuples, file=f, protocol=pickle.HIGHEST_PROTOCOL)

            required_files.append(correct_output_pickle)

        for file_name in required_files + self.util_files:
            source_path = file_name
            target_path = "/root/{:s}".format(file_name)

            try:
                docker_file_push(container_id, source_path, target_path)
            except subprocess.CalledProcessError as e:
                # If pushing a file fails then declutter remaining files and raise an exception.
                self._delete_files(required_files)
                # Bug fix: FilePushError requires a message; raising it bare used to fail
                # with a TypeError that masked the real error.
                raise FilePushError("Failed to push {:s} into container: {:}".format(file_name, e))

        # Tell the Linux container to execute the run script that will run the user's code.
        runner_path = "/root/{}".format(runner_file)
        command = ["python3", runner_path]

        logger.debug("Trying to execute function in docker...")
        try:
            exec_retval, exec_stdout = docker_execute(container_id, command)
        except docker.errors.APIError as e:
            # If we fail to connect through docker, clean up the files.
            self._delete_files(required_files)
            # Bug fix: exec_stdout is unbound when docker_execute raises, so report the
            # API error itself instead of referencing an undefined name.
            raise EngineExecutionError(str(e))

        # Or if the code failed to run properly, clean up the files.
        if exec_retval != 0:
            self._delete_files(required_files)

            # The `timeout` Linux command exits with return code 124 if the command times out.
            if exec_retval == 124:
                raise EngineTimeoutError("Your code took too long to run.")
            else:
                raise EngineExecutionError(exec_stdout)

        user_outputs = []
        process_infos = []

        # Read all the output that the user produced.
        # Each test case's output ends up in one output pickle file.
        for i, _ in enumerate(input_tuples):
            output_pickle = "{:s}.output{:d}.pickle".format(run_id, i)
            source_path = "/root/{:s}".format(output_pickle)
            target_path = output_pickle

            try:
                docker_file_pull(container_id, source_path, target_path)
            except CalledProcessError as e:
                self._delete_files(required_files)
                # Bug fix: pull_stdout is unbound when docker_file_pull raises; report the
                # subprocess error instead.
                raise FilePullError(str(e))

            with open(output_pickle, mode="rb") as f:
                output_dict = pickle.load(f)

            # exec_retval is always zero here (nonzero codes raise above); it is kept in
            # the dict for backwards compatibility with consumers of this info.
            p_info = {
                "return_value": exec_retval,
                "stdout": exec_stdout,
                "runtime": output_dict["runtime"],
                "max_mem_usage": output_dict["max_mem_usage"],
            }

            user_outputs.append(output_dict["user_output"])
            process_infos.append(p_info)

            logger.debug(
                "runtime: {:g} s, max_mem_usage: {:g} kB".format(
                    p_info["runtime"], p_info["max_mem_usage"]
                )
            )

            util.delete_file(output_pickle)

        logger.info("Finished running user code.")

        self._delete_files(required_files)

        return user_outputs, process_infos
194 |
--------------------------------------------------------------------------------
/engine/docker_util.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import subprocess
4 | from subprocess import CalledProcessError
5 |
6 | import docker
7 |
8 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
9 | logger = logging.getLogger(__name__)
10 |
11 |
def docker_init(client=None, image_name="lovelace-code-test"):
    """Build the docker image used for code test containers.

    :param client: optional docker client; one is created from the environment if omitted.
    :param image_name: tag to apply to the built image.

    Syntax to build docker image (from inside Dockerfile dir):
        docker build -t <image_name> .

    Syntax to build docker image (from OUTSIDE Dockerfile dir):
        docker build -t <image_name> -f /path/to/Dockerfile /path/to/docker_dir
    """
    if not client:
        client = docker.from_env()

    docker_dir = os.path.dirname(SCRIPT_DIR)
    logger.info(
        'Building docker image "{}" for code test containers in {}'.format(image_name, docker_dir)
    )

    try:
        # Bug fix: tag the image with the image_name argument instead of a hard-coded
        # string, so callers passing a custom image_name actually get that tag.
        image, logs = client.images.build(
            path=docker_dir, dockerfile="code_runner.Dockerfile", tag=image_name
        )
    except (docker.errors.BuildError, docker.errors.APIError):
        logger.error(
            "Failed to build docker image! Please check that docker is installed and that "
            "the engine has access to run docker commands."
        )
        raise
40 |
41 |
def create_docker_container(client=None, name=None, image_name="lovelace-code-test", remove=False):
    """Start a detached docker container for running user code.

    Equivalent shell command (as daemon): docker run -d --name <name> <image_name>

    Note: container name must be unique.

    :return: (container_id, container_name) of the started container.
    """
    client = client or docker.from_env()

    logger.info('Creating docker container "{}" from image "{}"'.format(name, image_name))

    # Resource limits for user code: at most 40% of one CPU and 512 MiB of memory.
    resource_limits = {
        "cpu_period": 100000,
        "cpu_quota": 40000,
        "mem_limit": "512m",
    }

    try:
        container = client.containers.run(
            image_name, detach=True, name=name, remove=remove, **resource_limits
        )
    except (docker.errors.ContainerError, docker.errors.ImageNotFound, docker.errors.APIError):
        logger.error(
            "Failed to start docker container! Please check that docker is installed and that "
            "the engine has access to run docker commands."
        )
        raise

    return container.id, container.name
74 |
75 |
def remove_docker_container(container_id):
    """Stop and delete the given docker container, tolerating an already-deleted one."""
    # TODO: this is called TWICE when gunicorn shuts down. Maybe because of the way the
    # reloader works?
    logger.info("Clean up: deleting container {}".format(container_id))

    client = docker.from_env()

    try:
        container = client.containers.get(container_id)
    except docker.errors.NotFound:
        logger.info("Container {} already deleted!".format(container_id))
    else:
        container.stop()
        container.remove()
        logger.info("Container deleted successfully")
93 |
94 |
def docker_file_push(container_id, src_path, tgt_path, container_user="root", chown=True):
    """Copy a file into a docker container via `docker cp`.

    :param container_id: id of the target container.
    :param src_path: local path of the file to copy.
    :param tgt_path: destination path inside the container.
    :param container_user: currently unused; intended owner of the copied file.
    :param chown: currently unused; whether to chown the file after copying.
    :return: combined stdout/stderr of the `docker cp` command.
    :raises subprocess.CalledProcessError: if `docker cp` exits with a nonzero code.
    """
    cmd = ["docker", "cp", src_path, "{}:{}".format(container_id, tgt_path)]
    copy_msg = "{}: {} -> {}".format(container_id, src_path, tgt_path)
    logger.debug("Copying file into docker container: " + copy_msg)

    try:
        # Bug fix: attach a stdout pipe (with stderr merged into it) so the returned value
        # and CalledProcessError.stdout actually contain the command's output; previously
        # no pipe was attached, so ret.stdout and e.stdout were always None.
        ret = subprocess.run(
            cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf8"
        )
    except CalledProcessError as e:
        logger.error("Failed to copy file into container " + copy_msg)
        logger.error("Process return code: {}; stdout: {}".format(e.returncode, e.stdout))
        raise

    # TODO chown?

    return ret.stdout
112 |
113 |
def docker_file_pull(container_id, src_path, tgt_path):
    """Copy a file out of a docker container via `docker cp`.

    :param container_id: id of the source container.
    :param src_path: path of the file inside the container.
    :param tgt_path: local destination path.
    :return: combined stdout/stderr of the `docker cp` command.
    :raises subprocess.CalledProcessError: if `docker cp` exits with a nonzero code.
    """
    cmd = ["docker", "cp", "{}:{}".format(container_id, src_path), tgt_path]
    copy_msg = "{}: {} -> {}".format(container_id, src_path, tgt_path)
    logger.debug("Copying file out of docker container: " + copy_msg)

    try:
        # Bug fix: attach a stdout pipe (with stderr merged into it) so the returned value
        # and CalledProcessError.stdout actually contain the command's output; previously
        # no pipe was attached, so ret.stdout and e.stdout were always None.
        ret = subprocess.run(
            cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf8"
        )
    except CalledProcessError as e:
        logger.error("Failed to copy file out of container " + copy_msg)
        logger.error("Process return code: {}; stdout: {}".format(e.returncode, e.stdout))
        raise

    return ret.stdout
129 |
130 |
def docker_execute(container_id, cmd, timeout=30, env=None, client=None):
    """Execute a command in a docker container, enforcing a time limit.

    :param container_id: id of the container to execute in.
    :param cmd: command to run, as a list of arguments.
    :param timeout: seconds before the `timeout` utility kills the command (exit code 124).
    :param env: optional mapping of environment variables for the command.
    :param client: optional docker client; one is created from the environment if omitted.
    :return: (exit_code, stdout) where stdout is decoded as UTF-8.
    :raises docker.errors.NotFound: if the container does not exist.
    :raises docker.errors.APIError: if the docker daemon fails to run the command.
    """
    if not client:
        client = docker.from_env()

    try:
        container = client.containers.get(container_id)
    except docker.errors.NotFound:
        logger.error(f"Container {container_id} could not be found.")
        raise

    # Wrap the command with the `timeout` utility so runaway user code gets killed.
    timeout_cmd = ["timeout", f"{timeout}"]
    full_cmd = timeout_cmd + cmd

    logger.debug(f"Running command {full_cmd} in container {container_id}.")

    try:
        exit_code, std_out = container.exec_run(full_cmd, environment=env, workdir=None)
    except docker.errors.APIError as e:
        # Bug fix: exit_code/std_out are unbound when exec_run raises, so the old log line
        # itself raised NameError and masked the API error. Log the error object instead.
        logger.error(f"Failed to run cmd {full_cmd} in container {container_id}. Error: {e}")
        raise

    return exit_code, std_out.decode("utf8")
158 |
--------------------------------------------------------------------------------
/engine/glue_code.jl:
--------------------------------------------------------------------------------
# Glue code appended to the user's Julia submission by the engine. Python-style
# format placeholders are filled in before this file runs: double-brace pairs
# escape literal braces, the two string placeholders receive the input JSON
# filename and the run id, and the FUNCTION_NAME token is replaced with the
# user's function name by the CodeRunner.
import JSON

# Call f on the splatted input tuple under the @timed macro.
# NOTE(review): not referenced below — presumably kept for future timing support.
timed_function_call(f, input) = @timed f(input...)

# Number of nested array dimensions of a JSON-parsed value; 0 for scalars.
function json_array_dim(a)
    if length(size(a)) > 0
        return 1 + json_array_dim(a[1])
    else
        return 0
    end
end

# Element type of a possibly-nested JSON-parsed array, recursing past Any wrappers.
function json_array_eltype(a)
    if eltype(a) == Any
        return json_array_eltype(a[1])
    else
        return eltype(a)
    end
end

# Convert JSON arrays-of-arrays into proper multidimensional Julia arrays;
# non-array values pass through unchanged.
juliafy_json(t) = t
juliafy_json(a::Array) = convert(Array{{json_array_eltype(a), json_array_dim(a)}}, hcat(a...))

# Normalize a return value to a tuple so single and multiple returns look alike.
tupleit(t) = tuple(t)
tupleit(t::Tuple) = t

input_tuples = JSON.Parser.parsefile("{:s}")

for (i, input_tuple) in enumerate(input_tuples)

    input_tuple = [juliafy_json(elem) for elem in input_tuple]

    output_tuple = $FUNCTION_NAME(input_tuple...) |> tupleit

    # Write one output JSON file per test case; i is 1-based here.
    open("{:s}.output$i.json", "w") do f
        JSON.print(f, output_tuple)
    end
end
39 |
--------------------------------------------------------------------------------
/engine/glue_code.js:
--------------------------------------------------------------------------------
1 | (() => {{ // Double braces to avoid interfering with Python brace-based string formatting.
2 | var timeStart = process.hrtime();
3 | var userOutput = {:s};
4 | var timeDiff = process.hrtime(timeStart);
5 | var runTime = timeDiff[0] + (timeDiff[1] / 1e9);
6 |
7 | var maxMemoryUsage = 0;
8 |
9 | var submissionData = {{
10 | "userOutput": userOutput,
11 | "runTime": runTime,
12 | "maxMemoryUsage": maxMemoryUsage
13 | }};
14 |
15 | var fs = require('fs');
16 | var data = JSON.stringify(submissionData);
17 | fs.writeFileSync('{:s}', data);
18 | }})();
--------------------------------------------------------------------------------
/engine/logging.ini:
--------------------------------------------------------------------------------
1 | [loggers]
2 | keys=root
3 |
4 | [handlers]
5 | keys=consoleHandler, fileHandler
6 |
7 | [formatters]
8 | keys=simpleFormatter
9 |
10 | [logger_root]
11 | level=DEBUG
12 | handlers=consoleHandler, fileHandler
13 |
14 | [handler_consoleHandler]
15 | class=StreamHandler
16 | level=DEBUG
17 | formatter=simpleFormatter
18 | args=(sys.stdout,)
19 |
20 |
21 | [handler_fileHandler]
22 | class=FileHandler
23 | level=DEBUG
24 | formatter=simpleFormatter
25 | args=("lovelace-engine.log", 'w')
26 |
27 | [formatter_simpleFormatter]
28 | format=[%(asctime)s.%(msecs)03d] %(name)-12s:%(levelname)8s: %(message)s
29 | datefmt=%Y-%m-%d %H:%M:%S
30 |
--------------------------------------------------------------------------------
/engine/run_c.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pickle
3 | import subprocess
4 |
5 | from ctypes import cdll, POINTER, c_int, c_double, c_bool, c_char_p, c_void_p
6 |
7 | from numpy import array, ndarray, zeros, arange, issubdtype, integer, uintp, intc
8 | from numpy.ctypeslib import ndpointer
9 |
def infer_simple_ctype(var):
    """
    Map a simple Python value to the corresponding ctypes type.

    :param var: a bool, int, float, or str value.
    :return: c_bool, c_int, c_double, or c_char_p respectively.
    :raises NotImplementedError: for any other type.
    """
    # Bug fix: check bool before int. In Python, bool is a subclass of int, so the
    # original int check captured True/False and mapped them to c_int, leaving the
    # c_bool branch unreachable.
    if isinstance(var, bool):
        return c_bool

    elif isinstance(var, int):
        return c_int

    elif isinstance(var, float):
        return c_double

    elif isinstance(var, str):
        return c_char_p

    else:
        raise NotImplementedError("Cannot infer ctype of type(var)={:}, var={:}".format(type(var), var))
25 |
def preprocess_types(input_tuple, output_tuple):
    """
    Infer the ctypes signature of the user's C function from one test case.

    :param input_tuple: the Python input arguments for this test case.
    :param output_tuple: the correct output values for this test case, used only to infer
        the return type and to pre-allocate output buffers that C will write into.
    :return: (arg_ctypes, res_ctype, input_list, output_list) where arg_ctypes is the list
        of ctypes argument types, res_ctype is the ctypes return type, input_list holds
        the converted arguments to pass to the C function, and output_list holds the
        objects that the C function mutates and that are read back afterwards.
    """
    input_list = []
    arg_ctypes = []
    output_list = []

    for var in input_tuple:
        if isinstance(var, str):
            arg_ctypes.append(c_char_p)

            # C wants bytes, not strings.
            c_str = bytes(var, "utf-8")
            input_list.append(c_char_p(c_str))

        elif isinstance(var, list):
            if isinstance(var[0], (list, tuple)):
                raise NotImplementedError(f"Cannot infer ctype of a list containing lists or tuples: var={var}")

            # Element type is inferred from the first element; assumes a homogeneous
            # list — TODO confirm all problem inputs satisfy this.
            elem_ctype = infer_simple_ctype(var[0])
            arr_ctype = elem_ctype * len(var)
            arg_ctypes.append(arr_ctype)

            if elem_ctype == c_char_p:
                var = [bytes(s, "utf-8") for s in var]

            arr = arr_ctype(*var)
            input_list.append(arr)

            # For a Python list, we add an extra argument for the size of the C array.
            arg_ctypes.append(c_int)
            input_list.append(len(var))

        elif isinstance(var, ndarray):
            # Convert any integer dtype to platform intc so element sizes match C int.
            if issubdtype(var.dtype, integer):
                var = var.astype(intc)

            if len(var.shape) == 1:
                arr_ctype = ndpointer(dtype=var.dtype, ndim=len(var.shape), shape=var.shape, flags="C_CONTIGUOUS")
                arg_ctypes.append(arr_ctype)
                input_list.append(var)

            elif len(var.shape) == 2:
                # If the numpy ndarray is two-dimensional then we want to pass in an array of pointers of type uintp
                # which corresponds to a double** type. This enables the C function to index into the array as if it
                # were a 2D array, e.g. like arr[i][j]. We could pass it in as we do for the 1D case but then the C
                # function would be restricted to indexing the array linearly, e.g. arr[i].
                var_pp = (var.ctypes.data + arange(var.shape[0]) * var.strides[0]).astype(uintp)
                var_ptr_t = ndpointer(dtype=uintp)

                arg_ctypes.append(var_ptr_t)
                input_list.append(var_pp)

            else:
                raise NotImplementedError("Cannot preprocess input numpy ndarray of shape {:}".format(var.shape))

            # For a numpy ndarray, we add extra arguments for each dimension size of the input C array.
            for s in var.shape:
                arg_ctypes.append(c_int)
                input_list.append(s)

        else:
            arg_ctypes.append(infer_simple_ctype(var))
            input_list.append(var)

    if len(output_tuple) == 1:
        rvar = output_tuple[0]  # Return variable

        if isinstance(rvar, list):
            # If the C function needs to return an array, Python must allocate memory for the array and pass it to the
            # C function. So we add an extra argument for a pointer to the pre-allocated C array and set the return type
            # to void.
            if isinstance(rvar[0], (list, tuple)):
                raise NotImplementedError(f"Cannot infer ctype of a list containing lists or tuples: var={var}")

            arr = array(rvar)

            arr_ctype = ndpointer(dtype=arr.dtype, ndim=len(arr.shape), shape=arr.shape, flags="C_CONTIGUOUS")
            arg_ctypes.append(arr_ctype)

            input_list.append(arr)

            res_ctype = c_void_p

            output_list.append(arr)

        elif isinstance(rvar, ndarray):
            # Mirror the integer-to-intc conversion applied to ndarray inputs above.
            new_dtype = intc if issubdtype(rvar.dtype, integer) else rvar.dtype

            if len(rvar.shape) == 1:
                arr_ctype = ndpointer(dtype=new_dtype, ndim=len(rvar.shape), shape=rvar.shape, flags="C_CONTIGUOUS")
                arr = zeros(rvar.shape, dtype=new_dtype)

                arg_ctypes.append(arr_ctype)
                input_list.append(arr)
                output_list.append(arr)

            elif len(rvar.shape) == 2:
                # Same double** row-pointer trick as for 2D inputs above.
                arr = zeros(rvar.shape, dtype=new_dtype)
                arr_pp = (arr.ctypes.data + arange(arr.shape[0]) * arr.strides[0]).astype(uintp)
                arr_ptr_t = ndpointer(dtype=uintp)

                arg_ctypes.append(arr_ptr_t)
                input_list.append(arr_pp)
                output_list.append(arr)

            else:
                raise NotImplementedError("Cannot preprocess output numpy ndarray of shape {:}".format(rvar.shape))

            res_ctype = c_void_p

        else:
            res_ctype = infer_simple_ctype(rvar)

    else:
        # In the case of multiple return types, we add extra input arguments (one pointer per each return variable)
        # and the C function will mutate the values pointed to by the pointers. These arguments will always be at
        # the very end of the argument list. The return type is set to void.
        for var in output_tuple:
            type = infer_simple_ctype(var)  # NOTE(review): `type` shadows the builtin within this loop.
            ptype = POINTER(type)

            arg_ctypes.append(ptype)

            val = type()  # Create a value, e.g. c_int or c_double, that will be mutated by the C function.
            input_list.append(val)
            output_list.append(val)

        res_ctype = c_void_p

    return arg_ctypes, res_ctype, input_list, output_list
155 |
def ctype_output(var):
    """Convert a value produced by a C call into a plain Python value.

    c_int/c_double wrappers yield their .value, bytes are decoded as UTF-8,
    numpy arrays become nested lists, and anything else passes through unchanged.
    """
    if isinstance(var, (c_int, c_double)):
        result = var.value
    elif isinstance(var, bytes):
        result = var.decode("utf-8")
    elif isinstance(var, ndarray):
        result = var.tolist()
    else:
        result = var
    return result
165 |
# --- Script entry point: executed once per C submission inside the container. ---

# The run id is the basename of this script (everything before the first dot);
# all per-run scratch files share this prefix.
run_id = os.path.basename(__file__).split('.')[0]
input_pickle = "{:s}.input.pickle".format(run_id)
correct_pickle = "{:s}.correct.pickle".format(run_id)
code_file = "{:s}.c".format(run_id)
lib_file = "{:s}.so".format(run_id)

with open(input_pickle, mode='rb') as f:
    input_tuples = pickle.load(f)

# The correct outputs are needed here (unlike for other languages) so that
# preprocess_types can infer the C function's argument and return ctypes.
with open(correct_pickle, mode='rb') as f:
    correct_output_tuples = pickle.load(f)

# Compile the user's C code.
# -fPIC for position-independent code, needed for shared libraries to work no matter where in memory they are loaded.
# check=True will raise a CalledProcessError for non-zero return codes (user code failed to compile.)
subprocess.run(["gcc", "-fPIC", "-shared", "-o", lib_file, code_file], check=True)

# Load the compiled shared library. We use the absolute path as the cwd is not in LD_LIBRARY_PATH so cdll won't find
# the .so file if we use a relative path or just a filename.
cwd = os.path.dirname(os.path.realpath(__file__))
_lib = cdll.LoadLibrary(os.path.join(cwd, lib_file))

for i, (input_tuple, correct_output_tuple) in enumerate(zip(input_tuples, correct_output_tuples)):
    # Use the input and output tuple to infer the type of input arguments and return value. We do this again for each
    # test case in case outputs change type or arrays change size.
    arg_ctypes, res_ctype, ctyped_input_list, output_list = preprocess_types(input_tuple, correct_output_tuple)

    _lib.$FUNCTION_NAME.argtypes = arg_ctypes
    _lib.$FUNCTION_NAME.restype = res_ctype

    # $FUNCTION_NAME will be replaced by the name of the user's function by the CodeRunner before this script is run.
    user_output = _lib.$FUNCTION_NAME(*ctyped_input_list)

    # If the C function returns nothing, then it must have mutated some of its input arguments.
    # We'll pull them out here.
    if res_ctype == c_void_p:
        user_output = []
        for var in output_list:
            user_output.append(ctype_output(var))
        user_output = tuple(user_output)
    else:
        user_output = ctype_output(user_output)

    # Runtime and memory usage are not measured for C submissions; reported as 0.
    output_dict = {
        'user_output': user_output if isinstance(user_output, tuple) else (user_output,),
        'runtime': 0,
        'max_mem_usage': 0
    }

    # One output pickle per test case, 0-based, read back by the engine's CodeRunner.
    output_pickle = '{:s}.output{:d}.pickle'.format(run_id, i)
    with open(output_pickle, mode='wb') as f:
        pickle.dump(output_dict, file=f, protocol=pickle.HIGHEST_PROTOCOL)
218 |
--------------------------------------------------------------------------------
/engine/run_jl.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import pickle
4 | import subprocess
5 |
# --- Script entry point: executed once per Julia submission inside the container. ---

# The run id is the basename of this script; all per-run scratch files share it.
run_id = os.path.basename(__file__).split('.')[0]
input_json = '{:s}.input.json'.format(run_id)
code_file = '{:s}.jl'.format(run_id)

# Julia glue code appended to the user's submission. Double braces escape literal
# braces for the str.format call below; the two placeholders receive input_json and
# run_id. The FUNCTION_NAME token is replaced by the CodeRunner before this runs.
glue_code = '''
import JSON

timed_function_call(f, input) = @timed f(input...)

function json_array_dim(a)
    if length(size(a)) > 0
        return 1 + json_array_dim(a[1])
    else
        return 0
    end
end

function json_array_eltype(a)
    if eltype(a) == Any
        return json_array_eltype(a[1])
    else
        return eltype(a)
    end
end

juliafy_json(t) = t
juliafy_json(a::Array) = convert(Array{{json_array_eltype(a), json_array_dim(a)}}, hcat(a...))

tupleit(t) = tuple(t)
tupleit(t::Tuple) = t

input_tuples = JSON.Parser.parsefile("{:s}")

for (i, input_tuple) in enumerate(input_tuples)

    input_tuple = [juliafy_json(elem) for elem in input_tuple]

    output_tuple = $FUNCTION_NAME(input_tuple...) |> tupleit

    open("{:s}.output$i.json", "w") do f
        JSON.print(f, output_tuple)
    end
end
'''.format(input_json, run_id)

# This will append glue code to the code file to run the test cases.
with open(code_file, mode='a') as f:
    f.write(glue_code)

# NOTE(review): no check=True here — if julia fails, the failure surfaces below as a
# FileNotFoundError when the expected output files are missing. Confirm this is intended.
subprocess.run(["julia", code_file])

with open(input_json, mode='rb') as f:
    input_tuples = json.load(f)

for i, _ in enumerate(input_tuples):
    # Julia writes 1-based output files (output1.json, ...), hence i+1 here.
    output_json = "{:s}.output{:d}.json".format(run_id, i+1)

    with open(output_json, mode='r') as f:
        user_output = json.loads(f.read())

    # Disambiguate the JSON-decoded value into the engine's tuple convention.
    if isinstance(user_output, list) and len(user_output) == 1 and isinstance(user_output[0], list):
        user_output = (user_output[0],)  # Solution is a list
    elif isinstance(user_output, list):
        user_output = tuple(user_output)  # Solution is a "multiple return"
    else:
        user_output = (user_output,)  # Solution is a string or number

    user_output = user_output if isinstance(user_output, tuple) else (user_output,)

    # Runtime and memory usage are not measured for Julia submissions; reported as 0.
    output_dict = {
        'user_output': user_output,
        'runtime': 0,
        'max_mem_usage': 0,
    }

    # The engine reads 0-based output pickles, hence i (not i+1) here.
    output_pickle = "{:s}.output{:d}.pickle".format(run_id, i)
    with open(output_pickle, mode='wb') as f:
        pickle.dump(output_dict, file=f, protocol=pickle.HIGHEST_PROTOCOL)
84 |
--------------------------------------------------------------------------------
/engine/run_js.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import pickle
4 | import subprocess
5 |
# --- Script entry point: executed once per JavaScript submission inside the container. ---

# The run id is the basename of this script; all per-run scratch files share it.
run_id = os.path.basename(__file__).split('.')[0]
input_pickle = "{:s}.input.pickle".format(run_id)
code_file = "{:s}.js".format(run_id)

with open(input_pickle, mode='rb') as f:
    input_tuples = pickle.load(f)

# Append one glue-code block per test case; each block calls the user's function with
# that test case's inputs (serialized as JSON literals) and writes the result to its
# own output JSON file.
for i, input_tuple in enumerate(input_tuples):
    output_json = "{:s}.output{:d}.json".format(run_id, i)

    # $FUNCTION_NAME will be replaced by the name of the user's function by the CodeRunner
    # before this script is run.
    func_call_str = "$FUNCTION_NAME(" + ", ".join([json.dumps(arg) for arg in input_tuple]) + ")"

    glue_code = """
(() => {{ // Double braces to avoid interfering with Python brace-based string formatting.
var timeStart = process.hrtime();
var userOutput = {:s};
var timeDiff = process.hrtime(timeStart);
var runTime = timeDiff[0] + (timeDiff[1] / 1e9);

var maxMemoryUsage = 0;

var submissionData = {{
    "userOutput": userOutput,
    "runTime": runTime,
    "maxMemoryUsage": maxMemoryUsage
}};

var fs = require('fs');
var data = JSON.stringify(submissionData);
fs.writeFileSync('{:s}', data);
}})();
""".format(func_call_str, output_json)

    # This will append glue code to the code file for each test case.
    with open(code_file, mode='a') as f:
        f.write(glue_code)

# Run all test cases at the same time so we only run `node` once.
subprocess.run(["node", code_file])
47 |
48 | for i, _ in enumerate(input_tuples):
49 | output_json = "{:s}.output{:d}.json".format(run_id, i)
50 | with open(output_json, mode='r') as f:
51 | submission_data = json.loads(f.read())
52 |
53 | user_output = submission_data['userOutput']
54 | runtime = submission_data['runTime']
55 | max_mem_usage = submission_data['maxMemoryUsage']
56 |
57 | if not isinstance(user_output, list):
58 | user_output = (user_output,) # Solution is a string, number, or dict, meaning it is a single-value return.
59 |
60 | output_dict = {
61 | 'user_output': user_output,
62 | 'runtime': runtime,
63 | 'max_mem_usage': max_mem_usage,
64 | }
65 |
66 | output_pickle = "{:s}.output{:d}.pickle".format(run_id, i)
67 | with open(output_pickle, mode='wb') as f:
68 | pickle.dump(output_dict, file=f, protocol=pickle.HIGHEST_PROTOCOL)
69 |
--------------------------------------------------------------------------------
/engine/run_py.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import pickle
4 | import importlib
5 | import tracemalloc
6 |
7 | run_id = os.path.basename(__file__).split('.')[0]
8 | input_pickle = '{:s}.input.pickle'.format(run_id)
9 |
10 | user_module = importlib.import_module(run_id)
11 |
12 | with open(input_pickle, mode='rb') as f:
13 | input_tuples = pickle.load(f)
14 |
15 | for i, input_tuple in enumerate(input_tuples):
16 | tracemalloc.start()
17 | t1 = time.time()
18 |
19 | # $FUNCTION_NAME will be replaced by the name of the user's function by the CodeRunner before this script is run.
20 | user_output = user_module.$FUNCTION_NAME(*input_tuple)
21 |
22 | t2 = time.time()
23 | _, max_mem_usage = tracemalloc.get_traced_memory()
24 | tracemalloc.stop()
25 |
26 | user_output = user_output if isinstance(user_output, tuple) else (user_output,)
27 | runtime = t2 - t1
28 |
29 | output_dict = {
30 | 'user_output': user_output,
31 | 'runtime': runtime,
32 | 'max_mem_usage': max_mem_usage
33 | }
34 |
35 | output_pickle = '{:s}.output{:d}.pickle'.format(run_id, i)
36 | with open(output_pickle, mode='wb') as f:
37 | pickle.dump(output_dict, file=f, protocol=pickle.HIGHEST_PROTOCOL)
38 |
--------------------------------------------------------------------------------
/engine/util.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import hashlib
4 | import logging
5 |
6 | logger = logging.getLogger(__name__)
7 |
8 |
def write_str_to_file(string, extension='', encoding='utf-8'):
    """
    Save the contents of a string to a file.

    The file name is the SHA-1 hex digest of the content salted with the
    current time, so two identical strings written at different times get
    different file names.

    :param string: the data to be saved
    :param extension: the file extension to be used for the file's name
    :param encoding: text encoding used both for hashing and for writing the file
    :returns the name of the file containing the string data
    """
    # Convert the code to a byte string.
    blob = string.encode(encoding)

    # Use the time so that two identical files have different file names
    time_bytes = bytes(str(time.time()), encoding=encoding)

    # Hash the byte string to generate a filename.
    m = hashlib.sha1()
    m.update(blob)
    m.update(time_bytes)
    hash_code = m.hexdigest()

    filename = "{}{}".format(hash_code, extension)

    # Bug fix: previously the file was opened without `encoding`, so the
    # parameter only affected hashing and the write used the locale default.
    # A context manager also guarantees the handle is closed if write() raises.
    with open(filename, 'w', encoding=encoding) as f:
        f.write(string)

    return filename
37 |
38 |
def write_list_to_file(the_list):
    """Saves a list to a file (space-separated) and returns the filename."""
    return write_str_to_file(' '.join(the_list))
44 |
45 |
def read_str_from_file(filename):
    """Return the entire contents of ``filename`` as a string.

    :param filename: path of the file to read
    :returns the file's contents
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the original leaked the handle on error.
    with open(filename, 'r') as f:
        return f.read()
51 |
52 |
def read_list_from_file(filename):
    """Load a space-separated list previously written with write_list_to_file."""
    return read_str_from_file(filename).split(sep=' ')
57 |
58 |
def delete_file(filename):
    """Remove ``filename`` if it is an existing regular file; otherwise do nothing."""
    if not os.path.isfile(filename):
        return
    logger.debug("Deleting file: {:s}".format(filename))
    os.remove(filename)
63 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | appdirs==1.4.4
2 | atomicwrites==1.4.0
3 | attrs==20.3.0
4 | bitstring==3.1.7
5 | black==19.3b0
6 | certifi==2020.12.5
7 | chardet==4.0.0
8 | Click==7.1.2
9 | docker==4.4.1
10 | entrypoints==0.3
11 | falcon==2.0.0
12 | flake8==3.8.4
13 | flake8-bugbear==20.11.1
14 | gunicorn==20.0.4
15 | idna==2.10
16 | importlib-metadata==3.3.0
17 | mccabe==0.6.1
18 | more-itertools==8.6.0
19 | numpy==1.19.4
20 | packaging==20.8
21 | pluggy==0.13.1
22 | py==1.10.0
23 | pycodestyle==2.6.0
24 | pyflakes==2.2.0
25 | pyparsing==2.4.7
26 | pytest==6.2.1
27 | requests==2.25.1
28 | scipy==1.5.4
29 | six==1.15.0
30 | toml==0.10.2
31 | urllib3==1.26.4
32 | wcwidth==0.2.5
33 | websocket-client==0.57.0
34 | zipp==3.4.0
35 |
--------------------------------------------------------------------------------
/run_lovelace_engine.bash:
--------------------------------------------------------------------------------
#!/bin/bash

# Clean up artifacts from any previous run: the `solutions` symlink and the
# cloned solutions repository.
rm -f solutions && \
rm -rf lovelace-solutions/

# Build the engine image, start it detached with access to the host Docker
# daemon (the engine launches sibling containers via the mounted socket),
# and list containers to confirm it is up.
docker build -t lovelace-engine . && \
docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 14714:14714 lovelace-engine && \
docker ps -a

# Fetch the reference solutions, expose the Python ones via the `solutions`
# symlink, and install the Python test dependencies.
git clone https://github.com/project-lovelace/lovelace-solutions.git && \
ln -s lovelace-solutions/python/ solutions && \
pip install -r requirements.txt

# NOTE(review): this export only affects the remainder of this script, which
# ends here — it does not reach the caller's shell. Run the script with
# `source` (or export the variable yourself) for the test suite to see it.
export LOVELACE_SOLUTIONS_DIR=./lovelace-solutions/
15 |
16 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 |
2 | [flake8]
3 | max-line-length = 100
4 |
5 | [tool:pytest]
6 | markers =
7 | python: Test API with python solutions.
8 | javascript: Test API with javascript solutions.
9 | julia: Test API with julia solutions.
10 | c: Test API with c solutions.
11 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import json
3 | import os
4 | import time
5 |
6 | import pytest
7 | import requests
8 |
9 |
@pytest.fixture(scope="session")
def engine_uri():
    """Base URI of a running lovelace engine, failing fast if it is unreachable.

    Reads LOVELACE_ENGINE_URI from the environment (default http://localhost:14714)
    and performs one GET to verify the engine responds.
    """
    uri = os.environ.get("LOVELACE_ENGINE_URI", "http://localhost:14714")
    err_msg = (
        "Cannot connect to lovelace engine at {}. Is it running? "
        "Check if the env var LOVELACE_ENGINE_URI is set properly. ".format(uri)
    )
    try:
        resp = requests.get(uri)
    except requests.exceptions.ConnectionError as err:
        # Chain the original exception so connection details aren't lost.
        raise ValueError(err_msg) from err
    if not resp.ok:
        raise ValueError(err_msg)
    return uri
24 |
25 |
@pytest.fixture()
def engine_submit_uri(engine_uri):
    """The engine's /submit endpoint, derived from the base-URI fixture."""
    return "{}/submit".format(engine_uri)
29 |
30 |
@pytest.fixture()
def submit_solution(engine_submit_uri):
    """Fixture returning a callable that POSTs a solution file to the engine.

    The problem name and language are inferred from the file name
    (``<problem>.<ext>``); the solution code travels base64-encoded in JSON.
    """
    def _submit_solution(file_path):
        with open(file_path, "r") as solution_file:
            code_b64 = base64.b64encode(solution_file.read().encode("utf-8")).decode("utf-8")

        problem_name, extension = os.path.basename(file_path).split(sep=".")
        language = {"py": "python", "js": "javascript", "jl": "julia", "c": "c"}.get(extension)

        if not language:
            raise ValueError("Solution file has unrecognized extension: {}".format(file_path))

        payload_json = json.dumps({"problem": problem_name, "language": language, "code": code_b64})

        # Report how long the round-trip to the engine takes.
        t1 = time.perf_counter()
        response = requests.post(engine_submit_uri, data=payload_json)
        t2 = time.perf_counter()
        print(f"{t2 - t1 : .6f} seconds ", end='')

        return response.json()

    return _submit_solution
55 |
56 |
@pytest.fixture()
def submit_file(engine_submit_uri):
    """Fixture returning a callable that POSTs a file for an explicit problem/language."""
    def _submit_solution(file_path, problem, language):
        with open(file_path, "r") as solution_file:
            code_b64 = base64.b64encode(solution_file.read().encode("utf-8")).decode("utf-8")

        payload_json = json.dumps({"problem": problem, "language": language, "code": code_b64})

        # Report how long the round-trip to the engine takes.
        t1 = time.perf_counter()
        response = requests.post(engine_submit_uri, data=payload_json)
        t2 = time.perf_counter()
        print(f"{t2 - t1 : .6f} seconds ", end='')

        return response.json()

    return _submit_solution
75 |
--------------------------------------------------------------------------------
/tests/dummy_solutions/chaos_84.js:
--------------------------------------------------------------------------------
// Regression fixture for https://github.com/project-lovelace/lovelace-engine/issues/84.
// NOTE(review): `fs` is required but never used — presumably the point of this
// fixture is that a top-level require() must not break the engine's JS runner;
// do not "clean it up".
const fs = require('fs');

// Logistic map x_{n+1} = r * x_n * (1 - x_n) starting at x_0 = 0.5, 50 steps;
// returns the whole trajectory as an array of 51 values.
function logistic_map(r) {
    let x = [0.5];
    for (let i = 0; i < 50; i++) {
        let last = x[x.length - 1];
        x.push(r * last * (1 - last));
    }
    return x;
}
11 |
--------------------------------------------------------------------------------
/tests/dummy_solutions/infinite_loop.py:
--------------------------------------------------------------------------------
# Dummy solution that never returns: used to verify that the engine kills
# submissions exceeding the time limit. Do not "fix" the loop.
def fahrenheit_to_celsius(F):
    while True:
        pass
    return 0  # unreachable by design
5 |
--------------------------------------------------------------------------------
/tests/dummy_solutions/memory_explosion.py:
--------------------------------------------------------------------------------
import numpy as np

# Dummy solution that deliberately exhausts the container's memory limit:
# used to verify the engine handles out-of-memory submissions. Do not "fix" it.
def light_time(distance):
    # Allocate 2^28 float64's, roughly ~2 GiB of memory, much more than the 512 MiB container memory limit.
    # (NOTE(review): np.repeat(0, ...) actually yields the platform integer dtype,
    # still 8 bytes per element on 64-bit systems, so the total is the same.)
    # For why we're not using `np.zeros` see: https://stackoverflow.com/questions/27574881/why-does-numpy-zeros-takes-up-little-space
    A = np.repeat(0, 2**28)
    return 0
8 |
--------------------------------------------------------------------------------
/tests/helpers.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 |
4 | # User can set solutions and problems dir and server/port for lovelace engine if it's different
5 | # from the default. Don't forget http:// at the beginning of the engine URI
6 | # export LOVELACE_ENGINE_URI="https://custom-url:12345"
7 | # export LOVELACE_SOLUTIONS_DIR="/home/myuser/lovelace/solutions"
8 | # export LOVELACE_PROBLEMS_DIR="/home/myuser/lovelace/problems"
9 |
def solutions_dir(language=""):
    """Return the solutions directory (optionally a per-language subdirectory),
    validating that it exists on disk."""
    sol_dir = os.environ.get("LOVELACE_SOLUTIONS_DIR", "/home/ada/lovelace/lovelace-solutions/")
    if language:
        sol_dir = os.path.join(sol_dir, language)
    if os.path.isdir(sol_dir):
        return sol_dir
    raise ValueError(
        f"Cannot find solutions dir at: {sol_dir}. "
        "Is the env var LOVELACE_SOLUTIONS_DIR set properly?"
    )
20 |
def problems_dir():
    """Return the problems directory, validating that it exists on disk.

    :raises ValueError: if the directory does not exist
    """
    prob_dir = os.environ.get("LOVELACE_PROBLEMS_DIR", "/home/ada/lovelace/lovelace-problems/")
    if not os.path.isdir(prob_dir):
        # Bug fix: the message used to say "solutions dir", pointing users at
        # the wrong directory/environment variable.
        raise ValueError(
            f"Cannot find problems dir at: {prob_dir}. "
            "Is the env var LOVELACE_PROBLEMS_DIR set properly?"
        )
    return prob_dir
29 |
30 |
# Bidirectional mappings between engine language names and solution-file extensions.
language2ext = {"python": "py", "javascript": "js", "julia": "jl", "c": "c"}
ext2language = {"py": "python", "js": "javascript", "jl": "julia", "c": "c"}
33 |
34 |
def get_solution_filepaths(language=""):
    """Return paths of solution files for ``language`` that have a matching problem module.

    :param language: engine language name ("python", "javascript", "julia", "c")
    :raises ValueError: if ``language`` is empty or unknown (previously the
        default ``""`` produced an opaque KeyError on language2ext)
    """
    if language not in language2ext:
        raise ValueError(f"Unknown language {language!r}; expected one of {sorted(language2ext)}")

    all_solution_filepaths = glob.glob(os.path.join(solutions_dir(language), f"*.{language2ext[language]}"))
    all_problem_filepaths = glob.glob(os.path.join(problems_dir(), "problems", "*.py"))

    solutions = [os.path.splitext(os.path.basename(fp))[0] for fp in all_solution_filepaths]
    problems = [os.path.splitext(os.path.basename(fp))[0] for fp in all_problem_filepaths]

    valid_solution_filepaths = []

    for (n, sol) in enumerate(solutions):
        # Might need to replace - with _ for Javascript solutions.
        if sol.replace("-", "_") in problems:
            valid_solution_filepaths.append(all_solution_filepaths[n])

    return valid_solution_filepaths
50 |
51 |
def problem_name_id(param):
    """pytest id helper: file path -> bare problem name (basename without extension)."""
    return os.path.splitext(os.path.basename(param))[0]
55 |
56 |
def filename_id(param):
    """pytest id helper: file path -> file name including extension."""
    return os.path.basename(param)
59 |
--------------------------------------------------------------------------------
/tests/test_c_solutions.py:
--------------------------------------------------------------------------------
import json
import pytest

from helpers import get_solution_filepaths, problem_name_id


solution_files = get_solution_filepaths(language="c")

# Don't test game_of_life.c for now.
solution_files = [s for s in solution_files if "game_of_life" not in s]


@pytest.mark.c
def test_c_solutions_exist():
    """At least one C solution file must be present."""
    assert solution_files


@pytest.mark.c
@pytest.mark.parametrize("solution_file", solution_files, ids=problem_name_id)
def test_submit_file(solution_file, submit_solution):
    """Every C solution should be accepted by the engine."""
    result = submit_solution(solution_file)
    assert result.get("success") is True, f"Failed. Engine output:\n{json.dumps(result, indent=4)}"
26 |
--------------------------------------------------------------------------------
/tests/test_dummy_solutions.py:
--------------------------------------------------------------------------------
import os
import glob
import json
import pytest

from helpers import filename_id, ext2language


cwd = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): solution_files is not referenced by any test below — presumably
# kept for interactive use; confirm before removing.
solution_files = glob.glob(os.path.join(cwd, "dummy_solutions", "*"))


# See: https://github.com/project-lovelace/lovelace-engine/issues/84
def test_84(submit_file):
    """Regression test: a JS solution with a top-level require() must still pass."""
    js_solution = os.path.join(cwd, "dummy_solutions", "chaos_84.js")
    result = submit_file(js_solution, problem="chaos", language="javascript")
    assert result.get("success") is True, f"Failed. Engine output:\n{json.dumps(result, indent=4)}"


def test_infinite_loop_times_out(submit_file):
    """A never-returning solution should make the submission raise."""
    looping_solution = os.path.join(cwd, "dummy_solutions", "infinite_loop.py")
    with pytest.raises(Exception):
        submit_file(looping_solution, problem="scientific_temperatures", language="python")


# def test_memory_explosion_times_out(submit_file):
#     filepath = os.path.join(cwd, "dummy_solutions", "memory_explosion.py")
#     with pytest.raises(Exception) as e_info:
#         result = submit_file(filepath, problem="speed_of_light", language="python")
30 |
--------------------------------------------------------------------------------
/tests/test_javascript_solutions.py:
--------------------------------------------------------------------------------
import json
import pytest

from helpers import get_solution_filepaths, problem_name_id


solution_files = get_solution_filepaths(language="javascript")


@pytest.mark.javascript
def test_javascript_solutions_exist():
    """At least one JavaScript solution file must be present."""
    assert solution_files


@pytest.mark.javascript
@pytest.mark.parametrize("solution_file", solution_files, ids=problem_name_id)
def test_submit_file(solution_file, submit_solution):
    """Every JavaScript solution should be accepted by the engine."""
    result = submit_solution(solution_file)
    assert result.get("success") is True, f"Failed. Engine output:\n{json.dumps(result, indent=4)}"
23 |
--------------------------------------------------------------------------------
/tests/test_julia_solutions.py:
--------------------------------------------------------------------------------
import json
import pytest

from helpers import get_solution_filepaths, problem_name_id


solution_files = get_solution_filepaths(language="julia")


@pytest.mark.julia
def test_julia_solutions_exist():
    """At least one Julia solution file must be present."""
    assert solution_files


@pytest.mark.julia
@pytest.mark.parametrize("solution_file", solution_files, ids=problem_name_id)
def test_submit_file(solution_file, submit_solution):
    """Every Julia solution should be accepted by the engine."""
    result = submit_solution(solution_file)
    assert result.get("success") is True, f"Failed. Engine output:\n{json.dumps(result, indent=4)}"
23 |
--------------------------------------------------------------------------------
/tests/test_python_solutions.py:
--------------------------------------------------------------------------------
import json
import pytest

from helpers import get_solution_filepaths, problem_name_id


solution_files = get_solution_filepaths(language="python")

# Don't test numerical_diff.py for now.
solution_files = [s for s in solution_files if "numerical_diff" not in s]


@pytest.mark.python
def test_python_solutions_exist():
    """At least one Python solution file must be present."""
    assert solution_files


@pytest.mark.python
@pytest.mark.parametrize("solution_file", solution_files, ids=problem_name_id)
def test_submit_file(solution_file, submit_solution):
    """Every Python solution should be accepted by the engine."""
    result = submit_solution(solution_file)
    assert result.get("success") is True, f"Failed. Engine output:\n{json.dumps(result, indent=4)}"
26 |
--------------------------------------------------------------------------------