├── .github ├── dependabot.yml └── workflows │ ├── format.yml │ ├── pypi.yml │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── DESIGN.md ├── LICENSE ├── MANIFEST.in ├── README.ja.md ├── README.md ├── docs ├── INSTALL.ja.md ├── INSTALL.md ├── getting-started.ja.md └── getting-started.md ├── onlinejudge_command ├── __0_workaround_for_conflict.py ├── __about__.py ├── __init__.py ├── download_history.py ├── format_utils.py ├── log_formatter.py ├── main.py ├── output_comparators.py ├── pretty_printers.py ├── subcommand │ ├── __init__.py │ ├── download.py │ ├── generate_input.py │ ├── generate_output.py │ ├── login.py │ ├── submit.py │ ├── test.py │ └── test_reactive.py ├── update_checking.py └── utils.py ├── setup.cfg ├── setup.py └── tests ├── __init__.py ├── command_download.py ├── command_generate_input.py ├── command_generate_output.py ├── command_login.py ├── command_submit.py ├── command_test.py ├── command_test_reactive.py ├── command_version.py ├── format_utils.py ├── implementation_language_guessing.py ├── main.py ├── output_comparators.py ├── pretty_printers.py └── utils.py /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /.github/workflows/format.yml: -------------------------------------------------------------------------------- 1 | name: format 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | format: 7 | strategy: 8 | matrix: 9 | python-version: 10 | - '3.12' 11 | 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | 17 | - name: Set up Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: ${{ matrix.python-version }} 21 | 22 | - name: Install dependencies 23 | run: | 24 | pip3 install --upgrade setuptools 25 | pip3 install .[dev] 26 
| 27 | - name: Run --version 28 | run: oj --version 29 | 30 | - name: Run pylint 31 | run: pylint --rcfile=setup.cfg onlinejudge_command tests setup.py 32 | 33 | - name: Run isort 34 | run: isort --check-only --diff onlinejudge_command tests setup.py 35 | 36 | - name: Run yapf 37 | run: yapf --diff --recursive onlinejudge_command tests setup.py 38 | 39 | - name: Run mypy 40 | run: mypy onlinejudge_command tests setup.py 41 | -------------------------------------------------------------------------------- /.github/workflows/pypi.yml: -------------------------------------------------------------------------------- 1 | name: pypi 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - uses: actions/checkout@v1 13 | 14 | - name: Set up Python 15 | uses: actions/setup-python@v1 16 | 17 | - name: Install dependencies 18 | run: | 19 | pip3 install . 20 | pip3 install setuptools wheel 21 | 22 | - name: Build package 23 | run: python3 setup.py bdist_wheel 24 | 25 | - name: Publish package 26 | uses: pypa/gh-action-pypi-publish@master 27 | with: 28 | user: __token__ 29 | password: ${{ secrets.pypi_password }} 30 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | pull_request: 5 | schedule: 6 | - cron: '00 15 * * FRI' # At 00:00 on every Saturday in JST 7 | 8 | jobs: 9 | test: 10 | strategy: 11 | matrix: 12 | os: [ubuntu-latest, windows-latest, macos-latest] 13 | python-version: 14 | - '3.8' 15 | - '3.10' 16 | - '3.12' 17 | 18 | runs-on: ${{ matrix.os }} 19 | 20 | steps: 21 | - uses: actions/checkout@v4 22 | 23 | - name: Set up Python 24 | uses: actions/setup-python@v5 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - name: Get pip cache 29 | id: pip-cache 30 | run: | 31 | python -c "from pip._internal.locations 
import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" 32 | 33 | - uses: actions/cache@v1 34 | with: 35 | path: ${{ steps.pip-cache.outputs.dir }} 36 | key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}-${{ hashFiles('setup.cfg') }} 37 | restore-keys: | 38 | ${{ runner.os }}-pip- 39 | 40 | - name: Install dependencies 41 | run: | 42 | pip3 install --upgrade setuptools 43 | pip3 install .[dev] 44 | 45 | - name: Run tests 46 | run: | 47 | oj -h 48 | pytest -v tests/*.py 49 | shell: bash 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution and Hacking Guide 2 | 3 | links: 4 | 5 | - [CONTRIBUTING.md](https://github.com/online-judge-tools/.github/blob/master/CONTRIBUTING.md) of [online-judge-tools](https://github.com/online-judge-tools) organization 6 | - [DESIGN.md](https://github.com/online-judge-tools/oj/blob/master/DESIGN.md) 7 | 8 | 9 | ## For committer of `oj` command / `oj` コマンド本体への貢献者へ 10 | 11 | - See also the [CONTRIBUTING.md](https://github.com/online-judge-tools/.github/blob/master/CONTRIBUTING.md) of this GitHub organization. / この GitHub organization の [CONTRIBUTING.md](https://github.com/online-judge-tools/.github/blob/master/CONTRIBUTING.md) も読んでください。 12 | - See also the [DESIGN.md](https://github.com/online-judge-tools/oj/blob/master/DESIGN.md) of this repository, if possible. / 可能なら、この repository の [DESIGN.md](https://github.com/online-judge-tools/oj/blob/master/DESIGN.md) も読んでください。 13 | - The code to interact with web servers of online judges exist in [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client) repository. / オンラインジャッジのサーバと直接通信するコードは [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client) レポジトリにあります。 14 | 15 | 16 | ## For developpers of programs which uses `oj` command / `oj` コマンドを用いたツールの開発者へ 17 | 18 | TL;DR: Use [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client) instead for programs. / プログラムからの利用には代わりに [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client) を使ってください 19 | 20 | There are many ways to use online-judge-tools for your tool. 21 | 22 | 1. 
`oj-api` command (in [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client)) 23 | - `oj-api` command is the best choice in most cases because it makes things loose coupling and has . 24 | - You should also check [jmerle/competitive-companion](https://github.com/jmerle/competitive-companion). 25 | - Now, there are some missing features in `oj-api` command (e.g. logging in via web browsers). So you may need to use `oj` command for such features. 26 | 1. [`onlinejudge` module](https://online-judge-tools.readthedocs.io/) of Python (in [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client)) 27 | - `onlinejudge` module is the most flexible interface, but it makes tight coupling. You should avoid it unless you really need to optimize. 28 | 1. `oj` command 29 | - `oj` command is basically an interface to humans, not to programs. You can use this for your tool, but please be careful. 30 | - You can re-implement the core functionality of any subcommand of `oj` command with a single-line shell script. 
31 | -------------------------------------------------------------------------------- /DESIGN.md: -------------------------------------------------------------------------------- 1 | # Design Doc 2 | 3 | link: [DESIGN.md](https://github.com/online-judge-tools/.github/blob/master/DESIGN.md) of [online-judge-tools](https://github.com/online-judge-tools) organization 4 | 5 | 6 | ## Objective 7 | 8 | `oj` コマンドは、競技プログラミングの問題を解くことを補助する基本的な機能を提供する。 9 | 特に、サンプルケースによるテストや自分で生成したランダムケースによるテストを行なうための機能を提供する。 10 | 11 | 12 | ## Goals 13 | 14 | - ユーザが個別の問題を解く速度を上げること 15 | - ユーザのレートを上げること 16 | - パソコンが苦手な人でも使える自動化の手段を提供し、競技プログラミングにおける標準的な環境のレベルを引き上げること 17 | - 提出前のサンプルケースでのテストは必ずする、バグったら愚直解との突き合わせをして撃墜ケースを探す、という振舞いを競技プログラミング界隈における常識にすること 18 | 19 | 20 | ## Non-Goals 21 | 22 | - ユーザのコンテストにおける立ち回りを改善すること 23 | - 個別の問題を解く速度を上げることで副次的にこれも改善されるが、これをそのものを目標とはしない 24 | - ユーザの環境を便利にすること 25 | - ユーザの行動を誘導するための手段として便利さを利用するが、これをそのものを目標とはしない 26 | 27 | 28 | ## Background 29 | 30 | 競技プログラミングにおいて、問題文中で入力の例およびそのような入力に対する正しい出力の例が与えられるのが通常である。 31 | これらはサンプルケースと呼ばれる。 32 | 解法コード提出前にこれらサンプルケースに対する応答が正しいかを確認するのが通常である。 33 | しかしこれは人間が手でやるには面倒であるので、省略してしまったり間違えてしまったりしやすい (例: )。 34 | これは自動化によって解決されるべきである。 35 | 36 | `oj` コマンド (あるいはその前身の [nodchip/OnlineJudgeHelper](https://github.com/nodchip/OnlineJudgeHelper)) の登場と普及の前は、ほとんどすべての競技プログラマはサンプルケースのテストを手と目視で行うか、そもそも行なっていなかったことに注意したい。 37 | 38 | 39 | ## Overview 40 | 41 | それぞれのオンラインジャッジとの通信は [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client) に任せ、[online-judge-tools/oj](https://github.com/online-judge-tools/oj) の中では基本的に通信をしない。 42 | 43 | 44 | ## Detailed Design 45 | 46 | - 設定ファイルは作成しない。 47 | - 設定ファイルの作成はユーザの環境を多様にしユーザサポートを困難にするためである。「なぜか動かない」と困っているユーザに対し必ずまず初めに「設定ファイルはどうなっていますか?」と聞かなければならない状況は避けるべきである。 48 | - 処理はできる限り安全側に倒す。 49 | - 特に `test` サブコマンドにおける AC 判定のデフォルトは厳しくし「手元でサンプルが AC したならば、提出先でもサンプルには AC する」を期待できるようにする。 50 | - 「手元でサンプルが AC したならば、提出先でもサンプルには AC する」を健全性と呼び「手元でサンプルが WA したならば、提出先でもサンプルには WA する」を完全性と呼ぶことにしよう。RE 
については忘れる。健全性も完全性も持たないならば「手元での結果と提出先での結果には相関があるが、具体的に保証できる性質はない」ということになってしまうため、どちらかは成立させたい。スペシャルジャッジなどがあるので完全性は明らかに不可能である。一方で健全性は可能であるので、これはできる限り保つもとのする。 51 | - ディレクトリ構造については「問題ごとに異なるディレクトリを用いる」ことを仮定し、それ以外の仮定をしない。 52 | - 無闇に仮定を強めることは異なるディレクトリ構造を利用しているユーザに対しての導入障壁となる。一方で「自分でテストを追加する」という行動を推奨するために `test/` ディレクトリの作成は強制する。 53 | - 役割範囲はできるだけ狭める。テンプレートコードの生成やコンテスト一括でのサンプルケースの取得機能などは [online-judge-tools/template-generator](https://github.com/online-judge-tools/template-generator) に移譲する。機能を減らしかつ単純化することで、パソコンに不慣れなユーザでも利用しやすくするためである。 54 | - 便利ではあるが `oj` ではサポートしない機能については他のツールで対応してもらうことになる。 55 | - `oj` コマンドの目指すところは [`ag` コマンド](https://github.com/ggreer/the_silver_searcher)や [`bat` コマンド](https://github.com/sharkdp/bat)のような強力な発展的コマンドではなく [`find` コマンド](https://linux.die.net/man/1/find)や [`grep` コマンド](https://linux.die.net/man/1/grep)や [`cat` コマンド](https://linux.die.net/man/1/cat)のような安定した標準的コマンドである。 56 | 57 | TODO: もうすこし詳しく書く 58 | 59 | 60 | ## Security Considerations 61 | 62 | 特になし。 63 | 内部で利用している [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client) の問題は継承する。 64 | 65 | 66 | ## Privacy Considerations 67 | 68 | 特になし。 69 | ユーザのデータは `submit` サブコマンドを除いて送信されない。 70 | 71 | 72 | ## Metrics Considerations 73 | 74 | PePy を見るとユーザ数の概算が得られる。 75 | 個別の機能の利用状況についての統計は得られない。 76 | 77 | 2020/09/20 時点では、AtCoder に参加する競技プログラマの 1 割程度が利用していると言ってよいだろう。 78 | 最新版が公開されると毎回 1000 人程度がダウンロードしてくれていることが分かる。 79 | なにかエラーがでて動かなくなるまでバージョン更新をしてくれないユーザのことを考えると 2000 から 3000 人程度のアクティブユーザを見込めるだろう。 80 | AtCoder にはアクティブかつ茶色以上のユーザが 22000 人おりこれを潜在的ユーザの全体とする。よって概算で 1 割である。 81 | 82 | 83 | ## Testing Plan 84 | 85 | GitHub Actions を用いて Linux, macOS, Windows すべての環境で end-to-end tests をまわす。 86 | ファイル書き込みやコマンド実行などの入出力操作が重要となるため unit tests は特定の機能を除き役に立ちにくい。 87 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017-2020 Kimiyuki Onaka. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE 3 | -------------------------------------------------------------------------------- /README.ja.md: -------------------------------------------------------------------------------- 1 | # online-judge-tools/oj 2 | 3 | [![test](https://github.com/online-judge-tools/oj/workflows/test/badge.svg)](https://github.com/online-judge-tools/oj/actions) 4 | [![PyPI](https://img.shields.io/pypi/v/online-judge-tools.svg)](https://pypi.python.org/pypi/online-judge-tools) 5 | [![Downloads](https://pepy.tech/badge/online-judge-tools)](https://pepy.tech/project/online-judge-tools) 6 | [![PyPI](https://img.shields.io/pypi/l/online-judge-tools.svg)](https://github.com/kmyk/online-judge-tools/blob/master/LICENSE) 7 | [![Join the chat at https://gitter.im/online-judge-tools/community](https://badges.gitter.im/online-judge-tools/community.svg)](https://gitter.im/online-judge-tools/community) 8 | 9 | [English version of `README.md`](https://github.com/online-judge-tools/oj/blob/master/README.md) 10 | 11 | `oj` コマンドは様々なオンラインジャッジの問題を解くことを助けるツールです。 12 | このコマンドは、サンプルケースの取得、追加のテストケースの生成、テストの実行、コードの提出などを自動化します。 13 | 14 | ## Screencast 15 | 16 | ![screencast](https://user-images.githubusercontent.com/2203128/34708715-568b13c0-f557-11e7-97ef-9f6b646e4776.gif) 17 | 18 | ## Features 19 | 20 | - サンプルケースを取得 21 | - システムケースを取得 22 | - ログイン 23 | - コードを提出 24 | - テストを実行 25 | - リアクティブ問題のテストを実行 26 | - テストケース生成器からテストケースの入力を生成 27 | - テストケースの入力と愚直解からテストケースの出力を生成 28 | 29 | 詳しいドキュメントは [docs/getting-started.ja.md](https://github.com/online-judge-tools/oj/blob/master/docs/getting-started.ja.md) にあります。 30 | 31 | 様々なオンラインジャッジ (Codeforces, AtCoder, HackerRank など) をサポートしています。 32 | その完全なリストには [the table of 
online-judge-tools/api-client](https://github.com/online-judge-tools/api-client#supported-websites) を見てください。 33 | 34 | ## How to install 35 | 36 | Python package は [![PyPI](https://img.shields.io/pypi/v/online-judge-tools.svg)](https://pypi.python.org/pypi/online-judge-tools) です。 37 | 38 | ```console 39 | $ pip3 install online-judge-tools 40 | ``` 41 | 42 | より詳しい説明には [docs/INSTALL.ja.md](https://github.com/online-judge-tools/oj/blob/master/docs/INSTALL.ja.md) を読んでください。 43 | 44 | 45 | ## How to use 46 | 47 | ```console 48 | $ oj download [--system] URL 49 | $ oj login URL 50 | $ oj submit [URL] FILE 51 | $ oj test [-c COMMAND] [TEST...] 52 | $ oj test-reactive [-c COMMAND] JUDGE_COMMAND 53 | $ oj generate-input GENERATOR_COMMAND 54 | $ oj generate-output [-c COMMAND] [TEST...] 55 | ``` 56 | 57 | 詳細は `$ oj --help` を見てください。 58 | 59 | ## Example 60 | 61 | ```console 62 | $ oj download http://agc001.contest.atcoder.jp/tasks/agc001_a 63 | [INFO] online-judge-tools 11.2.0 (+ online-judge-api-client 10.8.0) 64 | [INFO] load cookie from: /home/user/.local/share/online-judge-tools/cookie.jar 65 | [NETWORK] GET: https://atcoder.jp/contests/agc001/tasks/agc001_a 66 | [NETWORK] 200 OK 67 | 68 | [INFO] sample 0 69 | [INFO] input: sample-1 70 | 2 71 | 1 3 1 2 72 | 73 | [SUCCESS] saved to: test/sample-1.in 74 | [INFO] output: sample-1 75 | 3 76 | 77 | [SUCCESS] saved to: test/sample-1.out 78 | 79 | [INFO] sample 1 80 | [INFO] input: sample-2 81 | 5 82 | 100 1 2 3 14 15 58 58 58 29 83 | 84 | [SUCCESS] saved to: test/sample-2.in 85 | [INFO] output: sample-2 86 | 135 87 | 88 | [SUCCESS] saved to: test/sample-2.out 89 | 90 | $ cat < main.py 91 | #!/usr/bin/env python3 92 | n = int(input()) 93 | a = list(map(int, input().split())) 94 | ans = max(a) 95 | print(ans) 96 | EOF 97 | 98 | $ oj t -c "python3 main.py" 99 | [INFO] online-judge-tools 11.2.0 (+ online-judge-api-client 10.8.0) 100 | [INFO] 2 cases found 101 | 102 | [INFO] sample-1 103 | [INFO] time: 0.043601 sec 104 | [SUCCESS] AC 
105 | 106 | [INFO] sample-2 107 | [INFO] time: 0.043763 sec 108 | [FAILURE] WA 109 | input: 110 | 5 111 | 100 1 2 3 14 15 58 58 58 29 112 | 113 | output: 114 | 3 115 | 116 | expected: 117 | 135 118 | 119 | 120 | [INFO] slowest: 0.043763 sec (for sample-2) 121 | [INFO] max memory: 10.064000 MB (for sample-2) 122 | [FAILURE] test failed: 1 AC / 2 cases 123 | ``` 124 | 125 | ## FAQ 126 | 127 | - 私は C++ でなく Python (あるいは Rust, D, Java, F#, Haskell など) を使っています。それでも利用できますか? 128 | - はい。必要なら `--command` (`-c`) オプションを利用してください。たとえば Python なら `$ oj t -c "python3 main.py"` のようにします。 129 | - 私はいつもひとつのコンテストごとにひとつのディレクトリを使っています。このスタイルのままでも利用できますか? 130 | - はい。`--directory` (`-d`) オプションや `$ rm -rf test/` コマンドを利用してください。しかし、レートの最大化のためには追加のテストケースを自分で生成するべきであり、それをする上ではひとつの問題ごとにひとつのディレクトリを使う方がよいでしょう。 131 | - コンテストごとに一括でサンプルケースを取得できませんか? 132 | - いいえ、`oj` コマンドではできません。代わりに [kmyk/online-judge-template-generator](https://github.com/kmyk/online-judge-template-generator) の `oj-prepare` コマンドを利用してください。 133 | - テストの前に自動でコードをコンパイルするようにはできますか? 134 | - はい。シェルの機能を使ってください。`$ g++ main.cpp && oj t` のように実行してください。 135 | - テストが通ったら自動で提出するようにはできますか? 136 | - はい。シェルの機能を使ってください。`$ oj t && oj s main.cpp` のように実行してください。ところで、サンプルケースがあまり強くない問題の存在には注意が必要です。 137 | - 提出の際のディレイや `[y/N]` の確認をなしにできますか? 138 | - はい。`--wait=0` オプションや `--yes` オプションを利用してください。しかしこれは推奨されません。これらの機能は安全性のためにあるものです。たとえば、もし 3 秒速く提出できていれば順位が 3 位上がるような状況を考えてみましょう。そのような状況で提出ミスをして 5 分のペナルティを受ければ、順位は 300 位は下がるでしょう。 139 | - 入力したパスワードは保存されますか? 140 | - いいえ。パスワードはどのファイルにも保存されません。セッショントークンなど (ただしこれも機密情報です) のみを保存します。必要であれば [`onlinejudge/_implementation/command/login.py`](https://github.com/kmyk/online-judge-tools/blob/master/onlinejudge/_implementation/command/login.py) などを読んで確認してください。 141 | - 設定ファイルはありますか? 
142 | - いいえ。シェルの `.bashrc` (あるいはそれに相当するファイル) を利用してください。[man bash](https://linux.die.net/man/1/bash) を読み、シェル alias やシェル関数を書いてください。たとえば、もしテストの実行の際に Python をデフォルトで使いたいのなら `alias oj-test-python='oj t -c "python3 main.py"'` と `.bashrc` に書いて `$ oj-test-python` と実行してください。 143 | 144 | ここにない質問には [Gitter](https://gitter.im/online-judge-tools/community) [![Join the chat at https://gitter.im/online-judge-tools/community](https://badges.gitter.im/online-judge-tools/community.svg)](https://gitter.im/online-judge-tools/community) やその他の SNS を使ってください。 145 | 146 | ## Resources 147 | 148 | ### Articles 149 | 150 | - [online-judge-toolsを導入しよう! · ますぐれメモ](https://blog.masutech.work/posts/compro/oj-introduction/) 151 | - [online-judge-toolsをVimから呼んで楽をする - Leverage Copy](https://maguroguma.hatenablog.com/entry/2020/08/19/090000) 152 | - [VSCodeでAtCoderのサンプルケースをサクッとテストする - Qiita](https://qiita.com/danpe919/items/7c5697df25fb567f1e71) 153 | 154 | ### Related Tools 155 | 156 | 競合: 157 | 158 | - [jmerle/competitive-companion](https://github.com/jmerle/competitive-companion) 159 | - [kyuridenamida/atcoder-tools](https://github.com/kyuridenamida/atcoder-tools) 160 | - [xalanq/cf-tool](https://github.com/xalanq/cf-tool) 161 | - [nodchip/OnlineJudgeHelper](https://github.com/nodchip/OnlineJudgeHelper) 162 | 163 | 非競合: 164 | 165 | - [shivawu/topcoder-greed](https://github.com/shivawu/topcoder-greed) for Topcoder Single Round Match 166 | 167 | kmyk/online-judge-tools と連携するツール: 168 | 169 | - [kmyk/online-judge-template-generator](https://github.com/kmyk/online-judge-template-generator) は競プロの問題を解析して解法コードの入出力部分などを自動生成します 170 | - [kmyk/online-judge-verify-helper](https://github.com/kmyk/online-judge-verify-helper) は競プロライブラリの verify とドキュメントの生成を自動化します 171 | - [Tatamo/atcoder-cli](https://github.com/Tatamo/atcoder-cli) は AtCoder に特化した `oj` コマンドの薄い wrapper です 172 | - [kjnh10/pcm](https://github.com/kjnh10/pcm) は内部で `oj` コマンドを利用しています 173 | - [fukatani/rujaion](https://github.com/fukatani/rujaion) は Rust 
で競プロをすることに特化した IDE です 174 | 175 | ## Maintainers 176 | 177 | - current maintainers 178 | - [@kmyk](https://github.com/kmyk) (AtCoder: [kimiyuki](https://atcoder.jp/users/kimiyuki), Codeforces: [kimiyuki](https://codeforces.com/profile/kimiyuki)) (original author) 179 | - maintainers who are not working now 180 | - [@fukatani](https://github.com/fukatani) (AtCoder: [ryoryoryo111](https://atcoder.jp/users/ryoryoryo111)) 181 | - [@kawacchu](https://github.com/kawacchu) (AtCoder: [kawacchu](https://atcoder.jp/users/kawacchu)) 182 | 183 | ## License 184 | 185 | MIT License 186 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # online-judge-tools/oj 2 | 3 | [![test](https://github.com/online-judge-tools/oj/workflows/test/badge.svg)](https://github.com/online-judge-tools/oj/actions) 4 | [![PyPI](https://img.shields.io/pypi/v/online-judge-tools.svg)](https://pypi.python.org/pypi/online-judge-tools) 5 | [![Downloads](https://pepy.tech/badge/online-judge-tools)](https://pepy.tech/project/online-judge-tools) 6 | [![PyPI](https://img.shields.io/pypi/l/online-judge-tools.svg)](https://github.com/kmyk/online-judge-tools/blob/master/LICENSE) 7 | [![Join the chat at https://gitter.im/online-judge-tools/community](https://badges.gitter.im/online-judge-tools/community.svg)](https://gitter.im/online-judge-tools/community) 8 | 9 | [日本語版の `README.md`](https://github.com/online-judge-tools/oj/blob/master/README.ja.md) 10 | 11 | `oj` is a command to help solving problems on various online judges. This command automates downloading sample cases, generating additional test cases, testing for your code, and submitting it. 
12 | 13 | ## Screencast 14 | 15 | ![screencast](https://user-images.githubusercontent.com/2203128/34708715-568b13c0-f557-11e7-97ef-9f6b646e4776.gif) 16 | 17 | ## Features 18 | 19 | - Download sample cases 20 | - Download system test cases 21 | - Login 22 | - Submit your code 23 | - Test your code 24 | - Test your code for reactive problems 25 | - Generate input files from generators 26 | - Generate output files from input and reference implementation 27 | 28 | For the detailed documentation, read [docs/getting-started.md](https://github.com/online-judge-tools/oj/blob/master/docs/getting-started.md). 29 | 30 | Many online judges (Codeforces, AtCoder, HackerRank, etc.) are supported. 31 | For the full list, see [the table of online-judge-tools/api-client](https://github.com/online-judge-tools/api-client#supported-websites). 32 | 33 | ## How to install 34 | 35 | The package is [![PyPI](https://img.shields.io/pypi/v/online-judge-tools.svg)](https://pypi.python.org/pypi/online-judge-tools). 36 | 37 | ```console 38 | $ pip3 install online-judge-tools 39 | ``` 40 | 41 | For detailed instructions, read [docs/INSTALL.md](https://github.com/online-judge-tools/oj/blob/master/docs/INSTALL.md). 42 | 43 | 44 | ## How to use 45 | 46 | ```console 47 | $ oj download [--system] URL 48 | $ oj login URL 49 | $ oj submit [URL] FILE 50 | $ oj test [-c COMMAND] [TEST...] 51 | $ oj test-reactive [-c COMMAND] JUDGE_COMMAND 52 | $ oj generate-input GENERATOR_COMMAND 53 | $ oj generate-output [-c COMMAND] [TEST...] 54 | ``` 55 | 56 | For details, see `$ oj --help`. 
57 | 58 | ## Example 59 | 60 | ```console 61 | $ oj download http://agc001.contest.atcoder.jp/tasks/agc001_a 62 | [INFO] online-judge-tools 11.2.0 (+ online-judge-api-client 10.8.0) 63 | [INFO] load cookie from: /home/user/.local/share/online-judge-tools/cookie.jar 64 | [NETWORK] GET: https://atcoder.jp/contests/agc001/tasks/agc001_a 65 | [NETWORK] 200 OK 66 | 67 | [INFO] sample 0 68 | [INFO] input: sample-1 69 | 2 70 | 1 3 1 2 71 | 72 | [SUCCESS] saved to: test/sample-1.in 73 | [INFO] output: sample-1 74 | 3 75 | 76 | [SUCCESS] saved to: test/sample-1.out 77 | 78 | [INFO] sample 1 79 | [INFO] input: sample-2 80 | 5 81 | 100 1 2 3 14 15 58 58 58 29 82 | 83 | [SUCCESS] saved to: test/sample-2.in 84 | [INFO] output: sample-2 85 | 135 86 | 87 | [SUCCESS] saved to: test/sample-2.out 88 | 89 | $ cat < main.py 90 | #!/usr/bin/env python3 91 | n = int(input()) 92 | a = list(map(int, input().split())) 93 | ans = max(a) 94 | print(ans) 95 | EOF 96 | 97 | $ oj t -c "python3 main.py" 98 | [INFO] online-judge-tools 11.2.0 (+ online-judge-api-client 10.8.0) 99 | [INFO] 2 cases found 100 | 101 | [INFO] sample-1 102 | [INFO] time: 0.043601 sec 103 | [SUCCESS] AC 104 | 105 | [INFO] sample-2 106 | [INFO] time: 0.043763 sec 107 | [FAILURE] WA 108 | input: 109 | 5 110 | 100 1 2 3 14 15 58 58 58 29 111 | 112 | output: 113 | 3 114 | 115 | expected: 116 | 135 117 | 118 | 119 | [INFO] slowest: 0.043763 sec (for sample-2) 120 | [INFO] max memory: 10.064000 MB (for sample-2) 121 | [FAILURE] test failed: 1 AC / 2 cases 122 | ``` 123 | 124 | ## FAQ 125 | 126 | - Can I use Python (or Rust, D, Java, F#, Haskell, etc.) instead of C++? 127 | - Yes. Please use `--command` (`-c`) option if needed. For example, for Python, you can run `$ oj t -c "python3 main.py"`. 128 | - I usually make one directory per one contest (or, site). Can I keep using this style? 129 | - Yes, you can use the `--directory` (`-d`) option or `$ rm -rf test/`. 
However, we don't recommend this style, because you should make additional test cases by yourself and run stress tests to maximize your rating. 130 | - Can I download all sample cases of all problems at once? 131 | - No, but you can use `oj-prepare` command in [kmyk/online-judge-template-generator](https://github.com/kmyk/online-judge-template-generator). 132 | - Can I automatically compile my source code before testing? 133 | - Yes, use your shell. Run `$ g++ main.cpp && oj t`. 134 | - Can I automatically submit code after it passes tests? 135 | - Yes, use your shell. Run `$ oj t && oj s main.cpp`. By the way, you need to take care of problems whose sample cases are not so strong. 136 | - Can I remove the delays and the `[y/N]` confirmation before submitting code? 137 | - Yes, put `--wait=0` option and `--yes` option to `oj s` subcommand. Of course, we don't recommend this. These options exist for failsafe. For example, please consider a situation where if you save 3 seconds, you will move up 3 places on the standings. In such a case, if you get a penalty of 5 minutes, then you will move down at least 300 places on the standings. 138 | - Are my passwords stored? 139 | - No, your passwords are not stored into any files. This program stores only your session tokens (but of course, they're still credentials). Please read [`onlinejudge/_implementation/command/login.py`](https://github.com/kmyk/online-judge-tools/blob/master/onlinejudge/_implementation/command/login.py). 140 | - Does the config file exist? 141 | - No. You can use your `.bashrc` (or similar files) instead. It's a config file of your shell. Read [man bash](https://linux.die.net/man/1/bash) and write shell aliases or shell functions. For example, if you want to use Python code for tests by default, write `alias oj-test-python='oj t -c "python3 main.py"'` to `.bashrc` and use `$ oj-test-python`. 
142 | 143 | For other questions, use [Gitter](https://gitter.im/online-judge-tools/community) [![Join the chat at https://gitter.im/online-judge-tools/community](https://badges.gitter.im/online-judge-tools/community.svg)](https://gitter.im/online-judge-tools/community) or other SNSs. 144 | 145 | ## Resources 146 | 147 | ### Related Tools 148 | 149 | conflicted: 150 | 151 | - [jmerle/competitive-companion](https://github.com/jmerle/competitive-companion) 152 | - [kyuridenamida/atcoder-tools](https://github.com/kyuridenamida/atcoder-tools) 153 | - [xalanq/cf-tool](https://github.com/xalanq/cf-tool) 154 | - [nodchip/OnlineJudgeHelper](https://github.com/nodchip/OnlineJudgeHelper) 155 | 156 | not conflicted: 157 | 158 | - [shivawu/topcoder-greed](https://github.com/shivawu/topcoder-greed) for Topcoder Single Round Match 159 | 160 | projects collaborating with kmyk/online-judge-tools: 161 | 162 | - [kmyk/online-judge-template-generator](https://github.com/kmyk/online-judge-template-generator) analyzes problems and generates templates including auto-generated input/output parts 163 | - [kmyk/online-judge-verify-helper](https://github.com/kmyk/online-judge-verify-helper) automates testing your library for competitive programming and generate documents 164 | - [Tatamo/atcoder-cli](https://github.com/Tatamo/atcoder-cli) is a thin wrapper optimized for AtCoder 165 | - [kjnh10/pcm](https://github.com/kjnh10/pcm) is a tool which internally uses online-judge-tools 166 | - [fukatani/rujaion](https://github.com/fukatani/rujaion) is an IDE for competitive-programming with Rust 167 | 168 | ## Maintainers 169 | 170 | - current maintainers 171 | - [@kmyk](https://github.com/kmyk) (AtCoder: [kimiyuki](https://atcoder.jp/users/kimiyuki), Codeforces: [kimiyuki](https://codeforces.com/profile/kimiyuki)) (original author) 172 | - maintainers who are not working now 173 | - [@fukatani](https://github.com/fukatani) (AtCoder: [ryoryoryo111](https://atcoder.jp/users/ryoryoryo111)) 174 | - 
[@kawacchu](https://github.com/kawacchu) (AtCoder: [kawacchu](https://atcoder.jp/users/kawacchu)) 175 | 176 | ## License 177 | 178 | MIT License 179 | -------------------------------------------------------------------------------- /docs/INSTALL.ja.md: -------------------------------------------------------------------------------- 1 | # How to Install `oj` command 2 | 3 | [English version of this document](./INSTALL.md) 4 | 5 | 以下の手順を順番に実行してください。 6 | 7 | 1. もし Windows を使っているならば、まず [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/ja-jp/windows/wsl/about) を使ってください。初心者にとって、Linux (特に Ubuntu) はたいてい Windows より簡単であるためです。 8 | - また、Visual Studio Code (あるいは他の IDE) のウィンドウは閉じて、しばらく忘れていてください。IDE に付属するコンソールは利用しないでください。 9 | - もちろん、もしあなたが上級者であったなら、生の Windows 環境で `oj` コマンドを使うことも可能でしょう。 10 | 1. :snake: [Python](https://www.python.org/) をインストールしてください。もし Ubuntu (WSL 内の Ubuntu を含む) を使っているなら `$ sudo apt install python3` を実行すればよいです. 11 | 1. Python がインストールされていることを `$ python3 --version` を実行して確認してください。`Python 3.x.y` のように表示されれば成功です。 12 | - もし `Command 'python3' not found` のように表示されたなら、Python のインストールに失敗しています。 13 | - インストールされている Python のバージョンが古すぎる場合も失敗です。表示された `x` の位置の値は `6` 以上でなければなりません。もし `x` が `6` 未満の場合は、Python を更新してください。 14 | 1. :package: [pip](https://pip.pypa.io/en/stable/) をインストールしてください。もし Ubuntu (WSL 内の Ubuntu を含む) を使っているなら `$ sudo apt install python3-pip` を実行すればよいです。 15 | 1. pip がインストールされていることを `$ pip3 --version` を実行して確認してください。`pip x.y.z ...` のように表示されれば成功です。 16 | - もし `Command 'pip3' not found` のように表示されたなら、pip のインストールに失敗しています。 17 | - もし `pip3` が見付からなかったとしても、`pip3` の代わりに `python3 -m pip` を使うことができるかもしれません。`$ python3 -m pip --version` も試してみましょう。`pip x.y.z ...` のように表示されれば成功です。 18 | - `pip` や `pip2` は使わないでください。`pip3` を使ってください。 19 | 1. 
:dart: `$ pip3 install online-judge-tools` を実行して `oj` コマンドをインストールしてください。`Successfully installed online-judge-tools-x.y.z` (あるいは `Requirement already satisfied: online-judge-tools`) と表示されれば成功です。 20 | - もし `Permission denied` と表示される場合は、`$ pip3 install --user online-judge-tools` や `$ sudo pip3 install online-judge-tools` を代わりに使ってください。 21 | 1. `oj` コマンドがインストールされていることを `$ oj --version` を実行して確認してください。`online-judge-tools x.y.z` のように表示されれば成功です. 22 | - もし `Command 'oj' not found` のように表示される場合は、環境変数 [`PATH`](https://en.wikipedia.org/wiki/PATH_%28variable%29) を設定する必要があります。以下の手順を順番に実行してください。 23 | 1. `oj` コマンドの本体のファイルのパスを `$ find / -name oj 2> /dev/null` を実行して見つけてください。このファイルはたいてい `/home/ubuntu/.local/bin/oj` か `/usr/local/bin/oj` にあります。 24 | 1. 見つかった `oj` ファイルが本当に `oj` コマンドであることを `$ /home/ubuntu/.local/bin/oj --version` を実行して確認してください。 25 | 1. その `oj` ファイルの親ディレクトリを `PATH` に追加してください。たとえばもし `oj` ファイルが `/home/ubuntu/.local/bin/oj` にあるなら、あなたの `~/.bashrc` の末尾に `export PATH="/home/ubuntu/.local/bin:$PATH"` と書き加えてください。 26 | - `export PATH="/home/ubuntu/.local/bin/oj:$PATH"` とは書かないでください。それはディレクトリではありません。 27 | - もしあなたが bash を使っていないなら、あなたの使っているシェルに依存した適切な方法で設定をする必要があります。たとえばもしあなたが macOS を使っているなら、シェルは zsh かもしれません。zsh の場合は、bash の場合と同じコマンドを `~/.zshrc` に書いてください。 28 | 1. 設定を読み込み直すため `source ~/.bashrc` を実行してください。 29 | - もしあなたが bash を使っていないなら、あなたのシェルにおいて適切な方法を使ってください。 30 | 1. `PATH` が設定されたことを `$ echo $PATH` を実行して確認してください。 もしあなたが設定したように表示されれば (たとえば `/home/ubuntu/.local/bin:...`) 成功です。 31 | - もし `ModuleNotFoundError: No module named 'onlinejudge'` のように表示される場合は、あなたの Python 環境は壊れており、`oj` コマンドのインストールに失敗しています。`$ pip3 install --force-reinstall online-judge-tools` を実行して強制的な再インストールを試してみてください。 32 | - もし `SyntaxError: invalid syntax` のように表示される場合は、間違って `pip2` を使ってしまっていたはずです。`$ pip2 uninstall online-judge-tools` を実行してアンインストールし、もう一度やり直してください。 33 | 1. 
以上 34 | 35 | なお、もしあなたが上記の指示のほとんどの文章の意味を理解できない (たとえば、「`$ python3 --version` を実行する」がなにをすることなのか分からないなど) のであれば、誰か親切な友人に直接教えてもらうのがおすすめです。 36 | -------------------------------------------------------------------------------- /docs/INSTALL.md: -------------------------------------------------------------------------------- 1 | # How to Install `oj` command 2 | 3 | [このドキュメントの日本語バージョン](./INSTALL.ja.md) 4 | 5 | Do following steps. 6 | 7 | 1. If you use a Windows environment, use [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/about). For beginners, Linux (especially, Ubuntu) is often easier than Windows. 8 | - Also, if you use Visual Studio Code (or other IDEs), close it and forget it for a while. Don't use consoles in IDEs. 9 | - Of course, if you were a professional, you could use `oj` command in the raw Windows environment. 10 | 1. :snake: Install [Python](https://www.python.org/). If you use Ubuntu (including Ubuntu in WSL), run `$ sudo apt install python3`. 11 | 1. Check your Python with running `$ python3 --version`. If it says like `Python 3.x.y`, it's OK. 12 | - If it says something like `Command 'python3' not found`, you have failed to install Python. 13 | - If the version of Python is too old, it's not OK. The `x` must be greater than or equal to `6`. If `x` is lower than `6`, upgrade your Python. 14 | 1. :package: Install [pip](https://pip.pypa.io/en/stable/). If you use Ubuntu (including Ubuntu in WSL), run `$ sudo apt install python3-pip`. 15 | 1. Check your pip with running `$ pip3 --version`. If it says something like `pip x.y.z ...`, it's OK. 16 | - If it says something like `Command 'pip3' not found`, you have failed to install pip. 17 | - Even if `pip3` is not found, you may be able to use `python3 -m pip` instead of `pip3`. Try `$ python3 -m pip --version`. If it says `pip x.y.z ...`, it's OK. 18 | - Don't use `pip` or `pip2`. Use `pip3`. 19 | 1. :dart: Run `$ pip3 install online-judge-tools` to install `oj` command. 
If it says `Successfully installed online-judge-tools-x.y.z` (or, `Requirement already satisfied: online-judge-tools`), it's OK. 20 | - If it says `Permission denied`, run `$ pip3 install --user online-judge-tools` or `$ sudo pip3 install online-judge-tools`. 21 | 1. Check `oj` command with `$ oj --version`. If it says something like `online-judge-tools x.y.z`, it's OK. 22 | - If it says something like `Command 'oj' not found`, you need to set [`PATH`](https://en.wikipedia.org/wiki/PATH_%28variable%29). Do following steps. 23 | 1. Find the path of the `oj` file with running `$ find / -name oj 2> /dev/null`. The file is often at `/home/ubuntu/.local/bin/oj` or `/usr/local/bin/oj`. 24 | 1. Check the found `oj` file is actually `oj`, with running `$ /home/ubuntu/.local/bin/oj --version`. 25 | 1. Add the directory which contains the `oj` to your `PATH`. For example, if `oj` is `/home/ubuntu/.local/bin/oj`, write `export PATH="/home/ubuntu/.local/bin:$PATH"` in the end of `~/.bashrc`. 26 | - Don't write `export PATH="/home/ubuntu/.local/bin/oj:$PATH"`. It's not a directory. 27 | - If you don't use bash, write the right settings to the right file depending on your shell. For example, if you use macOS, your shell might be zsh. For zsh, write the same command to `~/.zshrc`. 28 | 1. Reload the configuration with `source ~/.bashrc`. 29 | - If you don't use bash, use the appropriate way for your shell. 30 | 1. Check your `PATH` with `$ echo $PATH`. If it says as you specified (e.g. `/home/ubuntu/.local/bin:...`), it's OK. 31 | - If it says something like `ModuleNotFoundError: No module named 'onlinejudge'`, your Python environment is broken and you have failed to install `oj` command. Run `$ pip3 install --force-reinstall online-judge-tools` to reinstall ignoring the old one. 32 | - If it says something like `SyntaxError: invalid syntax`, you have used `pip2` by mistake. Run `$ pip2 uninstall online-judge-tools`, and retry to install. 33 | 1. That's all.
34 | 35 | If you couldn't read many sentences of above instructions (e.g. if you didn't know what "run `$ python3 --version`" means), please ask your friends for help. 36 | -------------------------------------------------------------------------------- /docs/getting-started.ja.md: -------------------------------------------------------------------------------- 1 | # Getting Started for `oj` command (日本語) 2 | 3 | [English version of this document](./getting-started.md) 4 | 5 | `oj` コマンドは競技プログラミングを行う上で存在する典型作業を自動化するためのコマンドです。 6 | 7 | 8 | ## インストール 9 | 10 | Python が導入されている環境であれば、次のコマンドだけでインストールができます。 11 | 12 | ```console 13 | $ pip3 install --user online-judge-tools 14 | ``` 15 | 16 | OS には Linux (Windows Subsystem for Linux を含む) か macOS を推奨しますが、Windows 上でも動作します。 17 | 18 | 詳細な手順については [docs/INSTALL.ja.md](./INSTALL.ja.md) を読んでください。 19 | 20 | 21 | ## サンプルケースのテスト 22 | 23 | あなたは提出前にサンプルケースを使ったテストをしていますか? 24 | 面倒に感じて省略してしまったことはありませんか? 25 | サンプルすら合っていない実装を提出しても無為にペナルティを増やすだけなので、提出前には常にテストを行なうべきです。 26 | デバッグの際にも有用であり、プログラムを書き換えるたびにサンプルケースを試すとよいでしょう。 27 | 28 | しかし、「問題ページを開いてサンプル入力をコピーし、プログラムを実行しそこに貼り付け、出力結果とサンプル出力を比較する」という作業をサンプルケースの個数だけ行なう、これを毎回やるのはかなり面倒です。 29 | 面倒な作業を手動で行うのは省略されたり間違えたりしやすく、よくありません。 30 | この問題は、自動化によって解決できます。 31 | 32 | `oj` コマンドを使えばサンプルケースのテストの自動化が可能です。 33 | 具体的には以下を自動でしてくれます: 34 | 35 | 1. 問題ページを開いてサンプルを取得する 36 | 2. プログラムを実行してサンプルを入力として与える 37 | 3. 
プログラムの出力とサンプルの出力を比較する 38 | 39 | サンプルのダウンロードは `oj d URL` 40 | で行なえ、ダウンロードしたサンプルに対するテストは `oj t` で行なえます。 41 | たとえば以下のように使います。 42 | 43 | ```console 44 | $ oj d https://atcoder.jp/contests/agc001/tasks/agc001_a 45 | [x] problem recognized: AtCoderProblem.from_url('https://atcoder.jp/contests/agc001/tasks/agc001_a') 46 | [x] load cookie from: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 47 | [x] GET: https://atcoder.jp/contests/agc001/tasks/agc001_a 48 | [x] 200 OK 49 | [x] save cookie to: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 50 | [x] append history to: /home/ubuntu/.cache/online-judge-tools/download-history.jsonl 51 | 52 | [*] sample 0 53 | [x] input: 入力例 1 54 | 2 55 | 1 3 1 2 56 | [+] saved to: test/sample-1.in 57 | [x] output: 出力例 1 58 | 3 59 | [+] saved to: test/sample-1.out 60 | 61 | [*] sample 1 62 | [x] input: 入力例 2 63 | 5 64 | 100 1 2 3 14 15 58 58 58 29 65 | [+] saved to: test/sample-2.in 66 | [x] output: 出力例 2 67 | 135 68 | [+] saved to: test/sample-2.out 69 | 70 | $ g++ main.cpp 71 | 72 | $ oj t 73 | [*] 2 cases found 74 | 75 | [*] sample-1 76 | [x] time: 0.003978 sec 77 | [+] AC 78 | 79 | [*] sample-2 80 | [x] time: 0.004634 sec 81 | [-] WA 82 | output: 83 | 3 84 | 85 | expected: 86 | 135 87 | 88 | 89 | [x] slowest: 0.004634 sec (for sample-2) 90 | [x] max memory: 2.344000 MB (for sample-1) 91 | [-] test failed: 1 AC / 2 cases 92 | ``` 93 | 94 | `oj t` の基本的な機能は `test/sample-1.in`, `test/sample-1.out` 95 | などのファイルを用意した上で 96 | `for f in test/*.in ; do diff <(./a.out < $f) ${f/.in/.out} ; done` 97 | を実行するのとほぼ等価です。 `./a.out` 以外のコマンド (たとえば 98 | `python3 main.py`) に対してテストをしたい場合は `-c` 99 | オプションを使ってください (たとえば `oj t -c "python3 main.py"`)。 100 | サンプルでなくシステムテストに使われるテストケースを取得したい場合は 101 | `--system` オプションを使ってください。 その他の機能について確認するには 102 | `oj d --help` や `oj t --help` を実行してください。 103 | 104 | 105 | ## 提出 106 | 107 | 実装した解法の提出を行う際には、「プログラムの提出先となる問題」と「提出するプログラムの実装言語」をマウスで選択しソースコードをテキストボックスにコピペして送信ボタンを押すことが一般的です。 108 | 
ところで、提出時に「提出先の問題」「提出の言語」の選択を間違えてしまいペナルティを食らった経験はありますか? 109 | もしそのような経験が一度でもあるのなら、提出を自動化することをおすすめします。 110 | 111 | `oj` コマンドを使えば提出の自動化が可能です。 たとえば、問題 112 | にファイル `main.cpp` 113 | を提出したいときは `oj s https://codeforces.com/contest/1200/problem/F` 114 | を実行すればよいです。実際に実行したときの出力は次のようになります: 115 | 116 | ```console 117 | $ oj s https://codeforces.com/contest/1200/problem/F main.cpp 118 | [x] read history from: /home/ubuntu/.cache/online-judge-tools/download-history.jsonl 119 | [x] found urls in history: 120 | https://codeforces.com/contest/1200/problem/F 121 | [x] problem recognized: CodeforcesProblem.from_url('https://codeforces.com/contest/1200/problem/F'): https://codeforces.com/contest/1200/problem/F 122 | [*] code (2341 byte): 123 | #include 124 | #define REP(i, n) for (int i = 0; (i) < (int)(n); ++ (i)) 125 | using namespace std; 126 | 127 | 128 | constexpr int MAX_M = 10; 129 | constexpr int MOD = 2520; // lcm of { 1, 2, 3, ..., 10 } 130 | int main() { 131 | // config 132 | int n; scanf("%d", &n); 133 | ... (62 lines) ... 134 | 135 | // query 136 | int q; scanf("%d", &q); 137 | while (q --) { 138 | int x, c; scanf("%d%d", &x, &c); 139 | -- x; 140 | printf("%d\n", solve1(x, c)); 141 | } 142 | return 0; 143 | } 144 | 145 | [x] load cookie from: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 146 | [x] GET: https://codeforces.com/contest/1200/problem/F 147 | [x] 200 OK 148 | [x] both GCC and Clang are available for C++ compiler 149 | [x] use: GCC 150 | [*] chosen language: 54 (GNU G++17 7.3.0) 151 | [x] sleep(3.00) 152 | Are you sure? 
[y/N] y 153 | [x] GET: https://codeforces.com/contest/1200/problem/F 154 | [x] 200 OK 155 | [x] POST: https://codeforces.com/contest/1200/problem/F 156 | [x] redirected: https://codeforces.com/contest/1200/my 157 | [x] 200 OK 158 | [+] success: result: https://codeforces.com/contest/1200/my 159 | [x] open the submission page with: sensible-browser 160 | [1513:1536:0910/223148.485554:ERROR:browser_process_sub_thread.cc(221)] Waited 5 ms for network service 161 | Opening in existing browser session. 162 | [x] save cookie to: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 163 | ``` 164 | 165 | (ただし、提出にはログインが必要なので、事前に 166 | `oj login https://atcoder.jp/` を実行しておいてください。 167 | [Selenium](https://www.seleniumhq.org/) が導入 168 | (`apt install python3-selenium firefox-geckodriver` などを実行) 169 | されていれば GUI 170 | ブラウザが起動するので、その上で普通にログインをしてください。 Selenium 171 | がない場合は CUI 上で直接ユーザ名とパスワードが聞かれます。) 172 | 173 | 同じディレクトリで以前に `oj d URL` を実行したことがあれば、単に 174 | `oj s main.cpp` とするだけで URL を推測して提出してくれます。 URL 175 | の指定ミスを防ぐために、こちらの省力形の利用を推奨しています。 176 | また、言語は自動で認識され適切に設定されます。 177 | 178 | 179 | ## ランダムテスト 180 | 181 | 「実装をしてサンプルが合ったので提出をしたのに、 WA や RE 182 | になってしまった。しかし原因がまったく分からない」という状況になったとき、どうすればいいでしょうか? 183 | これにはランダム生成したケースを使ってのデバッグが有効です。 184 | 具体的には次のようにします。 185 | 186 | 1. 制約を満たす入力をランダムに生成するようなプログラムを実装する 187 | 2. (1.) のプログラムを使ってテストケースの入力をたくさん用意する 188 | 3. (もし可能なら、遅くても確実に正しい答えを出力するような愚直解を実装し、対応する出力をたくさん用意する) 189 | 4. (2.), (3.) 190 | で作ったテストケースを使って、問題のプログラムをテストする 191 | 5. (4.) で見つかった撃墜ケースを分析してバグを見つける 192 | 193 | `oj` コマンドには、これを助ける機能もあります。 (2.) には `oj g/i` 194 | というコマンド、 (3.) には `oj g/o` というコマンドが使えます。 また (1.) 
195 | のプログラムを半自動生成するためのツール 196 | [online-judge-tools/template-generator](https://github.com/online-judge-tools/template-generator) 197 | も用意されています。 198 | 199 | たとえば 200 | 201 | に対して以下のように利用します。 202 | 203 | ```console 204 | $ cat generate.py 205 | #!/usr/bin/env python3 206 | import random 207 | N = random.randint(1, 100) 208 | W = random.randint(1, 10000) 209 | print(N, W) 210 | for _ in range(N): 211 | v = random.randint(1, 1000) 212 | w = random.randint(1, 1000) 213 | print(v, w) 214 | 215 | $ oj g/i ./generate.py 216 | 217 | [*] random-000 218 | [x] generate input... 219 | [x] time: 0.041610 sec 220 | input: 221 | 1 4138 222 | 505 341 223 | 224 | [+] saved to: test/random-000.in 225 | 226 | ... 227 | 228 | [*] random-099 229 | [x] generate input... 230 | [x] time: 0.036598 sec 231 | input: 232 | 9 2767 233 | 868 762 234 | 279 388 235 | 249 673 236 | 761 227 237 | 958 971 238 | 589 590 239 | 34 100 240 | 689 635 241 | 781 361 242 | 243 | [+] saved to: test/random-099.in 244 | 245 | $ cat tle.cpp 246 | #include 247 | #define REP(i, n) for (int i = 0; (i) < (int)(n); ++ (i)) 248 | using namespace std; 249 | 250 | int main() { 251 | // input 252 | int N, W; cin >> N >> W; 253 | vector v(N), w(N); 254 | REP (i, N) { 255 | cin >> v[i] >> w[i]; 256 | } 257 | 258 | // solve 259 | int answer = 0; 260 | REP (x, 1 << N) { 261 | int sum_v = 0; 262 | int sum_w = 0; 263 | REP (i, N) if (x & (1 << i)) { 264 | sum_v += v[i]; 265 | sum_w += w[i]; 266 | } 267 | if (sum_w <= W) { 268 | answer = max(answer, sum_v); 269 | } 270 | } 271 | 272 | // output 273 | cout << answer << endl; 274 | return 0; 275 | } 276 | 277 | $ g++ tle.cpp -o tle 278 | 279 | $ oj g/o -c ./tle 280 | [*] 102 cases found 281 | 282 | [*] random-000 283 | [x] time: 0.003198 sec 284 | 505 285 | 286 | [+] saved to: test/random-000.out 287 | 288 | ... 
289 | 290 | [*] random-099 291 | [x] time: 0.005680 sec 292 | 3722 293 | 294 | [+] saved to: test/random-099.out 295 | 296 | [*] sample-1 297 | [*] output file already exists. 298 | [*] skipped. 299 | 300 | [*] sample-2 301 | [*] output file already exists. 302 | [*] skipped. 303 | ``` 304 | 305 | `oj g/i ./generate.py` の基本的な機能は 306 | `for i in $(seq 100) ; do ./generate.py > test/random-$i.in ; done` 307 | とだいたい等価であり、 `oj g/o` の基本的な機能は 308 | `for i in test/*.in ; do ./a.out < $f > ${f/.in/.out} ; done` 309 | とだいたい等価です。 310 | なかなか撃墜ケースが見つからない場合のために、より効率的に行なうオプション 311 | `--hack` や並列化オプション `-j` なども用意されています。 312 | 313 | 314 | ## 特殊な形式の問題に対するテスト 315 | 316 | ### 誤差ジャッジ 317 | 318 | 「絶対誤差あるいは相対誤差で 10⁻⁶ 319 | 以内の出力を正答とします」のような問題に対するテストは、 `-e` 320 | オプションで対応できます。 たとえば `oj t -e 1e-6` とします。 321 | 322 | ### 解が複数ある問題 323 | 324 | 実装したプログラムの中で 325 | [assert](https://cpprefjp.github.io/reference/cassert/assert.html) 326 | を用いることで、解答の正当性を簡易にチェックすることが可能です。 327 | 328 | あるいは、複雑なチェックが必要な場合や想定解答の内容をチェックに用いたい場合は、ジャッジ側のプログラムを自作して解答の正否の判定に用いることができます。 329 | たとえば問題 330 | であれば、次のようなジャッジ側プログラムを書いて `judge.py` 331 | という名前で保存し、 `oj t --judge-command "python3 judge.py"` 332 | とすればテストが実行されます。 333 | 334 | ```python 335 | import sys 336 | # input 337 | with open(sys.argv[1]) as testcase: 338 | A, B, C, D, E, F = list(map(int, testcase.readline().split())) 339 | with open(sys.argv[2]) as your_output: 340 | y_all, y_sugar = list(map(int, your_output.readline().split())) 341 | with open(sys.argv[3]) as expected_output: 342 | e_all, e_sugar = list(map(int, expected_output.readline().split())) 343 | # check 344 | assert 100 * A <= y_all <= F 345 | y_water = y_all - y_sugar 346 | assert any(100 * A * i + 100 * B * j == y_water for i in range(3001) for j in range(3001)) 347 | assert any(C * i + D * j == y_sugar for i in range(3001) for j in range(3001)) 348 | assert y_sugar <= E * y_water / 100 349 | assert y_sugar * e_all == e_sugar * y_all 350 | assert (e_sugar > 0 and y_sugar == 0) is False 351 | ``` 352 
| 353 | ジャッジ側のプログラムは、テストケースの入力、解答(あなたのプログラムの出力)、想定解答をファイル入力を用いて取得することができます。 354 | judgeのコマンドは `<command> <input> <your_output> <expected_output>` 355 | のように実行され、 `<command>` 356 | には引数で指定したジャッジの実行コマンドが入ります。 `<input>` , 357 | `<your_output>` , `<expected_output>` 358 | にはそれぞれ、テストケースの入力、解答、想定解答が格納されたファイルのパスが入ります。 359 | サンプルに示すようにコマンドライン引数を用いて各ファイルを読み込み、解答の正否を判定してください。 360 | ジャッジプログラムの終了コードが0になった場合に正答(AC)となり、それ以外は誤答(WA)となります。 361 | 362 | ### リアクティブ問題 363 | 364 | ジャッジプログラムと対話的に動作するプログラムを提出する問題があります。 365 | これをテストするためのコマンド `oj t/r` が用意されています。 366 | 367 | たとえば問題 368 | であれば、次のようなジャッジ側プログラムを書いて `judge.py` 369 | という名前で保存し、 `oj t/r ./judge.py` 370 | とすればテストが実行されます。 371 | 372 | ```python 373 | #!/usr/bin/env python3 374 | import sys 375 | import random 376 | n = random.randint(1, 10 ** 6) 377 | print('[*] n =', n, file=sys.stderr) 378 | for i in range(25 + 1): 379 | s = input() 380 | if s.startswith('!'): 381 | x = int(s.split()[1]) 382 | assert x == n 383 | exit() 384 | else: 385 | print('<' if n < int(s) else '>=') 386 | sys.stdout.flush() 387 | assert False 388 | ``` 389 | 390 | 391 | ## 対応しているサービスの一覧 392 | 393 | [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client#supported-websites) にある表を参照してください。 394 | 395 | 396 | ## 存在しない機能 397 | 398 | 「それが何であるか」を説明するには「何ができるか」を言う必要がありますが、それだけでは十分ではありません。 399 | 「何ができないか」についても言うべきです。 400 | 401 | `oj` コマンドには、次のような機能は存在しません: 402 | 403 | - コンテストのためのディレクトリを一括で用意する機能 404 | 405 | 「コンテストのためのディレクトリを一括で用意する機能」は `oj` コマンドには含まれていません。この機能がほしい場合は [online-judge-tools/template-generator](https://github.com/online-judge-tools/template-generator/blob/master/README.ja.md) に含まれている `oj-prepare` コマンドや、[@Tatamo](https://github.com/Tatamo) が作成している [Tatamo/atcoder-cli](https://github.com/Tatamo/atcoder-cli) などを利用してください。 406 | 407 | `oj` コマンドは「個々の問題を解くことを (主にテストの実行によって) 助ける」ためのコマンドであり、それ以外は責任の範囲外です。 408 | 409 | - テンプレートを生成する機能 410 | 411 | 競技プログラミングの問題を解析してその問題専用の main 関数や入出力部分を生成するには、[online-judge-tools/template-generator](https://github.com/online-judge-tools/template-generator/blob/master/README.ja.md) に含まれている `oj-template`
コマンドを使ってください。 412 | 同様の機能は [kyuridenamida/atcoder-tools](https://github.com/kyuridenamida/atcoder-tools) にもあります。 413 | 414 | - 自動でコンパイルする機能 415 | 416 | シェルの機能を使えば十分であるため、そのような機能はありません。 417 | テストの前に再コンパイルしたいのなら `$ g++ main.cpp && oj t` などとしてください。 418 | 419 | また、コンパイルの方法などは言語や環境の数だけ無数にあり、すべて対応していくのはかなり大変なためです。また、マイナー言語のユーザやマイナーなオンラインジャッジのユーザを無視したくはありません。 420 | 421 | - 提出予約をする機能 422 | 423 | シェルの機能を使えば十分であるため、そのような機能はありません。 424 | たとえば 1 時間後に提出するには `$ sleep 3600 && oj s --yes main.cpp` としてください。 425 | 426 | - 設定ファイル 427 | 428 | シェルの設定ファイル (`~/.bashrc` など) を代わりに利用してください。 429 | alias やシェル関数を書いてください。 430 | 431 | 設定ファイルはある種の「隠れた状態」を導入し、メンテナンスやサポートのコストを増大させるためです。 432 | 内部で HTTP の通信に使っているクッキー (+ 例外として、提出先 URL の推測 `oj s` のための履歴) 以外は、入力されたコマンドのみに依存して動作します。 433 | -------------------------------------------------------------------------------- /docs/getting-started.md: -------------------------------------------------------------------------------- 1 | # Getting Started for `oj` command 2 | 3 | [このドキュメントの日本語バージョン](./getting-started.ja.md) 4 | 5 | `oj` command is a command to automate typical tasks that exist in 6 | competitive programming. 7 | 8 | 9 | ## How to Install 10 | 11 | You can install with the following command if Python is already 12 | installed. 13 | 14 | ```console 15 | $ pip3 install --user online-judge-tools 16 | ``` 17 | 18 | Linux (including Windows Subsystem for Linux) or macOS is recommended for the OS, but it also works on Windows. 19 | 20 | For detailed instructions, read [docs/INSTALL.md](./INSTALL.md). 21 | 22 | 23 | ## Testing with sample cases 24 | 25 | Do you test with sample cases before submission? Have you ever felt it 26 | troublesome and omitted? You should always test before submitting, since 27 | submitting a solution that doesn't even pass the sample cases will just 28 | make a penalty. It is also useful for debugging, so you should test your 29 | program with the sample cases every time you rewrite your program. 
30 | 31 | However, "opening the problem page, copying the sample inputs, running 32 | the program, pasting it into shell, and comparing the output result with 33 | the sample output" is tedious tasks. Doing this for every sample case 34 | and for every submission is quite troublesome. Doing the tedious tasks 35 | manually is easy to be omitted or be mistaken. This problem can be 36 | solved by automation. 37 | 38 | By `oj` command, you can automate testing with sample cases. 39 | Specifically, it automatically does the following: 40 | 41 | 1. Open the problem page and get sample cases 42 | 2. Run the program and give the sample inputs 43 | 3. Compare program outputs with sample outputs 44 | 45 | You can download the sample cases by `oj d URL` and test your solution 46 | with the downloaded sample cases by `oj t`. For example: 47 | 48 | ```console 49 | $ oj d https://atcoder.jp/contests/agc001/tasks/agc001_a 50 | [x] problem recognized: AtCoderProblem.from_url('https://atcoder.jp/contests/agc001/tasks/agc001_a') 51 | [x] load cookie from: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 52 | [x] GET: https://atcoder.jp/contests/agc001/tasks/agc001_a 53 | [x] 200 OK 54 | [x] save cookie to: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 55 | [x] append history to: /home/ubuntu/.cache/online-judge-tools/download-history.jsonl 56 | 57 | [*] sample 0 58 | [x] input: Input example 1 59 | 2 60 | 1 3 1 2 61 | [+] saved to: test/sample-1.in 62 | [x] output: Input example 1 63 | 3 64 | [+] saved to: test/sample-1.out 65 | 66 | [*] sample 1 67 | [x] input: Input example 2 68 | 5 69 | 100 1 2 3 14 15 58 58 58 29 70 | [+] saved to: test/sample-2.in 71 | [x] output: Sample output 2 72 | 135 73 | [+] saved to: test/sample-2.out 74 | 75 | $ g++ main.cpp 76 | 77 | $ oj t 78 | [*] 2 cases found 79 | 80 | [*] sample-1 81 | [x] time: 0.003978 sec 82 | [+] AC 83 | 84 | [*] sample-2 85 | [x] time: 0.004634 sec 86 | [-] WA 87 | output: 88 | 3 89 | 90 | expected: 91 | 135 
92 | 93 | 94 | [x] slowest: 0.004634 sec (for sample-2) 95 | [x] max memory: 2.344000 MB (for sample-1) 96 | [-] test failed: 1 AC / 2 cases 97 | ``` 98 | 99 | The basic feature of `oj t` is almost equivalent to preparing files such 100 | as `test/sample-1.in`, `test/sample-1.out` and then running 101 | `for f in test/*.in ; do diff <(./a.out < $f) ${f/.in/.out} ; done`. If 102 | you want to test against commands other than `./a.out` (e.g. 103 | `python3 main.py`), use the `-c` option (e.g. 104 | `oj t -c "python3 main.py"`). Use the `--system` option if you want to 105 | get testcases that are used for system tests instead of samples. Run 106 | `oj d --help` or `oj t --help` to see other features. 107 | 108 | 109 | ## Submit 110 | 111 | When submitting your solution, you have to select "Problem to submit 112 | for" and "Language of the solution" with your mouse, copy and paste the 113 | source code into the text box, and click the send button. This series of 114 | operations is tedious. Have you ever experienced a penalty when you made 115 | a mistake in selecting the "problem" or "language" at the time of 116 | submission? If you have any such experience, we recommend automating 117 | submission. 118 | 119 | By `oj` command, you can automate submission. For example, if you 120 | want to submit the file `main.cpp` to the problem 121 | , you can do 122 | `oj s https://codeforces.com/contest/1200/problem/F`.
The actual output 123 | is as follows: 124 | 125 | ```console 126 | $ oj d https://atcoder.jp/contests/agc001/tasks/agc001_a 127 | [x] problem recognized: AtCoderProblem.from_url('https://atcoder.jp/contests/agc001/tasks/agc001_a') 128 | [x] load cookie from: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 129 | [x] GET: https://atcoder.jp/contests/agc001/tasks/agc001_a 130 | [x] 200 OK 131 | [x] save cookie to: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 132 | [x] append history to: /home/ubuntu/.cache/online-judge-tools/download-history.jsonl 133 | 134 | [*] sample 0 135 | [x] input: Input example 1 136 | 2 137 | 1 3 1 2 138 | [+] saved to: test/sample-1.in 139 | [x] output: Input example 1 140 | 3 141 | [+] saved to: test/sample-1.out 142 | 143 | [*] sample 1 144 | [x] input: Input example 2 145 | 5 146 | 100 1 2 3 14 15 58 58 58 29 147 | [+] saved to: test/sample-2.in 148 | [x] output: Sample output 2 149 | 135 150 | [+] saved to: test/sample-2.out 151 | 152 | $ g++ main.cpp 153 | 154 | $ oj t 155 | [*] 2 cases found 156 | 157 | [*] sample-1 158 | [x] time: 0.003978 sec 159 | [+] AC 160 | 161 | [*] sample-2 162 | [x] time: 0.004634 sec 163 | [-] WA 164 | output: 165 | 3 166 | 167 | expected: 168 | 135 169 | 170 | 171 | [x] slowest: 0.004634 sec (for sample-2) 172 | [x] max memory: 2.344000 MB (for sample-1) 173 | [-] test failed: 1 AC / 2 cases 174 | $ oj s https://codeforces.com/contest/1200/problem/F main.cpp 175 | [x] read history from: /home/ubuntu/.cache/online-judge-tools/download-history.jsonl 176 | [x] found urls in history: 177 | https://codeforces.com/contest/1200/problem/F 178 | [x] problem recognized: CodeforcesProblem.from_url('https://codeforces.com/contest/1200/problem/F'): https://codeforces.com/contest/1200/problem/F 179 | [*] code (2341 byte): 180 | #include 181 | #define REP(i, n) for (int i = 0; (i) < (int)(n); ++ (i)) 182 | using namespace std; 183 | 184 | 185 | constexpr int MAX_M = 10; 186 | constexpr int MOD = 2520; 
// lcm of { 1, 2, 3, ..., 10 } 187 | int main() { 188 | // config 189 | int n; scanf("%d", &n); 190 | ... (62 lines) ... 191 | 192 | // query 193 | int q; scanf("%d", &q); 194 | while (q --) { 195 | int x, c; scanf("%d%d", &x, &c); 196 | -- x; 197 | printf("%d\n", solve1(x, c)); 198 | } 199 | return 0; 200 | } 201 | 202 | [x] load cookie from: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 203 | [x] GET: https://codeforces.com/contest/1200/problem/F 204 | [x] 200 OK 205 | [x] both GCC and Clang are available for C++ compiler 206 | [x] use: GCC 207 | [*] chosen language: 54 (GNU G++17 7.3.0) 208 | [x] sleep(3.00) 209 | Are you sure? [y/N] y 210 | [x] GET: https://codeforces.com/contest/1200/problem/F 211 | [x] 200 OK 212 | [x] POST: https://codeforces.com/contest/1200/problem/F 213 | [x] redirected: https://codeforces.com/contest/1200/my 214 | [x] 200 OK 215 | [+] success: result: https://codeforces.com/contest/1200/my 216 | [x] open the submission page with: sensible-browser 217 | [1513:1536:0910/223148.485554:ERROR:browser_process_sub_thread.cc(221)] Waited 5 ms for network service 218 | Opening in existing browser session. 219 | [x] save cookie to: /home/ubuntu/.local/share/online-judge-tools/cookie.jar 220 | ``` 221 | 222 | (However, since login is required for submission, please execute 223 | `oj login https://atcoder.jp/` in advance. If 224 | [Selenium](https://www.seleniumhq.org/) is installed 225 | (`apt install python3-selenium firefox-geckodriver` etc. is executed), 226 | the GUI browser will start, so please login normally on it. (If you 227 | don't have Selenium, you will be asked for your username and password 228 | directly on the CUI.) 229 | 230 | If you already executed `oj d URL` in the same directory, 231 | `oj s main.cpp` will guess the URL and submit it. In order to prevent 232 | URL specification mistakes, we recommend using this labor-saving form. 233 | The language is automatically recognized and set appropriately. 
234 | 235 | 236 | ## Random testing 237 | 238 | What should you do when you get a situation where you implemented your 239 | solution and submitted it because it passes the sample cases but it gets 240 | WA or RE and you don't know the cause at all? In such a situation, you 241 | can debug using randomly generated cases. Specifically: 242 | 243 | 1. Implement a program that randomly generates test inputs that 244 | satisfy the constraints 245 | 2. Prepare many test inputs with the program of (1.) 246 | 3. (If possible, implement a straightforward solution that you can 247 | believe that it always outputs the correct answer, and prepare the 248 | test outputs for the inputs) 249 | 4. Test your solution using the testcases generated in (2.) and (3.) 250 | 5. Analyze the hack case found in (4.) to find bugs 251 | 252 | `oj` command also has features to help with this. You can use the 253 | command `oj g/i` for (2.) and the command `oj g/o` for (3.). Also, another command [online-judge-tools/template-generator](https://github.com/online-judge-tools/template-generator) can automatically generate a program of (1.). 254 | 255 | For example, for a problem 256 | , you 257 | can use `oj` command as follows. 258 | 259 | ```console 260 | $ cat generate.py 261 | #!/usr/bin/env python3 262 | import random 263 | N = random.randint(1, 100) 264 | W = random.randint(1, 10000) 265 | print(N, W) 266 | for _ in range(N): 267 | v = random.randint(1, 1000) 268 | w = random.randint(1, 1000) 269 | print(v, w) 270 | 271 | $ oj g/i ./generate.py 272 | 273 | [*] random-000 274 | [x] generate input... 275 | [x] time: 0.041610 sec 276 | input: 277 | 1 4138 278 | 505 341 279 | 280 | [+] saved to: test/random-000.in 281 | 282 | ... 283 | 284 | [*] random-099 285 | [x] generate input...
286 | [x] time: 0.036598 sec 287 | input: 288 | 9 2767 289 | 868 762 290 | 279 388 291 | 249 673 292 | 761 227 293 | 958 971 294 | 589 590 295 | 34 100 296 | 689 635 297 | 781 361 298 | 299 | [+] saved to: test/random-099.in 300 | 301 | $ cat tle.cpp 302 | #include <bits/stdc++.h> 303 | #define REP(i, n) for (int i = 0; (i) < (int)(n); ++ (i)) 304 | using namespace std; 305 | 306 | int main() { 307 | // input 308 | int N, W; cin >> N >> W; 309 | vector<int> v(N), w(N); 310 | REP (i, N) { 311 | cin >> v[i] >> w[i]; 312 | } 313 | 314 | // solve 315 | int answer = 0; 316 | REP (x, 1 << N) { 317 | int sum_v = 0; 318 | int sum_w = 0; 319 | REP (i, N) if (x & (1 << i)) { 320 | sum_v += v[i]; 321 | sum_w += w[i]; 322 | } 323 | if (sum_w <= W) { 324 | answer = max(answer, sum_v); 325 | } 326 | } 327 | 328 | // output 329 | cout << answer << endl; 330 | return 0; 331 | } 332 | 333 | $ g++ tle.cpp -o tle 334 | 335 | $ oj g/o -c ./tle 336 | [*] 102 cases found 337 | 338 | [*] random-000 339 | [x] time: 0.003198 sec 340 | 505 341 | 342 | [+] saved to: test/random-000.out 343 | 344 | ... 345 | 346 | [*] random-099 347 | [x] time: 0.005680 sec 348 | 3722 349 | 350 | [+] saved to: test/random-099.out 351 | 352 | [*] sample-1 353 | [*] output file already exists. 354 | [*] skipped. 355 | 356 | [*] sample-2 357 | [*] output file already exists. 358 | [*] skipped. 359 | ``` 360 | 361 | The basic feature of `oj g/i ./generate.py` is almost equivalent to 362 | `for i in $(seq 100) ; do ./generate.py > test/random-$i.in ; done`. And the 363 | basic feature of `oj g/o` is almost equivalent to 364 | `for f in test/*.in ; do ./a.out < $f > ${f/.in/.out} ; done`. There are 365 | some ways such as `--hack` option and parallelization option `-j`, etc., 366 | for cases where it is difficult to find hacking cases. 367 | 368 | 369 | ## Test for problems with special judge 370 | 371 | ### Problems with accepted errors 372 | 373 | You can use the `-e` option for problems with errors, e.g.
problems 374 | which accept answers whose absolute or relative error is within 375 | 10⁻⁶. In this case, use `oj t -e 1e-6`. 376 | 377 | ### Problems with multiple solutions 378 | 379 | You can validate simply by using 380 | [assert](https://cpprefjp.github.io/reference/cassert/assert.html) 381 | in your solution. 382 | 383 | Also, you can write a program for the judge side, and use it for 384 | test. For example, if the problem is 385 | , write the 386 | following program as the judge program and save it as `judge.py`, 387 | then `oj t --judge-command "python3 judge.py"` will run the tests. 388 | 389 | ```python 390 | import sys 391 | # input 392 | with open(sys.argv[1]) as testcase: 393 | A, B, C, D, E, F = list(map(int, testcase.readline().split())) 394 | with open(sys.argv[2]) as your_output: 395 | y_all, y_sugar = list(map(int, your_output.readline().split())) 396 | with open(sys.argv[3]) as expected_output: 397 | e_all, e_sugar = list(map(int, expected_output.readline().split())) 398 | # check 399 | assert 100 * A <= y_all <= F 400 | y_water = y_all - y_sugar 401 | assert any(100 * A * i + 100 * B * j == y_water for i in range(3001) for j in range(3001)) 402 | assert any(C * i + D * j == y_sugar for i in range(3001) for j in range(3001)) 403 | assert y_sugar <= E * y_water / 100 404 | assert y_sugar * e_all == e_sugar * y_all 405 | assert (e_sugar > 0 and y_sugar == 0) is False 406 | ``` 407 | 408 | A program for judge can get the input of a testcase, the output of 409 | your program, and the expected output of the testcase, via files. 410 | The command for judge is executed as 411 | `<command> <input> <your_output> <expected_output>`. `<command>` is 412 | the command specified via the option of `oj` command. `<input>`, 413 | `<your_output>`, and `<expected_output>` are file paths of the input 414 | of the testcase, the output of your program, and the expected output 415 | of the testcase, respectively. If the exit code of the judge command 416 | is 0, then the output becomes `AC`, otherwise `WA`.
417 | 418 | ### Reactive problems 419 | 420 | There is a problem submitting a program that works interactively with 421 | the judge program.    The command `oj t/r` is provided to run tests for 422 | such a problem. 423 | 424 | For example, if the problem is 425 | , write the following 426 | program as the judge program and save it as `judge.py` and run 427 | `oj t/r ./judge.py` command. 428 | 429 | ```python 430 | #!/usr/bin/env python3 431 | import sys 432 | import random 433 | n = random.randint(1, 10 ** 6) 434 | print('[*] n =', n, file=sys.stderr) 435 | for i in range(25 + 1): 436 | s = input() 437 | if s.startswith('!'): 438 | x = int(s.split()[1]) 439 | assert x == n 440 | exit() 441 | else: 442 | print('<' if n < int(s) else '>=') 443 | sys.stdout.flush() 444 | assert False 445 | ``` 446 | 447 | 448 | ## List of supported services 449 | 450 | Please see the table at [online-judge-tools/api-client](https://github.com/online-judge-tools/api-client#supported-websites). 451 | 452 | 453 | ## Missing features 454 | 455 | To describe "what it is", it's necessary to tell about "what it can do". But it's not sufficient. Also we should tell about "what it cannot do". 456 | 457 | In `oj` command, there are no features like: 458 | 459 | - The feature to prepare the directory for a contest at once 460 | 461 | For the feature to prepare the directory for a contest, please use a related command, `oj-prepare` in [online-judge-tools/template-generator](https://github.com/online-judge-tools/template-generator). 462 | 463 | `oj` command is "a command to help with solving individual problems (mainly, with testing)", so other things are out of scope. 
464 | 465 | - The feature to generate template code 466 | 467 | To analyze a problem of competitive programming and automatically generate the template code for the given problem including the main function and the input/output part, you can use `oj-template` command in [online-judge-tools/template-generator](https://github.com/online-judge-tools/template-generator). 468 | 469 | - The feature to automatically compile code before running tests 470 | 471 | `oj` command doesn't have such a feature because using shell is sufficient. 472 | Please use your shell. For example, `$ g++ main.cpp && oj t` does this feature. 473 | 474 | There are too many ways to compile and run source code of various language. Also, we don't ignore users who use minor programming languages or minor online judges. So, implementing this feature is not realistic. 475 | 476 | - The feature to schedule to submit code 477 | 478 | `oj` command doesn't have such a feature because using shell is sufficient. 479 | Please use your shell. For example, `$ sleep 3600 && oj s --yes main.cpp` will submit your code after one hour. 480 | 481 | 482 | - Configuration files 483 | 484 | `oj` command doesn't have such a feature because using shell is sufficient. 485 | Please use the configuration file of your shell (e.g. `~/.bashrc`). 486 | Please use aliases of shell functions. 487 | 488 | Configuration files introduce "implicit states" and increase the costs of maintenance and user-supporting. 
489 | Except internal cookies for HTTP accessing (and, as an exception, the history to guess URLs to submit in `oj s`) 490 | -------------------------------------------------------------------------------- /onlinejudge_command/__0_workaround_for_conflict.py: -------------------------------------------------------------------------------- 1 | """ 2 | isort: skip_file 3 | """ 4 | 5 | # This is a workaround for the issue https://github.com/online-judge-tools/oj/issues/755 6 | # You can reproduce this issue with: 7 | # $ pip3 uninstall online-judge-tools online-judge-api-client 8 | # $ pip3 install online-judge-tools==9.2.2 9 | # $ pip3 install online-judge-api-client 10 | # pylint: disable=unused-import,ungrouped-imports 11 | try: 12 | import onlinejudge.__about__ # type: ignore 13 | except ImportError: 14 | import sys 15 | import textwrap 16 | print(textwrap.dedent("""\ 17 | You failed to upgrade online-judge-tools because you upgraded from a too old verion (< 10.0.0). 18 | Please execute: 19 | 20 | 1. Uninstall online-judge-tools and online-judge-api-client. 21 | $ pip3 uninstall online-judge-tools online-judge-api-client 22 | 23 | 2. Check if they are completely uninstalled. It has successfully uninstalled when the following commands say something like "not found". 24 | $ command oj 25 | oj: command not found 26 | 27 | $ python3 -c 'import pathlib, sys ; print(*[path for path in sys.path if (pathlib.Path(path) / "onlinejudge").exists()] or ["not installed"])' 28 | not installed 29 | 30 | $ pip3 show online-judge-tools online-judge-api-client 31 | (no output) 32 | 33 | 3. Reinstall online-judge-tools. 
34 | $ pip3 install online-judge-tools"""), file=sys.stderr) 35 | sys.exit(1) 36 | # pylint: enable=unused-import,ungrouped-imports 37 | -------------------------------------------------------------------------------- /onlinejudge_command/__about__.py: -------------------------------------------------------------------------------- 1 | __package_name__ = 'online-judge-tools' 2 | __author__ = 'Kimiyuki Onaka' 3 | __email__ = 'kimiyuki95@gmail.com' 4 | __license__ = 'MIT License' 5 | __url__ = 'https://github.com/online-judge-tools/oj' 6 | __version_info__ = (12, 0, 0, 'final', 0) 7 | __version__ = '.'.join(map(str, __version_info__[:3])) 8 | __description__ = 'CLI tool to solve problems of competitive programming' 9 | -------------------------------------------------------------------------------- /onlinejudge_command/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/online-judge-tools/oj/d90b0a2bd87ae72cf89951b80c8fa4bd834afd0a/onlinejudge_command/__init__.py -------------------------------------------------------------------------------- /onlinejudge_command/download_history.py: -------------------------------------------------------------------------------- 1 | import json 2 | import pathlib 3 | import time 4 | import traceback 5 | from logging import getLogger 6 | from typing import * 7 | 8 | from onlinejudge.type import Problem 9 | from onlinejudge_command import utils 10 | 11 | logger = getLogger(__name__) 12 | 13 | 14 | class DownloadHistory: 15 | def __init__(self, path: pathlib.Path = utils.user_cache_dir / 'download-history.jsonl'): 16 | self.path = path 17 | 18 | def add(self, problem: Problem, *, directory: pathlib.Path) -> None: 19 | logger.info('append the downloading history: %s', self.path) 20 | self.path.parent.mkdir(parents=True, exist_ok=True) 21 | with open(self.path, 'a') as fh: 22 | fh.write(json.dumps({ 23 | 'timestamp': int(time.time()), # this should not be int, 
but Python's strptime is too weak and datetime.fromisoformat is from 3.7 24 | 'directory': str(directory), 25 | 'url': problem.get_url(), 26 | }) + '\n') 27 | self._flush() 28 | 29 | def remove(self, *, directory: pathlib.Path) -> None: 30 | if not self.path.exists(): 31 | return 32 | logger.info('clear the downloading history for this directory: %s', self.path) 33 | with open(self.path) as fh: 34 | history_lines = fh.readlines() 35 | with open(self.path, 'w') as fh: 36 | pred = lambda line: pathlib.Path(json.loads(line)['directory']) != directory 37 | fh.write(''.join(filter(pred, history_lines))) 38 | 39 | def _flush(self) -> None: 40 | # halve the size if it is more than 1MiB 41 | if self.path.stat().st_size >= 1024 * 1024: 42 | with open(self.path) as fh: 43 | history_lines = fh.readlines() 44 | with open(self.path, 'w') as fh: 45 | fh.write(''.join(history_lines[:-len(history_lines) // 2])) 46 | logger.info('halve history at: %s', self.path) 47 | 48 | def get(self, *, directory: pathlib.Path) -> List[str]: 49 | if not self.path.exists(): 50 | return [] 51 | 52 | logger.info('read history from: %s', self.path) 53 | found = set() 54 | with open(self.path) as fh: 55 | for line in fh: 56 | try: 57 | data = json.loads(line) 58 | except json.decoder.JSONDecodeError: 59 | logger.warning('corrupted line found in: %s', self.path) 60 | logger.debug('%s', traceback.format_exc()) 61 | continue 62 | if pathlib.Path(data['directory']) == directory: 63 | found.add(data['url']) 64 | logger.info('found urls in history:\n%s', '\n'.join(found)) 65 | return list(found) 66 | -------------------------------------------------------------------------------- /onlinejudge_command/format_utils.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import glob 3 | import os 4 | import pathlib 5 | import re 6 | import sys 7 | from logging import getLogger 8 | from typing import Dict, Generator, List, Match, Optional, Set 9 | 10 | 
logger = getLogger(__name__) 11 | 12 | 13 | def percentsplit(s: str) -> Generator[str, None, None]: 14 | for m in re.finditer('[^%]|%(.)', s): 15 | yield m.group(0) 16 | 17 | 18 | def percentformat(s: str, table: Dict[str, str]) -> str: 19 | assert '%' not in table or table['%'] == '%' 20 | table['%'] = '%' 21 | result = '' 22 | for c in percentsplit(s): 23 | if c.startswith('%'): 24 | result += table[c[1]] 25 | else: 26 | result += c 27 | return result 28 | 29 | 30 | def percentparse(s: str, format: str, table: Dict[str, str]) -> Optional[Dict[str, str]]: 31 | table = {key: '(?P<{}>{})'.format(key, value) for key, value in table.items()} 32 | used: Set[str] = set() 33 | pattern = '' 34 | for token in percentsplit(re.escape(format).replace('\\%', '%')): 35 | if token.startswith('%'): 36 | c = token[1] 37 | if c not in used: 38 | pattern += table[c] 39 | used.add(c) 40 | else: 41 | pattern += r'(?P={})'.format(c) 42 | else: 43 | pattern += token 44 | m = re.match(pattern, s) 45 | if not m: 46 | return None 47 | return m.groupdict() 48 | 49 | 50 | def glob_with_format(directory: pathlib.Path, format: str) -> List[pathlib.Path]: 51 | if os.name == 'nt': 52 | format = format.replace('/', '\\') 53 | table = {} 54 | table['s'] = '*' 55 | table['e'] = '*' 56 | pattern = (glob.escape(str(directory) + os.path.sep) + percentformat(glob.escape(format).replace(glob.escape('%'), '%'), table)) 57 | paths = list(map(pathlib.Path, glob.glob(pattern))) 58 | for path in paths: 59 | logger.debug('testcase globbed: %s', path) 60 | return paths 61 | 62 | 63 | def match_with_format(directory: pathlib.Path, format: str, path: pathlib.Path) -> Optional[Match[str]]: 64 | if os.name == 'nt': 65 | format = format.replace('/', '\\') 66 | table = {} 67 | table['s'] = '(?P.+)' 68 | table['e'] = '(?Pin|out)' 69 | pattern = re.compile(re.escape(str(directory.resolve()) + os.path.sep) + percentformat(re.escape(format).replace(re.escape('%'), '%'), table)) 70 | return 
pattern.match(str(path.resolve())) 71 | 72 | 73 | def path_from_format(directory: pathlib.Path, format: str, name: str, ext: str) -> pathlib.Path: 74 | table = {} 75 | table['s'] = name 76 | table['e'] = ext 77 | return directory / percentformat(format, table) 78 | 79 | 80 | def is_backup_or_hidden_file(path: pathlib.Path) -> bool: 81 | basename = path.name 82 | return basename.endswith('~') or (basename.startswith('#') and basename.endswith('#')) or basename.startswith('.') 83 | 84 | 85 | def drop_backup_or_hidden_files(paths: List[pathlib.Path]) -> List[pathlib.Path]: 86 | result: List[pathlib.Path] = [] 87 | for path in paths: 88 | if is_backup_or_hidden_file(path): 89 | logger.warning('ignore a backup file: %s', path) 90 | else: 91 | result += [path] 92 | return result 93 | 94 | 95 | def construct_relationship_of_files(paths: List[pathlib.Path], directory: pathlib.Path, format: str) -> Dict[str, Dict[str, pathlib.Path]]: 96 | tests: Dict[str, Dict[str, pathlib.Path]] = collections.defaultdict(dict) 97 | for path in paths: 98 | m = match_with_format(directory, format, path.resolve()) 99 | if not m: 100 | logger.error('unrecognizable file found: %s', path) 101 | sys.exit(1) 102 | name = m.groupdict()['name'] 103 | ext = m.groupdict()['ext'] 104 | assert ext not in tests[name] 105 | tests[name][ext] = path 106 | for name in tests: 107 | if 'in' not in tests[name]: 108 | assert 'out' in tests[name] 109 | logger.error('dangling output case: %s', tests[name]['out']) 110 | sys.exit(1) 111 | if not tests: 112 | logger.error('no cases found') 113 | sys.exit(1) 114 | logger.info('%d cases found', len(tests)) 115 | return tests 116 | -------------------------------------------------------------------------------- /onlinejudge_command/log_formatter.py: -------------------------------------------------------------------------------- 1 | import http.client 2 | import logging 3 | from typing import * 4 | 5 | import colorama 6 | 7 | colorama.init() 8 | 9 | log_colors_level = { 
10 | logging.DEBUG: '[' + colorama.Fore.RED + 'DEBUG' + colorama.Style.RESET_ALL + '] ', 11 | logging.INFO: '[' + colorama.Fore.BLUE + 'INFO' + colorama.Style.RESET_ALL + '] ', 12 | logging.WARNING: '[' + colorama.Fore.YELLOW + 'WARNING' + colorama.Style.RESET_ALL + '] ', 13 | logging.ERROR: '[' + colorama.Fore.RED + 'ERROR' + colorama.Style.RESET_ALL + '] ', 14 | logging.CRITICAL: '[' + colorama.Fore.RED + colorama.Style.BRIGHT + 'CRITICAL' + colorama.Style.RESET_ALL + '] ', 15 | } 16 | 17 | log_colors_semantics = { 18 | 'NO_HEADER': '', 19 | 'HINT': '[' + colorama.Fore.YELLOW + 'HINT' + colorama.Style.RESET_ALL + '] ', 20 | 'NETWORK': '[' + colorama.Fore.MAGENTA + 'NETWORK' + colorama.Style.RESET_ALL + '] ', 21 | 'SUCCESS': '[' + colorama.Fore.GREEN + 'SUCCESS' + colorama.Style.RESET_ALL + '] ', 22 | 'FAILURE': '[' + colorama.Fore.RED + 'FAILURE' + colorama.Style.RESET_ALL + '] ', 23 | } 24 | 25 | status_code_messages: Set[str] = {str(int(key)) + ' ' + str(value) for key, value in http.client.responses.items()} 26 | 27 | 28 | class LogFormatter(logging.Formatter): 29 | def __init__(self, datefmt: Optional[str] = None): 30 | fmt = '[%(levelname)s] %(message)s' 31 | super().__init__(fmt=fmt, datefmt=datefmt) 32 | 33 | def format(self, record: logging.LogRecord) -> str: 34 | if record.levelno not in log_colors_level: 35 | return super().format(record) 36 | 37 | # detect the heading from the record 38 | heading = None 39 | message = record.getMessage() 40 | if not message and record.exc_info is None: 41 | heading = '' 42 | if heading is None: 43 | for key, value in log_colors_semantics.items(): 44 | if message.upper().startswith(key + ':'): 45 | heading = value 46 | message = message[len(key + ':'):].lstrip() 47 | break 48 | if heading is None: 49 | heading = log_colors_level[record.levelno] 50 | 51 | # exception 52 | if record.exc_info is not None: 53 | message += '\n' + self.formatException(record.exc_info) 54 | 55 | # make a string 56 | if not heading: 57 | return 
message 58 | return heading + message 59 | -------------------------------------------------------------------------------- /onlinejudge_command/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import sys 4 | import traceback 5 | from logging import DEBUG, INFO, StreamHandler, basicConfig, getLogger 6 | from typing import * 7 | 8 | import onlinejudge.__about__ as api_version 9 | import onlinejudge_command.__0_workaround_for_conflict # pylint: disable=unused-import 10 | import onlinejudge_command.__about__ as version 11 | import onlinejudge_command.subcommand.download as subcommand_download 12 | import onlinejudge_command.subcommand.generate_input as subcommand_generate_input 13 | import onlinejudge_command.subcommand.generate_output as subcommand_generate_output 14 | import onlinejudge_command.subcommand.login as subcommand_login 15 | import onlinejudge_command.subcommand.submit as subcommand_submit 16 | import onlinejudge_command.subcommand.test as subcommand_test 17 | import onlinejudge_command.subcommand.test_reactive as subcommand_test_reactive 18 | from onlinejudge_command import log_formatter, update_checking, utils 19 | 20 | logger = getLogger(__name__) 21 | 22 | 23 | def get_parser() -> argparse.ArgumentParser: 24 | parser = argparse.ArgumentParser( 25 | description='Tools for online judge services', 26 | formatter_class=argparse.RawTextHelpFormatter, 27 | epilog='''\ 28 | tips: 29 | The official tutorial exists on the web: https://github.com/online-judge-tools/oj/blob/master/docs/getting-started.md 30 | ''', 31 | ) 32 | parser.add_argument('-v', '--verbose', action='store_true') 33 | parser.add_argument('-c', '--cookie', type=pathlib.Path, default=utils.default_cookie_path, help='path to cookie. 
(default: {})'.format(utils.default_cookie_path)) 34 | parser.add_argument('--version', action='store_true', help='print the online-judge-tools version number') 35 | 36 | subparsers = parser.add_subparsers(dest='subcommand', help='for details, see "{} COMMAND --help"'.format(sys.argv[0])) 37 | subcommand_download.add_subparser(subparsers) 38 | subcommand_login.add_subparser(subparsers) 39 | subcommand_submit.add_subparser(subparsers) 40 | subcommand_test.add_subparser(subparsers) 41 | subcommand_generate_output.add_subparser(subparsers) 42 | subcommand_generate_input.add_subparser(subparsers) 43 | subcommand_test_reactive.add_subparser(subparsers) 44 | 45 | return parser 46 | 47 | 48 | def run_program(args: argparse.Namespace, parser: argparse.ArgumentParser) -> int: 49 | if args.version: 50 | print('online-judge-tools {} (+ online-judge-api-client {})'.format(version.__version__, api_version.__version__)) 51 | return 0 52 | logger.debug('args: %s', str(args)) 53 | 54 | # print the version to use for user-supporting 55 | logger.info('online-judge-tools %s (+ online-judge-api-client %s)', version.__version__, api_version.__version__) 56 | 57 | # TODO: make functions for subcommand take a named tuple instead of the raw result of argparse. Using named tuples make code well-typed. 
58 | if args.subcommand in ['download', 'd', 'dl']: 59 | if not subcommand_download.run(args): 60 | return 1 61 | elif args.subcommand in ['login', 'l']: 62 | if not subcommand_login.run(args): 63 | return 1 64 | elif args.subcommand in ['submit', 's']: 65 | if not subcommand_submit.run(args): 66 | return 1 67 | elif args.subcommand in ['test', 't']: 68 | if not subcommand_test.run(args): 69 | return 1 70 | elif args.subcommand in ['test-reactive', 't/r', 'test-interactive', 't/i']: 71 | if not subcommand_test_reactive.run(args): 72 | return 1 73 | elif args.subcommand in ['generate-output', 'g/o']: 74 | subcommand_generate_output.run(args) 75 | elif args.subcommand in ['generate-input', 'g/i']: 76 | subcommand_generate_input.run(args) 77 | else: 78 | parser.print_help(file=sys.stderr) 79 | return 1 80 | return 0 81 | 82 | 83 | def main(args: Optional[List[str]] = None) -> 'NoReturn': 84 | parser = get_parser() 85 | parsed = parser.parse_args(args=args) 86 | 87 | # configure the logger 88 | level = INFO 89 | if parsed.verbose: 90 | level = DEBUG 91 | handler = StreamHandler(sys.stdout) 92 | handler.setFormatter(log_formatter.LogFormatter()) 93 | basicConfig(level=level, handlers=[handler]) 94 | 95 | # check update 96 | is_updated = update_checking.run() 97 | 98 | try: 99 | sys.exit(run_program(parsed, parser=parser)) 100 | except NotImplementedError: 101 | logger.debug('\n' + traceback.format_exc()) 102 | logger.error('NotImplementedError') 103 | logger.info('The operation you specified is not supported yet. 
Pull requests are welcome.') 104 | logger.info('see: https://github.com/online-judge-tools/oj') 105 | if not is_updated: 106 | logger.info(utils.HINT + 'try updating the version of online-judge-tools: $ pip3 install -U online-judge-tools online-judge-api-client') 107 | sys.exit(1) 108 | except Exception as e: 109 | logger.debug('\n' + traceback.format_exc()) 110 | logger.exception(str(e)) 111 | if not is_updated: 112 | logger.info(utils.HINT + 'try updating the version of online-judge-tools: $ pip3 install -U online-judge-tools online-judge-api-client') 113 | sys.exit(1) 114 | 115 | 116 | if __name__ == '__main__': 117 | main() 118 | -------------------------------------------------------------------------------- /onlinejudge_command/output_comparators.py: -------------------------------------------------------------------------------- 1 | """This module collects helper classes to compare outputs for `test` subcommand. 2 | """ 3 | 4 | import abc 5 | import enum 6 | import math 7 | from logging import getLogger 8 | from typing import * 9 | 10 | logger = getLogger(__name__) 11 | 12 | 13 | class OutputComparator(abc.ABC): 14 | @abc.abstractmethod 15 | def __call__(self, actual: bytes, expected: bytes) -> bool: 16 | """ 17 | :returns: True is the two are matched. 
18 | """ 19 | raise NotImplementedError 20 | 21 | 22 | class ExactComparator(OutputComparator): 23 | def __call__(self, actual: bytes, expected: bytes) -> bool: 24 | return actual == expected 25 | 26 | 27 | class FloatingPointNumberComparator(OutputComparator): 28 | def __init__(self, *, rel_tol: float, abs_tol: float): 29 | if max(rel_tol, abs_tol) > 1: 30 | logger.warning('the tolerance is too large: relative = %s, absolute = %s', rel_tol, abs_tol) 31 | self.rel_tol = rel_tol 32 | self.abs_tol = abs_tol 33 | 34 | def __call__(self, actual: bytes, expected: bytes) -> bool: 35 | """ 36 | :returns: True if the relative error or absolute error is smaller than the accepted error 37 | """ 38 | try: 39 | x: Optional[float] = float(actual) 40 | except ValueError: 41 | x = None 42 | try: 43 | y: Optional[float] = float(expected) 44 | except ValueError: 45 | y = None 46 | if x is not None and y is not None: 47 | return math.isclose(x, y, rel_tol=self.rel_tol, abs_tol=self.abs_tol) 48 | else: 49 | return actual == expected 50 | 51 | 52 | class SplitComparator(OutputComparator): 53 | def __init__(self, word_comparator: OutputComparator): 54 | self.word_comparator = word_comparator 55 | 56 | def __call__(self, actual: bytes, expected: bytes) -> bool: 57 | # str.split() also removes trailing '\r' 58 | actual_words = actual.split() 59 | expected_words = expected.split() 60 | if len(actual_words) != len(expected_words): 61 | return False 62 | for x, y in zip(actual_words, expected_words): 63 | if not self.word_comparator(x, y): 64 | return False 65 | return True 66 | 67 | 68 | class SplitLinesComparator(OutputComparator): 69 | def __init__(self, line_comparator: OutputComparator): 70 | self.line_comparator = line_comparator 71 | 72 | def __call__(self, actual: bytes, expected: bytes) -> bool: 73 | actual_lines = actual.rstrip(b'\n').split(b'\n') 74 | expected_lines = expected.rstrip(b'\n').split(b'\n') 75 | if len(actual_lines) != len(expected_lines): 76 | return False 77 | for 
x, y in zip(actual_lines, expected_lines): 78 | if not self.line_comparator(x, y): 79 | return False 80 | return True 81 | 82 | 83 | class CRLFInsensitiveComparator(OutputComparator): 84 | def __init__(self, file_comparator: OutputComparator): 85 | self.file_comparator = file_comparator 86 | 87 | def __call__(self, actual: bytes, expected: bytes) -> bool: 88 | return self.file_comparator(actual.replace(b'\r\n', b'\n'), expected.replace(b'\r\n', b'\n')) 89 | 90 | 91 | class CompareMode(enum.Enum): 92 | EXACT_MATCH = 'exact-match' 93 | CRLF_INSENSITIVE_EXACT_MATCH = 'crlf-insensitive-exact-match' 94 | IGNORE_SPACES = 'ignore-spaces' 95 | IGNORE_SPACES_AND_NEWLINES = 'ignore-spaces-and-newlines' 96 | 97 | 98 | # This function is used from onlinejudge_command.pretty_printers. 99 | def check_lines_match(a: str, b: str, *, compare_mode: CompareMode) -> bool: 100 | if compare_mode == CompareMode.EXACT_MATCH: 101 | comparator: OutputComparator = ExactComparator() 102 | elif compare_mode == CompareMode.CRLF_INSENSITIVE_EXACT_MATCH: 103 | comparator = CRLFInsensitiveComparator(ExactComparator()) 104 | elif compare_mode == CompareMode.IGNORE_SPACES: 105 | comparator = SplitComparator(ExactComparator()) 106 | elif compare_mode == CompareMode.IGNORE_SPACES_AND_NEWLINES: 107 | raise RuntimeError('CompareMode.IGNORE_SPACES_AND_NEWLINES is not allowed for this function') 108 | else: 109 | assert False 110 | return comparator(a.encode(), b.encode()) 111 | -------------------------------------------------------------------------------- /onlinejudge_command/subcommand/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/online-judge-tools/oj/d90b0a2bd87ae72cf89951b80c8fa4bd834afd0a/onlinejudge_command/subcommand/__init__.py -------------------------------------------------------------------------------- /onlinejudge_command/subcommand/download.py: 
import argparse
import json
import os
import pathlib
import textwrap
from logging import getLogger
from typing import *

import requests.exceptions

import onlinejudge_command.download_history
from onlinejudge import dispatch
from onlinejudge.service.atcoder import AtCoderProblem
from onlinejudge.service.yukicoder import YukicoderProblem
from onlinejudge.type import SampleParseError, TestCase
from onlinejudge_command import format_utils, pretty_printers, utils

logger = getLogger(__name__)


def add_subparser(subparsers: argparse.Action) -> None:
    """Registers the `download` subcommand (aliases: `d`, `dl`) on the top-level parser."""
    subparsers_add_parser: Callable[..., argparse.ArgumentParser] = subparsers.add_parser  # type: ignore
    subparser = subparsers_add_parser('download', aliases=['d', 'dl'], help='download sample cases', formatter_class=argparse.RawTextHelpFormatter, epilog='''\
supported services:
  Anarchy Golf
  Aizu Online Judge (including the Arena)
  AtCoder
  Codeforces
  yukicoder
  CS Academy
  HackerRank
  PKU JudgeOnline
  Kattis
  Toph (Problem Archive)
  CodeChef
  Facebook Hacker Cup
  Google Code Jam
  Library Checker (https://judge.yosupo.jp/)

supported services with --system:
  Aizu Online Judge
  AtCoder
  yukicoder
  Library Checker (https://judge.yosupo.jp/)

format string for --format:
  %i  index: 1, 2, 3, ...
  %e  extension: "in" or "out"
  %n  name: e.g. "Sample Input 1", "system_test3.txt", ...
  %b  os.path.basename(name)
  %d  os.path.dirname(name)
  %%  '%' itself

tips:
  This subcommand doesn't have the feature to download all test cases for all problems in a contest at once. If you want to do this, please use `oj-prepare` command at https://github.com/online-judge-tools/template-generator instead.

  You can do similar things with shell and oj-api command. see https://github.com/online-judge-tools/api-client
  e.g. $ oj-api get-problem https://atcoder.jp/contests/agc001/tasks/agc001_a | jq -cr '.result.tests | to_entries[] | [{path: "test/sample-\\(.key).in", data: .value.input}, {path: "test/sample-\\(.key).out", data: .value.output}][] | {path, data: @sh "\\(.data)"} | "mkdir -p test; echo -n \\(.data) > \\(.path)"' | sh
''')
    subparser.add_argument('url')
    subparser.add_argument('-f', '--format', help='a format string to specify paths of cases (default: "sample-%%i.%%e" if not --system)')  # default must be None for --system
    subparser.add_argument('-d', '--directory', type=pathlib.Path, help='a directory name for test cases (default: test/)')  # default must be None for guessing in submit command
    subparser.add_argument('-n', '--dry-run', action='store_true', help='don\'t write to files')
    subparser.add_argument('-a', '--system', action='store_true', help='download system testcases')
    subparser.add_argument('-s', '--silent', action='store_true')
    subparser.add_argument('--yukicoder-token', type=str)
    subparser.add_argument('--dropbox-token', type=str)
    subparser.add_argument('--log-file', type=pathlib.Path, help=argparse.SUPPRESS)


def convert_sample_to_dict(sample: TestCase) -> Dict[str, str]:
    """Converts a test case into a JSON-friendly dict for --log-file.

    The "output" key is present only when the case has expected output data.
    """
    data: Dict[str, str] = {}
    data["name"] = sample.name
    data["input"] = sample.input_data.decode()
    if sample.output_data is not None:
        data["output"] = sample.output_data.decode()
    return data


def run(args: argparse.Namespace) -> bool:
    """Downloads sample (or system) test cases of the problem at args.url.

    :returns: True on success, False on any handled failure.
    :raises SampleParseError: when --system is used for AtCoder without --dropbox-token.
    """
    # prepare values
    problem = dispatch.problem_from_url(args.url)
    if problem is None:
        if dispatch.contest_from_url(args.url) is not None:
            logger.warning('You specified a URL for a contest instead of a problem. If you want to download for all problems of a contest at once, please try to use `oj-prepare` command of https://github.com/online-judge-tools/template-generator')
        logger.error('The URL "%s" is not supported', args.url)
        return False
    is_default_format = args.format is None and args.directory is None  # must be here since args.directory and args.format are overwritten
    if args.directory is None:
        args.directory = pathlib.Path('test')
    if args.format is None:
        # BUGFIX: the default format depends on --system, as documented in the
        # --format help text ('sample-%i.%e' if not --system). Previously
        # '%b.%e' was used unconditionally, which contradicted the help.
        if args.system:
            args.format = '%b.%e'
        else:
            args.format = 'sample-%i.%e'

    # get samples from the server
    with utils.new_session_with_our_user_agent(path=args.cookie) as sess:
        if isinstance(problem, AtCoderProblem) and args.system:
            # AtCoder system tests live in Dropbox and need an OAuth token.
            if not args.dropbox_token:
                logger.info(utils.HINT + 'You need to give the access token. Please do the following:\n%s', textwrap.dedent("""
                    1. Open the following URL in your browser:
                        https://www.dropbox.com/oauth2/authorize?client_id=153gig8dqgk3ujg&response_type=code
                    2. Authorize the app and take the access code.
                    3. Run the following command with replacing the "${YOUR_ACCESS_CODE}":
                        $ curl https://api.dropbox.com/oauth2/token --user 153gig8dqgk3ujg:5l7o7lh73o8i9ux --data grant_type=authorization_code --data code=${YOUR_ACCESS_CODE}
                    4. Get the access token from the JSON. It is in the "access_token" field.
                    5. Use the access token. For example:
                        $ oj download """ + problem.get_url() + """ --system --dropbox-token=${YOUR_ACCESS_TOKEN}

                    (Please take care that the access code and the access token are CONFIDENTIAL information. DON'T SHARE with other people!)
                """))
                raise SampleParseError("--dropbox-token is not given")
            sess.headers['Authorization'] = 'Bearer {}'.format(args.dropbox_token)
        if args.yukicoder_token and isinstance(problem, YukicoderProblem):
            sess.headers['Authorization'] = 'Bearer {}'.format(args.yukicoder_token)
        try:
            if args.system:
                samples = problem.download_system_cases(session=sess)
            else:
                samples = problem.download_sample_cases(session=sess)
        except requests.exceptions.RequestException as e:
            logger.error('%s', e)
            logger.error(utils.HINT + 'You may need to login to use `$ oj download ...` during contest. Please run: $ oj login %s', problem.get_service().get_url())
            return False
        except SampleParseError as e:
            logger.error('%s', e)
            return False

    if not samples:
        logger.error("Sample not found")
        return False

    # append the history for submit subcommand
    if not args.dry_run and is_default_format:
        history = onlinejudge_command.download_history.DownloadHistory()
        if not list(args.directory.glob('*')):
            # reset the history to help users who use only one directory for many problems
            history.remove(directory=pathlib.Path.cwd())
        history.add(problem, directory=pathlib.Path.cwd())

    # prepare files to write
    def iterate_files_to_write(sample: TestCase, *, i: int) -> Iterator[Tuple[str, pathlib.Path, bytes]]:
        # yields ('in'/'out', destination path, file content) for each side of the case
        for ext in ['in', 'out']:
            data = getattr(sample, ext + 'put_data')  # input_data / output_data
            if data is None:
                continue
            name = sample.name
            table = {}
            table['i'] = str(i + 1)
            table['e'] = ext
            table['n'] = name
            table['b'] = os.path.basename(name)
            table['d'] = os.path.dirname(name)
            path: pathlib.Path = args.directory / format_utils.percentformat(args.format, table)
            yield ext, path, data

    # fail fast before writing anything if any destination already exists
    for i, sample in enumerate(samples):
        for _, path, _ in iterate_files_to_write(sample, i=i):
            if path.exists():
                logger.error('Failed to download since file already exists: %s', str(path))
                logger.info(utils.HINT + 'We recommend adding your own test cases to test/ directory, and using one directory per one problem. Please see also https://github.com/online-judge-tools/oj/blob/master/docs/getting-started.md#random-testing. If you wanted to keep using one directory per one contest, you can run like `$ rm -rf test/ && oj d https://...`.')
                return False

    # write samples to files
    for i, sample in enumerate(samples):
        logger.info('')
        logger.info('sample %d', i)
        for ext, path, data in iterate_files_to_write(sample, i=i):
            content = ''
            if not args.silent:
                content = '\n' + pretty_printers.make_pretty_large_file_content(data, limit=40, head=20, tail=10)
            logger.info('%sput: %s%s', ext, sample.name, content)
            if not args.dry_run:
                path.parent.mkdir(parents=True, exist_ok=True)
                with path.open('wb') as fh:
                    fh.write(data)
                logger.info(utils.SUCCESS + 'saved to: %s', path)

    # dump all cases as JSON when --log-file is given
    if args.log_file:
        with args.log_file.open(mode='w') as fhs:
            json.dump(list(map(convert_sample_to_dict, samples)), fhs)

    return True
def add_subparser(subparsers: argparse.Action) -> None:
    """Registers the `generate-input` subcommand (alias: `g/i`) on the top-level parser."""
    subparsers_add_parser: Callable[..., argparse.ArgumentParser] = subparsers.add_parser  # type: ignore
    # BUGFIX in the epilog: it previously said "(both %d and %e are required.)"
    # although the format string documented just above uses %s and %e (cf. the
    # identical wording in generate_output.py).
    subparser = subparsers_add_parser('generate-input', aliases=['g/i'], help='generate input files from given generator', formatter_class=argparse.RawTextHelpFormatter, epilog='''\
format string for --format:
  %s  name
  %e  extension: "in" or "out"
  (both %s and %e are required.)

tips:
  For the random testing, you can read a tutorial: https://github.com/online-judge-tools/oj/blob/master/docs/getting-started.md#random-testing

  There is a command to automatically generate a input generator, `oj-template` command. See https://github.com/online-judge-tools/template-generator .

  This subcommand has also the feature to find a hack case.
  e.g. for a target program `a.out`, a correct (but possibly slow) program `naive`, and a random input-case generator `generate.py`, run $ oj g/i --hack-actual ./a.out --hack-expected ./naive 'python3 generate.py'

  You can do similar things with shell
  e.g. $ for i in `seq 100` ; do python3 generate.py > test/random-$i.in ; done
''')
    subparser.add_argument('-f', '--format', default='%s.%e', help='a format string to recognize the relationship of test cases. (default: "%%s.%%e")')
    subparser.add_argument('-d', '--directory', type=pathlib.Path, default=pathlib.Path('test'), help='a directory name for test cases (default: test/)')
    subparser.add_argument('-t', '--tle', type=float, help='set the time limit (in second) (default: inf)')
    subparser.add_argument('-j', '--jobs', type=int, help='run tests in parallel')
    subparser.add_argument('--width', type=int, default=3, help='specify the width of indices of cases. (default: 3)')
    subparser.add_argument('--name', help='specify the base name of cases. (default: "random")')
    subparser.add_argument('-c', '--command', help='specify your solution to generate output')
    subparser.add_argument('--hack-expected', dest='command', help='alias of --command. If this is not given, --hack runs until the actual solution fails with RE or TLE.')
    subparser.add_argument('--hack', '--hack-actual', dest='hack', help='specify your wrong solution to be compared with the reference solution given by --hack-expected')
    subparser.add_argument('generator', type=str, help='your program to generate test cases')
    subparser.add_argument('count', nargs='?', type=int, help='the number of cases to generate (default: 100)')


@contextlib.contextmanager
def BufferedExecutor(lock: Optional[threading.Lock]):
    """Context manager yielding a `submit(f, *args, **kwargs)` callable.

    Without a lock, calls run immediately; with a lock, they are buffered and
    replayed under the lock on exit, so log lines of one case stay contiguous
    when running in parallel.
    """
    buf: List[Tuple[Callable, List[Any], Dict[str, Any]]] = []

    def submit(f, *args, **kwargs):
        nonlocal buf
        if lock is None:
            f(*args, **kwargs)
        else:
            buf += [(f, args, kwargs)]

    result = yield submit

    if lock is not None:
        with lock:
            for f, args, kwargs in buf:
                f(*args, **kwargs)
    return result


def write_result(input_data: bytes, output_data: Optional[bytes], *, input_path: pathlib.Path, output_path: pathlib.Path, print_data: bool, lock: Optional[threading.Lock] = None) -> None:
    """Writes the generated input (and output, if any) to their files, logging under the lock."""
    # acquire lock to print logs properly, if in parallel
    nullcontext = contextlib.nullcontext()
    with lock or nullcontext:

        if not input_path.parent.is_dir():
            os.makedirs(str(input_path.parent), exist_ok=True)

        if print_data:
            logger.info(utils.NO_HEADER + 'input:')
            logger.info(utils.NO_HEADER + '%s', pretty_printers.make_pretty_large_file_content(input_data, limit=40, head=20, tail=10))
        with input_path.open('wb') as fh:
            fh.write(input_data)
        logger.info(utils.SUCCESS + 'saved to: %s', input_path)

        if output_data is not None:
            if print_data:
                logger.info(utils.NO_HEADER + 'output:')
                logger.info(pretty_printers.make_pretty_large_file_content(output_data, limit=40, head=20, tail=10))
            with output_path.open('wb') as fh:
                fh.write(output_data)
            logger.info(utils.SUCCESS + 'saved to: %s', output_path)


def check_status(info: Dict[str, Any], proc: subprocess.Popen, *, submit: Callable[..., None], input_data: Optional[bytes]) -> bool:
    """Logs TLE/RE of a finished subprocess via `submit`.

    :returns: True when the process terminated normally (its answer is usable).
    """
    submit(logger.info, 'time: %f sec', info['elapsed'])
    if proc.returncode is None:  # still running after the timeout => TLE
        submit(logger.info, utils.FAILURE + utils.red('TLE'))
        if input_data is not None:
            submit(logger.info, utils.NO_HEADER + 'input:')
            submit(logger.info, utils.NO_HEADER + '%s', pretty_printers.make_pretty_large_file_content(input_data, limit=40, head=20, tail=10))
        submit(logger.info, 'skipped.')
        return False
    elif proc.returncode != 0:
        submit(logger.info, utils.FAILURE + utils.red('RE') + ': return code %d', proc.returncode)
        if input_data is not None:
            submit(logger.info, utils.NO_HEADER + 'input:')
            submit(logger.info, utils.NO_HEADER + '%s', pretty_printers.make_pretty_large_file_content(input_data, limit=40, head=20, tail=10))
        submit(logger.info, 'skipped.')
        return False
    assert info['answer'] is not None
    return True


def check_randomness_of_generator(input_data: bytes, *, name: str, lock: Optional[threading.Lock], generated_input_hashes: Dict[bytes, str]) -> Optional[str]:
    """check_randomness_of_generator() checks the generated inputs. This adds some overheads but is needed for foolproof. Many users forget to initialize their library and use fixed seeds.

    :returns: a previous name of the input when it was already once generated. None if it's a new input.
    """

    # To prevent consuming unlimited memories, do nothing if the user's generator is properly implemented.
    limit = 1000
    if len(generated_input_hashes) >= limit:
        return None

    input_digest = hashlib.sha1(input_data).digest()
    nullcontext = contextlib.nullcontext()
    with lock or nullcontext:
        if len(generated_input_hashes) < limit:
            if input_digest in generated_input_hashes:
                return generated_input_hashes[input_digest]
            else:
                generated_input_hashes[input_digest] = name
                if len(generated_input_hashes) == limit:
                    logger.info('Conflict checking of generated inputs is disabled now because it seems the given input generator has enough randomness.')  # This prints a log line but it's safe because here is in a lock.
    return None


def generate_input_single_case(generator: str, *, input_path: pathlib.Path, output_path: pathlib.Path, command: Optional[str], tle: Optional[float], name: str, lock: Optional[threading.Lock] = None, generated_input_hashes: Dict[bytes, str]) -> None:
    """Generates one test case: runs the generator, optionally the reference solution, then writes the files."""
    with BufferedExecutor(lock) as submit:

        # print the header
        submit(logger.info, '')
        submit(logger.info, '%s', name)

        # generate input
        submit(logger.info, 'generate input...')
        info, proc = utils.exec_command(generator, timeout=tle)
        input_data: bytes = info['answer']
        if not check_status(info, proc, submit=submit, input_data=input_data):
            return

        # check the randomness of generator
        conflicted_name = check_randomness_of_generator(input_data, name=name, lock=lock, generated_input_hashes=generated_input_hashes)
        if conflicted_name is not None:
            submit(logger.warning, 'The same input is already generated at %s. Please use a random input generator.', conflicted_name)

        # generate output
        if command is None:
            output_data: Optional[bytes] = None
        else:
            submit(logger.info, 'generate output...')
            info, proc = utils.exec_command(command, input=input_data, timeout=tle)
            output_data = info['answer']
            if not check_status(info, proc, submit=submit, input_data=input_data):
                return

        # write result
        submit(write_result, input_data=input_data, output_data=output_data, input_path=input_path, output_path=output_path, print_data=True)


def simple_match(a: str, b: str) -> bool:
    """Loose equality for hack checking: exact match, or match after rstrip (with a warning)."""
    if a == b:
        return True
    if a.rstrip() == b.rstrip():
        logger.warning('WA if no rstrip')
        return True
    return False


def try_hack_once(generator: str, command: Optional[str], hack: str, *, tle: Optional[float], attempt: int, lock: Optional[threading.Lock] = None, generated_input_hashes: Dict[bytes, str]) -> Optional[Tuple[bytes, Optional[bytes]]]:
    """Runs one hack attempt.

    :returns: (input_data, output_data) of a case that breaks the hacked
        solution (TLE/RE, or WA when a reference command is given), or None
        when this attempt did not break it.
    """
    with BufferedExecutor(lock) as submit:

        # print the header
        submit(logger.info, '')
        submit(logger.info, '%d-th attempt', attempt)

        # generate input
        submit(logger.info, 'generate input...')
        info, proc = utils.exec_command(generator, stdin=None, timeout=tle)
        input_data: Optional[bytes] = info['answer']
        if not check_status(info, proc, submit=submit, input_data=input_data):
            return None
        assert input_data is not None

        # check the randomness of generator
        name = '{}-th attempt'.format(attempt)
        conflicted_name = check_randomness_of_generator(input_data, name=name, lock=lock, generated_input_hashes=generated_input_hashes)
        if conflicted_name is not None:
            submit(logger.warning, 'The same input is already generated at %s. Please use a random input generator.', conflicted_name)
            submit(logger.info, utils.NO_HEADER + 'input:')
            submit(logger.info, utils.NO_HEADER + '%s', pretty_printers.make_pretty_large_file_content(input_data, limit=40, head=20, tail=10))

        # generate expected output
        output_data: Optional[bytes] = None
        if command is not None:
            submit(logger.info, 'generate output...')
            info, proc = utils.exec_command(command, input=input_data, timeout=tle)
            output_data = info['answer']
            if not check_status(info, proc, submit=submit, input_data=input_data):
                return None
            assert output_data is not None

        # hack
        submit(logger.info, 'hack...')
        info, proc = utils.exec_command(hack, input=input_data, timeout=tle)
        answer: str = (info['answer'] or b'').decode()

        # compare
        status = 'AC'
        if proc.returncode is None:
            submit(logger.info, 'FAILURE: ' + utils.red('TLE'))
            status = 'TLE'
        elif proc.returncode != 0:
            logger.info(utils.FAILURE + '' + utils.red('RE') + ': return code %d', proc.returncode)
            status = 'RE'
        if output_data is not None:
            expected = output_data.decode()
            if not simple_match(answer, expected):
                logger.info(utils.FAILURE + '' + utils.red('WA'))
                logger.info(utils.NO_HEADER + 'input:\n%s', pretty_printers.make_pretty_large_file_content(input_data, limit=40, head=20, tail=10))
                logger.info(utils.NO_HEADER + 'output:\n%s', pretty_printers.make_pretty_large_file_content(answer.encode(), limit=40, head=20, tail=10))
                logger.info(utils.NO_HEADER + 'expected:\n%s', pretty_printers.make_pretty_large_file_content(output_data, limit=40, head=20, tail=10))
                status = 'WA'

        if status == 'AC':
            return None
        else:
            return (input_data, output_data)


def run(args: argparse.Namespace) -> None:
    """Entry point of `oj generate-input`: generates cases or searches for a hack case, serially or in parallel."""
    if args.hack and not args.command:
        logger.info('--hack-actual is given but --hack-expected is not given. It will run until the actual solution gets RE or TLE.')

    if args.name is None:
        if args.hack:
            args.name = 'hack'
        else:
            args.name = 'random'

    if args.count is None:
        if args.hack:
            args.count = 1
        else:
            args.count = 100

    def iterate_path():
        # yields fresh (name, input_path, output_path) triples, skipping names already on disk
        for i in itertools.count():
            name = '{}-{}'.format(args.name, str(i).zfill(args.width))
            input_path = fmtutils.path_from_format(args.directory, args.format, name=name, ext='in')
            output_path = fmtutils.path_from_format(args.directory, args.format, name=name, ext='out')
            if not input_path.exists() and not output_path.exists():
                yield (name, input_path, output_path)

    # generate cases
    generated_input_hashes: Dict[bytes, str] = {}
    if args.jobs is None:
        for name, input_path, output_path in itertools.islice(iterate_path(), args.count):
            if not args.hack:
                # generate serially
                generate_input_single_case(args.generator, input_path=input_path, output_path=output_path, command=args.command, tle=args.tle, name=name, generated_input_hashes=generated_input_hashes)

            else:
                # hack serially
                for attempt in itertools.count(1):
                    data = try_hack_once(args.generator, command=args.command, hack=args.hack, tle=args.tle, attempt=attempt, generated_input_hashes=generated_input_hashes)
                    if data is not None:
                        write_result(*data, input_path=input_path, output_path=output_path, print_data=False)
                        break
    else:
        with concurrent.futures.ThreadPoolExecutor(max_workers=args.jobs) as executor:
            lock = threading.Lock()
            futures: List[concurrent.futures.Future] = []

            if not args.hack:
                # generate concurrently
                for name, input_path, output_path in itertools.islice(iterate_path(), args.count):
                    futures += [executor.submit(generate_input_single_case, args.generator, input_path=input_path, output_path=output_path, command=args.command, tle=args.tle, name=name, lock=lock, generated_input_hashes=generated_input_hashes)]
                for future in futures:
                    future.result()

            else:
                # hack concurrently: keep args.jobs attempts in flight, replacing
                # each finished attempt with a new one until a hack case is found
                attempt = 0
                for _ in range(args.jobs):
                    attempt += 1
                    futures += [executor.submit(try_hack_once, args.generator, command=args.command, hack=args.hack, tle=args.tle, attempt=attempt, lock=lock, generated_input_hashes=generated_input_hashes)]
                for _, input_path, output_path in itertools.islice(iterate_path(), args.count):
                    data = None
                    while data is None:
                        concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
                        for i in range(len(futures)):
                            if not futures[i].done():
                                continue
                            data = futures[i].result()
                            attempt += 1
                            futures[i] = executor.submit(try_hack_once, args.generator, command=args.command, hack=args.hack, tle=args.tle, attempt=attempt, lock=lock, generated_input_hashes=generated_input_hashes)
                            if data is not None:
                                break
                    write_result(*data, input_path=input_path, output_path=output_path, print_data=False, lock=lock)
def add_subparser(subparsers: argparse.Action) -> None:
    """Registers the `generate-output` subcommand (alias: `g/o`) on the top-level parser."""
    subparsers_add_parser: Callable[..., argparse.ArgumentParser] = subparsers.add_parser  # type: ignore
    subparser = subparsers_add_parser('generate-output', aliases=['g/o'], help='generate output files from input and reference implementation', formatter_class=argparse.RawTextHelpFormatter, epilog='''\
format string for --format:
  %s  name
  %e  extension: "in" or "out"
  (both %s and %e are required.)

tips:
  You can do similar things with shell
  e.g. $ for f in test/*.in ; do ./a.out < $f > ${f%.in}.out ; done
''')
    subparser.add_argument('-c', '--command', default=utils.get_default_command(), help='your solution to be tested. (default: "{}")'.format(utils.get_default_command()))
    subparser.add_argument('-f', '--format', default='%s.%e', help='a format string to recognize the relationship of test cases. (default: "%%s.%%e")')
    subparser.add_argument('-d', '--directory', type=pathlib.Path, default=pathlib.Path('test'), help='a directory name for test cases (default: test/)')
    subparser.add_argument('-t', '--tle', type=float, help='set the time limit (in second) (default: inf)')
    subparser.add_argument('-j', '--jobs', type=int, help='run tests in parallel')
    subparser.add_argument('test', nargs='*', type=pathlib.Path, help='paths of input cases. (if empty: globbed from --format)')
    subparser.add_argument('--no-ignore-backup', action='store_false', dest='ignore_backup')
    subparser.add_argument('--ignore-backup', action='store_true', help='ignore backup files and hidden files (i.e. files like "*~", "\\#*\\#" and ".*") (default)')


def generate_output_single_case(test_name: str, test_input_path: pathlib.Path, *, lock: Optional[threading.Lock] = None, args: argparse.Namespace) -> None:
    """Runs the reference command on one input file and saves its stdout as the .out file.

    Skips the case (without writing) on TLE or a non-zero return code.
    """

    # print the header
    if lock is None:
        logger.info('')
        logger.info('%s', test_name)

    # run the command
    with test_input_path.open('rb') as inf:
        info, proc = utils.exec_command(args.command, stdin=inf, timeout=args.tle)
        answer: Optional[bytes] = info['answer']
        elapsed: float = info['elapsed']

    # acquire lock to print logs properly, if in parallel
    nullcontext = contextlib.ExitStack()
    with lock or nullcontext:
        if lock is not None:
            # when parallel, the header is printed here so it stays next to the body
            logger.info('')
            logger.info('%s', test_name)

        # check the result
        logger.info('time: %f sec', elapsed)
        if proc.returncode is None:  # still running after the timeout => TLE
            logger.info(utils.red('TLE'))
            logger.info('skipped.')
            return
        elif proc.returncode != 0:
            # BUGFIX: the log message was misspelled as 'FIALURE: '
            logger.info('FAILURE: ' + utils.red('RE') + ': return code %d', proc.returncode)
            logger.info('skipped.')
            return
        assert answer is not None
        logger.info(utils.NO_HEADER + '' + pretty_printers.make_pretty_large_file_content(answer, limit=40, head=20, tail=10))

        # find the destination path
        match_result: Optional[Match[Any]] = fmtutils.match_with_format(args.directory, args.format, test_input_path)
        if match_result is not None:
            matched_name: str = match_result.groupdict()['name']
        else:
            assert False
        test_output_path = fmtutils.path_from_format(args.directory, args.format, name=matched_name, ext='out')

        # write the result to the file
        if not test_output_path.parent.is_dir():
            os.makedirs(str(test_output_path.parent), exist_ok=True)
        with test_output_path.open('wb') as fh:
            fh.write(answer)
        logger.info(utils.SUCCESS + 'saved to: %s', test_output_path)


def generate_output_single_case_exists_ok(test_name: str, test_input_path: pathlib.Path, test_output_path: Optional[pathlib.Path], *, lock: Optional[threading.Lock] = None, args: argparse.Namespace) -> None:
    """Like generate_output_single_case(), but skips cases whose output file already exists."""
    if test_output_path is not None:
        nullcontext = contextlib.ExitStack()
        with lock or nullcontext:
            logger.info('')
            logger.info('%s', test_name)
            logger.info('output file already exists.')
            logger.info('skipped.')
    else:
        generate_output_single_case(test_name, test_input_path, lock=lock, args=args)


def run(args: argparse.Namespace) -> None:
    """Entry point of `oj generate-output`: generates .out files for all (or the given) input cases."""
    # list tests
    if not args.test:
        args.test = fmtutils.glob_with_format(args.directory, args.format)  # by default
    if args.ignore_backup:
        args.test = fmtutils.drop_backup_or_hidden_files(args.test)
    tests = fmtutils.construct_relationship_of_files(args.test, args.directory, args.format)

    # generate cases, serially or in parallel depending on --jobs
    if args.jobs is None:
        for name, paths in sorted(tests.items()):
            generate_output_single_case_exists_ok(name, paths['in'], paths.get('out'), args=args)
    else:
        with concurrent.futures.ThreadPoolExecutor(max_workers=args.jobs) as executor:
            lock = threading.Lock()
            futures: List[concurrent.futures.Future] = []
            for name, paths in sorted(tests.items()):
                futures += [executor.submit(generate_output_single_case_exists_ok, name, paths['in'], paths.get('out'), lock=lock, args=args)]
            for future in futures:
                future.result()
def add_subparser(subparsers: argparse.Action) -> None:
    # Registers the `login` subcommand (alias: `l`) on the top-level parser.
    subparsers_add_parser: Callable[..., argparse.ArgumentParser] = subparsers.add_parser  # type: ignore
    subparser = subparsers_add_parser('login', aliases=['l'], help='login to a service', formatter_class=argparse.RawTextHelpFormatter, epilog='''\
supported services:
  AtCoder
  Codeforces
  yukicoder
  HackerRank
  Toph

tips:
  You can do similar things with shell and oj-api command. see https://github.com/online-judge-tools/api-client
  e.g. $ USERNAME=foo PASSWORD=bar oj-api login-service https://atcoder.jp/
''')
    subparser.add_argument('url')
    subparser.add_argument('-u', '--username')
    subparser.add_argument('-p', '--password')
    subparser.add_argument('--check', action='store_true', help='check whether you are logged in or not')
    subparser.add_argument('--use-browser', choices=('always', 'auto', 'never'), default='auto', help='specify whether it uses a GUI web browser to login or not (default: auto)')


def login_with_password(service: Service, *, username: Optional[str], password: Optional[str], session: requests.Session) -> None:
    """Logs in with username/password, prompting interactively for whichever was not given on the command line."""
    def get_credentials() -> Tuple[str, str]:
        # lazily resolved so that prompts only appear when the service actually asks
        nonlocal username, password
        if username is None:
            username = input('Username: ')
        if password is None:
            password = getpass.getpass()
        return username, password

    service.login(get_credentials=get_credentials, session=session)


# a wrapper class, because selenium.common.exceptions.* is not always imported
class WebDriverException(Exception):
    pass


def get_webdriver() -> Any:
    """get_webdriver() detects an available webdriver and returns it.

    Tries Chrome, Firefox, Edge, Internet Explorer, Safari, then Opera, in
    that order, returning the first one that starts.

    :raises ImportError: of Selenium
    :raises WebDriverException: when no browser driver can be started
    """

    import selenium.webdriver  # pylint: disable=import-error,import-outside-toplevel

    logger.info('Trying to open Chrome via WebDriver...')
    try:
        return selenium.webdriver.Chrome()
    except selenium.common.exceptions.WebDriverException as e:
        logger.error(e)

    logger.info('Trying to open Firefox via WebDriver...')
    try:
        return selenium.webdriver.Firefox()
    except Exception as e:
        logger.error(e)

    logger.info('Trying to open Edge via WebDriver...')
    try:
        return selenium.webdriver.Edge()
    except Exception as e:
        logger.error(e)

    logger.info('Trying to open Internet Explorer via WebDriver...')
    try:
        return selenium.webdriver.Ie()
    except Exception as e:
        logger.error(e)

    logger.info('Trying to open Safari via WebDriver...')
    try:
        return selenium.webdriver.Safari()
    except Exception as e:
        logger.error(e)

    logger.info('Trying to open Opera via WebDriver...')
    try:
        # NOTE(review): selenium.webdriver.Opera is removed in Selenium 4 —
        # presumably this code targets Selenium 3; confirm the pinned version.
        return selenium.webdriver.Opera()
    except Exception as e:
        logger.error(e)

    logger.error('No WebDriver is available.')
    logger.info(utils.HINT + textwrap.dedent("""
        Please install a WebDriver.
        See https://www.selenium.dev/documentation/en/webdriver/driver_requirements/

        Detailed instructions:
            If you use Ubuntu:
                1. Run $ sudo apt install chromium-chromedriver firefox-geckodriver
            If you use Ubuntu under Windows Subsystem for Linux:
                1. Make a symbolic link from cookie.jar in WSL to cookie.jar out of WSL. For example, run $ ln -s /mnt/c/Users/%USERNAME%/AppData/Local/online-judge-tools/online-judge-tools/cookie.jar /home/ubuntu/.local/share/online-judge-tools/cookie.jar
                2. Use `oj login` outside of WSL
            If you use Windows:
                1. Install Chocolatey. See https://chocolatey.org/
                2. Run > choco install selenium-all-drivers
    """))
    raise WebDriverException('No WebDriver is installed.')


def login_with_browser(service: Service, *, session: requests.Session) -> None:
    """Opens the service's login page in a GUI browser and copies the resulting cookies into `session`.

    :raises ImportError: of Selenium
    :raises WebDriverException:
    """

    import selenium.webdriver  # pylint: disable=import-error,import-outside-toplevel

    with get_webdriver() as driver:
        # get cookies via Selenium
        url = service.get_url_of_login_page()
        logger.info('Opening the URL via WebDriver: %s', url)
        logger.info('Please do the followings:\n 1. login in the GUI browser\n 2. close the GUI browser')
        driver.get(url)
        cookies: List[Dict[str, str]] = []
        try:
            # poll the cookies until the user closes the browser window
            while driver.current_url:
                cookies = driver.get_cookies()
                time.sleep(0.1)
        except selenium.common.exceptions.WebDriverException as e:
            logger.debug(e)  # the window is closed

        # set cookies to the requests.Session
        logger.info('Copying cookies via WebDriver...')
        for c in cookies:
            logger.debug('set cookie: %s', c['name'])
            morsel: http.cookies.Morsel = http.cookies.Morsel()
            morsel.set(c['name'], c['value'], c['value'])
            # copy only the attributes Morsel recognizes (domain, path, secure, ...)
            morsel.update({key: value for key, value in c.items() if morsel.isReservedKey(key)})
            if not morsel['expires']:
                # session cookies would be dropped when saved to the cookie jar,
                # so give them a far-future expiry instead
                expires = datetime.datetime.now(datetime.timezone.utc).astimezone() + datetime.timedelta(days=180)
                morsel.update({'expires': expires.strftime('%a, %d-%b-%Y %H:%M:%S GMT')})  # RFC2109 format
            cookie = requests.cookies.morsel_to_cookie(morsel)
            session.cookies.set_cookie(cookie)  # type: ignore


def is_logged_in_with_message(service: Service, *, session: requests.Session) -> bool:
    # Checks the login state and reports it to the user in either case.
    if service.is_logged_in(session=session):
        logger.info(utils.SUCCESS + 'You have already signed in.')
        return True
    else:
        logger.info(utils.FAILURE + 'You are not signed in.')
        return False


def run(args: argparse.Namespace) -> bool:
    """
    :returns: whether it is logged in or not.
    """

    service = dispatch.service_from_url(args.url)
    if service is None:
        # unsupported URL; nothing to log in to
        return False

    with utils.new_session_with_our_user_agent(path=args.cookie) as session:

        if is_logged_in_with_message(service, session=session):
            return True
        else:
            if args.check:
                # --check only reports the state; don't try to log in
                return False

        # first try the GUI browser flow (unless --use-browser never)
        if args.use_browser in ('always', 'auto'):
            try:
                login_with_browser(service, session=session)
            except ImportError:
                logger.error('Selenium is not installed. Please run $ pip3 install selenium')
            except WebDriverException as e:
                logger.debug(e)
            else:
                return is_logged_in_with_message(service, session=session)

        # fall back to the CUI username/password flow (unless --use-browser always)
        if args.use_browser in ('never', 'auto'):
            if args.use_browser == 'auto':
                logger.warning('Switch to use CUI-based login instead of Selenium')
            try:
                login_with_password(service, username=args.username, password=args.password, session=session)
            except NotImplementedError as e:
                logger.exception(e)
            except LoginError as e:
                logger.debug(e)
            except Exception as e:
                logger.exception(e)
            else:
                return is_logged_in_with_message(service, session=session)

        return False
# type: ignore 16 | subparser = subparsers_add_parser('test-reactive', aliases=['t/r', 'test-interactive', 't/i'], help='test for interactive problem', formatter_class=argparse.RawTextHelpFormatter, epilog='''\ 17 | tips: 18 | You can do similar things with shell: 19 | e.g. $ mkfifo a.pipe && ./a.out < a.pipe | python3 judge.py > a.pipe 20 | You can repeat running until your solution fails, with while-statement of shell: 21 | e.g. $ while oj t/i 'python3 judge.py' ; do : ; done 22 | ''') 23 | subparser.add_argument('-c', '--command', default=utils.get_default_command(), help='your solution to be tested. (default: "{}")'.format(utils.get_default_command())) 24 | subparser.add_argument('judge', help='judge program using standard I/O') 25 | 26 | 27 | @contextlib.contextmanager 28 | def fifo() -> Generator[Tuple[Any, Any], None, None]: 29 | fdr, fdw = os.pipe() 30 | fhr = os.fdopen(fdr, 'r') 31 | fhw = os.fdopen(fdw, 'w') 32 | yield fhr, fhw 33 | fhw.close() 34 | fhr.close() 35 | # os.close(fdw), os.close(fdr) are unnecessary 36 | 37 | 38 | def run(args: argparse.Namespace) -> bool: 39 | with fifo() as (fhr1, fhw1): 40 | with fifo() as (fhr2, fhw2): 41 | with subprocess.Popen(args.command, shell=True, stdin=fhr2, stdout=fhw1, stderr=sys.stderr) as proc1: 42 | with subprocess.Popen(args.judge, shell=True, stdin=fhr1, stdout=fhw2, stderr=sys.stderr) as proc2: 43 | proc1.communicate() 44 | proc2.communicate() 45 | 46 | result = True 47 | if proc1.returncode != 0: 48 | logger.info(utils.FAILURE + 'RE: solution returns %d', proc1.returncode) 49 | result = False 50 | if proc2.returncode == 0: 51 | logger.info(utils.SUCCESS + 'AC') 52 | else: 53 | logger.info(utils.FAILURE + 'WA: judge returns %d', proc2.returncode) 54 | result = False 55 | return result 56 | -------------------------------------------------------------------------------- /onlinejudge_command/update_checking.py: -------------------------------------------------------------------------------- 1 | import 
logger = getLogger(__name__)


def describe_status_code(status_code: int) -> str:
    """Return a human-readable description of an HTTP status code, e.g. ``"200 OK"``.

    Fix: ``http.client.responses`` contains only IANA-registered codes, so a
    non-standard code (e.g. Cloudflare's 520) used to raise ``KeyError`` from
    a mere logging helper.  Unknown codes now fall back to a generic phrase.
    """
    reason = http.client.responses.get(status_code, 'Unknown Status Code')
    return '{} {}'.format(status_code, reason)


def request(method: str, url: str, session: requests.Session, raise_for_status: bool = True, **kwargs) -> requests.Response:
    """Send an HTTP request via the given session, logging the request and response.

    :raises requests.HTTPError: if ``raise_for_status`` is true and the response is an error
    """
    assert method in ['GET', 'POST']
    kwargs.setdefault('allow_redirects', True)
    logger.info('%s: %s', method, url)
    if 'data' in kwargs:
        logger.debug('data: %s', repr(kwargs['data']))
    resp = session.request(method, url, **kwargs)
    if resp.url != url:
        logger.info('redirected: %s', resp.url)
    logger.info(describe_status_code(resp.status_code))
    if raise_for_status:
        resp.raise_for_status()
    return resp


def get_latest_version_from_pypi(package_name: str) -> str:
    """Return the latest released version of ``package_name`` on PyPI.

    The result is cached on disk for 8 hours.  On network failure '0.0.0' is
    returned (and cached) so that update checking never breaks the command.
    """
    pypi_url = 'https://pypi.org/pypi/{}/json'.format(package_name)
    version_cache_path = user_cache_dir / "pypi.json"
    update_interval = 60 * 60 * 8  # 8 hours

    # load cache
    cache: Dict[str, Any] = {}
    if version_cache_path.exists():
        try:
            logger.debug('load the cache for update checking: %s', str(version_cache_path))
            with version_cache_path.open() as fh:
                cache = json.load(fh)
            if time.time() < cache[package_name]['time'] + update_interval:
                return cache[package_name]['version']
        except Exception as e:
            # best-effort: a broken or missing cache entry just forces a re-fetch
            logger.warning('failed to load the cache in update checking: %s', e)

    # get
    try:
        resp = request('GET', pypi_url, session=requests.Session())
        data = json.loads(resp.content.decode())
        value = data['info']['version']
    except requests.RequestException as e:
        logger.error(str(e))
        value = '0.0.0'  # ignore since this failure is not important
    cache[package_name] = {
        'time': int(time.time()),  # use timestamp because Python's standard datetime library is too weak to parse strings
        'version': value,
    }

    # store cache
    logger.debug('store the cache for update checking: %s', str(version_cache_path))
    version_cache_path.parent.mkdir(parents=True, exist_ok=True)
    with version_cache_path.open('w') as fh:
        json.dump(cache, fh)

    return value


def is_update_available_on_pypi(package_name: str, current_version: str) -> bool:
    """Return True iff PyPI has a release strictly newer than ``current_version``."""
    a = packaging.version.parse(current_version)
    b = packaging.version.parse(get_latest_version_from_pypi(package_name))
    return a < b


def run_for_package(*, package_name: str, current_version: str) -> bool:
    """Warn if an update for the package is available.

    :returns: True if the package is up to date.
    """
    is_updated = not is_update_available_on_pypi(package_name, current_version)
    if not is_updated:
        # NOTE: this second lookup is served from the on-disk cache written above.
        logger.warning('update available for %s: %s -> %s', package_name, current_version, get_latest_version_from_pypi(package_name))
        logger.info('run: $ pip3 install -U %s', package_name)
    return is_updated
92 | """ 93 | 94 | try: 95 | is_updated = run_for_package(package_name=version.__package_name__, current_version=version.__version__) 96 | is_api_updated = run_for_package(package_name=api_version.__package_name__, current_version=api_version.__version__) 97 | return is_updated and is_api_updated 98 | 99 | except Exception as e: 100 | logger.error('failed to check update: %s', e) 101 | return True 102 | -------------------------------------------------------------------------------- /onlinejudge_command/utils.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import datetime 3 | import functools 4 | import http.cookiejar 5 | import os 6 | import pathlib 7 | import platform 8 | import shlex 9 | import signal 10 | import subprocess 11 | import sys 12 | import tempfile 13 | import time 14 | import webbrowser 15 | from logging import getLogger 16 | from typing import * 17 | from typing import BinaryIO # It seems we cannot import BinaryIO with wildcard-import 18 | 19 | import colorama 20 | import requests 21 | 22 | import onlinejudge_command.__about__ as version 23 | from onlinejudge import utils 24 | from onlinejudge.type import * 25 | 26 | logger = getLogger(__name__) 27 | 28 | # These strings can control logging output. 
# These strings can control logging output.
NO_HEADER = 'NO_HEADER: '
HINT = 'HINT: '
SUCCESS = 'SUCCESS: '
FAILURE = 'FAILURE: '

# Re-exported path helpers from the API client, so other modules can import
# them from here.
user_data_dir = utils.user_data_dir
user_cache_dir = utils.user_cache_dir
default_cookie_path = utils.default_cookie_path


@contextlib.contextmanager
def new_session_with_our_user_agent(*, path: pathlib.Path) -> Iterator[requests.Session]:
    """Yield a requests.Session carrying our User-Agent and the cookie jar at ``path``.

    :raises http.cookiejar.LoadError: if the cookie jar file is broken (after logging a hint)
    """
    session = requests.Session()
    session.headers['User-Agent'] = '{}/{} (+{})'.format(version.__package_name__, version.__version__, version.__url__)
    logger.debug('User-Agent: %s', session.headers['User-Agent'])
    try:
        with utils.with_cookiejar(session, path=path) as session:
            yield session
    except http.cookiejar.LoadError:
        logger.info(HINT + 'You can delete the broken cookie.jar file: %s', str(path))
        raise


def textfile(s: str) -> str:  # should have trailing newline
    """Ensure ``s`` ends with a newline, matching the newline style already used in it."""
    if s.endswith('\n'):
        return s
    elif '\r\n' in s:
        return s + '\r\n'
    else:
        return s + '\n'


def exec_command(command_str: str, *, stdin: Optional[BinaryIO] = None, input: Optional[bytes] = None, timeout: Optional[float] = None, gnu_time: Optional[str] = None) -> Tuple[Dict[str, Any], subprocess.Popen]:
    """Run ``command_str`` measuring elapsed time (and memory, when ``gnu_time`` is given).

    :param stdin: a binary file object connected to the child's stdin
    :param input: bytes fed to the child's stdin (mutually exclusive with ``stdin``)
    :param timeout: seconds before the child is terminated
    :param gnu_time: path of a GNU time binary used to measure peak memory, or None
    :returns: a pair of ``{'answer': Optional[bytes], 'elapsed': float, 'memory': Optional[float]}`` and the ``Popen`` object
    """
    if input is not None:
        assert stdin is None
        stdin = subprocess.PIPE  # type: ignore
    if gnu_time is not None:
        context: Any = tempfile.NamedTemporaryFile(delete=True)
    else:
        context = contextlib.ExitStack()  # TODO: we should use contextlib.nullcontext() if possible
    with context as fh:
        command = shlex.split(command_str)
        if gnu_time is not None:
            command = [gnu_time, '-f', '%M', '-o', fh.name, '--'] + command
        if os.name == 'nt':
            # HACK: without this encoding and decoding, something randomly fails with multithreading; see https://github.com/kmyk/online-judge-tools/issues/468
            command = command_str.encode().decode()  # type: ignore
        begin = time.perf_counter()

        # We need kill processes called from the "time" command using process groups. Without this, orphans spawn. see https://github.com/kmyk/online-judge-tools/issues/640
        preexec_fn = None
        if gnu_time is not None and os.name == 'posix':
            preexec_fn = os.setsid

        try:
            proc = subprocess.Popen(command, stdin=stdin, stdout=subprocess.PIPE, stderr=sys.stderr, preexec_fn=preexec_fn)  # pylint: disable=subprocess-popen-preexec-fn
        except FileNotFoundError:
            logger.error('No such file or directory: %s', command)
            sys.exit(1)
        except PermissionError:
            logger.error('Permission denied: %s', command)
            sys.exit(1)
        answer: Optional[bytes] = None
        try:
            answer, _ = proc.communicate(input=input, timeout=timeout)
        except subprocess.TimeoutExpired:
            pass
        finally:
            if preexec_fn is not None:
                try:
                    os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
                except ProcessLookupError:
                    pass
            else:
                proc.terminate()

        end = time.perf_counter()
        memory: Optional[float] = None
        if gnu_time is not None:
            with open(fh.name) as fh1:
                reported = fh1.read()
            logger.debug('GNU time says:\n%s', reported)
            # GNU time may print warnings before the memory line; only trust a
            # final line that is purely a number.
            if reported.strip() and reported.splitlines()[-1].isdigit():
                memory = int(reported.splitlines()[-1]) / 1000
        info = {
            'answer': answer,  # Optional[bytes]
            'elapsed': end - begin,  # float, in second
            'memory': memory,  # Optional[float], in megabyte
        }
        return info, proc


def green(s: str) -> str:
    """green(s) color s with green.

    This function exists to encapsulate the coloring methods only in utils.py.
    """

    return colorama.Fore.GREEN + s + colorama.Fore.RESET


def red(s: str) -> str:
    """red(s) color s with red.

    This function exists to encapsulate the coloring methods only in utils.py.
    """

    return colorama.Fore.RED + s + colorama.Fore.RESET


def green_diff(s: str) -> str:
    """green_diff(s) is deprecated.
    """

    return colorama.Fore.RESET + colorama.Back.GREEN + colorama.Style.BRIGHT + s + colorama.Style.NORMAL + colorama.Back.RESET + colorama.Fore.GREEN


def red_diff(s: str) -> str:
    """red_diff(s) is deprecated.
    """

    return colorama.Fore.RESET + colorama.Back.RED + colorama.Style.BRIGHT + s + colorama.Style.NORMAL + colorama.Back.RESET + colorama.Fore.RED


def success(msg: str) -> str:
    """success(msg) adds a green SUCCESS header to msg for logging.
    """

    # Fix: colorama's Style object has no attribute RESET (that raised
    # AttributeError at runtime); RESET_ALL is the correct reset constant.
    return colorama.Fore.GREEN + 'SUCCESS' + colorama.Style.RESET_ALL + ': ' + msg


def failure(msg: str) -> str:
    """failure(msg) adds a red FAILURE header to msg for logging.
    """

    # Fix: same colorama.Style.RESET -> RESET_ALL bug as in success().
    return colorama.Fore.RED + 'FAILURE' + colorama.Style.RESET_ALL + ': ' + msg


def remove_suffix(s: str, suffix: str) -> str:
    """Remove ``suffix`` from ``s``; the caller must guarantee that ``s`` ends with it."""
    assert s.endswith(suffix)
    return s[:-len(suffix)]


tzinfo_jst = datetime.timezone(datetime.timedelta(hours=+9), 'JST')


def is_windows_subsystem_for_linux() -> bool:
    """Return True when running on Linux under WSL (detected via the kernel release string)."""
    return platform.uname().system == 'Linux' and 'microsoft' in platform.uname().release.lower()


@functools.lru_cache(maxsize=None)
def webbrowser_register_explorer_exe() -> None:
    """webbrowser_register_explorer registers `explorer.exe` in the list of browsers under Windows Subsystem for Linux.

    See https://github.com/online-judge-tools/oj/issues/773
    """

    # There is an issue that the terminal is cleared after `.open_new_tab()`. The reason is unknown, but adding an argument `preferred=True` to `webbrowser.register` resolves this issue.

    # See https://github.com/online-judge-tools/oj/pull/784

    if not is_windows_subsystem_for_linux():
        return
    instance = webbrowser.GenericBrowser('explorer.exe')
    webbrowser.register('explorer', None, instance, preferred=True)  # `preferred=True` solves the issue that terminal logs are cleared on cmd.exe with stopping using wslview via www-browser. TODO: remove `preferred=True` after https://github.com/wslutilities/wslu/issues/199 is fixed.


def get_default_command() -> str:
    """get_default_command returns a command to execute the default output of g++ or clang++. The value is basically `./a.out`, but `.\\a.exe` on Windows.

    The type of return values must be `str` and must not be `pathlib.Path`, because the strings `./a.out` and `a.out` are different as commands but same as a path.
    """
    if platform.system() == 'Windows':
        return r'.\a.exe'
    return './a.out'
9999 33 | default_section = THIRDPARTY 34 | known_first_party = onlinejudge 35 | 36 | [pylint] 37 | # run: $ pylint --rcfile=setup.cfg modules_or_packages 38 | disable = 39 | broad-except, 40 | consider-using-enumerate, 41 | consider-using-f-string, # TODO: remove this 42 | consider-using-with, # TODO: remove this 43 | duplicate-code, 44 | fixme, 45 | invalid-name, 46 | line-too-long, 47 | logging-not-lazy, # TODO: remove this. logging-not-lazy should not be disabled if possible. 48 | missing-class-docstring, 49 | missing-function-docstring, 50 | missing-module-docstring, 51 | no-else-break, 52 | no-else-raise, 53 | no-else-return, 54 | no-member, 55 | redefined-builtin, 56 | too-few-public-methods, 57 | too-many-arguments, 58 | too-many-boolean-expressions, 59 | too-many-branches, 60 | too-many-instance-attributes, 61 | too-many-lines, 62 | too-many-locals, 63 | too-many-public-methods, 64 | too-many-return-statements, 65 | too-many-statements, 66 | unnecessary-lambda-assignment, # TODO: remove this 67 | unspecified-encoding, # TODO: remove this 68 | unused-argument, 69 | unused-wildcard-import, 70 | wildcard-import, 71 | wrong-import-order, 72 | 73 | [mypy] 74 | 75 | [mypy-*.*] 76 | ignore_missing_imports = True 77 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from setuptools import find_packages, setup 3 | 4 | import onlinejudge_command.__about__ as version 5 | 6 | setup( 7 | name=version.__package_name__, 8 | version=version.__version__, 9 | author=version.__author__, 10 | author_email=version.__email__, 11 | url=version.__url__, 12 | license=version.__license__, 13 | description=version.__description__, 14 | python_requires='>=3.8', 15 | install_requires=[ 16 | 'online-judge-api-client >= 10.9.0, < 11', 17 | 'colorama >= 0.3, < 1', 18 | 'packaging >= 24', 19 | 'requests >= 2, < 3', 20 | ], 21 | 
def get_files_from_json(samples):
    """Convert sample cases (JSON-like dicts) into ``{filename: md5 hexdigest}``.

    Each sample yields a ``.in`` and a ``.out`` file, named after the sample's
    ``name`` when present, otherwise ``sample-<n>``.
    """
    files = {}
    for i, sample in enumerate(samples):
        for ext in ('in', 'out'):
            if 'name' in sample:
                name = sample['name'] + '.' + ext
            else:
                name = 'sample-{}.{}'.format(i + 1, ext)
            files[name] = hashlib.md5(sample[ext + 'put'].encode()).hexdigest()
    return files


def _md5_of_files_in_test_dir():
    """Return ``{filename: md5 hexdigest}`` for every file under ./test (empty if absent).

    Extracted from the duplicated scan loops below; the files are now opened
    in binary mode directly instead of opening them in text mode only to
    reach the underlying ``fh.buffer``.
    """
    result = {}
    if os.path.exists('test'):
        for name in os.listdir('test'):
            with open(os.path.join('test', name), 'rb') as fh:
                result[name] = hashlib.md5(fh.read()).hexdigest()
    return result


def snippet_call_download(self, url, files, is_system=False, is_silent=False, type='files'):
    """Run `oj download` in a sandbox and compare the downloaded files with ``files``."""
    assert type in ('files', 'json')
    if type == 'json':
        files = get_files_from_json(files)

    with tests.utils.sandbox([]):
        args = ['download', url]
        if is_system:
            args += ['--system']
        if is_silent:
            args += ['--silent']
        tests.utils.run(args, check=True)
        self.assertEqual(files, _md5_of_files_in_test_dir())


def snippet_call_download_failure(self, url, is_system=False, is_silent=False):
    """Run the `download` subcommand directly and assert that it reports failure."""
    args = ["download", url]
    if is_system:
        args.append("--system")
    if is_silent:
        args.append("--silent")
    args = get_parser().parse_args(args=args)
    self.assertFalse(subcommand_download.run(args))


def snippet_call_download_twice(self, url1, url2, files, is_system=False, is_silent=False, type='files'):
    """Download from ``url1``, then check that a second download is aborted and nothing is overwritten."""
    assert type in ('files', 'json')
    if type == 'json':
        files = get_files_from_json(files)

    with tests.utils.sandbox([]):
        args = ['download', url1]
        if is_system:
            args += ['--system']
        if is_silent:
            args += ['--silent']
        args = get_parser().parse_args(args=args)
        self.assertTrue(subcommand_download.run(args))

        args = ['download', url2]
        if is_system:
            args += ['--system']
        if is_silent:
            args += ['--silent']
        args = get_parser().parse_args(args=args)
        # download from url2 should be aborted.
        self.assertFalse(subcommand_download.run(args))

        # check download from url1 is not overwritten
        self.assertEqual(files, _md5_of_files_in_test_dir())
class DownloadInvalidTest(unittest.TestCase):
    """Tests for the `download` subcommand against URLs that must fail."""
    def snippet_call_download_failure(self, *args, **kwargs):
        tests.command_download.snippet_call_download_failure(self, *args, **kwargs)

    def snippet_call_download_twice(self, *args, **kwargs):
        tests.command_download.snippet_call_download_twice(self, *args, **kwargs)

    def test_call_download_invalid(self):
        self.snippet_call_download_failure('https://not_exist_contest.jp/tasks/001_a')

    def test_call_download_no_sample_found(self):
        self.snippet_call_download_failure('https://atcoder.jp/contests/tenka1-2013-quala/tasks/tenka1_2013_qualA_a')
        self.snippet_call_download_failure('https://open.kattis.com/problems/hello')

    def test_call_download_twice(self):
        # The second URL must be aborted; the samples of the first must survive.
        samples = [
            ('575\n', '4\n'),
            ('3600\n', '13\n'),
            ('999999999\n', '26484\n'),
        ]
        expected = [{'input': i, 'output': o} for i, o in samples]
        self.snippet_call_download_twice('https://atcoder.jp/contests/abc114/tasks/abc114_c', 'https://atcoder.jp/contests/abc003/tasks/abc003_4', expected, type='json')


class GenerateInputTest(unittest.TestCase):
    def snippet_call_generate_input(self, args, input_files, expected_values, disallowed_files=None):
        """Run `oj generate-input` in a sandbox and check the produced files."""
        with tests.utils.sandbox(input_files) as _:
            tests.utils.run(['generate-input'] + args, check=True)
            for expect in expected_values:
                self.assertTrue(os.path.exists(expect['path']))
                if expect['data'] is not None:
                    with open(expect['path']) as f:
                        self.assertEqual(f.read(), expect['data'])
            for file in (disallowed_files or []):
                self.assertFalse(os.path.exists(file))

    def test_call_generate_input_parallel(self):
        self.snippet_call_generate_input(
            args=[tests.utils.python_script('generate.py'), '-j', '4'],
            input_files=[{'path': 'generate.py', 'data': 'print("Hello, world!")\n'}],
            expected_values=[{'path': 'test/random-{}.in'.format(str(i).zfill(3)), 'data': 'Hello, world!\n'} for i in range(100)],
            disallowed_files=['test/random-100.in'],
        )

    def test_call_generate_input_with_output(self):
        # Only the file names are predictable here; the generated data is random.
        expected_values = []
        for i in range(20):
            prefix = 'test/random-{}'.format(str(i).zfill(3))
            expected_values.append({'path': prefix + '.in', 'data': None})
            expected_values.append({'path': prefix + '.out', 'data': None})
        self.snippet_call_generate_input(
            args=[tests.utils.python_script('generate.py'), '20', '--command', tests.utils.python_script('main.py')],
            input_files=[
                {'path': 'generate.py', 'data': 'import random\nprint(random.randrange(100))\n'},
                {'path': 'main.py', 'data': 'print(int(input()) * 2)\n'},
            ],
            expected_values=expected_values,
            disallowed_files=['test/random-100.in'],
        )

    def test_call_generate_input_hack(self):
        self.snippet_call_generate_input(
            args=[tests.utils.python_script('generate.py'), '--command', tests.utils.python_script('ac.py'), '--hack', tests.utils.python_script('wa.py')],
            input_files=[
                {'path': 'generate.py', 'data': 'import random\nprint(random.randint(1, 10))\n'},
                {'path': 'ac.py', 'data': 'print(int(input()) // 10)\n'},
                {'path': 'wa.py', 'data': 'print(0)\n'},
            ],
            expected_values=[
                {'path': 'test/hack-000.in', 'data': '10\n'},
                {'path': 'test/hack-000.out', 'data': '1\n'},
            ],
        )

    def test_call_generate_input_hack_parallel(self):
        self.snippet_call_generate_input(
            args=[tests.utils.python_script('generate.py'), '--command', tests.utils.python_script('ac.py'), '--hack', tests.utils.python_script('wa.py'), '-j', '4'],
            input_files=[
                {'path': 'generate.py', 'data': 'import random\nprint(random.randint(1, 100))\n'},
                {'path': 'ac.py', 'data': 'print(int(input()) // 100)\n'},
                {'path': 'wa.py', 'data': 'print(0)\n'},
            ],
            expected_values=[
                {'path': 'test/hack-000.in', 'data': '100\n'},
                {'path': 'test/hack-000.out', 'data': '1\n'},
            ],
        )

    def test_call_generate_input_hack_with_re(self):
        # A runtime error (failed assert) in the actual solution also counts as a hack.
        self.snippet_call_generate_input(
            args=[tests.utils.python_script('generate.py'), '--hack-actual', tests.utils.python_script('err.py'), '-j', '4'],
            input_files=[
                {'path': 'generate.py', 'data': 'import random\nprint(random.randint(0, 10))\n'},
                {'path': 'err.py', 'data': 'n = int(input())\nassert n != 0\nprint(n)\n'},
            ],
            expected_values=[
                {'path': 'test/hack-000.in', 'data': '0\n'},
            ],
        )

    def test_call_generate_input_failure(self):
        # When the generator itself crashes, no test files may be left behind.
        self.snippet_call_generate_input(
            args=[tests.utils.python_script('generate.py'), '3'],
            input_files=[
                {'path': 'generate.py', 'data': 'raise RuntimeError()\n'},
            ],
            expected_values=[],
            disallowed_files=['test/random-{}.in'.format(str(i).zfill(3)) for i in range(3)],
        )
class GenerateOutputTest(unittest.TestCase):
    @staticmethod
    def _files(pairs):
        """Build the ``[{'path': ..., 'data': ...}, ...]`` structure from (path, data) pairs."""
        return [{'path': path, 'data': data} for path, data in pairs]

    def snippet_call_generate_output(self, args, input_files, expected_values, disallowed_files=None):
        """Run `oj generate-output` in a sandbox and verify the generated files."""
        with tests.utils.sandbox(input_files) as _:
            tests.utils.run(['generate-output'] + args, check=True)
            for expect in expected_values:
                with open(expect['path']) as f:
                    self.assertEqual(f.read(), expect['data'])
            for file in (disallowed_files or []):
                self.assertFalse(os.path.exists(file))

    def test_call_generate_output_simple(self):
        self.snippet_call_generate_output(
            args=['-c', cat()],
            input_files=self._files([
                ('test/sample-1.in', 'foo\n'),
                ('test/sample-2.in', 'bar\n'),
            ]),
            expected_values=self._files([
                ('test/sample-1.out', 'foo\n'),
                ('test/sample-2.out', 'bar\n'),
            ]),
        )

    def test_call_generate_output_select(self):
        # Only the explicitly selected cases get outputs.
        self.snippet_call_generate_output(
            args=['-c', cat(), 'test/sample-1.in', 'test/sample-2.in'],
            input_files=self._files([
                ('test/sample-1.in', 'foo\n'),
                ('test/sample-2.in', 'bar\n'),
                ('test/sample-3.in', 'baz\n'),
            ]),
            expected_values=self._files([
                ('test/sample-1.out', 'foo\n'),
                ('test/sample-2.out', 'bar\n'),
            ]),
            disallowed_files=['test/sample-3.out'],
        )

    def test_call_generate_output_already_exists(self):
        # Since sample-1.out already exists, sample-1.out will not be updated.
        self.snippet_call_generate_output(
            args=['-c', cat()],
            input_files=self._files([
                ('test/sample-1.in', 'foo\n'),
                ('test/sample-1.out', 'bar\n'),
            ]),
            expected_values=self._files([
                ('test/sample-1.out', 'bar\n'),
            ]),
        )

    def test_call_generate_output_dir(self):
        self.snippet_call_generate_output(
            args=['-c', cat(), '-d', 'p/o/../../p/o/y/o'],
            input_files=self._files([
                ('p/o/y/o/sample-1.in', 'foo\n'),
                ('p/o/y/o/sample-2.in', 'bar\n'),
            ]),
            expected_values=self._files([
                ('p/o/y/o/sample-1.out', 'foo\n'),
                ('p/o/y/o/sample-2.out', 'bar\n'),
            ]),
        )

    def test_call_generate_output_format(self):
        self.snippet_call_generate_output(
            args=['-c', cat(), '-d', 'yuki/coder', '-f', 'test_%e/%s'],
            input_files=self._files([
                ('yuki/coder/test_in/sample-1.txt', 'foo\n'),
                ('yuki/coder/test_in/sample-2.txt', 'bar\n'),
            ]),
            expected_values=self._files([
                ('yuki/coder/test_out/sample-1.txt', 'foo\n'),
                ('yuki/coder/test_out/sample-2.txt', 'bar\n'),
            ]),
        )

    def test_call_generate_output_format_select(self):
        self.snippet_call_generate_output(
            args=['-c', cat(), '-d', 'yuki/coder', '-f', 'test_%e/%s', 'yuki/coder/test_in/sample-2.txt', 'yuki/coder/test_in/sample-3.txt'],
            input_files=self._files([
                ('yuki/coder/test_in/sample-2.txt', 'bar\n'),
                ('yuki/coder/test_in/sample-3.txt', 'baz\n'),
            ]),
            expected_values=self._files([
                ('yuki/coder/test_out/sample-2.txt', 'bar\n'),
                ('yuki/coder/test_out/sample-3.txt', 'baz\n'),
            ]),
        )

    def test_call_generate_output_format_hack(self):
        self.snippet_call_generate_output(
            args=['-c', cat(), '-d', 'a/b', '-f', 'c/test_%e/d/%s/e.case.txt'],
            input_files=self._files([
                ('a/b/c/test_in/d/sample.case.1/e.case.txt', 'foo\n'),
                ('a/b/c/test_in/d/sample.case.2/e.case.txt', 'bar\n'),
            ]),
            expected_values=self._files([
                ('a/b/c/test_out/d/sample.case.1/e.case.txt', 'foo\n'),
                ('a/b/c/test_out/d/sample.case.2/e.case.txt', 'bar\n'),
            ]),
        )

    def test_call_generate_output_in_parallel(self):
        TOTAL = 100
        PARALLEL = 32
        input_files = []
        expected_values = []
        for i in range(TOTAL):
            name = 'sample-%03d' % i
            in_path = 'test/{}.in'.format(name)
            out_path = 'test/{}.out'.format(name)
            input_files.append({'path': in_path, 'data': str(i)})
            if i > TOTAL * 0.95:
                # a few outputs already exist and must be kept untouched
                input_files.append({'path': out_path, 'data': str(i)})
            expected_values.append({'path': in_path, 'data': str(i)})
            expected_values.append({'path': out_path, 'data': str(i)})
        self.snippet_call_generate_output(
            args=['--jobs', str(PARALLEL), '-c', tests.utils.python_c("import sys, time; time.sleep(1); sys.stdout.write(sys.stdin.read())")],
            input_files=input_files,
            expected_values=expected_values,
        )
from onlinejudge.service.hackerrank import HackerRankService 7 | from onlinejudge.service.toph import TophService 8 | from onlinejudge.service.yukicoder import YukicoderService 9 | 10 | 11 | # TODO: add tests for login without `--check` option. Copy credentials used in tests https://github.com/online-judge-tools/api-client and write tests. 12 | class LoginCheckTest(unittest.TestCase): 13 | def snippet_call_login_check_failure(self, url): 14 | with tests.utils.sandbox(files=[]) as _: 15 | path = 'cookie.jar' # use dummy cookie to check in an empty state 16 | proc = tests.utils.run(['--cookie', path, 'login', '--check', url]) 17 | self.assertEqual(proc.returncode, 1) 18 | 19 | def snippet_call_login_check_success(self, url): 20 | tests.utils.run(['login', '--check', url], check=True) 21 | 22 | def test_call_login_check_atcoder_failure(self): 23 | self.snippet_call_login_check_failure('https://atcoder.jp/') 24 | 25 | def test_call_login_check_codeforces_failure(self): 26 | self.snippet_call_login_check_failure('https://codeforces.com/') 27 | 28 | def test_call_login_check_hackerrank_failure(self): 29 | self.snippet_call_login_check_failure('https://www.hackerrank.com/') 30 | 31 | def test_call_login_check_toph_failure(self): 32 | self.snippet_call_login_check_failure('https://toph.co/') 33 | 34 | def test_call_login_check_yukicoder_failure(self): 35 | self.snippet_call_login_check_failure('https://yukicoder.me/') 36 | 37 | @unittest.skipIf(not tests.utils.is_logged_in(AtCoderService()), 'login is required') 38 | def test_call_login_check_atcoder_success(self): 39 | self.snippet_call_login_check_success('https://atcoder.jp/') 40 | 41 | @unittest.skipIf(not tests.utils.is_logged_in(CodeforcesService()), 'login is required') 42 | def test_call_login_check_codeforces_success(self): 43 | self.snippet_call_login_check_success('https://codeforces.com/') 44 | 45 | @unittest.skipIf(not tests.utils.is_logged_in(HackerRankService()), 'login is required') 46 | def 
test_call_login_check_hackerrank_success(self): 47 | self.snippet_call_login_check_success('https://www.hackerrank.com/') 48 | 49 | @unittest.skipIf(not tests.utils.is_logged_in(TophService()), 'login is required') 50 | def test_call_login_check_toph_success(self): 51 | self.snippet_call_login_check_success('https://toph.co/') 52 | 53 | @unittest.skipIf(not tests.utils.is_logged_in(YukicoderService()), 'login is required') 54 | def test_call_login_check_yukicoder_success(self): 55 | self.snippet_call_login_check_success('https://yukicoder.me/') 56 | -------------------------------------------------------------------------------- /tests/command_submit.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import time 4 | import unittest 5 | 6 | import requests.exceptions 7 | 8 | import tests.utils 9 | from onlinejudge.service.atcoder import AtCoderService 10 | from onlinejudge.service.codeforces import CodeforcesService 11 | from onlinejudge.service.hackerrank import HackerRankService 12 | from onlinejudge.service.toph import TophService 13 | from onlinejudge.service.yukicoder import YukicoderService 14 | 15 | 16 | class SubmitArgumentsTest(unittest.TestCase): 17 | @unittest.skipIf(not tests.utils.is_logged_in(AtCoderService()), 'login is required') 18 | def test_call_submit_atcoder_practice_2_with_history_simple(self): 19 | 20 | url = 'https://atcoder.jp/contests/practice/tasks/practice_2' 21 | files = [ 22 | { 23 | 'path': 'a.cpp', 24 | 'data': 'compile error' 25 | }, 26 | ] 27 | with tests.utils.sandbox(files): 28 | tests.utils.run(['dl', url], check=False) 29 | tests.utils.run(['s', '-y', '--no-open', url, 'a.cpp'], check=True) 30 | 31 | @unittest.skipIf(not tests.utils.is_logged_in(AtCoderService()), 'login is required') 32 | def test_call_submit_atcoder_practice_2_with_history_remove(self): 33 | 34 | url = 'https://atcoder.jp/contests/practice/tasks/practice_2' 35 | files = [ 36 | { 37 | 'path': 
'a.cpp', 38 | 'data': 'compile error' 39 | }, 40 | ] 41 | with tests.utils.sandbox(files): 42 | tests.utils.run(['dl', 'https://atcoder.jp/contests/abc099/tasks/abc099_a'], check=True) 43 | shutil.rmtree('test/') 44 | tests.utils.run(['dl', url], check=False) 45 | tests.utils.run(['s', '-y', '--no-open', url, 'a.cpp'], check=True) 46 | 47 | @unittest.skipIf(os.name == 'nt', "shell script doesn't work on Windows") 48 | @unittest.skipIf(not tests.utils.is_logged_in(AtCoderService()), 'login is required') 49 | def test_call_submit_atcoder_practice_1_with_open(self): 50 | 51 | url = 'https://atcoder.jp/contests/practice/tasks/practice_1' 52 | files = [ 53 | { 54 | 'path': 'a.pl', 55 | 'data': 'print<>+(<>=~$",$`+$\'),$",<>' 56 | }, 57 | { 58 | 'path': 'browse.sh', 59 | 'data': '#!/bin/sh\necho "$@" > url.txt\n', 60 | 'executable': True, 61 | }, 62 | ] 63 | with tests.utils.sandbox(files) as tempdir: 64 | env = dict(os.environ) 65 | env['BROWSER'] = os.path.join(tempdir, 'browse.sh') 66 | 67 | tests.utils.run(['s', '-y', '--open', url, 'a.pl'], env=env, check=True) 68 | with open('url.txt') as fh: 69 | url = fh.read().strip() 70 | self.assertTrue(url.startswith('https://atcoder.jp/contests/practice/submissions/')) 71 | 72 | @unittest.skipIf(not tests.utils.is_logged_in(AtCoderService()), 'login is required') 73 | def test_call_submit_atcoder_invalid_url(self): 74 | 75 | url = 'https://atcoder.jp/contests/practice/tasks/practice_111' 76 | code = '''\ 77 | #include 78 | using namespace std; 79 | int main() { 80 | int a; cin >> a; 81 | int b, c; cin >> b >> c; 82 | string s; cin >> s; 83 | cout << a + b + c << ' ' << s << endl; 84 | return 0; 85 | } 86 | ''' 87 | files = [ 88 | { 89 | 'path': 'main.cpp', 90 | 'data': code 91 | }, 92 | ] 93 | 94 | with tests.utils.sandbox(files): 95 | with self.assertRaises(requests.exceptions.HTTPError): 96 | tests.utils.run(["submit", '-y', '--no-open', url, 'main.cpp'], check=True) 97 | 98 | 99 | class 
SubmitAtCoderTest(unittest.TestCase): 100 | @unittest.skipIf(not tests.utils.is_logged_in(AtCoderService()), 'login is required') 101 | def test_call_submit_practice_1(self): 102 | 103 | url = 'https://atcoder.jp/contests/practice/tasks/practice_1' 104 | code = '''\ 105 | #include 106 | using namespace std; 107 | int main() { 108 | int a; cin >> a; 109 | int b, c; cin >> b >> c; 110 | string s; cin >> s; 111 | cout << a + b + c << ' ' << s << endl; 112 | return 0; 113 | } 114 | ''' 115 | files = [ 116 | { 117 | 'path': 'main.cpp', 118 | 'data': code 119 | }, 120 | ] 121 | 122 | with tests.utils.sandbox(files): 123 | tests.utils.run(['submit', '-y', '--no-open', url, 'main.cpp'], check=True) 124 | 125 | @unittest.skipIf(not tests.utils.is_logged_in(AtCoderService()), 'login is required') 126 | def test_call_submit_practice_2(self): 127 | 128 | url = 'https://atcoder.jp/contests/practice/tasks/practice_2' 129 | code = '''\ 130 | # Python Version: 3.x 131 | import string 132 | import sys 133 | def quick_sort(s): 134 | if len(s) <= 1: 135 | return s 136 | pivot = s[0] 137 | lo, hi = '', '' 138 | for c in s[1 :]: 139 | print('?', pivot, c) 140 | sys.stdout.flush() 141 | if input() == '<': 142 | hi += c 143 | else: 144 | lo += c 145 | return quick_sort(lo) + pivot + quick_sort(hi) 146 | n, q = map(int, input().split()) 147 | assert n == 26 and q == 1000 148 | print('!', ''.join(quick_sort(string.ascii_uppercase[: n]))) 149 | ''' 150 | files = [ 151 | { 152 | 'path': 'main.py', 153 | 'data': code 154 | }, 155 | ] 156 | 157 | with tests.utils.sandbox(files): 158 | tests.utils.run(['submit', '-y', '--no-open', url, 'main.py'], check=True) 159 | 160 | 161 | class SubmitCodeforcesTest(unittest.TestCase): 162 | @unittest.skipIf(not tests.utils.is_logged_in(CodeforcesService()), 'login is required') 163 | def test_call_submit_beta_1_a(self): 164 | 165 | url = 'https://codeforces.com/contest/1/problem/A' 166 | code = '\n'.join([ 167 | '#!/usr/bin/env python3', 168 | 'h, w, a = 
map(int, input().split())', 169 | 'print(((h + a - 1) // a) * ((w + a - 1) // a))', 170 | '# ' + str(int(time.time())), # to bypass the "You have submitted exactly the same code before" error 171 | ]) + '\n' 172 | files = [ 173 | { 174 | 'path': 'a.py', 175 | 'data': code 176 | }, 177 | ] 178 | with tests.utils.sandbox(files): 179 | tests.utils.run(['s', '-y', '--no-open', url, 'a.py'], check=True) 180 | 181 | @unittest.skipIf(not tests.utils.is_logged_in(CodeforcesService()), 'login is required') 182 | def test_call_submit_beta_3_b(self): 183 | 184 | url = 'https://codeforces.com/contest/3/problem/B' 185 | code = r'''#include 186 | #define REP(i, n) for (int i = 0; (i) < (int)(n); ++ (i)) 187 | #define ALL(x) begin(x), end(x) 188 | using namespace std; 189 | 190 | int main() { 191 | // input 192 | int n, v; cin >> n >> v; 193 | vector > one; 194 | vector > two; 195 | REP (i, n) { 196 | int t, p; cin >> t >> p; 197 | if (t == 1) { 198 | one.emplace_back(p, i); 199 | } else { 200 | two.emplace_back(p, i, -1); 201 | } 202 | } 203 | 204 | // solve 205 | int sum_p = 0; 206 | vector used; 207 | sort(ALL(one)); 208 | if (v % 2 == 1 and not one.empty()) { 209 | int p_i, i; tie(p_i, i) = one.back(); 210 | one.pop_back(); 211 | sum_p += p_i; 212 | used.push_back(i); 213 | v -= 1; 214 | } 215 | while (one.size() >= 2) { 216 | int p_i, i; tie(p_i, i) = one.back(); 217 | one.pop_back(); 218 | int p_j, j; tie(p_j, j) = one.back(); 219 | one.pop_back(); 220 | two.emplace_back(p_i + p_j, i, j); 221 | } 222 | if (one.size() == 1) { 223 | int p_i, i; tie(p_i, i) = one.back(); 224 | two.emplace_back(p_i, i, -1); 225 | one.pop_back(); 226 | } 227 | sort(ALL(two)); 228 | while (v >= 2 and not two.empty()) { 229 | int p, i, j; tie(p, i, j) = two.back(); 230 | two.pop_back(); 231 | sum_p += p; 232 | used.push_back(i); 233 | if (j != -1) used.push_back(j); 234 | v -= 2; 235 | } 236 | 237 | // output 238 | cout << sum_p << endl; 239 | REP (i, used.size()) { 240 | cout << used[i] + 1 << (i 
+ 1 < used.size() ? ' ' : '\n'); 241 | } 242 | return 0; 243 | } 244 | ''' + '// ' + str(int(time.time())) + '\n' # to bypass the "You have submitted exactly the same code before" error 245 | files = [ 246 | { 247 | 'path': 'main.cpp', 248 | 'data': code 249 | }, 250 | ] 251 | with tests.utils.sandbox(files): 252 | tests.utils.run(['s', '-y', '--no-open', url, 'main.cpp'], check=True) 253 | 254 | 255 | class SubmitYukicoderTest(unittest.TestCase): 256 | @unittest.skipIf(not tests.utils.is_logged_in(YukicoderService()), 'login is required') 257 | def test_call_submit_9000(self): 258 | 259 | url = 'https://yukicoder.me/problems/no/9000' 260 | code = '\n'.join([ 261 | '#!/usr/bin/env python2', 262 | 'print "Hello World!"', 263 | ]) + '\n' 264 | files = [ 265 | { 266 | 'path': 'a.py', 267 | 'data': code 268 | }, 269 | ] 270 | with tests.utils.sandbox(files): 271 | tests.utils.run(['s', '-y', '--no-open', url, 'a.py'], check=True) 272 | 273 | @unittest.skipIf(not tests.utils.is_logged_in(YukicoderService()), 'login is required') 274 | def test_call_submit_beta_3_b(self): 275 | 276 | url = 'https://yukicoder.me/problems/527' 277 | code = r'''#include 278 | using namespace std; 279 | int main() { 280 | int a, b; cin >> a >> b; 281 | string s; cin >> s; 282 | cout << a + b << ' ' << s << endl; 283 | return 0; 284 | } 285 | ''' 286 | files = [ 287 | { 288 | 'path': 'main.cpp', 289 | 'data': code 290 | }, 291 | ] 292 | with tests.utils.sandbox(files): 293 | tests.utils.run(['s', '-y', '--no-open', url, 'main.cpp'], check=True) 294 | 295 | 296 | class SubmitHackerRankTest(unittest.TestCase): 297 | @unittest.skipIf(not tests.utils.is_logged_in(HackerRankService()), 'login is required') 298 | def test_call_submit_worldcodesprint_mars_exploration(self): 299 | url = 'https://www.hackerrank.com/contests/worldcodesprint/challenges/mars-exploration' 300 | code = '''#!/usr/bin/env python3 301 | s = input() 302 | ans = 0 303 | for i in range(len(s) // 3): 304 | if s[3 * i] != 'S': 305 
| ans += 1 306 | if s[3 * i + 1] != 'O': 307 | ans += 1 308 | if s[3 * i + 2] != 'S': 309 | ans += 1 310 | print(ans) 311 | ''' 312 | files = [ 313 | { 314 | 'path': 'a.py', 315 | 'data': code 316 | }, 317 | ] 318 | with tests.utils.sandbox(files): 319 | tests.utils.run(['s', '-y', '--no-open', url, 'a.py'], check=True) 320 | 321 | 322 | class SubmitTophTest(unittest.TestCase): 323 | @unittest.skipIf(not tests.utils.is_logged_in(TophService()), 'login is required') 324 | def test_call_submit_copycat(self): 325 | url = 'https://toph.co/p/copycat' 326 | code = '''#!/usr/bin/env python3 327 | s = input() 328 | print(s) 329 | ''' 330 | files = [ 331 | { 332 | 'path': 'a.py', 333 | 'data': code 334 | }, 335 | ] 336 | with tests.utils.sandbox(files): 337 | tests.utils.run(['s', '-l', '58482c1804469e2585024324', '-y', '--no-open', url, 'a.py'], check=True) 338 | 339 | @unittest.skipIf(not tests.utils.is_logged_in(TophService()), 'login is required') 340 | def test_call_submit_divisors(self): 341 | url = 'https://toph.co/p/divisors' 342 | code = '''#include 343 | using namespace std; 344 | int main() 345 | { 346 | int a; 347 | cin>>a; 348 | for (int i=1;i<=a;i++) 349 | { 350 | if (a%i==0) 351 | { 352 | cout <=') 21 | sys.stdout.flush() 22 | assert False 23 | """ 24 | 25 | accepted_code = """\ 26 | #!/usr/bin/env python3 27 | import sys 28 | l = 1 29 | r = 10 ** 6 + 1 30 | while r - l >= 2: 31 | m = (l + r) // 2 32 | print(m) 33 | sys.stdout.flush() 34 | if input() == '<': 35 | r = m 36 | else: 37 | l = m 38 | print('!', l) 39 | """ 40 | 41 | wrong_answer_code = """\ 42 | #!/usr/bin/env python3 43 | import sys 44 | x = 21 45 | print(x) 46 | sys.stdout.flush() 47 | if input() == '>=': 48 | x -= 1 49 | print('!', x) 50 | """ 51 | 52 | 53 | class TestReactiveTest(unittest.TestCase): 54 | def test_simple_success(self) -> None: 55 | files = [ 56 | { 57 | 'path': 'main.py', 58 | 'data': accepted_code, 59 | }, 60 | { 61 | 'path': 'judge.py', 62 | 'data': judge_code, 63 | }, 64 | ] 
65 | with tests.utils.sandbox(files): 66 | tests.utils.run(['t/r', '-c', tests.utils.python_script('main.py'), tests.utils.python_script('judge.py')], check=True) 67 | 68 | def test_simple_failure(self) -> None: 69 | files = [ 70 | { 71 | 'path': 'main.py', 72 | 'data': wrong_answer_code, 73 | }, 74 | { 75 | 'path': 'judge.py', 76 | 'data': judge_code, 77 | }, 78 | ] 79 | with tests.utils.sandbox(files): 80 | proc = tests.utils.run(['t/r', '-c', tests.utils.python_script('main.py'), tests.utils.python_script('judge.py')]) 81 | self.assertNotEqual(proc.returncode, 0) 82 | -------------------------------------------------------------------------------- /tests/command_version.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import tests.utils 4 | 5 | 6 | class PrintVersionTest(unittest.TestCase): 7 | def test_version(self): 8 | pattern = rb'^online-judge-tools \d+\.\d+\.\d+ \(\+ online-judge-api-client \d+\.\d+\.\d+\)$' 9 | result = tests.utils.run_in_sandbox(args=['--version'], files=[]) 10 | self.assertRegex(result['proc'].stdout.strip(), pattern) 11 | -------------------------------------------------------------------------------- /tests/format_utils.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from onlinejudge_command.format_utils import * 4 | 5 | 6 | class PercentFormatTest(unittest.TestCase): 7 | def test_percentformat(self): 8 | self.assertEqual(percentformat("foo %a%a bar %b", {"a": "AA", "b": "12345"}), 'foo AAAA bar 12345') 9 | self.assertEqual(percentformat("foo %%a bar %%%a %b", {"a": "%a%b", "b": "12345"}), 'foo %a bar %%a%b 12345') 10 | self.assertRaises(KeyError, lambda: percentformat("%z", {})) 11 | 12 | def test_percentparse(self): 13 | self.assertEqual(percentparse("foo AAAA bar 12345", "foo %a%a bar %b", {"a": "AA", "b": "12345"}), {'a': 'AA', 'b': '12345'}) 14 | self.assertEqual(percentparse("123456789", "%x%y%z", 
{"x": r"\d+", "y": r"\d", "z": r"(\d\d\d)+"}), {'x': '12345', 'y': '6', 'z': '789'}) 15 | self.assertRaises(KeyError, lambda: percentparse("foo", "%a", {})) 16 | -------------------------------------------------------------------------------- /tests/main.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from onlinejudge_command import main 4 | 5 | 6 | class RequestExceptionTest(unittest.TestCase): 7 | def test_invalid_url(self): 8 | with self.assertRaises(SystemExit) as e: 9 | main.main(["d", "http://invalid_contest"]) 10 | self.assertEqual(e.exception.code, 1) 11 | -------------------------------------------------------------------------------- /tests/output_comparators.py: -------------------------------------------------------------------------------- 1 | """This module has unit tests for onlinejudge_command.output_comparators module. 2 | """ 3 | 4 | import unittest 5 | 6 | from onlinejudge_command.output_comparators import * 7 | 8 | 9 | class ExactComparatorTest(unittest.TestCase): 10 | def test_same(self) -> None: 11 | x = b'Hello, world!' 12 | y = b'Hello, world!' 13 | result = True 14 | 15 | compare = ExactComparator() 16 | self.assertEqual(compare(x, y), result) 17 | 18 | def test_different(self) -> None: 19 | x = b'Hello, world!' 
20 | y = b'hello world' 21 | result = False 22 | 23 | compare = ExactComparator() 24 | self.assertEqual(compare(x, y), result) 25 | 26 | 27 | class FloatingPointNumberComparatorTest(unittest.TestCase): 28 | def test_exact_same(self) -> None: 29 | rel_tol = 0 30 | abs_tol = 0 31 | x = b'1.23' 32 | y = b'1.23' 33 | result = True 34 | 35 | compare = FloatingPointNumberComparator(rel_tol=rel_tol, abs_tol=abs_tol) 36 | self.assertEqual(compare(x, y), result) 37 | 38 | def test_relative_close(self) -> None: 39 | rel_tol = 0.00001 40 | abs_tol = 0 41 | x = b'1000000000' 42 | y = b'1000000007' 43 | result = True 44 | 45 | compare = FloatingPointNumberComparator(rel_tol=rel_tol, abs_tol=abs_tol) 46 | self.assertEqual(compare(x, y), result) 47 | 48 | def test_relative_not_close(self) -> None: 49 | rel_tol = 0.00001 50 | abs_tol = 0 51 | x = b'333333336' 52 | y = b'1000000007' 53 | result = False 54 | 55 | compare = FloatingPointNumberComparator(rel_tol=rel_tol, abs_tol=abs_tol) 56 | self.assertEqual(compare(x, y), result) 57 | 58 | def test_absolute_close(self) -> None: 59 | rel_tol = 0 60 | abs_tol = 0.001 61 | x = b'3.142' 62 | y = b'3.141592' 63 | result = True 64 | 65 | compare = FloatingPointNumberComparator(rel_tol=rel_tol, abs_tol=abs_tol) 66 | self.assertEqual(compare(x, y), result) 67 | 68 | def test_absolute_not_close(self) -> None: 69 | rel_tol = 0 70 | abs_tol = 0.001 71 | x = b'3.1415926535' 72 | y = b'3.0' 73 | result = False 74 | 75 | compare = FloatingPointNumberComparator(rel_tol=rel_tol, abs_tol=abs_tol) 76 | self.assertEqual(compare(x, y), result) 77 | 78 | def test_non_float_same(self) -> None: 79 | rel_tol = 0 80 | abs_tol = 0 81 | x = b'foo' 82 | y = b'foo' 83 | result = True 84 | 85 | compare = FloatingPointNumberComparator(rel_tol=rel_tol, abs_tol=abs_tol) 86 | self.assertEqual(compare(x, y), result) 87 | 88 | def test_non_float_diff(self) -> None: 89 | rel_tol = 0 90 | abs_tol = 0 91 | x = b'foo' 92 | y = b'bar' 93 | result = False 94 | 95 | compare 
= FloatingPointNumberComparator(rel_tol=rel_tol, abs_tol=abs_tol) 96 | self.assertEqual(compare(x, y), result) 97 | 98 | def test_float_and_non_float(self) -> None: 99 | rel_tol = 0 100 | abs_tol = 0 101 | x = b'3.14' 102 | y = b'pi' 103 | result = False 104 | 105 | compare = FloatingPointNumberComparator(rel_tol=rel_tol, abs_tol=abs_tol) 106 | self.assertEqual(compare(x, y), result) 107 | 108 | 109 | class SplitComparatorTest(unittest.TestCase): 110 | def test_same(self) -> None: 111 | x = b' a \n b \r\n\r\n\r\n c \t\t d efg\nxyz \n\n\n ' 112 | y = b'a b c d efg\nxyz\n' 113 | result = True 114 | 115 | compare = SplitComparator(ExactComparator()) 116 | self.assertEqual(compare(x, y), result) 117 | 118 | def test_diff(self) -> None: 119 | x = b' a \n b \r\n\r\n\r\n c \t\t d efg\nxyz \n\n\n ' 120 | y = b'a b changed deleted efg\nxyz\n' 121 | result = False 122 | 123 | compare = SplitComparator(ExactComparator()) 124 | self.assertEqual(compare(x, y), result) 125 | 126 | def test_only_spaces(self) -> None: 127 | x = b' ' 128 | y = b'' 129 | result = True 130 | 131 | compare = SplitComparator(ExactComparator()) 132 | self.assertEqual(compare(x, y), result) 133 | 134 | 135 | class SplitLinesComparatorTest(unittest.TestCase): 136 | def test_same(self) -> None: 137 | line_comparator = SplitComparator(ExactComparator()) 138 | x = b'a b c d e\nf\ng\n xyz \n' 139 | y = b'a b c d e\nf\ng\nxyz\n' 140 | result = True 141 | 142 | compare = SplitLinesComparator(line_comparator) 143 | self.assertEqual(compare(x, y), result) 144 | 145 | def test_diff(self) -> None: 146 | line_comparator = SplitComparator(ExactComparator()) 147 | x = b'a b\nc d e\nf\ng\nxyz\n' 148 | y = b'a b c d e\nf\ng\nxyz\n' 149 | result = False 150 | 151 | compare = SplitLinesComparator(line_comparator) 152 | self.assertEqual(compare(x, y), result) 153 | 154 | def test_trailing_spaces(self) -> None: 155 | line_comparator = SplitComparator(ExactComparator()) 156 | x = b'foo\n ' 157 | y = b'foo\n' 158 | result = 
False 159 | 160 | compare = SplitLinesComparator(line_comparator) 161 | self.assertEqual(compare(x, y), result) 162 | 163 | def test_no_trailing_newline(self) -> None: 164 | line_comparator = ExactComparator() 165 | x = b'foo' 166 | y = b'foo\n' 167 | result = True 168 | 169 | compare = SplitLinesComparator(line_comparator) 170 | self.assertEqual(compare(x, y), result) 171 | 172 | def test_many_trailing_newlines(self) -> None: 173 | line_comparator = ExactComparator() 174 | x = b'foo\n\n\n' 175 | y = b'foo\n' 176 | result = True 177 | 178 | compare = SplitLinesComparator(line_comparator) 179 | self.assertEqual(compare(x, y), result) 180 | 181 | 182 | class CRLFInsensitiveComparatorTest(unittest.TestCase): 183 | def test_same(self) -> None: 184 | file_comparator = ExactComparator() 185 | x = b'foo\r\nbar\r\nbaz\n' 186 | y = b'foo\nbar\r\nbaz\r\n' 187 | result = True 188 | 189 | compare = CRLFInsensitiveComparator(file_comparator) 190 | self.assertEqual(compare(x, y), result) 191 | 192 | def test_diff(self) -> None: 193 | file_comparator = ExactComparator() 194 | x = b'foo\r\n' 195 | y = b'foo\n\n' 196 | result = False 197 | 198 | compare = CRLFInsensitiveComparator(file_comparator) 199 | self.assertEqual(compare(x, y), result) 200 | -------------------------------------------------------------------------------- /tests/pretty_printers.py: -------------------------------------------------------------------------------- 1 | """This module has unit tests for onlinejudge_command.pretty_printers module. 
2 | """ 3 | 4 | import textwrap 5 | import unittest 6 | from typing import * 7 | 8 | from onlinejudge_command.output_comparators import CompareMode 9 | from onlinejudge_command.pretty_printers import _LineDiffOp, _make_diff_between_file_and_file, _PrettyToken, _PrettyTokenType, _render_tokens, _tokenize_file_content_without_snipping, _tokenize_large_file_content, _tokenize_line, _tokenize_pretty_diff 10 | 11 | 12 | class TokenizeLineTest(unittest.TestCase): 13 | def test_simple(self) -> None: 14 | line = 'hello\n' 15 | expected = [ 16 | _PrettyToken(_PrettyTokenType.BODY, 'hello'), 17 | _PrettyToken(_PrettyTokenType.NEWLINE, '\n'), 18 | ] 19 | 20 | actual = _tokenize_line(line=line) 21 | self.assertEqual(actual, expected) 22 | 23 | def test_crlf(self) -> None: 24 | line = 'hello\r\n' 25 | expected = [ 26 | _PrettyToken(_PrettyTokenType.BODY, 'hello'), 27 | _PrettyToken(_PrettyTokenType.NEWLINE, '\r\n'), 28 | ] 29 | 30 | actual = _tokenize_line(line=line) 31 | self.assertEqual(actual, expected) 32 | 33 | def test_with_whitespace(self) -> None: 34 | line = 'hello \t\tworld\n' 35 | expected = [ 36 | _PrettyToken(_PrettyTokenType.BODY, 'hello'), 37 | _PrettyToken(_PrettyTokenType.WHITESPACE, ' \t\t'), 38 | _PrettyToken(_PrettyTokenType.BODY, 'world'), 39 | _PrettyToken(_PrettyTokenType.NEWLINE, '\n'), 40 | ] 41 | 42 | actual = _tokenize_line(line=line) 43 | self.assertEqual(actual, expected) 44 | 45 | def test_without_newline(self) -> None: 46 | line = 'hello' 47 | expected = [ 48 | _PrettyToken(_PrettyTokenType.BODY, 'hello'), 49 | ] 50 | 51 | actual = _tokenize_line(line=line) 52 | self.assertEqual(actual, expected) 53 | 54 | def test_trailing_whitespace(self) -> None: 55 | line = 'hello \n' 56 | expected = [ 57 | _PrettyToken(_PrettyTokenType.BODY, 'hello'), 58 | _PrettyToken(_PrettyTokenType.WHITESPACE, ' '), 59 | _PrettyToken(_PrettyTokenType.HINT, '(trailing whitespace)'), 60 | _PrettyToken(_PrettyTokenType.NEWLINE, '\n'), 61 | ] 62 | 63 | actual = 
class TokenizeLargeFileContentTest(unittest.TestCase):
    """Tests for ``_tokenize_large_file_content()``.

    The tokenizer snips contents that exceed ``limit`` screen lines down to
    ``head`` + ``tail`` screen lines (a screen line holds ``char_in_line``
    characters), inserting HINT tokens describing what was dropped.
    """

    def test_small(self) -> None:
        """A short content is tokenized in full, with no hints."""
        content = b'hello\nworld\n'
        limit = 40
        head = 20
        tail = 10
        char_in_line = 40
        expected = [
            _PrettyToken(_PrettyTokenType.BODY, 'hello'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
            _PrettyToken(_PrettyTokenType.BODY, 'world'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ]

        actual = _tokenize_large_file_content(content=content, limit=limit, head=head, tail=tail, char_in_line=char_in_line)
        self.assertEqual(actual, expected)

    def test_too_many_chars(self) -> None:
        """One huge line is snipped by characters, with a "... (N chars) ..." hint."""
        content_chars = 100000
        content = b'hello' * (content_chars // len(b'hello'))
        limit = 40
        head = 20
        tail = 10
        char_in_line = 40
        expected = [
            _PrettyToken(_PrettyTokenType.BODY, 'hello' * (head * char_in_line // len('hello'))),
            _PrettyToken(_PrettyTokenType.HINT, '... ({} chars) ...'.format(content_chars - head * char_in_line - tail * char_in_line)),
            _PrettyToken(_PrettyTokenType.BODY, 'hello' * (tail * char_in_line // len('hello'))),
            _PrettyToken(_PrettyTokenType.HINT, '(no trailing newline)'),
        ]

        actual = _tokenize_large_file_content(content=content, limit=limit, head=head, tail=tail, char_in_line=char_in_line)
        self.assertEqual(actual, expected)

    def test_too_many_lines(self) -> None:
        """Many short lines are snipped by lines, with a "... (N lines) ..." hint."""
        content_lines = 100
        content = b'hello\n' * content_lines
        limit = 40
        head = 20
        tail = 10
        char_in_line = 40
        # Annotate the empty list: mypy cannot infer the element type of `[]`,
        # and this keeps the test consistent with the sibling tests that write
        # `expected: List[_PrettyToken] = []`.
        expected: List[_PrettyToken] = []
        for _ in range(head):
            expected += [
                _PrettyToken(_PrettyTokenType.BODY, 'hello'),
                _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
            ]
        expected += [
            _PrettyToken(_PrettyTokenType.HINT, '... ({} lines) ...\n'.format(content_lines - head - tail)),
        ]
        for _ in range(tail):
            expected += [
                _PrettyToken(_PrettyTokenType.BODY, 'hello'),
                _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
            ]

        actual = _tokenize_large_file_content(content=content, limit=limit, head=head, tail=tail, char_in_line=char_in_line)
        self.assertEqual(actual, expected)

    def test_empty(self) -> None:
        """An empty content produces only the "(empty)" hint."""
        content = b''
        limit = 40
        head = 20
        tail = 10
        char_in_line = 40
        expected = [
            _PrettyToken(_PrettyTokenType.HINT, '(empty)'),
        ]

        actual = _tokenize_large_file_content(content=content, limit=limit, head=head, tail=tail, char_in_line=char_in_line)
        self.assertEqual(actual, expected)

    def test_only_newlines(self) -> None:
        """A content of bare newlines keeps them and appends the "(only newline)" hint."""
        content = b'\r\n\n'
        limit = 40
        head = 20
        tail = 10
        char_in_line = 40
        expected = [
            _PrettyToken(_PrettyTokenType.NEWLINE, '\r\n'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
            _PrettyToken(_PrettyTokenType.HINT, '(only newline)'),
        ]

        actual = _tokenize_large_file_content(content=content, limit=limit, head=head, tail=tail, char_in_line=char_in_line)
        self.assertEqual(actual, expected)


class TokenizeFileContentWithoutSnippingTest(unittest.TestCase):
    """Tests for ``_tokenize_file_content_without_snipping()``: the whole
    content is tokenized, however long it is."""

    def test_small(self) -> None:
        content = b'hello\nworld\n'
        expected = [
            _PrettyToken(_PrettyTokenType.BODY, 'hello'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
            _PrettyToken(_PrettyTokenType.BODY, 'world'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ]

        actual = _tokenize_file_content_without_snipping(content=content)
        self.assertEqual(actual, expected)

    def test_empty(self) -> None:
        content = b''
        expected = [
            _PrettyToken(_PrettyTokenType.HINT, '(empty)'),
        ]

        actual = _tokenize_file_content_without_snipping(content=content)
        self.assertEqual(actual, expected)

    def test_only_newlines(self) -> None:
        content = b'\r\n\n'
        expected = [
            _PrettyToken(_PrettyTokenType.NEWLINE, '\r\n'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
            _PrettyToken(_PrettyTokenType.HINT, '(only newline)'),
        ]

        actual = _tokenize_file_content_without_snipping(content=content)
        self.assertEqual(actual, expected)


class RenderTokensTest(unittest.TestCase):
    """Tests for ``_render_tokens()``: tokens are joined back into a string,
    with whitespace made visible in WHITESPACE/NEWLINE tokens."""

    def test_simple(self) -> None:
        tokens = [
            _PrettyToken(_PrettyTokenType.BODY, 'hello'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
            _PrettyToken(_PrettyTokenType.BODY, 'world'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ]
        expected = ''.join([
            'hello',
            '\n',
            'world',
            '\n',
        ])

        # PEP 8 (E731): use def instead of assigning lambdas to names.  These
        # fonts wrap the text in (here empty) decorations, so rendering stays
        # plain text.
        def font_dim(s: str) -> str:
            return '' + s + ''

        def font_bold(s: str) -> str:
            return '' + s + ''

        actual = _render_tokens(tokens=tokens, font_bold=font_bold, font_dim=font_dim)
        self.assertEqual(actual, expected)

    def test_complicated(self) -> None:
        tokens = [
            _PrettyToken(_PrettyTokenType.BODY, 'hello world'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' \t'),
            _PrettyToken(_PrettyTokenType.HINT, 'this is a hint message'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\r\n'),
        ]
        # Whitespace in WHITESPACE/NEWLINE tokens is visualized:
        # ' ' -> '_', '\t' -> '\\t', '\r' -> '\\r'.  Spaces inside BODY stay.
        expected = ''.join([
            'hello world',
            '_\\t',
            'this is a hint message',
            '\\r\n',
        ])

        def font_dim(s: str) -> str:
            return '' + s + ''

        def font_bold(s: str) -> str:
            return '' + s + ''

        actual = _render_tokens(tokens=tokens, font_bold=font_bold, font_dim=font_dim)
        self.assertEqual(actual, expected)
class MakeDiffBetweenFileAndFileTest(unittest.TestCase):
    """Tests for ``_make_diff_between_file_and_file()``.

    It compares an actual output with the expected one and yields a list of
    per-line ``_LineDiffOp`` values for the lines that differ.

    NOTE(review): exact runs of spaces inside the test strings could not be
    verified from this collapsed dump; confirm against the original file.
    """

    def test_word_by_word(self) -> None:
        """Lines with the same word count are diffed word by word."""
        output = '1 2 3\n4 -1\n6\n'
        expected_output = '1 2 3\n4 5\n6\n'
        left = [
            _PrettyToken(_PrettyTokenType.BODY, '4'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' '),
            _PrettyToken(_PrettyTokenType.BODY_HIGHLIGHT_LEFT, '-1'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ]
        right = [
            _PrettyToken(_PrettyTokenType.BODY, '4'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' '),
            _PrettyToken(_PrettyTokenType.BODY_HIGHLIGHT_RIGHT, '5'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ]

        ops = _make_diff_between_file_and_file(output, expected_output, compare_mode=CompareMode.CRLF_INSENSITIVE_EXACT_MATCH)
        self.assertEqual(ops, [_LineDiffOp(lineno=1, left=left, right=right)])

    def test_line_difflib(self) -> None:
        """Lines with different word counts fall back to difflib within the line."""
        output = '1 3\nwow\nhe llo word\n'
        expected_output = '1 2 3\nwow\nhello world\n'
        first_line_op = _LineDiffOp(lineno=0, left=[
            _PrettyToken(_PrettyTokenType.BODY, '1'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' '),
            _PrettyToken(_PrettyTokenType.BODY, '3'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ], right=[
            _PrettyToken(_PrettyTokenType.BODY, '1'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' '),
            _PrettyToken(_PrettyTokenType.BODY_HIGHLIGHT_RIGHT, '2'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' '),
            _PrettyToken(_PrettyTokenType.BODY, '3'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ])
        third_line_op = _LineDiffOp(lineno=2, left=[
            _PrettyToken(_PrettyTokenType.BODY, 'he'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' '),
            _PrettyToken(_PrettyTokenType.BODY, 'llo'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' '),
            _PrettyToken(_PrettyTokenType.BODY, 'word'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ], right=[
            _PrettyToken(_PrettyTokenType.BODY, 'hello'),
            _PrettyToken(_PrettyTokenType.WHITESPACE, ' '),
            _PrettyToken(_PrettyTokenType.BODY, 'wor'),
            _PrettyToken(_PrettyTokenType.BODY_HIGHLIGHT_RIGHT, 'l'),
            _PrettyToken(_PrettyTokenType.BODY, 'd'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ])

        ops = _make_diff_between_file_and_file(output, expected_output, compare_mode=CompareMode.CRLF_INSENSITIVE_EXACT_MATCH)
        self.assertEqual(ops, [first_line_op, third_line_op])

    def test_file_difflib(self) -> None:
        """Inserted/deleted whole lines fall back to difflib over the files."""
        output = 'foo\nbaz\nhello\nworld\nhey\nwow\n'
        expected_output = 'foo\nbar\nbaz\nhello\nworld\nwow\nwow\n'
        inserted_bar = _LineDiffOp(lineno=1, left=None, right=[
            _PrettyToken(_PrettyTokenType.BODY_HIGHLIGHT_RIGHT, 'bar'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ])
        deleted_hey = _LineDiffOp(lineno=4, left=[
            _PrettyToken(_PrettyTokenType.BODY_HIGHLIGHT_LEFT, 'hey'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ], right=None)
        inserted_wow = _LineDiffOp(lineno=6, left=None, right=[
            _PrettyToken(_PrettyTokenType.BODY_HIGHLIGHT_RIGHT, 'wow'),
            _PrettyToken(_PrettyTokenType.NEWLINE, '\n'),
        ])

        ops = _make_diff_between_file_and_file(output, expected_output, compare_mode=CompareMode.CRLF_INSENSITIVE_EXACT_MATCH)
        self.assertEqual(ops, [inserted_bar, deleted_hey, inserted_wow])
class MakePrettyDiffTest(unittest.TestCase):
    """End-to-end tests: tokenize a diff and render it as side-by-side text.

    NOTE(review): the column spacing inside the expected strings could not be
    verified from this collapsed dump; confirm against the original file.
    """

    @staticmethod
    def _identity(s: str) -> str:
        # Identity "font" so the rendered diff stays plain text.
        return s

    def render_pretty_diff(self, output: str, expected_output: str) -> str:
        # All tests below use the same comparison settings.
        tokens = _tokenize_pretty_diff(output, expected=expected_output, compare_mode=CompareMode.CRLF_INSENSITIVE_EXACT_MATCH, char_in_line=40, limit=40)
        return _render_tokens(tokens=tokens, font_dim=self._identity, font_bold=self._identity, font_red=self._identity, font_blue=self._identity)

    def test_word_by_word(self) -> None:
        output = '1 2 3\n4 -1\n6\n'
        expected_output = '1 2 3\n4 5\n6\n'
        expected = textwrap.dedent("""\
            output: expected:
            1| 1_2_3 1| 1_2_3
            2| 4_-1 2| 4_5
            3| 6 3| 6
            """)

        self.assertEqual(self.render_pretty_diff(output, expected_output), expected)

    def test_line_difflib(self) -> None:
        output = '1 3\nwow\nhe llo word\n'
        expected_output = '1 2 3\nwow\nhello world\n'
        expected = textwrap.dedent("""\
            output: expected:
            1| 1_3 1| 1_2_3
            2| wow 2| wow
            3| he_llo_word 3| hello_world
            """)

        self.assertEqual(self.render_pretty_diff(output, expected_output), expected)

    def test_file_difflib(self) -> None:
        output = 'foo\nbaz\nhello\nworld\nhey\nwow\n'
        expected_output = 'foo\nbar\nbaz\nhello\nworld\nwow\nwow\n'
        expected = textwrap.dedent("""\
            output: expected:
            1| foo 1| foo
            2| bar
            2| baz 3| baz
            3| hello 4| hello
            4| world 5| world
            5| hey
            6| wow 6| wow
            7| wow
            """)

        self.assertEqual(self.render_pretty_diff(output, expected_output), expected)


class MakePrettyDiffLimitTest(unittest.TestCase):
    """The ``limit`` option caps the number of rendered diff lines."""

    @staticmethod
    def _identity(s: str) -> str:
        return s

    def count_rendered_lines(self, limit: int) -> int:
        # Two files that differ on every one of their 100 lines.
        output = ''.join(['a\n'] * 100)
        expected_output = ''.join(['b\n'] * 100)
        tokens = _tokenize_pretty_diff(output, expected=expected_output, compare_mode=CompareMode.CRLF_INSENSITIVE_EXACT_MATCH, char_in_line=40, limit=limit)
        rendered = _render_tokens(tokens=tokens, font_dim=self._identity, font_bold=self._identity, font_red=self._identity, font_blue=self._identity)
        return len(rendered.splitlines())

    def test_with_limit(self) -> None:
        # header line + `limit` diff lines + one hint line for the snipped rest
        self.assertEqual(self.count_rendered_lines(limit=40), 1 + 40 + 1)

    def test_without_limit(self) -> None:
        # limit=-1 disables snipping: header line + all 100 diff lines
        self.assertEqual(self.count_rendered_lines(limit=-1), 1 + 100)


# ---------------------------------------------------------------------------
# tests/utils.py — helpers shared by the command tests
# ---------------------------------------------------------------------------

import contextlib
import os
import pathlib
import subprocess
import sys
import tempfile
from typing import *


@contextlib.contextmanager
def chdir(path):
    """Temporarily change the current working directory to *path*."""
    previous = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        os.chdir(previous)


def prepare_files(files):
    """Create the files described by *files* under the current directory.

    Each item is a dict with keys ``path`` and ``data``, plus an optional
    ``executable`` flag.
    """
    for description in files:
        path = pathlib.Path(description['path'])
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_bytes(description['data'].encode())
        if description.get('executable', False):
            path.chmod(0o755)


@contextlib.contextmanager
def sandbox(files):
    """Yield a fresh temporary directory, populated with *files*, as the cwd."""
    with tempfile.TemporaryDirectory() as tempdir_:
        # resolve() expands paths like "C:\PROGRA~1" on Windows
        resolved = str(pathlib.Path(tempdir_).resolve())
        with chdir(resolved):
            prepare_files(files)
            yield resolved
43 | return subprocess.run([sys.executable, '-m', 'onlinejudge_command.main'] + args, stdout=subprocess.PIPE, stderr=err, env=env, check=check) # type: ignore 44 | 45 | 46 | def run_in_sandbox(args, files, pipe_stderr=False): 47 | with sandbox(files) as tempdir: 48 | proc = run(args, pipe_stderr=pipe_stderr) 49 | return { 50 | 'proc': proc, 51 | 'tempdir': tempdir, 52 | } 53 | 54 | 55 | def cat(): 56 | if os.name == 'nt': 57 | return '{} -c "import sys; sys.stdout.buffer.write(sys.stdin.buffer.read())"'.format(sys.executable) 58 | else: 59 | return 'cat' 60 | 61 | 62 | def sleep_1sec(): 63 | if os.name == 'nt': 64 | return '{} -c "import time; time.sleep(1)"'.format(sys.executable) 65 | else: 66 | return 'sleep 1.0' 67 | 68 | 69 | def python_c(cmd): 70 | assert '"' not in cmd 71 | return '{} -c "{}"'.format(sys.executable, cmd) 72 | 73 | 74 | def python_script(path): 75 | assert '"' not in path 76 | return '{} "{}"'.format(sys.executable, path) 77 | 78 | 79 | def is_logged_in(service, memo={}): # pylint: disable=dangerous-default-value 80 | # functools.lru_cache is unusable since Service are unhashable 81 | url = service.get_url() 82 | if url not in memo: 83 | proc = run(['login', '--check', url]) 84 | memo[url] = proc.returncode == 0 85 | return memo[url] 86 | --------------------------------------------------------------------------------