├── .gitignore ├── Gemfile ├── Gemfile.lock ├── README.md ├── _config.yml ├── _includes ├── extra │ └── styles.scss └── templates │ ├── buttons.liquid │ └── sidebar.liquid ├── appendix ├── directory_structure.md ├── languages.md └── readme.md ├── assets └── js │ └── script.js ├── examples ├── problem.yaml.md ├── problems │ ├── README.md │ ├── interactive │ │ ├── README.md │ │ ├── problem.yaml │ │ ├── statement │ │ │ └── problem.en.tex │ │ └── submissions │ │ │ └── submissions.yaml │ ├── maximal │ │ ├── README.md │ │ ├── attachments │ │ │ └── my_attachment.txt │ │ ├── data │ │ │ ├── sample │ │ │ │ ├── 1.ans │ │ │ │ └── 1.in │ │ │ └── secret │ │ │ │ ├── 1.ans │ │ │ │ ├── 1.in │ │ │ │ ├── 2.ans │ │ │ │ ├── 2.in │ │ │ │ ├── 3.ans │ │ │ │ ├── 3.in │ │ │ │ ├── 4.ans │ │ │ │ └── 4.in │ │ ├── include │ │ │ ├── default │ │ │ │ └── data.txt │ │ │ └── python3 │ │ │ │ └── include.py │ │ ├── input_validators │ │ │ └── validator.ctd │ │ ├── output_validators │ │ │ └── validator.py │ │ ├── problem.yaml │ │ ├── solution │ │ │ ├── solution.en.md │ │ │ └── solution.sv.md │ │ ├── statement │ │ │ ├── kattis.png │ │ │ ├── problem.en.tex │ │ │ └── problem.sv.tex │ │ └── submissions │ │ │ ├── accepted │ │ │ ├── accepted.py │ │ │ ├── with_include.php │ │ │ └── without_include.php │ │ │ ├── run_time_error │ │ │ └── not_defined │ │ │ │ ├── main.py │ │ │ │ └── util.py │ │ │ ├── submissions.yaml │ │ │ ├── time_limit_exceeded │ │ │ └── tle.py │ │ │ └── wrong_answer │ │ │ └── wrong.py │ ├── multipass │ │ ├── README.md │ │ ├── problem.yaml │ │ ├── statement │ │ │ └── problem.en.tex │ │ └── submissions │ │ │ └── submissions.yaml │ ├── passfail │ │ ├── README.md │ │ ├── data │ │ │ ├── sample │ │ │ │ ├── 1.ans │ │ │ │ ├── 1.in │ │ │ │ └── testdata.yaml │ │ │ └── secret │ │ │ │ ├── 1.ans │ │ │ │ ├── 1.in │ │ │ │ ├── 2.ans │ │ │ │ ├── 2.in │ │ │ │ ├── 3.ans │ │ │ │ ├── 3.in │ │ │ │ └── testdata.yaml │ │ ├── input_validators │ │ │ └── validator.ctd │ │ ├── problem.yaml │ │ ├── statement │ │ │ └── problem.en.tex │ │ └── submissions │ │ │ ├── accepted │ │ │ └── solution.py │ │ │ ├── submissions.yaml │ │ │ └── wrong_answer │ │ │ ├── constant.py │ │ │ └── wrong.py │ ├── scoring │ │ ├── README.md │ │ ├── data │ │ │ ├── sample │ │ │ │ ├── 1.ans │ │ │ │ └── 1.in │ │ │ └── secret │ │ │ │ ├── subtask1 │ │ │ │ ├── 1.ans │ │ │ │ ├── 1.in │ │ │ │ ├── 2.ans │ │ │ │ ├── 2.in │ │ │ │ ├── 3.ans │ │ │ │ ├── 3.in │ │ │ │ └── testdata.yaml │ │ │ │ ├── subtask2 │ │ │ │ ├── 1.ans │ │ │ │ ├── 1.in │ │ │ │ ├── 2.ans │ │ │ │ ├── 2.in │ │ │ │ ├── 3.ans │ │ │ │ ├── 3.in │ │ │ │ └── testdata.yaml │ │ │ │ └── testdata.yaml │ │ ├── input_validators │ │ │ └── validator.ctd │ │ ├── problem.yaml │ │ ├── statement │ │ │ └── problem.en.tex │ │ └── submissions │ │ │ ├── accepted │ │ │ └── solution.py │ │ │ ├── partially_accepted │ │ │ └── partial_solution.py │ │ │ ├── submissions.yaml │ │ │ └── wrong_answer │ │ │ └── constant.py │ └── submit_answer │ │ ├── README.md │ │ ├── problem.yaml │ │ ├── statement │ │ └── problem.en.tex │ │ └── submissions │ │ ├── accepted │ │ └── solution.py │ │ ├── submissions.yaml │ │ └── wrong_answer │ │ └── wrong.py └── readme.md ├── spec ├── 2023-07-draft.md ├── changelog.md ├── legacy-icpc.md ├── legacy.md └── readme.md └── support └── schemas ├── problem.cue └── test_group.cue /.gitignore: -------------------------------------------------------------------------------- 1 | _site 2 | .sass-cache 3 | .jekyll-cache 4 | .jekyll-metadata 5 | vendor 6 | -------------------------------------------------------------------------------- 
/Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | #ruby '2.6.6' 3 | # Hello! This is where you manage which Jekyll version is used to run. 4 | # When you want to use a different version, change it below, save the 5 | # file and run `bundle install`. Run Jekyll with `bundle exec`, like so: 6 | # 7 | # bundle exec jekyll serve 8 | # 9 | # This will help ensure the proper Jekyll version is running. 10 | # Happy Jekylling! 11 | #gem "jekyll", "~> 4.1.1" 12 | # This is the default theme for new Jekyll sites. You may change this to anything you like. 13 | #gem "minima", "~> 2.5" 14 | # If you want to use GitHub Pages, remove the "gem "jekyll"" above and 15 | # uncomment the line below. To upgrade, run `bundle update github-pages`. 16 | # If you have any plugins, put them here! 17 | group :jekyll_plugins do 18 | gem "github-pages" 19 | gem 'jekyll-commonmark-ghpages' 20 | gem "jekyll-remote-theme" 21 | gem 'faraday', '< 1' 22 | gem 'liquid', '>= 4.0.1' 23 | gem 'webrick' 24 | end 25 | -------------------------------------------------------------------------------- /Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | activesupport (7.0.6) 5 | concurrent-ruby (~> 1.0, >= 1.0.2) 6 | i18n (>= 1.6, < 2) 7 | minitest (>= 5.1) 8 | tzinfo (~> 2.0) 9 | addressable (2.8.4) 10 | public_suffix (>= 2.0.2, < 6.0) 11 | coffee-script (2.4.1) 12 | coffee-script-source 13 | execjs 14 | coffee-script-source (1.11.1) 15 | colorator (1.1.0) 16 | commonmarker (0.23.9) 17 | concurrent-ruby (1.2.2) 18 | dnsruby (1.70.0) 19 | simpleidn (~> 0.2.1) 20 | em-websocket (0.5.3) 21 | eventmachine (>= 0.12.9) 22 | http_parser.rb (~> 0) 23 | ethon (0.16.0) 24 | ffi (>= 1.15.0) 25 | eventmachine (1.2.7) 26 | execjs (2.8.1) 27 | faraday (0.17.6) 28 | multipart-post (>= 1.2, < 3) 29 | ffi (1.15.5) 30 | forwardable-extended (2.6.0) 31 | gemoji (3.0.1) 32 | github-pages (228) 33 | github-pages-health-check (= 1.17.9) 34 | jekyll (= 3.9.3) 35 | jekyll-avatar (= 0.7.0) 36 | jekyll-coffeescript (= 1.1.1) 37 | jekyll-commonmark-ghpages (= 0.4.0) 38 | jekyll-default-layout (= 0.1.4) 39 | jekyll-feed (= 0.15.1) 40 | jekyll-gist (= 1.5.0) 41 | jekyll-github-metadata (= 2.13.0) 42 | jekyll-include-cache (= 0.2.1) 43 | jekyll-mentions (= 1.6.0) 44 | jekyll-optional-front-matter (= 0.3.2) 45 | jekyll-paginate (= 1.1.0) 46 | jekyll-readme-index (= 0.3.0) 47 | jekyll-redirect-from (= 0.16.0) 48 | jekyll-relative-links (= 0.6.1) 49 | jekyll-remote-theme (= 0.4.3) 50 | jekyll-sass-converter (= 1.5.2) 51 | jekyll-seo-tag (= 2.8.0) 52 | jekyll-sitemap (= 1.4.0) 53 | jekyll-swiss (= 1.0.0) 54 | jekyll-theme-architect (= 0.2.0) 55 | jekyll-theme-cayman (= 0.2.0) 56 | jekyll-theme-dinky (= 0.2.0) 57 | jekyll-theme-hacker (= 0.2.0) 58 | jekyll-theme-leap-day (= 0.2.0) 59 | jekyll-theme-merlot (= 0.2.0) 60 | jekyll-theme-midnight (= 0.2.0) 61 | jekyll-theme-minimal (= 0.2.0) 62 | jekyll-theme-modernist (= 0.2.0) 63 | jekyll-theme-primer (= 0.6.0) 64 | jekyll-theme-slate (= 0.2.0) 65 | jekyll-theme-tactile (= 0.2.0) 66 | jekyll-theme-time-machine (= 0.2.0) 67 | jekyll-titles-from-headings (= 0.5.3) 68 | jemoji (= 0.12.0) 69 | kramdown (= 2.3.2) 70 | kramdown-parser-gfm (= 1.1.0) 71 | liquid (= 4.0.4) 72 | mercenary (~> 0.3) 73 | minima (= 2.5.1) 74 | nokogiri (>= 1.13.6, < 2.0) 75 | rouge (= 3.26.0) 76 | terminal-table (~> 1.4) 77 | github-pages-health-check (1.17.9) 78 | 
addressable (~> 2.3) 79 | dnsruby (~> 1.60) 80 | octokit (~> 4.0) 81 | public_suffix (>= 3.0, < 5.0) 82 | typhoeus (~> 1.3) 83 | html-pipeline (2.14.3) 84 | activesupport (>= 2) 85 | nokogiri (>= 1.4) 86 | http_parser.rb (0.8.0) 87 | i18n (1.14.1) 88 | concurrent-ruby (~> 1.0) 89 | jekyll (3.9.3) 90 | addressable (~> 2.4) 91 | colorator (~> 1.0) 92 | em-websocket (~> 0.5) 93 | i18n (>= 0.7, < 2) 94 | jekyll-sass-converter (~> 1.0) 95 | jekyll-watch (~> 2.0) 96 | kramdown (>= 1.17, < 3) 97 | liquid (~> 4.0) 98 | mercenary (~> 0.3.3) 99 | pathutil (~> 0.9) 100 | rouge (>= 1.7, < 4) 101 | safe_yaml (~> 1.0) 102 | jekyll-avatar (0.7.0) 103 | jekyll (>= 3.0, < 5.0) 104 | jekyll-coffeescript (1.1.1) 105 | coffee-script (~> 2.2) 106 | coffee-script-source (~> 1.11.1) 107 | jekyll-commonmark (1.4.0) 108 | commonmarker (~> 0.22) 109 | jekyll-commonmark-ghpages (0.4.0) 110 | commonmarker (~> 0.23.7) 111 | jekyll (~> 3.9.0) 112 | jekyll-commonmark (~> 1.4.0) 113 | rouge (>= 2.0, < 5.0) 114 | jekyll-default-layout (0.1.4) 115 | jekyll (~> 3.0) 116 | jekyll-feed (0.15.1) 117 | jekyll (>= 3.7, < 5.0) 118 | jekyll-gist (1.5.0) 119 | octokit (~> 4.2) 120 | jekyll-github-metadata (2.13.0) 121 | jekyll (>= 3.4, < 5.0) 122 | octokit (~> 4.0, != 4.4.0) 123 | jekyll-include-cache (0.2.1) 124 | jekyll (>= 3.7, < 5.0) 125 | jekyll-mentions (1.6.0) 126 | html-pipeline (~> 2.3) 127 | jekyll (>= 3.7, < 5.0) 128 | jekyll-optional-front-matter (0.3.2) 129 | jekyll (>= 3.0, < 5.0) 130 | jekyll-paginate (1.1.0) 131 | jekyll-readme-index (0.3.0) 132 | jekyll (>= 3.0, < 5.0) 133 | jekyll-redirect-from (0.16.0) 134 | jekyll (>= 3.3, < 5.0) 135 | jekyll-relative-links (0.6.1) 136 | jekyll (>= 3.3, < 5.0) 137 | jekyll-remote-theme (0.4.3) 138 | addressable (~> 2.0) 139 | jekyll (>= 3.5, < 5.0) 140 | jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0) 141 | rubyzip (>= 1.3.0, < 3.0) 142 | jekyll-sass-converter (1.5.2) 143 | sass (~> 3.4) 144 | jekyll-seo-tag (2.8.0) 145 | jekyll (>= 3.8, < 5.0) 146 | jekyll-sitemap (1.4.0) 147 | jekyll (>= 3.7, < 5.0) 148 | jekyll-swiss (1.0.0) 149 | jekyll-theme-architect (0.2.0) 150 | jekyll (> 3.5, < 5.0) 151 | jekyll-seo-tag (~> 2.0) 152 | jekyll-theme-cayman (0.2.0) 153 | jekyll (> 3.5, < 5.0) 154 | jekyll-seo-tag (~> 2.0) 155 | jekyll-theme-dinky (0.2.0) 156 | jekyll (> 3.5, < 5.0) 157 | jekyll-seo-tag (~> 2.0) 158 | jekyll-theme-hacker (0.2.0) 159 | jekyll (> 3.5, < 5.0) 160 | jekyll-seo-tag (~> 2.0) 161 | jekyll-theme-leap-day (0.2.0) 162 | jekyll (> 3.5, < 5.0) 163 | jekyll-seo-tag (~> 2.0) 164 | jekyll-theme-merlot (0.2.0) 165 | jekyll (> 3.5, < 5.0) 166 | jekyll-seo-tag (~> 2.0) 167 | jekyll-theme-midnight (0.2.0) 168 | jekyll (> 3.5, < 5.0) 169 | jekyll-seo-tag (~> 2.0) 170 | jekyll-theme-minimal (0.2.0) 171 | jekyll (> 3.5, < 5.0) 172 | jekyll-seo-tag (~> 2.0) 173 | jekyll-theme-modernist (0.2.0) 174 | jekyll (> 3.5, < 5.0) 175 | jekyll-seo-tag (~> 2.0) 176 | jekyll-theme-primer (0.6.0) 177 | jekyll (> 3.5, < 5.0) 178 | jekyll-github-metadata (~> 2.9) 179 | jekyll-seo-tag (~> 2.0) 180 | jekyll-theme-slate (0.2.0) 181 | jekyll (> 3.5, < 5.0) 182 | jekyll-seo-tag (~> 2.0) 183 | jekyll-theme-tactile (0.2.0) 184 | jekyll (> 3.5, < 5.0) 185 | jekyll-seo-tag (~> 2.0) 186 | jekyll-theme-time-machine (0.2.0) 187 | jekyll (> 3.5, < 5.0) 188 | jekyll-seo-tag (~> 2.0) 189 | jekyll-titles-from-headings (0.5.3) 190 | jekyll (>= 3.3, < 5.0) 191 | jekyll-watch (2.2.1) 192 | listen (~> 3.0) 193 | jemoji (0.12.0) 194 | gemoji (~> 3.0) 195 | html-pipeline (~> 2.2) 196 | jekyll (>= 3.0, < 5.0) 197 
| kramdown (2.3.2) 198 | rexml 199 | kramdown-parser-gfm (1.1.0) 200 | kramdown (~> 2.0) 201 | liquid (4.0.4) 202 | listen (3.8.0) 203 | rb-fsevent (~> 0.10, >= 0.10.3) 204 | rb-inotify (~> 0.9, >= 0.9.10) 205 | mercenary (0.3.6) 206 | mini_portile2 (2.8.4) 207 | minima (2.5.1) 208 | jekyll (>= 3.5, < 5.0) 209 | jekyll-feed (~> 0.9) 210 | jekyll-seo-tag (~> 2.1) 211 | minitest (5.18.1) 212 | multipart-post (2.3.0) 213 | nokogiri (1.15.3) 214 | mini_portile2 (~> 2.8.2) 215 | racc (~> 1.4) 216 | octokit (4.22.0) 217 | faraday (>= 0.9) 218 | sawyer (~> 0.8.0, >= 0.5.3) 219 | pathutil (0.16.2) 220 | forwardable-extended (~> 2.6) 221 | public_suffix (4.0.7) 222 | racc (1.7.1) 223 | rb-fsevent (0.11.2) 224 | rb-inotify (0.10.1) 225 | ffi (~> 1.0) 226 | rexml (3.2.5) 227 | rouge (3.26.0) 228 | rubyzip (2.3.2) 229 | safe_yaml (1.0.5) 230 | sass (3.7.4) 231 | sass-listen (~> 4.0.0) 232 | sass-listen (4.0.0) 233 | rb-fsevent (~> 0.9, >= 0.9.4) 234 | rb-inotify (~> 0.9, >= 0.9.7) 235 | sawyer (0.8.2) 236 | addressable (>= 2.3.5) 237 | faraday (> 0.8, < 2.0) 238 | simpleidn (0.2.1) 239 | unf (~> 0.1.4) 240 | terminal-table (1.8.0) 241 | unicode-display_width (~> 1.1, >= 1.1.1) 242 | typhoeus (1.4.0) 243 | ethon (>= 0.9.0) 244 | tzinfo (2.0.6) 245 | concurrent-ruby (~> 1.0) 246 | unf (0.1.4) 247 | unf_ext 248 | unf_ext (0.0.8.2) 249 | unicode-display_width (1.8.0) 250 | webrick (1.9.1) 251 | 252 | PLATFORMS 253 | ruby 254 | 255 | DEPENDENCIES 256 | faraday (< 1) 257 | github-pages 258 | jekyll-commonmark-ghpages 259 | jekyll-remote-theme 260 | liquid (>= 4.0.1) 261 | webrick 262 | 263 | BUNDLED WITH 264 | 2.1.4 265 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Main page 3 | sort: 1 4 | --- 5 | 6 | # Problem Package Format Specification 7 | 8 | This site contains the specification for the Kattis Problem Package Format. 9 | There are currently three versions: 10 | 11 | - The latest (draft) version: [spec/2023-07-draft](spec/2023-07-draft.md). 12 | - The current version: [spec/legacy](spec/legacy.md). 13 | - The ICPC subset of the current version: [spec/legacy-icpc](spec/legacy-icpc.md). 14 | 15 | The latest (draft) version is not yet widely supported, 16 | but if you're building tools and systems you should definitely take a look at it. 17 | If you're creating problems for official ICPC contests you should not assume more than the ICPC subset without talking to your technical staff. 18 | 19 | Development happens in the GitHub repository: <https://github.com/Kattis/problem-package-format>. 20 | Contributions and comments are very welcome! 21 | 22 | ## System support 23 | 24 | This is a (likely incomplete) list of systems supporting the Problem Package Format: 25 | 26 | - [problemtools](https://github.com/kattis/problemtools): 27 | This is the reference validation tool for the Kattis format. 28 | It is not intended to be more than that. 29 | - [BAPCtools](https://github.com/RagnarGrootKoerkamp/BAPCtools): 30 | Development tool for creating and developing problems using the Kattis format. 31 | - [Testdata Tools](https://github.com/Kodsport/testdata_tools): 32 | Bash helper functions for working with problems for the Kattis format, particularly ones that use multiple test groups. 33 | - [Kattis](https://open.kattis.com/): 34 | Online Judge. 35 | - [DOMjudge](https://www.domjudge.org/): 36 | Judging system. 37 | - [PC^2](https://pc2ccs.github.io/): 38 | Judging system. 39 | - [ICPC Problem Archive](https://github.com/icpc-problem-archive): 40 | Archive of all problems from official ICPC contests.
41 | 42 | If you have a system that supports or uses the problem package format and want it included here, please open a pull request or an issue on this repository. 43 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | title: Problem Package Format 2 | repository: Kattis/problem-package-format 3 | lang: en 4 | description: Problem Package Format Specification 5 | 6 | readme_index: 7 | with_frontmatter: true 8 | 9 | theme: null 10 | 11 | # https://github.com/rundocs/jekyll-rtd-theme 12 | remote_theme: kattis/jekyll-rtd-theme 13 | 14 | plugins: 15 | - jekyll-remote-theme 16 | 17 | markdown: CommonMarkGhPages 18 | commonmark: 19 | # Unsafe allows passthrough of raw HTML
20 | options: ["SMART", "UNSAFE"] 21 | extensions: ["table"] 22 | -------------------------------------------------------------------------------- /_includes/extra/styles.scss: -------------------------------------------------------------------------------- 1 | .not-icpc:not(.full_view):not(.icpc_view), 2 | .not-icpc:not(.full_view):not(.icpc_view) td, 3 | .not-icpc:not(.full_view):not(.icpc_view) code { 4 | color: red; 5 | } 6 | 7 | .not-icpc.full_view, 8 | .not-icpc.full_view td, 9 | .not-icpc.full_view code { 10 | } 11 | 12 | .not-icpc.icpc_view, 13 | .not-icpc.icpc_view td, 14 | .not-icpc.icpc_view code { 15 | display: none; 16 | } 17 | 18 | .buttons { 19 | width: fit-content; 20 | margin: auto; 21 | margin-top: 5px; 22 | margin-bottom: 5px; 23 | } 24 | -------------------------------------------------------------------------------- /_includes/templates/buttons.liquid: -------------------------------------------------------------------------------- 1 | 2 |
3 | Variant: 4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 | -------------------------------------------------------------------------------- /_includes/templates/sidebar.liquid: -------------------------------------------------------------------------------- 1 | 22 | -------------------------------------------------------------------------------- /appendix/directory_structure.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Directory Structure 4 | sort: 5 5 | --- 6 | 7 | # Directory structure 8 | 9 | These examples are for the latest version of the spec, `2023-07-draft`. 10 | 11 | ```text 12 | / 13 | problem.yaml - problem configuration file 14 | statement/ 15 | problem.en.tex - English problem statement as LaTeX 16 | problem.sv.md - Swedish problem statement as Markdown 17 | problem.nl.pdf - Dutch problem statement as PDF 18 | - any files that problem.xy.{tex,md,pdf} needs to include, e.g. images 19 | attachments/ 20 | - public files available to contestants 21 | solution/ 22 | solution.en.tex - English problem solution as LaTeX 23 | solution.sv.md - Swedish problem solution as Markdown 24 | solution.nl.pdf - Dutch problem solution as PDF 25 | - any files that solution.xy.{tex,md,pdf} needs to include, e.g. images 26 | data/ 27 | sample/ 28 | *.in - sample input files 29 | *.ans - answer files 30 | *.out - sample output files 31 | *.interaction - sample interaction protocol files 32 | *.args - optional command-line arguments 33 | *.files/ 34 | - any files that should be available to the program when running the current testcase 35 | secret/(optional_group)/ 36 | *.in - input files 37 | *.ans - answer files 38 | *.hint - optional hint for the team 39 | *.desc - optional data description 40 | *.{jpg,png,svg} - visualization of the testcase, at most one per testcase 41 | *.args - optional command-line arguments 42 | *.files/ 43 | - any files that should be available to the program when running the current testcase 44 | generators/ 45 | - any generator scripts that were used to generate testcases 46 | include/ 47 | <language>/ 48 | - any files that should be included with all submissions in <language> 49 | default/ 50 | - any files that should be included with all submissions in any other language 51 | submissions/ 52 | submissions.yaml - sample submissions configuration file 53 | accepted/ 54 | - a file/directory for each submission with verdict AC for all testcases (at least one required) 55 | rejected/ 56 | - a file/directory for each submission with final verdict other than AC 57 | wrong_answer/ 58 | - a file/directory for each submission with verdict WA for some testcase 59 | time_limit_exceeded/ 60 | - a file/directory for each submission with verdict TLE for some testcase 61 | run_time_error/ 62 | - a file/directory for each submission with verdict RTE for some testcase 63 | brute_force/ 64 | - a file/directory for each submission with either verdict RTE or TLE for some testcase 65 | input_validators/ 66 | - one or more input validators, each either a .viva file, a .ctd file, or a program. 67 | input_visualizer/ 68 | - any tools that were used to generate test case illustrations 69 | output_validator/ 70 | - a single output validator program. 71 | output_visualizer/ 72 | - a single output visualizer program.
73 | ``` 74 | 75 | ## Example 76 | 77 | This is a sample list of directories/files for a problem named `heightprofile`: 78 | 79 | ```sh 80 | heightprofile 81 | ├── problem.yaml 82 | ├── statement 83 | │ ├── bike.eps 84 | │ ├── problem.en.tex 85 | │ ├── profile.asy 86 | │ └── profile.pdf 87 | ├── data 88 | │ ├── sample 89 | │ │ ├── 1.ans 90 | │ │ ├── 1.in 91 | │ │ ├── 1.png 92 | │ │ ├── 2.ans 93 | │ │ ├── 2.in 94 | │ │ └── 2.png 95 | │ ├── secret 96 | │ │ ├── 01.ans 97 | │ │ ├── 01.desc 98 | │ │ ├── 01.in 99 | │ │ ├── 01.png 100 | │ │ ├── 02.ans 101 | │ │ ├── 02.in 102 | │ │ ├── 02.png 103 | │ │ └── ... 104 | ├── input_validators 105 | │ ├── input_validator 106 | │ │ ├── input_validator.cpp 107 | │ │ └── validation.h 108 | │ ├── profile.ctd 109 | │ └── validate.py 110 | ├── output_validator 111 | │ └── validate.ctd 112 | └── submissions 113 | ├── accepted 114 | │ ├── alex.java 115 | │ ├── paul.cpp 116 | │ ├── ragnar.cpp 117 | │ └── tobi.java 118 | ├── time_limit_exceeded 119 | │ ├── jeroen_n2k.java 120 | │ ├── lukas_n2k.cc 121 | │ ├── lukas_n2k_sse.cc 122 | │ ├── lukas_n2k_v2.cc 123 | │ └── lukas_n2k_v2_sse.cc 124 | └── wrong_answer 125 | ├── jeroen_parsingerror.java 126 | ├── paul-unstable-sort.cpp 127 | ├── ragnar-2.cpp 128 | ├── ragnar-4.cpp 129 | ├── ragnar.cpp 130 | └── tobi.cpp 131 | ``` 132 | -------------------------------------------------------------------------------- /appendix/languages.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Languages 4 | sort: 4 5 | --- 6 | 7 | # Languages 8 | 9 | File endings in parentheses are not used for determining language. 10 | 11 | | Code | Language | Default entry point | File endings | 12 | | ------------ | ------------------- |---------------------| ------------------------------- | 13 | | ada | Ada | | .adb, .ads | 14 | | algol68 | Algol 68 | | .a68 | 15 | | apl | APL | | .apl | 16 | | bash | Bash | | .sh | 17 | | c | C | | .c | 18 | | cgmp | C with GMP | | (.c) | 19 | | cobol | COBOL | | .cob | 20 | | cpp | C++ | | .cc, .cpp, .cxx, .c++, .C | 21 | | cppgmp | C++ with GMP | | (.cc, .cpp, .cxx, .c++, .C) | 22 | | crystal | Crystal | | .cr | 23 | | csharp | C\# | | .cs | 24 | | d | D | | .d | 25 | | dart | Dart | | .dart | 26 | | elixir | Elixir | | .ex | 27 | | erlang | Erlang | | .erl | 28 | | forth | Forth | | .fth, .
4th, .forth, .frt, (.fs) | 29 | | fortran | Fortran | | .f90 | 30 | | fsharp | F\# | | .fs | 31 | | gerbil | Gerbil | | .ss | 32 | | go | Go | | .go | 33 | | haskell | Haskell | | .hs | 34 | | java | Java | Main | .java | 35 | | javaalgs4 | Java with Algs4 | Main | (.java) | 36 | | javascript | JavaScript | `main.js` | .js | 37 | | julia | Julia | | .jl | 38 | | kotlin | Kotlin | MainKt | .kt | 39 | | lisp | Common Lisp | `main.{lisp,cl}` | .lisp, .cl | 40 | | lua | Lua | | .lua | 41 | | modula2 | Modula-2 | | .mod, .def | 42 | | nim | Nim | | .nim | 43 | | objectivec | Objective-C | | .m | 44 | | ocaml | OCaml | | .ml | 45 | | octave | Octave | | (.m) | 46 | | odin | Odin | | .odin | 47 | | pascal | Pascal | | .pas | 48 | | perl | Perl | | .pm, (.pl) | 49 | | php | PHP | `main.php` | .php | 50 | | prolog | Prolog | | .pl | 51 | | python2 | Python 2 | `__main__.py` | (.py), .py2 | 52 | | python3 | Python 3 | `__main__.py` | .py, .py3 | 53 | | python3numpy | Python 3 with NumPy | `__main__.py` | (.py, .py3) | 54 | | racket | Racket | | .rkt | 55 | | ruby | Ruby | | .rb | 56 | | rust | Rust | | .rs | 57 | | scala | Scala | | .scala | 58 | | simula | Simula | | .sim | 59 | | smalltalk | Smalltalk | | .st | 60 | | snobol | Snobol | | .sno | 61 | | swift | Swift | | .swift | 62 | | typescript | TypeScript | | .ts | 63 | | visualbasic | Visual Basic | | .vb | 64 | | zig | Zig | | .zig | 65 | -------------------------------------------------------------------------------- /appendix/readme.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Appendix 3 | sort: 2 4 | --- 5 | -------------------------------------------------------------------------------- /assets/js/script.js: -------------------------------------------------------------------------------- 1 | function unified() { 2 | var divs = document.getElementsByClassName("not-icpc"); 3 | for (var i = 0; i < divs.length; ++i) { 4 | divs[i].classList.remove("icpc_view"); 5 | divs[i].classList.remove("full_view"); 6 | } 7 | } 8 | function icpc() { 9 | var divs = document.getElementsByClassName("not-icpc"); 10 | for (var i = 0; i < divs.length; ++i) { 11 | divs[i].classList.remove("full_view"); 12 | divs[i].classList.add("icpc_view"); 13 | } 14 | } 15 | function full() { 16 | var divs = document.getElementsByClassName("not-icpc"); 17 | for (var i = 0; i < divs.length; ++i) { 18 | divs[i].classList.remove("icpc_view"); 19 | divs[i].classList.add("full_view"); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /examples/problem.yaml.md: -------------------------------------------------------------------------------- 1 | --- 2 | permalink: /examples/problem_yaml 3 | --- 4 | 5 | # `problem.yaml` 6 | 7 | ## Minimal 8 | 9 | ```yaml 10 | problem_format_version: 2023-07-draft 11 | name: Sample problem 12 | uuid: b9f846aa-c233-45ee-a70a-473aecc8fe77 13 | source: ICPC Mid-Atlantic Regional Contest 14 | author: John von Judge 15 | rights_owner: ICPC 16 | ``` 17 | 18 | ## Typical 19 | 20 | ```yaml 21 | problem_format_version: 2023-07-draft 22 | name: Sample problem 23 | uuid: 7594abe6-08e3-4743-8cf9-15c4693cdbf5 24 | author: John von Judge 25 | source: ICPC World Finals 2023 26 | source_url: https://2023.icpc.global 27 | license: cc by-sa 28 | rights_owner: author 29 | 30 | validation: custom 31 | ``` 32 | 33 | ## Maximal 34 | 35 | ```yaml 36 | problem_format_version: 2023-07-draft 37 | name: 38 | en: Sample problem 39 | nl: Voorbeeld probleem 40 | uuid: 
0bf1d986-afc4-4475-a696-3bfca6276b12 41 | # for non-ICPC style problems this may also be scoring 42 | type: pass-fail 43 | author: John von Judge 44 | source: ICPC World Finals 2023 45 | source_url: https://2023.icpc.global 46 | license: cc by-sa 47 | rights_owner: ICPC 48 | 49 | # shown values are the defaults 50 | limits: 51 | time_multipliers: 52 | ac_to_time_limit: 2.0 53 | time_limit_to_tle: 1.5 54 | time_limit: 1.0 55 | time_resolution: 1.0 56 | memory: 2048 57 | output: 8 58 | code: 128 59 | compilation_time: 60 60 | compilation_memory: 2048 61 | validation_time: 60 62 | validation_memory: 2048 63 | validation_output: 8 64 | 65 | validation: custom 66 | 67 | keywords: [graph, dijkstra] 68 | 69 | languages: [c, cpp, java, kotlin, python3] 70 | ``` 71 | -------------------------------------------------------------------------------- /examples/problems/README.md: -------------------------------------------------------------------------------- 1 | # Sample Problem Packages 2 | 3 | Work in progress. 4 | 5 | This directory contains some sample problem packages that are meant to illustrate how the Problem Package Format works. 6 | 7 | `passfail`, `submit_answer`, `scoring`, `interactive`, `multipass` are meant to resemble typical problem packages of the corresponding types. `maximal` is meant to illustrate all features (as far as it is possible) of the Problem Package Format — it is *not* meant to resemble a typical problem package. 8 | -------------------------------------------------------------------------------- /examples/problems/interactive/README.md: -------------------------------------------------------------------------------- 1 | # Sample interactive problem 2 | 3 | This is a Problem Package for a sample interactive problem. 4 | 5 | This problem package uses the Default Output Validator. 6 | -------------------------------------------------------------------------------- /examples/problems/interactive/problem.yaml: -------------------------------------------------------------------------------- 1 | problem_format_version: 2023-07-draft # or `legacy` for the previous version of the Problem Package Format 2 | type: pass-fail interactive 3 | name: Sample problem 4 | uuid: 117b564d-429b-44d4-bf8c-c264a3703e07 5 | credits: 6 | authors: Author One 7 | source: My Contest 2024 8 | source_url: https://my.contest.com/2024 9 | license: cc by-sa 10 | rights_owner: My Contest 11 | -------------------------------------------------------------------------------- /examples/problems/interactive/statement/problem.en.tex: -------------------------------------------------------------------------------- 1 | \problemname{Sample Problem} 2 | This is a sample problem to illustrate how the Problem Package Format works. 3 | \illustration{1}{img.png}{This is an illustration...} 4 | \section*{Input} 5 | There is no input to this problem. 6 | \section*{Output} 7 | Print {{example_constant}}.
8 | -------------------------------------------------------------------------------- /examples/problems/interactive/submissions/submissions.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kattis/problem-package-format/c46e836350435f27eaa89c87b20efe2af052e4af/examples/problems/interactive/submissions/submissions.yaml -------------------------------------------------------------------------------- /examples/problems/maximal/README.md: -------------------------------------------------------------------------------- 1 | # Maximal pass-fail problem 2 | 3 | This is a Problem Package for a pass-fail problem with as many features as possible. 4 | 5 | This problem package uses a custom Output Validator. 6 | -------------------------------------------------------------------------------- /examples/problems/maximal/attachments/my_attachment.txt: -------------------------------------------------------------------------------- 1 | This file will be publicly available for contestants when viewing the problem. 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/data/sample/1.ans: -------------------------------------------------------------------------------- 1 | 42 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/data/sample/1.in: -------------------------------------------------------------------------------- 1 | 41 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/data/secret/1.ans: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kattis/problem-package-format/c46e836350435f27eaa89c87b20efe2af052e4af/examples/problems/maximal/data/secret/1.ans -------------------------------------------------------------------------------- /examples/problems/maximal/data/secret/1.in: -------------------------------------------------------------------------------- 1 | 13 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/data/secret/2.ans: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kattis/problem-package-format/c46e836350435f27eaa89c87b20efe2af052e4af/examples/problems/maximal/data/secret/2.ans -------------------------------------------------------------------------------- /examples/problems/maximal/data/secret/2.in: -------------------------------------------------------------------------------- 1 | -100 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/data/secret/3.ans: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kattis/problem-package-format/c46e836350435f27eaa89c87b20efe2af052e4af/examples/problems/maximal/data/secret/3.ans -------------------------------------------------------------------------------- /examples/problems/maximal/data/secret/3.in: -------------------------------------------------------------------------------- 1 | 99 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/data/secret/4.ans: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Kattis/problem-package-format/c46e836350435f27eaa89c87b20efe2af052e4af/examples/problems/maximal/data/secret/4.ans -------------------------------------------------------------------------------- /examples/problems/maximal/data/secret/4.in: -------------------------------------------------------------------------------- 1 | 0 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/include/default/data.txt: -------------------------------------------------------------------------------- 1 | {{example_constant}} 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/include/python3/include.py: -------------------------------------------------------------------------------- 1 | magic_constant = {{example_constant}} 2 | -------------------------------------------------------------------------------- /examples/problems/maximal/input_validators/validator.ctd: -------------------------------------------------------------------------------- 1 | INT(-1000, 1000) NEWLINE 2 | EOF 3 | -------------------------------------------------------------------------------- /examples/problems/maximal/output_validators/validator.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | try: 4 | in_file: str = sys.argv[1] 5 | except IndexError: 6 | sys.exit(43) # Rejected 7 | 8 | try: 9 | output: str = str(sys.stdin.read()) 10 | except UnicodeDecodeError: 11 | sys.exit(43) # Rejected 12 | 13 | try: 14 | indata: str 15 | with open(in_file) as f: 16 | indata = f.read() 17 | except: 18 | sys.exit(43) # Rejected 19 | 20 | try: 21 | if abs(int(output) - int(indata)) == {{example_constant}}: 22 | sys.exit(42) # Accepted 23 | else: 24 | sys.exit(43) # Rejected 25 | except ValueError: 26 | sys.exit(43) # Rejected 27 | -------------------------------------------------------------------------------- /examples/problems/maximal/problem.yaml: -------------------------------------------------------------------------------- 1 | problem_format_version: 2023-07-draft # or `legacy` for the previous version of the Problem Package Format 2 | type: pass-fail 3 | name: # May be just a string if there's only a problem statement for one language 4 | en: Sample problem 5 | sv: Exempel problem 6 | uuid: b2792508-b645-480a-8357-da047d57a17d 7 | credits: 8 | authors: 9 | - Author One 10 | - Author Two 11 | contributors: Contributor 12 | testers: 13 | - Tester 1 14 | - Tester 2 15 | translators: 16 | sv: Sven Translator 17 | acknowledgements: [Inspirational Speaker 1, Inspirational Speaker 2] 18 | source: My Contest 2024 19 | source_url: https://my.contest.com/2024 20 | license: cc by-sa 21 | rights_owner: My Contest 22 | limits: # There are many more limits that can be set... 23 | time_limit: 10 24 | keywords: [] 25 | languages: [python3, cpp] 26 | constants: 27 | example_constant: 1 28 | -------------------------------------------------------------------------------- /examples/problems/maximal/solution/solution.en.md: -------------------------------------------------------------------------------- 1 | # Solution 2 | 3 | The solution to the sample problem is to read the integer N, and print N ± 1 to stdout. 
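
As a concrete sketch (mirroring `submissions/accepted/accepted.py` above, and assuming the `include/` mechanism places the judge-provided `include/python3/include.py` next to the submission, with `{{example_constant}}` substituted to 1 from the `constants` section of `problem.yaml`):

```python
# Sketch of the accepted approach; `include` is the judge-provided module
# from include/python3/include.py, whose magic_constant is {{example_constant}} = 1.
import include

n = int(input())
print(n + include.magic_constant)  # printing n - include.magic_constant also passes
```

Because the custom output validator accepts any output differing from the input by exactly the constant, both N + 1 and N - 1 are accepted.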
4 | -------------------------------------------------------------------------------- /examples/problems/maximal/solution/solution.sv.md: -------------------------------------------------------------------------------- 1 | # Lösning 2 | 3 | Lösningen till problemet är att läsa talet N från indatan och skriva N ± 1 till stdout. 4 | -------------------------------------------------------------------------------- /examples/problems/maximal/statement/kattis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kattis/problem-package-format/c46e836350435f27eaa89c87b20efe2af052e4af/examples/problems/maximal/statement/kattis.png -------------------------------------------------------------------------------- /examples/problems/maximal/statement/problem.en.tex: -------------------------------------------------------------------------------- 1 | \problemname{Sample Problem} 2 | This is a sample problem to illustrate how the Problem Package Format works. 3 | \illustration{1}{kattis.png}{This is an illustration...} 4 | \section*{Input} 5 | Input consists of one integer $N (-1000 \le N \le 1000)$. 6 | \section*{Output} 7 | Print $N \pm {{example_constant}}$. 8 | -------------------------------------------------------------------------------- /examples/problems/maximal/statement/problem.sv.tex: -------------------------------------------------------------------------------- 1 | \problemname{Exempel Problem} 2 | Detta är ett exempel problem som visar hur Problem Package Formatet fungerar. 3 | \illustration{1}{kattis.png}{Detta är en illustration...} 4 | \section*{Indata} 5 | Indatan består av ett heltal $N (-1000 \le N \le 1000)$. 6 | \section*{Utdata} 7 | Skriv $N \pm {{example_constant}}$. 8 | -------------------------------------------------------------------------------- /examples/problems/maximal/submissions/accepted/accepted.py: -------------------------------------------------------------------------------- 1 | import include 2 | 3 | print(int(input()) + include.magic_constant) 4 | -------------------------------------------------------------------------------- /examples/problems/maximal/submissions/accepted/with_include.php: -------------------------------------------------------------------------------- 1 | 4 | 5 | accepted/*.php: 6 | language: php 7 | authors: PHP Judge 8 | 9 | run_time_error/not_defined: 10 | language: python3 11 | entrypoint: main.py 12 | authors: Python Judge 13 | 14 | time_limit_exceeded/*: 15 | authors: [Committee Member One , Committee Member Two ] 16 | 17 | wrong_answer/*: 18 | authors: Python Judge 19 | -------------------------------------------------------------------------------- /examples/problems/maximal/submissions/time_limit_exceeded/tle.py: -------------------------------------------------------------------------------- 1 | import time 2 | time.sleep(10000) 3 | -------------------------------------------------------------------------------- /examples/problems/maximal/submissions/wrong_answer/wrong.py: -------------------------------------------------------------------------------- 1 | print('Forty-Two') 2 | -------------------------------------------------------------------------------- /examples/problems/multipass/README.md: -------------------------------------------------------------------------------- 1 | # Sample multi-pass problem 2 | 3 | This is a Problem Package for a sample pass-fail multi-pass problem. 4 | 5 | This problem package uses the Default Output Validator.
6 | -------------------------------------------------------------------------------- /examples/problems/multipass/problem.yaml: -------------------------------------------------------------------------------- 1 | problem_format_version: 2023-07-draft # or `legacy` for the previous version of the Problem Package Format 2 | type: pass-fail multi-pass 3 | name: Sample problem 4 | uuid: c0c117b7-c70c-4369-b574-06e7802beb9f 5 | credits: 6 | authors: Author One 7 | source: My Contest 2024 8 | source_url: https://my.contest.com/2024 9 | license: cc by-sa 10 | rights_owner: My Contest 11 | -------------------------------------------------------------------------------- /examples/problems/multipass/statement/problem.en.tex: -------------------------------------------------------------------------------- 1 | \problemname{Sample Problem} 2 | This is a sample problem to illustrate how the Problem Package Format works. 3 | \illustration{1}{img.png}{This is an illustration...} 4 | \section*{Input} 5 | There is no input to this problem. 6 | \section*{Output} 7 | Print {{example_constant}}. 8 | -------------------------------------------------------------------------------- /examples/problems/multipass/submissions/submissions.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kattis/problem-package-format/c46e836350435f27eaa89c87b20efe2af052e4af/examples/problems/multipass/submissions/submissions.yaml -------------------------------------------------------------------------------- /examples/problems/passfail/README.md: -------------------------------------------------------------------------------- 1 | # Sample pass-fail problem 2 | 3 | This is a Problem Package for a sample pass-fail problem. 4 | 5 | This problem package uses the Default Output Validator.
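
For a pass-fail problem such as this one, the Default Output Validator accepts a submission's output if it matches the corresponding `.ans` file token for token, up to whitespace; its strictness can be tuned per test group via `output_validator_flags` (e.g. `case_sensitive`, `float_tolerance`). A rough, non-normative sketch of that check:

```python
# Simplified sketch of default-validator-style judging (not the real
# implementation): whitespace-insensitive token comparison.
def judge(team_output: str, answer: str) -> bool:
    return team_output.split() == answer.split()

print(judge("42\n", " 42 "))  # True: differing whitespace is ignored
```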
6 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/sample/1.ans: -------------------------------------------------------------------------------- 1 | 42 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/sample/1.in: -------------------------------------------------------------------------------- 1 | 41 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/sample/testdata.yaml: -------------------------------------------------------------------------------- 1 | # We don't want to modify the defaults as listed below: 2 | #scoring: 3 | # score: 0 4 | # max_score: 0 5 | # aggregation: sum 6 | #input_validator_flags: '' # (Empty String) 7 | #output_validator_flags: '' # (Empty String) 8 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/secret/1.ans: -------------------------------------------------------------------------------- 1 | 8 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/secret/1.in: -------------------------------------------------------------------------------- 1 | 7 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/secret/2.ans: -------------------------------------------------------------------------------- 1 | 14 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/secret/2.in: -------------------------------------------------------------------------------- 1 | 13 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/secret/3.ans: -------------------------------------------------------------------------------- 1 | 3 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/secret/3.in: -------------------------------------------------------------------------------- 1 | 2 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/data/secret/testdata.yaml: -------------------------------------------------------------------------------- 1 | # We don't want to modify the defaults as listed below: 2 | #scoring: 3 | # score: 1 4 | # max_score: 3 # This is automatically computed based on this file 5 | # # and the testcases in the group (see documentation for details). 
6 | # aggregation: sum 7 | #input_validator_flags: '' # (Empty String) 8 | #output_validator_flags: '' # (Empty String) 9 | -------------------------------------------------------------------------------- /examples/problems/passfail/input_validators/validator.ctd: -------------------------------------------------------------------------------- 1 | INT(-1000, 1000) NEWLINE 2 | EOF 3 | -------------------------------------------------------------------------------- /examples/problems/passfail/problem.yaml: -------------------------------------------------------------------------------- 1 | problem_format_version: 2023-07-draft 2 | type: pass-fail 3 | name: Sample problem 4 | uuid: 789c94bb-11e7-47f4-bfe6-4988f460f021 5 | credits: 6 | authors: Author 7 | source: My Contest 2024 8 | source_url: https://my.contest.com/2024 9 | license: cc by-sa 10 | rights_owner: My Contest 11 | -------------------------------------------------------------------------------- /examples/problems/passfail/statement/problem.en.tex: -------------------------------------------------------------------------------- 1 | \problemname{Sample Problem} 2 | This is a sample problem to illustrate how the Problem Package Format works. 3 | \section*{Input} 4 | Input consists of one integer $N (-1000 \le N \le 1000)$. 5 | \section*{Output} 6 | Print $N + 1$. 7 | -------------------------------------------------------------------------------- /examples/problems/passfail/submissions/accepted/solution.py: -------------------------------------------------------------------------------- 1 | print(int(input()) + 1) 2 | -------------------------------------------------------------------------------- /examples/problems/passfail/submissions/submissions.yaml: -------------------------------------------------------------------------------- 1 | accepted/*: 2 | authors: Author 3 | wrong_answer/*: 4 | authors: Author 5 | -------------------------------------------------------------------------------- /examples/problems/passfail/submissions/wrong_answer/constant.py: -------------------------------------------------------------------------------- 1 | # This solution does solve the sample case, but fails all secret test cases 2 | print(42) 3 | -------------------------------------------------------------------------------- /examples/problems/passfail/submissions/wrong_answer/wrong.py: -------------------------------------------------------------------------------- 1 | print(input()) 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/README.md: -------------------------------------------------------------------------------- 1 | # Sample scoring problem 2 | 3 | This is a Problem Package for a sample scoring problem. 4 | 5 | This problem package uses the Default Output Validator. 
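
Scoring in this package works as follows: each subtask's `testdata.yaml` sets `aggregation: min`, so a subtask is worth its `score` only if every test case in it passes, while `data/secret/testdata.yaml` sets `aggregation: sum`, so subtask scores add up (30 + 70 = 100 for a full solve). A loose sketch of that min/sum aggregation, assuming all-or-nothing test cases:

```python
# Illustrative sketch of the aggregation configured in this package.
passed = {"subtask1": [True, True, True], "subtask2": [True, False, True]}
points = {"subtask1": 30, "subtask2": 70}

# aggregation: min within a subtask -> every case must pass to earn its score
subtask_scores = {g: points[g] if all(v) else 0 for g, v in passed.items()}
# aggregation: sum at the secret level -> subtask scores add up
print(sum(subtask_scores.values()))  # 30 here, since subtask2 has a failing case
```

For example, `submissions/partially_accepted/partial_solution.py` prints `abs(N)`, so it passes all of subtask 1 (where 0 ≤ N ≤ 1000) but fails the negative cases in subtask 2, earning 30 of the 100 points.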
6 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/sample/1.ans: -------------------------------------------------------------------------------- 1 | 42 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/sample/1.in: -------------------------------------------------------------------------------- 1 | 42 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask1/1.ans: -------------------------------------------------------------------------------- 1 | 7 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask1/1.in: -------------------------------------------------------------------------------- 1 | 7 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask1/2.ans: -------------------------------------------------------------------------------- 1 | 14 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask1/2.in: -------------------------------------------------------------------------------- 1 | 14 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask1/3.ans: -------------------------------------------------------------------------------- 1 | 3 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask1/3.in: -------------------------------------------------------------------------------- 1 | 3 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask1/testdata.yaml: -------------------------------------------------------------------------------- 1 | scoring: 2 | score: 30 3 | aggregation: min 4 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask2/1.ans: -------------------------------------------------------------------------------- 1 | -42 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask2/1.in: -------------------------------------------------------------------------------- 1 | -42 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask2/2.ans: -------------------------------------------------------------------------------- 1 | 82 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask2/2.in: -------------------------------------------------------------------------------- 1 | 82 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask2/3.ans: -------------------------------------------------------------------------------- 1 | -1 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/subtask2/3.in: -------------------------------------------------------------------------------- 1 | -1 2 | -------------------------------------------------------------------------------- 
/examples/problems/scoring/data/secret/subtask2/testdata.yaml: -------------------------------------------------------------------------------- 1 | scoring: 2 | score: 70 3 | aggregation: min 4 | -------------------------------------------------------------------------------- /examples/problems/scoring/data/secret/testdata.yaml: -------------------------------------------------------------------------------- 1 | scoring: 2 | aggregation: sum 3 | -------------------------------------------------------------------------------- /examples/problems/scoring/input_validators/validator.ctd: -------------------------------------------------------------------------------- 1 | INT(-1000, 1000) NEWLINE 2 | EOF 3 | -------------------------------------------------------------------------------- /examples/problems/scoring/problem.yaml: -------------------------------------------------------------------------------- 1 | problem_format_version: 2023-07-draft 2 | type: scoring 3 | name: Sample Scoring problem 4 | uuid: db3e1e32-dd6f-4158-8f9f-909a383fe8d5 5 | credits: 6 | authors: Author 7 | source: My Contest 2024 8 | source_url: https://my.contest.com/2024 9 | license: cc by-sa 10 | rights_owner: My Contest 11 | -------------------------------------------------------------------------------- /examples/problems/scoring/statement/problem.en.tex: -------------------------------------------------------------------------------- 1 | \problemname{Sample Scoring Problem} 2 | This is a sample problem to illustrate how the Problem Package Format works. 3 | \section*{Input} 4 | Input consists of one integer $N (-1000 \le N \le 1000)$. 5 | \section*{Output} 6 | Print $N$. 7 | \section*{Constraints and Scoring} 8 | Your solution will be tested on a set of test groups, each worth a number of points. 9 | Each test group contains a set of test cases. 10 | To get the points for a test group you need to solve all test cases in the test group.
11 | 12 | \noindent 13 | \begin{tabular}{| l | l | l |} 14 | \hline 15 | Group & Points & Constraints\\ \hline 16 | $1$ & $30$ & $0 \le N \le 1000$ \\ \hline 17 | $2$ & $70$ & No further constraints \\ \hline 18 | \end{tabular} 19 | -------------------------------------------------------------------------------- /examples/problems/scoring/submissions/accepted/solution.py: -------------------------------------------------------------------------------- 1 | print(input()) 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/submissions/partially_accepted/partial_solution.py: -------------------------------------------------------------------------------- 1 | print(abs(int(input()))) 2 | -------------------------------------------------------------------------------- /examples/problems/scoring/submissions/submissions.yaml: -------------------------------------------------------------------------------- 1 | accepted/*: 2 | authors: Author 3 | partially_accepted/*: 4 | authors: Author 5 | wrong_answer/*: 6 | authors: Author 7 | -------------------------------------------------------------------------------- /examples/problems/scoring/submissions/wrong_answer/constant.py: -------------------------------------------------------------------------------- 1 | print(42) 2 | -------------------------------------------------------------------------------- /examples/problems/submit_answer/README.md: -------------------------------------------------------------------------------- 1 | # Sample submit-answer problem 2 | 3 | This is a Problem Package for a sample pass-fail submit-answer problem. 4 | 5 | This problem package uses the Default Output Validator. 6 | -------------------------------------------------------------------------------- /examples/problems/submit_answer/problem.yaml: -------------------------------------------------------------------------------- 1 | problem_format_version: 2023-07-draft 2 | type: pass-fail submit-answer 3 | name: Sample problem 4 | uuid: 9b3df34b-165a-4ed6-8248-4cee2bd6985c 5 | credits: 6 | authors: Author One 7 | source: My Contest 2024 8 | source_url: https://my.contest.com/2024 9 | license: cc by-sa 10 | rights_owner: My Contest 11 | -------------------------------------------------------------------------------- /examples/problems/submit_answer/statement/problem.en.tex: -------------------------------------------------------------------------------- 1 | \problemname{Sample Problem} 2 | This is a sample problem to illustrate how the Problem Package Format works. 3 | \section*{Input} 4 | There is no input to this problem. 5 | \section*{Output} 6 | Print 42. 
7 | -------------------------------------------------------------------------------- /examples/problems/submit_answer/submissions/accepted/solution.py: -------------------------------------------------------------------------------- 1 | print(42) 2 | -------------------------------------------------------------------------------- /examples/problems/submit_answer/submissions/submissions.yaml: -------------------------------------------------------------------------------- 1 | accepted/*: 2 | authors: Author 3 | wrong_answer/*: 4 | authors: Author 5 | -------------------------------------------------------------------------------- /examples/problems/submit_answer/submissions/wrong_answer/wrong.py: -------------------------------------------------------------------------------- 1 | print(24) 2 | -------------------------------------------------------------------------------- /examples/readme.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Examples 3 | sort: 3 4 | --- 5 | -------------------------------------------------------------------------------- /spec/2023-07-draft.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: 2023-07-draft (latest) 4 | sort: 2 5 | --- 6 | 7 | # Problem Package Format 8 | 9 | This is the `2023-07-draft` version of the Kattis problem package format. 10 | 11 | ## Overview 12 | 13 | This document describes the format of a _Kattis problem package_, 14 | used for distributing and sharing problems for algorithmic programming contests as well as educational use. 15 | 16 | This document does not explicitly specify an access policy to the data in a problem package. 17 | Normally most data (such as [test data](#test-data)) should be considered privileged and 18 | be available only to those managing the problems/contest (from now on referred to as "judges"), 19 | unless it is indicated as meant to be shared with those attempting to solve the problem (from now on referred to as "teams"). 20 | 21 | ### General Requirements 22 | 23 | - The package must consist of a single directory containing files as described below. 24 | The directory name must consist solely of lowercase letters a–z and digits 0–9. 25 | Alternatively, the package can be a ZIP-compressed archive of such a directory with identical base name and extension `.kpp` or `.zip`. 26 | - All file names for files included in the package must match the regexp 27 | ```regex 28 | ^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,253}[a-zA-Z0-9]$ 29 | ``` 30 | i.e., they must be of length at least 2, at most 255, consist solely of lower- or uppercase letters a–z, A–Z, digits 0–9, period, dash, or underscore, 31 | but must not begin or end with a period, dash, or underscore. 32 | - All directory names inside the package must match the regexp 33 | ```regex 34 | ^[a-zA-Z0-9]([a-zA-Z0-9_-]{0,253}[a-zA-Z0-9])?$ 35 | ``` 36 | that is, they must be of length at least 1, at most 255, consist solely of lower- or uppercase letters a–z, A–Z, digits 0–9, dash, or underscore, 37 | but must not begin or end with a dash or underscore. 38 | - All text files for a problem must be UTF-8 encoded and not have a byte-order mark (BOM). 39 | - All text files must have Unix-style line endings (newline/LF byte only). 40 | Note that LF is line-ending and not line-separating in POSIX, which means that all non-empty text files must end with a newline.
41 | - Natural language (for example, in the [problem statement](#problem-statements) filename) must be specified as 2-letter [ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) code if it exists, otherwise as a 3-letter code from ISO 639. 42 | Optionally, it may be suffixed with a hyphen and an ISO 3166-1 alpha-2 code, as defined in BCP 47, for example, `pt-BR` to indicate Brazilian Portuguese. 43 | - All floating-point numbers (in any files, including in the problem package or submission output, that are parsed by contest system tools or the default output validator), 44 | must be given in decimal and may use scientific notation. 45 | More specifically, floating-point numbers must satisfy the following grammar, 46 | which accepts formatted floating-point output from most major programming languages: 47 | ```regex 48 | sign [+-] 49 | digit [0123456789] 50 | expIndicator [Ee] 51 | significand ( {digit}* "." {digit}+ | {digit}+ "." | {digit}+ ) 52 | exponent {expIndicator} {sign}? {digit}+ 53 | float {sign}? {significand} {exponent}? 54 | ``` 55 | - The systems parsing these floating-point numbers may impose reasonable limits on the number of digits, 56 | but must support at least 30 digits before and after the decimal point. 57 | They must use an internal representation with at least 52 bits of mantissa precision and should make a "best effort" to parse floating-point numbers to their closest representable values. 58 | - The problem package may include symbolic links to other files in the problem package. 59 | Symlinks must not have targets outside the problem package directory tree. 60 | 61 | ### Problem Package Structure Overview 62 | 63 | The following table summarizes the elements of a problem package described in this specification: 64 | 65 | File or Folder | Required? 
| Described In | Description 66 | ---------------------- | --------- | --------------------------------------------- | ----------- 67 | `problem.yaml` | Yes | [Problem Metadata](#problem-metadata) | Metadata about the problem (e.g., source, license, limits) 68 | `statement/` | Yes | [Problem Statements](#problem-statements) | Problem statement files 69 | `attachments/` | No | [Attachments](#attachments) | Files available to teams other than the problem statement and sample test data 70 | `solution/` | No | [Solution Description](#solution-description) | Written explanations of how to solve the problem 71 | `data/sample/` | No | [Test Data](#test-data) | Sample test data 72 | `data/secret/` | Yes | [Test Data](#test-data) | Secret test data 73 | `data/invalid_input/` | No | [Invalid Test Cases](#invalid-test-cases) | Invalid test case input for testing input validation 74 | `data/invalid_output/` | No | [Invalid Test Cases](#invalid-test-cases) | Invalid test case output for testing output validation 75 | `data/valid_output/` | No | [Valid Output](#valid-output) | Valid test case output for testing output validation 76 | `generators/` | No | [Generators](#generators) | Scripts and documentation about how test cases were automatically generated 77 | `include/` | No | [Included Files](#included-files) | Files appended to all submitted solutions 78 | `submissions/` | Yes | [Example Submissions](#example-submissions) | Correct and incorrect judge solutions of the problem 79 | `input_validators/` | Yes | [Input Validators](#input-validators) | Programs that verify the correctness of the test data inputs 80 | `static_validator/` | No | [Static Validator](#static-validator) | Custom program for judging solutions with source files as input 81 | `output_validator/` | No | [Output Validator](#output-validator) | Custom program for judging solutions 82 | `input_visualizer/` | No | [Input Visualizer](#input-visualizer) | Scripts and documentation about how test case illustrations were generated 83 | `output_visualizer/` | No | [Output Visualizer](#output-visualizer) | Program to generate images illustrating submission output 84 | 85 | A minimal problem package must contain `problem.yaml`, a problem statement, a secret test case, an accepted judge solution, and an input validator. 86 | 87 | ### Programs 88 | 89 | There are a number of different kinds of programs that may be provided in the problem package: 90 | submissions, input validators, static validators, output validators, and output visualizers. 91 | All programs are always represented by a single file or directory. 92 | In other words, if a program consists of several files, these must be provided in a single directory. 93 | In the case that a program is a single file, it is treated as if a directory with the same name takes its place, which contains only that file. 94 | The name of the program, for the purpose of referring to it within the package, is the base name of the file or the name of the directory. 95 | There must not be two programs of the same kind with the same name. 96 | 97 | #### Languages and Compilation 98 | 99 | ##### Submissions 100 | 101 | The language of a submission program is determined by the `language` key in `submissions.yaml` if present; 102 | otherwise, by comparing the file extensions of the submission file(s) to those specified in the [languages table](../appendix/languages.md). 103 | If a single language can't be determined, building fails.
104 | [Included files](#included-files), if any, must be copied into the submission folder before building the submission. 105 | 106 | For languages where there could be several entry points, the entry point specified by the `entrypoint` key in `submissions.yaml` is used if present; 107 | otherwise, the default entry point in the [languages table](../appendix/languages.md) is used. 108 | 109 | ##### Other Programs 110 | 111 | Other programs (validators and visualizers) provided as a directory may include one of two POSIX-compliant shell scripts, `build` and `run`. 112 | If at least one of these two files is included: 113 | 1. First, if the `build` script is present, it must be executable and will be run. 114 | The working directory will be (a copy of) the program directory. 115 | The `run` file must exist in that directory and be executable after `build` is done. 116 | 2. Then, the `run` file (which now exists, and is an executable binary or POSIX-compliant shell script) 117 | will be used as the validator or visualizer program. 118 | 119 | Scripts may assume a POSIX-compliant shell and that a Python 3 interpreter, C compiler, and C++ compiler are available on the system search path, aliased to `python3`, `cc`, and `c++` respectively. 120 | Problem packages with `build` or `run` scripts are strongly encouraged to include a `README` file in the program directory documenting any such additional dependencies. 121 | 122 | Programs that do not include a `build` or `run` script must have one of the following forms: 123 | - a single Python 3 file; 124 | - a directory containing multiple Python 3 source files, two of which are `__init__.py` (defining a module) and `__main__.py` (which will be used as the program entry point); 125 | - a single C or C++ source file, or a directory containing one or more such files. 126 | 127 | The language of files is inferred from their extension as listed in the [languages table](../appendix/languages.md). 128 | 129 | #### Working Directory 130 | 131 | Each program must be run in a working directory with the following contents and **nothing else**: 132 | 133 | - For input validators: the files in the program directory of the input validator in question. 134 | - For submissions: the submitted files, any compiled binaries of the submitted files, any [included files](#included-files), and the contents of the `.files` directory of the test case being tested (if this directory exists). 135 | - For output validators, output visualizers, and static validators: the submitted files, any compiled binaries of the submitted files, as well as any included files. 136 | 137 | Please note that in particular: 138 | - the working directory for submissions **must not** contain any of the [test data](#test-data) files, except for the contents of the test case `.files` directory; 139 | - except for input validators, the files in a program's directory are not included in the working directory. 140 | 141 | ## Problem Metadata 142 | 143 | Metadata about the problem (e.g., source, license, limits) are provided in a YAML file named `problem.yaml` placed in the root directory of the package. 144 | 145 | The keys are defined as below. 146 | Keys are optional unless explicitly stated. 147 | Any unknown keys should be treated as an error. 
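For orientation, a minimal `problem.yaml` containing only the required keys could look as follows (the values are illustrative):
```yaml
problem_format_version: 2023-07-draft
name: Sample Problem
uuid: 9b3df34b-165a-4ed6-8248-4cee2bd6985c
```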
149 | Key | Type | Required | Default 150 | ------------------------------------------------- | --------------------------------------------- | --------- | ------- 151 | [problem_format_version](#problem-format-version) | String | Yes | 152 | [type](#type) | String or non-empty sequence of strings | No | `pass-fail` 153 | [name](#name) | String or map of strings | Yes | 154 | [uuid](#uuid-and-version) | String | Yes | 155 | [version](#uuid-and-version) | String | No | 156 | [credits](#credits) | String or map with keys as defined below | No | 157 | [source](#source) | String, a sequence, or a map as defined below | No | 158 | [license](#license) | String | No | `unknown` 159 | [rights_owner](#rights-owner) | String | See below | See below 160 | [embargo_until](#problem-publication-embargo) | Date | No | 161 | [limits](#limits) | Map with keys as defined below | No | See below 162 | [keywords](#keywords) | Sequence of strings | No | 163 | [languages](#languages) | String or non-empty sequence of strings | No | `all` 164 | [allow_file_writing](#allow-file-writing) | Boolean | No | `false` 165 | [constants](#constants) | Map of strings to int, float, or string | No | 166 | 167 | 168 | ### Problem format version 169 | 170 | Version of the Problem Package Format used for this package. 171 | If using this version of the Format, it must be the string `2023-07-draft`. 172 | The string will be in the form `<yyyy>-<mm>` for a stable version, 173 | `<yyyy>-<mm>-draft` or `draft` for a draft version, 174 | or `legacy` or `legacy-icpc` for the version before the addition of problem_format_version. 175 | Documentation for version `<version>` is available at `https://www.kattis.com/problem-package-format/spec/<version>`. 176 | 177 | ### Type 178 | 179 | Type of problem. 180 | Must be either a single string or a non-empty sequence of strings, from the table below, with no repetition. 181 | Two values listed as incompatible must not both be in the sequence. 182 | 183 | Value | Incompatible with | Comments 184 | --------------- | --------------------------- | -------- 185 | `pass-fail` | `scoring` | Default. Submissions are judged as either accepted or rejected (though the "rejected" judgement is more fine-grained and divided into results such as "Wrong Answer", "Time Limit Exceeded", etc). 186 | `scoring` | `pass-fail` | An accepted submission is additionally given a score, which is a non-negative numeric value (and the goal is to maximize this value). 187 | `multi-pass` | `submit-answer` | A submission should run multiple times with inputs for the next pass generated by the output validator of the current pass. 188 | `interactive` | `submit-answer` | The output validator is run interactively with the submission. 189 | `submit-answer` | `multi-pass`, `interactive` | A submission consists of the answers to the test cases instead of source code for a program that produces the answers. 190 | 191 | ### Name 192 | 193 | The name of the problem in each language for which a problem statement exists. 194 | The `name` field is a map with the language codes as keys and the problem names as values. 195 | If there is only one language **and** that language is English, the `name` field can simply be the problem name instead. 196 | The set of languages for which `name` is given must **exactly** match the set of languages for which a [problem statement](#problem-statements) exists. 197 | 198 | A deliberately complex example: 199 | ```yaml 200 | name: 201 | en: Hello World! 202 | pt-BR: Olá mundo! 203 | pt-PT: Oi mundo! 204 | fil: Kumusta mundo!
205 | ``` 206 | 207 | The simplest example, which implies that the only provided language is `en`: 208 | ```yaml 209 | name: Hello World! 210 | ``` 211 | 212 | ### UUID and version 213 | 214 | The `uuid` is meant to track a problem, even if its package name and/or `name` changes. 215 | For example, it can be used to identify the existing problem to update in an online problem archive and not accidentally upload it as a new one. 216 | The intention is that a new `uuid` should be assigned if the problem significantly changes. 217 | 218 | The `version` is meant for tracking (slightly) evolving versions of a problem, possibly during development, but also to track fixes to it. 219 | This can be used to check whether a problem uploaded to a contest system needs to be updated since it does not contain the latest fixes. 220 | 221 | This specification currently does not imply any more semantic meaning to these fields. 222 | 223 | ### Credits 224 | 225 | Map specifying who should get credits for creating this problem. 226 | A person is specified as a string with the full name, optionally followed by an email wrapped in `<>` (e.g., `Full Name` or `Full Name <email@example.com>`); 227 | alternatively, it can be specified as a map with the mandatory key `name`, specifying the name, 228 | and the optional keys `email`, `orcid`, `kattis`, specifying the email, [ORCID](https://orcid.org/), and [Kattis username](https://open.kattis.com/) respectively. 229 | 230 | Each of the keys in this section is optional. 231 | 232 | Key | Type | Comments 233 | ------------------ | ----------------------------------------------------------- | -------- 234 | `authors` | Person or non-empty sequence of persons | The people who conceptualized the problem. 235 | `contributors` | Person or non-empty sequence of persons | The people who developed the problem package, such as the statement, validators, and test data. 236 | `testers` | Person or non-empty sequence of persons | The people who tested the problem package, for example, by providing a solution and reviewing the statement. 237 | `translators` | Map of strings to persons or non-empty sequences of persons | The people who translated the statement to other languages. Each key must be a language code as described in [General Requirements](#general-requirements). 238 | `packagers` | Person or non-empty sequence of persons | The people who created the problem package out of an existing problem. 239 | `acknowledgements` | Person or non-empty sequence of persons | Extra acknowledgements or special thanks in addition to the previously mentioned. 240 | 241 | A full example would be 242 | ```yaml 243 | credits: 244 | authors: Authy McAuth 245 | contributors: 246 | - Authy McAuth 247 | - Additional Contributor 248 | testers: 249 | - name: Tester One 250 | email: one@tester.com 251 | - name: Tester Two 252 | orcid: 0000-0001-7414-8743 253 | - name: Tester Three 254 | kattis: tester-three 255 | translators: 256 | da: Mads Jensen 257 | eo: Ludoviko Lazaro Zamenhofo 258 | acknowledgements: 259 | - Inspirational Speaker 1 260 | - Inspirational Speaker 2 261 | packagers: 262 | - Package Creatorson 263 | ``` 264 | which demonstrates all the available credit types. 265 | 266 | Credits are sometimes omitted when authors instead choose to only give source credit, but both may be specified.
267 | If a string is provided instead of a map for credits, such as 268 | ```yaml 269 | credits: Authy McAuth 270 | ``` 271 | it is treated as if only a single author is being specified, so it is equivalent to 272 | ```yaml 273 | credits: 274 | authors: Authy McAuth 275 | ``` 276 | to support a less verbose credits section. 277 | 278 | ### Source 279 | 280 | The `source` key contains one or more source entries that this problem originates from. 281 | Each entry consists of either a map with keys `name` and `url`, where `name` is required, 282 | but `url` is optional, or alternatively a string with value equivalent to that of the `name` key. 283 | If there is only a single source entry, it can be specified directly as the value of `source`; 284 | otherwise `source` contains a list with all entries. 285 | 286 | The `name` should typically contain the name (and year) of the problem set (such as a contest or a course), 287 | where the problem was first used or for which it was created, and the key `url` should map to a link to the event's page. 288 | 289 | The following are valid examples: 290 | ```yaml 291 | source: 292 | name: NWERC 2024 293 | url: https://2024.nwerc.example/contest 294 | ``` 295 | which without `url` can be shortened to 296 | ```yaml 297 | source: NWERC 2024 298 | ``` 299 | A more extensive example: 300 | ```yaml 301 | source: 302 | - name: NWERC 2024 303 | url: https://2024.nwerc.example/contest 304 | - SWERC 2024 305 | - name: SEERC 2024 306 | ``` 307 | 308 | ### License 309 | 310 | License under which the problem may be used. 311 | Must be one of the values below. 312 | 313 | Value | Comments | Link 314 | --------------- | ---------------------------------------------------------------------------------- | ---- 315 | `unknown` | The default value. In practice means that the problem cannot be used. | 316 | `public domain` | There are no known copyrights on the problem, anywhere in the world. | 317 | `cc0` | CC0, "no rights reserved", version 1 or later. | 318 | `cc by` | CC attribution license, version 4 or later. | 319 | `cc by-sa` | CC attribution, share alike license, version 4 or later. | 320 | `educational` | May be freely used for educational purposes. | 321 | `permission` | Used with permission. The rights owner must be contacted for every additional use. | 322 | 323 | ### Rights Owner 324 | 325 | A rights owner is needed if the [license](#license) is anything other than `unknown` or `public domain`. 326 | If `license` is `public domain` there is no rights owner and `rights_owner` must not be set. 327 | 328 | If `rights_owner` is provided, this is the rights owner. 329 | Otherwise, if one or several authors are specified in `credits`, that group or individual is the rights owner. 330 | Otherwise, if a `source` is specified, the legal entity owning the rights associated with that source is the rights owner. 331 | 332 | ### Problem Publication Embargo 333 | 334 | The `embargo_until` key, if present, declares that the problem package should not be made publicly available (in problem archives, online judges, etc.) until a certain date and time. 335 | The value of this key must be a calendar date, or date and time of day in **UTC**, in ISO-8601 extended format (`YYYY-MM-DD` or `YYYY-MM-DD'T'hh:mm:ss'Z'`, where T and Z are constants not to be changed). 336 | The time of day defaults to the start of the day in UTC if not specified.
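For example, to keep a (hypothetical) problem package private until the end of its contest day:
```yaml
embargo_until: 2025-06-14T17:00:00Z
```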
338 | ### Limits 339 | 340 | Time, memory, and other limits to be imposed on submissions. 341 | A map with the following keys: 342 | 343 | Key | Comments | Default | Typical system default 344 | -------------------- | -------------------------------------- | -------------- | ---------------------- 345 | `time_multipliers` | optional map as defined below | see below | 346 | `time_limit` | optional float > 0, in seconds | see below | 347 | `time_resolution` | optional float > 0, in seconds | 1.0 | 348 | `memory` | optional int > 0, in MiB | system default | 2048 349 | `output` | optional int > 0, in MiB | system default | 8 350 | `code` | optional int > 0, in KiB | system default | 128 351 | `compilation_time` | optional int > 0, in seconds | system default | 60 352 | `compilation_memory` | optional int > 0, in MiB | system default | 2048 353 | `validation_time` | optional int > 0, in seconds | system default | 60 354 | `validation_memory` | optional int > 0, in MiB | system default | 2048 355 | `validation_output` | optional int > 0, in MiB | system default | 8 356 | `validation_passes` | optional int >= 2, only for multi-pass | 2 | 357 | 358 | For most keys, the system default will be used if nothing is specified. 359 | This can vary, but you **should** assume that it's reasonable. 360 | Only specify limits when the problem needs a specific limit, but do specify limits even if the "typical system default" is what is needed. 361 | 362 | #### Problem Timing 363 | 364 | `time_multipliers` is a map with the following keys: 365 | 366 | Key | Comments | Default 367 | ------------------- | ------------------- | ------- 368 | `ac_to_time_limit` | optional float >= 1 | 2.0 369 | `time_limit_to_tle` | optional float >= 1 | 1.5 370 | 371 | The value of `time_limit` is an integer or floating-point problem time limit in seconds. 372 | The time multipliers specify safety margins relative to the slowest accepted submission, `T_ac`, and the fastest time_limit_exceeded submission, `T_tle`. 373 | The `time_limit` must satisfy `T_ac * ac_to_time_limit <= time_limit` and `time_limit * time_limit_to_tle <= T_tle`. 374 | In these calculations, `T_tle` is treated as infinity if the problem does not provide at least one time_limit_exceeded submission. 375 | 376 | If no `time_limit` is provided, the default value is the smallest integer multiple of `time_resolution` that satisfies the above inequalities. 377 | It is an error if no such multiple exists. For example, with the default multipliers and resolution, if the slowest accepted submission runs in 1.3 seconds, the default time limit is 3 seconds (the smallest multiple of 1.0 that is at least 2.6), and every time_limit_exceeded submission must then take at least 4.5 seconds. 378 | The `time_resolution` key is ignored if the problem provides an explicit time limit (and in particular, 379 | the time limit is not required to be a multiple of the resolution). 380 | Since time multipliers are more future-proof than absolute time limits, avoid specifying `time_limit` whenever practical. 381 | 382 | Judge systems should make a best effort to respect the problem time limit, 383 | and should warn when importing a problem whose time limit is specified with precision greater than can be resolved by system timers. 384 | 385 | ### Keywords 386 | 387 | List of keywords describing the problem. 388 | 389 | ### Languages 390 | 391 | List of one or more programming language codes from the [languages table](../appendix/languages.md) or the string `all`. 392 | If the value is not `all`, the problem may only be solved using the languages listed. 393 | 394 | File endings in parentheses are not used for determining language. 395 | 396 | ### Allow File Writing 397 | 398 | Flag for configuring whether submissions should have access to creating, editing, and deleting files in their working directory. 399 | A value of `true` means submissions can read and write files, while the default value of `false` means submissions can only read from files.
400 | 401 | ### Constants 402 | 403 | Global constant values used by the problem, specified by a map of names to values. 404 | Names must match the following regex: `[a-zA-Z_][a-zA-Z0-9_]*`. 405 | _Constant sequences_ are tokens (regex words) of the form {% raw %}`{{name}}`{% endraw %}, 406 | where `name` is one of the names defined in `constants`. 407 | Tags {% raw %}`{{xyz}}`{% endraw %} containing a name that is not defined are left unmodified, but tooling may emit a warning for them. 408 | 409 | All constant sequences in the following files will be replaced by the value of the corresponding constant: 410 | - Markdown problem statements 411 | - input and output validators 412 | - included code 413 | - example submissions 414 | - `test_group.yaml` 415 | 416 | Note that constants are also available in LaTeX problem statements via the dedicated command `\constant{name}`. 417 | 418 | Constant sequences are **not** replaced in test data files or in `problem.yaml` itself.
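For example (the names and values are illustrative), a problem might define:
```yaml
constants:
  max_n: 1000
  max_coordinate: 1000000
```
An input validator or Markdown statement containing the token {% raw %}`{{max_n}}`{% endraw %} would then have it replaced by `1000`, while a LaTeX statement would use `\constant{max_n}`.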
420 | ## Problem Statements 421 | 422 | The problem statement is provided in the directory `statement/`. 423 | 424 | This directory must contain one file per language, for at least one language, named `problem.<language>.<filetype>`, 425 | that contains the problem text itself, including input and output specifications. 426 | Here, `<language>` is a language code as described in [General Requirements](#general-requirements). 427 | The filetype `<filetype>` can be either `.tex` for LaTeX files, `.md` for Markdown, or `.pdf` for PDF. 428 | 429 | Please note that many kinds of transformations on the problem statements, 430 | such as conversion to HTML or styling to fit in a single document containing many problems, will not be possible for PDF problem statements, 431 | so using this format should be avoided if at all possible. 432 | 433 | Auxiliary files needed by the problem statement files must all be in `statement/`. 434 | `problem.<language>.<filetype>` should reference auxiliary files as if the working directory is `statement/`. 435 | All statement types support the image file formats `.png`, `.jpg`, `.jpeg`. 436 | LaTeX statements also support `.pdf`. 437 | Markdown statements also support `.svg`. 438 | 439 | ### Sample Data 440 | 441 | - For problem statements provided in LaTeX or Markdown: 442 | the statement file must contain only the problem description and input/output specifications and no [sample data](#test-data). 443 | It is the judge system's responsibility to append the sample data. 444 | - For problem statements provided as PDFs: 445 | the judge system will display the PDF verbatim; 446 | therefore any sample data must be included in the PDF. 447 | The judge system is not required to reconcile sample data embedded in PDFs with the `sample` test data group nor to validate it in any other way. 448 | 449 | ### LaTeX Environment and Supported Subset 450 | 451 | Problem statements provided in LaTeX must consist only of the problem statement body 452 | (i.e., the content that would be placed within a `document` environment). 453 | It is the judging system's responsibility to wrap this text in an appropriate LaTeX class. 454 | 455 | The LaTeX class shall provide the convenience environments `Input`, `Output`, and `Interaction` for delineating sections of the problem statement. 456 | It shall also provide the following commands: 457 | 458 | - `\problemname{name}`, which should typically be the first line of the problem statement and places the problem name into the problem statement header. 459 | The argument `name` is optional. 460 | If it is missing, the `name` value from `problem.yaml` matching the problem statement's language is used. 461 | If it is present, it is used instead and must be a LaTeX-formatted version of that `name` value; this is useful when the problem name contains math formulas or other text that must be typeset specially. 464 | - `\illustration{width}{filename}{caption}`, a convenience command for adding a figure to the problem statement. 465 | `width` is a floating-point argument specifying the width of the figure as a fraction of the total width of the problem statement; 466 | `filename` is the image to display, and `caption`, the text to include below the figure. 467 | The illustration should be flushed right with text flowing around it (as in a `wrapfigure`). 468 | - `\nextsample` tells the judge system to include the next sample test case here. 469 | It is an error to use `\nextsample` when there are no remaining sample test cases. 470 | - `\remainingsamples` tells the judge system to include all sample test cases that have not previously been included by `\nextsample`. 471 | It is allowed to use `\remainingsamples` even if there are no remaining sample test cases, which will simply include nothing. 472 | - `\constant{name}` evaluates to the value of the corresponding constant, see [constants](#constants). 473 | 474 | Arbitrary LaTeX is not guaranteed to render correctly by HTML-based judging systems. 475 | However, judging systems must make a best effort to correctly render **at minimum** the following LaTeX subset when displaying a LaTeX problem statement: 476 | 477 | - All [MathJax-supported TeX commands](https://docs.mathjax.org/en/latest/input/tex/macros/index.html) within inline (`$ $`) and display (`$$ $$`) math mode. 478 | - The following text-mode environments: `itemize`, `enumerate`, `lstlisting`, `verbatim`, `quote`, `center`, `tabular`, `figure`, `wrapfigure` (from the `wrapfig` package). 479 | - `\item` within list environments and `\hline`, `\cline`, `\multirow`, `\multicol` within tables. 480 | - The following typesetting constructs: smart quotes (`' '`, `<< >>`, ` `` '' `), dashes (`--`, `---`), non-breaking space (`~`), ellipses (`\ldots` and `\textellipsis`), and `\noindent`. 481 | - The following font weight and size modifiers: `\bf`, `\textbf`, `\it`, `\textit`, `\tt`, `\texttt`, `\emph`, `\underline`, `\sout`, `\textsc`, `\tiny`, `\scriptsize`, `\small`, `\normalsize`, `\large`, `\Large`, `\LARGE`, `\huge`, `\Huge`. 482 | - `\includegraphics` from the package `graphicx`, including the Polygon-style workaround for scaling the image using `\def \htmlPixelsInCm`. 483 | - The miscellaneous commands `\url`, `\href`, `\section`, `\subsection`, and `\epigraph`. 484 | 485 | ### Markdown Environment and Supported Features 486 | 487 | Problem statements in Markdown must not include the problem name, as the judging system will automatically prepend it. 488 | Statements must also not contain scripting or reference external resources for content, such as images.
489 | Due to security concerns, it is strongly recommended to pass the compiled statement through a sanitizer. 490 | 491 | Markdown statements may use `.svg` files. Any `.svg` files must not contain scripting or references to external resources. 492 | 493 | The judging system shall provide the following commands: 494 | 495 | - {% raw %}`{{nextsample}}`{% endraw %} tells the judge system to include the next sample test case here. 496 | It is an error to use {% raw %}`{{nextsample}}`{% endraw %} when there are no remaining sample test cases. 497 | - {% raw %}`{{remainingsamples}}`{% endraw %} tells the judge system to include all sample test cases that have not previously been included by {% raw %}`{{nextsample}}`{% endraw %}. 498 | It is allowed to use {% raw %}`{{remainingsamples}}`{% endraw %} even if there are no remaining sample test cases, which will simply include nothing. 499 | - {% raw %}`{{name}}`{% endraw %} evaluates to the value of the corresponding constant, see [constants](#constants). 500 | 501 | The judging system shall support the Markdown flavor described by [CommonMark](https://commonmark.org/). 502 | However, as many implementations are not fully compliant, full compliance with CommonMark is not required. 503 | Still, a reasonable effort shall be made to ensure that CommonMark-compliant statements render correctly. 504 | 505 | Additionally, the following extensions shall be supported: 506 | 507 | - All [MathJax-supported TeX commands](https://docs.mathjax.org/en/latest/input/tex/macros/index.html) within inline (`$ $`) and display (`$$ $$`) math mode. 508 | - Tables and footnotes as described in Markdown Guide's [extended syntax](https://www.markdownguide.org/extended-syntax/). 509 | 510 | ## Attachments 511 | 512 | Public, i.e., non-secret, files to be made available in addition to the problem statement and sample test data are provided in the directory `attachments/`. 513 | 514 | ## Solution description 515 | 516 | A description of how the problem is intended to be solved is provided in the directory `solution/`. 517 | 518 | This directory must contain one file per language, for at least one language, named `solution.<language>.<filetype>`. 519 | Language is given the same way as for problem statements. 520 | Optionally, the language code can be left out; the default is then English (`en`). 521 | The set of languages used can be different from what was used for the problem statement. 522 | The filetype `<filetype>` can be either `.tex` for LaTeX files, `.md` for Markdown, or `.pdf` for PDF. 523 | 524 | Auxiliary files needed exclusively by the solution description files should all be in `solution/`. 525 | `solution.<language>.<filetype>` should reference auxiliary files as if the working directory is `solution/`. 526 | Additionally, all images in `statement/` can also be referenced as if the working directory is `statement/`. 527 | Note that if a file with the same name exists in both `statement/` and `solution/`, only the one in `solution/` can be referenced. 528 | 529 | Exactly how the solution description is used is up to the user or tooling. 530 | 531 | ## Test data 532 | 533 | The test data are provided in subdirectories of `data/`. 534 | The sample data are provided in `data/sample/` and the secret data in `data/secret/`. 535 | 536 | The `sample` directory may be omitted if a problem has no sample test cases. 537 | The `secret` directory must exist, and contain either some test cases, or some [test data groups](#test-data-groups).
539 | All files and directories associated with a single test case have the same base name with varying extensions. 540 | Here, base name is defined to be the relative path from the `data` directory to the test case input file, without extensions. 541 | For example, the files `secret/test.in` and `secret/test.ans` are associated with the same test case that has the base name `secret/test`. 542 | The existence of the `.in` file declares the existence of the test case. 543 | If the test case exists, then an associated `.ans` file must exist while the others are optional. 544 | If the test case does not exist, then the other files must not exist. 545 | Note that a test case must not be named `*/test_group`, since `test_group.yaml` would then be configuration for both the test case and test group. 546 | The table below summarizes the supported test data: 547 | 548 | Extension | Described In | Summary 549 | ------------------------------- | --------------------------------------------------- | ------- 550 | `.in` | [Input](#input) | Input piped to standard input 551 | `.ans` | [Output Validator](#output-validator) | Answer file given to the Output Validator 552 | `.files` | [Input](#input) | Input available via file I/O 553 | `.yaml` | [Test Case Configuration](#test-case-configuration) | Additional configuration of the test case 554 | `.png`, `.jpg`, `.jpeg`, `.svg` | [Illustrations](#illustrations) | Illustration of the test case 555 | 556 | Judge systems may assume that the result of running a program on a test case is deterministic. 557 | For any two test cases, if the contents of their `.in` files and `.files` directories are equivalent, 558 | as well as the `args` sequences in their `.yaml` files, then the input of the two test cases is equivalent. 559 | This means that for any two test cases, if their input, output validator arguments, and the contents of their `.ans` files are equivalent, then the test cases are equivalent. 560 | The assumption of determinism means that a judge system could choose to reuse the result of a previous run, or to re-run the equivalent test case. 561 | 562 | Test cases and [test data groups](#test-data-groups) will be used in lexicographical order on base name. 563 | It is good practice to use a numbered prefix such as `00`, `01`, `02`, `03`, and so on, to get the desired order of test cases, while keeping the file names descriptive. 564 | Remember that the numbered prefixes should be zero padded to the same length to get the expected lexicographical order. 565 | 566 | ### Test Data Configuration 567 | 568 | In `data/sample/`, `data/secret/`, and each [test data group](#test-data-groups), a YAML file `test_group.yaml` may be placed to specify how the result should be computed. 569 | 570 | The format of `test_group.yaml` is as follows: 571 | 572 | Key | Type | Default | Comments 573 | ----------------------- | ----------------------------------- | ---------------------------------------------- | -------- 574 | `scoring` | Map | [See Result Aggregation](#result-aggregation) | Description of how the results of the group test cases and subgroups should be aggregated. This key is only permitted for the `secret` group and its subgroups. 575 | `input_validator_args` | Sequence of strings or map of strings to sequences of strings | empty sequence | See [Test Case Configuration](#test-case-configuration). 576 | `output_validator_args` | Sequence of strings | empty sequence | See [Test Case Configuration](#test-case-configuration). 577 | `static_validation` | Map or boolean | false | Configuration of the group's static validation test case. See [Static Validator](#static-validator). 578 | `full_feedback` | Boolean | `false` in `secret`, `true` in `sample` | See [Test Case Configuration](#test-case-configuration).
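For illustration, a `data/secret/test_group.yaml` could look like the following (the argument values are hypothetical and depend on the problem's validators):
```yaml
input_validator_args: [--max_n, "1000"]
output_validator_args: [float_tolerance, "1e-6"]
full_feedback: false
```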
580 | ### Test Data Groups 581 | 582 | The secret data may be subdivided into test data groups. 583 | Every subdirectory of `data/secret/` is a test data group and may contain a `test_group.yaml` configuration file. 584 | `data/secret` can only have test data groups *or* test cases, never both. 585 | That is, if there are any directories under `data/secret/` there must not be any `.in` files directly in `data/secret/` and vice versa. 586 | 587 | The test groups themselves can contain directories, but not further groups. 588 | This means that there are no `test_group.yaml` files further down in the directory hierarchy. 589 | 590 | A directory must not have the same name as a test case in the same directory. 591 | For example, if the file `data/secret/group1/huge.in` exists then the directory `data/secret/group1/huge/` must not, and vice versa. 592 | 593 | Each test data group must contain at least one test case, or a static validation test case. 594 | 595 | ### Input 596 | 597 | Each test case can supply input via standard input, command-line arguments, and/or the file system. 598 | These options are not exclusive. 599 | For a test case with base name `test`, the file `test.in` is piped to the submission as standard input. 600 | The submission will be run with the `args` sequence defined in the `test.yaml` file as command-line arguments. 601 | Note that the submission's entry point, whether it be a binary or an interpreted file, will usually be the very first command-line argument. 602 | However, there exist languages, such as Java, where there is no initial command line argument representing the entry point. 603 | 604 | The directory `test.files`, if it exists, contains input files available to the submission via file I/O. 605 | All files in this directory must be copied into the submission's working directory after compiling, but before executing the submission, 606 | possibly overwriting the compiled submission file or included data in the case of name conflicts. 607 | 608 | ### Test Case Configuration 609 | 610 | One YAML file with additional configuration may be provided per test case. 611 | The file must share the base name of the associated test case. 612 | 613 | The allowed keys are defined as follows. 614 | Keys are optional unless explicitly stated. 615 | Any unknown keys should be treated as an error. 616 | 617 | Key | Type | Default 618 | --------------------- | ------------------- | ------- 619 | args | Sequence of strings | Inherited from [`test_group.yaml`](#test-data-groups), which defaults to empty sequence 620 | output_validator_args | Sequence of strings | Inherited from [`test_group.yaml`](#test-data-groups), which defaults to empty sequence 621 | input_validator_args | Sequence of strings or map of strings to sequences of strings | Inherited from [`test_group.yaml`](#test-data-groups), which defaults to empty sequence 622 | full_feedback | Boolean | Inherited from [`test_group.yaml`](#test-data-groups), which defaults to `false` in `secret` and `true` in `sample` 623 | hint | String | 624 | description | String | 625 | 626 | For each test case: 627 | - `args` defines arguments passed to the submission for this test case. 628 | - `output_validator_args` defines arguments passed to the output validator for the test case. 629 | - `input_validator_args` defines arguments passed to each input validator for the test case. 630 | If a sequence of strings, then those are the arguments that will be passed to each input validator for this test case. 631 | If a map, then each key is the name of the input validator and the value is the arguments to pass to that input validator for the test case. 632 | Validators not present in the map are run without any arguments. 633 | - When `full_feedback` is `true`, somebody whose submission didn't pass the test case should be shown: 634 | - the given input, 635 | - the produced output (stdout), 636 | - any error messages (stderr), 637 | - the illustration created by the output visualizer (if applicable), 638 | - the expected output. 639 | - A _hint_ provides feedback for solving a test case to, e.g., somebody whose submission didn't pass. 640 | - A _description_ conveys the purpose of a test case. 641 | It is an explanation of what aspect or edge case of the solution the input file is meant to test.
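For example, a hypothetical test case `data/secret/03-huge` could be accompanied by a configuration file `data/secret/03-huge.yaml` such as (all values illustrative):
```yaml
args: [--seed, "42"]
output_validator_args: [case_sensitive]
hint: Remember that the answer can overflow 32-bit integers.
description: Maximum N with all values equal, stressing integer overflow.
```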
643 | ### Illustrations 644 | 645 | An illustration provides a visualization of the associated test case, meant for the judges. 646 | At most one illustration file may be provided per test case. 647 | The file must share the base name of the associated test case. 648 | The supported file extensions are `.png`, `.jpg`, `.jpeg`, and `.svg`. 649 | 650 | ### Invalid Test Cases 651 | 652 | The `data` directory may contain directories with test cases that must be rejected by validation. 653 | Their goal is to ensure the integrity and quality of the test data and validation programs. 654 | 655 | #### Invalid Input 656 | 657 | The files under `invalid_input` are invalid inputs. 658 | Unlike in `sample` and `secret`, there are no `.ans` files. 659 | Each `tc.in` under `invalid_input` must be rejected by at least one input validator. 660 | 661 | #### Invalid Output 662 | 663 | The test cases in `invalid_output` describe invalid outputs for non-interactive problems. 664 | They consist of three files: 665 | the input file `tc.in`, which must contain valid input; 666 | the output file `tc.out`, which must fail output validation; and the answer file `tc.ans`, against which `tc.out` is validated. 667 | 668 | In particular, for any test case in `invalid_output/`, for example `invalid_output/tc`: 669 | ```bash 670 | <output_validator> tc.in tc.ans dir [arguments] < tc.ans # MUST PASS 671 | <output_validator> tc.in tc.ans dir [arguments] < tc.out # MUST FAIL 672 | ``` 673 | 674 | The directory `invalid_output` must be organized into a tree-like structure similar to `secret` and may contain arguments in `test_group.yaml` files that are passed to the validators. 675 | 676 | ### Valid Output 677 | 678 | The `data` directory may contain a directory of test cases that must pass validation. 679 | Their goal is to ensure the integrity and quality of validation programs. 680 | The test cases in `valid_output` describe valid outputs for non-interactive problems. 681 | They consist of three files: 682 | the input file `tc.in`, which must contain valid input; 683 | the output file `tc.out`, which must pass output validation; and the answer file `tc.ans`, against which `tc.out` is validated.
685 | In particular, for any test case in `valid_output/`, for example `valid_output/tc`: 686 | ```bash 687 | <output_validator> tc.in tc.ans dir [arguments] < tc.ans # MUST PASS 688 | <output_validator> tc.in tc.ans dir [arguments] < tc.out # MUST PASS 689 | ``` 690 | 691 | The directory `valid_output` must be organized into a tree-like structure similar to `secret` and may contain arguments in `test_group.yaml` files that are passed to the validators. 692 | 693 | ### Samples 694 | 695 | Sample test cases can be used in three places: 696 | - As test cases for team submissions (with feedback possibly provided to the teams). 697 | - As sample input and output displayed in the problem statement. 698 | - As sample input and output files available for download, or otherwise made available. 699 | 700 | By default, the sample data for all three cases is taken from the `.in` and `.ans` file pairs under `data/sample`. 701 | Some problems require (slightly) different data in each of these cases. 702 | We allow customizing which data is used for each purpose with the additional extensions `.statement` and `.download`. 703 | 704 | #### Samples For Judging Team Submissions 705 | 706 | The `data/sample` directory contains test cases similar to those in `data/secret`. 707 | Every submission is run on these test cases. 708 | Sample test cases do not contribute to the problem score for [scoring problems](#scoring-problems). 709 | If a `score.txt` file is produced on sample test cases on a scoring problem, it is not an error, but simply ignored. 710 | 711 | `data/sample` must not contain test groups. 712 | It may be missing (for problems with no samples) or empty. 713 | 714 | #### Samples Shown in the Problem Statement 715 | 716 | By default, the `.in` and `.ans` pairs from `data/sample` are shown in the problem statement. 717 | If a `.out` file exists, the `.out` file is shown instead of the `.ans` file in the problem statement. 718 | This behavior can be customized by creating files with extension `.in.statement` and `.ans.statement`. 719 | If one of these files exists, its contents replace those of the file with the same name -- except the `.statement` extension -- for purposes of the problem statement. 720 | Note that it is an error to provide both a `.out` and a `.ans.statement` file. 721 | 722 | ##### Interactive Problems 723 | 724 | Interactive problems require a custom output validator, which interacts with the submission. 725 | The validator gets access to the `.in` and `.ans` files for each test case and communicates with the submission by reading from standard in and writing to standard out. 726 | Standard in of the output validator corresponds to standard out of the submission, and standard out of the output validator corresponds to standard in of the submission. 727 | Therefore, the output validator can control what information from the `.in` and `.ans` files is provided to the submission. 728 | 729 | For interactive problems, displaying two files typically does not meaningfully capture how users are expected to interact with the output validator. 730 | Therefore, it is advised to instead provide samples for the problem statement in the form of a `.interaction` file. 731 | This file contains an interaction log consisting of lines starting with `<` and `>`: lines starting with `<` contain output from the output validator, and lines starting with `>` contain output from the submission. 732 | 733 | Note that if you are using a `.interaction` file you must not provide a `.in.statement`, `.ans.statement`, or `.out` file.
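For example, a hypothetical `1.interaction` file for a guess-the-number problem, where the submission guesses and the validator replies, could look like this:
```
>500
<lower
>250
<higher
>375
<correct
```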
735 | ##### Multi-Pass Problems 736 | 737 | Multi-pass problems require a custom output validator, which interacts with the submission; see [multi-pass validation](#multi-pass-validation). 738 | 739 | For multi-pass problems, displaying two files typically does not meaningfully capture how users are expected to interact with the output validator. 740 | Therefore, it is advised to instead provide samples for the problem statement in the form of a `.interaction` file. 741 | This file contains lines starting with `<` and `>`, like for interactive problems. 742 | Passes are separated by a line containing `---` (three dashes). 743 | When the problem is not interactive, simply start each pass with a number of lines starting with `<`, containing the sample input, followed by some lines starting with `>`, containing the sample output. 744 | 745 | Note that if you are using a `.interaction` file you must not provide a `.in.statement`, `.ans.statement`, or `.out` file. 746 | 747 | #### Samples Available for Download 748 | 749 | By default, the `.in`, `.ans`, and `.files` files in `data/sample` are available for download. 750 | Note that the content of `.in.statement` replaces that of `.in` and that the content of `.out` or `.ans.statement` replaces that of `.ans` for the download. 751 | This behavior can be further customized by providing files with the extension `.in.download` or `.ans.download`. 752 | If one of these files exists, its contents replace those of the file with the same name -- except the `.download` extension -- for the problem download. 753 | Additionally, any other file or directory with the extension `.download` is also available for download (without the `.download` extension). 754 | 755 | If you want to make other files -- like testing tools -- available for download, you can use [attachments](#attachments). 756 | 757 | #### Validation 758 | 759 | All `data/sample/*.in` files are input-validated. 760 | For non-interactive and non-multi-pass problems, the `.out` files must pass the output validator. 761 | For non-interactive and non-multi-pass problems, the `.ans` files must pass the output validator if they are not overridden in any way, i.e., if they are shown in the statement. 762 | All other files are not validated in any way. 763 | 764 | Note that `.ans.statement` and `.out` can both be used to change what is shown in the statement. 765 | However, since only the `.out` files are validated, it is advised to use these if possible. 766 | 767 | Validation can be customized by specifying `input_validator_args` and `output_validator_args` in `data/sample/test_group.yaml`. 768 | 769 | 770 | ## Generators 771 | 772 | If any generator scripts were used to automate writing test cases, 773 | it is recommended to include the generator source code in the directory `generators/` along with invocation instructions in a file such as `generators/README.txt`. 774 | This information can be useful as a debugging aid and for archival completeness: 775 | judge systems are not responsible for executing the provided generators, and all test data written by the generators must be included in the problem package. 776 | 777 | ## Included Files 778 | 779 | Files that should be included with all submissions are provided in one non-empty directory per supported language. 780 | Files that should be included for all languages are placed in the non-empty directory `include/default/`.
781 | Files that should be included for a specific language, overriding the default, are provided in the non-empty directory `include/<language>/`, where `<language>` is a language code as given in the [languages table](../appendix/languages.md). 782 | 783 | The files should be copied from the language directory matching the language of the submission 784 | to the submission files before compiling, but after checking whether the submission exceeds the code limit, 785 | overwriting files from the submission in the case of name collision. 786 | The language must be one of the allowed submission languages as specified by `languages` in `problem.yaml`. 787 | If any of the included files are supposed to be the main file (i.e., a driver), 788 | that file must have the language-dependent name as given in the table referred to above. 789 | 790 | ## Example Submissions 791 | 792 | Correct and incorrect solutions (file or directory programs) to the problem are provided in direct subdirectories of `submissions/`. 793 | By default, the possible subdirectories are as in the table below, but they can be customized and more can be added; see [Default directories](#default-directories). 794 | Submission programs (either a single file or a directory of files) **must** be placed in a direct subdirectory of `submissions`, e.g., `submissions/accepted/`. 795 | 796 | Directory | Requirement | Comment 797 | --------------------- | ------------------------------------------------------------------------------------- | ------- 798 | `accepted` | Accepted as a correct solution for all test cases. | At least one is required. Used to lower bound the time limit. 799 | `rejected` | At least one case is not accepted. | 800 | `wrong_answer` | At least one case is wrong answer, and all cases are either wrong answer or accepted. | Used to lower bound the time limit. 801 | `time_limit_exceeded` | Too slow on at least one case, and all cases are either too slow or accepted. | Used to upper bound the time limit. 802 | `run_time_error` | Crashes for at least one case, and all cases either crash or are accepted. | Used to lower bound the time limit. 803 | `brute_force` | Never gives the wrong answer, but not accepted because of run time error or timeout. | 804 | 805 | Every file or directory in these directories represents a separate solution. 806 | It is mandatory to provide at least one accepted solution. 807 | 808 | Metadata about the example submissions is provided in a YAML file `submissions/submissions.yaml`. 809 | The top-level keys in `submissions.yaml` are glob patterns matching files or directories under `submissions/`. 810 | For example, `accepted` and `accepted/*` match all submissions in the `submissions/accepted/` directory. 811 | See also [Glob patterns](#glob-patterns). 812 | 813 | Each glob pattern maps to a map with keys as defined below, specifying metadata for all submissions that are matched by the glob pattern. 814 | 815 | Key | Type | Default | Comment 816 | -------------------- | -------------------------------- | ------------------------------------------------------------------------------------ | ------- 817 | `language` | String | As determined by file endings given in the [language list](../appendix/languages.md) | 818 | `entrypoint` | String | As specified in the [language list](../appendix/languages.md) | 819 | `authors` | Person or sequence of persons | | Author(s) of submission(s). 820 | `model_solution` | Bool | false | Suggested model solution, suitable to be published. 821 | `permitted` | Sequence of strings | `[AC, WA, TLE, RTE]` | All test cases must have a verdict in this subset of `AC`, `WA`, `TLE`, `RTE`. 822 | `required` | Sequence of strings | `[AC, WA, TLE, RTE]` | At least one test case must have a verdict in this subset of `AC`, `WA`, `TLE`, `RTE`. 823 | `score` | Float or sequence of two floats | | The score of the submission equals the given number, or is in the given inclusive range. Only for scoring problems. 824 | `message` | String | Empty string | This must appear as a substring in at least one `judgemessage.txt`. 825 | `use_for_time_limit` | Bool or string (`lower`/`upper`) | See below. | Controls whether this submission is used to determine the time limit.
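Putting these keys together, a `submissions.yaml` could contain entries such as the following (file names and author are illustrative):
```yaml
accepted/solution.py:
  authors: Author One
  model_solution: true
wrong_answer/overflow.cpp:
  authors: Author One
  permitted: [AC, WA]
  required: [WA]
```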
827 | Every submission matched by the glob pattern must satisfy: 828 | - all test cases must have only verdicts present in `permitted`; 829 | - at least one test case must have a verdict in `required`; 830 | - if given, the score must be in the given inclusive range or equal the given score; 831 | - if given, the `message` string must be included as a case-sensitive substring in the `judgemessage.txt` for at least one test case. 832 | 833 | The tooling should check the constraints for consistency, 834 | such as that two disjoint `permitted` sets are never applied to a single `(submission, testcase)` pair. 835 | 836 | ### Groups 837 | 838 | The `permitted`, `required`, `score`, `message`, and `use_for_time_limit` requirements can also be given for only a subset of test cases, 839 | by adding them under a key with the name of a test group (relative to `data/`). 840 | In this case, the `permitted`, `required`, `message`, and `use_for_time_limit` keys only apply to the set of test cases (recursively) in the given group. 841 | 842 | The `score` key puts a constraint on the aggregated score of a given test group, _not_ on the _set_ of test cases the group contains. 843 | 844 | For example, the configuration below tests that the submission solves all cases in `group1`, but times out on at least one case in `group2`. 845 | ```yaml 846 | solves_group_1.py: 847 | sample: 848 | permitted: [AC] 849 | secret/group1: 850 | permitted: [AC] 851 | secret/group2: 852 | permitted: [AC, TLE] 853 | required: [TLE] 854 | ``` 855 | 856 | #### Glob patterns 857 | 858 | Glob patterns can be used to apply restrictions to a subset of submissions. It is also possible to use glob patterns to put restrictions on a subset of test 859 | cases and test groups, for example, when test groups are not used: 860 | ```yaml 861 | time_limit_exceeded/solves_easy_cases.py: 862 | sample: 863 | permitted: [AC] 864 | secret/*-easy: 865 | permitted: [AC] 866 | secret/*-hard: 867 | permitted: [AC, TLE] 868 | required: [TLE] 869 | ``` 870 | This means that the submission must solve all samples and all easy cases, 871 | but must time out on at least one of the hard cases. 872 | 873 | Submission glob patterns are matched against all paths to files and directories of submissions inside and relative to the `submissions/` directory. 874 | Test case glob patterns are matched against all paths of test groups and test cases relative to `data/`, 875 | excluding the trailing `.in`. Wildcards (`*`) only match within a file name (i.e., do not match `/`). 876 | A test case is matched by the glob pattern if either itself or any of its parent test groups is matched by it, 877 | and similarly a submission is matched if either itself or a parent directory is matched.
878 |
879 | Using `**` to match any number of directories and `[xyz]` to match only a subset of characters is not supported.
880 | Brace expansion _is_ supported for both submissions and test cases.
881 | Thus, one can write `{simple,complex}.py` or `author.{py,cpp}` to match multiple files.
882 |
883 | ### Default directories
884 |
885 | By default, the following requirements are defined:
886 | ```yaml
887 | # All cases must be accepted.
888 | accepted:
889 |   permitted: [AC]
890 | # At least one case is not accepted.
891 | rejected:
892 |   required: [RTE, TLE, WA]
893 | # All cases AC or WA, at least one WA.
894 | wrong_answer:
895 |   permitted: [AC, WA]
896 |   required: [WA]
897 | # All cases AC or TLE, at least one TLE.
898 | time_limit_exceeded:
899 |   permitted: [AC, TLE]
900 |   required: [TLE]
901 | # All cases AC or RTE, at least one RTE.
902 | run_time_error:
903 |   permitted: [AC, RTE]
904 |   required: [RTE]
905 | # Must not WA, but fail at least once.
906 | # Note that by default these are not used for determining the time limit.
907 | brute_force:
908 |   permitted: [AC, RTE, TLE]
909 |   required: [RTE, TLE]
910 | ```
911 |
912 | The defaults can be overwritten in the `submissions.yaml` file by simply specifying the name of the directory.
913 | Keys that are not specified are inherited from the default configuration above.
914 | This is supported for backwards compatibility and is not recommended for normal usage.
915 |
916 | ```yaml
917 | time_limit_exceeded:
918 |   permitted: [AC, WA, TLE]
919 |   required: [TLE]
920 | ```
921 |
922 | Note that the glob `time_limit_exceeded/*` would impose an _additional_ requirement, instead of _replacing_ the original requirement.
923 |
924 | ### Timelimit inference
925 |
926 | Any submission that must satisfy a `required: [TLE]` requirement,
927 | i.e., must `TLE` on at least one test case, is used to provide an _upper bound_ on the time limit.
928 | Precisely, the time limit must be at most `T / time_limit_to_tle`,
929 | where `T` is the slowest runtime over the set of test cases to which the rule applies.
930 | Note that this excludes submissions that, e.g., have `required: [TLE, RTE]`.
931 |
932 | Any submission that is not permitted to get `TLE` at all (on some subset of cases),
933 | i.e., must satisfy a `permitted:` rule that does not contain `TLE`,
934 | is used to provide a _lower bound_ on the time limit.
935 | Precisely, the time limit must be at least `T * ac_to_time_limit`,
936 | where `T` is the slowest runtime over the set of test cases to which the rule applies.
937 |
938 | To _opt out_ of a (set of) submission(s), and optionally a subset of test cases,
939 | from influencing the time limit,
940 | set `use_for_time_limit: false` alongside the `permitted:` and/or `required:` keys
941 | that satisfy the constraints above.
942 | Note that this means that if you want to exclude a submission completely,
943 | then you must add `use_for_time_limit: false` to every glob that matches that
944 | submission and would otherwise include it for determining the time limit.
945 |
946 | To explicitly _opt in_ a (set of) submission(s) to be used for determining the time limit,
947 | use `use_for_time_limit: lower` and `use_for_time_limit: upper`.
948 | The first is equivalent to a `permitted: [AC, WA, RTE]` constraint,
949 | and the second to a `required: [TLE]` constraint.
950 | The system may warn when this makes other constraints redundant and should error when it is inconsistent with other constraints.
951 |
952 | It is required that at least one submission is used to lower bound the time limit.
953 |
954 | ## Input Validators
955 |
956 | Input validators, verifying the correctness of the input files, are provided in `input_validators/`.
957 | Input validators can be specified as [VIVA](http://viva.vanb.org/)-files (with file ending `.viva`),
958 | [Checktestdata](https://github.com/DOMjudge/checktestdata)-files (with file ending `.ctd`),
959 | or as a program (as specified [above](#programs)).
960 |
961 | All input validators provided will be run on every input file.
962 | Validation fails if any validator fails.
963 |
964 | ### Invocation {#input-validators-invocation}
965 |
966 | An input validator program must be an application (executable or interpreted) capable of being invoked with a command line call.
967 |
968 | All input validators provided will be run on every test data file using the arguments specified for the test data group they are part of.
969 | Validation fails if any validator fails.
970 |
971 | When invoked, the input validator will get the input file on stdin.
972 |
973 | It must be possible to use the validator as follows on the command line:
974 | ```sh
975 | [arguments] < inputfile
976 | ```
977 |
978 | Here, `arguments` is the value of `input_validator_args`.
979 |
980 | ### Output
981 |
982 | The input validator may output debug information on stdout and stderr.
983 | This information may be displayed to the user upon invocation of the validator.
984 |
985 | ### Exit codes
986 |
987 | The input validator must exit with code 42 on successful validation.
988 | Any other exit code means that the input file could not be confirmed as valid.
989 |
990 | #### Dependencies
991 |
992 | The validator **must not** read any files outside those defined in the Invocation section.
993 | Its result **must** depend only on these files and the arguments.
994 |
995 | ## Static Validator
996 |
997 | ### Overview
998 |
999 | A static validator is a program that is given the submission files as input and can analyze the contents to accept or reject the submission.
1000 | Optionally, the static validator may assign a score to the submission for each validation test case.
1001 | By default there is no static validator.
1002 | A static validator may be provided under the `static_validator` directory, similar to a custom output validator.
1003 |
1004 | ### Static Validation Test Cases
1005 |
1006 | Each test group may define a static validation test case.
1007 | It is an error to define static validation test cases without providing a static validator.
1008 | A static validation test case is defined within a group's `test_group.yaml` file by specifying the key `static_validation`.
1009 | If a map is specified, its allowed keys are:
1010 | - `args`, which maps to a string representing the additional arguments passed to the static validator in this group's static validation test case;
1011 | - `score`, the maximum score of the static validation test case (see [Scoring Problems](#scoring-problems) for details).
1012 |
1013 | The `static_validation` key can also have the value `false`, meaning there is no static validation, or `true`, meaning that static validation is enabled with no additional arguments and an unspecified maximum score (to be determined by [maximum score inference](#maximum-score-inference)).
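For example, a group could define a static validation test case in its `test_group.yaml` as follows (the argument string and score value are hypothetical):
```yaml
# Sketch of a static validation test case in a group's test_group.yaml.
static_validation:
  args: "--max-source-lines 50"
  score: 10
```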
1014 |
1015 | It is an error to:
1016 | - provide a static validator for `submit-answer` type problems,
1017 | - specify a `score` in a test group with `pass-fail` aggregation or in a problem that does not have the type `scoring`,
1018 | - *not* specify a `score` in a test group that has `sum` or `min` aggregation.
1019 |
1020 | ### Invocation {#static-validator-invocation}
1021 |
1022 | When invoked, the static validator will be passed at least three command line parameters.
1023 |
1024 | It must be possible to use the validator as follows on the command line:
1025 | ```sh
1026 | language entry_point feedback_dir [additional_arguments]
1027 | ```
1028 |
1029 | The meanings of the parameters listed above are:
1030 |
1031 | - language:
1032 |   a string specifying the code of the language of the submission as shown in the [languages table](../appendix/languages.md). A static validator must handle all of the programming languages specified in the `languages` key of `problem.yaml`.
1033 |
1034 | - entry_point:
1035 |   a string specifying the entry point, that is, a filename, class name, or some other identifier, which the static validator should know how to use depending on the language of the submission.
1036 |
1037 | - feedback_dir:
1038 |   a string which specifies the name of a "feedback directory" in which the validator can produce "feedback files" in order to report additional information on the validation of the submission.
1039 |   The `feedback_dir` must end with a path separator (typically '/' or '\\' depending on operating system),
1040 |   so that simply appending a filename to `feedback_dir` gives the path to a file in the feedback directory.
1041 |
1042 | - additional_arguments:
1043 |   in case the static validation test case specifies additional `args`, these are passed as additional arguments to the validator on the command line.
1044 |
1045 | The static validator follows the semantics of an output validator for [reporting a judgement](#reporting-a-judgement).
1046 |
1047 | ## Output Validator
1048 |
1049 | ### Overview
1050 |
1051 | Output validators are [programs](#programs) used to check that the output of a submission on a test case is correct.
1052 | A trivial output validator could check that the submission output is equal to the answer file.
1053 | The [default validator](#default-output-validator-specification) does essentially this,
1054 | and supports some other commonly useful options.
1055 |
1056 | For problems that require more complex checks, you can create a custom output validator
1057 | and provide it as a program (as specified [above](#programs)) in the directory `output_validator/`.
1058 | If no custom output validator is specified, the default validator is used.
1059 |
1060 | The subsections below explain how a (default or custom) output validator must be
1061 | [invoked](#output-validator-invocation) and how it must [report a judgement](#reporting-a-judgement)
1062 | and optionally [report additional feedback](#reporting-additional-feedback).
1063 |
1064 | ### Default Output Validator Specification
1065 |
1066 | The default output validator is essentially a beefed-up diff that can be used in the common case where the output validator only needs to compare the output of a submitted program against the output of a trusted judge reference solution.
1067 | The default output validator supports the following command-line arguments:
1068 |
1069 | Arguments                    | Description
1070 | ---------------------------- | -----------
1071 | `case_sensitive`             | indicates that comparisons should be case-sensitive (see below for details).
1072 | `space_change_sensitive`     | indicates that differences in type or amount of whitespace should be rejected (see below for details).
1073 | `float_relative_tolerance ε` | indicates that floating-point tokens should be accepted if they are within relative error ≤ ε (see below for details).
1074 | `float_absolute_tolerance ε` | indicates that floating-point tokens should be accepted if they are within absolute error ≤ ε (see below for details).
1075 | `float_tolerance ε`          | shorthand for applying ε as both relative and absolute tolerance.
1076 |
1077 | #### Output Parsing
1078 |
1079 | The default output validator parses the submission output and answer files as strings of single-byte characters and tokenizes the files by splitting on sequences of one or more consecutive whitespace characters.
1080 | Whitespace characters are: space (`0x20`), form feed (`0x0c`), line feed (`0x0a`), carriage return (`0x0d`), horizontal tab (`0x09`), and vertical tab (`0x0b`).
1081 | In its default mode, the default output validator then ignores the whitespace and compares the submission output and answer files token by token.
1082 | If there is a disagreement in the number of tokens in the two files, the validator rejects the submission.
1083 | The validator may also reject any submission output that is unreasonably large (including due to containing unreasonable amounts of whitespace).
1084 |
1085 | #### Comparing Floating-Point Tokens
1086 |
1087 | If a floating-point tolerance has been set, the default output validator will attempt to parse each answer file token as a floating-point number (see [general requirements](#general-requirements) for details).
1088 | For each success, the token is compared to the submission output token using the following floating-point semantics.
1089 | (If no floating-point tolerance has been set, floating-point tokens are [compared as strings](#comparing-string-tokens).)
1090 |
1091 | If the submission output token cannot be parsed as floating-point, the validator rejects the submission as incorrect.
1092 | Otherwise, if `s` is the submission output floating-point value and `a` is the answer file value:
1093 | - if an absolute tolerance `ε` has been set, the token is accepted if `|s-a| ≤ ε`;
1094 | - if a relative tolerance `ε` has been set, the token is accepted if `|s-a| ≤ ε|a|`;
1095 | - when supplying both a relative and an absolute tolerance, the token is accepted if it is within either of the two tolerances.
1096 |
1097 | It is an error to provide any of the `float_relative_tolerance`, `float_absolute_tolerance`, or `float_tolerance` arguments more than once, or to provide a `float_tolerance` alongside `float_relative_tolerance` and/or `float_absolute_tolerance`.
1098 |
1099 | Note that if a floating-point tolerance has been set, the default output validator will parse exact integers as floating-point and apply the above semantics to them.
1100 | For problems containing a mix of integer and floating-point output, a custom output validator must be used if exact comparison of the integer tokens is required.
1101 |
1102 | #### Comparing String Tokens
1103 |
1104 | If `case_sensitive` is specified, the two tokens must match exactly, i.e., consist of exactly the same byte sequence.
1105 |
1106 | Otherwise, the submitted output token is accepted if it matches the answer file token up to case. The default output validator treats uppercase ASCII letters `A`--`Z` as equivalent to their lowercase counterparts.
1107 |
1108 | #### Whitespace Sensitivity
1109 |
1110 | By default, whitespace in the submission output is only used for tokenization and is otherwise ignored.
1111 | As a consequence, the default output validator is lenient with regard to leading and trailing whitespace,
1112 | and it treats any sequence of one or more whitespace characters in between tokens as equivalent to any other such sequence.
1113 |
1114 | If the `space_change_sensitive` argument is set, the default output validator will instead reject any submission output whose whitespace sequences (including leading and trailing) differ from those in the answer file in type or amount.
1115 |
1116 | ### Invocation {#output-validator-invocation}
1117 |
1118 | The output validator must support being invoked as:
1119 | ```sh
1120 | input_file answer_file feedback_dir [additional_arguments] < team_output [ > team_input ]
1121 | ```
1122 |
1123 | The meanings of the parameters listed above are:
1124 |
1125 | - input_file:
1126 |   a string specifying the name of the `.in` [input data file](#test-data) given on standard input to the program whose results are being validated.
1127 |
1128 | - answer_file:
1129 |   a string specifying the name of the `.ans` [answer file](#test-data).
1130 |   The answer file may, but is not necessarily required to, contain the "correct answer" for the problem.
1131 |   For example, it might contain the output that was produced by a judge's solution for the problem when run with `input_file` as input.
1132 |   Alternatively, the "answer file" might contain information, in some arbitrary format, which instructs the validator in some way about how to accomplish its task.
1133 |   Note in particular that the output validator does not have direct access to the `.files` directory or the `.yaml` file containing the `args` used for the test.
1134 |   If this information is needed by the validator, it must be added to the "answer file".
1135 |
1136 | - feedback_dir:
1137 |   a string which specifies the name of a "feedback directory" in which the validator can produce "feedback files" in order to report additional information on the validation of the output file.
1138 |   The `feedback_dir` must end with a path separator (typically '/' or '\\' depending on operating system),
1139 |   so that simply appending a filename to `feedback_dir` gives the path to a file in the feedback directory.
1140 |
1141 | - additional_arguments:
1142 |   in case `output_validator_args` are specified for the test case, these are passed as additional arguments to the validator on the command line.
1143 |
1144 | - team_output:
1145 |   the output produced by the program being validated is given on the validator's standard input.
1146 |
1147 | - team_input:
1148 |   when running the validator in interactive mode, everything written on the validator's standard output is given to the program being validated.
1149 |   Please note that when running interactively, the program will only receive the output produced by the validator and will not have direct access to the input file.
1150 |
1151 | The two files named by `input_file` and `answer_file` must exist (though they are allowed to be empty) and the validator program must be allowed to open them for reading.
1152 | The directory named by `feedback_dir` must also exist and the validator program must be allowed to create and write to new and existing files there.
1153 |
1154 | ### Reporting a judgement
1155 |
1156 | A validator program must report its judgement by exiting with specific exit codes:
1157 |
1158 | - If the output is a correct output for the input file (i.e., the submission that produced the output is to be Accepted),
1159 |   the validator must exit with exit code 42.
1160 | - If the output is incorrect (i.e., the submission that produced the output is to be judged as Wrong Answer),
1161 |   the validator must exit with exit code 43.
1162 |
1163 | Any other exit code, **including 0**, indicates that the validator did not operate properly,
1164 | and the judging system invoking the validator must take measures to report this to contest personnel.
1165 | The purpose of these somewhat exotic exit codes is to avoid conflict with other exit codes that result when the validator crashes.
1166 | For instance, if the validator is written in Java, any unhandled exception results in the program crashing with an exit code of 1,
1167 | making it unsuitable to assign a judgement meaning to this exit code.
1168 |
1169 | ### Reporting Additional Feedback
1170 |
1171 | The purpose of the feedback directory is to allow the validator program to report more information to the judging system than just the accept/reject verdict.
1172 | Using the feedback directory is optional for a validator program, so if one just wants to write a bare-bones minimal validator, it can be ignored.
1173 |
1174 | The validator is free to create different files in the feedback directory,
1175 | in order to provide different kinds of information to the judging system, in a simple but organized way.
1176 | The following files have special meaning and are described below:
1177 |
1178 | - `nextpass.in` may be present in a [multi-pass](#multi-pass-validation) problem to indicate that another pass follows
1179 | - `score.txt` may be present in a [scoring](#scoring-test-cases) problem
1180 | - `score_multiplier.txt` may be present in a [scoring](#scoring-test-cases) problem
1181 | - `judgemessage.txt` may contain feedback for the judges
1182 | - `teammessage.txt` may contain feedback for the team
1183 | - `judgeimage.<ext>` may contain graphical feedback for the judges
1184 | - `teamimage.<ext>` may contain graphical feedback for the team
1185 |
1186 | The validator may create a `score.txt`, a `score_multiplier.txt`, or neither, but it must not create both.
1187 | This applies over all invocations.
1188 | That is, if a validator ever creates a `score.txt`, then it must never create a `score_multiplier.txt`, and vice versa.
1189 |
1190 | The contents of a `judgemessage.txt` give a message that is presented to a judge reviewing the current submission
1191 | (typically used to help the judge verify why the submission was judged as incorrect, by specifying exactly what was wrong with its output).
1192 | Other examples of files that may be useful in some contexts (though not in the ICPC) are a `score.txt` file,
1193 | giving the submission a score based on other factors than correctness,
1194 | or a `teammessage.txt` file, giving a message to the team that submitted the solution, providing additional feedback on the submission.
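To make the exit-code and feedback conventions concrete, here is a minimal sketch of a custom output validator in Python. The token-by-token comparison is illustrative only; the argument order and exit codes follow the invocation and judgement rules above.
```python
#!/usr/bin/env python3
"""Minimal custom output validator sketch (illustrative, not normative)."""
import sys

# Invoked as: input_file answer_file feedback_dir [additional_arguments] < team_output
input_file, answer_file, feedback_dir = sys.argv[1], sys.argv[2], sys.argv[3]

with open(answer_file) as f:
    expected = f.read().split()
actual = sys.stdin.read().split()

if actual == expected:
    sys.exit(42)  # correct output: the submission is Accepted

# Explain the rejection to a human judge via judgemessage.txt.
# feedback_dir ends with a path separator, so concatenation suffices.
with open(feedback_dir + "judgemessage.txt", "w") as f:
    f.write("Expected {!r}, got {!r}\n".format(expected, actual))
sys.exit(43)  # incorrect output: Wrong Answer
```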
1195 |
1196 | A judging system that implements this format must support the `judgemessage.txt` file described above
1197 | (i.e., the content of the `judgemessage.txt` file, if produced by the validator, must be provided by the judging system to a human judge examining the submission).
1198 | Having the judging system support other files is optional.
1199 |
1200 | The validator may create one or more image files in the feedback directory with the name `teamimage.<ext>` and/or `judgeimage.<ext>`, where `<ext>` is one of: `png`, `jpg`, `jpeg`, or `svg`.
1201 | The [output visualizer](#output-visualizer) may modify or create these files as well,
1202 | and the output validator may create files in the feedback directory containing metadata that helps the visualizer in this task.
1203 | The intent is for the `teamimage` to be displayed to teams and for the `judgeimage` to be used as a debugging aid by judges, but the judging system may display or ignore these files as it sees fit.
1204 |
1205 | Note that a validator may choose to ignore the feedback directory entirely.
1206 | In particular, the judging system must not assume that the validator program creates any files there at all.
1207 |
1208 | #### Multi-pass validation
1209 |
1210 | A multi-pass validator can be used for problems that should run the submission multiple times sequentially,
1211 | using a new input generated by the output validator during the previous invocation of the submission.
1212 |
1213 | The time and memory limits apply to each invocation separately.
1214 |
1215 | To signal that the submission should be run again, the output validator must exit with code 42 and output the new input in the file `nextpass.in` in the feedback directory.
1216 | Judging stops if no `nextpass.in` was created or the output validator exited with any other code.
1217 | Note that the `nextpass.in` will be removed before the next pass.
1218 |
1219 | It is a judge error to create the `nextpass.in` file and exit with any code other than 42.
1220 | It is a judge error to run more passes than specified by the `limits.validation_passes` value in `problem.yaml`.
1221 |
1222 | All other files inside the feedback directory are guaranteed to persist between passes.
1223 | In particular, the validator should only append text to the `judgemessage.txt` to provide combined feedback for all passes.
1224 |
1225 | #### Examples
1226 |
1227 | An example of a `judgemessage.txt` file:
1228 | ```text
1229 | Team failed at test case 14.
1230 | Team output: "31", Judge answer: "30".
1231 | Team failed at test case 18.
1232 | Team output: "hovercraft", Judge answer: "7".
1233 | Summary: 2 test cases failed.
1234 | ```
1235 |
1236 | An example of a `teammessage.txt` file:
1237 | ```text
1238 | Almost all test cases failed — are you even trying to solve the problem?
1239 | ```
1240 |
1241 | #### Validator standard error
1242 |
1243 | A validator program is allowed to write any kind of debug information to its standard error pipe.
1244 | This information may be displayed to the user upon invocation of the validator.
1245 |
1246 | ## Input Visualizer
1247 |
1248 | If a tool was used to automate the creation of test case illustration annotations,
1249 | it is recommended to include the input visualizer source code in the directory `input_visualizer/`, along with invocation instructions in a file such as `input_visualizer/README.txt`.
1250 |
1251 | ## Output Visualizer
1252 |
1253 | An output visualizer is an optional [program](#programs) that is run after every invocation of the output validator in order to generate images illustrating the submission output.
1254 | A visualizer program must be an application (executable or interpreted) capable of being invoked with a command line call. It is invoked using the same arguments as the output validator.
1255 | It must be provided as a program (as specified [above](#programs)) in the directory `output_visualizer/`.
1256 |
1257 | All files written to the feedback directory by the output validator are accessible to the visualizer.
1258 | The visualizer may overwrite or create image files in the feedback directory with the name `teamimage.<ext>` or `judgeimage.<ext>`, where `<ext>` is one of: `png`, `jpg`, `jpeg`, or `svg`.
1259 | It must not write to `score.txt`, `teammessage.txt`, or any other files in the feedback directory other than those of the form `teamimage.<ext>` or `judgeimage.<ext>`.
1260 |
1261 | Compile or run-time errors in the visualizer are not judge errors. The return value and any data written by the visualizer to standard error or standard output are ignored.
1262 |
1263 | ## Result Aggregation
1264 |
1265 | ### Pass-Fail Problems
1266 |
1267 | For pass-fail problems, a submission is accepted if and only if every test case in `sample`, `secret`, and any [test data groups](#test-data-groups) is accepted.
1268 |
1269 | ### Scoring Problems
1270 |
1271 | For scoring problems, submissions are given a non-negative score instead of a verdict.
1272 | The goal of each submission is to maximize this score.
1273 |
1274 | Given a submission, scores are determined for test cases, [test data groups](#test-data-groups), and `secret` (which is the score of the submission itself).
1275 | The scoring behavior is configured for `secret` and each test data group by the following arguments in the `scoring` dictionary of its `test_group.yaml`:
1276 |
1277 | Key            | Type                          | Description
1278 | -------------- | ----------------------------- | -----------
1279 | `score`        | Integer or `unbounded`        | The maximum possible score of the test data group. Must be a non-negative integer or `unbounded`.
1280 | `aggregation`  | `pass-fail`, `sum`, or `min`  | How the score is determined based on the scores of the contained groups or test cases. See below.
1281 | `require_pass` | String or sequence of strings | Other test data groups whose test cases a submission must pass in order to receive a score for this test group. See below.
1282 |
1283 | The default value of `aggregation` is `sum` for `secret` and `pass-fail` for test data groups.
1284 | The default value of `require_pass` is an empty sequence.
1285 |
1286 | For `secret`, all test data groups, and every test case in a group with `sum` or `min` aggregation, there is a maximum possible score.
1287 | The default value of `score` for `secret` is 100.
1288 | The default value of `score` for test data groups is `unbounded`.
1289 | Test data groups may only have an `unbounded` maximum score if `secret` is unbounded.
1290 |
1291 | The maximum score of a test case in a group with `min` aggregation is `score`.
1292 | The maximum score of a test case in a group with `sum` aggregation is `(score - static_score) / N`,
1293 | where `static_score` is the score of the static validation test case if it exists, and 0 if not,
1294 | and `N` is the number of test cases in the group.
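As a worked example (with a hypothetical group): if a group has `score: 100` with `sum` aggregation, a static validation test case with `score: 10`, and 3 test cases, then `static_score = 10`, `N = 3`, and each test case has maximum score `(100 - 10) / 3 = 30`:
```yaml
# Hypothetical secret/group1/test_group.yaml
scoring:
  score: 100
  aggregation: sum
static_validation:
  score: 10
# With 3 test cases in the group, each has maximum score (100 - 10) / 3 = 30.
```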
1295 |
1296 | #### Scoring Test Cases
1297 |
1298 | Only test cases in test data groups with `sum` or `min` aggregation receive a score.
1299 |
1300 | The score of a failed test case is always 0.
1301 |
1302 | A custom output validator or static validator may produce a `score.txt` or `score_multiplier.txt` file for an accepted test case:
1303 |
1304 | - for test cases with bounded maximum score, `score_multiplier.txt`, if produced, must contain a single floating-point number in the range `[0,1]`.
1305 |   The score of the test case is this number *multiplied* by the test case maximum score.
1306 | - for test cases with bounded maximum score, `score.txt`, if produced, must contain a single non-negative floating-point number.
1307 |   The score of the test case is that number.
1308 | - for test cases with bounded maximum score, if no `score_multiplier.txt` or `score.txt` is produced, the test case score is its maximum score.
1309 | - for test cases with unbounded maximum score, `score.txt` must be produced and must contain a non-negative floating-point number.
1310 |   The score of the test case is that number.
1311 |
1312 | It is a judge error if:
1313 |
1314 | - an output or static validator accepts a test case in an unbounded group and does not produce a `score.txt`;
1315 | - an output or static validator does not accept a test case, but does produce a `score.txt` or a `score_multiplier.txt`;
1316 | - an output or static validator produces a `score_multiplier.txt` for a test case with unbounded maximum score;
1317 | - an output or static validator produces both a `score.txt` and a `score_multiplier.txt` for a test case;
1318 | - an output or static validator produces a `score.txt` or `score_multiplier.txt` for a test case in a group with `pass-fail` aggregation;
1319 | - an output or static validator produces a `score.txt` or `score_multiplier.txt` with invalid contents.
1320 |
1321 | #### Scoring Test Groups
1322 |
1323 | The score of `secret` is determined by its groups or test cases (it can only have one or the other).
1324 | The score of a test group is determined by its test cases.
1325 | The score depends on the aggregation mode, which is either `pass-fail`, `sum`, or `min`.
1326 |
1327 | - If a group uses `pass-fail` aggregation, the group must have a bounded maximum score.
1328 |   If the submission receives an accepted verdict for all test cases in the group,
1329 |   the score of the group is equal to its maximum possible score.
1330 |   Otherwise the group score is 0.
1331 | - If a group uses `sum` aggregation, the group score is the sum of the scores of its test cases or groups.
1332 | - If a group uses `min` aggregation, then the group score is the minimum of these scores.
1333 |
1334 | The submission score is the score of the `secret` group.
1335 |
1336 | It is a judge error if the score of any group or subgroup exceeds its maximum score.
1337 |
1338 | #### Required Dependent Groups
1339 |
1340 | A group may specify that it depends on some other test data groups.
1341 | Each required group must be either `sample` or have `pass-fail` aggregation.
1342 | The dependent group will only be run if each group it depends on receives an accepted verdict for all test cases in that group.
1343 | If the dependent group is not run, the group score is 0.
1344 |
1345 | The paths of these required groups, relative to the `data` folder, are listed under the `require_pass` key.
1346 | A path consists of zero or more directory names followed by a directory or file name, with a `/` character separating consecutive names. Each name must conform to the [general requirements](#general-requirements) on directory and file names, and the specified test data group must exist.
1347 |
1348 | The path of a group, relative to the `data/` folder, must come later lexicographically than the paths of all test cases and groups it depends on.
1349 | --------------------------------------------------------------------------------
/spec/changelog.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: Changelog
4 | sort: 1
5 | ---
6 |
7 | # Changelog
8 |
9 | ## Version 2023-07-draft
10 |
11 | - Removed the scoring objective; it is now always "maximize".
12 | - Removed `scoring` keyword from `problem.yaml`.
13 | - Python 3 is now used for `.py` files; for backward compatibility
14 |   `.py2` can still be used for Python 2.
15 | - Clarify various things: sample files for interactive problems,
16 |   `testdata.yaml` inheritance.
17 | - Updated the CC BY-SA license key to mean version 4.0.
18 | - Allow either a build or run script to be present.
19 | - Change specification of time limit multipliers and allow
20 |   explicitly specifying a problem time limit.
21 | - Add a multi-pass problem type.
22 | - Support invalid testdata that validators must fail on.
23 | - Only allow a single output validator, remove `validator_flags` from
24 |   `problem.yaml`, update `{input,output}_validator_flags` in
25 |   `testdata.yaml`.
26 | - Make `name` required and allow a map from language code to name in that language.
27 | - Add `uuid` to `problem.yaml`.
28 | - Add `languages` to `problem.yaml`.
29 | - Add support for Markdown problem statements.
30 | - Change `languages:` and `keywords:` in `problem.yaml` to be lists of strings
31 |   rather than a string of space separated words.
32 | - Clarified when the code limit is applied in the case of included code.
33 | - Make `uuid` required in `problem.yaml`.
34 | - Add `/submissions/rejected` directory.
35 | - Clarify working directories for submissions and validators.
36 | - Add `allow_writing_files` to `problem.yaml`.
37 | - Rename `testdata.yaml` to `test_group.yaml`.
38 |
39 | ## Legacy version (changes since the beginning of 2021)
40 |
41 | - Removed `libraries` keyword from `problem.yaml`.
42 | - Add specification of `.interaction` files.
43 | - Clarify directory or zip file name.
44 | - Fix typos and broken links.
45 |
--------------------------------------------------------------------------------
/spec/legacy-icpc.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: Legacy (ICPC)
4 | sort: 4
5 | ---
6 |
7 | # Problem Package Format
8 |
9 | This is the `legacy-icpc` version of the Kattis problem package format.
10 | It is also known as the ICPC subset.
11 |
12 | ## Overview
13 |
14 | This document describes the format of a _Kattis problem package_,
15 | used for distributing and sharing problems for algorithmic programming contests as well as educational use.
16 |
17 | ### General Requirements
18 |
19 | - The package must consist of a single directory containing files as described below.
20 |   The directory name must consist solely of lowercase letters a–z and digits 0–9.
21 |   Alternatively, the package can be a ZIP-compressed archive of such a directory with identical base name and extension `.kpp` or `.zip`.
22 | - All file names for files included in the package must match the regexp
23 |   ```regex
24 |   ^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,253}[a-zA-Z0-9]$
25 |   ```
26 |   i.e., they must be of length at least 2, at most 255, consist solely of lower- or uppercase letters a–z, A–Z, digits 0–9, period, dash, or underscore,
27 |   but must not begin or end with a period, dash, or underscore.
28 | - All text files for a problem must be UTF-8 encoded and not have a byte-order mark (BOM).
29 | - All text files must have Unix-style line endings (newline/LF byte only).
30 |   Note that LF is line-ending and not line-separating in POSIX, which means that all non-empty text files must end with a newline.
31 | - Natural language (for example, in the [problem statement](#problem-statements) filename) must be specified as a 2-letter [ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) code if one exists, otherwise as a 3-letter code from ISO 639.
32 |   Optionally, it may be suffixed with a hyphen and an ISO 3166-1 alpha-2 code, as defined in BCP 47, for example, `pt-BR` to indicate Brazilian Portuguese.
33 | - All floating-point numbers must be given as the external character sequences defined by IEEE 754-2008 and may use up to double precision.
34 | - The problem package may include symbolic links to other files in the problem package.
35 |   Symlinks must not have targets outside the problem package directory tree.
36 |
37 | ### Problem Package Structure Overview
38 |
39 | The following table summarizes the elements of a problem package described in this specification:
40 |
41 | File or Folder        | Required? | Described In                                 | Description
42 | --------------------- | --------- | -------------------------------------------- | -----------
43 | `problem.yaml`        | Yes       | [Problem Metadata](#problem-metadata)        | Metadata about the problem (e.g., source, license, limits)
44 | `problem_statement/`  | Yes       | [Problem Statements](#problem-statements)    | Problem statement files
45 | `attachments/`        | No        | [Attachments](#attachments)                  | Files available to problem-solvers other than the problem statement and sample test data
46 | `data/sample/`        | No        | [Test Data](#test-data)                      | Sample test data
47 | `data/secret/`        | Yes       | [Test Data](#test-data)                      | Secret test data
48 | `submissions/`        | Yes       | [Example Submissions](#example-submissions)  | Correct and incorrect judge solutions of the problem
49 | `input_validators/`   | Yes       | [Input Validators](#input-validators)        | Programs that verify the correctness of the test data inputs
50 | `output_validators/`  | No        | [Output Validators](#output-validators)      | Custom programs for judging solutions
51 |
52 | A minimal problem package must contain `problem.yaml`, a problem statement, a secret test case, an accepted judge solution, and an input validator.
53 |
54 | ### Programs
55 |
56 | There are a number of different kinds of programs that may be provided in the problem package:
57 | submissions, input validators, and output validators.
58 | All programs are always represented by a single file or directory.
59 | In other words, if a program consists of several files, these must be provided in a single directory.
60 | In the case that a program is a single file, it is treated as if a directory with the same name takes its place, which contains only that file.
61 | The name of the program, for the purpose of referring to it within the package, is the base name of the file or the name of the directory.
62 | There cannot be two programs of the same kind with the same name.
63 |
64 | Validators and graders, but not submissions,
65 | provided in the form of a directory may include two POSIX-compliant shell scripts, `build` and `run`.
66 | These scripts must be executable, whether included in the package or generated by `build`.
67 | If at least one of these two files is included:
68 |
69 | 1. First, if the `build` script is present, it will be run.
70 |    The working directory will be (a copy of) the program directory.
71 |    The `run` file must exist after `build` is done.
72 | 2. Then, the `run` file (which now exists)
73 |    will be invoked in the same way as a single file program.
74 |
75 | Programs without `build` and `run` scripts are built and run according to what language is used.
76 | The language is determined by looking at the file endings as specified in the [languages table](../appendix/languages.md).
77 | In the case of Python 2 and 3, which share the same file ending,
78 | the language will be determined by looking at the shebang line, which must match the regular expressions `^#!.*python2` for Python 2 and `^#!.*python3` for Python 3.
79 | If a single language cannot be determined, building fails.
80 |
81 | For languages where there could be several entry points,
82 | the default entry point in the [languages table](../appendix/languages.md) will be used.
83 |
84 | ## Problem Metadata
85 |
86 | Metadata about the problem (e.g., source, license, limits) are provided in a YAML file named `problem.yaml` placed in the root directory of the package.
87 |
88 | The keys are defined as below.
89 | Keys are optional unless explicitly stated.
90 | Any unknown keys should be treated as an error.
91 |
92 | Key                                                | Type                                          | Required  | Default
93 | -------------------------------------------------- | --------------------------------------------- | --------- | -------
94 | [problem_format_version](#problem-format-version) | String                                        | No        | `legacy`
95 | [name](#name)                                      | String                                        | No        |
96 | [uuid](#uuid)                                      | String                                        | No        |
97 | [author](#author)                                  | String                                        | No        |
98 | [source](#source)                                  | String                                        | No        |
99 | [source_url](#source)                              | String                                        | No        |
100 | [license](#license)                               | String                                        | No        | `unknown`
101 | [rights_owner](#license)                          | String                                        | See below | See below
102 | [limits](#limits)                                 | Map with keys as defined below                | No        | See below
103 | [validation](#validation)                         | String                                        | No        | `default`
104 | [validator_flags](#validation)                    | String                                        | No        |
105 | [keywords](#keywords)                             | String                                        | No        |
106 |
107 | ### Problem format version
108 |
109 | Version of the Problem Package Format used for this package.
110 | If using this version of the Format, it must be the string `legacy-icpc`.
111 | Note though, that the default (`legacy`) is a strict superset of `legacy-icpc`.
112 | Documentation for version `<version>` is available at `https://www.kattis.com/problem-package-format/spec/<version>`.
113 |
114 | ### Name
115 |
116 | The name of the problem in one of the languages for which a problem statement exists.
117 |
118 | ### UUID
119 |
120 | The `uuid` is meant to track a problem, even if its package name and/or `name` changes.
121 | For example, it can be used to identify the existing problem to update in an online problem archive and not accidentally upload it as a new one.
122 | The intention is that a new `uuid` should be assigned if the problem significantly changes.
123 |
124 | This specification currently does not attach any further semantic meaning to this field.
125 |
126 | ### Author
127 |
128 | Who should get author credits.
129 | Given as a string with names separated by `,` or `and`.
130 | This would typically be the people who came up with the idea, wrote the problem specification, and created the test data.
131 | This is sometimes omitted when authors choose to instead only give source credit, but both may be specified.
132 |
133 | ### Source
134 |
135 | Who should get source credit.
136 | This would typically be the name (and year) of the event where the problem was first used or for which it was created.
137 |
138 | The `source` key contains the source that this problem originates from.
139 | This should typically contain the name (and year) of the problem set (such as a contest or a course),
140 | where the problem was first used or for which it was created,
141 | and the `source_url` key contains a link to the event's page.
142 | `source_url` must not be given if `source` is not.
143 |
144 | ### License
145 |
146 | License under which the problem may be used.
147 | Must be one of the values below.
148 |
149 | Value           | Comments                                                                            | Link
150 | --------------- | ----------------------------------------------------------------------------------- | ----
151 | `unknown`       | The default value. In practice means that the problem cannot be used.               |
152 | `public domain` | There are no known copyrights on the problem, anywhere in the world.                 |
153 | `cc0`           | CC0, "no rights reserved", version 1 or later.                                       |
154 | `cc by`         | CC attribution license, version 4 or later.                                          |
155 | `cc by-sa`      | CC attribution, share alike license, version 4 or later.                             |
156 | `educational`   | May be freely used for educational purposes.                                         |
157 | `permission`    | Used with permission. The rights owner must be contacted for every additional use.   |
158 |
159 | `rights_owner` is the owner of the copyright of the problem.
160 | If `license` is not `unknown` or `public domain`, `rights_owner` is required to have a value.
161 | If `license` is `public domain`, `rights_owner` must not be set.
162 | `rights_owner` defaults to the value of `author`, if present, otherwise the value of `source`; otherwise it will have no value.
163 |
164 | ### Limits
165 |
166 | Time, memory, and other limits to be imposed on submissions.
167 | A map with the following keys:
168 |
169 | Key                  | Comments                   | Default        | Typical system default
170 | -------------------- | -------------------------- | -------------- | ----------------------
171 | `time_multiplier`    | optional float             | 5              |
172 | `time_safety_margin` | optional float             | 2              |
173 | `memory`             | optional, in MiB           | system default | 2048
174 | `output`             | optional, in MiB           | system default | 8
175 | `code`               | optional, in KiB           | system default | 128
176 | `compilation_time`   | optional, in seconds       | system default | 60
177 | `compilation_memory` | optional, in MiB           | system default | 2048
178 | `validation_time`    | optional, in seconds       | system default | 60
179 | `validation_memory`  | optional, in MiB           | system default | 2048
180 | `validation_output`  | optional, in MiB           | system default | 8
181 |
182 | For most keys, the system default will be used if nothing is specified.
183 | This can vary, but you **should** assume that it is reasonable.
184 | Only specify limits when the problem needs a specific limit; in that case, specify the limit even if it coincides with the "typical system default".
185 |
186 | ### Validation
187 |
188 | `validation` is a space separated list of strings describing how validation is done.
189 | It must begin with one of `default` or `custom`.
190 | If `custom`, it may be followed by `interactive`,
191 | where `interactive` specifies that the validator is run interactively with a submission.
192 | For example, `custom interactive`.
193 |
194 | `validator_flags` will be passed as command-line arguments to each of the output validators.
195 |
196 | ### Keywords
197 |
198 | Space separated list of keywords describing the problem.
199 | Keywords must not contain spaces.
200 |
201 | ## Problem Statements
202 |
203 | The problem statement of the problem is provided in the directory `problem_statement/`.
204 |
205 | This directory must contain one file per language, for at least one language, named `problem.<language>.<filetype>`,
206 | that contains the problem text itself, including input and output specifications, but not sample input and output.
207 | Language must be given as the shortest ISO 639 code.
208 | If needed, a hyphen and an ISO 3166-1 alpha-2 code may be appended to an ISO 639 code.
209 | Optionally, the language code can be left out; the default is then English (`en`).
210 | Filetype can be either `.tex` for LaTeX files, or `.pdf` for PDF.
211 |
212 | Please note that many kinds of transformations on the problem statements,
213 | such as conversion to HTML or styling to fit in a single document containing many problems, will not be possible for PDF problem statements,
214 | so using this format should be avoided if at all possible.
215 |
216 | Auxiliary files needed by the problem statement files must all be in `problem_statement/`.
217 | `problem.<language>.<filetype>` should reference auxiliary files as if the working directory is `problem_statement/`.
218 | Image file formats supported are `.png`, `.jpg`, `.jpeg`, and `.pdf`.
219 |
220 | A LaTeX file may include the problem name using the LaTeX command `\problemname` in case LaTeX formatting of the title is wanted.
221 |
222 | The problem statements must only contain the actual problem statement, no sample data.
223 |
224 | ## Attachments
225 |
226 | Public, i.e., non-secret, files to be made available in addition to the problem statement and sample test data are provided in the directory `attachments/`.
227 |
228 | ## Test data
229 |
230 | The test data are provided in subdirectories of `data/`.
231 | The sample data in `data/sample/` and the secret data in `data/secret/`.
232 |
233 | All input and answer files have the filename extensions `.in` and `.ans`, respectively.
234 |
235 | ### Annotations
236 |
237 | One hint, description, and/or illustration file may be provided per test case.
238 | The files must share the base name of the associated test case.
239 | Description and illustration files are meant to be privileged information.
240 |
241 | Category     | File type | Filename extension                 | Remark
242 | ------------ | --------- | ---------------------------------- | ------
243 | hint         | text      | `.hint`                            |
244 | description  | text      | `.desc`                            | privileged information
245 | illustration | image     | `.png`, `.jpg`, `.jpeg`, or `.svg` | privileged information
246 |
247 | - A *hint* provides feedback for solving a test case to, e.g., somebody whose submission didn't pass.
248 |
249 | - A *description* conveys the purpose of a test case.
250 |   It is an explanation of what aspect or edge case of the solution the input file is meant to test.
251 |
252 | - An *illustration* provides a visualization of the associated test case.
253 |   Note that at most one image file may exist for each test case.
254 |
255 | ### Interactive Problems
256 |
257 | Unlike in non-interactive problems, `.in` and `.ans` files in interactive problems **must not** be displayed to teams:
258 | not in the problem statement, nor as part of the sample input download.
259 | Instead, all sample test cases **must** provide an interaction protocol as a text file with the extension `.interaction` demonstrating the communication between the submission and the output validator, meant to be displayed in the problem statement.
260 |
261 | Additional sample interaction protocols may be defined by creating an `.interaction` file without corresponding `.in` and `.ans` files.
262 |
263 | An interaction protocol consists of a series of lines starting with `>` and `<`.
264 | Lines starting with `>` signify an output from the submission to the output validator,
265 | while those starting with `<` signify an input from the output validator to the submission.
266 |
267 | If you want to provide files related to interactive problems (such as testing tools or input files), you can use [attachments](#attachments).
268 |
269 | ### Test Data Groups
270 |
271 | At the top level, the test data is divided into exactly two groups: `sample` and `secret`.
272 |
273 | Test cases and groups will be used in lexicographical order on file base name.
274 | If a specific order is desired, a numbered prefix such as `00`, `01`, `02`, `03`, and so on, can be used.
275 |
276 | ## Example Submissions
277 |
278 | Correct and incorrect solutions to the problem are provided in subdirectories of `submissions/`.
279 | The possible subdirectories are:
280 |
281 | Value                 | Requirement                                                                                 | Comment
282 | --------------------- | ------------------------------------------------------------------------------------------- | -------
283 | `accepted`            | Accepted as a correct solution for all test cases.                                          | At least one is required.
284 | `wrong_answer`        | Wrong answer for some test case, but is not too slow and does not crash for any test case.  |
285 | `time_limit_exceeded` | Too slow for some test case. May also give wrong answer but not crash for any test case.    |
286 | `run_time_error`      | Crashes for some test case.                                                                 |
287 |
288 | Every file or directory in these directories represents a separate solution.
289 | It is mandatory to provide at least one accepted solution.
290 |
291 | Submissions must read input data from standard input, and write output to standard output.
292 |
293 | ## Input Validators
294 |
295 | Input validators, verifying the correctness of the input files, are provided in `input_validators/` (or the deprecated `input_format_validators/`).
296 | Input validators can be specified as [VIVA](http://viva.vanb.org/)-files (with file ending `.viva`),
297 | [Checktestdata](https://github.com/DOMjudge/checktestdata)-files (with file ending `.ctd`),
298 | or as a program.
299 |
300 | All input validators provided will be run on every input file.
301 | Validation fails if any validator fails.
302 |
303 | ### Invocation
304 |
305 | An input validator program must be an application (executable or interpreted) capable of being invoked with a command line call.
306 |
307 | All input validators provided will be run on every test data file using the arguments specified for the test data group they are part of.
308 | Validation fails if any validator fails.
309 |
310 | When invoked, the input validator will get the input file on stdin.
311 |
312 | It must be possible to use the validator as follows on the command line:
313 | ```sh
314 | [arguments] < inputfile
315 | ```
316 |
317 | ### Output
318 |
319 | The input validator may output debug information on stdout and stderr.
320 | This information may be displayed to the user upon invocation of the validator.
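As an illustration (the input format is hypothetical, and the exit-code semantics are those described under Exit codes below), a minimal input validator could look like this:
```python
#!/usr/bin/env python3
"""Minimal input validator sketch (illustrative, not normative)."""
import re
import sys

# Hypothetical input format: one line with an integer n, 1 <= n <= 1000.
data = sys.stdin.read()
m = re.fullmatch(r"([1-9][0-9]*)\n", data)
if m and int(m.group(1)) <= 1000:
    sys.exit(42)  # successful validation
sys.exit(1)  # any other exit code: input not confirmed valid
```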
321 |
322 | ### Exit codes
323 |
324 | The input validator must exit with code 42 on successful validation.
325 | Any other exit code means that the input file could not be confirmed as valid.
326 |
327 | #### Dependencies
328 |
329 | The validator **must not** read any files outside those defined in the Invocation section.
330 | Its result **must** depend only on these files and the arguments.
331 |
332 | ## Output Validators
333 |
334 | ### Overview
335 |
336 | An output validator is a [program](#programs) that is given the output of a submitted program,
337 | together with the corresponding input file,
338 | and an answer file for the input,
339 | and then decides whether the output provided is a correct output for the given input file.
340 |
341 | A validator program must be an application (executable or interpreted) capable of being invoked with a command line call.
342 | The details of this invocation are described below.
343 | The validator program has two ways of reporting the results of validation:
344 |
345 | 1. The validator must give a judgement (see [Reporting a judgement](#reporting-a-judgement)).
346 | 2. The validator may give additional feedback,
347 |    e.g., an explanation of the judgement to humans (see [Reporting Additional Feedback](#reporting-additional-feedback)).
348 |
349 | Custom output validators are used if the problem requires more complicated output validation than what is provided by the default diff variant described below.
350 | They are provided in `output_validators/`, and must adhere to the [Output validator](#output-validators) specification.
351 |
352 | All output validators provided will be run on the output for every test data file using the arguments specified for the test data group they are part of.
353 | Validation fails if any validator fails.
354 |
355 | ### Default Output Validator Specification
356 |
357 | The default output validator is essentially a beefed-up diff.
358 | In its default mode, it tokenizes the output and answer files and compares them token by token.
359 | It supports the following command-line arguments to control how tokens are compared.
360 |
361 | Arguments                    | Description
362 | ---------------------------- | -----------
363 | `case_sensitive`             | indicates that comparisons should be case-sensitive.
364 | `space_change_sensitive`     | indicates that changes in the amount of whitespace should be rejected (by default, any sequences of one or more whitespace characters are equivalent).
365 | `float_relative_tolerance ε` | indicates that floating-point tokens should be accepted if they are within relative error ≤ ε (see below for details).
366 | `float_absolute_tolerance ε` | indicates that floating-point tokens should be accepted if they are within absolute error ≤ ε (see below for details).
367 | `float_tolerance ε`          | shorthand for applying ε as both relative and absolute tolerance.
368 |
369 | When supplying both a relative and an absolute tolerance, the semantics are that a token is accepted if it is within either of the two tolerances.
370 | When a floating-point tolerance has been set, any valid formatting of floating-point numbers is accepted for floating-point tokens.
371 | So, for instance, if a token in the answer file says `0.0314`, a token of `3.14000000e-2` in the output file would be accepted.
372 | If no floating-point tolerance has been set, floating-point tokens are treated just like any other token and have to match exactly.
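The floating-point acceptance rule above can be summarized by the following sketch (a restatement of the stated semantics, not part of the specification):
```python
def float_token_ok(s, a, rel_tol=None, abs_tol=None):
    """Accept submission value s against answer value a if it is within
    the relative and/or absolute tolerance (either one suffices)."""
    if abs_tol is not None and abs(s - a) <= abs_tol:
        return True
    if rel_tol is not None and abs(s - a) <= rel_tol * abs(a):
        return True
    return False

# Example: with `float_tolerance 1e-6`, answer token 0.0314 accepts 3.14000000e-2.
assert float_token_ok(3.14000000e-2, 0.0314, rel_tol=1e-6, abs_tol=1e-6)
```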
373 |
374 | ### Invocation
375 |
376 | When invoked, the output validator will be passed at least three command line parameters and the output stream to validate on stdin.
377 |
378 | It must be possible to use the validator as follows on the command line:
379 | ```sh
380 | input answer_file feedback_dir [additional_arguments] < team_output [ > team_input ]
381 | ```
382 |
383 | The meanings of the parameters listed above are:
384 |
385 | - input:
386 |   a string specifying the name of the input data file that was used to test the program whose results are being validated.
387 | - answer_file:
388 |   a string specifying the name of an arbitrary "answer file" which acts as input to the validator program.
389 |   The answer file may, but is not necessarily required to, contain the "correct answer" for the problem.
390 |   For example, it might contain the output that was produced by a judge's solution for the problem when run with the input file as input.
391 |   Alternatively, the "answer file" might contain information, in arbitrary format, which instructs the validator in some way about how to accomplish its task.
392 | - feedback_dir:
393 |   a string which specifies the name of a "feedback directory" in which the validator can produce "feedback files" in order to report additional information on the validation of the output file.
394 |   The feedback_dir must end with a path separator (typically '/' or '\\' depending on operating system),
395 |   so that simply appending a filename to feedback_dir gives the path to a file in the feedback directory.
396 | - additional_arguments:
397 |   in case the problem specifies additional `validator_flags`, these are passed as additional arguments to the validator on the command line.
398 | - team_output:
399 |   the output produced by the program being validated is given on the validator's standard input pipe.
400 | - team_input:
401 |   when running the validator in interactive mode, everything written on the validator's standard output pipe is given to the program being validated.
402 |   Please note that when running interactively, the program will only receive the output produced by the validator and will not have direct access to the input file.
403 |
404 | The two files pointed to by input and answer_file must exist (though they are allowed to be empty) and the validator program must be allowed to open them for reading.
405 | The directory pointed to by feedback_dir must also exist.
406 |
407 | ### Reporting a judgement
408 |
409 | A validator program is required to report its judgement by exiting with specific exit codes:
410 |
411 | - If the output is a correct output for the input file (i.e., the submission that produced the output is to be Accepted),
412 |   the validator exits with exit code 42.
413 | - If the output is incorrect (i.e., the submission that produced the output is to be judged as Wrong Answer),
414 |   the validator exits with exit code 43.
415 |
416 | Any other exit code (including 0!) indicates that the validator did not operate properly,
417 | and the judging system invoking the validator must take measures to report this to contest personnel.
418 | The purpose of these somewhat exotic exit codes is to avoid conflict with other exit codes that result when the validator crashes.
419 | For instance, if the validator is written in Java, any unhandled exception results in the program crashing with an exit code of 1,
420 | making it unsuitable to assign a judgement meaning to this exit code.
421 | 422 | ### Reporting Additional Feedback 423 | 424 | The purpose of the feedback directory is to allow the validator program to report more information to the judging system than just the accept/reject verdict. 425 | Using the feedback directory is optional for a validator program, so if one just wants to write a bare-bones minimal validator, it can be ignored. 426 | 427 | The validator is free to create different files in the feedback directory, 428 | in order to provide different kinds of information to the judging system, in a simple but organized way. 429 | For instance, there may be a `judgemessage.txt` file, 430 | the contents of which gives a message that is presented to a judge reviewing the current submission 431 | (typically used to help the judge verify why the submission was judged as incorrect, by specifying exactly what was wrong with its output). 432 | Other examples of files that may be useful in some contexts (though not in the ICPC) are a `score.txt` file, 433 | giving the submission a score based on other factors than correctness, 434 | or a `teammessage.txt` file, giving a message to the team that submitted the solution, providing additional feedback on the submission. 435 | 436 | A judging system that implements this format must support the `judgemessage.txt` file described above 437 | (I.e., content of the `judgemessage.txt` file, if produced by the validator, must be provided by the judging system to a human judge examining the submission). 438 | Having the judging system support other files is optional. 439 | 440 | Note that a validator may choose to ignore the feedback directory entirely. 441 | In particular, the judging system must not assume that the validator program creates any files there at all. 442 | 443 | #### Examples 444 | 445 | An example of a `judgemessage.txt` file: 446 | ```text 447 | Team failed at test case 14. 448 | Team output: "31", Judge answer: "30". 449 | Team failed at test case 18. 450 | Team output: "hovercraft", Judge answer: "7". 451 | Summary: 2 test cases failed. 452 | ``` 453 | 454 | An example of a `teammessage.txt` file: 455 | ```text 456 | Almost all test cases failed — are you even trying to solve the problem? 457 | ``` 458 | 459 | #### Validator standard error 460 | 461 | A validator program is allowed to write any kind of debug information to its standard error pipe. 462 | This information may be displayed to the user upon invocation of the validator. 463 | -------------------------------------------------------------------------------- /spec/legacy.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Legacy 4 | sort: 3 5 | --- 6 | 7 | # Problem Package Format 8 | 9 | This is the `legacy` version of the Kattis problem package format. 10 | 11 | ## Overview 12 | 13 | This document describes the format of a _Kattis problem package_, 14 | used for distributing and sharing problems for algorithmic programming contests as well as educational use. 15 | 16 | ### General Requirements 17 | 18 | - The package must consist of a single directory containing files as described below. 19 | The directory name must consist solely of lowercase letters a–z and digits 0–9. 20 | Alternatively, the package can be a ZIP-compressed archive of such a directory with identical base name and extension `.kpp` or `.zip`. 
21 | - All file names for files included in the package must match the regexp
22 |   ```regex
23 |   ^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,253}[a-zA-Z0-9]$
24 |   ```
25 |   i.e., they must be of length at least 2, at most 255, consist solely of lower- or uppercase letters a–z, A–Z, digits 0–9, period, dash, or underscore,
26 |   but must not begin or end with a period, dash, or underscore.
27 | - All text files for a problem must be UTF-8 encoded and not have a byte-order mark (BOM).
28 | - All text files must have Unix-style line endings (newline/LF byte only).
29 |   Note that LF is line-ending and not line-separating in POSIX, which means that all non-empty text files must end with a newline.
30 | - Natural language (for example, in the [problem statement](#problem-statements) filename) must be specified as 2-letter [ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) code if it exists, otherwise as a 3-letter code from ISO 639.
31 |   Optionally, it may be suffixed with a hyphen and an ISO 3166-1 alpha-2 code, as defined in BCP 47, for example, `pt-BR` to indicate Brazilian Portuguese.
32 | - All floating-point numbers must be given as the external character sequences defined by IEEE 754-2008 and may use up to double precision.
33 | - The problem package may include symbolic links to other files in the problem package.
34 |   Symlinks must not have targets outside the problem package directory tree.
35 | 
36 | ### Problem Package Structure Overview
37 | 
38 | The following table summarizes the elements of a problem package described in this specification:
39 | 
40 | File or Folder         | Required? | Described In                                  | Description
41 | ---------------------- | --------- | --------------------------------------------- | -----------
42 | `problem.yaml`         | Yes       | [Problem Metadata](#problem-metadata)         | Metadata about the problem (e.g., source, license, limits)
43 | `problem_statement/`   | Yes       | [Problem Statements](#problem-statements)     | Problem statement files
44 | `attachments/`         | No        | [Attachments](#attachments)                   | Files available to problem-solvers other than the problem statement and sample test data
45 | `data/sample/`         | No        | [Test Data](#test-data)                       | Sample test data
46 | `data/secret/`         | Yes       | [Test Data](#test-data)                       | Secret test data
47 | `include/`             | No        | [Included Code](#included-code)               | Files appended to all submitted solutions
48 | `submissions/`         | Yes       | [Example Submissions](#example-submissions)   | Correct and incorrect judge solutions of the problem
49 | `input_validators/`    | Yes       | [Input Validators](#input-validators)         | Programs that verify the correctness of the test data inputs
50 | `output_validators/`   | No        | [Output Validators](#output-validators)       | Custom programs for judging solutions
51 | 
52 | A minimal problem package must contain `problem.yaml`, a problem statement, a secret test case, an accepted judge solution, and an input validator.
53 | 
54 | ### Programs
55 | 
56 | There are a number of different kinds of programs that may be provided in the problem package:
57 | submissions, input validators, output validators, and graders.
58 | All programs are always represented by a single file or directory.
59 | In other words, if a program consists of several files, these must be provided in a single directory.
60 | If a program is a single file, it is treated as if it were a directory with the same name containing only that file.
61 | The name of the program, for the purpose of referring to it within the package, is the base name of the file or the name of the directory. 62 | There can't be two programs of the same kind with the same name. 63 | 64 | Validators and graders, but not submissions, 65 | in the form of a directory may include two POSIX-compliant shell scripts, `build` and `run`. 66 | These scripts must be executable when they exist or get generated. 67 | If at least one of these two files is included: 68 | 69 | 1. First, if the `build` script is present, it will be run. 70 | The working directory will be (a copy of) the program directory. 71 | The `run` file must exist after `build` is done. 72 | 2. Then, the `run` file (which now exists) 73 | will be invoked in the same way as a single file program. 74 | 75 | Programs without `build` and `run` scripts are built and run according to what language is used. 76 | Language is determined by looking at the file endings as specified in the [languages table](../appendix/languages.md). 77 | In the case of Python 2 and 3 which share the same file ending, 78 | language will be determined by looking at the shebang line which must match the regular expressions `^#!.*python2` for Python 2 and `^#!.*python3` for Python 3. 79 | If a single language can't be determined, building fails. 80 | 81 | For languages where there could be several entry points, 82 | the default entry point in the [languages table](../appendix/languages.md) will be used. 83 | 84 | ### Problem Types 85 | 86 | There are two types of problems: *pass-fail* problems and *scoring* problems. 87 | In pass-fail problems, submissions are basically judged as either accepted or rejected (though the "rejected" judgement is more fine-grained and divided into results such as "Wrong Answer", "Time Limit Exceeded", etc). 88 | In scoring problems, a submission that is accepted is additionally given a score, which is a numeric value (and the goal is to either maximize or minimize this value). 89 | 90 | ## Problem Metadata 91 | 92 | Metadata about the problem (e.g., source, license, limits) are provided in a YAML file named `problem.yaml` placed in the root directory of the package. 93 | 94 | The keys are defined as below. 95 | Keys are optional unless explicitly stated. 96 | Any unknown keys should be treated as an error. 97 | 98 | Key | Type | Required | Default 99 | ------------------------------------------------- | --------------------------------------------- | --------- | ------- 100 | [problem_format_version](#problem-format-version) | String | No | `legacy` 101 | [type](#type) | String | No | `pass-fail` 102 | [name](#name) | String | No | 103 | [uuid](#uuid) | String | No | 104 | [author](#author) | String | No | 105 | [source](#source) | String | No | 106 | [source_url](#source) | String | No | 107 | [license](#license) | String | No | `unknown` 108 | [rights_owner](#license) | String | See below | See below 109 | [limits](#limits) | Map with keys as defined below | No | See below 110 | [validation](#validation) | String | No | `default` 111 | [validator_flags](#validation) | String | No | 112 | [grading](#grading) | Map with keys as defined below | No | See below 113 | [keywords](#keywords) | String | No | 114 | 115 | ### Problem format version 116 | 117 | Version of the Problem Package Format used for this package. 118 | If using this version of the Format, it must be the string `legacy` (which is also the default). 
119 | Documentation for version `<version>` is available at `https://www.kattis.com/problem-package-format/spec/<version>`.
120 | 
121 | ### Type
122 | 
123 | Type of problem.
124 | Must be one of `pass-fail` or `scoring`.
125 | 
126 | ### Name
127 | 
128 | The name of the problem in one of the languages for which a problem statement exists.
129 | 
130 | ### UUID
131 | 
132 | The `uuid` is meant to track a problem, even if its package name and/or `name` changes.
133 | For example, it can be used to identify the existing problem to update in an online problem archive and not accidentally upload it as a new one.
134 | The intention is that a new `uuid` should be assigned if the problem significantly changes.
135 | 
136 | This specification currently does not attach any further semantic meaning to this field.
137 | 
138 | ### Author
139 | 
140 | Who should get author credits.
141 | Given as a string with names separated by `,` or `and`.
142 | This would typically be the people who came up with the idea, wrote the problem specification, and created the test data.
143 | This is sometimes omitted when authors choose to instead only give source credit, but both may be specified.
144 | 
145 | ### Source
146 | 
147 | Who should get source credit.
148 | This would typically be the name (and year) of the event where the problem was first used or for which it was created.
149 | 
150 | The `source` key contains the source that this problem originates from.
151 | This should typically contain the name (and year) of the problem set (such as a contest or a course),
152 | where the problem was first used or for which it was created,
153 | and the `source_url` key contains a link to the event's page.
154 | `source_url` must not be given if `source` is not.
155 | 
156 | ### License
157 | 
158 | License under which the problem may be used.
159 | Must be one of the values below.
160 | 
161 | Value           | Comments                                                                            | Link
162 | --------------- | ----------------------------------------------------------------------------------- | ----
163 | `unknown`       | The default value. In practice means that the problem can not be used.              |
164 | `public domain` | There are no known copyrights on the problem, anywhere in the world.                |
165 | `cc0`           | CC0, "no rights reserved", version 1 or later.                                      | <https://creativecommons.org/publicdomain/zero/1.0/>
166 | `cc by`         | CC attribution license, version 4 or later.                                         | <https://creativecommons.org/licenses/by/4.0/>
167 | `cc by-sa`      | CC attribution, share alike license, version 4 or later.                            | <https://creativecommons.org/licenses/by-sa/4.0/>
168 | `educational`   | May be freely used for educational purposes.                                        |
169 | `permission`    | Used with permission. The rights owner must be contacted for every additional use.  |
170 | 
171 | `rights_owner` is the owner of the copyright of the problem.
172 | If `license` is not `unknown` or `public domain`, `rights_owner` is required to have a value.
173 | If `license` is `public domain`, `rights_owner` must not be set.
174 | `rights_owner` defaults to the value of `author`, if present, otherwise the value of `source`, otherwise it has no value.
175 | 
176 | ### Limits
177 | 
178 | Time, memory, and other limits to be imposed on submissions.
179 | A map with the following keys:
180 | 
181 | Key                  | Comments                   | Default        | Typical system default
182 | -------------------- | -------------------------- | -------------- | ----------------------
183 | `time_multiplier`    | optional float             | 5              |
184 | `time_safety_margin` | optional float             | 2              |
185 | `memory`             | optional, in MiB           | system default | 2048
186 | `output`             | optional, in MiB           | system default | 8
187 | `code`               | optional, in KiB           | system default | 128
188 | `compilation_time`   | optional, in seconds       | system default | 60
189 | `compilation_memory` | optional, in MiB           | system default | 2048
190 | `validation_time`    | optional, in seconds       | system default | 60
191 | `validation_memory`  | optional, in MiB           | system default | 2048
192 | `validation_output`  | optional, in MiB           | system default | 8
193 | 
194 | For most keys, the system default is used if nothing is specified.
195 | Defaults vary between systems, but you **should** assume that they are reasonable.
196 | Only specify a limit when the problem needs one; in that case, specify it explicitly even when the value needed matches the "typical system default" above.
197 | 
198 | ### Validation
199 | 
200 | `validation` is a space separated list of strings describing how validation is done.
201 | Must begin with one of `default` or `custom`.
202 | If `custom`, may be followed by some subset of `score` and `interactive`,
203 | where `score` indicates that the validator produces a score (this is only valid for scoring problems),
204 | and `interactive` specifies that the validator is run interactively with a submission.
205 | For example, `custom interactive score`.
206 | 
207 | `validator_flags` will be passed as command-line arguments to each of the output validators.
208 | 
209 | ### Grading
210 | 
211 | Must only be used on scoring problems.
212 | A map with the following keys:
213 | 
214 | Key                     | Type    | Default | Comments
215 | ----------------------- | ------- | ------- | --------
216 | `objective`             | String  | max     | One of "min" or "max" specifying whether it is a minimization or a maximization problem.
217 | `show_test_data_groups` | boolean | false   | Specifies whether test group results should be shown to the end user.
218 | 
219 | ### Keywords
220 | 
221 | Space separated list of keywords describing the problem.
222 | Keywords must not contain spaces.
223 | 
224 | ## Problem Statements
225 | 
226 | The problem statement is provided in the directory `problem_statement/`.
227 | 
228 | This directory must contain one file per language, for at least one language, named `problem.<language>.<filetype>`,
229 | that contains the problem text itself, including input and output specifications, but not sample input and output.
230 | Language must be given as the shortest ISO 639 code.
231 | If needed, a hyphen and an ISO 3166-1 alpha-2 code may be appended to an ISO 639 code.
232 | Optionally, the language code can be left out; the default is then English (`en`).
233 | Filetype can be either `.tex` for LaTeX files, or `.pdf` for PDF.
234 | 
235 | Please note that many kinds of transformations on the problem statements,
236 | such as conversion to HTML or styling to fit in a single document containing many problems, are not possible for PDF problem statements,
237 | so using this format should be avoided if at all possible.
238 | 
239 | Auxiliary files needed by the problem statement files must all be in `problem_statement/`.
240 | `problem.<language>.<filetype>` should reference auxiliary files as if the working directory is `problem_statement/`.
241 | Image file formats supported are `.png`, `.jpg`, `.jpeg`, and `.pdf`. 242 | 243 | A LaTeX file may include the problem name using the LaTeX command `\problemname` in case LaTeX formatting of the title is wanted. 244 | If a plaintext version of the problem name is required, the `name` value from `problem.yaml` shall be used if present. 245 | If not, the name given in `\problemname` is used verbatim. 246 | 247 | The problem statements must only contain the actual problem statement, no sample data. 248 | 249 | ## Attachments 250 | 251 | Public, i.e., non-secret, files to be made available in addition to the problem statement and sample test data are provided in the directory `attachments/`. 252 | 253 | ## Test data 254 | 255 | The test data are provided in subdirectories of `data/`. 256 | The sample data in `data/sample/` and the secret data in `data/secret/`. 257 | 258 | All input and answer files have the filename extension `.in` and `.ans` respectively. 259 | 260 | ### Annotations 261 | 262 | One hint, description, and/or illustration file may be provided per test case. 263 | The files must share the base name of the associated test case. 264 | Description and illustration files are meant to be privileged information. 265 | 266 | Category | File type | Filename extension | Remark 267 | ------------ | --------- | ---------------------------------- | ------ 268 | hint | text | `.hint` | 269 | description | text | `.desc` | privileged information 270 | illustration | image | `.png`, `.jpg`, `.jpeg`, or `.svg` | privileged information 271 | 272 | - A *hint* provides feedback for solving a test case to, e.g., somebody whose submission didn't pass. 273 | 274 | - A *description* conveys the purpose of a test case. 275 | It is an explanation of what aspect or edge case of the solution that the input file is meant to test. 276 | 277 | - An *illustration* provides a visualization of the associated test case. 278 | Note that at most one image file may exist for each test case. 279 | 280 | ### Interactive Problems 281 | 282 | Unlike in non-interactive problems, `.in` and `.ans` files in interactive problems **must not** be displayed to teams: 283 | not in the problem statement, nor as part of sample input download. 284 | Instead, all sample test cases **must** provide an interaction protocol as a text file with the extension `.interaction` demonstrating the communication between the submission and the output validator, meant to be displayed in the problem statement. 285 | 286 | Additional sample interaction protocols may be defined by creating an `.interaction` file without corresponding `.in` and `.ans` files. 287 | 288 | An interaction protocol consists of a series of lines starting with `>` and `<`. 289 | Lines starting with `>` signify an output from the submission to the output validator, 290 | while those starting with `<` signify an input from the output validator to the submission. 291 | 292 | If you want to provide files related to interactive problems (such as testing tools or input files), you can use [attachments](#attachments). 293 | 294 | ### Test Data Groups 295 | 296 | The test data for the problem can be organized into a tree-like structure. 297 | Each node of this tree is represented by a directory and referred to as a test data group. 298 | Each test data group may consist of zero or more test cases (i.e., input-answer files) and zero or more subgroups of test data (i.e., subdirectories). 
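For instance, the secret data of a scoring problem could be split into subtask groups like this (a hypothetical layout, modeled on the `scoring` example package in this repository):

```text
data/
├── sample/
│   ├── 1.in
│   └── 1.ans
└── secret/
    ├── testdata.yaml
    ├── subtask1/
    │   ├── testdata.yaml
    │   ├── 1.in
    │   └── 1.ans
    └── subtask2/
        ├── testdata.yaml
        ├── 1.in
        └── 1.ans
```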
299 | 
300 | At the top level, the test data is divided into exactly two groups: `sample` and `secret`.
301 | These two groups may be further split into subgroups as desired.
302 | 
303 | The *result* of a test data group is computed by applying a *grader* to all of the sub-results (test cases and subgroups) in the group.
304 | See [Graders](#graders) for more details.
305 | 
306 | Test cases and groups will be used in lexicographical order on file base name.
307 | If a specific order is desired, a numbered prefix such as `00`, `01`, `02`, `03`, and so on, can be used.
308 | 
309 | In each test data group, a YAML file `testdata.yaml` may be placed to specify how the result of the test data group should be computed.
310 | If a test data group has no `testdata.yaml` file, the `testdata.yaml` in the closest ancestor group that has one will be used.
311 | If there is no `testdata.yaml` file in the root `data` group, one is implicitly added with the default values.
312 | 
313 | The format of `testdata.yaml` is as follows:
314 | 
315 | Key                      | Type                                           | Default      | Comments
316 | ------------------------ | ---------------------------------------------- | ------------ | --------
317 | `on_reject`              | String                                         | break        | One of "break" or "continue". Specifies how judging should proceed when a submission gets a non-Accept judgement on an individual test case or subgroup. If "break", judging proceeds immediately to grading. If "continue", judging continues with the rest of the test cases and subgroups within the group.
318 | `grading`                | String                                         | default      | One of "default" and "custom".
319 | `grader_flags`           | String                                         | empty string | arguments passed to the grader for this test data group.
320 | `input_validator_flags`  | String or map with the keys "name" and "flags" | empty string | arguments passed to the input validators for this test data group. If a string, those are the arguments that will be passed to each input validator for this test data group. If a map, then "name" is the name of an input validator and "flags" are the arguments to pass to that input validator for this test data group. Validators not present in the map are run without any arguments.
321 | `output_validator_flags` | String or map with the keys "name" and "flags" | empty string | arguments passed to the output validator for this test data group. If a string, those are the arguments that will be passed to the output validator. If a map, then "name" is the name of the output validator to use and "flags" are the arguments to pass to it.
322 | `accept_score`           | Floating-point number                          | 1            | Default score for accepted input files. May only be specified for scoring problems.
323 | `reject_score`           | Floating-point number                          | 0            | Default score for rejected input files. May only be specified for scoring problems.
324 | `range`                  | Two space separated floating-point numbers     | -inf +inf    | Two floating-point numbers A and B ("inf", "-inf", "+inf" are allowed for plus/minus infinity) specifying the range of possible scores. May only be specified for scoring problems.
325 | 
326 | ## Included Code
327 | 
328 | Code that should be included with all submissions is provided in one directory per supported language, called `include/<language>/`.
329 | 
330 | The files are copied from the language directory matching the language of the submission
331 | into the submission's files before compiling,
332 | overwriting files from the submission in the case of a name collision.
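As an illustration, a problem that ships a helper module for Python 3 submissions could be laid out like this (hypothetical file names, modeled on the `maximal` example package); the helper is copied next to each Python 3 submission before it is built and run:

```text
include/
└── python3/
    └── include.py      # helper module; submissions can `import include`
submissions/
└── accepted/
    └── accepted.py     # compiled/run together with the copied include.py
```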
333 | Language must be given as one of the language codes in the language table in the overview section.
334 | If any of the included files are supposed to be the main file (i.e., a driver),
335 | that file must have the language-dependent name as given in the table referred to above.
336 | 
337 | ## Example Submissions
338 | 
339 | Correct and incorrect solutions to the problem are provided in subdirectories of `submissions/`.
340 | The possible subdirectories are:
341 | 
342 | Value                 | Requirement                                                                                                                               | Comment
343 | --------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -------
344 | `accepted`            | Accepted as a correct solution for all test cases.                                                                                         | At least one is required.
345 | `partially_accepted`  | Overall verdict must be Accepted. The overall score must not be the maximum of `range` if the objective is max, nor the minimum of `range` if the objective is min. | Must not be used for pass-fail problems.
346 | `wrong_answer`        | Wrong answer for some test case, but is not too slow and does not crash for any test case.                                                 |
347 | `time_limit_exceeded` | Too slow for some test case. May also give wrong answer but not crash for any test case.                                                   |
348 | `run_time_error`      | Crashes for some test case.                                                                                                                |
349 | 
350 | Every file or directory in these directories represents a separate solution.
351 | It is mandatory to provide at least one accepted solution.
352 | 
353 | Submissions must read input data from standard input, and write output to standard output.
354 | 
355 | ## Input Validators
356 | 
357 | Input Validators, verifying the correctness of the input files, are provided in `input_validators/` (or the deprecated `input_format_validators/`).
358 | Input validators can be specified as [VIVA](http://viva.vanb.org/) files (with file ending `.viva`),
359 | [Checktestdata](https://github.com/DOMjudge/checktestdata) files (with file ending `.ctd`),
360 | or as a program.
361 | 
362 | All input validators provided will be run on every input file.
363 | Validation fails if any validator fails.
364 | 
365 | ### Invocation
366 | 
367 | An input validator program must be an application (executable or interpreted) capable of being invoked with a command line call.
368 | 
369 | All input validators provided will be run on every test data file using the arguments specified for the test data group they are part of.
370 | Validation fails if any validator fails.
371 | 
372 | When invoked, the input validator will get the input file on stdin.
373 | 
374 | It must be possible to invoke the validator on the command line as follows:
375 | ```sh
376 | <input_validator_program> [arguments] < inputfile
377 | ```
378 | 
379 | ### Output
380 | 
381 | The input validator may output debug information on stdout and stderr.
382 | This information may be displayed to the user upon invocation of the validator.
383 | 
384 | ### Exit codes
385 | 
386 | The input validator must exit with code 42 on successful validation.
387 | Any other exit code means that the input file could not be confirmed as valid.
388 | 
389 | #### Dependencies
390 | 
391 | The validator **must not** read any files outside those defined in the Invocation section.
392 | Its result **must** depend only on these files and the arguments.
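As a concrete illustration, a minimal program validator might look like the following sketch; it assumes a hypothetical problem whose input is a single integer between 1 and 10^9 on one line (the bounds and format are illustrative only):

```python
#!/usr/bin/env python3
"""Sketch of an input validator for a hypothetical one-integer input."""
import sys

def main() -> None:
    data = sys.stdin.read()  # the input file arrives on stdin
    lines = data.split("\n")
    # Expect exactly one line, terminated by a newline, holding one integer
    # without leading zeros, in the (hypothetical) range 1..10^9.
    ok = (len(lines) == 2 and lines[1] == ""
          and lines[0].isdigit()
          and lines[0] == str(int(lines[0]))
          and 1 <= int(lines[0]) <= 10**9)
    sys.exit(42 if ok else 1)  # 42 = valid; anything else = not confirmed

if __name__ == "__main__":
    main()
```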
393 | 394 | ## Output Validators 395 | 396 | ### Overview 397 | 398 | An output validator is a [program](#programs) that is given the output of a submitted program, 399 | together with the corresponding input file, 400 | and an answer file for the input, 401 | and then decides whether the output provided is a correct output for the given input file. 402 | 403 | A validator program must be an application (executable or interpreted) capable of being invoked with a command line call. 404 | The details of this invocation are described below. 405 | The validator program has two ways of reporting back the results of validating: 406 | 407 | 1. The validator must give a judgement (see [Reporting a judgement](#reporting-a-judgement)). 408 | 2. The validator may give additional feedback, 409 | e.g., an explanation of the judgement to humans (see [Reporting Additional Feedback](#reporting-additional-feedback)). 410 | 411 | Custom output validators are used if the problem requires more complicated output validation than what is provided by the default diff variant described below. 412 | They are provided in `output_validators/`, and must adhere to the [Output validator](#output-validators) specification. 413 | 414 | All output validators provided will be run on the output for every test data file using the arguments specified for the test data group they are part of. 415 | Validation fails if any validator fails. 416 | 417 | ### Default Output Validator Specification 418 | 419 | The default output validator is essentially a beefed-up diff. 420 | In its default mode, it tokenizes the output and answer files and compares them token by token. 421 | It supports the following command-line arguments to control how tokens are compared. 422 | 423 | Arguments | Description 424 | ---------------------------- | ----------- 425 | `case_sensitive` | indicates that comparisons should be case-sensitive. 426 | `space_change_sensitive` | indicates that changes in the amount of whitespace should be rejected (the default is that any sequence of 1 or more whitespace characters are equivalent). 427 | `float_relative_tolerance ε` | indicates that floating-point tokens should be accepted if they are within relative error ≤ ε (see below for details). 428 | `float_absolute_tolerance ε` | indicates that floating-point tokens should be accepted if they are within absolute error ≤ ε (see below for details). 429 | `float_tolerance ε` | short-hand for applying ε as both relative and absolute tolerance. 430 | 431 | When supplying both a relative and an absolute tolerance, the semantics are that a token is accepted if it is within either of the two tolerances. 432 | When a floating-point tolerance has been set, any valid formatting of floating-point numbers is accepted for floating-point tokens. 433 | So, for instance, if a token in the answer file says `0.0314`, a token of `3.14000000e-2` in the output file would be accepted. 434 | If no floating-point tolerance has been set, floating-point tokens are treated just like any other token and have to match exactly. 435 | 436 | ### Invocation 437 | 438 | When invoked the output validator will be passed at least three command line parameters and the output stream to validate on stdin. 
439 | 
440 | It must be possible to invoke the validator on the command line as follows:
441 | ```sh
442 | <output_validator_program> input answer_file feedback_dir [additional_arguments] < team_output [ > team_input ]
443 | ```
444 | 
445 | The meanings of the parameters listed above are as follows:
446 | 
447 | - input:
448 |   a string specifying the name of the input data file that was used to test the program whose results are being validated.
449 | - answer_file:
450 |   a string specifying the name of an arbitrary "answer file" which acts as input to the validator program.
451 |   The answer file may, but is not necessarily required to, contain the "correct answer" for the problem.
452 |   For example, it might contain the output that was produced by a judge's solution for the problem when run with the input file as input.
453 |   Alternatively, the "answer file" might contain information, in arbitrary format, which instructs the validator in some way about how to accomplish its task.
454 | - feedback_dir:
455 |   a string which specifies the name of a "feedback directory" in which the validator can produce "feedback files" in order to report additional information on the validation of the output file.
456 |   The feedback_dir must end with a path separator (typically '/' or '\\' depending on operating system),
457 |   so that simply appending a filename to feedback_dir gives the path to a file in the feedback directory.
458 | - additional_arguments:
459 |   in case the problem specifies additional `validator_flags`, these are passed as additional arguments to the validator on the command line.
460 | - team_output:
461 |   the output produced by the program being validated is given on the validator's standard input pipe.
462 | - team_input:
463 |   when running the validator in interactive mode, everything written on the validator's standard output pipe is given to the program being validated.
464 |   Please note that when running interactively, the program will only receive the output produced by the validator and will not have direct access to the input file.
465 | 
466 | The two files pointed to by input and answer_file must exist (though they are allowed to be empty) and the validator program must be allowed to open them for reading.
467 | The directory pointed to by feedback_dir must also exist.
468 | 
469 | ### Reporting a judgement
470 | 
471 | A validator program is required to report its judgement by exiting with specific exit codes:
472 | 
473 | - If the output is a correct output for the input file (i.e., the submission that produced the output is to be Accepted),
474 |   the validator exits with exit code 42.
475 | - If the output is incorrect (i.e., the submission that produced the output is to be judged as Wrong Answer),
476 |   the validator exits with exit code 43.
477 | 
478 | Any other exit code (including 0!) indicates that the validator did not operate properly,
479 | and the judging system invoking the validator must take measures to report this to contest personnel.
480 | The purpose of these somewhat exotic exit codes is to avoid conflicts with other exit codes that result when the validator crashes.
481 | For instance, if the validator is written in Java, any unhandled exception results in the program crashing with an exit code of 1,
482 | making it unsuitable to assign a judgement meaning to this exit code.
483 | 
484 | ### Reporting Additional Feedback
485 | 
486 | The purpose of the feedback directory is to allow the validator program to report more information to the judging system than just the accept/reject verdict.
487 | Using the feedback directory is optional for a validator program, so if one just wants to write a bare-bones minimal validator, it can be ignored.
488 | 
489 | The validator is free to create different files in the feedback directory,
490 | in order to provide different kinds of information to the judging system, in a simple but organized way.
491 | For instance, there may be a `judgemessage.txt` file,
492 | the contents of which give a message that is presented to a judge reviewing the current submission
493 | (typically used to help the judge verify why the submission was judged as incorrect, by specifying exactly what was wrong with its output).
494 | Other examples of files that may be useful in some contexts (though not in the ICPC) are a `score.txt` file,
495 | giving the submission a score based on factors other than correctness,
496 | or a `teammessage.txt` file, giving a message to the team that submitted the solution, providing additional feedback on the submission.
497 | 
498 | A judging system that implements this format must support the `judgemessage.txt` file described above
499 | (i.e., the content of the `judgemessage.txt` file, if produced by the validator, must be provided by the judging system to a human judge examining the submission).
500 | Having the judging system support other files is optional.
501 | 
502 | Note that a validator may choose to ignore the feedback directory entirely.
503 | In particular, the judging system must not assume that the validator program creates any files there at all.
504 | 
505 | #### Examples
506 | 
507 | An example of a `judgemessage.txt` file:
508 | ```text
509 | Team failed at test case 14.
510 | Team output: "31", Judge answer: "30".
511 | Team failed at test case 18.
512 | Team output: "hovercraft", Judge answer: "7".
513 | Summary: 2 test cases failed.
514 | ```
515 | 
516 | An example of a `teammessage.txt` file:
517 | ```text
518 | Almost all test cases failed — are you even trying to solve the problem?
519 | ```
520 | 
521 | #### Validator standard error
522 | 
523 | A validator program is allowed to write any kind of debug information to its standard error pipe.
524 | This information may be displayed to the user upon invocation of the validator.
525 | 
526 | ## Graders
527 | 
528 | Graders are programs that are given the sub-results of a test data group and aggregate a result for the group.
529 | They are provided in `graders/`.
530 | 
531 | For pass-fail problems, the grader will typically just set the verdict to accepted if all sub-results in the group were accepted, and otherwise select the "worst" error in the group (see below for the definition of "worst"),
532 | though it is possible to write a custom grader which, e.g., accepts if at least half the sub-results are accepted.
533 | For scoring problems, one common grader behaviour would be to always set the verdict to Accepted,
534 | with the score being the sum of the scores of the items in the test group.
535 | 
536 | ### Invocation
537 | 
538 | A grader program must be an application (executable or interpreted) capable of being invoked with a command line call.
539 | 
540 | When invoked, the grader will get the judgements for test cases or groups on stdin and is expected to produce an aggregate result on stdout.
541 | 
542 | It must be possible to invoke the grader on the command line as follows:
543 | ```sh
544 | <grader_program> [arguments] < judgeresults
545 | ```
546 | 
547 | On success, the grader must exit with exit code 0.
548 | 
549 | ### Input
550 | 
551 | A grader simply takes a list of results on standard input, and produces a single result on standard output.
552 | The input will have one line per test case, containing the result of judging that test file,
553 | using the code from the table below, followed by whitespace, followed by the score.
554 | 
555 | Code | Meaning
556 | ---- | -------
557 | AC   | Accepted
558 | WA   | Wrong Answer
559 | RTE  | Run-Time Error
560 | TLE  | Time-Limit Exceeded
561 | 
562 | The score is taken from the `score.txt` files produced by the output validator.
563 | If no `score.txt` exists, the score will be as defined by the `accept_score` and `reject_score` settings from [`testdata.yaml`](#test-data-groups).
564 | 
565 | ### Output
566 | 
567 | The grader must output the aggregate result on stdout in the same format as its input.
568 | Any other output, including no output, will result in a Judging Error.
569 | 
570 | For pass-fail problems, or for non-Accepted results on scoring problems, the score provided by the grader will always be ignored.
571 | 
572 | The grader may output debug information on stderr.
573 | This information may be displayed to the user upon invocation of the grader.
574 | 
575 | ### Default Grader Specification
576 | 
577 | The default grader has three different modes for aggregating the verdict
578 | -- _worst_error_, _first_error_ and _always_accept_ --
579 | four different modes for aggregating the score
580 | -- _sum_, _avg_, _min_, _max_ --
581 | and two flags
582 | -- _ignore_sample_ and _accept_if_any_accepted_.
583 | These modes can be set by providing their names as command line arguments (through the `grader_flags` option in [`testdata.yaml`](#test-data-groups)).
584 | If multiple conflicting modes are given, the last one is used. Their meanings are as follows.
585 | 
586 | Argument                 | Type         | Description
587 | ------------------------ | ------------ | -----------
588 | `worst_error`            | verdict mode | Default. Verdict is accepted if all sub-results are accepted; otherwise it is the first of JE, IF, RTE, MLE, TLE, OLE, WA that is the sub-result of some item in the test case group. Note that in combination with the `on_reject: break` policy in `testdata.yaml`, the result will be the first error encountered.
589 | `first_error`            | verdict mode | Verdict is accepted if all sub-results are accepted; otherwise it is the verdict of the first sub-result with a non-accepted verdict. Please note that `worst_error` and `first_error` always give the same result if `on_reject` is set to `break`, and as such it is recommended to use the default.
590 | `always_accept`          | verdict mode | Verdict is always accepted.
591 | `sum`                    | scoring mode | Default. Score is the sum of the input scores.
592 | `avg`                    | scoring mode | Score is the average of the input scores.
593 | `min`                    | scoring mode | Score is the minimum of the input scores.
594 | `max`                    | scoring mode | Score is the maximum of the input scores.
595 | `ignore_sample`          | flag         | Must only be used on the root level. The first sub-result (sample) will be ignored; the second sub-result (secret) will be used, for both verdict and score.
596 | `accept_if_any_accepted` | flag         | Verdict is accepted if any sub-result is accepted, otherwise as specified by the verdict aggregation mode.
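To illustrate how these flags combine, an IOI-style subtask could configure the default grader in its `testdata.yaml` as follows (hypothetical values):

```yaml
# Hypothetical testdata.yaml for data/secret/subtask1/
on_reject: continue   # judge every test case in the group
grader_flags: min     # the group's score is the minimum over its cases
accept_score: 30      # each accepted test case is worth 30 points
reject_score: 0
range: 0 30
```

With three secret cases judged `AC 30`, `AC 30`, and `WA 0`, the default grader here would aggregate the verdict as WA (the default `worst_error` mode) and the score as min(30, 30, 0) = 0, so the subtask contributes its full 30 points only when every case in it is accepted.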
597 | -------------------------------------------------------------------------------- /spec/readme.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Specification versions 3 | sort: 1 4 | --- 5 | -------------------------------------------------------------------------------- /support/schemas/problem.cue: -------------------------------------------------------------------------------- 1 | package problem_package 2 | 3 | import "time" 4 | import "strings" 5 | 6 | #ProgrammingLanguage: "ada" | "algol68" | "apl" | "bash" | "c" | "cgmp" | "cobol" | "cpp" | "cppgmp" | "crystal" | "csharp" | "d" | "dart" | "elixir" | "erlang" | "forth" | "fortran" | "fsharp" | "gerbil" | "go" | "haskell" | "java" | "javaalgs4" | "javascript" | "julia" | "kotlin" | "lisp" | "lua" | "modula2" | "nim" | "objectivec" | "ocaml" | "octave" | "odin" | "pascal" | "perl" | "php" | "prolog" | "python2" | "python3" | "python3numpy" | "racket" | "ruby" | "rust" | "scala" | "simula" | "smalltalk" | "snobol" | "swift" | "typescript" | "visualbasic" | "zig" 7 | 8 | 9 | // A problem source is typically a contest or course, such as "NWERC 2023" or { name: "NWERC 2023", url: "2023.nwerc.eu" } 10 | #Source: string | { 11 | name!: string 12 | url?: string 13 | } 14 | 15 | #Person: string 16 | 17 | // Persons are one or more people, such as "Ada Lovelace " or ["Alice", "Bob"] 18 | #Persons: #Person | [#Person, ...#Person] 19 | 20 | #Problem: { 21 | // Problem package format used by this file, such as "2023-12-draft" 22 | problem_format_version!: =~"^[0-9]{4}-[0-9]{2}(-draft)?$" | "draft" | "legacy" | "legacy-icpc" 23 | 24 | // The judgement type is "pass-fail" (the default) or "scoring" 25 | judgement_type?: *"pass-fail" | "scoring" 26 | 27 | // The submission types are standard, interactive or multi-pass, or submit_answer. 28 | submission_type?: *"standard" | "submit_answer" | "interactive" | "multi-pass" | "interactive multi-pass" 29 | 30 | // The name of this problem, such as "Hello" or { en: "Hello", da: "Hej" } 31 | name!: string | {[string]: string} 32 | 33 | // A unique identifier for this problem, such as "8ee7605a-ab1a-8226-1d71-e346ab1e688d" 34 | uuid!: string 35 | 36 | // A version for this problem, such as "draft" or "1.1" 37 | version?: string 38 | 39 | // The people who created this problem. Can be a single person such as "Ada Lovelace". 40 | credits?: #Person | { 41 | // The people who conceptualised this problem. 42 | authors?: #Persons 43 | // The people who developed the problem package, such as the statement, validators, and test data. 44 | contributors?: #Persons 45 | // The people who tested the problem package, for example, by providing a solution and reviewing the statement. 46 | testers?: #Persons 47 | // The people who created the problem package out of an existing problem. 48 | packagers?: #Persons 49 | // Extra acknowledgements or special thanks in addition to the previously mentioned. 50 | acknowledgements?: #Persons 51 | // The people who translated the statement to other languages, mapped by language code. 52 | translators?: [string]: #Persons 53 | } 54 | 55 | // The source(s) of this problem, such as "NWERC 2024" 56 | source?: #Source | [#Source, ...#Source] 57 | 58 | // The license of this problem. 59 | *{license?: *"unknown" | "public domain"} | { 60 | license!: "cc0" | "cc by" | "cc by-sa" | "educational" | "permission" 61 | // Who owns the rights to this problem. 
62 | 		rights_owner?: #Person
63 | 	}
64 | 
65 | 	// Do not publish this problem until the embargo is lifted.
66 | 	embargo_until?: time.Format("2006-01-02") | time.Format("2006-01-02T15:04:05Z")
67 | 
68 | 	// Time and size limits for this problem.
69 | 	limits: {
70 | 		// The time limit for submission, in seconds
71 | 		time_limit?: number & >0
72 | 
73 | 		// Safety margins relative to the slowest accepted submission
74 | 		time_multipliers?: {
75 | 			ac_to_time_limit?: number & >=1 | *2.0
76 | 			time_limit_to_tle?: number & >=1 | *1.5
77 | 		}
78 | 
79 | 		// Resolution for determining the time_limit from the slowest accepted solution
80 | 		time_resolution?: number & >0 | *1.0
81 | 
82 | 		// Time bounds in seconds
83 | 		["compilation_time" | "validation_time"]: int & >0
84 | 
85 | 		// Size bounds in MiB
86 | 		["memory" | "output" | "compilation_memory" | "validation_memory" | "validation_output"]: int & >0
87 | 
88 | 		// Code length in KiB
89 | 		code?: int & >0
90 | 
91 | 		if submission_type != _|_ {
92 | 			if strings.Contains(submission_type, "multi-pass") {
93 | 				// For multi-pass problems, how many passes does validation use?
94 | 				validation_passes: *2 | int & >=2
95 | 			}
96 | 		}
97 | 	}
98 | 
99 | 	// A sequence of keywords describing the problem, such as ["brute force", "real-life"].
100 | 	keywords?: [...string]
101 | 
102 | 	// If not "all", restrict the programming languages that this problem may be solved in.
103 | 	languages?: *"all" | [#ProgrammingLanguage, ...#ProgrammingLanguage]
104 | 
105 | 	// Should submissions have access to creating, editing and deleting files in their working directory?
106 | 	allow_file_writing?: *false | true
107 | 
108 | 	// Constants for templates in the rest of the package, such as { max_n: 2000, name: "Alice" }
109 | 	constants?: [=~"^[a-zA-Z_][a-zA-Z0-9_]*$"]: number | string
110 | }
111 | 
--------------------------------------------------------------------------------
/support/schemas/test_group.cue:
--------------------------------------------------------------------------------
1 | package problem_package
2 | 
3 | #test_group_settings: {
4 | 	input_validator_flags?: *"" | string | {[string]: string}
5 | 	output_validator_flags?: *"" | string
6 | 	grading?: {
7 | 		score?: number
8 | 		max_score?: number
9 | 		aggregation?: "sum" | "min"
10 | 	}
11 | }
12 | 
13 | #test_group_settings
14 | 
--------------------------------------------------------------------------------