├── requirements.txt ├── Makefile ├── .pre-commit-config.yaml ├── .github └── workflows │ ├── ci.yml │ └── docs.yml ├── footnote_build_info.py ├── CONTRIBUTING.md ├── .gitignore ├── make_md.py ├── jheppub.sty ├── JHEP.bst ├── HEPML.tex └── README.md /requirements.txt: -------------------------------------------------------------------------------- 1 | PyYAML~=5.3 2 | requests~=2.25 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | FILENAME = HEPML 2 | 3 | date = $(shell date +%Y-%m-%d) 4 | output_file = draft_$(date).pdf 5 | 6 | LATEX = lualatex 7 | BIBTEX = bibtex 8 | 9 | all: default 10 | 11 | default: document copy_draft 12 | 13 | document: 14 | latexmk -$(LATEX) -logfilewarnings -halt-on-error $(FILENAME) 15 | 16 | copy_draft: 17 | rsync $(FILENAME).pdf $(output_file) 18 | 19 | clean: 20 | rm -f *.aux *.bak *.bbl *.blg *.dvi *.idx *.lof *.log *.lot *.toc \ 21 | *.glg *.gls *.glo *.xdy *.nav *.out *.snm *.vrb *.mp \ 22 | *.synctex.gz *.run.xml *.bcf *.brf *.fls *.fdb_latexmk 23 | 24 | realclean: clean 25 | rm -f *.ps *.pdf 26 | 27 | # Full rebuild: clean first if any stale .aux files exist, then compile. 28 | # N.B.: `[ -f *.aux ]` breaks when the glob expands to more than one file, 29 | # so test for matches with `ls` instead; use $(MAKE) for recursive invocations. 30 | final: 31 | if ls *.aux >/dev/null 2>&1; \ 32 | then $(MAKE) clean; \ 33 | fi 34 | $(MAKE) document 35 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | ci: 2 | autoupdate_commit_msg: "chore: [pre-commit.ci] pre-commit autoupdate" 3 | autoupdate_schedule: "monthly" 4 | 5 | repos: 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v4.0.1 8 | hooks: 9 | - id: check-added-large-files 10 | - id: check-case-conflict 11 | - id: check-merge-conflict 12 | - id: check-symlinks 13 | - id: check-json 14 | - id: check-yaml 15 | - id: check-toml 16 | - id: check-xml 17 | - id: debug-statements 18 | # - id: end-of-file-fixer # TODO: Fix make_md.py 19 | - id: mixed-line-ending 20
| - id: requirements-txt-fixer 21 | - id: trailing-whitespace 22 | 23 | - repo: https://github.com/asottile/pyupgrade 24 | rev: v2.25.0 25 | hooks: 26 | - id: pyupgrade 27 | args: ["--py37-plus"] 28 | 29 | - repo: https://github.com/pycqa/isort 30 | rev: 5.9.3 31 | hooks: 32 | - id: isort 33 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | pull_request: 6 | # Run daily at 0:01 UTC 7 | schedule: 8 | - cron: '1 0 * * *' 9 | 10 | jobs: 11 | test: 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: [ '3.8' ] 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v1 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip setuptools wheel 26 | python -m pip install --quiet --no-cache-dir --requirement requirements.txt 27 | python -m pip list 28 | - name: Test README generation 29 | run: | 30 | cp README.md README.bak 31 | python make_md.py 32 | if [[ "$(diff README.md README.bak)" ]]; then 33 | echo "ERROR: README is out of sync with HEPML.bib" 34 | echo " run make_md.py and commit changes" 35 | exit 1 36 | fi 37 | ls -lhtra 38 | - name: Add build info footnote to jheppub.sty 39 | run: | 40 | python footnote_build_info.py 41 | ls -lhtra 42 | - name: Compile LaTeX document 43 | uses: xu-cheng/texlive-action/full@v1 44 | with: 45 | run: | 46 | apk add make 47 | make document 48 | - name: List directory contents 49 | run: ls -lhtra 50 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Publish docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 
tags: 8 | - v* 9 | 10 | jobs: 11 | docs: 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: [ '3.8' ] 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v1 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip setuptools wheel 26 | python -m pip install --quiet --no-cache-dir --requirement requirements.txt 27 | python -m pip list 28 | - name: Generate README 29 | run: | 30 | cp README.md README.bak 31 | python make_md.py 32 | if [[ "$(diff README.md README.bak)" ]]; then 33 | echo "ERROR: README is out of sync with HEPML.bib" 34 | echo " run make_md.py and commit changes" 35 | exit 1 36 | fi 37 | - name: Add build info footnote to jheppub.sty 38 | run: | 39 | python footnote_build_info.py 40 | - name: Compile LaTeX document 41 | uses: xu-cheng/texlive-action/full@v1 42 | with: 43 | run: | 44 | apk add make 45 | make document 46 | - name: Setup docs for deployment 47 | run: | 48 | mkdir -p docs/_build/review 49 | cp README.md docs/_build/ 50 | cp HEPML.pdf docs/_build/review/hepml-review.pdf 51 | - name: Deploy docs to GitHub Pages 52 | uses: peaceiris/actions-gh-pages@v3 53 | with: 54 | github_token: ${{ secrets.GITHUB_TOKEN }} 55 | publish_dir: docs/_build 56 | enable_jekyll: true # for README render 57 | force_orphan: true 58 | user_name: 'github-actions[bot]' 59 | user_email: 'github-actions[bot]@users.noreply.github.com' 60 | -------------------------------------------------------------------------------- /footnote_build_info.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from shutil import copyfile 4 | 5 | 6 | def make_patch(): 7 | """ 8 | While being run in CI by GitHub Actions, collect repo, commit, and CI run 9 | information to patch jheppub.sty. 
10 | """ 11 | commit_SHA = ( 12 | subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]) 13 | .strip() 14 | .decode("utf-8") 15 | ) 16 | # GITHUB_REPOSITORY is set by GitHub Actions 17 | try: 18 | github_repository = os.environ["GITHUB_REPOSITORY"] 19 | except KeyError: 20 | github_repository = ( 21 | subprocess.check_output(["git", "config", "--get", "remote.origin.url"]) 22 | .strip() 23 | .decode("utf-8") 24 | .replace("git@github.com:", "") 25 | .replace(".git", "") 26 | ) 27 | github_repository_url = "https://github.com/" + github_repository 28 | # GITHUB_RUN_ID is set by GitHub Actions 29 | try: 30 | github_run_id = os.environ["GITHUB_RUN_ID"] 31 | except KeyError: 32 | github_run_id = "" 33 | 34 | github_actions_url = github_repository_url + "/actions/runs/" + github_run_id 35 | github_commit_url = github_repository_url + "/tree/" + commit_SHA 36 | 37 | footnote = ( 38 | r"\footnotesize Built \href{" 39 | + github_actions_url 40 | + r"}{\today}\ from \href{" 41 | + github_commit_url 42 | + "}{" 43 | + commit_SHA 44 | + "}" 45 | ) 46 | 47 | patch = ( 48 | r"\usepackage{fancyhdr}" 49 | + "\n" 50 | + r"\newcommand\ps@titlepage{\renewcommand\@oddfoot{}\renewcommand\@oddhead{}" 51 | + "\n" 52 | + r"\pagestyle{fancy}" 53 | + "\n" 54 | + r"\renewcommand{\headrulewidth}{0pt}" 55 | + "\n" 56 | + r"\fancyfoot{}" 57 | + "\n" 58 | + r"\rfoot{" 59 | + footnote 60 | + "}" 61 | + "\n}" 62 | ) 63 | return patch 64 | 65 | 66 | def main(): 67 | """ 68 | Patch jheppub.sty to record build information as a footnote on the title page. 
69 | """ 70 | patch = make_patch() 71 | 72 | copyfile("jheppub.sty", "jheppub.sty.bak") 73 | with open("jheppub.sty.bak") as read_file, open( 74 | "jheppub.sty", "w+" 75 | ) as write_file: 76 | for line in read_file: 77 | write_file.write( 78 | line.replace( 79 | r"\newcommand\ps@titlepage{\renewcommand\@oddfoot{}\renewcommand\@oddhead{}}", 80 | patch, 81 | ) 82 | ) 83 | 84 | 85 | if __name__ == "__main__": 86 | main() 87 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | To contribute to this project, please open an Issue with the details of the change you would like. 4 | If you have already discussed this with the maintainers or have contributed in the past, you can also open a Pull Request directly. 5 | 6 | ## Opening an Issue 7 | 8 | You can open an Issue from the [GitHub Issue tracker page](https://github.com/iml-wg/HEPML-LivingReview/issues). 9 | Before you open an Issue, please search through both the closed and open Issues to make sure that your Issue hasn't already been discussed or addressed in the past. 10 | 11 | ## Pull Request Process 12 | 13 | 1. Create a [fork of the project](https://docs.github.com/en/free-pro-team@latest/github/getting-started-with-github/fork-a-repo). 14 | 2. Create your Pull Request (PR) from your fork (see the FAQ below). 15 | 3. Verify that you have run `make_md.py` to update the `README`. 16 | 4. Ensure that the tests in the CI are passing. 17 | 5. Request that a maintainer review your PR. 18 | 6. Your PR can be merged in once you have the sign-off of at least one maintainer. If you do not have permission to make the merge, request the approving maintainer to merge it for you. 19 | 20 | ## Areas of Requested Help 21 | 22 | 1. Adding content across experiments 23 | 2.
An additional volunteer maintainer 24 | 25 | ## FAQ 26 | 27 | ### There is a subject not listed that I think should be. How do I get it added to the listing? 28 | 29 | If there is content missing that you'd like added, please create an issue with as much description as possible (and maybe some examples). 30 | A maintainer will add the content once it has been approved. 31 | Alternatively, feel free to fork the repository and add the content you want and then create a pull request. 32 | 33 | ### Should I add papers that are only about machine learning? 34 | 35 | No. 36 | While the ML papers that have inspired the work of the particle physics community are invaluable, trying to keep an updated list of all of them would be both beyond the scope of the project and unmaintainable. 37 | We welcome and encourage contributions of papers that cover modern machine learning applications to particle physics! 38 | 39 | ### How do I add a paper? 40 | 41 | All paper additions should be submitted as a single pull request on a source branch that isn't `master`. 42 | 43 | 1. Make a new branch on your fork for the pull request 44 | 2. Find the paper on [INSPIRE](https://inspirehep.net/?ln=en) 45 | - **N.B.:** If you have already found the paper on [arXiv](https://arxiv.org/) you should be able to find the INSPIRE listing linked under "References & Citations" 46 | 3. Get the BibTeX for the paper citation provided by INSPIRE (under "Export" at the bottom of the page) 47 | 4. Add this BibTeX entry to [`HEPML.bib`](https://github.com/iml-wg/HEPML-LivingReview/blob/master/HEPML.bib) in the appropriate chronological position 48 | 5. Add the citation to [`HEPML.tex`](https://github.com/iml-wg/HEPML-LivingReview/blob/master/HEPML.tex) in the appropriate categories 49 | 6. Verify that the LaTeX compiles when you run `make` 50 | 7. Run `make_md.py` to update the `README` with the new references 51 | 8. Add and commit `HEPML.bib`, `HEPML.tex`, and `README.md` to your pull request 52 | 9.
If you haven't yet, push your branch to GitHub and open a pull request to the main project 53 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Core latex/pdflatex auxiliary files: 2 | *.aux 3 | *.lof 4 | *.log 5 | *.lot 6 | *.fls 7 | *.out 8 | *.toc 9 | *.fmt 10 | *.fot 11 | *.cb 12 | *.cb2 13 | .*.lb 14 | 15 | ## Intermediate documents: 16 | *.dvi 17 | *.xdv 18 | *-converted-to.* 19 | # these rules might exclude image files for figures etc. 20 | # *.ps 21 | # *.eps 22 | *.pdf 23 | 24 | ## Generated if empty string is given at "Please type another file name for output:" 25 | .pdf 26 | 27 | ## Bibliography auxiliary files (bibtex/biblatex/biber): 28 | *.bbl 29 | *.bcf 30 | *.blg 31 | *-blx.aux 32 | *-blx.bib 33 | *.run.xml 34 | 35 | ## Build tool auxiliary files: 36 | *.fdb_latexmk 37 | *.synctex 38 | *.synctex(busy) 39 | *.synctex.gz 40 | *.synctex.gz(busy) 41 | *.pdfsync 42 | 43 | ## Build tool directories for auxiliary files 44 | # latexrun 45 | latex.out/ 46 | 47 | ## Auxiliary and intermediate files from other packages: 48 | # algorithms 49 | *.alg 50 | *.loa 51 | 52 | # achemso 53 | acs-*.bib 54 | 55 | # amsthm 56 | *.thm 57 | 58 | # beamer 59 | *.nav 60 | *.pre 61 | *.snm 62 | *.vrb 63 | 64 | # changes 65 | *.soc 66 | 67 | # comment 68 | *.cut 69 | 70 | # cprotect 71 | *.cpt 72 | 73 | # elsarticle (documentclass of Elsevier journals) 74 | *.spl 75 | 76 | # endnotes 77 | *.ent 78 | 79 | # fixme 80 | *.lox 81 | 82 | # feynmf/feynmp 83 | *.mf 84 | *.mp 85 | *.t[1-9] 86 | *.t[1-9][0-9] 87 | *.tfm 88 | 89 | #(r)(e)ledmac/(r)(e)ledpar 90 | *.end 91 | *.?end 92 | *.[1-9] 93 | *.[1-9][0-9] 94 | *.[1-9][0-9][0-9] 95 | *.[1-9]R 96 | *.[1-9][0-9]R 97 | *.[1-9][0-9][0-9]R 98 | *.eledsec[1-9] 99 | *.eledsec[1-9]R 100 | *.eledsec[1-9][0-9] 101 | *.eledsec[1-9][0-9]R 102 | *.eledsec[1-9][0-9][0-9] 103 | *.eledsec[1-9][0-9][0-9]R 104 | 105 | 
# glossaries 106 | *.acn 107 | *.acr 108 | *.glg 109 | *.glo 110 | *.gls 111 | *.glsdefs 112 | *.lzo 113 | *.lzs 114 | 115 | # uncomment this for glossaries-extra (will ignore makeindex's style files!) 116 | # *.ist 117 | 118 | # gnuplottex 119 | *-gnuplottex-* 120 | 121 | # gregoriotex 122 | *.gaux 123 | *.gtex 124 | 125 | # htlatex 126 | *.4ct 127 | *.4tc 128 | *.idv 129 | *.lg 130 | *.trc 131 | *.xref 132 | 133 | # hyperref 134 | *.brf 135 | 136 | # knitr 137 | *-concordance.tex 138 | # TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files 139 | # *.tikz 140 | *-tikzDictionary 141 | 142 | # listings 143 | *.lol 144 | 145 | # luatexja-ruby 146 | *.ltjruby 147 | 148 | # makeidx 149 | *.idx 150 | *.ilg 151 | *.ind 152 | 153 | # minitoc 154 | *.maf 155 | *.mlf 156 | *.mlt 157 | *.mtc[0-9]* 158 | *.slf[0-9]* 159 | *.slt[0-9]* 160 | *.stc[0-9]* 161 | 162 | # minted 163 | _minted* 164 | *.pyg 165 | 166 | # morewrites 167 | *.mw 168 | 169 | # nomencl 170 | *.nlg 171 | *.nlo 172 | *.nls 173 | 174 | # pax 175 | *.pax 176 | 177 | # pdfpcnotes 178 | *.pdfpc 179 | 180 | # sagetex 181 | *.sagetex.sage 182 | *.sagetex.py 183 | *.sagetex.scmd 184 | 185 | # scrwfile 186 | *.wrt 187 | 188 | # sympy 189 | *.sout 190 | *.sympy 191 | sympy-plots-for-*.tex/ 192 | 193 | # pdfcomment 194 | *.upa 195 | *.upb 196 | 197 | # pythontex 198 | *.pytxcode 199 | pythontex-files-*/ 200 | 201 | # tcolorbox 202 | *.listing 203 | 204 | # thmtools 205 | *.loe 206 | 207 | # TikZ & PGF 208 | *.dpth 209 | *.md5 210 | *.auxlock 211 | 212 | # todonotes 213 | *.tdo 214 | 215 | # vhistory 216 | *.hst 217 | *.ver 218 | 219 | # easy-todo 220 | *.lod 221 | 222 | # xcolor 223 | *.xcp 224 | 225 | # xmpincl 226 | *.xmpi 227 | 228 | # xindy 229 | *.xdy 230 | 231 | # xypic precompiled matrices and outlines 232 | *.xyc 233 | *.xyd 234 | 235 | # endfloat 236 | *.ttt 237 | *.fff 238 | 239 | # Latexian 240 | TSWLatexianTemp* 241 | 242 | ## Editors: 243 | # WinEdt 244 | *.bak 245 | 
*.sav 246 | 247 | # Texpad 248 | .texpadtmp 249 | 250 | # LyX 251 | *.lyx~ 252 | 253 | # Kile 254 | *.backup 255 | 256 | # gummi 257 | .*.swp 258 | 259 | # KBibTeX 260 | *~[0-9]* 261 | 262 | # TeXnicCenter 263 | *.tps 264 | 265 | # auto folder when using emacs and auctex 266 | ./auto/* 267 | *.el 268 | 269 | # expex forward references with \gathertags 270 | *-tags.tex 271 | 272 | # standalone packages 273 | *.sta 274 | 275 | # Makeindex log files 276 | *.lpz 277 | 278 | # REVTeX puts footnotes in the bibliography by default, unless the nofootinbib 279 | # option is specified. Footnotes are the stored in a file with suffix Notes.bib. 280 | # Uncomment the next line to have this generated file ignored. 281 | #*Notes.bib 282 | -------------------------------------------------------------------------------- /make_md.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | import requests 5 | import yaml 6 | 7 | update_journal = False 8 | 9 | myfile = open("HEPML.tex") 10 | myfile_out = open("README.md","w") 11 | 12 | myfile_out.write("# **A Living Review of Machine Learning for Particle Physics**\n\n") 13 | 14 | myfile_out.write("*Modern machine learning techniques, including deep learning, is rapidly being applied, adapted, and developed for high energy physics. The goal of this document is to provide a nearly comprehensive list of citations for those developing and applying these approaches to experimental, phenomenological, or theoretical analyses. As a living document, it will be updated as often as possible to incorporate the latest developments. A list of proper (unchanging) reviews can be found within. Papers are grouped into a small set of topics to be as useful as possible. 
Suggestions are most welcome.*\n\n") 15 | 16 | myfile_out.write("[![download](https://img.shields.io/badge/download-review-blue.svg)](https://iml-wg.github.io/HEPML-LivingReview/review/hepml-review.pdf)\n\n") 17 | 18 | myfile_out.write("The purpose of this note is to collect references for modern machine learning as applied to particle physics. A minimal number of categories is chosen in order to be as useful as possible. Note that papers may be referenced in more than one category. The fact that a paper is listed in this document does not endorse or validate its content - that is for the community (and for peer-review) to decide. Furthermore, the classification here is a best attempt and may have flaws - please let us know if (a) we have missed a paper you think should be included, (b) a paper has been misclassified, or (c) a citation for a paper is not correct or if the journal information is now available. In order to be as useful as possible, this document will continue to evolve so please check back before you write your next paper. If you find this review helpful, please consider citing it using \\cite{hepmllivingreview} in HEPML.bib.\n\n") 19 | 20 | ###This bit is slightly modified from Kyle Cranmer https://github.com/cranmer/inspire_play 21 | def summarize_record(recid): 22 | url = 'https://labs.inspirehep.net/api/arxiv/'+str(recid) 23 | max_authors = 5 24 | r = requests.get(url) 25 | mini_dict = {'recid':recid} 26 | if 'metadata' in r.json(): 27 | data = r.json()['metadata'] 28 | mini_dict.update({'title':data['titles'][0]['title']}) 29 | if len(data['authors'])>max_authors: 30 | #mini_dict.update({'authors':[a['full_name'] for a in data['authors'][:max_authors]]+['et. al.']}) 31 | mini_dict.update({'authors':"; ".join([a['full_name'] for a in data['authors'][:max_authors]]+['et. 
al.'])}) 32 | else: 33 | mini_dict.update({'authors':[a['full_name'] for a in data['authors']]}) 34 | 35 | if 'collaborations' in data: 36 | mini_dict.update({'collaboration': data['collaborations'][0]['value']}) 37 | 38 | mini_dict.update({'arxiv_eprint': data['arxiv_eprints'][0]['value']}) 39 | mini_dict.update({'url': 'https://arxiv.org/abs/'+data['arxiv_eprints'][0]['value']}) 40 | mini_dict.update({'creation_date': data['legacy_creation_date']}) 41 | 42 | if 'journal_title' in data: 43 | mini_dict.update({'journal_title':data['publication_info'][0]['journal_title']}) 44 | if 'journal_volume' in data: 45 | mini_dict.update({'journal_volume':data['publication_info'][0]['journal_volume']}) 46 | if 'page_start' in data: 47 | mini_dict.update({'page_start':data['publication_info'][0]['page_start']}) 48 | if 'journal_year' in data: 49 | mini_dict.update({'journal_year':data['publication_info'][0]['year']}) 50 | 51 | if 'dois' in data: 52 | mini_dict.update({'doi': data['dois'][0]['value']}) 53 | return mini_dict 54 | 55 | def convert_from_bib(myline): 56 | #Not the most elegant way, but quick and dirty. Files are not big, so this doesn't take long. 
57 | 58 | myline = myline.replace(" ","").replace("\n","") 59 | 60 | myfile_bib = open("HEPML.bib") 61 | mylines = [] 62 | for line in myfile_bib: 63 | mylines+=[line] 64 | pass 65 | myentry = [] 66 | for i in range(len(mylines)): 67 | if myline in mylines[i]: 68 | j = i+1 69 | myentry+=[mylines[i]] 70 | while "@" not in mylines[j]: 71 | myentry+=[mylines[j]] 72 | j+=1 73 | pass 74 | pass 75 | pass 76 | myentry_dict = {} 77 | for entry in myentry: 78 | entry_cleaned = entry.replace("\"{","").replace("}\",","").replace("},","") 79 | entry_cleaned = entry_cleaned.replace(" =","=") 80 | entry_cleaned = entry_cleaned.replace("= ","=") 81 | first_entry = entry_cleaned.split("=")[0] 82 | if "title" in first_entry and not "booktitle" in first_entry: 83 | myentry_dict["title"] = entry_cleaned.split("title")[1].split("=")[1].split("\n")[0] 84 | pass 85 | elif "eprint" in first_entry: 86 | myentry_dict["eprint"] = entry_cleaned.split("eprint")[1].split("=")[1].split("\n")[0].replace("\"","").replace(",","").replace("\'","").replace(" ","") 87 | if "{" in myentry_dict["eprint"]: 88 | myentry_dict["eprint"] = myentry_dict["eprint"][1:] 89 | pass 90 | elif "doi" in first_entry: 91 | myentry_dict["doi"] = entry_cleaned.split("doi")[1].split("=")[1].split("\n")[0].replace("\"","").replace(",","").replace("\'","").replace(" ","") 92 | elif "url" in first_entry: 93 | myentry_dict["url"] = entry_cleaned.split("url")[1].split("=")[1].split("\n")[0].replace("\"","").replace(",","").replace("\'","").replace(" ","") 94 | else: 95 | #print(entry_cleaned) 96 | pass 97 | pass 98 | 99 | if "eprint" in myentry_dict and 'doi' not in myentry_dict and update_journal: 100 | #check inspire 101 | inspire_dict = summarize_record(myentry_dict['eprint']) 102 | if 'doi' in inspire_dict: 103 | print("Updating journal ref for ",myline) 104 | myentry_dict['doi'] = inspire_dict['doi'] 105 | 106 | #print(inspire_dict) 107 | 108 | myfile_bib_copy = open("HEPML_copy.bib","w") 109 | myfile_bib = 
open("HEPML.bib") 110 | for line in myfile_bib: 111 | myfile_bib_copy.write(line) 112 | if myentry_dict['eprint'] in line and "eprint" in line: 113 | if "journal_title" in inspire_dict: 114 | myfile_bib_copy.write(" journal=\""+inspire_dict['journal_title']+"\",\n") 115 | if "journal_volume" in inspire_dict: 116 | myfile_bib_copy.write(" volume=\""+inspire_dict['journal_volume']+"\",\n") 117 | if "page_start" in inspire_dict: 118 | myfile_bib_copy.write(" pages=\""+inspire_dict['page_start']+"\",\n") 119 | if "doi" in inspire_dict: 120 | myfile_bib_copy.write(" doi=\""+inspire_dict['doi']+"\",\n") 121 | pass 122 | pass 123 | pass 124 | os.system("mv HEPML_copy.bib HEPML.bib") 125 | #exit(1) 126 | 127 | if "title" not in myentry_dict: 128 | print(myline) 129 | print(myentry) 130 | print("We are in trouble ! ") 131 | if "eprint" in myentry_dict: 132 | paper="" 133 | if "doi" in myentry_dict: 134 | paper=f" [[DOI](https://doi.org/{myentry_dict['doi']})]" 135 | elif "url" in myentry_dict: 136 | paper=f" [[url]({myentry_dict['url']})]" 137 | return "["+myentry_dict["title"]+"](https://arxiv.org/abs/"+myentry_dict["eprint"]+")"+paper 138 | elif "doi" in myentry_dict: 139 | return "["+myentry_dict["title"]+"](https://doi.org/"+myentry_dict["doi"]+")" 140 | elif "url" in myentry_dict: 141 | return "["+myentry_dict["title"]+"]("+myentry_dict["url"]+")" 142 | else: 143 | return myentry_dict["title"] 144 | return myline 145 | 146 | itemize_counter = 0 147 | for line in myfile: 148 | 149 | if "author" in line: 150 | continue 151 | 152 | if "\\item \\textbf{" in line: 153 | line = line[0:line.find("}")]+line[line.find("}")+1:-1] 154 | line = line.replace("\\textbf{","") 155 | 156 | if "textit{" in line: 157 | continue 158 | 159 | if "item" in line: 160 | if "begin{itemize}" in line: 161 | itemize_counter+=1 162 | elif "end{itemize}" in line: 163 | itemize_counter-=1 164 | else: 165 | #print(itemize_counter,line) 166 | if (itemize_counter==1): 167 | hascites = 
len(line.split("cite")) 168 | if (hascites==1): 169 | if "Experimental" not in line: 170 | myfile_out.write("* "+line.replace(r"\item","")+"\n") 171 | else: 172 | myfile_out.write("* Experimental results. *This section is incomplete as there are many results that directly and indirectly (e.g. via flavor tagging) use modern machine learning techniques. We will try to highlight experimental results that use deep learning in a critical way for the final analysis sensitivity.*\n\n") 173 | else: 174 | myfile_out.write("* "+line.replace(r"\item","").split(r"~\cite")[0]+".\n\n") 175 | mycites = line.split(r"~\cite{")[1].split("}")[0].split(",") 176 | for cite in mycites: 177 | myfile_out.write(" * "+convert_from_bib(cite)+"\n") 178 | pass 179 | myfile_out.write("\n") 180 | pass 181 | pass 182 | elif "cite" in line: 183 | mybuffer = "" 184 | for j in range(itemize_counter-1): 185 | mybuffer+=" " 186 | pass 187 | if (":~" in line): 188 | myfile_out.write(mybuffer+"* "+line.split(r"~\cite{")[0].split(r"\item")[1]+"\n\n") 189 | mycites = line.split(r"~\cite{")[1].replace("}","").split(",") 190 | for cite in mycites: 191 | myfile_out.write(mybuffer+" * "+convert_from_bib(cite)+"\n") 192 | pass 193 | myfile_out.write("\n") 194 | else: 195 | myfile_out.write(mybuffer+"* "+line.split(r"~\cite{")[0].split(r"\item")[1]+"\n\n") 196 | mycites = line.split(r"~\cite{")[1].split("}")[0].split(",") 197 | for cite in mycites: 198 | myfile_out.write(mybuffer+" * "+convert_from_bib(cite)+"\n") 199 | pass 200 | myfile_out.write("\n") 201 | pass 202 | pass 203 | -------------------------------------------------------------------------------- /jheppub.sty: -------------------------------------------------------------------------------- 1 | %% jheppub.sty 2 | %% Copyright 2013 SISSA Medialab 3 | % 4 | % This work may be distributed and/or modified under the 5 | % conditions of the LaTeX Project Public License, either version 1.3 6 | % of this license or (at your option) any later version. 
7 | % The latest version of this license is in 8 | % http://www.latex-project.org/lppl.txt 9 | % and version 1.3 or later is part of all distributions of LaTeX 10 | % version 2005/12/01 or later. 11 | % 12 | % This work has the LPPL maintenance status `author-maintained'. 13 | % 14 | % The Current Maintainer of this work is 15 | % SISSA Medialab 16 | % 17 | % This work consists of the file jheppub.sty. 18 | \NeedsTeXFormat{LaTeX2e} 19 | \ProvidesPackage{jheppub}[2013/03/21 r534] 20 | 21 | 22 | \gdef\@fpheader{ } 23 | \gdef\@journal{jhep} 24 | 25 | \RequirePackage{amsmath} 26 | \RequirePackage{amssymb} 27 | \RequirePackage{epsfig} 28 | \RequirePackage{graphicx} 29 | \RequirePackage[numbers,sort&compress]{natbib} 30 | \RequirePackage{color} 31 | \RequirePackage[pagebackref=true,colorlinks=true 32 | ,urlcolor=blue 33 | ,anchorcolor=blue 34 | ,citecolor=blue 35 | ,filecolor=blue 36 | ,linkcolor=blue 37 | ,menucolor=blue 38 | ,pagecolor=blue 39 | ,linktocpage=true 40 | ,pdfproducer=medialab 41 | ,pdfa=true 42 | ]{hyperref} 43 | 44 | \newif\ifnotoc\notocfalse 45 | \newif\ifemailadd\emailaddfalse 46 | \newif\iftoccontinuous\toccontinuousfalse 47 | 48 | \def\@subheader{\@empty} 49 | \def\@keywords{\@empty} 50 | \def\@abstract{\@empty} 51 | \def\@xtum{\@empty} 52 | \def\@dedicated{\@empty} 53 | \def\@arxivnumber{\@empty} 54 | \def\@collaboration{\@empty} 55 | \def\@collaborationImg{\@empty} 56 | \def\@proceeding{\@empty} 57 | \def\@preprint{\@empty} 58 | 59 | \newcommand{\subheader}[1]{\gdef\@subheader{#1}} 60 | \newcommand{\keywords}[1]{\if!\@keywords!\gdef\@keywords{#1}\else% 61 | \PackageWarningNoLine{\jname}{Keywords already defined.\MessageBreak Ignoring last definition.}\fi} 62 | \renewcommand{\abstract}[1]{\gdef\@abstract{#1}} 63 | \newcommand{\dedicated}[1]{\gdef\@dedicated{#1}} 64 | \newcommand{\arxivnumber}[1]{\gdef\@arxivnumber{#1}} 65 | \newcommand{\proceeding}[1]{\gdef\@proceeding{#1}} 66 | \newcommand{\xtumfont}[1]{\textsc{#1}} 67 | 
\newcommand{\correctionref}[3]{\gdef\@xtum{\xtumfont{#1} \href{#2}{#3}}} 68 | \newcommand\jname{JHEP} 69 | \newcommand\acknowledgments{\section*{Acknowledgments}} 70 | \newcommand\notoc{\notoctrue} 71 | \newcommand\preprint[1]{\gdef\@preprint{\hfill #1}} 72 | 73 | 74 | 75 | %Equivalent to ``\footnote'', but can be used inside the \author macro 76 | % because it does not write the footnote mark 77 | % it has an optional argument that will be used as footnote mark when given 78 | % WARNING: when the optional argument is used, the footnotecounter is not increased 79 | % WARNING: the character ``!'' cannot be used. 80 | % If you really need it use somthing like [\relax !] as the optional arg. 81 | \newcommand\note[2][]{% 82 | \if!#1!% 83 | \stepcounter{footnote}\footnotetext{#2}% 84 | \else% 85 | {\renewcommand\thefootnote{#1}% 86 | \footnotetext{#2}}% 87 | \fi} 88 | 89 | 90 | 91 | %Use this if you want to try to keep some piece of the abstract on a 92 | %very long first page 93 | \newcommand\compress{% 94 | \renewcommand\afterProceedingsSpace{\bigskip}% 95 | \renewcommand\afterTitleSpace{\bigskip}% 96 | \renewcommand\afterRuleSpace{\bigskip\bigskip} 97 | \renewcommand\afterEmailSpace{\par\bigskip}} 98 | 99 | %authors and affiliations 100 | \newtoks\auth@toks 101 | \renewcommand{\author}[2][]{% 102 | \if!#1!% 103 | \auth@toks=\expandafter{\the\auth@toks#2\ }% 104 | \else 105 | \auth@toks=\expandafter{\the\auth@toks#2$^{#1}$\ }% 106 | \fi 107 | } 108 | 109 | \newtoks\affil@toks\newif\ifaffil\affilfalse 110 | \newcommand{\affiliation}[2][]{% 111 | \affiltrue 112 | \if!#1!% 113 | \affil@toks=\expandafter{\the\affil@toks{\item[]#2}}% 114 | \else 115 | \affil@toks=\expandafter{\the\affil@toks{\item[$^{#1}$]#2}}% 116 | \fi 117 | } 118 | 119 | %emails 120 | %automatically put a comma between emails 121 | \newtoks\email@toks\newcounter{email@counter}% 122 | \setcounter{email@counter}{0}% 123 | \newcommand{\emailAdd}[1]{% 124 | \emailaddtrue% 125 | 
\ifnum\theemail@counter>0\email@toks=\expandafter{\the\email@toks, \@email{#1}}% 126 | \else\email@toks=\expandafter{\the\email@toks\@email{#1}}% 127 | \fi\stepcounter{email@counter}} 128 | \newcommand{\@email}[1]{\href{mailto:#1}{\tt #1}} 129 | 130 | 131 | % Collaboration macros 132 | \newcommand*\collaboration[1]{\gdef\@collaboration{#1}} 133 | \newcommand*\collaborationImg[2][]{\gdef\@collaborationImg{#2}} 134 | 135 | %all pieces get a ``after'' spacing 136 | \newcommand\afterLogoSpace{\smallskip} 137 | \newcommand\afterSubheaderSpace{\vskip3pt plus 2pt minus 1pt} 138 | \newcommand\afterProceedingsSpace{\vskip21pt plus0.4fil minus15pt} 139 | \newcommand\afterTitleSpace{\vskip23pt plus0.06fil minus13pt} 140 | \newcommand\afterRuleSpace{\vskip23pt plus0.06fil minus13pt} 141 | \newcommand\afterCollaborationSpace{\vskip3pt plus 2pt minus 1pt} 142 | \newcommand\afterCollaborationImgSpace{\vskip3pt plus 2pt minus 1pt} 143 | \newcommand\afterAuthorSpace{\vskip5pt plus4pt minus4pt} 144 | \newcommand\afterAffiliationSpace{\vskip3pt plus3pt} 145 | \newcommand\afterEmailSpace{\vskip16pt plus9pt minus10pt\filbreak} 146 | \newcommand\afterXtumSpace{\par\bigskip} 147 | \newcommand\afterAbstractSpace{\vskip16pt plus9pt minus13pt} 148 | \newcommand\afterKeywordsSpace{\vskip16pt plus9pt minus13pt} 149 | \newcommand\afterArxivSpace{\vskip3pt plus0.01fil minus10pt} 150 | \newcommand\afterDedicatedSpace{\vskip0pt plus0.01fil} 151 | \newcommand\afterTocSpace{\bigskip\medskip} 152 | \newcommand\afterTocRuleSpace{\bigskip\bigskip} 153 | %this is the ``itemsep'' of the affiliations list 154 | \newlength{\affiliationsSep}\setlength{\affiliationsSep}{-3pt} 155 | %this hook is needed if the toc starts on the first page 156 | \newcommand\beforetochook{\pagestyle{myplain}\pagenumbering{roman}} 157 | 158 | \DeclareFixedFont\trfont{OT1}{phv}{b}{sc}{11} 159 | 160 | %first page 161 | \renewcommand\maketitle{ 162 | %% First page 163 | \pagestyle{empty} 164 | \thispagestyle{titlepage} 165 | 
\setcounter{page}{0} 166 | \noindent{\small\scshape\@fpheader}\@preprint\par 167 | \afterLogoSpace 168 | % Subheader 169 | \if!\@subheader!\else\noindent{\trfont{\@subheader}}\fi 170 | \afterSubheaderSpace 171 | % Proceedings 172 | \if!\@proceeding!\else\noindent{\sc\@proceeding}\fi 173 | \afterProceedingsSpace 174 | % Title 175 | {\LARGE\flushleft\sffamily\bfseries\@title\par} 176 | \afterTitleSpace 177 | % Rule 178 | \hrule height 1.5\p@% 179 | \afterRuleSpace 180 | % Collaboration 181 | \if!\@collaboration!\else 182 | {\Large\bfseries\sffamily\raggedright\@collaboration}\par 183 | \afterCollaborationSpace 184 | \fi 185 | % 186 | \if!\@collaborationImg!\else 187 | {\normalsize\bfseries\sffamily\raggedright\@collaborationImg}\par 188 | \afterCollaborationImgSpace 189 | %% I leave the size and font so that if there are two collaboration 190 | %% they can be linked with an 'and' 191 | \fi 192 | % Author 193 | {\raggedright\sffamily\the\auth@toks\par} 194 | \afterAuthorSpace 195 | % Affiliation 196 | \ifaffil\begin{list}{}{% 197 | \setlength{\leftmargin}{0.28cm}% 198 | \setlength{\labelsep}{0pt}% 199 | \setlength{\itemsep}{\affiliationsSep}% 200 | \setlength{\topsep}{-\parskip}} 201 | \itshape\small% 202 | \the\affil@toks 203 | \end{list}\fi 204 | \afterAffiliationSpace 205 | % E-mail 206 | \ifemailadd %% if emailadd is true 207 | \noindent\hspace{0.28cm}\begin{minipage}[l]{.9\textwidth} 208 | \begin{flushleft} 209 | \textit{E-mail:} \the\email@toks 210 | \end{flushleft} 211 | \end{minipage} 212 | \else %% if emailaddfalse do nothing 213 | \PackageWarningNoLine{\jname}{E-mails are missing.\MessageBreak Plese use \protect\emailAdd\space macro to provide e-mails.} 214 | \fi 215 | \afterEmailSpace 216 | %Erratum or addendum 217 | \if!\@xtum!\else\noindent{\@xtum}\afterXtumSpace\fi 218 | % Abstract 219 | \if!\@abstract!\else\noindent{\renewcommand\baselinestretch{.9}\textsc{Abstract:}}\ \@abstract\afterAbstractSpace\fi 220 | % Keywords 221 | 
\if!\@keywords!\else\noindent{\textsc{Keywords:}} \@keywords\afterKeywordsSpace\fi 222 | % Arxivnumber 223 | \if!\@arxivnumber!\else\noindent{\textsc{ArXiv ePrint:}} \href{http://arxiv.org/abs/\@arxivnumber}{\@arxivnumber}\afterArxivSpace\fi 224 | % Dedication 225 | \if!\@dedicated!\else\vbox{\small\it\raggedleft\@dedicated}\afterDedicatedSpace\fi 226 | % 227 | \ifnotoc\else 228 | \iftoccontinuous\else\newpage\fi 229 | %\beforetochook\hrule 230 | %\tableofcontents 231 | \afterTocSpace 232 | %\hrule 233 | \afterTocRuleSpace 234 | \fi 235 | \setcounter{footnote}{0} 236 | \pagestyle{myplain}\pagenumbering{arabic} 237 | } % close the \renewcommand\maketitle{ 238 | 239 | 240 | % Page layout 241 | \renewcommand{\baselinestretch}{1.1}\normalsize 242 | \setlength\lineskip{1\p@} 243 | \setlength\parindent{1.2\parindent} 244 | \setlength\normallineskip{1\p@} 245 | \setlength\parskip{0\p@ \@plus \p@} 246 | \@lowpenalty 51 247 | \@medpenalty 151 248 | \@highpenalty 301 249 | \widowpenalty 1000 250 | \clubpenalty 1000 251 | 252 | \setcounter{topnumber}{4} 253 | \renewcommand\topfraction{1} 254 | \setcounter{bottomnumber}{1} 255 | \renewcommand\bottomfraction{.6} 256 | \setcounter{totalnumber}{5} 257 | \renewcommand\textfraction{0} 258 | \renewcommand\floatpagefraction{1} 259 | 260 | \textwidth .72\paperwidth 261 | \setlength\@tempdima{.76\paperheight} 262 | \divide\@tempdima\baselineskip 263 | \@tempcnta=\@tempdima 264 | \setlength\textheight{\@tempcnta\baselineskip} 265 | \addtolength\textheight{\topskip} 266 | 267 | \voffset -1in 268 | \topmargin .05\paperheight 269 | \headheight .02\paperheight 270 | \headsep .03\paperheight 271 | \footskip .07\paperheight 272 | 273 | \marginparsep 9\p@ 274 | \marginparpush 6\p@ 275 | 276 | \hoffset -1in 277 | \oddsidemargin .14\paperwidth 278 | \evensidemargin .14\paperwidth 279 | \marginparwidth .11\paperwidth 280 | 281 | \setlength\arraycolsep{2\p@} 282 | \setlength\tabcolsep{6\p@} 283 | \setlength\arrayrulewidth{.4\p@} 284 | 
\setlength\doublerulesep{2\p@} 285 | \setlength\tabbingsep{\labelsep} 286 | \skip\@mpfootins = \skip\footins 287 | \setlength\fboxsep{3\p@} 288 | \setlength\fboxrule{.4\p@} 289 | 290 | % No dots in the table of contents 291 | \renewcommand{\@dotsep}{10000} 292 | 293 | % Footer and header of the body 294 | % the command ``\pagestyle{myplain}'' must be inserted 295 | % just after ``\begin{document}'' 296 | \newcommand\ps@myplain{ 297 | \pagenumbering{arabic} 298 | \renewcommand\@oddfoot{\hfill-- \thepage\ --\hfill} 299 | \renewcommand\@oddhead{}} 300 | \let\ps@plain=\ps@myplain 301 | 302 | 303 | 304 | % no header or footer in the title page 305 | \newcommand\ps@titlepage{\renewcommand\@oddfoot{}\renewcommand\@oddhead{}} 306 | 307 | 308 | 309 | %number equations after the sections 310 | \renewcommand{\theequation}{\thesection.\arabic{equation}} 311 | \numberwithin{equation}{section} 312 | 313 | 314 | %headings style 315 | \renewcommand\section{\@startsection{section}{1}{\z@}% 316 | {-3.5ex \@plus -1.3ex \@minus -.7ex}% 317 | {2.3ex \@plus.4ex \@minus .4ex}% 318 | {\normalfont\large\bfseries}} 319 | \renewcommand\subsection{\@startsection{subsection}{2}{\z@}% 320 | {-2.3ex\@plus -1ex \@minus -.5ex}% 321 | {1.2ex \@plus .3ex \@minus .3ex}% 322 | {\normalfont\normalsize\bfseries}} 323 | \renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}% 324 | {-2.3ex\@plus -1ex \@minus -.5ex}% 325 | {1ex \@plus .2ex \@minus .2ex}% 326 | {\normalfont\normalsize\bfseries}} 327 | \renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}% 328 | {1.75ex \@plus1ex \@minus.2ex}% 329 | {-1em}% 330 | {\normalfont\normalsize\bfseries}} 331 | \renewcommand\subparagraph{\@startsection{subparagraph}{5}{\parindent}% 332 | {1.75ex \@plus1ex \@minus .2ex}% 333 | {-1em}% 334 | {\normalfont\normalsize\bfseries}} 335 | 336 | 337 | %Caption of figure and table 338 | \def\fnum@figure{\textbf{\figurename\nobreakspace\thefigure}} 339 | \def\fnum@table{\textbf{\tablename\nobreakspace\thetable}} 
340 | 341 | %redefining \@makecaption, to have captions with \small font size 342 | %taken from article.cls l.489--498 343 | %the only differences are the ``\small '' commands added on two lines 344 | \long\def\@makecaption#1#2{% 345 | \vskip\abovecaptionskip 346 | \sbox\@tempboxa{\small #1. #2}% 347 | \ifdim \wd\@tempboxa >\hsize 348 | \small #1. #2\par 349 | \else 350 | \global \@minipagefalse 351 | \hb@xt@\hsize{\hfil\box\@tempboxa\hfil}% 352 | \fi 353 | \vskip\belowcaptionskip} 354 | 355 | 356 | 357 | 358 | 359 | %apply some formatting on the biblio 360 | %without redefining the whole env 361 | \let\oldthebibliography=\thebibliography 362 | \let\endoldthebibliography=\endthebibliography 363 | \renewenvironment{thebibliography}[1]{% 364 | \begin{oldthebibliography}{#1}% 365 | \small% 366 | \raggedright% 367 | \setlength{\itemsep}{5pt plus 0.2ex minus 0.05ex}% 368 | }% 369 | {% 370 | \end{oldthebibliography}% 371 | } 372 | -------------------------------------------------------------------------------- /JHEP.bst: -------------------------------------------------------------------------------- 1 | % JHEP bibliography style ver. 2.3 2 | % 3 | % The bibtex output produced by inSPIRE, while far from perfect, is pretty 4 | % suitable for use with this style. Indeed, this style was designed with 5 | % inSPIRE in mind. 6 | % 7 | % 8 | % 9 | % Copyright 2015 SISSA Medialab 10 | % 11 | % This work may be distributed and/or modified under the 12 | % conditions of the LaTeX Project Public License, either version 1.3 13 | % of this license or (at your option) any later version. 14 | % The latest version of this license is in 15 | % http://www.latex-project.org/lppl.txt 16 | % and version 1.3 or later is part of all distributions of LaTeX 17 | % version 2005/12/01 or later. 18 | % 19 | % This work has the LPPL maintenance status `author-maintained'. 
20 | % 21 | % The Current Maintainer of this work is 22 | % SISSA Medialab 23 | % 24 | % This work consists of the file JHEP.bst. 25 | 26 | 27 | ENTRY 28 | { address 29 | author 30 | booktitle 31 | chapter 32 | edition 33 | editor 34 | howpublished 35 | institution 36 | journal 37 | key 38 | month 39 | note 40 | number 41 | organization 42 | pages 43 | publisher 44 | school 45 | series 46 | title 47 | doi 48 | SLACcitation 49 | type 50 | volume 51 | year 52 | archive 53 | eprint 54 | report 55 | collaboration 56 | } 57 | {} 58 | { label } 59 | 60 | INTEGERS { output.state before.all mid.sentence after.quote after.sentence 61 | after.quoted.block after.block } 62 | 63 | FUNCTION {init.state.consts} 64 | { #0 'before.all := 65 | #1 'mid.sentence := 66 | #2 'after.quote := 67 | #3 'after.sentence := 68 | #4 'after.quoted.block := 69 | #5 'after.block := 70 | } 71 | 72 | STRINGS { s t ref } 73 | 74 | FUNCTION {output.nonnull} 75 | { 's := 76 | output.state mid.sentence = 77 | { ", " * write$ } 78 | { output.state after.quote = 79 | { " " * write$ } 80 | { output.state after.block = 81 | { add.period$ write$ 82 | newline$ 83 | "\newblock " write$ 84 | } 85 | { output.state before.all = 86 | 'write$ 87 | { output.state after.quoted.block = 88 | { write$ 89 | newline$ 90 | "\newblock " write$ 91 | } 92 | { add.period$ " " * write$ } 93 | if$ 94 | } 95 | if$ 96 | } 97 | if$ 98 | } 99 | if$ 100 | mid.sentence 'output.state := 101 | } 102 | if$ 103 | s 104 | } 105 | 106 | FUNCTION {output} 107 | { duplicate$ empty$ 108 | 'pop$ 109 | 'output.nonnull 110 | if$ 111 | } 112 | 113 | FUNCTION {output.check} 114 | { 't := 115 | duplicate$ empty$ 116 | { pop$ "empty " t * " in " * cite$ * warning$ } 117 | 'output.nonnull 118 | if$ 119 | } 120 | 121 | FUNCTION {output.bibitem} 122 | { newline$ 123 | "\bibitem{" write$ 124 | cite$ write$ 125 | "}" write$ 126 | newline$ 127 | "" 128 | before.all 'output.state := 129 | } 130 | 131 | FUNCTION {blank.sep} 132 | { after.quote 'output.state 
:= 133 | } 134 | 135 | 136 | 137 | FUNCTION {fin.entry} 138 | { output.state after.quoted.block = 139 | 'skip$ 140 | 'add.period$ 141 | if$ 142 | write$ 143 | newline$ 144 | } 145 | 146 | FUNCTION {new.block} 147 | { output.state before.all = 148 | 'skip$ 149 | { output.state after.quote = 150 | { after.quoted.block 'output.state := } 151 | { after.block 'output.state := } 152 | if$ 153 | } 154 | if$ 155 | } 156 | 157 | FUNCTION {new.sentence} 158 | { output.state after.block = 159 | 'skip$ 160 | { output.state before.all = 161 | 'skip$ 162 | { after.sentence 'output.state := } 163 | if$ 164 | } 165 | if$ 166 | } 167 | 168 | FUNCTION {not} 169 | { { #0 } 170 | { #1 } 171 | if$ 172 | } 173 | 174 | FUNCTION {and} 175 | { 'skip$ 176 | { pop$ #0 } 177 | if$ 178 | } 179 | 180 | FUNCTION {or} 181 | { { pop$ #1 } 182 | 'skip$ 183 | if$ 184 | } 185 | 186 | FUNCTION {new.block.checka} 187 | { empty$ 188 | 'skip$ 189 | 'new.block 190 | if$ 191 | } 192 | 193 | FUNCTION {new.block.checkb} 194 | { empty$ 195 | swap$ empty$ 196 | and 197 | 'skip$ 198 | 'new.block 199 | if$ 200 | } 201 | 202 | FUNCTION {new.sentence.checka} 203 | { empty$ 204 | 'skip$ 205 | 'new.sentence 206 | if$ 207 | } 208 | 209 | FUNCTION {field.or.null} 210 | { duplicate$ empty$ 211 | { pop$ "" } 212 | 'skip$ 213 | if$ 214 | } 215 | 216 | FUNCTION {emphasize} 217 | { duplicate$ empty$ 218 | { pop$ "" } 219 | { "\emph{" swap$ * "}" * } 220 | if$ 221 | } 222 | 223 | %% this functions should append the correct url prefix to doi 224 | FUNCTION {format.doi} 225 | { doi empty$ 226 | { "" } 227 | {"\href{http://dx.doi.org/" doi * "}" * } 228 | if$ 229 | } 230 | 231 | FUNCTION {formatfull.doi} 232 | { doi empty$ 233 | { "" } 234 | {"\href{http://dx.doi.org/" doi * 235 | "}{DOI}" * } 236 | if$ 237 | } 238 | 239 | 240 | INTEGERS { nameptr namesleft numnames } 241 | 242 | FUNCTION {format.names} 243 | { 's := 244 | #1 'nameptr := 245 | s num.names$ 'numnames := 246 | numnames 'namesleft := 247 | { namesleft #0 > } 248 
| { s nameptr "{f.~}{vv~}{ll}{, jj}" format.name$ 't := 249 | nameptr #1 > 250 | { namesleft #1 > 251 | { ", " * t * } 252 | { numnames #2 > 253 | { "" * } 254 | 'skip$ 255 | if$ 256 | t "others" = 257 | { " et~al." * } 258 | { " and " * t * } 259 | if$ 260 | } 261 | if$ 262 | } 263 | nameptr #6 > 264 | { #0 'namesleft := 265 | "others" 't := 266 | 't 267 | } 268 | {'t} 269 | if$ 270 | if$ 271 | nameptr #1 + 'nameptr := 272 | namesleft #1 - 'namesleft := 273 | } 274 | while$ 275 | } 276 | 277 | 278 | FUNCTION {format.authors} 279 | { author empty$ 280 | { "" } 281 | { author format.names } 282 | if$ 283 | } 284 | 285 | FUNCTION {format.eprint} 286 | { eprint empty$ 287 | { ""} 288 | { archive empty$ 289 | {"\href{https://arxiv.org/abs/" eprint * "}" * 290 | "{{\ttfamily " * eprint * "}}" *} 291 | {"\href{https://arxiv.org/abs/" archive * "/" * eprint * "}" * 292 | "{{\ttfamily " * archive * "/" * eprint * "}}" *} 293 | if$ 294 | } 295 | if$ 296 | } 297 | 298 | FUNCTION {format.eprint.paren} 299 | { eprint missing$ { "" } { eprint empty$ { "" } 300 | {"[" format.eprint * "]" *} 301 | if$ 302 | } 303 | if$ 304 | } 305 | 306 | 307 | 308 | FUNCTION {format.report} 309 | { report empty$ 310 | { ""} 311 | { report} 312 | if$ 313 | } 314 | 315 | 316 | 317 | FUNCTION {format.editors} 318 | { editor empty$ 319 | { "" } 320 | { editor format.names 321 | editor num.names$ #1 > 322 | { ", eds." * } 323 | { ", ed." 
* } 324 | if$ 325 | } 326 | if$ 327 | } 328 | 329 | FUNCTION {format.title} 330 | { title empty$ 331 | { "" } 332 | { "\emph{" title "t" change.case$ * "}, " * } 333 | if$ 334 | } 335 | 336 | FUNCTION {format.title.p} 337 | { title empty$ 338 | { "" } 339 | { "``" title "t" change.case$ * ".''" * } 340 | if$ 341 | } 342 | 343 | FUNCTION {n.dashify} 344 | { 't := 345 | "" 346 | { t empty$ not } 347 | { t #1 #1 substring$ "-" = 348 | { t #1 #2 substring$ "--" = not 349 | { "--" * 350 | t #2 global.max$ substring$ 't := 351 | } 352 | { { t #1 #1 substring$ "-" = } 353 | { "-" * 354 | t #2 global.max$ substring$ 't := 355 | } 356 | while$ 357 | } 358 | if$ 359 | } 360 | { t #1 #1 substring$ * 361 | t #2 global.max$ substring$ 't := 362 | } 363 | if$ 364 | } 365 | while$ 366 | } 367 | 368 | FUNCTION {format.date} 369 | { year empty$ 370 | { month empty$ 371 | { "" } 372 | { "there's a month but no year in " cite$ * warning$ 373 | month 374 | } 375 | if$ 376 | } 377 | { month empty$ 378 | 'year 379 | { month ", " * year * } 380 | if$ 381 | } 382 | if$ 383 | } 384 | 385 | FUNCTION {format.date.paren} 386 | { year empty$ 387 | { month empty$ 388 | { "" } 389 | { "there's a month but no year in " cite$ * warning$ 390 | month 391 | } 392 | if$ 393 | } 394 | { month empty$ 395 | {"(" year * ")" *} 396 | {"(" month * ", " * year * ")" *} 397 | if$ 398 | } 399 | if$ 400 | } 401 | 402 | FUNCTION {format.collaboration} 403 | { collaboration empty$ 404 | { "" } 405 | { "{\scshape " collaboration * "} " * "collaboration" * } 406 | if$ 407 | } 408 | 409 | 410 | FUNCTION {format.btitle} 411 | { title emphasize 412 | } 413 | 414 | FUNCTION {tie.or.space.connect} 415 | { duplicate$ text.length$ #3 < 416 | { "~" } 417 | { " " } 418 | if$ 419 | swap$ * * 420 | } 421 | 422 | FUNCTION {either.or.check} 423 | { empty$ 424 | 'pop$ 425 | { "can't use both " swap$ * " fields in " * cite$ * warning$ } 426 | if$ 427 | } 428 | 429 | FUNCTION {format.bvolume} 430 | { volume empty$ 431 | { "" } 432 
| { "vol.~" volume * 433 | series empty$ 434 | 'skip$ 435 | { " of " * series emphasize * } 436 | if$ 437 | "volume and number" number either.or.check 438 | } 439 | if$ 440 | } 441 | 442 | FUNCTION {format.number.series} 443 | { volume empty$ 444 | { number empty$ 445 | { series field.or.null } 446 | { output.state mid.sentence = 447 | { "no.~" } 448 | { "No.~" } 449 | if$ 450 | number * 451 | series empty$ 452 | { "there's a number but no series in " cite$ * warning$ } 453 | { " in " * series * } 454 | if$ 455 | } 456 | if$ 457 | } 458 | { "" } 459 | if$ 460 | } 461 | 462 | FUNCTION {format.edition} 463 | { edition empty$ 464 | { "" } 465 | { edition "l" change.case$ "~ed." * } 466 | if$ 467 | } 468 | 469 | INTEGERS { multiresult } 470 | 471 | FUNCTION {multi.page.check} 472 | { 't := 473 | #0 'multiresult := 474 | { multiresult not 475 | t empty$ not 476 | and 477 | } 478 | { t #1 #1 substring$ 479 | duplicate$ "-" = 480 | swap$ duplicate$ "," = 481 | swap$ "+" = 482 | or or 483 | { #1 'multiresult := } 484 | { t #2 global.max$ substring$ 't := } 485 | if$ 486 | } 487 | while$ 488 | multiresult 489 | } 490 | 491 | FUNCTION {format.pages} 492 | { pages empty$ 493 | { "" } 494 | { pages multi.page.check 495 | { "pp.~" pages n.dashify * } 496 | { "p.~" pages * } 497 | if$ 498 | } 499 | if$ 500 | } 501 | 502 | FUNCTION {format.pages.nopp} 503 | { pages empty$ 504 | { "" } 505 | { pages multi.page.check 506 | { pages n.dashify } 507 | { pages } 508 | if$ 509 | } 510 | if$ 511 | } 512 | 513 | 514 | FUNCTION {format.volume} 515 | { volume empty$ 516 | { "" } 517 | { "{\bfseries " volume * "}" * } 518 | if$ 519 | } 520 | 521 | FUNCTION {format.number} 522 | { number empty$ 523 | { "" } 524 | { "no.~" number * } 525 | if$ 526 | } 527 | 528 | FUNCTION {format.chapter.pages} 529 | { chapter empty$ 530 | 'format.pages 531 | { type empty$ 532 | { "ch.~" chapter * } 533 | { type "l" change.case$ chapter tie.or.space.connect } 534 | if$ 535 | pages empty$ 536 | 'skip$ 537 | { 
", " * format.pages * } 538 | if$ 539 | } 540 | if$ 541 | } 542 | 543 | FUNCTION {format.in.ed.booktitle} 544 | { booktitle empty$ 545 | { "" } 546 | { "in " booktitle emphasize * 547 | editor empty$ 548 | 'skip$ 549 | { " (" * format.editors * ")" * } 550 | if$ 551 | } 552 | if$ 553 | } 554 | 555 | FUNCTION {format.thesis.type} 556 | { type empty$ 557 | 'skip$ 558 | { pop$ 559 | output.state after.block = 560 | { type "t" change.case$ } 561 | { type "l" change.case$ } 562 | if$ 563 | } 564 | if$ 565 | } 566 | 567 | FUNCTION {empty.misc.check} 568 | { author empty$ title empty$ howpublished empty$ 569 | month empty$ year empty$ note empty$ 570 | and and and and and 571 | { "all relevant fields are empty in " cite$ * warning$ } 572 | 'skip$ 573 | if$ 574 | } 575 | 576 | FUNCTION {format.tr.number} 577 | { type empty$ 578 | { "Tech. Rep." } 579 | 'type 580 | if$ 581 | number empty$ 582 | { "l" change.case$ } 583 | { number tie.or.space.connect } 584 | if$ 585 | } 586 | 587 | FUNCTION {format.paddress} 588 | { address empty$ 589 | { "" } 590 | { "(" address * ")" * } 591 | if$ 592 | } 593 | 594 | FUNCTION {format.article.crossref} 595 | { key empty$ 596 | { journal empty$ 597 | { "need key or journal for " cite$ * " to crossref " * crossref * 598 | warning$ 599 | "" 600 | } 601 | { "in \emph{" journal * "\/}" * } 602 | if$ 603 | } 604 | { "in " key * } 605 | if$ 606 | " \cite{" * crossref * "}" * 607 | } 608 | 609 | FUNCTION {format.crossref.editor} 610 | { editor #1 "{vv~}{ll}" format.name$ 611 | editor num.names$ duplicate$ 612 | #2 > 613 | { pop$ " {et~al.}" * } 614 | { #2 < 615 | 'skip$ 616 | { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" = 617 | { " {et~al.}" * } 618 | { " and " * editor #2 "{vv~}{ll}" format.name$ * } 619 | if$ 620 | } 621 | if$ 622 | } 623 | if$ 624 | } 625 | 626 | FUNCTION {format.book.crossref} 627 | { volume empty$ 628 | { "empty volume in " cite$ * "'s crossref of " * crossref * warning$ 629 | "In " 630 | } 631 | { "Vol.~" volume * 
632 | " of " * 633 | } 634 | if$ 635 | editor empty$ 636 | editor field.or.null author field.or.null = 637 | or 638 | { key empty$ 639 | { series empty$ 640 | { "need editor, key, or series for " cite$ * " to crossref " * 641 | crossref * warning$ 642 | "" * 643 | } 644 | { "{\em " * series * "\/}" * } 645 | if$ 646 | } 647 | { key * } 648 | if$ 649 | } 650 | { format.crossref.editor * } 651 | if$ 652 | " \cite{" * crossref * "}" * 653 | } 654 | 655 | FUNCTION {format.incoll.inproc.crossref} 656 | { editor empty$ 657 | editor field.or.null author field.or.null = 658 | or 659 | { key empty$ 660 | { booktitle empty$ 661 | { "need editor, key, or booktitle for " cite$ * " to crossref " * 662 | crossref * warning$ 663 | "" 664 | } 665 | { "in {\em " booktitle * "\/}" * } 666 | if$ 667 | } 668 | { "in " key * } 669 | if$ 670 | } 671 | { "in " format.crossref.editor * } 672 | if$ 673 | " \cite{" * crossref * "}" * 674 | } 675 | 676 | FUNCTION {article} 677 | { output.bibitem 678 | format.collaboration output 679 | format.authors "author" output.check 680 | format.title "title" output.check 681 | blank.sep 682 | crossref missing$ 683 | { journal missing$ 684 | { format.eprint output } 685 | { journal empty$ { format.eprint output } { 686 | format.doi * "{" * journal emphasize before.all 'output.state := "journal" output.check 687 | % added \href{doi} and { before journal 688 | % Slv 689 | blank.sep 690 | format.volume output 691 | blank.sep 692 | format.date.paren "year" output.check 693 | %month empty$ { format.number output } 694 | % 'skip$ if$ 695 | blank.sep 696 | format.pages.nopp "}" * output } 697 | %% closed parenthesis for href argument 698 | if$ 699 | } 700 | if$ 701 | report missing$ 702 | { journal empty$ {} { format.eprint.paren output} if$ } 703 | {blank.sep format.report output format.eprint.paren output} 704 | if$ 705 | } 706 | { format.article.crossref output.nonnull 707 | format.pages output 708 | format.eprint.paren output 709 | } 710 | if$ 711 | 
new.sentence 712 | % format.doi output 713 | % note output 714 | fin.entry 715 | } 716 | 717 | FUNCTION {book} 718 | { output.bibitem 719 | format.collaboration output 720 | author empty$ 721 | { format.editors "author and editor" output.check } 722 | { format.authors output.nonnull 723 | crossref missing$ 724 | { "author and editor" editor either.or.check } 725 | 'skip$ 726 | if$ 727 | } 728 | if$ 729 | format.btitle "title" output.check 730 | crossref missing$ 731 | { format.bvolume output 732 | new.block 733 | format.number.series output 734 | new.sentence 735 | publisher "publisher" output.check 736 | address output 737 | } 738 | { new.block 739 | format.book.crossref output.nonnull 740 | } 741 | if$ 742 | format.edition output 743 | format.date "year" output.check 744 | doi empty$ 745 | {} 746 | { format.doi "{" * doi * "}" * "DOI" output.check } 747 | if$ 748 | fin.entry 749 | } 750 | 751 | FUNCTION {booklet} 752 | { output.bibitem 753 | format.collaboration output 754 | format.authors output 755 | title empty$ 756 | { "empty title in " cite$ * warning$ 757 | howpublished new.sentence.checka 758 | } 759 | { howpublished empty$ not 760 | address empty$ month empty$ year empty$ and and 761 | or 762 | { format.title.p output.nonnull } 763 | { format.title output.nonnull } 764 | if$ 765 | blank.sep 766 | } 767 | if$ 768 | howpublished output 769 | address output 770 | format.date output 771 | new.block 772 | % note output 773 | doi output 774 | fin.entry 775 | } 776 | 777 | FUNCTION {inbook} 778 | { output.bibitem 779 | format.collaboration output 780 | author empty$ 781 | { format.editors "author and editor" output.check } 782 | { format.authors output.nonnull 783 | crossref missing$ 784 | { "author and editor" editor either.or.check } 785 | 'skip$ 786 | if$ 787 | } 788 | if$ 789 | format.btitle "title" output.check 790 | crossref missing$ 791 | { format.bvolume output 792 | format.chapter.pages "chapter and pages" output.check 793 | new.block 794 | 
format.number.series output 795 | new.block 796 | publisher "publisher" output.check 797 | address output 798 | } 799 | { format.chapter.pages "chapter and pages" output.check 800 | new.block 801 | format.book.crossref output.nonnull 802 | } 803 | if$ 804 | format.edition output 805 | format.date "year" output.check 806 | new.block 807 | format.eprint output 808 | new.block 809 | % note output 810 | doi output 811 | fin.entry 812 | } 813 | 814 | FUNCTION {incollection} 815 | { output.bibitem 816 | format.collaboration output 817 | format.authors "author" output.check 818 | format.title "title" output.check 819 | blank.sep 820 | crossref missing$ 821 | { format.in.ed.booktitle "booktitle" output.check 822 | format.bvolume output 823 | format.number.series output 824 | format.chapter.pages output 825 | new.block 826 | publisher "publisher" output.check 827 | address output 828 | format.edition output 829 | format.date "year" output.check 830 | } 831 | { format.incoll.inproc.crossref output.nonnull 832 | format.chapter.pages output 833 | } 834 | if$ 835 | new.block 836 | format.eprint output 837 | new.block 838 | % note output 839 | formatfull.doi output 840 | fin.entry 841 | } 842 | 843 | FUNCTION {inproceedings} 844 | { output.bibitem 845 | format.collaboration output 846 | format.authors "author" output.check 847 | format.title "title" output.check 848 | blank.sep 849 | crossref missing$ 850 | { format.in.ed.booktitle "booktitle" output.check 851 | format.bvolume output 852 | format.number.series output 853 | format.paddress output 854 | format.pages output 855 | organization output 856 | publisher output 857 | format.date "year" output.check 858 | } 859 | { format.incoll.inproc.crossref output.nonnull 860 | format.pages output 861 | } 862 | if$ 863 | new.block 864 | format.eprint output 865 | new.block 866 | % note output 867 | formatfull.doi output 868 | fin.entry 869 | } 870 | 871 | FUNCTION {conference} { inproceedings } 872 | 873 | FUNCTION {manual} 874 | { 
output.bibitem 875 | format.collaboration output 876 | author empty$ 877 | { organization empty$ 878 | 'skip$ 879 | { organization output.nonnull 880 | address output 881 | } 882 | if$ 883 | } 884 | { format.authors output.nonnull } 885 | if$ 886 | format.btitle "title" output.check 887 | author empty$ 888 | { organization empty$ 889 | { address new.block.checka 890 | address output 891 | } 892 | 'skip$ 893 | if$ 894 | } 895 | { organization address new.block.checkb 896 | organization output 897 | address output 898 | } 899 | if$ 900 | format.edition output 901 | format.date output 902 | new.block 903 | % note output 904 | doi output 905 | fin.entry 906 | } 907 | 908 | FUNCTION {electronic} { manual } 909 | 910 | FUNCTION {mastersthesis} 911 | { output.bibitem 912 | format.authors "author" output.check 913 | format.title "title" output.check 914 | blank.sep 915 | "Master's thesis" format.thesis.type output.nonnull 916 | school "school" output.check 917 | address output 918 | format.date "year" output.check 919 | new.block 920 | % note output 921 | doi output 922 | fin.entry 923 | } 924 | 925 | FUNCTION {misc} 926 | { output.bibitem 927 | format.collaboration output 928 | format.authors output 929 | title empty$ 930 | { howpublished new.sentence.checka } 931 | { howpublished empty$ not 932 | month empty$ year empty$ and 933 | or 934 | { format.title.p output.nonnull } 935 | { format.title output.nonnull } 936 | if$ 937 | blank.sep 938 | } 939 | if$ 940 | howpublished output 941 | format.date output 942 | new.block 943 | % note output 944 | doi output 945 | fin.entry 946 | empty.misc.check 947 | } 948 | 949 | FUNCTION {phdthesis} 950 | { output.bibitem 951 | format.authors "author" output.check 952 | format.btitle "title" output.check 953 | new.block 954 | "PhD thesis" format.thesis.type output.nonnull 955 | school "school" output.check 956 | address output 957 | format.date "year" output.check 958 | new.block 959 | format.eprint output 960 | new.block 961 | % note 
output 962 | doi output 963 | fin.entry 964 | } 965 | 966 | FUNCTION {proceedings} 967 | { output.bibitem 968 | editor empty$ 969 | { organization output } 970 | { format.editors output.nonnull } 971 | if$ 972 | format.btitle "title" output.check 973 | format.bvolume output 974 | format.number.series output 975 | format.paddress output 976 | editor empty$ 977 | 'skip$ 978 | { organization output } 979 | if$ 980 | publisher output 981 | format.date "year" output.check 982 | new.block 983 | % note output 984 | doi output 985 | fin.entry 986 | } 987 | 988 | FUNCTION {techreport} 989 | { output.bibitem 990 | format.collaboration output 991 | format.authors "author" output.check 992 | format.title "title" output.check 993 | blank.sep 994 | format.tr.number output.nonnull 995 | institution "institution" output.check 996 | address output 997 | format.date "year" output.check 998 | new.block 999 | % note output 1000 | doi output 1001 | fin.entry 1002 | } 1003 | 1004 | FUNCTION {unpublished} 1005 | { output.bibitem 1006 | format.collaboration output 1007 | format.authors "author" output.check 1008 | format.title.p "title" output.check 1009 | blank.sep 1010 | % note "note" output.check 1011 | format.date output 1012 | fin.entry 1013 | } 1014 | 1015 | FUNCTION {default.type} { misc } 1016 | 1017 | MACRO {jan} {"Jan."} 1018 | 1019 | MACRO {feb} {"Feb."} 1020 | 1021 | MACRO {mar} {"Mar."} 1022 | 1023 | MACRO {apr} {"Apr."} 1024 | 1025 | MACRO {may} {"May"} 1026 | 1027 | MACRO {jun} {"June"} 1028 | 1029 | MACRO {jul} {"July"} 1030 | 1031 | MACRO {aug} {"Aug."} 1032 | 1033 | MACRO {sep} {"Sept."} 1034 | 1035 | MACRO {oct} {"Oct."} 1036 | 1037 | MACRO {nov} {"Nov."} 1038 | 1039 | MACRO {dec} {"Dec."} 1040 | 1041 | MACRO {nup} {"Nucl. Phys."} 1042 | 1043 | MACRO {cmp} {"Comm. Math. Phys."} 1044 | 1045 | MACRO {prl} {"Phys. Rev. Lett."} 1046 | 1047 | MACRO {pl} {"Phys. Lett."} 1048 | 1049 | MACRO {rmp} {"Rev. Mod. Phys."} 1050 | 1051 | MACRO {ijmp} {"Int. Jour. Mod. 
Phys."} 1052 | 1053 | MACRO {mpl} {"Mod. Phys. Lett."} 1054 | 1055 | MACRO {pr} {"Phys. Rev."} 1056 | 1057 | READ 1058 | 1059 | STRINGS { longest.label } 1060 | 1061 | INTEGERS { number.label longest.label.width } 1062 | 1063 | FUNCTION {initialize.longest.label} 1064 | { "" 'longest.label := 1065 | #1 'number.label := 1066 | #0 'longest.label.width := 1067 | } 1068 | 1069 | FUNCTION {longest.label.pass} 1070 | { number.label int.to.str$ 'label := 1071 | number.label #1 + 'number.label := 1072 | label width$ longest.label.width > 1073 | { label 'longest.label := 1074 | label width$ 'longest.label.width := 1075 | } 1076 | 'skip$ 1077 | if$ 1078 | } 1079 | 1080 | EXECUTE {initialize.longest.label} 1081 | 1082 | ITERATE {longest.label.pass} 1083 | 1084 | FUNCTION {begin.bib} 1085 | { preamble$ empty$ 1086 | 'skip$ 1087 | { preamble$ write$ newline$ } 1088 | if$ 1089 | newline$ 1090 | "\providecommand{\href}[2]{#2}" 1091 | "\begingroup\raggedright\begin{thebibliography}{" * longest.label * 1092 | "}" * write$ newline$ } 1093 | 1094 | EXECUTE {begin.bib} 1095 | 1096 | EXECUTE {init.state.consts} 1097 | 1098 | ITERATE {call.type$} 1099 | 1100 | FUNCTION {end.bib} 1101 | { newline$ 1102 | "\end{thebibliography}\endgroup" write$ newline$ 1103 | } 1104 | 1105 | EXECUTE {end.bib} 1106 | -------------------------------------------------------------------------------- /HEPML.tex: -------------------------------------------------------------------------------- 1 | \documentclass[12pt,letterpaper]{article} 2 | \usepackage{jheppub} 3 | 4 | %\usepackage[hmargin=1.0in,vmargin=1.0in]{geometry} 5 | %\usepackage{cite} 6 | \usepackage[usenames,dvipsnames]{xcolor} % For colors and names for color boxed links 7 | % hyperref included through jheppub 8 | \hypersetup{ 9 | colorlinks=false, % Surround the links by color frames (false) or colors the text of the links (true) 10 | citecolor=blue, % Color of citation links 11 | filecolor=black, % Color of file links 12 | linkcolor=red, % Color 
of internal links (sections, pages, etc.) 13 | urlcolor=black, % Color of url hyperlinks 14 | linkbordercolor=red, % Color of links to bibliography 15 | citebordercolor=blue, % Color of file links 16 | urlbordercolor=blue % Color of external links 17 | } 18 | % c.f.: 19 | % http://inspirehep.net/info/faq/general#utf8 20 | % https://tex.stackexchange.com/questions/172421/how-to-easily-use-utf-8-with-latex 21 | %\usepackage{fontspec} 22 | 23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 24 | % Document body 25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 26 | 27 | \title{\boldmath A Living Review of Machine \\ Learning for Particle Physics} 28 | 29 | \abstract{ 30 | Modern machine learning techniques, including deep learning, are rapidly being applied, adapted, and developed for high energy physics. The goal of this document is to provide a nearly comprehensive list of citations for those developing and applying these approaches to experimental, phenomenological, or theoretical analyses. As a living document, it will be updated as often as possible to incorporate the latest developments. A list of proper (unchanging) reviews can be found within. Papers are grouped into a small set of topics to be as useful as possible. Suggestions are most welcome. 31 | } 32 | 33 | \begin{document} 34 | \maketitle 35 | 36 | The purpose of this note is to collect references for modern machine learning as applied to particle physics. A minimal number of categories is chosen in order to be as useful as possible. Note that papers may be referenced in more than one category. The fact that a paper is listed in this document does not endorse or validate its content - that is for the community (and for peer-review) to decide. Furthermore, the classification here is a best attempt and may have flaws - please let us know if (a) we have missed a paper you think should be included, (b) a paper has been misclassified, or (c) a citation for a paper is not correct or if the journal information is now available. 
In order to be as useful as possible, this document will continue to evolve so please check back\footnote{See \href{https://github.com/iml-wg/HEPML-LivingReview}{https://github.com/iml-wg/HEPML-LivingReview}.} before you write your next paper. You can simply download the .bib file to get all of the latest references. Please consider citing Ref.~\cite{Feickert:2021ajf} when referring to this living review. 37 | 38 | \begin{itemize} 39 | \item \textbf{Reviews} 40 | \\\textit{Below are links to many (static) general and specialized reviews. The third bullet contains links to classic papers that applied shallow learning methods many decades before the deep learning revolution.} 41 | \begin{itemize} 42 | \item Modern reviews~\cite{Larkoski:2017jix,Guest:2018yhq,Albertsson:2018maf,Radovic:2018dip,Carleo:2019ptp,Bourilkov:2019yoi,Schwartz:2021ftp} 43 | \item Specialized reviews~\cite{Kasieczka:2019dbj,1807719,1808887,Psihas:2020pby,Butter:2020tvl,Forte:2020yip,Brehmer:2020cvb,Nachman:2020ccu,Duarte:2020ngm,Vlimant:2020enz,Cranmer:2019eaq,Rousseau:2020rnz,Kagan:2020yrm,Guan:2020bdl,deLima:2021fwm,Alanazi:2021grv} 44 | \item Classical papers~\cite{Denby:1987rk,Lonnblad:1990bi} 45 | \item Datasets~\cite{Kasieczka:2021xcg,Aarrestad:2021oeb,Benato:2021olt,Govorkova:2021hqu,Chen:2021euv} 46 | \end{itemize} 47 | \item \textbf{Classification} 48 | \\\textit{Given a feature space $x\in\mathbb{R}^n$, a binary classifier is a function $f:\mathbb{R}^n\rightarrow [0,1]$, where $0$ corresponds to features that are more characteristic of the zeroth class (e.g. background) and $1$ correspond to features that are more characteristic of the one class (e.g. signal). Typically, $f$ will be a function specified by some parameters $w$ (e.g. weights and biases of a neural network) that are determined by minimizing a loss of the form $L[f]=\sum_{i}\ell(f(x_i),y_i)$, where $y_i\in\{0,1\}$ are labels. The function $\ell$ is smaller when $f(x_i)$ and $y_i$ are closer. 
Two common loss functions are the mean squared error $\ell(x,y)=(x-y)^2$ and the binary cross entropy $\ell(x,y)=-y\log(x)-(1-y)\log(1-x)$. Exactly what `more characteristic of' means depends on the loss function used to determine $f$. It is also possible to make a multi-class classifier. A common strategy for the multi-class case is to represent each class as a different basis vector in $\mathbb{R}^{n_\text{classes}}$ and then $f(x)\in[0,1]^{n_\text{classes}}$. In this case, $f(x)$ is usually restricted to have its $n_\text{classes}$ components sum to one and the loss function is typically the cross entropy $\ell(x,y)=-\sum_\text{classes $i$} y_i\log(x_i)$.} 49 | \begin{itemize} 50 | \item \textbf{Parameterized classifiers}~\cite{Baldi:2016fzo,Cranmer:2015bka,Nachman:2021yvi}. 51 | \\\textit{A classifier that is conditioned on model parameters $f(x|\theta)$ is called a parameterized classifier.} 52 | \item \textbf{Representations} 53 | \\\textit{There is no unique way to represent high energy physics data. It is often natural to encode $x$ as an image or another one of the structures listed below.} 54 | \begin{itemize} 55 | \item \textbf{Jet images}~\cite{Pumplin:1991kc,Cogan:2014oua,Almeida:2015jua,deOliveira:2015xxd,ATL-PHYS-PUB-2017-017,Lin:2018cin,Komiske:2018oaa,Barnard:2016qma,Komiske:2016rsd,Kasieczka:2017nvn,Macaluso:2018tck,li2020reconstructing,li2020attention,Lee:2019cad,collado2021learning,Du:2020pmp,Filipek:2021qbe} 56 | \\\textit{Jets are collimated sprays of particles. They have a complex radiation pattern and as such, have been a prototypical example for many machine learning studies. See the next item for a specific description about images.} 57 | \item \textbf{Event images}~\cite{Nguyen:2018ugw,ATL-PHYS-PUB-2019-028,Lin:2018cin,Andrews:2018nwy,Chung:2020ysf,Du:2019civ,Andrews:2021ejw,Pol:2021iqw} 58 | \\\textit{A grayscale image is a regular grid with a scalar value at each grid point. `Color' images have a fixed-length vector at each grid point. 
Many detectors are analogous to digital cameras and thus images are a natural representation. In other cases, images can be created by discretizing. Convolutional neural networks are natural tools for processing image data. One downside of the image representation is that high energy physics data tend to be sparse, unlike natural images.} 59 | \item \textbf{Sequences}~\cite{Guest:2016iqz,Nguyen:2018ugw,Bols:2020bkb,goto2021development,deLima:2021fwm,ATL-PHYS-PUB-2017-003} 60 | \\\textit{Data that have a variable with a particular order may be represented as a sequence. Recurrent neural networks are natural tools for processing sequence data. } 61 | \item \textbf{Trees}~\cite{Louppe:2017ipp,Cheng:2017rdo} 62 | \\\textit{Recursive neural networks are natural tools for processing data in a tree structure.} 63 | \item \textbf{Graphs}~\cite{Henrion:DLPS2017,Ju:2020xty,Abdughani:2018wrw,Martinez:2018fwc,Ren:2019xhp,Moreno:2019bmu,Qasim:2019otl,Chakraborty:2019imr,Chakraborty:2020yfc,1797439,1801423,1808887,Iiyama:2020wap,1811770,Choma:2020cry,alonsomonsalve2020graph,guo2020boosted,Heintz:2020soy,Verma:2020gnq,Dreyer:2020brq,Qian:2021vnh,Pata:2021oez,Biscarat:2021dlj,Rossi:2021tjf,Hewes:2021heg,Thais:2021qcb,Dezoort:2021kfk,Verma:2021ceh,Hariri:2021clz,Belavin:2021bxb,Atkinson:2021nlt} 64 | \\\textit{A graph is a collection of nodes and edges. Graph neural networks are natural tools for processing data in a graph structure.} 65 | \item \textbf{Sets (point clouds)}~\cite{Komiske:2018cqr,Qu:2019gqs,Mikuni:2020wpr,Shlomi:2020ufi,Dolan:2020qkr,Fenton:2020woz,Lee:2020qil,collado2021learning,Mikuni:2021pou,Shmakov:2021qdz,Shimmin:2021pkm,ATL-PHYS-PUB-2020-014} 66 | \\\textit{A point cloud is a (potentially variable-size) set of points in space. Sets are distinguished from sequences in that there is no particular order (i.e. permutation invariance). 
Sets can also be viewed as graphs without edges and so graph methods that can parse variable-length inputs may also be appropriate for set learning, although there are other methods as well.} 67 | \item \textbf{Physics-inspired basis}~\cite{Datta:2019,Datta:2017rhs,Datta:2017lxt,Komiske:2017aww,Butter:2017cot,Grojean:2020ech} 68 | \\\textit{This is a catch-all category for learning using other representations that use some sort of manual or automated physics-preprocessing.} 69 | \end{itemize} 70 | \item Targets 71 | \begin{itemize} 72 | \item \textbf{$W/Z$ tagging}~\cite{deOliveira:2015xxd,Barnard:2016qma,Louppe:2017ipp,Sirunyan:2020lcu,Chen:2019uar,1811770,Dreyer:2020brq,Kim:2021gtv} 73 | \\\textit{Boosted, hadronically decaying $W$ and $Z$ bosons form jets that are distinguished from generic quark and gluon jets by their mass near the boson mass and their two-prong substructure.} 74 | \item \textbf{$H\rightarrow b\bar{b}$}~\cite{Datta:2019ndh,Lin:2018cin,Moreno:2019neq,Chakraborty:2019imr,Sirunyan:2020lcu,Chung:2020ysf,Tannenwald:2020mhq,guo2020boosted,Abbas:2020khd,Jang:2021eph,Khosa:2021cyk} 75 | \\\textit{Due to the fidelity of $b$-tagging, boosted, hadronically decaying Higgs bosons (predominantly decaying to $b\bar{b}$) have unique challenges and opportunities compared with $W/Z$ tagging.} 76 | \item \textbf{quarks and gluons}~\cite{ATL-PHYS-PUB-2017-017,Komiske:2016rsd,Cheng:2017rdo,Stoye:DLPS2017,Chien:2018dfn,Moreno:2019bmu,Kasieczka:2018lwf,1806025,Lee:2019ssx,Lee:2019cad,Dreyer:2020brq,Romero:2021qlf,Filipek:2021qbe} 77 | \\\textit{Quark jets tend to be narrower and have fewer particles than gluon jets. 
This classification task has been a benchmark for many new machine learning models.} 78 | \item \textbf{top quark} tagging~\cite{Almeida:2015jua,Stoye:DLPS2017,Kasieczka:2019dbj,Chakraborty:2020yfc,Diefenbacher:2019ezd,Butter:2017cot,Kasieczka:2017nvn,Macaluso:2018tck,Bhattacharya:2020vzu,Lim:2020igi,Dreyer:2020brq,Aguilar-Saavedra:2021rjk,Andrews:2021ejw} 79 | \\\textit{Boosted top quarks form jets that have a three-prong substructure ($t\rightarrow Wb,W\rightarrow q\bar{q}$).} 80 | \item \textbf{strange jets}~\cite{Nakai:2020kuu,Erdmann:2019blf,Erdmann:2020ovh} 81 | \\\textit{Strange quarks have a very similar fragmentation to generic quark and gluon jets, so this is a particularly challenging task.} 82 | \item \textbf{$b$-tagging}~\cite{Sirunyan:2017ezt,Guest:2016iqz,bielkov2020identifying,Bols:2020bkb,ATL-PHYS-PUB-2017-003,ATL-PHYS-PUB-2020-014} 83 | \\\textit{Due to their long (but not too long) lifetime, the $B$-hadron lifetime is macroscopic and $b$-jet tagging has been one of the earliest adapters of modern machine learning tools.} 84 | \item \textbf{Flavor physics}~\cite{1811097} 85 | \\\textit{This category is for studies related to exclusive particle decays, especially with bottom and charm hadrons.} 86 | \item \textbf{BSM particles and models}~\cite{Datta:2019ndh,Baldi:2014kfa,Chakraborty:2019imr,10.1088/2632-2153/ab9023,1792136,1801423,Chang:2020rtc,Cogollo:2020afo,Grossi:2020orx,Ngairangbam:2020ksz,Englert:2020ntw,Freitas:2020ttd,Khosa:2019kxd,Freitas:2019hbk,Stakia:2021pvp,Arganda:2021azw,Jorge:2021vpo,Ren:2021prq,Barron:2021btf,Yang:2021gge,Alvestad:2021sje,Morais:2021ead,Jung:2021tym,Drees:2021oew} 87 | \\\textit{There are many proposals to train classifiers to enhance the presence of particular new physics models.} 88 | \item \textbf{Particle identification}~\cite{deOliveira:2018lqd,Paganini:DLPS2017,Hooberman:DLPS2017,Belayneh:2019vyx,Qasim:2019otl,Collado:2020fwm,Verma:2021ixg} 89 | \\\textit{This is a generic category for direct particle 
identification and categorization using various detector technologies. Direct means that the particle directly interacts with the detector (in contrast with $b$-tagging).} 90 | \item \textbf{Neutrino Detectors}~\cite{Adams:2018bvi,Aurisano:2016jvx,Acciarri:2016ryt,Hertel:DLPS2017,Aiello:2020orq,Adams:2020vlj,Domine:2020tlx,1805474,1808859,Psihas:2020pby,alonsomonsalve2020graph,Abratenko:2020pbp,Clerbaux:2020ttg,Liu:2020pzv,Abratenko:2020ocq,Chen:2020zkj,Qian:2021vnh,abbasi2021convolutional,Drielsma:2021jdv,Rossi:2021tjf,Hewes:2021heg,Acciarri:2021oav,Belavin:2021bxb,Maksimovic:2021dmz,Gavrikov:2021ktt,Garcia-Mendez:2021vts} 91 | \\\textit{Neutrino detectors are very large in order to have a sizable rate of neutrino detection. The entire neutrino interaction can be characterized to distinguish different neutrino flavors.} 92 | \item \textbf{Direct Dark Matter Detectors}~\cite{Ilyasov_2020,Akerib:2020aws,Khosa:2019qgp,Golovatiuk:2021lqn,McDonald:2021hus} 93 | \\\textit{Dark matter detectors are similar to neutrino detectors, but aim to achieve `zero' background.} 94 | \item \textbf{Cosmology, Astro Particle, and Cosmic Ray physics}~\cite{Ostdiek:2020cqz,Brehmer:2019jyt,Tsai:2020vcx,Verma:2020gnq,Aab:2021rcn,Balazs:2021uhg,gonzalez2021tackling,Conceicao:2021xgn,huang2021convolutionalneuralnetwork,Droz:2021wnh,Han:2021kjx,Arjona:2021hmg,1853992,Shih:2021kbt,Ikeda:2021sxm,Aizpuru:2021vhd,Vago:2021grx,List:2021aer,Kahlhoefer:2021sha,Sabiu:2021aea} 95 | \\\textit{Machine learning is often used in astrophysics and cosmology in different ways than terrestrial particle physics experiments due to a general divide between Bayesian and Frequentist statistics. However, there are many similar tasks and a growing number of proposals designed for one domain that apply to the other. 
See also https://github.com/georgestein/ml-in-cosmology.} 96 | \item \textbf{Tracking}~\cite{Farrell:DLPS2017,Farrell:2018cjr,Amrouche:2019wmx,Ju:2020xty,Akar:2020jti,Shlomi:2020ufi,Choma:2020cry,Siviero:2020tim,Fox:2020hfm,Amrouche:2021tlm,goto2021development,Biscarat:2021dlj,Akar:2021gns,Thais:2021qcb,Ju:2021ayy,Dezoort:2021kfk,Edmonds:2021lzd,Lavrik:2021zgt,Huth:2021zcm} 97 | \\\textit{Charged particle tracking is a challenging pattern recognition task. This category is for various classification tasks associated with tracking, such as seed selection.} 98 | \item \textbf{Heavy Ions / Nuclear Physics}~\cite{Pang:2016vdc,Chien:2018dfn,Du:2020pmp,Du:2019civ,Mallick:2021wop,Nagu:2021zho,Zhao:2021yjo,Sombillo:2021ifs,Zhou:2021bvw,Apolinario:2021olp,Brown:2021upr,Du:2021pqa,Kuttan:2021npg,Huang:2021iux,Shokr:2021ouh,He:2021uko,Habashy:2021orz} 99 | \\\textit{Many tools in high energy nuclear physics are similar to high energy particle physics. The physics target of these studies are to understand collective properties of the strong force.} 100 | \end{itemize} 101 | \item \textbf{Learning strategies} 102 | \\\textit{There is no unique way to train a classifier and designing an effective learning strategy is often one of the biggest challenges for achieving optimality.} 103 | \begin{itemize} 104 | \item \textbf{Hyperparameters}~\cite{Tani:2020dyi,Dudko:2021cie} 105 | \\\textit{In addition to learnable weights $w$, classifiers have a number of non-differentiable parameters like the number of layers in a neural network. These parameters are called hyperparameters.} 106 | \item \textbf{Weak/Semi supervision}~\cite{Dery:2017fap,Metodiev:2017vrx,Komiske:2018oaa,Collins:2018epr,Collins:2019jip,Borisyak:2019vbz,Cohen:2017exh,Komiske:2018vkc,Metodiev:2018ftz,collaboration2020dijet,Amram:2020ykb,Brewer:2020och,Dahbi:2020zjw,Lee:2019ssx,Lieberman:2021krq} 107 | \\\textit{For supervised learning, the labels $y_i$ are known. 
In the case that the labels are noisy or only known with some uncertainty, then the learning is called weak supervision. Semi-supervised learning is the related case where labels are known for only a fraction of the training examples.} 108 | \item \textbf{Unsupervised}~\cite{Mackey:2015hwa,Komiske:2019fks,1797846,Dillon:2019cqt,Cai:2020vzx,Howard:2021pos,Dillon:2021gag} 109 | \\\textit{When no labels are provided, the learning is called unsupervised.} 110 | \item \textbf{Reinforcement Learning}~\cite{Carrazza:2019efs,Brehmer:2020brs,John:2020sak,Harvey:2021oue,Cranmer:2021gdt} 111 | \\\textit{Instead of learning to distinguish different types of examples, the goal of reinforcement learning is to learn a strategy (policy). The prototypical example of reinforcement learning in learning a strategy to play video games using some kind of score as a feedback during the learning.} 112 | \item \textbf{Quantum Machine Learning}~\cite{Mott:2017xdb,Zlokapa:2019lvv,Blance:2020nhl,Terashi:2020wfi,Chen:2020zkj,Wu:2020cye,Guan:2020bdl,Chen:2021ouz,Blance:2021gcs,Heredge:2021vww,Wu:2021xsj,Belis:2021zqi,Araz:2021ifk} 113 | \\\textit{Quantum computers are based on unitary operations applied to quantum states. 
These states live in a vast Hilbert space which may have a usefully large information capacity for machine learning.} 114 | \item \textbf{Feature ranking}~\cite{Faucett:2020vbu,Grojean:2020ech} 115 | \\\textit{It is often useful to take a set of input features and rank them based on their usefulness.} 116 | \item \textbf{Attention}~\cite{goto2021development} 117 | \\\textit{This is an ML tool for helping the network to focus on particularly useful features.} 118 | \item \textbf{Regularization}~\cite{Araz:2021wqm} 119 | \\\textit{This is a term referring to any learning strategy that improves the robustness of a classifier to statistical fluctuations in the data and in the model initialization.} 120 | \end{itemize} 121 | \item \textbf{Fast inference / deployment} 122 | \\\textit{There are many practical issues that can be critical for the actual application of machine learning models.} 123 | \begin{itemize} 124 | \item \textbf{Software}~\cite{Strong:2020mge,Gligorov:2012qt,Weitekamp:DLPS2017,Nguyen:2018ugw,Bourgeois:2018nvk,1792136,Balazs:2021uhg,Rehm:2021zow,Mahesh:2021iph,Amrouche:2021tio,Pol:2021iqw} 125 | \\\textit{Strategies for efficient inference for a given hardware architecture.} 126 | \item \textbf{Hardware/firmware}~\cite{Duarte:2018ite,DiGuglielmo:2020eqx,Summers:2020xiy,1808088,Iiyama:2020wap,Mohan:2020vvi,Carrazza:2020qwu,Rankin:2020usv,Heintz:2020soy,Rossi:2020sbh,Aarrestad:2021zos,Hawks:2021ruw,Teixeira:2021yhl,Hong:2021snb,DiGuglielmo:2021ide,Migliorini:2021fuj,Govorkova:2021utb} 127 | \\\textit{Various accelerators have been studied for fast inference that is very important for latency-limited applications like the trigger at collider experiments.} 128 | \item \textbf{Deployment}~\cite{Kuznetsov:2020mcj,SunnebornGudnadottir:2021nhk} 129 | \\\textit{This category is for the deployment of machine learning interfaces, such as in the cloud.} 130 | \end{itemize} 131 | \end{itemize} 132 | \item \textbf{Regression} 133 | \\\textit{In contrast to 
classification, the goal of regression is to learn a function $f:\mathbb{R}^n\rightarrow\mathbb{R}^m$ for input features $x\in\mathbb{R}^n$ and target features $y\in\mathbb{R}^m$. The learning setup is very similar to classification, where the network architectures and loss functions may need to be tweaked. For example, the mean squared error is the most common loss function for regression, but the network output is no longer restricted to be between $0$ and $1$.} 134 | \begin{itemize} 135 | \item \textbf{Pileup}~\cite{Komiske:2017ubm,ATL-PHYS-PUB-2019-028,Martinez:2018fwc,Carrazza:2019efs,Maier:2021ymx} 136 | \\\textit{A given bunch crossing at the LHC will have many nearly simultaneous proton-proton collisions. Only one of those is usually interesting and the rest introduce a source of noise (pileup) that must be mitigated for precise final state reconstruction.} 137 | \item \textbf{Calibration}~\cite{Cheong:2019upg,ATL-PHYS-PUB-2020-001,ATL-PHYS-PUB-2018-013,Hooberman:DLPS2017,Kasieczka:2020vlh,Sirunyan:2019wwa,Baldi:2020hjm,Du:2020pmp,Kieseler:2021jxc,Pollard:2021fqv,Akchurin:2021afn,Kieseler:2020wcq,Akchurin:2021ahx,Diefenthaler:2021rdj,Polson:2021kvr,Micallef:2021src} 138 | \\\textit{The goal of calibration is to remove the bias (and reduce variance if possible) from detector (or related) effects.} 139 | \item \textbf{Recasting}~\cite{Caron:2017hku,Bertone:2016mdy,1806026} 140 | \\\textit{Even though an experimental analysis may provide a single model-dependent interpretation of the result, the results are likely to have important implications for a variety of other models. 
Recasting is the task of taking a result and interpreting it in the context of a model that was not used for the original analysis.} 141 | \item \textbf{Matrix elements}~\cite{Badger:2020uow,Bishara:2019iwh,1804325,Bury:2020ewi,Sombillo:2021yxe,Sombillo:2021rxv,Aylett-Bullock:2021hmo,Maitre:2021uaa} 142 | \\\textit{Regression methods can be used as surrogate models for functions that are too slow to evaluate. One important class of functions are matrix elements, which form the core component of cross section calculations in quantum field theory.} 143 | \item \textbf{Parameter estimation}~\cite{Lei:2020ucb,1808105,Lazzarin:2020uvv,Kim:2021pcz,Alda:2021rgt} 144 | \\\textit{The target features could be parameters of a model, which can be learned directly through a regression setup. Other forms of inference are described in later sections (which could also be viewed as regression).} 145 | \item \textbf{Parton Distribution Functions (and related)}~\cite{DelDebbio:2020rgv,Grigsby:2020auv,Rossi:2020sbh,Carrazza:2021hny,Ball:2021leu,Ball:2021xlu} 146 | \\\textit{Various machine learning models can provide flexible function approximators, which can be useful for modeling functions that cannot be determined easily from first principles such as parton distribution functions.} 147 | \item \textbf{Lattice Gauge Theory}~\cite{Kanwar:2003.06413,Favoni:2020reg,Bulusu:2021rqz,Shi:2021qr,Hackett:2021idh} 148 | \\\textit{Lattice methods offer a complementary approach to perturbation theory. 
A key challenge is to create approaches that respect the local gauge symmetry (equivariant networks).} 149 | \item \textbf{Function Approximation}~\cite{1853982,Haddadin:2021mmo} 150 | \\\textit{Approximating functions that obey certain (physical) constraints.} 151 | \end{itemize} 152 | \item \textbf{Decorrelation methods}~\cite{Louppe:2016ylz,Dolen:2016kst,Moult:2017okx,Stevens:2013dya,Shimmin:2017mfk,Bradshaw:2019ipy,ATL-PHYS-PUB-2018-014,DiscoFever,Xia:2018kgd,Englert:2018cfo,Wunsch:2019qbo,Rogozhnikov:2014zea,10.1088/2632-2153/ab9023,clavijo2020adversarial,Kasieczka:2020pil,Kitouni:2020xgb,Ghosh:2021hrh} 153 | \\\textit{It is sometimes the case that a classification or regression model needs to be independent of a set of features (usually a mass-like variable) in order to estimate the background or otherwise reduce the uncertainty. These techniques are related to what the machine learning literature calls model `fairness'.} 154 | \item \textbf{Generative models / density estimation} 155 | \\\textit{The goal of generative modeling is to learn (explicitly or implicitly) a probability density $p(x)$ for the features $x\in\mathbb{R}^n$. 
This task is usually unsupervised (no labels).} 156 | \begin{itemize} 157 | \item \textbf{GANs}:~\cite{deOliveira:2017pjk,Paganini:2017hrr,Paganini:2017dwg,Alonso-Monsalve:2018aqs,Butter:2019eyo,Martinez:2019jlu,Bellagente:2019uyp,Vallecorsa:2019ked,SHiP:2019gcl,Carrazza:2019cnt,Butter:2019cae,Lin:2019htn,DiSipio:2019imz,Hashemi:2019fkn,Chekalina:2018hxi,ATL-SOFT-PUB-2018-001,Zhou:2018ill,Carminati:2018khv,Vallecorsa:2018zco,Datta:2018mwd,Musella:2018rdi,Erdmann:2018kuh,Deja:2019vcv,Derkach:2019qfk,Erbin:2018csv,Erdmann:2018jxd,Urban:2018tqv,Oliveira:DLPS2017,deOliveira:2017rwa,Farrell:2019fsm,Hooberman:DLPS2017,Belayneh:2019vyx,buhmann2020getting,Alanazi:2020jod,2009.03796,2008.06545,Kansal:2020svm,Maevskiy:2020ank,Lai:2020byl,Choi:2021sku,Rehm:2021zow,Rehm:2021zoz,Carrazza:2021hny,Rehm:2021qwm,Lebese:2021foi,Winterhalder:2021ave,Kansal:2021cqp,NEURIPS2020_a878dbeb,Khattak:2021ndw,Mu:2021nno} 158 | \\\textit{Generative Adversarial Networks~\cite{Goodfellow:2014upx} learn $p(x)$ implicitly through the minimax optimization of two networks: one that maps noise to structure $G(z)$ and one a classifier (called the discriminator) that learns to distinguish examples generated from $G(z)$ and those generated from the target process. When the discriminator is maximally `confused', then the generator is effectively mimicking $p(x)$.} 159 | \item \textbf{Autoencoders}~\cite{Monk:2018zsb,ATL-SOFT-PUB-2018-001,Cheng:2020dal,1816035,Howard:2021pos,Buhmann:2021lxj,Bortolato:2021zic,deja2020endtoend,Hariri:2021clz,Fanelli:2019qaq} 160 | \\\textit{An autoencoder consists of two functions: one that maps $x$ into a latent space $z$ (encoder) and a second one that maps the latent space back into the original space (decoder). The encoder and decoder are simultaneously trained so that their composition is nearly the identity. 
When the latent space has a well-defined probability density (as in variational autoencoders), then one can sample from the autoencoder by applying the decoder to a randomly chosen element of the latent space.} 161 | \item \textbf{Normalizing flows}~\cite{Albergo:2019eim,Kanwar:2003.06413,Brehmer:2020vwc,Bothmann:2020ywa,Gao:2020zvv,Gao:2020vdv,Nachman:2020lpy,Choi:2020bnf,Lu:2020npg,Bieringer:2020tnw,Hollingsworth:2021sii,Winterhalder:2021ave,krause2021caloflow,Hackett:2021idh,Menary:2021tjg,Hallin:2021wme,NEURIPS2020_a878dbeb,Vandegar:2020yvw} 162 | \\\textit{Normalizing flows~\cite{pmlr-v37-rezende15} learn $p(x)$ explicitly by starting with a simple probability density and then applying a series of bijective transformations with tractable Jacobians.} 163 | \item \textbf{Physics-inspired}~\cite{Andreassen:2018apy,Andreassen:2019txo,1808876,Lai:2020byl,Barenboim:2021vzh} 164 | \\\textit{A variety of methods have been proposed to use machine learning tools (e.g. neural networks) combined with physical components.} 165 | \item \textbf{Mixture Models}~\cite{Chen:2020uds,Burton:2021tsd} 166 | \\\textit{A mixture model is a superposition of simple probability densities. For example, a Gaussian mixture model is a sum of normal probability densities. Mixture density networks are mixture models where the coefficients in front of the constituent densities as well as the density parameters (e.g. 
mean and variances of Gaussians) are parameterized by neural networks.} 167 | \item \textbf{Phase space generation}~\cite{Bendavid:2017zhk,Bothmann:2020ywa,Gao:2020zvv,Gao:2020vdv,Klimek:2018mza,Carrazza:2020rdn,Nachman:2020fff,Chen:2020nfb,Verheyen:2020bjw,Backes:2020vka} 168 | \\\textit{Monte Carlo event generators integrate over a phase space that needs to be generated efficiently and this can be aided by machine learning methods.} 169 | \item \textbf{Gaussian processes}~\cite{Frate:2017mai,Bertone:2016mdy,1804325,Cisbani:2019xta} 170 | \\\textit{These are non-parametric tools for modeling the `time'-dependence of a random variable. The `time' need not be actual time - for instance, one can use Gaussian processes to model the energy dependence of some probability density.} 171 | \end{itemize} 172 | \item \textbf{Anomaly detection}~\cite{DAgnolo:2018cun,Collins:2018epr,Collins:2019jip,DAgnolo:2019vbw,Farina:2018fyg,Heimel:2018mkt,Roy:2019jae,Cerri:2018anq,Blance:2019ibf,Hajer:2018kqm,DeSimone:2018efk,Mullin:2019mmh,1809.02977,Dillon:2019cqt,Andreassen:2020nkr,Nachman:2020lpy,Aguilar-Saavedra:2017rzt,Romao:2019dvs,Romao:2020ojy,knapp2020adversarially,collaboration2020dijet,1797846,1800445,Amram:2020ykb,Cheng:2020dal,Khosa:2020qrz,Thaprasop:2020mzp,Alexander:2020mbx,aguilarsaavedra2020mass,1815227,pol2020anomaly,Mikuni:2020qds,vanBeekveld:2020txa,Park:2020pak,Faroughy:2020gas,Stein:2020rou,Kasieczka:2021xcg,Chakravarti:2021svb,Batson:2021agz,Blance:2021gcs,Bortolato:2021zic,Collins:2021nxn,Dillon:2021nxw,Finke:2021sdf,Shih:2021kbt,Atkinson:2021nlt,Kahn:2021drv,Aarrestad:2021oeb,Dorigo:2021iyy,Caron:2021wmq,Govorkova:2021hqu,Kasieczka:2021tew,Volkovich:2021txe,Govorkova:2021utb,Hallin:2021wme,Ostdiek:2021bem} 173 | \\\textit{The goal of anomaly detection is to identify abnormal events. The abnormal events could be from physics beyond the Standard Model or from faults in a detector. 
While nearly all searches for new physics are technically anomaly detection, this category is for methods that are model-independent (broadly defined). Anomalies in high energy physics tend to manifest as over-densities in phase space (often called `population anomalies') in contrast to off-manifold anomalies where you can flag individual examples as anomalous. } 174 | \item \textbf{Simulation-based (`likelihood-free') Inference} 175 | \\\textit{Likelihood-based inference is the case where $p(x|\theta)$ is known and $\theta$ can be determined by maximizing the probability of the data. In high energy physics, $p(x|\theta)$ is often not known analytically, but it is often possible to sample from the density implicitly using simulations.} 176 | \begin{itemize} 177 | \item \textbf{Parameter estimation}~\cite{Andreassen:2019nnm,Stoye:2018ovl,Hollingsworth:2020kjg,Brehmer:2018kdj,Brehmer:2018eca,Brehmer:2019xox,Brehmer:2018hga,Cranmer:2015bka,Andreassen:2020gtw,Coogan:2020yux,Flesher:2020kuy,Bieringer:2020tnw,Nachman:2021yvi,Chatterjee:2021nms,NEURIPS2020_a878dbeb} 178 | \\\textit{This can also be viewed as a regression problem, but there the goal is typically to do maximum likelihood estimation in contrast to directly minimizing the mean squared error between a function and the target.} 179 | \item \textbf{Unfolding}~\cite{Andreassen:2019cjw,Datta:2018mwd,Bellagente:2019uyp,Gagunashvili:2010zw,Glazov:2017vni,Martschei:2012pr,Lindemann:1995ut,Zech2003BinningFreeUB,1800956,Vandegar:2020yvw,Howard:2021pos,Baron:2021vvl,Andreassen:2021zzk,Komiske:2021vym,H1:2021wkz} 180 | \\\textit{This is the task of removing detector distortions. In contrast to parameter estimation, the goal is not to infer model parameters, but instead, the undistorted phase space probability density. 
This is often also called deconvolution.} 181 | \item \textbf{Domain adaptation}~\cite{Rogozhnikov:2016bdp,Andreassen:2019nnm,Cranmer:2015bka,2009.03796,Nachman:2021opi} 182 | \\\textit{Morphing simulations to look like data is a form of domain adaptation.} 183 | \item \textbf{BSM}~\cite{Andreassen:2020nkr,Hollingsworth:2020kjg,Brehmer:2018kdj,Brehmer:2018eca,Brehmer:2018hga,Brehmer:2019xox,Romao:2020ojy} 184 | \\\textit{This category is for parameter estimation when the parameter is the signal strength of new physics.} 185 | \end{itemize} 186 | \item \textbf{Uncertainty Quantification} 187 | \\\textit{Estimating and mitigating uncertainty is essential for the successful deployment of machine learning methods in high energy physics. } 188 | \begin{itemize} 189 | \item \textbf{Interpretability}~\cite{deOliveira:2015xxd,Chang:2017kvc,Diefenbacher:2019ezd,Agarwal:2020fpt,Grojean:2020ech,Romero:2021qlf} 190 | \\\textit{Machine learning methods that are interpretable may be more robust and thus less susceptible to various sources of uncertainty.} 191 | \item \textbf{Estimation}~\cite{Nachman:2019dol,Nachman:2019yfl,Barnard:2016qma,Bellagente:2021yyh} 192 | \\\textit{A first step in reducing uncertainties is estimating their size.} 193 | \item \textbf{Mitigation}~\cite{Estrade:DLPS2017,Englert:2018cfo,Louppe:2016ylz,Araz:2021wqm} 194 | \\\textit{This category is for proposals to reduce uncertainty.} 195 | \item \textbf{Uncertainty-aware inference}~\cite{Caron:2019xkx,Bollweg:2019skg,deCastro:2018mgh,Wunsch:2020iuh,Ghosh:2021roe} 196 | \\\textit{The usual path for inference is that a machine learning method is trained for a nominal setup. Uncertainties are then propagated in the usual way. 
This is suboptimal and so there are multiple proposals for incorporating uncertainties into the learning to get as close to making the final statistical test the target of the machine learning as possible.} 197 | \end{itemize} 198 | \item \textbf{Experimental results} 199 | \\\textit{This section is incomplete as there are many results that directly and indirectly (e.g. via flavor tagging) use modern machine learning techniques. We will try to highlight experimental results that use deep learning in a critical way for the final analysis sensitivity.} 200 | \begin{itemize} 201 | \item Final analysis discriminant for searches~\cite{Aad:2019yxi,Aad:2020hzm,collaboration2020dijet,Sirunyan:2020hwz}. 202 | \item Measurements using deep learning directly (not through object reconstruction)~\cite{H1:2021wkz} 203 | \end{itemize} 204 | 205 | 206 | \end{itemize} 207 | 208 | 209 | \clearpage 210 | \flushbottom 211 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 212 | % References 213 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 214 | %\bibliographystyle{uiuchept} 215 | \bibliographystyle{JHEP} 216 | \bibliography{HEPML} 217 | 218 | \end{document} 219 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # **A Living Review of Machine Learning for Particle Physics** 2 | 3 | *Modern machine learning techniques, including deep learning, are rapidly being applied, adapted, and developed for high energy physics. The goal of this document is to provide a nearly comprehensive list of citations for those developing and applying these approaches to experimental, phenomenological, or theoretical analyses. As a living document, it will be updated as often as possible to incorporate the latest developments. A list of proper (unchanging) reviews can be found within. Papers are grouped into a small set of topics to be as useful as possible. 
Suggestions are most welcome.* 4 | 5 | [![download](https://img.shields.io/badge/download-review-blue.svg)](https://iml-wg.github.io/HEPML-LivingReview/review/hepml-review.pdf) 6 | 7 | The purpose of this note is to collect references for modern machine learning as applied to particle physics. A minimal number of categories is chosen in order to be as useful as possible. Note that papers may be referenced in more than one category. The fact that a paper is listed in this document does not endorse or validate its content - that is for the community (and for peer-review) to decide. Furthermore, the classification here is a best attempt and may have flaws - please let us know if (a) we have missed a paper you think should be included, (b) a paper has been misclassified, or (c) a citation for a paper is not correct or if the journal information is now available. In order to be as useful as possible, this document will continue to evolve so please check back before you write your next paper. If you find this review helpful, please consider citing it using \cite{hepmllivingreview} in HEPML.bib. 
8 | 9 | * Reviews 10 | * Modern reviews 11 | 12 | * [Jet Substructure at the Large Hadron Collider: A Review of Recent Advances in Theory and Machine Learning](https://arxiv.org/abs/1709.04464) [[DOI](https://doi.org/10.1016/j.physrep.2019.11.001)] 13 | * [Deep Learning and its Application to LHC Physics](https://arxiv.org/abs/1806.11484) [[DOI](https://doi.org/10.1146/annurev-nucl-101917-021019)] 14 | * [Machine Learning in High Energy Physics Community White Paper](https://arxiv.org/abs/1807.02876) [[DOI](https://doi.org/10.1088/1742-6596/1085/2/022008)] 15 | * [Machine learning at the energy and intensity frontiers of particle physics](https://doi.org/10.1038/s41586-018-0361-2) 16 | * [Machine learning and the physical sciences](https://arxiv.org/abs/1903.10563) [[DOI](https://doi.org/10.1103/RevModPhys.91.045002)] 17 | * [Machine and Deep Learning Applications in Particle Physics](https://arxiv.org/abs/1912.08245) [[DOI](https://doi.org/10.1142/S0217751X19300199)] 18 | * [Modern Machine Learning and Particle Physics](https://arxiv.org/abs/2103.12226) 19 | 20 | * Specialized reviews 21 | 22 | * [The Machine Learning Landscape of Top Taggers](https://arxiv.org/abs/1902.09914) [[DOI](https://doi.org/10.21468/SciPostPhys.7.1.014)] 23 | * [Dealing with Nuisance Parameters using Machine Learning in High Energy Physics: a Review](https://arxiv.org/abs/2007.09121) 24 | * [Graph neural networks in particle physics](https://arxiv.org/abs/2007.13681) [[DOI](https://doi.org/10.1088/2632-2153/abbf9a)] 25 | * [A Review on Machine Learning for Neutrino Experiments](https://arxiv.org/abs/2008.01242) [[DOI](https://doi.org/10.1142/S0217751X20430058)] 26 | * [Generative Networks for LHC events](https://arxiv.org/abs/2008.08558) 27 | * [Parton distribution functions](https://arxiv.org/abs/2008.12305) 28 | * [Simulation-based inference methods for particle physics](https://arxiv.org/abs/2010.06439) 29 | * [Anomaly Detection for Physics Analysis and Less than Supervised 
Learning](https://arxiv.org/abs/2010.14554) 30 | * [Graph Neural Networks for Particle Tracking and Reconstruction](https://arxiv.org/abs/2012.01249) 31 | * [Distributed Training and Optimization Of Neural Networks](https://arxiv.org/abs/2012.01839) 32 | * [The frontier of simulation-based inference](https://arxiv.org/abs/1911.01429) [[DOI](https://doi.org/10.1073/pnas.1912789117)] 33 | * [Machine Learning scientific competitions and datasets](https://arxiv.org/abs/2012.08520) 34 | * [Image-Based Jet Analysis](https://arxiv.org/abs/2012.09719) 35 | * [Quantum Machine Learning in High Energy Physics](https://arxiv.org/abs/2005.08582) [[DOI](https://doi.org/10.1088/2632-2153/abc17d)] 36 | * [Sequence-based Machine Learning Models in Jet Physics](https://arxiv.org/abs/2102.06128) 37 | * [A survey of machine learning-based physics event generation](https://arxiv.org/abs/2106.00643) 38 | 39 | * Classical papers 40 | 41 | * [Neural Networks and Cellular Automata in Experimental High-energy Physics](https://doi.org/10.1016/0010-4655(88)90004-5) 42 | * [Finding Gluon Jets With a Neural Trigger](https://doi.org/10.1103/PhysRevLett.65.1321) 43 | 44 | * Datasets 45 | 46 | * [The LHC Olympics 2020: A Community Challenge for Anomaly Detection in High Energy Physics](https://arxiv.org/abs/2101.08320) 47 | * [The Dark Machines Anomaly Score Challenge: Benchmark Data and Model Independent Event Classification for the Large Hadron Collider](https://arxiv.org/abs/2105.14027) 48 | * [Shared Data and Algorithms for Deep Learning in Fundamental Physics](https://arxiv.org/abs/2107.00656) 49 | * [LHC physics dataset for unsupervised New Physics detection at 40 MHz](https://arxiv.org/abs/2107.02157) 50 | * [A FAIR and AI-ready Higgs Boson Decay Dataset](https://arxiv.org/abs/2108.02214) 51 | 52 | * Classification 53 | * Parameterized classifiers 54 | 55 | * [Parameterized neural networks for high-energy physics](https://arxiv.org/abs/1601.07913) 
[[DOI](https://doi.org/10.1140/epjc/s10052-016-4099-4)] 56 | * [Approximating Likelihood Ratios with Calibrated Discriminative Classifiers](https://arxiv.org/abs/1506.02169) 57 | * [E Pluribus Unum Ex Machina: Learning from Many Collider Events at Once](https://arxiv.org/abs/2101.07263) 58 | 59 | * Jet images 60 | 61 | * [How to tell quark jets from gluon jets](https://doi.org/10.1103/PhysRevD.44.2025) 62 | * [Jet-Images: Computer Vision Inspired Techniques for Jet Tagging](https://arxiv.org/abs/1407.5675) [[DOI](https://doi.org/10.1007/JHEP02(2015)118)] 63 | * [Playing Tag with ANN: Boosted Top Identification with Pattern Recognition](https://arxiv.org/abs/1501.05968) [[DOI](https://doi.org/10.1007/JHEP07(2015)086)] 64 | * [Jet-images — deep learning edition](https://arxiv.org/abs/1511.05190) [[DOI](https://doi.org/10.1007/JHEP07(2016)069)] 65 | * [Quark versus Gluon Jet Tagging Using Jet Images with the ATLAS Detector](http://cds.cern.ch/record/2275641) 66 | * [Boosting $H\to b\bar b$ with Machine Learning](https://arxiv.org/abs/1807.10768) [[DOI](https://doi.org/10.1007/JHEP10(2018)101)] 67 | * [Learning to classify from impure samples with high-dimensional data](https://arxiv.org/abs/1801.10158) [[DOI](https://doi.org/10.1103/PhysRevD.98.011502)] 68 | * [Parton Shower Uncertainties in Jet Substructure Analyses with Deep Neural Networks](https://arxiv.org/abs/1609.00607) [[DOI](https://doi.org/10.1103/PhysRevD.95.014018)] 69 | * [Deep learning in color: towards automated quark/gluon](https://arxiv.org/abs/1612.01551) [[DOI](https://doi.org/10.1007/JHEP01(2017)110)] 70 | * [Deep-learning Top Taggers or The End of QCD?](https://arxiv.org/abs/1701.08784) [[DOI](https://doi.org/10.1007/JHEP05(2017)006)] 71 | * [Pulling Out All the Tops with Computer Vision and Deep Learning](https://arxiv.org/abs/1803.00107) [[DOI](https://doi.org/10.1007/JHEP10(2018)121)] 72 | * [Reconstructing boosted Higgs jets from event image segmentation](https://arxiv.org/abs/2008.13529) 73 | 
* [An Attention Based Neural Network for Jet Tagging](https://arxiv.org/abs/2009.00170) 74 | * [Quark-Gluon Jet Discrimination Using Convolutional Neural Networks](https://arxiv.org/abs/2012.02531) [[DOI](https://doi.org/10.3938/jkps.74.219)] 75 | * [Learning to Isolate Muons](https://arxiv.org/abs/2102.02278) 76 | * [Deep learning jet modifications in heavy-ion collisions](https://arxiv.org/abs/2012.07797) 77 | * [Identifying the Quantum Properties of Hadronic Resonances using Machine Learning](https://arxiv.org/abs/2105.04582) 78 | 79 | * Event images 80 | 81 | * [Topology classification with deep learning to improve real-time event selection at the LHC](https://arxiv.org/abs/1807.00083) [[DOI](https://doi.org/10.1007/s41781-019-0028-1)] 82 | * [Convolutional Neural Networks with Event Images for Pileup Mitigation with the ATLAS Detector](http://cds.cern.ch/record/2684070) 83 | * [Boosting $H\to b\bar b$ with Machine Learning](https://arxiv.org/abs/1807.10768) [[DOI](https://doi.org/10.1007/JHEP10(2018)101)] 84 | * [End-to-End Physics Event Classification with the CMS Open Data: Applying Image-based Deep Learning on Detector Data to Directly Classify Collision Events at the LHC](https://arxiv.org/abs/1807.11916) [[DOI](https://doi.org/10.1007/s41781-020-00038-8)] 85 | * [Disentangling Boosted Higgs Boson Production Modes with Machine Learning](https://arxiv.org/abs/2009.05930) 86 | * [Identifying the nature of the QCD transition in relativistic collision of heavy nuclei with deep learning](https://arxiv.org/abs/1910.11530) [[DOI](https://doi.org/10.1140/epjc/s10052-020-8030-7)] 87 | * [End-to-End Jet Classification of Boosted Top Quarks with the CMS Open Data](https://arxiv.org/abs/2104.14659) 88 | * [Jet Single Shot Detection](https://arxiv.org/abs/2105.05785) 89 | 90 | * Sequences 91 | 92 | * [Jet Flavor Classification in High-Energy Physics with Deep Neural Networks](https://arxiv.org/abs/1607.08633) [[DOI](https://doi.org/10.1103/PhysRevD.94.112002)] 93 | * 
[Topology classification with deep learning to improve real-time event selection at the LHC](https://arxiv.org/abs/1807.00083) [[DOI](https://doi.org/10.1007/s41781-019-0028-1)] 94 | * [Jet Flavour Classification Using DeepJet](https://arxiv.org/abs/2008.10519) [[DOI](https://doi.org/10.1088/1748-0221/15/12/P12012)] 95 | * [Development of a Vertex Finding Algorithm using Recurrent Neural Network](https://arxiv.org/abs/2101.11906) 96 | * [Sequence-based Machine Learning Models in Jet Physics](https://arxiv.org/abs/2102.06128) 97 | * [Identification of Jets Containing $b$-Hadrons with Recurrent Neural Networks at the ATLAS Experiment](http://cdsweb.cern.ch/record/2255226) 98 | 99 | * Trees 100 | 101 | * [QCD-Aware Recursive Neural Networks for Jet Physics](https://arxiv.org/abs/1702.00748) [[DOI](https://doi.org/10.1007/JHEP01(2019)057)] 102 | * [Recursive Neural Networks in Quark/Gluon Tagging](https://arxiv.org/abs/1711.02633) [[DOI](https://doi.org/10.1007/s41781-018-0007-y)] 103 | 104 | * Graphs 105 | 106 | * [Neural Message Passing for Jet Physics](https://dl4physicalsciences.github.io/files/nips_dlps_2017_29.pdf}) 107 | * [Graph Neural Networks for Particle Reconstruction in High Energy Physics detectors](https://arxiv.org/abs/2003.11603) 108 | * [Probing stop pair production at the LHC with graph neural networks](https://arxiv.org/abs/1807.09088) [[DOI](https://doi.org/10.1007/JHEP08(2019)055)] 109 | * [Pileup mitigation at the Large Hadron Collider with graph neural networks](https://arxiv.org/abs/1810.07988) [[DOI](https://doi.org/10.1140/epjp/i2019-12710-3)] 110 | * [Unveiling CP property of top-Higgs coupling with graph neural networks at the LHC](https://arxiv.org/abs/1901.05627) [[DOI](https://doi.org/10.1016/j.physletb.2020.135198)] 111 | * [JEDI-net: a jet identification algorithm based on interaction networks](https://arxiv.org/abs/1908.05318) [[DOI](https://doi.org/10.1140/epjc/s10052-020-7608-4)] 112 | * [Learning representations of irregular 
particle-detector geometry with distance-weighted graph networks](https://arxiv.org/abs/1902.07987) [[DOI](https://doi.org/10.1140/epjc/s10052-019-7113-9)] 113 | * [Interpretable deep learning for two-prong jet classification with jet spectra](https://arxiv.org/abs/1904.02092) [[DOI](https://doi.org/10.1007/JHEP07(2019)135)] 114 | * [Neural Network-based Top Tagger with Two-Point Energy Correlations and Geometry of Soft Emissions](https://arxiv.org/abs/2003.11787) [[DOI](https://doi.org/10.1007/JHEP07(2020)111)] 115 | * [Probing triple Higgs coupling with machine learning at the LHC](https://arxiv.org/abs/2005.11086) 116 | * [Casting a graph net to catch dark showers](https://arxiv.org/abs/2006.08639) [[DOI](https://doi.org/10.21468/SciPostPhys.10.2.046)] 117 | * [Graph neural networks in particle physics](https://arxiv.org/abs/2007.13681) [[DOI](https://doi.org/{10.1088/2632-2153/abbf9a)] 118 | * [Distance-Weighted Graph Neural Networks on FPGAs for Real-Time Particle Reconstruction in High Energy Physics](https://arxiv.org/abs/2008.03601) [[DOI](https://doi.org/10.3389/fdata.2020.598927)] 119 | * [Supervised Jet Clustering with Graph Neural Networks for Lorentz Boosted Bosons](https://arxiv.org/abs/2008.06064) [[DOI](https://doi.org/10.1103/PhysRevD.102.075014)] 120 | * [Track Seeding and Labelling with Embedded-space Graph Neural Networks](https://arxiv.org/abs/2007.00149) 121 | * [Graph neural network for 3D classification of ambiguities and optical crosstalk in scintillator-based neutrino detectors](https://arxiv.org/abs/2009.00688) [[DOI](https://doi.org/10.1103/PhysRevD.103.032005)] 122 | * [The Boosted Higgs Jet Reconstruction via Graph Neural Network](https://arxiv.org/abs/2010.05464) 123 | * [Accelerated Charged Particle Tracking with Graph Neural Networks on FPGAs](https://arxiv.org/abs/2012.01563) 124 | * [Particle Track Reconstruction using Geometric Deep Learning](https://arxiv.org/abs/2012.08515) 125 | * [Jet tagging in the Lund plane with graph 
networks](https://arxiv.org/abs/2012.08526) [[DOI](https://doi.org/10.1007/JHEP03(2021)052)] 126 | * [Vertex and Energy Reconstruction in JUNO with Machine Learning Methods](https://arxiv.org/abs/2101.04839) 127 | * [MLPF: Efficient machine-learned particle-flow reconstruction using graph neural networks](https://arxiv.org/abs/2101.08578) 128 | * [Towards a realistic track reconstruction algorithm based on graph neural networks for the HL-LHC](https://arxiv.org/abs/2103.00916) 129 | * [Deep Learning strategies for ProtoDUNE raw data denoising](https://arxiv.org/abs/2103.01596) 130 | * [Graph Neural Network for Object Reconstruction in Liquid Argon Time Projection Chambers](https://arxiv.org/abs/2103.06233) 131 | * [Instance Segmentation GNNs for One-Shot Conformal Tracking at the LHC](https://arxiv.org/abs/2103.06509) 132 | * [Charged particle tracking via edge-classifying interaction networks](https://arxiv.org/abs/2103.16701) 133 | * [Jet characterization in Heavy Ion Collisions by QCD-Aware Graph Neural Networks](https://arxiv.org/abs/2103.14906) 134 | * [Graph Generative Models for Fast Detector Simulations in High Energy Physics](https://arxiv.org/abs/2104.01725) 135 | * [Segmentation of EM showers for neutrino experiments with deep graph neural networks](https://arxiv.org/abs/2104.02040) 136 | * [Anomaly detection with Convolutional Graph Neural Networks](https://arxiv.org/abs/2105.07988) 137 | 138 | * Sets (point clouds) 139 | 140 | * [Energy Flow Networks: Deep Sets for Particle Jets](https://arxiv.org/abs/1810.05165) [[DOI](https://doi.org/10.1007/JHEP01(2019)121)] 141 | * [ParticleNet: Jet Tagging via Particle Clouds](https://arxiv.org/abs/1902.08570) [[DOI](https://doi.org/10.1103/PhysRevD.101.056019)] 142 | * [ABCNet: An attention-based method for particle tagging](https://arxiv.org/abs/2001.05311) [[DOI](https://doi.org/10.1140/epjp/s13360-020-00497-3)] 143 | * [Secondary Vertex Finding in Jets with Neural Networks](https://arxiv.org/abs/2008.02831) 
144 | * [Equivariant Energy Flow Networks for Jet Tagging](https://arxiv.org/abs/2012.00964) 145 | * [Permutationless Many-Jet Event Reconstruction with Symmetry Preserving Attention Networks](https://arxiv.org/abs/2010.09206) 146 | * [Zero-Permutation Jet-Parton Assignment using a Self-Attention Network](https://arxiv.org/abs/2012.03542) 147 | * [Learning to Isolate Muons](https://arxiv.org/abs/2102.02278) 148 | * [Point Cloud Transformers applied to Collider Physics](https://arxiv.org/abs/2102.05073) 149 | * [SPANet: Generalized Permutationless Set Assignment for Particle Physics using Symmetry Preserving Attention](https://arxiv.org/abs/2106.03898) 150 | * [Particle Convolution for High Energy Physics](https://arxiv.org/abs/2107.02908) 151 | * [Deep Sets based Neural Networks for Impact Parameter Flavour Tagging in ATLAS](https://cds.cern.ch/record/2718948) 152 | 153 | * Physics-inspired basis 154 | 155 | * [Automating the Construction of Jet Observables with Machine Learning](https://arxiv.org/abs/1902.07180) [[DOI](https://doi.org/10.1103/PhysRevD.100.095016)] 156 | * [How Much Information is in a Jet?](https://arxiv.org/abs/1704.08249) [[DOI](https://doi.org/10.1007/JHEP06(2017)073)] 157 | * [Novel Jet Observables from Machine Learning](https://arxiv.org/abs/1710.01305) [[DOI](https://doi.org/10.1007/JHEP03(2018)086)] 158 | * [Energy flow polynomials: A complete linear basis for jet substructure](https://arxiv.org/abs/1712.07124) [[DOI](https://doi.org/10.1007/JHEP04(2018)013)] 159 | * [Deep-learned Top Tagging with a Lorentz Layer](https://arxiv.org/abs/1707.08966) [[DOI](https://doi.org/10.21468/SciPostPhys.5.3.028)] 160 | * [Resurrecting $b\bar{b}h$ with kinematic shapes](https://arxiv.org/abs/2011.13945) 161 | 162 | * $W/Z$ tagging 163 | 164 | * [Jet-images — deep learning edition](https://arxiv.org/abs/1511.05190) [[DOI](https://doi.org/10.1007/JHEP07(2016)069)] 165 | * [Parton Shower Uncertainties in Jet Substructure Analyses with Deep Neural 
Networks](https://arxiv.org/abs/1609.00607) [[DOI](https://doi.org/10.1103/PhysRevD.95.014018)] 166 | * [QCD-Aware Recursive Neural Networks for Jet Physics](https://arxiv.org/abs/1702.00748) [[DOI](https://doi.org/10.1007/JHEP01(2019)057)] 167 | * [Identification of heavy, energetic, hadronically decaying particles using machine-learning techniques](https://arxiv.org/abs/2004.08262) [[DOI](https://doi.org/10.1088/1748-0221/15/06/P06005)] 168 | * [Boosted $W$ and $Z$ tagging with jet charge and deep learning](https://arxiv.org/abs/1908.08256) [[DOI](https://doi.org/10.1103/PhysRevD.101.053001)] 169 | * [Supervised Jet Clustering with Graph Neural Networks for Lorentz Boosted Bosons](https://arxiv.org/abs/2008.06064) [[DOI](https://doi.org/10.1103/PhysRevD.102.075014)] 170 | * [Jet tagging in the Lund plane with graph networks](https://arxiv.org/abs/2012.08526) [[DOI](https://doi.org/10.1007/JHEP03(2021)052)] 171 | * [A $W^\pm$ polarization analyzer from Deep Neural Networks](https://arxiv.org/abs/2102.05124) 172 | 173 | * $H\rightarrow b\bar{b}$ 174 | 175 | * [Automating the Construction of Jet Observables with Machine Learning](https://arxiv.org/abs/1902.07180) [[DOI](https://doi.org/10.1103/PhysRevD.100.095016)] 176 | * [Boosting $H\to b\bar b$ with Machine Learning](https://arxiv.org/abs/1807.10768) [[DOI](https://doi.org/10.1007/JHEP10(2018)101)] 177 | * [Interaction networks for the identification of boosted $H \rightarrow b\overline{b}$ decays](https://arxiv.org/abs/1909.12285) [[DOI](https://doi.org/10.1103/PhysRevD.102.012010)] 178 | * [Interpretable deep learning for two-prong jet classification with jet spectra](https://arxiv.org/abs/1904.02092) [[DOI](https://doi.org/10.1007/JHEP07(2019)135)] 179 | * [Identification of heavy, energetic, hadronically decaying particles using machine-learning techniques](https://arxiv.org/abs/2004.08262) [[DOI](https://doi.org/10.1088/1748-0221/15/06/P06005)] 180 | * [Disentangling Boosted Higgs Boson Production Modes with 
Machine Learning](https://arxiv.org/abs/2009.05930) 181 | * [Benchmarking Machine Learning Techniques with Di-Higgs Production at the LHC](https://arxiv.org/abs/2009.06754) 182 | * [The Boosted Higgs Jet Reconstruction via Graph Neural Network](https://arxiv.org/abs/2010.05464) 183 | * [Extracting Signals of Higgs Boson From Background Noise Using Deep Neural Networks](https://arxiv.org/abs/2010.08201) 184 | * [Learning to increase matching efficiency in identifying additional b-jets in the $\text{t}\bar{\text{t}}\text{b}\bar{\text{b}}$ process](https://arxiv.org/abs/2103.09129) 185 | * [Higgs tagging with the Lund jet plane](https://arxiv.org/abs/2105.03989) 186 | 187 | * quarks and gluons 188 | 189 | * [Quark versus Gluon Jet Tagging Using Jet Images with the ATLAS Detector](http://cds.cern.ch/record/2275641) 190 | * [Deep learning in color: towards automated quark/gluon](https://arxiv.org/abs/1612.01551) [[DOI](https://doi.org/10.1007/JHEP01(2017)110)] 191 | * [Recursive Neural Networks in Quark/Gluon Tagging](https://arxiv.org/abs/1711.02633) [[DOI](https://doi.org/10.1007/s41781-018-0007-y)] 192 | * [DeepJet: Generic physics object based jet multiclass classification for LHC experiments](https://dl4physicalsciences.github.io/files/nips_dlps_2017_10.pdf}) 193 | * [Probing heavy ion collisions using quark and gluon jet substructure](https://arxiv.org/abs/1803.03589) 194 | * [JEDI-net: a jet identification algorithm based on interaction networks](https://arxiv.org/abs/1908.05318) [[DOI](https://doi.org/10.1140/epjc/s10052-020-7608-4)] 195 | * [Quark-Gluon Tagging: Machine Learning vs Detector](https://arxiv.org/abs/1812.09223) [[DOI](https://doi.org/10.21468/SciPostPhys.6.6.069)] 196 | * [Towards Machine Learning Analytics for Jet Substructure](https://arxiv.org/abs/2007.04319) [[DOI](https://doi.org/10.1007/JHEP09(2020)195)] 197 | * [Quark Gluon Jet Discrimination with Weakly Supervised Learning](https://arxiv.org/abs/2012.02540) 
[[DOI](https://doi.org/10.3938/jkps.75.652)] 198 | * [Quark-Gluon Jet Discrimination Using Convolutional Neural Networks](https://arxiv.org/abs/2012.02531) [[DOI](https://doi.org/10.3938/jkps.74.219)] 199 | * [Jet tagging in the Lund plane with graph networks](https://arxiv.org/abs/2012.08526) [[DOI](https://doi.org/10.1007/JHEP03(2021)052)] 200 | * [Safety of Quark/Gluon Jet Classification](https://arxiv.org/abs/2103.09103) 201 | * [Identifying the Quantum Properties of Hadronic Resonances using Machine Learning](https://arxiv.org/abs/2105.04582) 202 | 203 | * top quark tagging 204 | 205 | * [Playing Tag with ANN: Boosted Top Identification with Pattern Recognition](https://arxiv.org/abs/1501.05968) [[DOI](https://doi.org/10.1007/JHEP07(2015)086)] 206 | * [DeepJet: Generic physics object based jet multiclass classification for LHC experiments](https://dl4physicalsciences.github.io/files/nips_dlps_2017_10.pdf}) 207 | * [The Machine Learning Landscape of Top Taggers](https://arxiv.org/abs/1902.09914) [[DOI](https://doi.org/10.21468/SciPostPhys.7.1.014)] 208 | * [Neural Network-based Top Tagger with Two-Point Energy Correlations and Geometry of Soft Emissions](https://arxiv.org/abs/2003.11787) [[DOI](https://doi.org/10.1007/JHEP07(2020)111)] 209 | * [CapsNets Continuing the Convolutional Quest](https://arxiv.org/abs/1906.11265) [[DOI](https://doi.org/10.21468/SciPostPhys.8.2.023)] 210 | * [Deep-learned Top Tagging with a Lorentz Layer](https://arxiv.org/abs/1707.08966) [[DOI](https://doi.org/10.21468/SciPostPhys.5.3.028)] 211 | * [Deep-learning Top Taggers or The End of QCD?](https://arxiv.org/abs/1701.08784) [[DOI](https://doi.org/10.1007/JHEP05(2017)006)] 212 | * [Pulling Out All the Tops with Computer Vision and Deep Learning](https://arxiv.org/abs/1803.00107) [[DOI](https://doi.org/10.1007/JHEP10(2018)121)] 213 | * [Boosted Top Quark Tagging and Polarization Measurement using Machine Learning](https://arxiv.org/abs/2010.11778) 214 | * [Morphology for Jet 
Classification](https://arxiv.org/abs/2010.13469) 215 | * [Jet tagging in the Lund plane with graph networks](https://arxiv.org/abs/2012.08526) [[DOI](https://doi.org/10.1007/JHEP03(2021)052)] 216 | * [Pulling the Higgs and Top needles from the jet stack with Feature Extended Supervised Tagging](https://arxiv.org/abs/2102.01667) 217 | * [End-to-End Jet Classification of Boosted Top Quarks with the CMS Open Data](https://arxiv.org/abs/2104.14659) 218 | 219 | * strange jets 220 | 221 | * [Strange Jet Tagging](https://arxiv.org/abs/2003.09517) 222 | * [A tagger for strange jets based on tracking information using long short-term memory](https://arxiv.org/abs/1907.07505) [[DOI](https://doi.org/10.1088/1748-0221/15/01/P01021)] 223 | * [Maximum performance of strange-jet tagging at hadron colliders](https://arxiv.org/abs/2011.10736) 224 | 225 | * $b$-tagging 226 | 227 | * [Identification of heavy-flavour jets with the CMS detector in pp collisions at 13 TeV](https://arxiv.org/abs/1712.07158) [[DOI](https://doi.org/10.1088/1748-0221/13/05/P05011)] 228 | * [Jet Flavor Classification in High-Energy Physics with Deep Neural Networks](https://arxiv.org/abs/1607.08633) [[DOI](https://doi.org/10.1103/PhysRevD.94.112002)] 229 | * [Identifying Heavy-Flavor Jets Using Vectors of Locally Aggregated Descriptors](https://arxiv.org/abs/2005.01842) [[DOI](https://doi.org/10.1088/1748-0221/16/03/P03017)] 230 | * [Jet Flavour Classification Using DeepJet](https://arxiv.org/abs/2008.10519) [[DOI](https://doi.org/10.1088/1748-0221/15/12/P12012)] 231 | * [Identification of Jets Containing $b$-Hadrons with Recurrent Neural Networks at the ATLAS Experiment](http://cdsweb.cern.ch/record/2255226) 232 | * [Deep Sets based Neural Networks for Impact Parameter Flavour Tagging in ATLAS](https://cds.cern.ch/record/2718948) 233 | 234 | * Flavor physics 235 | 236 | * ['Deep' Dive into $b \to c$ Anomalies: Standardized and Future-proof Model Selection Using Self-normalizing Neural 
Networks](https://arxiv.org/abs/2008.04316) 237 | 238 | * BSM particles and models 239 | 240 | * [Automating the Construction of Jet Observables with Machine Learning](https://arxiv.org/abs/1902.07180) [[DOI](https://doi.org/10.1103/PhysRevD.100.095016)] 241 | * [Searching for Exotic Particles in High-Energy Physics with Deep Learning](https://arxiv.org/abs/1402.4735) [[DOI](https://doi.org/10.1038/ncomms5308)] 242 | * [Interpretable deep learning for two-prong jet classification with jet spectra](https://arxiv.org/abs/1904.02092) [[DOI](https://doi.org/10.1007/JHEP07(2019)135)] 243 | * [A deep neural network to search for new long-lived particles decaying to jets](https://arxiv.org/abs/1912.12238) [[DOI](https://doi.org/10.1088/2632-2153/ab9023)] 244 | * [Fast convolutional neural networks for identifying long-lived particles in a high-granularity calorimeter](https://arxiv.org/abs/2004.10744) [[DOI](https://doi.org/10.1088/1748-0221/15/12/P12006)] 245 | * [Casting a graph net to catch dark showers](https://arxiv.org/abs/2006.08639) [[DOI](https://doi.org/10.21468/SciPostPhys.10.2.046)] 246 | * [Distinguishing $W'$ Signals at Hadron Colliders Using Neural Networks](https://arxiv.org/abs/2007.14586) [[DOI](https://doi.org/10.1103/PhysRevD.103.036016)] 247 | * [Deep learnig analysis of the inverse seesaw in a 3-3-1 model at the LHC](https://arxiv.org/abs/2008.03409) [[DOI](https://doi.org/10.1016/j.physletb.2020.135931)] 248 | * [Comparing Traditional and Deep-Learning Techniques of Kinematic Reconstruction for polarisation Discrimination in Vector Boson Scattering](https://arxiv.org/abs/2008.05316) [[DOI](https://doi.org/10.1140/epjc/s10052-020-08713-1)] 249 | * [Invisible Higgs search through Vector Boson Fusion: A deep learning approach](https://arxiv.org/abs/2008.05434) [[DOI](https://doi.org/10.1140/epjc/s10052-020-08629-w)] 250 | * [Sensing Higgs cascade decays through memory](https://arxiv.org/abs/2008.08611) 
[[DOI](https://doi.org/10.1103/PhysRevD.102.095027)] 251 | * [Phenomenology of vector-like leptons with Deep Learning at the Large Hadron Collider](https://arxiv.org/abs/2010.01307) [[DOI](https://doi.org/10.1007/JHEP01(2021)076)] 252 | * [WIMPs or else? Using Machine Learning to disentangle LHC signatures](https://arxiv.org/abs/1910.06058) 253 | * [Exploring the standard model EFT in VH production with machine learning](https://arxiv.org/abs/1902.05803) [[DOI](https://doi.org/10.1103/PhysRevD.100.035040)] 254 | * [Advanced Multi-Variate Analysis Methods for New Physics Searches at the Large Hadron Collider](https://arxiv.org/abs/2105.07530) 255 | * [Towards a method to anticipate dark matter signals with deep learning at the LHC](https://arxiv.org/abs/2105.12018) 256 | * [Top squark signal significance enhancement by different Machine Learning Algorithms](https://arxiv.org/abs/2106.06813) 257 | * [Detecting an axion-like particle with machine learning at the LHC](https://arxiv.org/abs/2106.07018) 258 | * [Unsupervised Hadronic SUEP at the LHC](https://arxiv.org/abs/2107.12379) 259 | * [Extract the energy scale of anomalous $\gamma\gamma \to W^+W^-$ scattering in the vector boson scattering process using artificial neural networks](https://arxiv.org/abs/2107.13624) 260 | * [Beyond Cuts in Small Signal Scenarios - Enhanced Sneutrino Detectability Using Machine Learning](https://arxiv.org/abs/2108.03125) 261 | * [Deep Learning Searches for Vector-Like Leptons at the LHC and Electron/Muon Colliders](https://arxiv.org/abs/2108.03926) 262 | * [Probing Higgs exotic decay at the LHC with machine learning](https://arxiv.org/abs/2109.03294) 263 | * [Machine Learning Optimized Search for the $Z'$ from $U(1)_{L_\mu-L_\tau}$ at the LHC](https://arxiv.org/abs/2109.07674) 264 | 265 | * Particle identification 266 | 267 | * [Electromagnetic Showers Beyond Shower Shapes](https://arxiv.org/abs/1806.05667) [[DOI](https://doi.org/10.1016/j.nima.2019.162879)] 268 | * [Survey of 
Machine Learning Techniques for High Energy Electromagnetic Shower Classification](https://dl4physicalsciences.github.io/files/nips_dlps_2017_24.pdf}) 269 | * [Calorimetry with Deep Learning: Particle Classification, Energy Regression, and Simulation for High-Energy Physics](https://dl4physicalsciences.github.io/files/nips_dlps_2017_15.pdf}) 270 | * [Calorimetry with Deep Learning: Particle Simulation and Reconstruction for Collider Physics](https://arxiv.org/abs/1912.06794) [[DOI](https://doi.org/10.1140/epjc/s10052-020-8251-9)] 271 | * [Learning representations of irregular particle-detector geometry with distance-weighted graph networks](https://arxiv.org/abs/1902.07987) [[DOI](https://doi.org/10.1140/epjc/s10052-019-7113-9)] 272 | * [Learning to Identify Electrons](https://arxiv.org/abs/2011.01984) 273 | * [Shower Identification in Calorimeter using Deep Learning](https://arxiv.org/abs/2103.16247) 274 | 275 | * Neutrino Detectors 276 | 277 | * [Deep neural network for pixel-level electromagnetic particle identification in the MicroBooNE liquid argon time projection chamber](https://arxiv.org/abs/1808.07269) [[DOI](https://doi.org/10.1103/PhysRevD.99.092001)] 278 | * [A Convolutional Neural Network Neutrino Event Classifier](https://arxiv.org/abs/1604.01444) [[DOI](https://doi.org/10.1088/1748-0221/11/09/P09001)] 279 | * [Convolutional Neural Networks Applied to Neutrino Events in a Liquid Argon Time Projection Chamber](https://arxiv.org/abs/1611.05531) [[DOI](https://doi.org/10.1088/1748-0221/12/03/P03011)] 280 | * [Convolutional Neural Networks for Electron Neutrino and Electron Shower Energy Reconstruction in the NO$\nu$A Detectors](https://dl4physicalsciences.github.io/files/nips_dlps_2017_7.pdf}) 281 | * [Event reconstruction for KM3NeT/ORCA using convolutional neural networks](https://arxiv.org/abs/2004.08254) [[DOI](https://doi.org/10.1088/1748-0221/15/10/P10005)] 282 | * [PILArNet: Public Dataset for Particle Imaging Liquid Argon Detectors in High Energy 
Physics](https://arxiv.org/abs/2006.01993) 283 | * [Point Proposal Network for Reconstructing 3D Particle Positions with Sub-Pixel Precision in Liquid Argon Time Projection Chambers](https://arxiv.org/abs/2006.14745) 284 | * [Scalable, Proposal-free Instance Segmentation Network for 3D Pixel Clustering and Particle Trajectory Reconstruction in Liquid Argon Time Projection Chambers](https://arxiv.org/abs/2007.03083) 285 | * [Augmented Signal Processing in Liquid Argon Time Projection Chamber with Deep Neural Network](https://arxiv.org/abs/2007.12743) [[DOI](https://doi.org/10.1088/1748-0221/16/01/P01036)] 286 | * [A Review on Machine Learning for Neutrino Experiments](https://arxiv.org/abs/2008.01242) [[DOI](https://doi.org/10.1142/S0217751X20430058)] 287 | * [Graph neural network for 3D classification of ambiguities and optical crosstalk in scintillator-based neutrino detectors](https://arxiv.org/abs/2009.00688) [[DOI](https://doi.org/10.1103/PhysRevD.103.032005)] 288 | * [A Convolutional Neural Network for Multiple Particle Identification in the MicroBooNE Liquid Argon Time Projection Chamber](https://arxiv.org/abs/2010.08653) 289 | * [Study of using machine learning for level 1 trigger decision in JUNO experiment](https://arxiv.org/abs/2011.08847) 290 | * [Deep-Learning-Based Kinematic Reconstruction for DUNE](https://arxiv.org/abs/2012.06181) 291 | * [Semantic Segmentation with a Sparse Convolutional Neural Network for Event Reconstruction in MicroBooNE](https://arxiv.org/abs/2012.08513) 292 | * [Quantum Convolutional Neural Networks for High Energy Physics Data Analysis](https://arxiv.org/abs/2012.12177) 293 | * [Vertex and Energy Reconstruction in JUNO with Machine Learning Methods](https://arxiv.org/abs/2101.04839) 294 | * [A Convolutional Neural Network based Cascade Reconstruction for the IceCube Neutrino Observatory](https://arxiv.org/abs/2101.11589) 295 | * [Scalable, End-to-End, Deep-Learning-Based Data Reconstruction Chain for Particle Imaging 
Detectors](https://arxiv.org/abs/2102.01033) 296 | * [Deep Learning strategies for ProtoDUNE raw data denoising](https://arxiv.org/abs/2103.01596) 297 | * [Graph Neural Network for Object Reconstruction in Liquid Argon Time Projection Chambers](https://arxiv.org/abs/2103.06233) 298 | * [A deep-learning based raw waveform region-of-interest finder for the liquid argon time projection chamber](https://arxiv.org/abs/2103.06391) 299 | * [Segmentation of EM showers for neutrino experiments with deep graph neural networks](https://arxiv.org/abs/2104.02040) 300 | * [CNNs for enhanced background discrimination in DSNB searches in large-scale water-Gd detectors](https://arxiv.org/abs/2104.13426) 301 | * [The use of Boosted Decision Trees for Energy Reconstruction in JUNO experiment](https://arxiv.org/abs/2106.02907) 302 | * [Deep learning reconstruction in ANTARES](https://arxiv.org/abs/2107.13654) 303 | 304 | * Direct Dark Matter Detectors 305 | 306 | * Boosted decision trees approach to neck alpha events discrimination in DEAP-3600 experiment 307 | * [Improving sensitivity to low-mass dark matter in LUX using a novel electrode background mitigation technique](https://arxiv.org/abs/2011.09602) 308 | * [Convolutional Neural Networks for Direct Detection of Dark Matter](https://arxiv.org/abs/1911.09210) [[DOI](https://doi.org/10.1088/1361-6471/ab8e94)] 309 | * [Deep Learning for direct Dark Matter search with nuclear emulsions](https://arxiv.org/abs/2106.11995) 310 | * [Scanning the landscape of axion dark matter detectors: applying gradient descent to experimental design](https://arxiv.org/abs/2108.13894) 311 | 312 | * Cosmology, Astro Particle, and Cosmic Ray physics 313 | 314 | * [Detecting Subhalos in Strong Gravitational Lens Images with Image Segmentation](https://arxiv.org/abs/2009.06663) 315 | * [Mining for Dark Matter Substructure: Inferring subhalo population properties from strong lenses with machine learning](https://arxiv.org/abs/1909.02005) 
[[DOI](https://doi.org/10.3847/1538-4357/ab4c41)] 316 | * [Inverting cosmic ray propagation by Convolutional Neural Networks](https://arxiv.org/abs/2011.11930) 317 | * [Particle Track Reconstruction using Geometric Deep Learning](https://arxiv.org/abs/2012.08515) 318 | * [Deep-Learning based Reconstruction of the Shower Maximum $X_{\mathrm{max}}$ using the Water-Cherenkov Detectors of the Pierre Auger Observatory](https://arxiv.org/abs/2101.02946) 319 | * [A comparison of optimisation algorithms for high-dimensional particle and astrophysics applications](https://arxiv.org/abs/2101.04525) 320 | * [Tackling the muon identification in water Cherenkov detectors problem for the future Southern Wide-field Gamma-ray Observatory by means of Machine Learning](https://arxiv.org/abs/2101.11924) 321 | * [Muon identification in a compact single-layered water Cherenkov detector and gamma/hadron discrimination using Machine Learning techniques](https://arxiv.org/abs/2101.10109) 322 | * [A convolutional-neural-network estimator of CMB constraints on dark matter energy injection](https://arxiv.org/abs/2101.10360) 323 | * [A neural network classifier for electron identification on the DAMPE experiment](https://arxiv.org/abs/2102.05534) 324 | * [Bayesian nonparametric inference of neutron star equation of state via neural network](https://arxiv.org/abs/2103.05408) 325 | * [Novel null tests for the spatial curvature and homogeneity of the Universe and their machine learning reconstructions](https://arxiv.org/abs/2103.06789) 326 | * [Machine Learning the 6th Dimension: Stellar Radial Velocities from 5D Phase-Space Correlations](https://arxiv.org/abs/2103.14039) 327 | * [Via Machinae: Searching for Stellar Streams using Unsupervised Machine Learning](https://arxiv.org/abs/2104.12789) 328 | * [Development of Convolutional Neural Networks for an Electron-Tracking Compton Camera](https://arxiv.org/abs/2105.02512) 329 | * [Machine Learning improved fits of the sound horizon at the baryon 
drag epoch](https://arxiv.org/abs/2106.00428) 330 | * [Using Convolutional Neural Networks for the Helicity Classification of Magnetic Fields](https://arxiv.org/abs/2106.06718) 331 | * [Dim but not entirely dark: Extracting the Galactic Center Excess' source-count distribution with neural nets](https://arxiv.org/abs/2107.09070) 332 | * [Constraining dark matter annihilation with cosmic ray antiprotons using neural networks](https://arxiv.org/abs/2107.12395) 333 | * [Probing Ultra-light Axion Dark Matter from 21cm Tomography using Convolutional Neural Networks](https://arxiv.org/abs/2108.07972) 334 | 335 | * Tracking 336 | 337 | * [Particle Track Reconstruction with Deep Learning](https://dl4physicalsciences.github.io/files/nips_dlps_2017_28.pdf) 338 | * [Novel deep learning methods for track reconstruction](https://arxiv.org/abs/1810.06111) 339 | * [The Tracking Machine Learning challenge : Accuracy phase](https://arxiv.org/abs/1904.06778) [[DOI](https://doi.org/10.1007/978-3-030-29135-8\_9)] 340 | * [Graph Neural Networks for Particle Reconstruction in High Energy Physics detectors](https://arxiv.org/abs/2003.11603) 341 | * [An updated hybrid deep learning algorithm for identifying and locating primary vertices](https://arxiv.org/abs/2007.01023) 342 | * [Secondary Vertex Finding in Jets with Neural Networks](https://arxiv.org/abs/2008.02831) 343 | * [Track Seeding and Labelling with Embedded-space Graph Neural Networks](https://arxiv.org/abs/2007.00149) 344 | * [First application of machine learning algorithms to the position reconstruction in Resistive Silicon Detectors](https://arxiv.org/abs/2011.02410) [[DOI](https://doi.org/10.1088/1748-0221/16/03/P03019)] 345 | * [Beyond 4D Tracking: Using Cluster Shapes for Track Seeding](https://arxiv.org/abs/2012.04533) 346 | * [Hashing and metric learning for charged particle tracking](https://arxiv.org/abs/2101.06428) 347 | * [Development of a Vertex Finding Algorithm using Recurrent Neural
Network](https://arxiv.org/abs/2101.11906) 348 | * [Towards a realistic track reconstruction algorithm based on graph neural networks for the HL-LHC](https://arxiv.org/abs/2103.00916) 349 | * [Progress in developing a hybrid deep learning algorithm for identifying and locating primary vertices](https://arxiv.org/abs/2103.04962) 350 | * [Instance Segmentation GNNs for One-Shot Conformal Tracking at the LHC](https://arxiv.org/abs/2103.06509) 351 | * [Physics and Computing Performance of the Exa.TrkX TrackML Pipeline](https://arxiv.org/abs/2103.06995) 352 | * [Charged particle tracking via edge-classifying interaction networks](https://arxiv.org/abs/2103.16701) 353 | * [Using Machine Learning to Select High-Quality Measurements](https://arxiv.org/abs/2106.08891) 354 | * [Optical Inspection of the Silicon Micro-strip Sensors for the CBM Experiment employing Artificial Intelligence](https://arxiv.org/abs/2107.07714) 355 | * [Machine learning for surface prediction in ACTS](https://arxiv.org/abs/2108.03068) 356 | 357 | * Heavy Ions / Nuclear Physics 358 | 359 | * [An equation-of-state-meter of quantum chromodynamics transition from deep learning](https://arxiv.org/abs/1612.04262) [[DOI](https://doi.org/10.1038/s41467-017-02726-3)] 360 | * [Probing heavy ion collisions using quark and gluon jet substructure](https://arxiv.org/abs/1803.03589) 361 | * [Deep learning jet modifications in heavy-ion collisions](https://arxiv.org/abs/2012.07797) 362 | * [Identifying the nature of the QCD transition in relativistic collision of heavy nuclei with deep learning](https://arxiv.org/abs/1910.11530) [[DOI](https://doi.org/10.1140/epjc/s10052-020-8030-7)] 363 | * [Estimation of Impact Parameter and Transverse Spherocity in heavy-ion collisions at the LHC energies using Machine Learning](https://arxiv.org/abs/2103.01736) 364 | * [Constraining nuclear effects in Argon using machine learning algorithms](https://arxiv.org/abs/2105.12733) 365 | * [Detecting Chiral Magnetic Effect via Deep 
Learning](https://arxiv.org/abs/2105.13761) 366 | * [Classifying near-threshold enhancement using deep neural network](https://arxiv.org/abs/2106.03453) 367 | * [Application of radial basis functions neutral networks in spectral functions](https://arxiv.org/abs/2106.08168) 368 | * [Deep Learning for the Classification of Quenched Jets](https://arxiv.org/abs/2106.08869) 369 | * [inclusiveAI: A machine learning representation of the $F_2$ structure function over all charted $Q^2$ and $x$ range](https://arxiv.org/abs/2106.06390) 370 | * [Jet tomography in heavy ion collisions with deep learning](https://arxiv.org/abs/2106.11271) 371 | * [An equation-of-state-meter for CBM using PointNet](https://arxiv.org/abs/2107.05590) 372 | * [Probing criticality with deep learning in relativistic heavy-ion collisions](https://arxiv.org/abs/2107.11828) 373 | * [Modeling of charged-particle multiplicity and transverse-momentum distributions in pp collisions using a DNN](https://arxiv.org/abs/2108.06102) 374 | * [Machine-learning-based identification for initial clustering structure in relativistic heavy-ion collisions](https://arxiv.org/abs/2109.06277) 375 | * [Particles Multiplicity Based on Rapidity in Landau and Artificial Neural Network(ANN) Models](https://arxiv.org/abs/2109.07191) 376 | 377 | * Hyperparameters 378 | 379 | * [Evolutionary algorithms for hyperparameter optimization in machine learning for application in high energy physics](https://arxiv.org/abs/2011.04434) [[DOI](https://doi.org/10.1140/epjc/s10052-021-08950-y)] 380 | * [Application of Deep Learning Technique to an Analysis of Hard Scattering Processes at Colliders](https://arxiv.org/abs/2109.08520) 381 | 382 | * Weak/Semi supervision 383 | 384 | * [Weakly Supervised Classification in High Energy Physics](https://arxiv.org/abs/1702.00414) [[DOI](https://doi.org/10.1007/JHEP05(2017)145)] 385 | * [Classification without labels: Learning from mixed samples in high energy physics](https://arxiv.org/abs/1708.02949) 
[[DOI](https://doi.org/10.1007/JHEP10(2017)174)] 386 | * [Learning to classify from impure samples with high-dimensional data](https://arxiv.org/abs/1801.10158) [[DOI](https://doi.org/10.1103/PhysRevD.98.011502)] 387 | * [Anomaly Detection for Resonant New Physics with Machine Learning](https://arxiv.org/abs/1805.02664) [[DOI](https://doi.org/10.1103/PhysRevLett.121.241803)] 388 | * [Extending the search for new resonances with machine learning](https://arxiv.org/abs/1902.02634) [[DOI](https://doi.org/10.1103/PhysRevD.99.014038)] 389 | * [Machine Learning on data with sPlot background subtraction](https://arxiv.org/abs/1905.11719) [[DOI](https://doi.org/10.1088/1748-0221/14/08/P08020)] 390 | * [(Machine) Learning to Do More with Less](https://arxiv.org/abs/1706.09451) [[DOI](https://doi.org/10.1007/JHEP02(2018)034)] 391 | * [An operational definition of quark and gluon jets](https://arxiv.org/abs/1809.01140) [[DOI](https://doi.org/10.1007/JHEP11(2018)059)] 392 | * [Jet Topics: Disentangling Quarks and Gluons at Colliders](https://arxiv.org/abs/1802.00008) [[DOI](https://doi.org/10.1103/PhysRevLett.120.241602)] 393 | * [Dijet resonance search with weak supervision using 13 TeV pp collisions in the ATLAS detector](https://arxiv.org/abs/2005.02983) [[DOI](https://doi.org/10.1103/PhysRevLett.125.131801)] 394 | * [Tag N' Train: A Technique to Train Improved Classifiers on Unlabeled Data](https://arxiv.org/abs/2002.12376) [[DOI](https://doi.org/10.1007/JHEP01(2021)153)] 395 | * [Data-driven quark and gluon jet modification in heavy-ion collisions](https://arxiv.org/abs/2008.08596) [[DOI](https://doi.org/10.1103/PhysRevC.103.L021901)] 396 | * [Machine learning approach for the search of resonances with topological features at the Large Hadron Collider](https://arxiv.org/abs/2011.09863) 397 | * [Quark Gluon Jet Discrimination with Weakly Supervised Learning](https://arxiv.org/abs/2012.02540) [[DOI](https://doi.org/10.3938/jkps.75.652)] 398 | * [An investigation of 
over-training within semi-supervised machine learning models in the search for heavy resonances at the LHC](https://arxiv.org/abs/2109.07287) 399 | 400 | * Unsupervised 401 | 402 | * [Fuzzy Jets](https://arxiv.org/abs/1509.02216) [[DOI](https://doi.org/10.1007/JHEP06(2016)010)] 403 | * [Metric Space of Collider Events](https://arxiv.org/abs/1902.02346) [[DOI](https://doi.org/10.1103/PhysRevLett.123.041801)] 404 | * [Learning the latent structure of collider events](https://arxiv.org/abs/2005.12319) [[DOI](https://doi.org/10.1007/JHEP10(2020)206)] 405 | * [Uncovering latent jet substructure](https://arxiv.org/abs/1904.04200) [[DOI](https://doi.org/10.1103/PhysRevD.100.056002)] 406 | * [Linearized Optimal Transport for Collider Events](https://arxiv.org/abs/2008.08604) [[DOI](https://doi.org/10.1103/PhysRevD.102.116019)] 407 | * [Foundations of a Fast, Data-Driven, Machine-Learned Simulator](https://arxiv.org/abs/2101.08944) 408 | * [Symmetries, Safety, and Self-Supervision](https://arxiv.org/abs/2108.04253) 409 | 410 | * Reinforcement Learning 411 | 412 | * [Jet grooming through reinforcement learning](https://arxiv.org/abs/1903.09644) [[DOI](https://doi.org/10.1103/PhysRevD.100.014014)] 413 | * [Hierarchical clustering in particle physics through reinforcement learning](https://arxiv.org/abs/2011.08191) 414 | * [Real-time Artificial Intelligence for Accelerator Control: A Study at the Fermilab Booster](https://arxiv.org/abs/2011.07371) 415 | * [Particle Physics Model Building with Reinforcement Learning](https://arxiv.org/abs/2103.04759) 416 | * [Reframing Jet Physics with New Computational Methods](https://arxiv.org/abs/2105.10512) 417 | 418 | * Quantum Machine Learning 419 | 420 | * [Solving a Higgs optimization problem with quantum annealing for machine learning](https://doi.org/10.1038/nature24047) 421 | * [Quantum adiabatic machine learning with zooming](https://arxiv.org/abs/1908.04480) [[DOI](https://doi.org/10.1103/PhysRevA.102.062405)] 422 | * [Quantum 
Machine Learning for Particle Physics using a Variational Quantum Classifier](https://arxiv.org/abs/2010.07335) [[DOI](https://doi.org/10.1007/JHEP02(2021)212)] 423 | * [Event Classification with Quantum Machine Learning in High-Energy Physics](https://arxiv.org/abs/2002.09935) [[DOI](https://doi.org/10.1007/s41781-020-00047-7)] 424 | * [Quantum Convolutional Neural Networks for High Energy Physics Data Analysis](https://arxiv.org/abs/2012.12177) 425 | * [Application of Quantum Machine Learning using the Quantum Variational Classifier Method to High Energy Physics Analysis at the LHC on IBM Quantum Computer Simulator and Hardware with 10 qubits](https://arxiv.org/abs/2012.11560) 426 | * [Quantum Machine Learning in High Energy Physics](https://arxiv.org/abs/2005.08582) [[DOI](https://doi.org/10.1088/2632-2153/abc17d)] 427 | * [Hybrid Quantum-Classical Graph Convolutional Network](https://arxiv.org/abs/2101.06189) 428 | * [Unsupervised Event Classification with Graphs on Classical and Photonic Quantum Computers](https://arxiv.org/abs/2103.03897) 429 | * [Quantum Support Vector Machines for Continuum Suppression in B Meson Decays](https://arxiv.org/abs/2103.12257) 430 | * [Application of Quantum Machine Learning using the Quantum Kernel Algorithm on High Energy Physics Analysis at the LHC](https://arxiv.org/abs/2104.05059) 431 | * [Higgs analysis with quantum classifiers](https://arxiv.org/abs/2104.07692) 432 | * [Quantum-inspired event reconstruction with Tensor Networks: Matrix Product States](https://arxiv.org/abs/2106.08334) 433 | 434 | * Feature ranking 435 | 436 | * [Mapping Machine-Learned Physics into a Human-Readable Space](https://arxiv.org/abs/2010.11998) [[DOI](https://doi.org/10.1103/PhysRevD.103.036020)] 437 | * [Resurrecting $b\bar{b}h$ with kinematic shapes](https://arxiv.org/abs/2011.13945) 438 | 439 | * Attention 440 | 441 | * [Development of a Vertex Finding Algorithm using Recurrent Neural Network](https://arxiv.org/abs/2101.11906) 442 | 443 | * 
Regularization 444 | 445 | * [Combine and Conquer: Event Reconstruction with Bayesian Ensemble Neural Networks](https://arxiv.org/abs/2102.01078) 446 | 447 | * Software 448 | 449 | * [On the impact of modern deep-learning techniques to the performance and time-requirements of classification models in experimental high-energy physics](https://arxiv.org/abs/2002.01427) [[DOI](https://doi.org/10.1088/2632-2153/ab983a)] 450 | * [Efficient, reliable and fast high-level triggering using a bonsai boosted decision tree](https://arxiv.org/abs/1210.6861) [[DOI](https://doi.org/10.1088/1748-0221/8/02/P02013)] 451 | * [Deep topology classifiers for a more efficient trigger selection at the LHC](https://dl4physicalsciences.github.io/files/nips_dlps_2017_3.pdf) 452 | * [Topology classification with deep learning to improve real-time event selection at the LHC](https://arxiv.org/abs/1807.00083) [[DOI](https://doi.org/10.1007/s41781-019-0028-1)] 453 | * [Using holistic event information in the trigger](https://arxiv.org/abs/1808.00711) 454 | * [Fast convolutional neural networks for identifying long-lived particles in a high-granularity calorimeter](https://arxiv.org/abs/2004.10744) [[DOI](https://doi.org/10.1088/1748-0221/15/12/P12006)] 455 | * [A comparison of optimisation algorithms for high-dimensional particle and astrophysics applications](https://arxiv.org/abs/2101.04525) 456 | * [Reduced Precision Strategies for Deep Learning: A High Energy Physics Generative Adversarial Network Use Case](https://arxiv.org/abs/2103.10142) [[DOI](https://doi.org/10.5220/0010245002510258)] 457 | * [Towards an Interpretable Data-driven Trigger System for High-throughput Physics Facilities](https://arxiv.org/abs/2104.06622) 458 | * [The Tracking Machine Learning challenge : Throughput phase](https://arxiv.org/abs/2105.01160) 459 | * [Jet Single Shot Detection](https://arxiv.org/abs/2105.05785) 460 | 461 | * Hardware/firmware 462 | 463 | * [Fast inference of deep neural networks in FPGAs for
particle physics](https://arxiv.org/abs/1804.06913) [[DOI](https://doi.org/10.1088/1748-0221/13/07/P07027)] 464 | * [Compressing deep neural networks on FPGAs to binary and ternary precision with HLS4ML](https://arxiv.org/abs/2003.06308) [[DOI](https://doi.org/10.1088/2632-2153/aba042)] 465 | * [Fast inference of Boosted Decision Trees in FPGAs for particle physics](https://arxiv.org/abs/2002.02534) [[DOI](https://doi.org/10.1088/1748-0221/15/05/P05026)] 466 | * [GPU coprocessors as a service for deep learning inference in high energy physics](https://arxiv.org/abs/2007.10359) 467 | * [Distance-Weighted Graph Neural Networks on FPGAs for Real-Time Particle Reconstruction in High Energy Physics](https://arxiv.org/abs/2008.03601) [[DOI](https://doi.org/10.3389/fdata.2020.598927)] 468 | * [Studying the potential of Graphcore IPUs for applications in Particle Physics](https://arxiv.org/abs/2008.09210) [[DOI](https://doi.org/10.1007/s41781-021-00057-z)] 469 | * [PDFFlow: parton distribution functions on GPU](https://arxiv.org/abs/2009.06635) 470 | * [FPGAs-as-a-Service Toolkit (FaaST)](https://arxiv.org/abs/2010.08556) [[DOI](https://doi.org/10.1109/H2RC51942.2020.00010)] 471 | * [Accelerated Charged Particle Tracking with Graph Neural Networks on FPGAs](https://arxiv.org/abs/2012.01563) 472 | * [PDFFlow: hardware accelerating parton density access](https://arxiv.org/abs/2012.08221) [[DOI](https://doi.org/10.5821/zenodo.4286175)] 473 | * [Fast convolutional neural networks on FPGAs with hls4ml](https://arxiv.org/abs/2101.05108) 474 | * [Ps and Qs: Quantization-aware pruning for efficient low latency neural network inference](https://arxiv.org/abs/2102.11289) 475 | * [Sparse Deconvolution Methods for Online Energy Estimation in Calorimeters Operating in High Luminosity Conditions](https://arxiv.org/abs/2103.12467) 476 | * [Nanosecond machine learning event classification with boosted decision trees in FPGA for high energy physics](https://arxiv.org/abs/2104.03408) 477 | 
* [A reconfigurable neural network ASIC for detector front-end data compression at the HL-LHC](https://arxiv.org/abs/2105.01683) 478 | * [Muon trigger with fast Neural Networks on FPGA, a demonstrator](https://arxiv.org/abs/2105.04428) 479 | * [Autoencoders on FPGAs for real-time, unsupervised new physics detection at 40 MHz at the Large Hadron Collider](https://arxiv.org/abs/2108.03986) 480 | 481 | * Deployment 482 | 483 | * [MLaaS4HEP: Machine Learning as a Service for HEP](https://arxiv.org/abs/2007.14781) 484 | * [Distributed training and scalability for the particle clustering method UCluster](https://arxiv.org/abs/2109.00264) [[DOI](https://doi.org/10.1051/epjconf/202125102054)] 485 | 486 | * Regression 487 | * Pileup 488 | 489 | * [Pileup Mitigation with Machine Learning (PUMML)](https://arxiv.org/abs/1707.08600) [[DOI](https://doi.org/10.1007/JHEP12(2017)051)] 490 | * [Convolutional Neural Networks with Event Images for Pileup Mitigation with the ATLAS Detector](http://cds.cern.ch/record/2684070) 491 | * [Pileup mitigation at the Large Hadron Collider with graph neural networks](https://arxiv.org/abs/1810.07988) [[DOI](https://doi.org/10.1140/epjp/i2019-12710-3)] 492 | * [Jet grooming through reinforcement learning](https://arxiv.org/abs/1903.09644) [[DOI](https://doi.org/10.1103/PhysRevD.100.014014)] 493 | * [Pile-Up Mitigation using Attention](https://arxiv.org/abs/2107.02779) 494 | 495 | * Calibration 496 | 497 | * [Parametrizing the Detector Response with Neural Networks](https://arxiv.org/abs/1910.03773) [[DOI](https://doi.org/10.1088/1748-0221/15/01/P01030)] 498 | * [Simultaneous Jet Energy and Mass Calibrations with Neural Networks](http://cds.cern.ch/record/2706189) 499 | * [Generalized Numerical Inversion: A Neural Network Approach to Jet Calibration](http://cds.cern.ch/record/2630972) 500 | * [Calorimetry with Deep Learning: Particle Classification, Energy Regression, and Simulation for High-Energy 
Physics](https://dl4physicalsciences.github.io/files/nips_dlps_2017_15.pdf) 501 | * [Per-Object Systematics using Deep-Learned Calibration](https://arxiv.org/abs/2003.11099) [[DOI](https://doi.org/10.21468/SciPostPhys.9.6.089)] 502 | * [A deep neural network for simultaneous estimation of b jet energy and resolution](https://arxiv.org/abs/1912.06046) [[DOI](https://doi.org/10.1007/s41781-020-00041-z)] 503 | * [How to GAN Higher Jet Resolution](https://arxiv.org/abs/2012.11944) 504 | * [Deep learning jet modifications in heavy-ion collisions](https://arxiv.org/abs/2012.07797) 505 | * [Calorimetric Measurement of Multi-TeV Muons via Deep Regression](https://arxiv.org/abs/2107.02119) 506 | * [Transport away your problems: Calibrating stochastic simulations with optimal transport](https://arxiv.org/abs/2107.08648) 507 | * [On the Use of Neural Networks for Energy Reconstruction in High-granularity Calorimeters](https://arxiv.org/abs/2107.10207) 508 | * [Object condensation: one-stage grid-free multi-object reconstruction in physics detectors, graph and image data](https://arxiv.org/abs/2002.03605) [[DOI](https://doi.org/10.1140/epjc/s10052-020-08461-2)] 509 | * [Perspectives on the Calibration of CNN Energy Reconstruction in Highly Granular Calorimeters](https://arxiv.org/abs/2108.10963) 510 | * [Deeply Learning Deep Inelastic Scattering Kinematics](https://arxiv.org/abs/2108.11638) 511 | * [Energy reconstruction in a liquid argon calorimeter cell using convolutional neural networks](https://arxiv.org/abs/2109.05124) 512 | * [Using Convolutional Neural Networks to Reconstruct Energy of GeV Scale IceCube Neutrinos](https://arxiv.org/abs/2109.08152) 513 | 514 | * Recasting 515 | 516 | * [The BSM-AI project: SUSY-AI--generalizing LHC limits on supersymmetry with machine learning](https://doi.org/10.1140/epjc/s10052-017-4814-9) 517 | * [Accelerating the BSM interpretation of LHC data with machine learning](https://arxiv.org/abs/1611.02704)
[[DOI](https://doi.org/10.1016/j.dark.2019.100293)] 518 | * [Bayesian Neural Networks for Fast SUSY Predictions](https://arxiv.org/abs/2007.04506) [[DOI](https://doi.org/10.1016/j.physletb.2020.136041)] 519 | 520 | * Matrix elements 521 | 522 | * [Using neural networks for efficient evaluation of high multiplicity scattering amplitudes](https://arxiv.org/abs/2002.07516) [[DOI](https://doi.org/10.1007/JHEP06(2020)114)] 523 | * [(Machine) Learning Amplitudes for Faster Event Generation](https://arxiv.org/abs/1912.11055) 524 | * [$\textsf{Xsec}$: the cross-section evaluation code](https://arxiv.org/abs/2006.16273) [[DOI](https://doi.org/10.1140/epjc/s10052-020-08635-y)] 525 | * [Matrix Element Regression with Deep Neural Networks -- breaking the CPU barrier](https://arxiv.org/abs/2008.10949) 526 | * [Unveiling the pole structure of S-matrix using deep learning](https://arxiv.org/abs/2104.14182) 527 | * [Model independent analysis of coupled-channel scattering: a deep learning approach](https://arxiv.org/abs/2105.04898) 528 | * [Optimising simulations for diphoton production at hadron colliders using amplitude neural networks](https://arxiv.org/abs/2106.09474) 529 | * [A factorisation-aware Matrix element emulator](https://arxiv.org/abs/2107.06625) 530 | 531 | * Parameter estimation 532 | 533 | * [Numerical analysis of neutrino physics within a high scale supersymmetry model via machine learning](https://arxiv.org/abs/2006.01495) [[DOI](https://doi.org/10.1142/S0217732320502181)] 534 | * [Parametrized classifiers for optimal EFT sensitivity](https://arxiv.org/abs/2007.10356) 535 | * [MCNNTUNES: tuning Shower Monte Carlo generators with machine learning](https://arxiv.org/abs/2010.02213) [[DOI](https://doi.org/10.1016/j.cpc.2021.107908)] 536 | * [Deep-Learned Event Variables for Collider Phenomenology](https://arxiv.org/abs/2105.10126) 537 | * [Using Machine Learning techniques in phenomenological studies in flavour physics](https://arxiv.org/abs/2109.07405) 538 | 539 | 
* Parton Distribution Functions (and related) 540 | 541 | * [Neural-network analysis of Parton Distribution Functions from Ioffe-time pseudodistributions](https://arxiv.org/abs/2010.03996) [[DOI](https://doi.org/10.1007/JHEP02(2021)138)] 542 | * [Deep Learning Analysis of Deeply Virtual Exclusive Photoproduction](https://arxiv.org/abs/2012.04801) 543 | * [PDFFlow: hardware accelerating parton density access](https://arxiv.org/abs/2012.08221) [[DOI](https://doi.org/10.5821/zenodo.4286175)] 544 | * [Compressing PDF sets using generative adversarial networks](https://arxiv.org/abs/2104.04535) 545 | * [The Path to Proton Structure at One-Percent Accuracy](https://arxiv.org/abs/2109.02653) 546 | * [An open-source machine learning framework for global analyses of parton distributions](https://arxiv.org/abs/2109.02671) 547 | 548 | * Lattice Gauge Theory 549 | 550 | * [Equivariant flow-based sampling for lattice gauge theory](https://arxiv.org/abs/2003.06413) [[DOI](https://doi.org/10.1103/PhysRevLett.125.121601)] 551 | * [Lattice gauge equivariant convolutional neural networks](https://arxiv.org/abs/2012.12901) 552 | * [Generalization capabilities of translationally equivariant neural networks](https://arxiv.org/abs/2103.14686) 553 | * [Heavy Quark Potential in QGP: DNN meets LQCD](https://arxiv.org/abs/2105.07862) 554 | * [Flow-based sampling for multimodal distributions in lattice field theory](https://arxiv.org/abs/2107.00734) 555 | 556 | * Function Approximation 557 | 558 | * [Elvet -- a neural network-based differential equation and variational problem solver](https://arxiv.org/abs/2103.14575) 559 | * [Invariant polynomials and machine learning](https://arxiv.org/abs/2104.12733) 560 | 561 | * Decorrelation methods. 
562 | 563 | * [Learning to Pivot with Adversarial Networks](https://arxiv.org/abs/1611.01046) [[url](https://papers.nips.cc/paper/2017/hash/48ab2f9b45957ab574cf005eb8a76760-Abstract.html)] 564 | * [Thinking outside the ROCs: Designing Decorrelated Taggers (DDT) for jet substructure](https://arxiv.org/abs/1603.00027) [[DOI](https://doi.org/10.1007/JHEP05(2016)156)] 565 | * Convolved Substructure: Analytically Decorrelating Jet Substructure Observables 566 | * [uBoost: A boosting method for producing uniform selection efficiencies from multivariate classifiers](https://arxiv.org/abs/1305.7248) [[DOI](https://doi.org/10.1088/1748-0221/8/12/P12013)] 567 | * [Decorrelated Jet Substructure Tagging using Adversarial Neural Networks](https://arxiv.org/abs/1703.03507) [[DOI](https://doi.org/10.1103/PhysRevD.96.074034)] 568 | * [Mass Agnostic Jet Taggers](https://arxiv.org/abs/1908.08959) [[DOI](https://doi.org/10.21468/SciPostPhys.8.1.011)] 569 | * [Performance of mass-decorrelated jet substructure](http://cds.cern.ch/record/2630973) 570 | * [DisCo Fever: Robust Networks Through Distance Correlation](https://arxiv.org/abs/2001.05310) [[DOI](https://doi.org/10.1103/PhysRevLett.125.122001)] 571 | * [QBDT, a new boosting decision tree method with systematical uncertainties into training for High Energy Physics](https://arxiv.org/abs/1810.08387) [[DOI](https://doi.org/10.1016/j.nima.2019.03.088)] 572 | * [Machine Learning Uncertainties with Adversarial Neural Networks](https://arxiv.org/abs/1807.08763) [[DOI](https://doi.org/10.1140/epjc/s10052-018-6511-8)] 573 | * [Reducing the dependence of the neural network function to systematic uncertainties in the input space](https://arxiv.org/abs/1907.11674) [[DOI](https://doi.org/10.1007/s41781-020-00037-9)] 574 | * [New approaches for boosting to uniformity](https://arxiv.org/abs/1410.4140) [[DOI](https://doi.org/10.1088/1748-0221/10/03/T03002)] 575 | * [A deep neural network to search for new long-lived particles decaying to 
jets](https://arxiv.org/abs/1912.12238) [[DOI](https://doi.org/10.1088/2632-2153/ab9023)] 576 | * [Adversarial domain adaptation to reduce sample bias of a high energy physics classifier](https://arxiv.org/abs/2005.00568) 577 | * [ABCDisCo: Automating the ABCD Method with Machine Learning](https://arxiv.org/abs/2007.14400) [[DOI](https://doi.org/10.1103/PhysRevD.103.035021)] 578 | * [Enhancing searches for resonances with machine learning and moment decomposition](https://arxiv.org/abs/2010.09745) 579 | * [A Cautionary Tale of Decorrelating Theory Uncertainties](https://arxiv.org/abs/2109.08159) 580 | 581 | * Generative models / density estimation 582 | * GANs: 583 | 584 | * [Learning Particle Physics by Example: Location-Aware Generative Adversarial Networks for Physics Synthesis](https://arxiv.org/abs/1701.05927) [[DOI](https://doi.org/10.1007/s41781-017-0004-6)] 585 | * [Accelerating Science with Generative Adversarial Networks: An Application to 3D Particle Showers in Multilayer Calorimeters](https://arxiv.org/abs/1705.02355) [[DOI](https://doi.org/10.1103/PhysRevLett.120.042003)] 586 | * [CaloGAN : Simulating 3D high energy particle showers in multilayer electromagnetic calorimeters with generative adversarial networks](https://arxiv.org/abs/1712.10321) [[DOI](https://doi.org/10.1103/PhysRevD.97.014021)] 587 | * [Image-based model parameter optimization using Model-Assisted Generative Adversarial Networks](https://arxiv.org/abs/1812.00879) [[DOI](https://doi.org/10.1109/TNNLS.2020.2969327)] 588 | * [How to GAN Event Subtraction](https://arxiv.org/abs/1912.08824) [[DOI](https://doi.org/10.21468/SciPostPhysCore.3.2.009)] 589 | * [Particle Generative Adversarial Networks for full-event simulation at the LHC and their application to pileup description](https://arxiv.org/abs/1912.02748) [[DOI](https://doi.org/10.1088/1742-6596/1525/1/012081)] 590 | * [How to GAN away Detector Effects](https://arxiv.org/abs/1912.00477) 
[[DOI](https://doi.org/10.21468/SciPostPhys.8.4.070)] 591 | * [3D convolutional GAN for fast simulation](https://doi.org/10.1051/epjconf/201921402010) 592 | * [Fast simulation of muons produced at the SHiP experiment using Generative Adversarial Networks](https://arxiv.org/abs/1909.04451) [[DOI](https://doi.org/10.1088/1748-0221/14/11/P11028)] 593 | * [Lund jet images from generative and cycle-consistent adversarial networks](https://arxiv.org/abs/1909.01359) [[DOI](https://doi.org/10.1140/epjc/s10052-019-7501-1)] 594 | * [How to GAN LHC Events](https://arxiv.org/abs/1907.03764) [[DOI](https://doi.org/10.21468/SciPostPhys.7.6.075)] 595 | * [Machine Learning Templates for QCD Factorization in the Search for Physics Beyond the Standard Model](https://arxiv.org/abs/1903.02556) [[DOI](https://doi.org/10.1007/JHEP05(2019)181)] 596 | * [DijetGAN: A Generative-Adversarial Network Approach for the Simulation of QCD Dijet Events at the LHC](https://arxiv.org/abs/1903.02433) [[DOI](https://doi.org/10.1007/JHEP08(2019)110)] 597 | * [LHC analysis-specific datasets with Generative Adversarial Networks](https://arxiv.org/abs/1901.05282) 598 | * [Generative Models for Fast Calorimeter Simulation.LHCb case](https://arxiv.org/abs/1812.01319) [[DOI](https://doi.org/10.1051/epjconf/201921402034)] 599 | * [Deep generative models for fast shower simulation in ATLAS](http://cds.cern.ch/record/2630433) 600 | * [Regressive and generative neural networks for scalar field theory](https://arxiv.org/abs/1810.12879) [[DOI](https://doi.org/10.1103/PhysRevD.100.011501)] 601 | * [Three dimensional Generative Adversarial Networks for fast simulation](https://doi.org/10.1088/1742-6596/1085/3/032016) 602 | * [Generative models for fast simulation](https://doi.org/10.1088/1742-6596/1085/2/022005) 603 | * [Unfolding with Generative Adversarial Networks](https://arxiv.org/abs/1806.00433) 604 | * [Fast and Accurate Simulation of Particle Detectors Using Generative Adversarial 
Networks](https://arxiv.org/abs/1805.00850) [[DOI](https://doi.org/10.1007/s41781-018-0015-y)] 605 | * [Generating and refining particle detector simulations using the Wasserstein distance in adversarial networks](https://arxiv.org/abs/1802.03325) [[DOI](https://doi.org/10.1007/s41781-018-0008-x)] 606 | * [Generative models for fast cluster simulations in the TPC for the ALICE experiment](https://doi.org/10.1051/epjconf/201921406003) 607 | * [RICH 2018](https://arxiv.org/abs/1903.11788) [[DOI](https://doi.org/10.1016/j.nima.2019.01.031)] 608 | * [GANs for generating EFT models](https://arxiv.org/abs/1809.02612) [[DOI](https://doi.org/10.1016/j.physletb.2020.135798)] 609 | * [Precise simulation of electromagnetic calorimeter showers using a Wasserstein Generative Adversarial Network](https://arxiv.org/abs/1807.01954) [[DOI](https://doi.org/10.1007/s41781-018-0019-7)] 610 | * [Reducing Autocorrelation Times in Lattice Simulations with Generative Adversarial Networks](https://arxiv.org/abs/1811.03533) [[DOI](https://doi.org/10.1088/2632-2153/abae73)] 611 | * [Tips and Tricks for Training GANs with Physics Constraints](https://dl4physicalsciences.github.io/files/nips_dlps_2017_26.pdf) 612 | * [Controlling Physical Attributes in GAN-Accelerated Simulation of Electromagnetic Calorimeters](https://arxiv.org/abs/1711.08813) [[DOI](https://doi.org/10.1088/1742-6596/1085/4/042017)] 613 | * [Next Generation Generative Neural Networks for HEP](https://doi.org/10.1051/epjconf/201921409005) 614 | * [Calorimetry with Deep Learning: Particle Classification, Energy Regression, and Simulation for High-Energy Physics](https://dl4physicalsciences.github.io/files/nips_dlps_2017_15.pdf) 615 | * [Calorimetry with Deep Learning: Particle Simulation and Reconstruction for Collider Physics](https://arxiv.org/abs/1912.06794) [[DOI](https://doi.org/10.1140/epjc/s10052-020-8251-9)] 616 | * [Getting High: High Fidelity Simulation of High Granularity Calorimeters with High
Speed](https://arxiv.org/abs/2005.05334) 617 | * [AI-based Monte Carlo event generator for electron-proton scattering](https://arxiv.org/abs/2008.03151) 618 | * [DCTRGAN: Improving the Precision of Generative Models with Reweighting](https://arxiv.org/abs/2009.03796) [[DOI](https://doi.org/10.1088/1748-0221/15/11/p11004)] 619 | * [GANplifying Event Samples](https://arxiv.org/abs/2008.06545) 620 | * [Graph Generative Adversarial Networks for Sparse Data Generation in High Energy Physics](https://arxiv.org/abs/2012.00173) 621 | * [Simulating the Time Projection Chamber responses at the MPD detector using Generative Adversarial Networks](https://arxiv.org/abs/2012.04595) 622 | * [Explainable machine learning of the underlying physics of high-energy particle collisions](https://arxiv.org/abs/2012.06582) 623 | * [A Data-driven Event Generator for Hadron Colliders using Wasserstein Generative Adversarial Network](https://arxiv.org/abs/2102.11524) [[DOI](https://doi.org/10.1007/s40042-021-00095-1)] 624 | * [Reduced Precision Strategies for Deep Learning: A High Energy Physics Generative Adversarial Network Use Case](https://arxiv.org/abs/2103.10142) [[DOI](https://doi.org/10.5220/0010245002510258)] 625 | * [Validation of Deep Convolutional Generative Adversarial Networks for High Energy Physics Calorimeter Simulations](https://arxiv.org/abs/2103.13698) 626 | * [Compressing PDF sets using generative adversarial networks](https://arxiv.org/abs/2104.04535) 627 | * [Physics Validation of Novel Convolutional 2D Architectures for Speeding Up High Energy Physics Simulations](https://arxiv.org/abs/2105.08960) 628 | * [The use of Generative Adversarial Networks to characterise new physics in multi-lepton final states at the LHC](https://arxiv.org/abs/2105.14933) 629 | * [Latent Space Refinement for Deep Generative Models](https://arxiv.org/abs/2106.00792) 630 | * [Particle Cloud Generation with Message Passing Generative Adversarial Networks](https://arxiv.org/abs/2106.11535) 631 
| * [Black-Box Optimization with Local Generative Surrogates](https://arxiv.org/abs/2002.04632) [[url](https://proceedings.neurips.cc/paper/2020/hash/a878dbebc902328b41dbf02aa87abb58-Abstract.html)] 632 | * [Fast Simulation of a High Granularity Calorimeter by Generative Adversarial Networks](https://arxiv.org/abs/2109.07388) 633 | * [Photon detection probability prediction using one-dimensional generative neural network](https://arxiv.org/abs/2109.07277) 634 | 635 | * Autoencoders 636 | 637 | * [Deep Learning as a Parton Shower](https://arxiv.org/abs/1807.03685) 638 | * [Deep generative models for fast shower simulation in ATLAS](http://cds.cern.ch/record/2630433) 639 | * [Variational Autoencoders for Anomalous Jet Tagging](https://arxiv.org/abs/2007.01850) 640 | * [Variational Autoencoders for Jet Simulation](https://arxiv.org/abs/2009.04842) 641 | * [Foundations of a Fast, Data-Driven, Machine-Learned Simulator](https://arxiv.org/abs/2101.08944) 642 | * [Decoding Photons: Physics in the Latent Space of a BIB-AE Generative Network](https://arxiv.org/abs/2102.12491) 643 | * [Bump Hunting in Latent Space](https://arxiv.org/abs/2103.06595) 644 | * [End-to-end Sinkhorn Autoencoder with Noise Generator](https://arxiv.org/abs/2006.06704) 645 | * [Graph Generative Models for Fast Detector Simulations in High Energy Physics](https://arxiv.org/abs/2104.01725) 646 | * [DeepRICH: Learning Deeply Cherenkov Detectors](https://arxiv.org/abs/1911.11717) [[DOI](https://doi.org/10.1088/2632-2153/ab845a)] 647 | 648 | * Normalizing flows 649 | 650 | * [Flow-based generative models for Markov chain Monte Carlo in lattice field theory](https://arxiv.org/abs/1904.12072) [[DOI](https://doi.org/10.1103/PhysRevD.100.034515)] 651 | * [Equivariant flow-based sampling for lattice gauge theory](https://arxiv.org/abs/2003.06413) [[DOI](https://doi.org/10.1103/PhysRevLett.125.121601)] 652 | * [Flows for simultaneous manifold learning and density estimation](https://arxiv.org/abs/2003.13913) 
653 | * [Exploring phase space with Neural Importance Sampling](https://arxiv.org/abs/2001.05478) [[DOI](https://doi.org/10.21468/SciPostPhys.8.4.069)] 654 | * [Event Generation with Normalizing Flows](https://arxiv.org/abs/2001.10028) [[DOI](https://doi.org/10.1103/PhysRevD.101.076002)] 655 | * [i-flow: High-Dimensional Integration and Sampling with Normalizing Flows](https://arxiv.org/abs/2001.05486) [[DOI](https://doi.org/10.1088/2632-2153/abab62)] 656 | * [Anomaly Detection with Density Estimation](https://arxiv.org/abs/2001.04990) [[DOI](https://doi.org/10.1103/PhysRevD.101.075042)] 657 | * [Data-driven Estimation of Background Distribution through Neural Autoregressive Flows](https://arxiv.org/abs/2008.03636) 658 | * [SARM: Sparse Autoregressive Model for Scalable Generation of Sparse Images in Particle Physics](https://arxiv.org/abs/2009.14017) [[DOI](https://doi.org/10.1103/PhysRevD.103.036012)] 659 | * [Measuring QCD Splittings with Invertible Networks](https://arxiv.org/abs/2012.09873) 660 | * [Efficient sampling of constrained high-dimensional theoretical spaces with machine learning](https://arxiv.org/abs/2103.06957) 661 | * [Latent Space Refinement for Deep Generative Models](https://arxiv.org/abs/2106.00792) 662 | * [CaloFlow: Fast and Accurate Generation of Calorimeter Showers with Normalizing Flows](https://arxiv.org/abs/2106.05285) 663 | * [Flow-based sampling for multimodal distributions in lattice field theory](https://arxiv.org/abs/2107.00734) 664 | * [Learning to discover: expressive Gaussian mixture models for multi-dimensional simulation and parameter inference in the physical sciences](https://arxiv.org/abs/2108.11481) 665 | * [Classifying Anomalies THrough Outer Density Estimation (CATHODE)](https://arxiv.org/abs/2109.00546) 666 | * [Black-Box Optimization with Local Generative Surrogates](https://arxiv.org/abs/2002.04632) [[url](https://proceedings.neurips.cc/paper/2020/hash/a878dbebc902328b41dbf02aa87abb58-Abstract.html)] 667 | * [Neural 
Empirical Bayes: Source Distribution Estimation and its Applications to Simulation-Based Inference](https://arxiv.org/abs/2011.05836) [[url](https://proceedings.mlr.press/v130/vandegar21a.html)] 668 | 669 | * Physics-inspired 670 | 671 | * [JUNIPR: a Framework for Unsupervised Machine Learning in Particle Physics](https://arxiv.org/abs/1804.09720) 672 | * [Binary JUNIPR: an interpretable probabilistic model for discrimination](https://arxiv.org/abs/1906.10137) [[DOI](https://doi.org/10.1103/PhysRevLett.123.182001)] 673 | * [Exploring the Possibility of a Recovery of Physics Process Properties from a Neural Network Model](https://arxiv.org/abs/2007.13110) [[DOI](https://doi.org/10.3390/e22090994)] 674 | * [Explainable machine learning of the underlying physics of high-energy particle collisions](https://arxiv.org/abs/2012.06582) 675 | * [Symmetry meets AI](https://arxiv.org/abs/2103.06115) 676 | 677 | * Mixture Models 678 | 679 | * [Data Augmentation at the LHC through Analysis-specific Fast Simulation with Deep Learning](https://arxiv.org/abs/2010.01835) 680 | * [Mixture Density Network Estimation of Continuous Variable Maximum Likelihood Using Discrete Training Samples](https://arxiv.org/abs/2103.13416) 681 | 682 | * Phase space generation 683 | 684 | * [Efficient Monte Carlo Integration Using Boosted Decision](https://arxiv.org/abs/1707.00028) 685 | * [Exploring phase space with Neural Importance Sampling](https://arxiv.org/abs/2001.05478) [[DOI](https://doi.org/10.21468/SciPostPhys.8.4.069)] 686 | * [Event Generation with Normalizing Flows](https://arxiv.org/abs/2001.10028) [[DOI](https://doi.org/10.1103/PhysRevD.101.076002)] 687 | * [i-flow: High-Dimensional Integration and Sampling with Normalizing Flows](https://arxiv.org/abs/2001.05486) [[DOI](https://doi.org/10.1088/2632-2153/abab62)] 688 | * [Neural Network-Based Approach to Phase Space Integration](https://arxiv.org/abs/1810.11509) [[DOI](https://doi.org/10.21468/SciPostPhys.9.4.053)] 689 | * [VegasFlow: 
accelerating Monte Carlo simulation across multiple hardware platforms](https://arxiv.org/abs/2002.12921) [[DOI](https://doi.org/10.1016/j.cpc.2020.107376)] 690 | * [A Neural Resampler for Monte Carlo Reweighting with Preserved Uncertainties](https://arxiv.org/abs/2007.11586) [[DOI](https://doi.org/10.1103/PhysRevD.102.076004)] 691 | * [Improved Neural Network Monte Carlo Simulation](https://arxiv.org/abs/2009.07819) [[DOI](https://doi.org/10.21468/SciPostPhys.10.1.023)] 692 | * [Phase Space Sampling and Inference from Weighted Events with Autoregressive Flows](https://arxiv.org/abs/2011.13445) [[DOI](https://doi.org/10.21468/SciPostPhys.10.2.038)] 693 | * [How to GAN Event Unweighting](https://arxiv.org/abs/2012.07873) 694 | 695 | * Gaussian processes 696 | 697 | * [Modeling Smooth Backgrounds and Generic Localized Signals with Gaussian Processes](https://arxiv.org/abs/1709.05681) 698 | * [Accelerating the BSM interpretation of LHC data with machine learning](https://arxiv.org/abs/1611.02704) [[DOI](https://doi.org/10.1016/j.dark.2019.100293)] 699 | * [$\textsf{Xsec}$: the cross-section evaluation code](https://arxiv.org/abs/2006.16273) [[DOI](https://doi.org/10.1140/epjc/s10052-020-08635-y)] 700 | * [AI-optimized detector design for the future Electron-Ion Collider: the dual-radiator RICH case](https://arxiv.org/abs/1911.05797) [[DOI](https://doi.org/10.1088/1748-0221/15/05/P05009)] 701 | 702 | * Anomaly detection 
703 | 704 | * [Learning New Physics from a Machine](https://arxiv.org/abs/1806.02350) [[DOI](https://doi.org/10.1103/PhysRevD.99.015014)] 705 | * [Anomaly Detection for Resonant New Physics with Machine Learning](https://arxiv.org/abs/1805.02664) [[DOI](https://doi.org/10.1103/PhysRevLett.121.241803)] 706 | * [Extending the search for new resonances with machine learning](https://arxiv.org/abs/1902.02634) [[DOI](https://doi.org/10.1103/PhysRevD.99.014038)] 707 | * [Learning Multivariate New Physics](https://arxiv.org/abs/1912.12155) [[DOI](https://doi.org/10.1140/epjc/s10052-021-08853-y)] 708 | * [Searching for New Physics with Deep Autoencoders](https://arxiv.org/abs/1808.08992) [[DOI](https://doi.org/10.1103/PhysRevD.101.075021)] 709 | * [QCD or What?](https://arxiv.org/abs/1808.08979) [[DOI](https://doi.org/10.21468/SciPostPhys.6.3.030)] 710 | * [A robust anomaly finder based on autoencoder](https://arxiv.org/abs/1903.02032) 711 | * [Variational Autoencoders for New Physics Mining at the Large Hadron Collider](https://arxiv.org/abs/1811.10276) [[DOI](https://doi.org/10.1007/JHEP05(2019)036)] 712 | * [Adversarially-trained autoencoders for robust unsupervised new physics searches](https://arxiv.org/abs/1905.10384) [[DOI](https://doi.org/10.1007/JHEP10(2019)047)] 713 | * [Novelty Detection Meets Collider Physics](https://arxiv.org/abs/1807.10261) [[DOI](https://doi.org/10.1103/PhysRevD.101.076015)] 714 | * [Guiding New Physics Searches with Unsupervised Learning](https://arxiv.org/abs/1807.06038) [[DOI](https://doi.org/10.1140/epjc/s10052-019-6787-3)] 715 | * [Does SUSY have friends? 
A new approach for LHC event analysis](https://arxiv.org/abs/1912.10625) [[DOI](https://doi.org/10.1007/JHEP02(2021)160)] 716 | * [Nonparametric semisupervised classification for signal detection in high energy physics](https://arxiv.org/abs/1809.02977) 717 | * [Uncovering latent jet substructure](https://arxiv.org/abs/1904.04200) [[DOI](https://doi.org/10.1103/PhysRevD.100.056002)] 718 | * [Simulation Assisted Likelihood-free Anomaly Detection](https://arxiv.org/abs/2001.05001) [[DOI](https://doi.org/10.1103/PhysRevD.101.095004)] 719 | * [Anomaly Detection with Density Estimation](https://arxiv.org/abs/2001.04990) [[DOI](https://doi.org/10.1103/PhysRevD.101.075042)] 720 | * [A generic anti-QCD jet tagger](https://arxiv.org/abs/1709.01087) [[DOI](https://doi.org/10.1007/JHEP11(2017)163)] 721 | * [Transferability of Deep Learning Models in Searches for New Physics at Colliders](https://arxiv.org/abs/1912.04220) [[DOI](https://doi.org/10.1103/PhysRevD.101.035042)] 722 | * [Use of a Generalized Energy Mover's Distance in the Search for Rare Phenomena at Colliders](https://arxiv.org/abs/2004.09360) [[DOI](https://doi.org/10.1140/epjc/s10052-021-08891-6)] 723 | * [Adversarially Learned Anomaly Detection on CMS Open Data: re-discovering the top quark](https://arxiv.org/abs/2005.01598) [[DOI](https://doi.org/10.1140/epjp/s13360-021-01109-4)] 724 | * [Dijet resonance search with weak supervision using 13 TeV pp collisions in the ATLAS detector](https://arxiv.org/abs/2005.02983) [[DOI](https://doi.org/10.1103/PhysRevLett.125.131801)] 725 | * [Learning the latent structure of collider events](https://arxiv.org/abs/2005.12319) [[DOI](https://doi.org/10.1007/JHEP10(2020)206)] 726 | * [Finding New Physics without learning about it: Anomaly Detection as a tool for Searches at Colliders](https://arxiv.org/abs/2006.05432) [[DOI](https://doi.org/10.1140/epjc/s10052-020-08807-w)] 727 | * [Tag N' Train: A Technique to Train Improved Classifiers on Unlabeled 
Data](https://arxiv.org/abs/2002.12376) [[DOI](https://doi.org/10.1007/JHEP01(2021)153)] 728 | * [Variational Autoencoders for Anomalous Jet Tagging](https://arxiv.org/abs/2007.01850) 729 | * [Anomaly Awareness](https://arxiv.org/abs/2007.14462) 730 | * [Unsupervised Outlier Detection in Heavy-Ion Collisions](https://arxiv.org/abs/2007.15830) 731 | * [Decoding Dark Matter Substructure without Supervision](https://arxiv.org/abs/2008.12731) 732 | * [Mass Unspecific Supervised Tagging (MUST) for boosted jets](https://arxiv.org/abs/2008.12792) [[DOI](https://doi.org/10.1007/JHEP03(2021)012)] 733 | * [Simulation-Assisted Decorrelation for Resonant Anomaly Detection](https://arxiv.org/abs/2009.02205) 734 | * [Anomaly Detection With Conditional Variational Autoencoders](https://arxiv.org/abs/2010.05531) 735 | * [Unsupervised clustering for collider physics](https://arxiv.org/abs/2010.07106) 736 | * [Combining outlier analysis algorithms to identify new physics at the LHC](https://arxiv.org/abs/2010.07940) 737 | * [Quasi Anomalous Knowledge: Searching for new physics with embedded knowledge](https://arxiv.org/abs/2011.03550) 738 | * [Uncovering hidden patterns in collider events with Bayesian probabilistic models](https://arxiv.org/abs/2012.08579) 739 | * [Unsupervised in-distribution anomaly detection of new physics through conditional density estimation](https://arxiv.org/abs/2012.11638) 740 | * [The LHC Olympics 2020: A Community Challenge for Anomaly Detection in High Energy Physics](https://arxiv.org/abs/2101.08320) 741 | * [Model-Independent Detection of New Physics Signals Using Interpretable Semi-Supervised Classifier Tests](https://arxiv.org/abs/2102.07679) 742 | * [Topological Obstructions to Autoencoding](https://arxiv.org/abs/2102.08380) 743 | * [Unsupervised Event Classification with Graphs on Classical and Photonic Quantum Computers](https://arxiv.org/abs/2103.03897) 744 | * [Bump Hunting in Latent Space](https://arxiv.org/abs/2103.06595) 745 | * [Comparing 
Weak- and Unsupervised Methods for Resonant Anomaly Detection](https://arxiv.org/abs/2104.02092) 746 | * [Better Latent Spaces for Better Autoencoders](https://arxiv.org/abs/2104.08291) 747 | * [Autoencoders for unsupervised anomaly detection in high energy physics](https://arxiv.org/abs/2104.09051) 748 | * [Via Machinae: Searching for Stellar Streams using Unsupervised Machine Learning](https://arxiv.org/abs/2104.12789) 749 | * [Anomaly detection with Convolutional Graph Neural Networks](https://arxiv.org/abs/2105.07988) 750 | * [Anomalous Jet Identification via Sequence Modeling](https://arxiv.org/abs/2105.09274) 751 | * [The Dark Machines Anomaly Score Challenge: Benchmark Data and Model Independent Event Classification for the Large Hadron Collider](https://arxiv.org/abs/2105.14027) 752 | * [RanBox: Anomaly Detection in the Copula Space](https://arxiv.org/abs/2106.05747) 753 | * [Rare and Different: Anomaly Scores from a combination of likelihood and out-of-distribution models to detect new physics at the LHC](https://arxiv.org/abs/2106.10164) 754 | * [LHC physics dataset for unsupervised New Physics detection at 40 MHz](https://arxiv.org/abs/2107.02157) 755 | * [New Methods and Datasets for Group Anomaly Detection From Fundamental Physics](https://arxiv.org/abs/2107.02821) 756 | * [The Data-Directed Paradigm for BSM searches](https://arxiv.org/abs/2107.11573) 757 | * [Autoencoders on FPGAs for real-time, unsupervised new physics detection at 40 MHz at the Large Hadron Collider](https://arxiv.org/abs/2108.03986) 758 | * [Classifying Anomalies THrough Outer Density Estimation (CATHODE)](https://arxiv.org/abs/2109.00546) 759 | * [Deep Set Auto Encoders for Anomaly Detection in Particle Physics](https://arxiv.org/abs/2109.01695) 760 | 761 | * Simulation-based ('likelihood-free') Inference 762 | * Parameter estimation 763 | 764 | * [Neural Networks for Full Phase-space Reweighting and Parameter Tuning](https://arxiv.org/abs/1907.08209) 
[[DOI](https://doi.org/10.1103/PhysRevD.101.091901)] 765 | * [Likelihood-free inference with an improved cross-entropy estimator](https://arxiv.org/abs/1808.00973) 766 | * [Resonance Searches with Machine Learned Likelihood Ratios](https://arxiv.org/abs/2002.04699) 767 | * [Constraining Effective Field Theories with Machine Learning](https://arxiv.org/abs/1805.00013) [[DOI](https://doi.org/10.1103/PhysRevLett.121.111801)] 768 | * [A Guide to Constraining Effective Field Theories with Machine Learning](https://arxiv.org/abs/1805.00020) [[DOI](https://doi.org/10.1103/PhysRevD.98.052004)] 769 | * [MadMiner: Machine learning-based inference for particle physics](https://arxiv.org/abs/1907.10621) [[DOI](https://doi.org/10.1007/s41781-020-0035-2)] 770 | * [Mining gold from implicit models to improve likelihood-free inference](https://arxiv.org/abs/1805.12244) [[DOI](https://doi.org/10.1073/pnas.1915980117)] 771 | * [Approximating Likelihood Ratios with Calibrated Discriminative Classifiers](https://arxiv.org/abs/1506.02169) 772 | * [Parameter Estimation using Neural Networks in the Presence of Detector Effects](https://arxiv.org/abs/2010.03569) [[DOI](https://doi.org/10.1103/PhysRevD.103.036001)] 773 | * [Targeted Likelihood-Free Inference of Dark Matter Substructure in Strongly-Lensed Galaxies](https://arxiv.org/abs/2010.07032) 774 | * [Parameter Inference from Event Ensembles and the Top-Quark Mass](https://arxiv.org/abs/2011.04666) 775 | * [Measuring QCD Splittings with Invertible Networks](https://arxiv.org/abs/2012.09873) 776 | * [E Pluribus Unum Ex Machina: Learning from Many Collider Events at Once](https://arxiv.org/abs/2101.07263) 777 | * [Tree boosting for learning EFT parameters](https://arxiv.org/abs/2107.10859) 778 | * [Black-Box Optimization with Local Generative Surrogates](https://arxiv.org/abs/2002.04632) [[url](https://proceedings.neurips.cc/paper/2020/hash/a878dbebc902328b41dbf02aa87abb58-Abstract.html)] 779 | 780 | * Unfolding 781 | 782 | * [OmniFold: 
A Method to Simultaneously Unfold All Observables](https://arxiv.org/abs/1911.09107) [[DOI](https://doi.org/10.1103/PhysRevLett.124.182001)] 783 | * [Unfolding with Generative Adversarial Networks](https://arxiv.org/abs/1806.00433) 784 | * [How to GAN away Detector Effects](https://arxiv.org/abs/1912.00477) [[DOI](https://doi.org/10.21468/SciPostPhys.8.4.070)] 785 | * [Machine learning approach to inverse problem and unfolding procedure](https://arxiv.org/abs/1004.2006) 786 | * [Machine learning as an instrument for data unfolding](https://arxiv.org/abs/1712.01814) 787 | * [Advanced event reweighting using multivariate analysis](https://doi.org/10.1088/1742-6596/368/1/012028) 788 | * [Unfolding by weighting Monte Carlo events](https://doi.org/10.1016/0168-9002(94)01067-6) 789 | * Binning-Free Unfolding Based on Monte Carlo Migration 790 | * [Invertible Networks or Partons to Detector and Back Again](https://arxiv.org/abs/2006.06685) [[DOI](https://doi.org/10.21468/SciPostPhys.9.5.074)] 791 | * [Neural Empirical Bayes: Source Distribution Estimation and its Applications to Simulation-Based Inference](https://arxiv.org/abs/2011.05836) [[url](https://proceedings.mlr.press/v130/vandegar21a.html)] 792 | * [Foundations of a Fast, Data-Driven, Machine-Learned Simulator](https://arxiv.org/abs/2101.08944) 793 | * [Comparison of Machine Learning Approach to other Unfolding Methods](https://arxiv.org/abs/2104.03036) 794 | * [Scaffolding Simulations with Deep Learning for High-dimensional Deconvolution](https://arxiv.org/abs/2105.04448) 795 | * [Preserving New Physics while Simultaneously Unfolding All Observables](https://arxiv.org/abs/2105.09923) 796 | * [Measurement of lepton-jet correlation in deep-inelastic scattering with the H1 detector using machine learning for unfolding](https://arxiv.org/abs/2108.12376) 797 | 798 | * Domain adaptation 799 | 800 | * [Reweighting with Boosted Decision Trees](https://arxiv.org/abs/1608.05806) 
[[DOI](https://doi.org/10.1088/1742-6596/762/1/012036)] 801 | * [Neural Networks for Full Phase-space Reweighting and Parameter Tuning](https://arxiv.org/abs/1907.08209) [[DOI](https://doi.org/10.1103/PhysRevD.101.091901)] 802 | * [Approximating Likelihood Ratios with Calibrated Discriminative Classifiers](https://arxiv.org/abs/1506.02169) 803 | * [DCTRGAN: Improving the Precision of Generative Models with Reweighting](https://arxiv.org/abs/2009.03796) [[DOI](https://doi.org/10.1088/1748-0221/15/11/p11004)] 804 | * [Neural Conditional Reweighting](https://arxiv.org/abs/2107.08979) 805 | 806 | * BSM 807 | 808 | * [Simulation Assisted Likelihood-free Anomaly Detection](https://arxiv.org/abs/2001.05001) [[DOI](https://doi.org/10.1103/PhysRevD.101.095004)] 809 | * [Resonance Searches with Machine Learned Likelihood Ratios](https://arxiv.org/abs/2002.04699) 810 | * [Constraining Effective Field Theories with Machine Learning](https://arxiv.org/abs/1805.00013) [[DOI](https://doi.org/10.1103/PhysRevLett.121.111801)] 811 | * [A Guide to Constraining Effective Field Theories with Machine Learning](https://arxiv.org/abs/1805.00020) [[DOI](https://doi.org/10.1103/PhysRevD.98.052004)] 812 | * [Mining gold from implicit models to improve likelihood-free inference](https://arxiv.org/abs/1805.12244) [[DOI](https://doi.org/10.1073/pnas.1915980117)] 813 | * [MadMiner: Machine learning-based inference for particle physics](https://arxiv.org/abs/1907.10621) [[DOI](https://doi.org/10.1007/s41781-020-0035-2)] 814 | * [Use of a Generalized Energy Mover's Distance in the Search for Rare Phenomena at Colliders](https://arxiv.org/abs/2004.09360) [[DOI](https://doi.org/10.1140/epjc/s10052-021-08891-6)] 815 | 816 | * Uncertainty Quantification 817 | * Interpretability 818 | 819 | * [Jet-images — deep learning edition](https://arxiv.org/abs/1511.05190) [[DOI](https://doi.org/10.1007/JHEP07(2016)069)] 820 | * [What is the Machine Learning?](https://arxiv.org/abs/1709.10106) 
[[DOI](https://doi.org/10.1103/PhysRevD.97.056009)] 821 | * [CapsNets Continuing the Convolutional Quest](https://arxiv.org/abs/1906.11265) [[DOI](https://doi.org/10.21468/SciPostPhys.8.2.023)] 822 | * [Explainable AI for ML jet taggers using expert variables and layerwise relevance propagation](https://arxiv.org/abs/2011.13466) 823 | * [Resurrecting $b\bar{b}h$ with kinematic shapes](https://arxiv.org/abs/2011.13945) 824 | * [Safety of Quark/Gluon Jet Classification](https://arxiv.org/abs/2103.09103) 825 | 826 | * Estimation 827 | 828 | * [A guide for deploying Deep Learning in LHC searches: How to achieve optimality and account for uncertainty](https://arxiv.org/abs/1909.03081) [[DOI](https://doi.org/10.21468/SciPostPhys.8.6.090)] 829 | * [AI Safety for High Energy Physics](https://arxiv.org/abs/1910.08606) 830 | * [Parton Shower Uncertainties in Jet Substructure Analyses with Deep Neural Networks](https://arxiv.org/abs/1609.00607) [[DOI](https://doi.org/10.1103/PhysRevD.95.014018)] 831 | * [Understanding Event-Generation Networks via Uncertainties](https://arxiv.org/abs/2104.04543) 832 | 833 | * Mitigation 834 | 835 | * [Adversarial learning to eliminate systematic errors: a case study in High Energy Physics](https://dl4physicalsciences.github.io/files/nips_dlps_2017_1.pdf) 836 | * [Machine Learning Uncertainties with Adversarial Neural Networks](https://arxiv.org/abs/1807.08763) [[DOI](https://doi.org/10.1140/epjc/s10052-018-6511-8)] 837 | * [Learning to Pivot with Adversarial Networks](https://arxiv.org/abs/1611.01046) [[url](https://papers.nips.cc/paper/2017/hash/48ab2f9b45957ab574cf005eb8a76760-Abstract.html)] 838 | * [Combine and Conquer: Event Reconstruction with Bayesian Ensemble Neural Networks](https://arxiv.org/abs/2102.01078) 839 | 840 | * Uncertainty-aware inference 841 | 842 | * [Constraining the Parameters of High-Dimensional Models with Active Learning](https://arxiv.org/abs/1905.08628) [[DOI](https://doi.org/10.1140/epjc/s10052-019-7437-5)] 843 
| * [Deep-Learning Jets with Uncertainties and More](https://arxiv.org/abs/1904.10004) [[DOI](https://doi.org/10.21468/SciPostPhys.8.1.006)] 844 | * [INFERNO: Inference-Aware Neural Optimisation](https://arxiv.org/abs/1806.04743) [[DOI](https://doi.org/10.1016/j.cpc.2019.06.007)] 845 | * [Optimal statistical inference in the presence of systematic uncertainties using neural network optimization based on binned Poisson likelihoods with nuisance parameters](https://arxiv.org/abs/2003.07186) [[DOI](https://doi.org/10.1007/s41781-020-00049-5)] 846 | * [Uncertainty Aware Learning for High Energy Physics](https://arxiv.org/abs/2105.08742) 847 | 848 | * Experimental results. *This section is incomplete as there are many results that directly and indirectly (e.g. via flavor tagging) use modern machine learning techniques. We will try to highlight experimental results that use deep learning in a critical way for the final analysis sensitivity.* 849 | 850 | * Final analysis discriminant for searches 851 | 852 | * [Search for non-resonant Higgs boson pair production in the $bb\ell\nu\ell\nu$ final state with the ATLAS detector in $pp$ collisions at $\sqrt{s}=13$ TeV](https://arxiv.org/abs/1908.06765) [[DOI](https://doi.org/10.1016/j.physletb.2019.135145)] 853 | * [Search for Higgs boson decays into a $Z$ boson and a light hadronically decaying resonance using 13 TeV $pp$ collision data from the ATLAS detector](https://arxiv.org/abs/2004.01678) [[DOI](https://doi.org/10.1103/PhysRevLett.125.221802)] 854 | * [Dijet resonance search with weak supervision using 13 TeV pp collisions in the ATLAS detector](https://arxiv.org/abs/2005.02983) [[DOI](https://doi.org/10.1103/PhysRevLett.125.131801)] 855 | * [Inclusive search for highly boosted Higgs bosons decaying to bottom quark-antiquark pairs in proton-proton collisions at $\sqrt{s}=13$ TeV](https://arxiv.org/abs/2006.13251) [[DOI](https://doi.org/10.1007/JHEP12(2020)085)] 856 | 857 | * Measurements using deep learning directly (not through object 
reconstruction) 858 | 859 | * [Measurement of lepton-jet correlation in deep-inelastic scattering with the H1 detector using machine learning for unfolding](https://arxiv.org/abs/2108.12376) 860 | 861 | --------------------------------------------------------------------------------