├── rrlogo2.png
├── rrlogo2tr.png
├── .github
│   └── workflows
│       └── docs-check.yml
├── src
│   ├── requirements.txt
│   └── doi2md.py
├── .mlc_config.json
├── contributing.md
└── code-of-conduct.md

/rrlogo2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leipzig/awesome-reproducible-research/HEAD/rrlogo2.png
--------------------------------------------------------------------------------
/rrlogo2tr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leipzig/awesome-reproducible-research/HEAD/rrlogo2tr.png
--------------------------------------------------------------------------------
/.github/workflows/docs-check.yml:
--------------------------------------------------------------------------------
name: Check Markdown Links
on: [push, pull_request]
jobs:
  markdown-link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: gaurav-nelson/github-action-markdown-link-check@v1
        with:
          config-file: '.mlc_config.json'
--------------------------------------------------------------------------------
/src/requirements.txt:
--------------------------------------------------------------------------------
arxiv==2.1.3
backoff==2.2.1
beautifulsoup4==4.12.3
bibtexparser==1.4.3
certifi==2024.7.4
charset-normalizer==3.3.2
crossref==0.1.2
crossrefapi==1.5.0
feedparser==6.0.11
idna==3.7
numpy==1.26.4
pandas==2.2.3
pyparsing==3.1.2
python-dateutil==2.9.0.post0
pytz==2024.1
pyzenodo3==1.0.2
requests==2.32.4
sgmllib3k==1.0.0
six==1.16.0
soupsieve==2.5
tzdata==2024.1
urllib3==2.5.0
--------------------------------------------------------------------------------
/.mlc_config.json:
--------------------------------------------------------------------------------
{
  "aliveStatusCodes": [200, 429, 0, 202, 406],
  "ignorePatterns": [
    { "pattern": "^https://datacite.org" },
    { "pattern": "^https://bioportal.bioontology.org" },
    { "pattern": "^https://openmkt.org" },
    { "pattern": "^https://figshare.com" },
    { "pattern": "^https://zenodo.org" },
    { "pattern": "^https://www.ncbi.nlm.nih.gov" },
    { "pattern": "^https://www.ebi.ac.uk" },
    { "pattern": "\\[.*\\]\\(#.*\\)" },
    { "pattern": "^https://midas\\.umich\\.edu" }
  ]
}
--------------------------------------------------------------------------------
/contributing.md:
--------------------------------------------------------------------------------
# Contribution Guidelines

Please note that this project is released with a
[Contributor Code of Conduct](code-of-conduct.md). By participating in this
project you agree to abide by its terms.

---

Ensure your pull request adheres to the following guidelines:

- Content must be awesome
- Content must be described in terms of its relevance to reproducible research (RR)
- No NSS ("No Shit, Sherlock") content: entries must not be so obvious that anyone who comes to look at this page already knows about them (e.g., Git, GitHub, R, RStudio, Jupyter). Point neophytes to the educational materials instead.

Thank you for your suggestions!
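
If you want to draft a formatted entry from a DOI, the repository ships a small helper, `src/doi2md.py`. The sketch below is only a guess at how to drive it: the argument names are taken from the argparse attributes the script reads (`doi`, `type`, `field`, `approach`, `size`, `category`, `tools`, `capsule`, `bibtex`) and are not a documented interface.

```python
# Hypothetical sketch: run from src/; the Crossref lookup needs network access.
from argparse import Namespace
from doi2md import md

entry = md(Namespace(
    doi="https://doi.org/10.1136/bmj.39590.732037.47",
    bibtex=None,              # only set when reading entries from a .bib file
    type="Journal article",   # the remaining fields are user-supplied columns
    field="Medicine",
    approach="Survey",
    size="N/A",
    category="Reporting",
    tools="N/A",
    capsule="N/A",
))
print(entry.study())          # prints the generated table-row markup
```

Depending on the URL you pass, the script looks the identifier up on Crossref, arXiv, Zenodo, or ProQuest and fills the remaining columns from the values supplied above.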

--------------------------------------------------------------------------------
/code-of-conduct.md:
--------------------------------------------------------------------------------
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at leipzig@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [http://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: http://contributor-covenant.org 74 | [version]: http://contributor-covenant.org/version/1/4/ 75 | -------------------------------------------------------------------------------- /src/doi2md.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from crossref.restful import Works 4 | import arxiv 5 | import argparse 6 | import datetime 7 | import html 8 | import requests 9 | import re 10 | import sys 11 | from requests.exceptions import HTTPError 12 | import bibtexparser 13 | from bs4 import BeautifulSoup 14 | 15 | 16 | 17 | # { 18 | # 'indexed': { 19 | # 'date-parts': [ 20 | # [2020, 2, 26] 21 | # ], 22 | # 'date-time': '2020-02-26T18:23:23Z', 23 | # 'timestamp': 1582741403836 24 | # }, 25 | # 'reference-count': 0, 26 | # 'publisher': 'BMJ', 27 | # 'issue': '7659', 28 | # 'license': [{ 29 | # 'URL': 'http://www.bmj.org/licenses/tdm/1.0/terms-and-conditions.html', 30 | # 'start': { 31 | # 'date-parts': [ 32 | # [2008, 6, 26] 33 | # ], 34 | # 'date-time': '2008-06-26T00:00:00Z', 35 | # 'timestamp': 1214438400000 36 | # }, 37 | # 'delay-in-days': 0, 38 | # 'content-version': 'tdm' 39 | # }], 40 | # 'content-domain': { 41 | # 'domain': [], 42 | # 'crossmark-restriction': False 43 | # }, 44 | # 'short-container-title': ['BMJ'], 45 | # 'published-print': { 46 | # 'date-parts': [ 47 | # [2008, 6, 28] 48 | # ] 49 | # }, 50 | # 'DOI': '10.1136/bmj.39590.732037.47', 51 | # 'type': 'journal-article', 52 | # 'created': { 53 | # 'date-parts': [ 54 | # [2008, 6, 26] 55 | # ], 56 | # 'date-time': '2008-06-26T22:28:39Z', 57 | # 'timestamp': 1214519319000 58 | # }, 59 | # 'page': '1472-1474', 60 | # 'source': 'Crossref', 61 | # 'is-referenced-by-count': 340, 62 | # 'title': ['What is missing from descriptions of treatment in trials and reviews?'], 63 | # 'prefix': '10.1136', 64 | # 'volume': '336', 65 | # 'author': [{ 66 | # 'given': 'Paul', 67 | # 'family': 'Glasziou', 68 | # 'sequence': 'first', 69 | # 'affiliation': [] 70 | # }, { 71 | # 'given': 'Emma', 72 | # 'family': 'Meats', 73 | # 'sequence': 'additional', 74 | # 'affiliation': [] 75 | # }, { 76 | # 'given': 'Carl', 77 | # 'family': 'Heneghan', 78 | # 'sequence': 'additional', 79 | # 'affiliation': [] 80 | # }, { 81 | # 'given': 'Sasha', 82 | # 'family': 'Shepperd', 83 | # 'sequence': 'additional', 84 | # 'affiliation': [] 85 | # }], 86 | # 'member': '239', 87 | # 'published-online': { 88 | # 'date-parts': [ 89 | # [2008, 6, 26] 90 | # ] 91 | # }, 92 | # 'container-title': ['BMJ'], 93 | # 'original-title': [], 94 | # 'language': 'en', 95 | # 'link': [{ 96 | # 'URL': 'http://data.bmj.org/tdm/10.1136/bmj.39590.732037.47', 97 | # 'content-type': 'unspecified', 98 | # 'content-version': 'vor', 99 | # 'intended-application': 'text-mining' 100 | # }, { 101 | # 'URL': 'https://syndication.highwire.org/content/doi/10.1136/bmj.39590.732037.47', 102 | # 'content-type': 'unspecified', 103 | # 'content-version': 'vor', 104 | # 'intended-application': 'similarity-checking' 105 | # }], 106 | # 'deposited': { 107 | # 'date-parts': [ 108 | # [2018, 2, 21] 109 | # ], 110 | # 'date-time': '2018-02-21T18:07:45Z', 111 | # 'timestamp': 1519236465000 112 | # }, 113 | # 'score': 1.0, 114 | # 'subtitle': [], 115 | # 'short-title': [], 116 | # 'issued': { 117 | # 'date-parts': [ 118 | # [2008, 
6, 26] 119 | # ] 120 | # }, 121 | # 'references-count': 0, 122 | # 'journal-issue': { 123 | # 'published-online': { 124 | # 'date-parts': [ 125 | # [2008, 6, 26] 126 | # ] 127 | # }, 128 | # 'published-print': { 129 | # 'date-parts': [ 130 | # [2008, 6, 28] 131 | # ] 132 | # }, 133 | # 'issue': '7659' 134 | # }, 135 | # 'alternative-id': ['10.1136/bmj.39590.732037.47'], 136 | # 'URL': 'http://dx.doi.org/10.1136/bmj.39590.732037.47', 137 | # 'relation': {}, 138 | # 'ISSN': ['0959-8138', '1468-5833'], 139 | # 'issn-type': [{ 140 | # 'value': '0959-8138', 141 | # 'type': 'print' 142 | # }, { 143 | # 'value': '1468-5833', 144 | # 'type': 'electronic' 145 | # }] 146 | # } 147 | 148 | 149 | #arxiv 150 | # [{ 151 | # 'id': 'http://arxiv.org/abs/2002.11626v1', 152 | # 'guidislink': True, 153 | # 'updated': '2020-02-06T17:12:29Z', 154 | # 'updated_parsed': time.struct_time(tm_year = 2020, tm_mon = 2, tm_mday = 6, tm_hour = 17, tm_min = 12, tm_sec = 29, tm_wday = 3, tm_yday = 37, tm_isdst = 0), 155 | # 'published': '2020-02-06T17:12:29Z', 156 | # 'published_parsed': time.struct_time(tm_year = 2020, tm_mon = 2, tm_mday = 6, tm_hour = 17, tm_min = 12, tm_sec = 29, tm_wday = 3, tm_yday = 37, tm_isdst = 0), 157 | # 'title': 'A Realistic Guide to Making Data Available Alongside Code to Improve\n Reproducibility', 158 | # 'title_detail': { 159 | # 'type': 'text/plain', 160 | # 'language': None, 161 | # 'base': 'http://export.arxiv.org/api/query?search_query=&id_list=2002.11626&start=0&max_results=1000&sortBy=relevance&sortOrder=descending', 162 | # 'value': 'A Realistic Guide to Making Data Available Alongside Code to Improve\n Reproducibility' 163 | # }, 164 | # 'summary': "Data makes science possible. Sharing data improves visibility, and makes the\nresearch process transparent. This increases trust in the work, and allows for\nindependent reproduction of results. However, a large proportion of data from\npublished research is often only available to the original authors. Despite the\nobvious benefits of sharing data, and scientists' advocating for the importance\nof sharing data, most advice on sharing data discusses its broader benefits,\nrather than the practical considerations of sharing. This paper provides\npractical, actionable advice on how to actually share data alongside research.\nThe key message is sharing data falls on a continuum, and entering it should\ncome with minimal barriers.", 165 | # 'summary_detail': { 166 | # 'type': 'text/plain', 167 | # 'language': None, 168 | # 'base': 'http://export.arxiv.org/api/query?search_query=&id_list=2002.11626&start=0&max_results=1000&sortBy=relevance&sortOrder=descending', 169 | # 'value': "Data makes science possible. Sharing data improves visibility, and makes the\nresearch process transparent. This increases trust in the work, and allows for\nindependent reproduction of results. However, a large proportion of data from\npublished research is often only available to the original authors. Despite the\nobvious benefits of sharing data, and scientists' advocating for the importance\nof sharing data, most advice on sharing data discusses its broader benefits,\nrather than the practical considerations of sharing. This paper provides\npractical, actionable advice on how to actually share data alongside research.\nThe key message is sharing data falls on a continuum, and entering it should\ncome with minimal barriers." 
170 | # }, 171 | # 'authors': ['Nicholas J Tierney', 'Karthik Ram'], 172 | # 'author_detail': { 173 | # 'name': 'Karthik Ram' 174 | # }, 175 | # 'author': 'Karthik Ram', 176 | # 'arxiv_comment': 'Both authors contributed equally to the work, 35 pages, 7 figures, 3\n tables', 177 | # 'links': [{ 178 | # 'href': 'http://arxiv.org/abs/2002.11626v1', 179 | # 'rel': 'alternate', 180 | # 'type': 'text/html' 181 | # }, { 182 | # 'title': 'pdf', 183 | # 'href': 'http://arxiv.org/pdf/2002.11626v1', 184 | # 'rel': 'related', 185 | # 'type': 'application/pdf' 186 | # }], 187 | # 'arxiv_primary_category': { 188 | # 'term': 'cs.DL', 189 | # 'scheme': 'http://arxiv.org/schemas/atom' 190 | # }, 191 | # 'tags': [{ 192 | # 'term': 'cs.DL', 193 | # 'scheme': 'http://arxiv.org/schemas/atom', 194 | # 'label': None 195 | # }], 196 | # 'pdf_url': 'http://arxiv.org/pdf/2002.11626v1', 197 | # 'affiliation': 'None', 198 | # 'arxiv_url': 'http://arxiv.org/abs/2002.11626v1', 199 | # 'journal_reference': None, 200 | # 'doi': None 201 | # }] 202 | 203 | 204 | 205 | 206 | class md: 207 | def __init__(self, args): 208 | self.works = Works() 209 | #print(args.doi) 210 | if 'arxiv' in args.doi: 211 | #maybe you prepended it, maybe not 212 | self.doi=args.doi.replace('https://arxiv.org/abs/','') 213 | self.link='https://arxiv.org/abs/'+self.doi 214 | self.arxiv() 215 | elif 'zenodo' in args.doi: 216 | self.doi=args.doi.replace('https://zenodo.org/record/','').replace('#.+','') 217 | self.link='https://zenodo.org/record/'+self.doi 218 | self.zenodo() 219 | elif 'proquest' in args.doi: 220 | self.doi=args.doi 221 | self.link = self.doi 222 | self.proquest() 223 | elif args.bibtex: 224 | self.bibtex() 225 | else: 226 | self.doi=args.doi.replace('https://doi.org/','') 227 | self.link='https://doi.org/'+self.doi 228 | self.crossref() 229 | 230 | #user-supplied 231 | self.type=args.type 232 | self.field=args.field 233 | self.approach=args.approach 234 | self.size=args.size 235 | self.category=args.category 236 | self.toolsarg=args.tools 237 | self.capsule=args.capsule 238 | 239 | 240 | 241 | def arxiv(self): 242 | try: 243 | pub = next(arxiv.Search(id_list=[self.doi]).results()) 244 | except AttributeError: 245 | # NOTE: see lukasschwab/arxiv.py#80. Should make this exception 246 | # handling more specific when that issue is resolved. 
            print("Arxiv doesn't know about this doi")
            sys.exit(0)
        if pub.updated is not None:
            self.yyyymmdd=pub.updated.strftime('%Y-%m-%d')
            self.yyyy=pub.updated.strftime('%Y')
        self.title=pub.title
        self.abstract=pub.summary
        last_name = lambda author: author.name.split(' ')[-1]
        if len(pub.authors)==1:
            self.author = last_name(pub.authors[0])
        elif len(pub.authors)==2:
            self.author = ' & '.join([last_name(a) for a in pub.authors])
        else:
            self.author = '{} et al'.format(last_name(pub.authors[0]))
        self.school = 'Institution'

    def crossref(self):
        pub=self.works.doi(self.doi)
        if pub is None:
            print("Crossref doesn't know about this doi")
            sys.exit(1)
        if pub.get('created') is not None:
            if pub.get('created').get('date-time') is not None:
                date_time_str=pub.get('created').get('date-time')
                date_time_obj = datetime.datetime.strptime(date_time_str,"%Y-%m-%dT%H:%M:%SZ")
                self.yyyymmdd=date_time_obj.strftime('%Y-%m-%d')
                self.yyyy=date_time_obj.strftime('%Y')
        elif pub.get('published-online') is not None:
            if pub.get('published-online').get('date-parts') is not None:
                # 'date-parts' holds integers, e.g. [2008, 6, 26]
                dl=pub.get('published-online').get('date-parts')[0]
                self.yyyymmdd='{0}-{1:02d}-{2:02d}'.format(dl[0], dl[1], dl[2])
                self.yyyy=str(dl[0])
        else:
            print("Cannot find created or published-online attributes")
            sys.exit(1)

        self.title=pub.get('title')[0]
        self.abstract=pub.get('abstract') or "Abstract"

        # Use family if it exists in first author, otherwise use name
        if 'family' not in pub['author'][0]:
            pub['author'][0]['family'] = pub['author'][0].get('name', '')
        if len(pub['author'])==1:
            self.author = pub['author'][0]['family']
        elif len(pub['author'])==2:
            self.author = pub['author'][0]['family']+' & '+pub['author'][1]['family']
        else:
            self.author = pub['author'][0]['family']+' et al'
        self.school = pub['author'][0]['affiliation']

    def zenodo(self):
        #https://zenodo.org/api/records/3818329
        #extract from doi:10.5281/zenodo.3818329
        # {"conceptdoi":"10.5281/zenodo.3818328","conceptrecid":"3818328","created":"2020-05-09T13:39:47.718973+00:00","doi":"10.5281/zenodo.3818329","files":[{"bucket":"ef05647e-d3c9-4e45-a0c9-090fce671637","checksum":"md5:342105a963d9c70ce26232373602df9d","key":"20200513 - Data and Code for Reproducible Research - Zaringhalam and Federer.pdf","links":{"self":"https://zenodo.org/api/files/ef05647e-d3c9-4e45-a0c9-090fce671637/20200513%20-%20Data%20and%20Code%20for%20Reproducible%20Research%20-%20Zaringhalam%20and%20Federer.pdf"},"size":456320,"type":"pdf"}],"id":3818329,"links":{"badge":"https://zenodo.org/badge/doi/10.5281/zenodo.3818329.svg","bucket":"https://zenodo.org/api/files/ef05647e-d3c9-4e45-a0c9-090fce671637","conceptbadge":"https://zenodo.org/badge/doi/10.5281/zenodo.3818328.svg","conceptdoi":"https://doi.org/10.5281/zenodo.3818328","doi":"https://doi.org/10.5281/zenodo.3818329","html":"https://zenodo.org/record/3818329","latest":"https://zenodo.org/api/records/3818329","latest_html":"https://zenodo.org/record/3818329","self":"https://zenodo.org/api/records/3818329"},"metadata":{"access_right":"open","access_right_category":"success","communities":[{"id":"csvconfv5"}],"creators":[{"affiliation":"National Library of Medicine","name":"Zaringhalam, Maryam","orcid":"0000-0002-7306-0210"},{"affiliation":"National Library of
Medicine","name":"Federer, Lisa","orcid":"0000-0001-5732-5285"}],"description":"
The National Library of Medicine held two workshops in 2019...","doi":"10.5281/zenodo.3818329","keywords":["reproducibility","data science training","open science","open code","open data"],"language":"eng","license":{"id":"CC-BY-4.0"},"publication_date":"2020-05-09","related_identifiers":[{"identifier":"10.5281/zenodo.3818328","relation":"isVersionOf","scheme":"doi"}],"relations":{"version":[{"count":1,"index":0,"is_last":true,"last_child":{"pid_type":"recid","pid_value":"3818329"},"parent":{"pid_type":"recid","pid_value":"3818328"}}]},"resource_type":{"title":"Presentation","type":"presentation"},"title":"Data and Code for Reproducible Research: Lessons Learned from the NLM Reproducibility Workshop","version":"1.0.0"},"owners":[101399],"revision":3,"stats":{"downloads":203.0,"unique_downloads":191.0,"unique_views":275.0,"version_downloads":203.0,"version_unique_downloads":191.0,"version_unique_views":275.0,"version_views":320.0,"version_volume":92632960.0,"views":320.0,"volume":92632960.0},"updated":"2020-05-13T20:20:37.470893+00:00"}
        #dict_keys(['conceptdoi', 'conceptrecid', 'created', 'doi', 'files', 'id', 'links', 'metadata', 'owners', 'revision', 'stats', 'updated'])
        pub=self.doi
        if pub is None:
            print("{0} is not matching the 10.5281/zenodo.([0-9]+) pattern".format(self.doi))
        else:
            try:
                response = requests.get('https://zenodo.org/api/records/{0}'.format(pub))
                response.raise_for_status()
                resp = response.json()

                self.title = resp['metadata']['title']
                last_name = lambda author: author.split(', ')[0]
                # Zenodo records list people under 'creators' (see the example
                # record above); fall back to 'contributors' if 'creators' is absent.
                creators = resp['metadata'].get('creators') or resp['metadata'].get('contributors', [])
                if len(creators)==1:
                    self.author = last_name(creators[0]['name'])
                elif len(creators)==2:
                    self.author = ' & '.join([last_name(a['name']) for a in creators])
                else:
                    self.author = '{} et al'.format(last_name(creators[0]['name']))
                self.abstract=resp['metadata']['description']
                self.yyyymmdd=resp['metadata']["publication_date"]
                self.yyyy=resp['metadata']["publication_date"][0:4]

            except HTTPError as http_err:
                print(f'HTTP error occurred: {http_err}')
            except Exception as err:
                print(f'Other error occurred: {err}')

    def bibtex(self):
        with open(args.bibtex) as bibtex_file:
            bibtex_database = bibtexparser.load(bibtex_file)
            #https://stackoverflow.com/questions/72899/how-do-i-sort-a-list-of-dictionaries-by-a-value-of-the-dictionary
            bib_sorted = sorted(bibtex_database.entries, key=lambda d: d['year'])
            for entry in bib_sorted:
                if entry['ID'] == args.doi:
                    self.link=entry['url']
                    self.title=entry['title']
                    self.author=entry['author']
                    self.yyyy=entry['year']
                    self.yyyymmdd=self.yyyy+'0101'
                    self.school=entry['school']

    def proquest(self):
        try:
            response = requests.get(self.link)
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')
            myauthors = soup.find_all("div", {"class": "truncatedAuthor"})
            self.author = re.sub(r'\s+$','',re.sub(r'\.',' ',re.sub(r'\n','',myauthors[0].text)))
            mytitles = soup.find_all("h2", {"class": "unauthdocheader"})
            self.title = mytitles[0].text
            myheader = soup.find_all("span", {"class": "titleAuthorETC dissertpub"})
            self.school = re.sub(r'^\s','',re.sub("ProQuest.+","",myheader[0].text))
            self.yyyy=re.search('([0-9]{4})', myheader[0].text, re.IGNORECASE).group(1)
            self.yyyymmdd=self.yyyy+'0101'
        except HTTPError as http_err:
            print(f'HTTP error occurred: {http_err}')
        except Exception as err:
            print(f'Other error occurred: {err}')

    def study(self):
        # Row template for a generated entry: the cells are numbered format
        # placeholders ({1} {3}, {5}, {6}, {7}, ...), presumably filled from the
        # link, title, author, year, and user-supplied fields collected above;
        # further row templates using {2} {4} and {8} follow the same pattern.
        return("""[table-row template: {1} {3} | {5} | {6} | {7}]""")
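
# Illustrative sketch only (not the project's actual templates): since the row
# templates above are shown only as placeholders, the markup, column order, and
# the helper name `example_row` are assumptions, not the repository's documented
# output format. It simply demonstrates how the attributes collected by `md`
# could be rendered as a single HTML table row.
def example_row(entry):
    """Render an md instance as a hypothetical HTML table row."""
    cells = [
        '<a href="{0}">{1}</a> ({2}, {3})'.format(entry.link, entry.title,
                                                  entry.author, entry.yyyy),
        entry.field,
        entry.approach,
        entry.size,
        entry.category,
        entry.toolsarg,
        entry.capsule,
    ]
    return '<tr>' + ''.join('<td>{}</td>'.format(c) for c in cells) + '</tr>'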