├── .flake8 ├── .github └── workflows │ └── python-app.yml ├── .gitignore ├── .python-version ├── CHANGELOG.md ├── CITATION.cff ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── csv_metadata_quality ├── __init__.py ├── __main__.py ├── app.py ├── check.py ├── data │ └── licenses.json ├── experimental.py ├── fix.py ├── util.py └── version.py ├── data ├── abstract-check.csv ├── test-geography.csv ├── test.csv └── test.xlsx ├── pyproject.toml ├── pytest.ini ├── renovate.json ├── requirements.txt ├── tests ├── __init__.py ├── test_check.py └── test_fix.py └── uv.lock /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E501 3 | -------------------------------------------------------------------------------- /.github/workflows/python-app.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Build and Test 5 | 6 | on: 7 | push: 8 | branches: [ master ] 9 | pull_request: 10 | branches: [ master ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Install uv 20 | uses: astral-sh/setup-uv@v5 21 | with: 22 | version: 'latest' 23 | - run: uv sync 24 | - name: Test with pytest 25 | run: uv run pytest 26 | - name: Test CLI 27 | run: | 28 | # Basic test 29 | uv run csv-metadata-quality -i data/test.csv -o /tmp/test.csv 30 | # Test with unsafe fixes 31 | uv run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u 32 | # Test with experimental checks 33 | uv run csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e 34 | # Test with AGROVOC validation 35 | uv run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject 36 | # Test with AGROVOC validation (and dropping invalid) 37 | uv run csv-metadata-quality -i data/test.csv -o /tmp/test.csv --agrovoc-fields dcterms.subject -d 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.sqlite 3 | *.egg-info 4 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.13 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
6 | 7 | ## [0.7.0] - 2025-01-31 8 | ### Added 9 | - Ability to normalize DOIs to https://doi.org URI format 10 | 11 | ### Fixed 12 | - Fixed regex so we don't run the invalid multi-value separator fix on 13 | `dcterms.bibliographicCitation` fields 14 | - Fixed regex so we run the comma space fix on `dcterms.bibliographicCitation` 15 | fields 16 | - Don't crash the country/region checker/fixer when a title field is missing 17 | 18 | ### Changed 19 | - Don't run newline fix on description fields 20 | - Install requests-cache in main run() function instead of check.agrovoc() function so we only incur the overhead once 21 | - Use py3langid instead of langid, see: [How to make language detection with langid.py faster](https://adrien.barbaresi.eu/blog/language-detection-langid-py-faster.html) 22 | - Use uv instead of rye 23 | - Remove pytest-clarity — I think pytest itself has gotten much better in the past few years 24 | 25 | ### Updated 26 | - Python dependencies, including Pandas 2.0.0 and [Arrow-backed dtypes](https://datapythonista.me/blog/pandas-20-and-the-arrow-revolution-part-i) 27 | - SPDX license list 28 | 29 | ## [0.6.1] - 2023-02-23 30 | ### Fixed 31 | - Missing region check should ignore subregion field, if it exists 32 | 33 | ### Changed 34 | - Use SPDX license data from SPDX themselves instead of spdx-license-list 35 | because it is deprecated and outdated 36 | - Require Python 3.9+ 37 | - Don't run `fix.separators()` on title or abstract fields 38 | - Don't run whitespace or newline fixes on abstract fields 39 | - Ignore some common non-SPDX licenses 40 | - Ignore `__description` suffix in filenames meant for SAFBuilder when checking 41 | for uncommon file extensions 42 | 43 | ### Updated 44 | - Python dependencies 45 | 46 | ## [0.6.0] - 2022-09-02 47 | ### Changed 48 | - Perform fix for "unnecessary" Unicode characters after we try to fix encoding 49 | issues with ftfy 50 | - ftfy heuristics to use `is_bad()` instead of `sequence_weirdness()` 51 | - ftfy `fix_text()` to *not* change “smart quotes” to "ASCII quotes" 52 | 53 | ### Updated 54 | - Python dependencies 55 | - Metadata field exclude logic 56 | 57 | ### Added 58 | - Ability to drop invalid AGROVOC values with `-d` when checking AGROVOC values 59 | with `-a ` 60 | - Ability to add missing UN M.49 regions when both country and region columns 61 | are present. Enable with `-u` (unsafe fixes) for now. 
62 | 63 | ### Removed 64 | - Support for reading Excel files (both `.xls` and `.xlsx`) as it was completely 65 | untested 66 | 67 | ## [0.5.0] - 2021-12-08 68 | ### Added 69 | - Ability to check for, and fix, "mojibake" characters using [ftfy](https://github.com/LuminosoInsight/python-ftfy) 70 | - Ability to check if the item's title exists in the citation 71 | - Ability to check if an item has countries, but no matching regions (only 72 | suggests missing regions if there is a region field in the CSV) 73 | 74 | ### Updated 75 | - Python dependencies 76 | 77 | ### Fixed 78 | - Regular expression to match all citation fields (dc.identifier.citation as 79 | well as dcterms.bibliographicCitation) in `experimental.correct_language()` 80 | - Regular expression to match dc.title and dcterms.title, but 81 | ignore dc.title.alternative `check.duplicate_items()` 82 | - Missing field name in `fix.newlines()` output 83 | 84 | ## [0.4.7] - 2021-03-17 85 | ### Changed 86 | - Fixing invalid multi-value separators like `|` and `|||` is no longer class- 87 | ified as "unsafe" as I have yet to see a case where this was intentional 88 | - Not user visible, but now checks only print a warning to the screen instead 89 | of returning a value and re-writing the DataFrame, which should be faster and 90 | use less memory 91 | 92 | ### Added 93 | - Configurable directory for AGROVOC requests cache (to allow running the web 94 | version from Google App Engine where we can only write to /tmp) 95 | - Ability to check for duplicate items in the data set (uses a combination of 96 | the title, type, and date issued to determine uniqueness) 97 | 98 | ### Removed 99 | - Checks for invalid and unnecessary multi-value separators because now I fix 100 | them whenever I see them, so there is no need to have checks for them 101 | 102 | ### Updated 103 | - Run `poetry update` to update project dependencies 104 | 105 | ## [0.4.6] - 2021-03-11 106 | ### Added 107 | - Validation of dcterms.license field against SPDX license identifiers 108 | 109 | ### Changed 110 | - Use DCTERMS fields where possible in `data/test.csv` 111 | 112 | ### Updated 113 | - Run `poetry update` to update project dependencies 114 | 115 | ### Fixed 116 | - Output for all fixes should be green, because it is good 117 | 118 | ## [0.4.5] - 2021-03-04 119 | ### Added 120 | - Check dates in dcterms.issued field as well, not just fields that have the 121 | word "date" in them 122 | 123 | ### Updated 124 | - Run `poetry update` to update project dependencies 125 | 126 | ## [0.4.4] - 2021-02-21 127 | ### Added 128 | - Accept dates formatted in ISO 8601 extended with combined date and time, for 129 | example: 2020-08-31T11:04:56Z 130 | - Colorized output: red for errors, yellow for warnings and information, green 131 | for changes 132 | 133 | ### Updated 134 | - Run `poetry update` to update project dependencies 135 | 136 | ## [0.4.3] - 2021-01-26 137 | ### Changed 138 | - Reformat with black 139 | - Requires Python 3.7+ for pandas 1.2.0 140 | 141 | ### Updated 142 | - Run `poetry update` 143 | - Expand check/fix for multi-value separators to include metadata with invalid 144 | separators at the end, for example "Kenya||Tanzania||" 145 | 146 | ## [0.4.2] - 2020-07-06 147 | ### Changed 148 | - Add field name to the output for more fixes and checks to help identify where 149 | the error is 150 | - Minor optimizations to AGROVOC subject lookup 151 | - Use Poetry instead of Pipenv 152 | 153 | ### Updated 154 | - Update python dependencies to latest versions 155 | 156 | 
## [0.4.1] - 2020-01-15 157 | ### Changed 158 | - Reduce minimum Python version to 3.6 by working around `is_normalized()`, 159 | which only works in Python >= 3.8 160 | 161 | ## [0.4.0] - 2020-01-15 162 | ### Added 163 | - Unicode normalization (enable with `--unsafe-fixes`, see README.md) 164 | 165 | ### Updated 166 | - Update python dependencies to latest versions, including numpy 1.18.1, pandas 167 | 1.0.0rc0, flake8 3.7.9, pytest 5.3.2, and black 19.10b0 168 | - Regenerate requirements.txt and requirements-dev.txt 169 | 170 | ### Changed 171 | - Use Python 3.8.0 for pipenv 172 | - Use Ubuntu 18.04 "Bionic" for TravisCI builds 173 | - Test Python 3.8 in TravisCI builds 174 | 175 | ## [0.3.1] - 2019-10-01 176 | ### Changed 177 | - Replace non-breaking spaces (U+00A0) with space instead of removing them 178 | - Harmonize language of script output when fixing various issues 179 | 180 | ## [0.3.0] - 2019-09-26 181 | ### Updated 182 | - Update python dependencies to latest versions, including numpy 1.17.2, pandas 183 | 0.25.1, pytest 5.1.3, and requests-cache 0.5.2 184 | 185 | ### Added 186 | - csvkit to dev requirements (csvcut etc are useful during development) 187 | - Experimental language validation using the Python `langid` library (enable with `-e`, see README.md) 188 | 189 | ### Changed 190 | - Re-formatted code with black and isort 191 | 192 | ## [0.2.2] - 2019-08-27 193 | ### Changed 194 | - Output of date checks to include column names (helps debugging in case there are multiple date fields) 195 | 196 | ### Added 197 | - Ability to exclude certain fields using `--exclude-fields` 198 | - Fix for missing space after a comma, ie "Orth,Alan S." 199 | 200 | ### Improved 201 | - AGROVOC lookup code 202 | 203 | ## [0.2.1] - 2019-08-11 204 | ### Added 205 | - Check for uncommon filename extensions 206 | - Replacement of unnecessary Unicode characters like soft hyphens (U+00AD) 207 | 208 | ## [0.2.0] - 2019-08-09 209 | ### Added 210 | - Handle Ctrl-C interrupt gracefully 211 | - Make output in suspicious character check more user friendly 212 | - Add pytest-clarity to dev packages for more user friendly pytest output 213 | 214 | ## [0.1.0] - 2019-08-01 215 | ### Changed 216 | - AGROVOC validation is now turned off by default 217 | 218 | ### Added 219 | - Ability to enable AGROVOC validation on a field-by-field basis using the `--agrovoc-fields` option 220 | - Option to print the version (`--version` or `-V`) 221 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: "1.1.0" 2 | abstract: "A simple but opinionated metadata quality checker and fixer designed to work with CSVs in the DSpace ecosystem." 3 | authors: 4 | - 5 | affiliation: "International Livestock Research Institute" 6 | family-names: Orth 7 | given-names: "Alan S." 8 | orcid: "https://orcid.org/0000-0002-1735-7458" 9 | date-released: 2019-07-26 10 | doi: "10568/110997" 11 | keywords: 12 | - dspace 13 | - "dublin-core" 14 | - csv 15 | - metadata 16 | license: "GPL-3.0-only" 17 | message: "If you use this software, please cite it using these metadata." 
18 | repository-code: "https://github.com/ilri/csv-metadata-quality" 19 | title: "DSpace CSV Metadata Quality Checker" 20 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. 
If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 
122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. 
A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include csv_metadata_quality/data/licenses.json 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

DSpace CSV Metadata Quality Checker

2 | 3 | 4 | [badge: Build and Test] 5 | [badge: Code style: black] 6 |

7 | 8 | A simple, but opinionated metadata quality checker and fixer designed to work with CSVs in the DSpace ecosystem (though it could theoretically work on any CSV that uses Dublin Core fields as columns). The implementation is essentially a pipeline of checks and fixes that begins with splitting multi-value fields on the standard DSpace "||" separator, trimming leading/trailing whitespace, and then proceeding to more specialized cases like ISSNs, ISBNs, languages, unnecessary Unicode, AGROVOC terms, etc. 9 | 10 | Requires Python 3.9 or greater. CSV support comes from the [Pandas](https://pandas.pydata.org/) library. 11 | 12 | If you use the DSpace CSV metadata quality checker please cite: 13 | 14 | *Orth, A. 2019. DSpace CSV metadata quality checker. Nairobi, Kenya: ILRI. https://hdl.handle.net/10568/110997.* 15 | 16 | ## Functionality 17 | 18 | - Validate dates, ISSNs, ISBNs, and multi-value separators ("||") 19 | - Validate languages against ISO 639-1 (alpha2) and ISO 639-3 (alpha3) 20 | - Experimental validation of titles and abstracts against item's Dublin Core language field 21 | - Validate subjects against the AGROVOC REST API (see the `--agrovoc-fields` option) 22 | - Validation of licenses against the list of [SPDX license identifiers](https://spdx.org/licenses) 23 | - Fix leading, trailing, and excessive (ie, more than one) whitespace 24 | - Fix invalid and unnecessary multi-value separators (`|`) 25 | - Fix problematic newlines (line feeds) using `--unsafe-fixes` 26 | - Perform [Unicode normalization](https://withblue.ink/2019/03/11/why-you-need-to-normalize-unicode-strings.html) on strings using `--unsafe-fixes` 27 | - Remove unnecessary Unicode like [non-breaking spaces](https://en.wikipedia.org/wiki/Non-breaking_space), [replacement characters](https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character), etc 28 | - Check for "suspicious" characters that indicate encoding or copy/paste issues, for example "foreˆt" should be "forêt" 29 | - Check for "mojibake" characters (and attempt to fix with `--unsafe-fixes`) 30 | - Check for countries with missing regions (and attempt to fix with `--unsafe-fixes`) 31 | - Remove duplicate metadata values 32 | - Check for duplicate items, using the title, type, and date issued as an indicator 33 | - [Normalize DOIs](https://www.crossref.org/documentation/member-setup/constructing-your-dois/) to https://doi.org URI format 34 | 35 | ## Installation 36 | The easiest way to install CSV Metadata Quality is with [uv](https://docs.astral.sh/uv/): 37 | 38 | ``` 39 | $ git clone https://github.com/ilri/csv-metadata-quality.git 40 | $ cd csv-metadata-quality 41 | $ uv sync 42 | $ source .venv/bin/activate 43 | ``` 44 | 45 | Otherwise, if you don't have uv, you can use a vanilla Python virtual environment: 46 | 47 | ``` 48 | $ git clone https://github.com/ilri/csv-metadata-quality.git 49 | $ cd csv-metadata-quality 50 | $ python3 -m venv .venv 51 | $ source .venv/bin/activate 52 | $ pip install -r requirements.txt 53 | ``` 54 | 55 | ## Usage 56 | Run CSV Metadata Quality with the `--help` flag to see available options: 57 | 58 | ``` 59 | $ csv-metadata-quality --help 60 | ``` 61 | 62 | To validate and clean a CSV file you must specify input and output files using the `-i` and `-o` options. 
For example, using the included test file: 63 | 64 | ``` 65 | $ csv-metadata-quality -i data/test.csv -o /tmp/test.csv 66 | ``` 67 | 68 | ## Invalid Multi-Value Separators 69 | While it is *theoretically* possible for a single `|` character to be used legitimately in a metadata value, in my experience it is always a typo. For example, if a user mistakenly writes `Kenya|Tanzania` when attempting to indicate two countries, the result will be one metadata value with the literal text `Kenya|Tanzania`. This utility will correct the invalid multi-value separator so that there are two metadata values, ie `Kenya||Tanzania`. 70 | 71 | This will also remove unnecessary trailing multi-value separators, for example `Kenya||Tanzania||`. 72 | 73 | ## Unsafe Fixes 74 | You can enable several "unsafe" fixes with the `--unsafe-fixes` option. Currently this will remove newlines, perform Unicode normalization, attempt to fix "mojibake" characters, and add missing UN M.49 regions. 75 | 76 | ### Newlines 77 | This is considered "unsafe" because some systems give special importance to vertical space and render it properly. DSpace does not support rendering newlines in its XMLUI and has, at times, suffered from parsing errors that cause the import process to fail if an input file had newlines. The `--unsafe-fixes` option strips Unix line feeds (U+000A). 78 | 79 | ### Unicode Normalization 80 | [Unicode](https://en.wikipedia.org/wiki/Unicode) is a standard for encoding text. As the standard aims to support most of the world's languages, characters can often be represented in different ways and still be valid Unicode. This leads to interesting problems that can be confusing unless you know what's going on behind the scenes. For example, the characters `é` and `é` *look* the same, but are not — technically they refer to different code points in the Unicode standard: 81 | 82 | - `é` is the single Unicode code point `U+00E9` 83 | - `é` is the sequence of Unicode code points `U+0065` + `U+0301` 84 | 85 | Read more about [Unicode normalization](https://withblue.ink/2019/03/11/why-you-need-to-normalize-unicode-strings.html), or see the illustrative sketch at the end of this README. 86 | 87 | ### Encoding Issues aka "Mojibake" 88 | [Mojibake](https://en.wikipedia.org/wiki/Mojibake) is a phenomenon that occurs when text is decoded using an unintended character encoding. This usually presents itself in the form of strange, garbled characters in the text. Enabling "unsafe" fixes will attempt to correct these, for example: 89 | 90 | - CIAT PublicaÃ§ao → CIAT Publicaçao 91 | - CIAT PublicaciÃ³n → CIAT Publicación 92 | 93 | Pay special attention to the output of the script as well as the resulting file to make sure no new issues have been introduced. The ideal way to solve these issues is to avoid them in the first place. See [this guide about opening CSVs in UTF-8 format in Excel](https://www.itg.ias.edu/content/how-import-csv-file-uses-utf-8-character-encoding-0). 94 | 95 | ### Countries With Missing Regions 96 | When an input file has both country and region columns, we can check to see if the ISO 3166 country names have matching UN M.49 regions and add them when they are missing. 97 | 98 | ## AGROVOC Validation 99 | You can enable validation of metadata values in certain fields against the AGROVOC REST API with the `--agrovoc-fields` option. For example, in addition to agricultural subjects, many countries and regions are also present in AGROVOC. 
86 | 
87 | ### Encoding Issues aka "Mojibake"
88 | [Mojibake](https://en.wikipedia.org/wiki/Mojibake) is a phenomenon that occurs when text is decoded using an unintended character encoding. This usually presents itself in the form of strange, garbled characters in the text. Enabling "unsafe" fixes will attempt to correct these, for example:
89 | 
90 | - CIAT PublicaÃ§ao → CIAT Publicaçao
91 | - CIAT PublicaciÃ³n → CIAT Publicación
92 | 
93 | Pay special attention to the output of the script as well as the resulting file to make sure no new issues have been introduced. The ideal way to solve these issues is to avoid them in the first place. See [this guide about opening CSVs in UTF-8 format in Excel](https://www.itg.ias.edu/content/how-import-csv-file-uses-utf-8-character-encoding-0).
94 | 
95 | ### Countries With Missing Regions
96 | When an input file has both country and region columns, we can check to see if the ISO 3166 country names have matching UN M.49 regions and add them when they are missing.
97 | 
98 | ## AGROVOC Validation
99 | You can enable validation of metadata values in certain fields against the AGROVOC REST API with the `--agrovoc-fields` option. For example, in addition to agricultural subjects, many countries and regions are also present in AGROVOC. Enable this validation by specifying a comma-separated list of fields:
100 | 
101 | ```
102 | $ csv-metadata-quality -i data/test.csv -o /tmp/test.csv -u --agrovoc-fields dc.subject,cg.coverage.country
103 | ...
104 | Invalid AGROVOC (dc.subject): FOREST
105 | Invalid AGROVOC (cg.coverage.country): KENYAA
106 | ```
107 | 
108 | *Note: Requests to the AGROVOC REST API are cached using [requests_cache](https://pypi.org/project/requests-cache/) to speed up subsequent runs with the same data and to be kind to the system's administrators.*
109 | 
110 | ## Experimental Checks
111 | You can enable experimental support for validating whether the value of an item's `dc.language.iso` or `dcterms.language` field matches the actual language used in its title, abstract, and citation.
112 | 
113 | ```
114 | $ csv-metadata-quality -i data/test.csv -o /tmp/test.csv -e
115 | ...
116 | Possibly incorrect language es (detected en): Incorrect ISO 639-1 language
117 | Possibly incorrect language spa (detected eng): Incorrect ISO 639-3 language
118 | ```
119 | 
120 | This currently uses [py3langid](https://pypi.org/project/py3langid/), a faster port of the original [langid.py](https://github.com/saffsd/langid.py) library. In the future I would like to move to the fastText library, but there is currently an [issue with their Python bindings](https://github.com/facebookresearch/fastText/issues/909) that makes this unfeasible.
121 | 
122 | ## Todo
123 | 
124 | - Reporting / summary
125 | - Better logging, for example with INFO, WARN, and ERR levels
126 | - Verbose, debug, or quiet options
127 | - Warn if an author is shorter than 3 characters?
128 | - Warn if two items use the same file in `filename` column
129 | - Add tests for application invocation, ie `tests/test_app.py`?
130 | - Validate ISSNs or journal titles against CrossRef API?
131 | - Add configurable field validation, like specify a field name and a validation file?
132 |   - Perhaps like --validate=field.name,filename
133 | - Add some row-based item sanity checks and fixes:
134 |   - Warn if item is Open Access, but missing a filename or URL
135 |   - Warn if item is Open Access, but missing a license
136 |   - Warn if item has an ISSN but no journal title
137 |   - Update journal titles from ISSN
138 | - Migrate from Pandas to Polars
139 | 
140 | ## License
141 | This work is licensed under the [GPLv3](https://www.gnu.org/licenses/gpl-3.0.en.html).
142 | 
143 | The license allows you to use and modify the work for personal and commercial purposes, but if you distribute the work you must provide users with a means to access the source code for the version you are distributing. Read more about the [GPLv3 at TL;DR Legal](https://tldrlegal.com/license/gnu-general-public-license-v3-(gpl-3)).
144 | -------------------------------------------------------------------------------- /csv_metadata_quality/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ilri/csv-metadata-quality/753f3340a32dbd6ade4d6798d34bccdfeba63dd8/csv_metadata_quality/__init__.py -------------------------------------------------------------------------------- /csv_metadata_quality/__main__.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | from sys import argv 4 | 5 | from csv_metadata_quality import app 6 | 7 | 8 | def main(): 9 | app.run(argv) 10 | 11 | 12 | if __name__ == "__main__": 13 | main() 14 | -------------------------------------------------------------------------------- /csv_metadata_quality/app.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | import argparse 4 | import os 5 | import re 6 | import signal 7 | import sys 8 | from datetime import timedelta 9 | 10 | import pandas as pd 11 | import requests_cache 12 | from colorama import Fore 13 | 14 | import csv_metadata_quality.check as check 15 | import csv_metadata_quality.experimental as experimental 16 | import csv_metadata_quality.fix as fix 17 | from csv_metadata_quality.version import VERSION 18 | 19 | 20 | def parse_args(argv): 21 | parser = argparse.ArgumentParser(description="Metadata quality checker and fixer.") 22 | parser.add_argument( 23 | "--agrovoc-fields", 24 | "-a", 25 | help="Comma-separated list of fields to validate against AGROVOC, for example: dcterms.subject,cg.coverage.country", 26 | ) 27 | parser.add_argument( 28 | "--drop-invalid-agrovoc", 29 | "-d", 30 | help="After validating metadata values against AGROVOC, drop invalid values.", 31 | action="store_true", 32 | ) 33 | parser.add_argument( 34 | "--experimental-checks", 35 | "-e", 36 | help="Enable experimental checks like language detection", 37 | action="store_true", 38 | ) 39 | parser.add_argument( 40 | "--input-file", 41 | "-i", 42 | help="Path to input file. Must be a UTF-8 CSV.", 43 | required=True, 44 | type=argparse.FileType("r", encoding="UTF-8"), 45 | ) 46 | parser.add_argument( 47 | "--output-file", 48 | "-o", 49 | help="Path to output file (always CSV).", 50 | required=True, 51 | type=argparse.FileType("w", encoding="UTF-8"), 52 | ) 53 | parser.add_argument( 54 | "--unsafe-fixes", "-u", help="Perform unsafe fixes.", action="store_true" 55 | ) 56 | parser.add_argument( 57 | "--version", "-V", action="version", version=f"CSV Metadata Quality v{VERSION}" 58 | ) 59 | parser.add_argument( 60 | "--exclude-fields", 61 | "-x", 62 | help="Comma-separated list of fields to skip, for example: dc.contributor.author,dcterms.bibliographicCitation", 63 | ) 64 | args = parser.parse_args() 65 | 66 | return args 67 | 68 | 69 | def signal_handler(signal, frame): 70 | sys.exit(1) 71 | 72 | 73 | def run(argv): 74 | args = parse_args(argv) 75 | 76 | # set the signal handler for SIGINT (^C) 77 | signal.signal(signal.SIGINT, signal_handler) 78 | 79 | # Read all fields as strings so dates don't get converted from 1998 to 1998.0 80 | df = pd.read_csv(args.input_file, dtype_backend="pyarrow", dtype="str") 81 | 82 | # Check if the user requested to skip any fields 83 | if args.exclude_fields: 84 | # Split the list of excluded fields on ',' into a list. Note that the 85 | # user should be careful to no include spaces here. 
86 | exclude = args.exclude_fields.split(",") 87 | else: 88 | exclude = [] 89 | 90 | # enable transparent request cache with thirty days expiry 91 | expire_after = timedelta(days=30) 92 | # Allow overriding the location of the requests cache, just in case we are 93 | # running in an environment where we can't write to the current working di- 94 | # rectory (for example from csv-metadata-quality-web). 95 | REQUESTS_CACHE_DIR = os.environ.get("REQUESTS_CACHE_DIR", ".") 96 | requests_cache.install_cache( 97 | f"{REQUESTS_CACHE_DIR}/agrovoc-response-cache", expire_after=expire_after 98 | ) 99 | 100 | # prune old cache entries 101 | requests_cache.delete() 102 | 103 | for column in df.columns: 104 | if column in exclude: 105 | print(f"{Fore.YELLOW}Skipping {Fore.RESET}{column}") 106 | 107 | continue 108 | 109 | if args.unsafe_fixes: 110 | # Skip whitespace and newline fixes on abstracts and descriptions 111 | # because there are too many with legitimate multi-line metadata. 112 | match = re.match(r"^.*?(abstract|description).*$", column) 113 | if match is None: 114 | # Fix: whitespace 115 | df[column] = df[column].apply(fix.whitespace, field_name=column) 116 | 117 | # Fix: newlines 118 | df[column] = df[column].apply(fix.newlines, field_name=column) 119 | 120 | # Fix: missing space after comma. Only run on author and citation 121 | # fields for now, as this problem is mostly an issue in names. 122 | if args.unsafe_fixes: 123 | match = re.match(r"^.*?(author|[Cc]itation).*$", column) 124 | if match is not None: 125 | df[column] = df[column].apply(fix.comma_space, field_name=column) 126 | 127 | # Fix: perform Unicode normalization (NFC) to convert decomposed 128 | # characters into their canonical forms. 129 | if args.unsafe_fixes: 130 | df[column] = df[column].apply(fix.normalize_unicode, field_name=column) 131 | 132 | # Check: suspicious characters 133 | df[column].apply(check.suspicious_characters, field_name=column) 134 | 135 | # Fix: mojibake. If unsafe fixes are not enabled then we only check. 136 | if args.unsafe_fixes: 137 | df[column] = df[column].apply(fix.mojibake, field_name=column) 138 | else: 139 | df[column].apply(check.mojibake, field_name=column) 140 | 141 | # Fix: unnecessary Unicode 142 | df[column] = df[column].apply(fix.unnecessary_unicode) 143 | 144 | # Fix: normalize DOIs 145 | match = re.match(r"^.*?identifier\.doi.*$", column) 146 | if match is not None: 147 | df[column] = df[column].apply(fix.normalize_dois) 148 | 149 | # Fix: invalid and unnecessary multi-value separators. Skip the title 150 | # and abstract fields because "|" is used to indicate something like 151 | # a subtitle. 
152 | match = re.match(r"^.*?(abstract|[Cc]itation|title).*$", column) 153 | if match is None: 154 | df[column] = df[column].apply(fix.separators, field_name=column) 155 | # Run whitespace fix again after fixing invalid separators 156 | df[column] = df[column].apply(fix.whitespace, field_name=column) 157 | 158 | # Fix: duplicate metadata values 159 | df[column] = df[column].apply(fix.duplicates, field_name=column) 160 | 161 | # Check: invalid AGROVOC subject and optionally drop them 162 | if args.agrovoc_fields: 163 | # Identify fields the user wants to validate against AGROVOC 164 | for field in args.agrovoc_fields.split(","): 165 | if column == field: 166 | df[column] = df[column].apply( 167 | check.agrovoc, field_name=column, drop=args.drop_invalid_agrovoc 168 | ) 169 | 170 | # Check: invalid language 171 | match = re.match(r"^.*?language.*$", column) 172 | if match is not None: 173 | df[column].apply(check.language) 174 | 175 | # Check: invalid ISSN 176 | match = re.match(r"^.*?issn.*$", column) 177 | if match is not None: 178 | df[column].apply(check.issn) 179 | 180 | # Check: invalid ISBN 181 | match = re.match(r"^.*?isbn.*$", column) 182 | if match is not None: 183 | df[column].apply(check.isbn) 184 | 185 | # Check: invalid date 186 | match = re.match(r"^.*?(date|dcterms\.issued).*$", column) 187 | if match is not None: 188 | df[column].apply(check.date, field_name=column) 189 | 190 | # Check: filename extension 191 | if column == "filename": 192 | df[column].apply(check.filename_extension) 193 | 194 | # Check: SPDX license identifier 195 | match = re.match(r"dcterms\.license.*$", column) 196 | if match is not None: 197 | df[column].apply(check.spdx_license_identifier) 198 | 199 | ### End individual column checks ### 200 | 201 | # Check: duplicate items 202 | # We extract just the title, type, and date issued columns to analyze 203 | try: 204 | duplicates_df = df.filter( 205 | regex=r"dcterms\.title|dc\.title|dcterms\.type|dc\.type|dcterms\.issued|dc\.date\.issued" 206 | ) 207 | check.duplicate_items(duplicates_df) 208 | 209 | # Delete the temporary duplicates DataFrame 210 | del duplicates_df 211 | except IndexError: 212 | pass 213 | 214 | ## 215 | # Perform some checks on rows so we can consider items as a whole rather 216 | # than simple on a field-by-field basis. This allows us to check whether 217 | # the language used in the title and abstract matches the language indi- 218 | # cated in the language field, for example. 219 | # 220 | # This is slower and apparently frowned upon in the Pandas community be- 221 | # cause it requires iterating over rows rather than using apply over a 222 | # column. For now it will have to do. 223 | ## 224 | 225 | # Transpose the DataFrame so we can consider each row as a column 226 | df_transposed = df.T 227 | 228 | # Remember, here a "column" is an item (previously row). Perhaps I 229 | # should rename column in this for loop... 
230 | for column in df_transposed.columns: 231 | # Check: citation DOI 232 | check.citation_doi(df_transposed[column], exclude) 233 | 234 | # Check: title in citation 235 | check.title_in_citation(df_transposed[column], exclude) 236 | 237 | if args.unsafe_fixes: 238 | # Fix: countries match regions 239 | df_transposed[column] = fix.countries_match_regions( 240 | df_transposed[column], exclude 241 | ) 242 | else: 243 | # Check: countries match regions 244 | check.countries_match_regions(df_transposed[column], exclude) 245 | 246 | if args.experimental_checks: 247 | experimental.correct_language(df_transposed[column], exclude) 248 | 249 | # Transpose the DataFrame back before writing. This is probably wasteful to 250 | # do every time since we technically only need to do it if we've done the 251 | # countries/regions fix above, but I can't think of another way for now. 252 | df_transposed_back = df_transposed.T 253 | 254 | # Write 255 | df_transposed_back.to_csv(args.output_file, index=False) 256 | 257 | # Close the input and output files before exiting 258 | args.input_file.close() 259 | args.output_file.close() 260 | 261 | sys.exit(0) 262 | -------------------------------------------------------------------------------- /csv_metadata_quality/check.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | import logging 4 | import re 5 | from datetime import datetime 6 | 7 | import country_converter as coco 8 | import pandas as pd 9 | import requests 10 | from colorama import Fore 11 | from pycountry import languages 12 | from stdnum import isbn as stdnum_isbn 13 | from stdnum import issn as stdnum_issn 14 | 15 | from csv_metadata_quality.util import is_mojibake, load_spdx_licenses 16 | 17 | 18 | def issn(field): 19 | """Check if an ISSN is valid. 20 | 21 | Prints the ISSN if invalid. 22 | 23 | stdnum's is_valid() function never raises an exception. 24 | 25 | See: https://arthurdejong.org/python-stdnum/doc/1.11/index.html#stdnum.module.is_valid 26 | """ 27 | 28 | # Skip fields with missing values 29 | if pd.isna(field): 30 | return 31 | 32 | # Try to split multi-value field on "||" separator 33 | for value in field.split("||"): 34 | if not stdnum_issn.is_valid(value): 35 | print(f"{Fore.RED}Invalid ISSN: {Fore.RESET}{value}") 36 | 37 | return 38 | 39 | 40 | def isbn(field): 41 | """Check if an ISBN is valid. 42 | 43 | Prints the ISBN if invalid. 44 | 45 | stdnum's is_valid() function never raises an exception. 46 | 47 | See: https://arthurdejong.org/python-stdnum/doc/1.11/index.html#stdnum.module.is_valid 48 | """ 49 | 50 | # Skip fields with missing values 51 | if pd.isna(field): 52 | return 53 | 54 | # Try to split multi-value field on "||" separator 55 | for value in field.split("||"): 56 | if not stdnum_isbn.is_valid(value): 57 | print(f"{Fore.RED}Invalid ISBN: {Fore.RESET}{value}") 58 | 59 | return 60 | 61 | 62 | def date(field, field_name): 63 | """Check if a date is valid. 64 | 65 | In DSpace the issue date is usually 1990, 1990-01, or 1990-01-01, but it 66 | could technically even include time as long as it is ISO8601. 67 | 68 | Also checks for other invalid cases like missing and multiple dates. 69 | 70 | Prints the date if invalid. 
71 | """ 72 | 73 | if pd.isna(field): 74 | print(f"{Fore.RED}Missing date ({field_name}).{Fore.RESET}") 75 | 76 | return 77 | 78 | # Try to split multi-value field on "||" separator 79 | multiple_dates = field.split("||") 80 | 81 | # We don't allow multi-value date fields 82 | if len(multiple_dates) > 1: 83 | print( 84 | f"{Fore.RED}Multiple dates not allowed ({field_name}): {Fore.RESET}{field}" 85 | ) 86 | 87 | return 88 | 89 | try: 90 | # Check if date is valid YYYY format 91 | datetime.strptime(field, "%Y") 92 | 93 | return 94 | except ValueError: 95 | pass 96 | 97 | try: 98 | # Check if date is valid YYYY-MM format 99 | datetime.strptime(field, "%Y-%m") 100 | 101 | return 102 | except ValueError: 103 | pass 104 | 105 | try: 106 | # Check if date is valid YYYY-MM-DD format 107 | datetime.strptime(field, "%Y-%m-%d") 108 | 109 | return 110 | except ValueError: 111 | pass 112 | 113 | try: 114 | # Check if date is valid YYYY-MM-DDTHH:MM:SSZ format 115 | datetime.strptime(field, "%Y-%m-%dT%H:%M:%SZ") 116 | 117 | return 118 | except ValueError: 119 | print(f"{Fore.RED}Invalid date ({field_name}): {Fore.RESET}{field}") 120 | 121 | return 122 | 123 | 124 | def suspicious_characters(field, field_name): 125 | """Warn about suspicious characters. 126 | 127 | Look for standalone characters that could indicate encoding or copy/paste 128 | errors for languages with accents. For example: foreˆt should be forêt. 129 | """ 130 | 131 | # Skip fields with missing values 132 | if pd.isna(field): 133 | return 134 | 135 | # List of suspicious characters, for example: ́ˆ~` 136 | suspicious_characters = ["\u00b4", "\u02c6", "\u007e", "\u0060"] 137 | 138 | for character in suspicious_characters: 139 | # Find the position of the suspicious character in the string 140 | suspicious_character_position = field.find(character) 141 | 142 | # Python returns -1 if there is no match 143 | if suspicious_character_position != -1: 144 | # Create a temporary new string starting from the position of the 145 | # suspicious character 146 | field_subset = field[suspicious_character_position:] 147 | 148 | # Print part of the metadata value starting from the suspicious 149 | # character and spanning enough of the rest to give a preview, 150 | # but not too much to cause the line to break in terminals with 151 | # a default of 80 characters width. 152 | suspicious_character_msg = f"{Fore.YELLOW}Suspicious character ({field_name}): {Fore.RESET}{field_subset}" 153 | print(f"{suspicious_character_msg:1.80}") 154 | 155 | return 156 | 157 | 158 | def language(field): 159 | """Check if a language is valid ISO 639-1 (alpha 2) or ISO 639-3 (alpha 3). 160 | 161 | Prints the value if it is invalid. 162 | """ 163 | 164 | # Skip fields with missing values 165 | if pd.isna(field): 166 | return 167 | 168 | # need to handle "Other" values here... 169 | 170 | # Try to split multi-value field on "||" separator 171 | for value in field.split("||"): 172 | # After splitting, check if language value is 2 or 3 characters so we 173 | # can check it against ISO 639-1 or ISO 639-3 accordingly. 
174 | if len(value) == 2: 175 | if not languages.get(alpha_2=value): 176 | print(f"{Fore.RED}Invalid ISO 639-1 language: {Fore.RESET}{value}") 177 | elif len(value) == 3: 178 | if not languages.get(alpha_3=value): 179 | print(f"{Fore.RED}Invalid ISO 639-3 language: {Fore.RESET}{value}") 180 | else: 181 | print(f"{Fore.RED}Invalid language: {Fore.RESET}{value}") 182 | 183 | return 184 | 185 | 186 | def agrovoc(field, field_name, drop): 187 | """Check subject terms against AGROVOC REST API. 188 | 189 | Function constructor expects the field as well as the field name because 190 | many fields can now be validated against AGROVOC and we want to be able 191 | to inform the user in which field the invalid term is. 192 | 193 | Logic copied from agrovoc-lookup.py. 194 | 195 | See: https://github.com/ilri/DSpace/blob/5_x-prod/agrovoc-lookup.py 196 | 197 | Prints a warning if the value is invalid. 198 | """ 199 | 200 | # Skip fields with missing values 201 | if pd.isna(field): 202 | return 203 | 204 | # Initialize an empty list to hold the validated AGROVOC values 205 | values = [] 206 | 207 | # Try to split multi-value field on "||" separator 208 | for value in field.split("||"): 209 | request_url = "https://agrovoc.uniroma2.it/agrovoc/rest/v1/agrovoc/search" 210 | request_params = {"query": value} 211 | 212 | request = requests.get(request_url, params=request_params) 213 | 214 | if request.status_code == requests.codes.ok: 215 | data = request.json() 216 | 217 | # check if there are any results 218 | if len(data["results"]) == 0: 219 | if drop: 220 | print( 221 | f"{Fore.GREEN}Dropping invalid AGROVOC ({field_name}): {Fore.RESET}{value}" 222 | ) 223 | else: 224 | print( 225 | f"{Fore.RED}Invalid AGROVOC ({field_name}): {Fore.RESET}{value}" 226 | ) 227 | 228 | # value is invalid AGROVOC, but we are not dropping 229 | values.append(value) 230 | else: 231 | # value is valid AGROVOC so save it 232 | values.append(value) 233 | 234 | # Create a new field consisting of all values joined with "||" 235 | new_field = "||".join(values) 236 | 237 | return new_field 238 | 239 | 240 | def filename_extension(field): 241 | """Check filename extension. 242 | 243 | CSVs with a 'filename' column are likely meant as input for the SAFBuilder 244 | tool, which creates a Simple Archive Format bundle for importing metadata 245 | with accompanying PDFs or other files into DSpace. 246 | 247 | This check warns if a filename has an uncommon extension (that is, other 248 | than .pdf, .xls(x), .doc(x), ppt(x), case insensitive). 
249 | """ 250 | 251 | # Skip fields with missing values 252 | if pd.isna(field): 253 | return 254 | 255 | # Try to split multi-value field on "||" separator 256 | values = field.split("||") 257 | 258 | # List of common filename extentions 259 | common_filename_extensions = [ 260 | ".pdf", 261 | ".doc", 262 | ".docx", 263 | ".ppt", 264 | ".pptx", 265 | ".xls", 266 | ".xlsx", 267 | ] 268 | 269 | # Iterate over all values 270 | for value in values: 271 | # Strip filename descriptions that are meant for SAF Bundler, for 272 | # example: Annual_Report_2020.pdf__description:Report 273 | if "__description" in value: 274 | value = value.split("__")[0] 275 | 276 | # Assume filename extension does not match 277 | filename_extension_match = False 278 | 279 | for filename_extension in common_filename_extensions: 280 | # Check for extension at the end of the filename 281 | pattern = re.escape(filename_extension) + r"$" 282 | match = re.search(pattern, value, re.IGNORECASE) 283 | 284 | if match is not None: 285 | # Register the match and stop checking for this filename 286 | filename_extension_match = True 287 | 288 | break 289 | 290 | if filename_extension_match is False: 291 | print(f"{Fore.YELLOW}Filename with uncommon extension: {Fore.RESET}{value}") 292 | 293 | return 294 | 295 | 296 | def spdx_license_identifier(field): 297 | """Check if a license is a valid SPDX identifier. 298 | 299 | Prints the value if it is invalid. 300 | """ 301 | 302 | # List of common non-SPDX licenses to ignore 303 | # See: https://ilri.github.io/cgspace-submission-guidelines/dcterms-license/dcterms-license.txt 304 | ignore_licenses = { 305 | "All rights reserved; no re-use allowed", 306 | "All rights reserved; self-archive copy only", 307 | "Copyrighted; Non-commercial educational use only", 308 | "Copyrighted; Non-commercial use only", 309 | "Copyrighted; all rights reserved", 310 | "Other", 311 | } 312 | 313 | # Skip fields with missing values 314 | if pd.isna(field) or field in ignore_licenses: 315 | return 316 | 317 | spdx_licenses = load_spdx_licenses() 318 | 319 | # Try to split multi-value field on "||" separator 320 | for value in field.split("||"): 321 | if value not in spdx_licenses: 322 | print(f"{Fore.YELLOW}Non-SPDX license identifier: {Fore.RESET}{value}") 323 | 324 | return 325 | 326 | 327 | def duplicate_items(df): 328 | """Attempt to identify duplicate items. 329 | 330 | First we check the total number of titles and compare it with the number of 331 | unique titles. If there are less unique titles than total titles we expand 332 | the search by creating a key (of sorts) for each item that includes their 333 | title, type, and date issued, and compare it with all the others. If there 334 | are multiple occurrences of the same title, type, date string then it's a 335 | very good indicator that the items are duplicates. 336 | """ 337 | 338 | # Extract the names of the title, type, and date issued columns so we can 339 | # reference them later. First we filter columns by likely patterns, then 340 | # we extract the name from the first item of the resulting object, ie: 341 | # 342 | # Index(['dcterms.title[en_US]'], dtype='object') 343 | # 344 | # But, we need to consider that dc.title.alternative might come before the 345 | # main title in the CSV, so use a negative lookahead to eliminate that. 
346 | # 347 | # See: https://regex101.com/r/elyXkW/1 348 | title_column_name = df.filter( 349 | regex=r"^(dc|dcterms)\.title(?!\.alternative).*$" 350 | ).columns[0] 351 | type_column_name = df.filter(regex=r"^(dcterms\.type|dc\.type).*$").columns[0] 352 | date_column_name = df.filter( 353 | regex=r"^(dcterms\.issued|dc\.date\.accessioned).*$" 354 | ).columns[0] 355 | 356 | items_count_total = df[title_column_name].count() 357 | items_count_unique = df[title_column_name].nunique() 358 | 359 | if items_count_unique < items_count_total: 360 | # Create a list to hold our items while we check for duplicates 361 | items = [] 362 | 363 | for index, row in df.iterrows(): 364 | item_title_type_date = f"{row[title_column_name]}{row[type_column_name]}{row[date_column_name]}" 365 | 366 | if item_title_type_date in items: 367 | print( 368 | f"{Fore.YELLOW}Possible duplicate ({title_column_name}): {Fore.RESET}{row[title_column_name]}" 369 | ) 370 | else: 371 | items.append(item_title_type_date) 372 | 373 | 374 | def mojibake(field, field_name): 375 | """Check for mojibake (text that was encoded in one encoding and decoded in 376 | in another, perhaps multiple times). See util.py. 377 | 378 | Prints the string if it contains suspected mojibake. 379 | """ 380 | 381 | # Skip fields with missing values 382 | if pd.isna(field): 383 | return 384 | 385 | if is_mojibake(field): 386 | print( 387 | f"{Fore.YELLOW}Possible encoding issue ({field_name}): {Fore.RESET}{field}" 388 | ) 389 | 390 | return 391 | 392 | 393 | def citation_doi(row, exclude): 394 | """Check for the scenario where an item has a DOI listed in its citation, 395 | but does not have a cg.identifier.doi field. 396 | 397 | Function prints a warning if the DOI field is missing, but there is a DOI 398 | in the citation. 399 | """ 400 | # Check if the user requested us to skip any DOI fields so we can 401 | # just return before going any further. 402 | for field in exclude: 403 | match = re.match(r"^.*?doi.*$", field) 404 | if match is not None: 405 | return 406 | 407 | # Initialize some variables at global scope so that we can set them in the 408 | # loop scope below and still be able to access them afterwards. 409 | citation = "" 410 | 411 | # Iterate over the labels of the current row's values to check if a DOI 412 | # exists. If not, then we extract the citation to see if there is a DOI 413 | # listed there. 414 | for label in row.axes[0]: 415 | # Skip fields with missing values 416 | if pd.isna(row[label]): 417 | continue 418 | 419 | # If a DOI field exists we don't need to check the citation 420 | match = re.match(r"^.*?doi.*$", label) 421 | if match is not None: 422 | return 423 | 424 | # Check if the current label is a citation field and make sure the user 425 | # hasn't asked to skip it. If not, then set the citation. 
426 | match = re.match(r"^.*?[cC]itation.*$", label) 427 | if match is not None and label not in exclude: 428 | citation = row[label] 429 | 430 | if citation != "": 431 | # Check the citation for "doi: 10.1186/1743-422X-9-218" 432 | doi_match1 = re.match(r"^.*?doi:\s.*$", citation) 433 | # Check the citation for a DOI URL (doi.org, dx.doi.org, etc) 434 | doi_match2 = re.match(r"^.*?doi\.org.*$", citation) 435 | if doi_match1 is not None or doi_match2 is not None: 436 | print( 437 | f"{Fore.YELLOW}DOI in citation, but missing a DOI field: {Fore.RESET}{citation}" 438 | ) 439 | 440 | return 441 | 442 | 443 | def title_in_citation(row, exclude): 444 | """Check for the scenario where an item's title is missing from its cita- 445 | tion. This could mean that it is missing entirely, or perhaps just exists 446 | in a different format (whitespace, accents, etc). 447 | 448 | Function prints a warning if the title does not appear in the citation. 449 | """ 450 | # Initialize some variables at global scope so that we can set them in the 451 | # loop scope below and still be able to access them afterwards. 452 | title = "" 453 | citation = "" 454 | 455 | # Iterate over the labels of the current row's values to get the names of 456 | # the title and citation columns. Then we check if the title is present in 457 | # the citation. 458 | for label in row.axes[0]: 459 | # Skip fields with missing values 460 | if pd.isna(row[label]): 461 | continue 462 | 463 | # Find the name of the title column 464 | match = re.match(r"^(dc|dcterms)\.title.*$", label) 465 | if match is not None and label not in exclude: 466 | title = row[label] 467 | 468 | # Find the name of the citation column 469 | match = re.match(r"^.*?[cC]itation.*$", label) 470 | if match is not None and label not in exclude: 471 | citation = row[label] 472 | 473 | if citation != "": 474 | if title not in citation: 475 | print(f"{Fore.YELLOW}Title is not present in citation: {Fore.RESET}{title}") 476 | 477 | return 478 | 479 | 480 | def countries_match_regions(row, exclude): 481 | """Check for the scenario where an item has country coverage metadata, but 482 | does not have the corresponding region metadata. For example, an item that 483 | has country coverage "Kenya" should also have region "Eastern Africa" acc- 484 | ording to the UN M.49 classification scheme. 485 | 486 | See: https://unstats.un.org/unsd/methodology/m49/ 487 | 488 | Function prints a warning if the appropriate region is not present. 489 | """ 490 | # Initialize some variables at global scope so that we can set them in the 491 | # loop scope below and still be able to access them afterwards. 492 | country_column_name = "" 493 | region_column_name = "" 494 | title_column_name = "" 495 | 496 | # Instantiate a CountryConverter() object here. According to the docs it is 497 | # more performant to do that as opposed to calling coco.convert() directly 498 | # because we don't need to re-load the country data with each iteration. 499 | cc = coco.CountryConverter() 500 | 501 | # Set logging to ERROR so country_converter's convert() doesn't print the 502 | # "not found in regex" warning message to the screen. 503 | logging.basicConfig(level=logging.ERROR) 504 | 505 | # Iterate over the labels of the current row's values to get the names of 506 | # the title and citation columns. Then we check if the title is present in 507 | # the citation. 
508 | for label in row.axes[0]: 509 | # Find the name of the country column 510 | match = re.match(r"^.*?country.*$", label) 511 | if match is not None: 512 | country_column_name = label 513 | 514 | # Find the name of the region column, but make sure it's not subregion! 515 | match = re.match(r"^.*?region.*$", label) 516 | if match is not None and "sub" not in label: 517 | region_column_name = label 518 | 519 | # Find the name of the title column 520 | match = re.match(r"^(dc|dcterms)\.title.*$", label) 521 | if match is not None: 522 | title_column_name = label 523 | 524 | # Make sure the user has not asked to exclude any metadata fields. If so, we 525 | # should return immediately. 526 | column_names = [country_column_name, region_column_name, title_column_name] 527 | if any(field in column_names for field in exclude): 528 | return 529 | 530 | # Make sure we found the country and region columns 531 | if country_column_name != "" and region_column_name != "": 532 | # If we don't have any countries then we should return early before 533 | # suggesting regions. 534 | if row[country_column_name] is not None: 535 | countries = row[country_column_name].split("||") 536 | else: 537 | return 538 | 539 | if row[region_column_name] is not None: 540 | regions = row[region_column_name].split("||") 541 | else: 542 | regions = [] 543 | 544 | for country in countries: 545 | # Look up the UN M.49 regions for this country code. CoCo seems to 546 | # only list the direct region, ie Western Africa, rather than all 547 | # the parent regions ("Sub-Saharan Africa", "Africa", "World") 548 | un_region = cc.convert(names=country, to="UNRegion") 549 | 550 | if un_region != "not found" and un_region not in regions: 551 | try: 552 | print( 553 | f"{Fore.YELLOW}Missing region ({country} → {un_region}): {Fore.RESET}{row[title_column_name]}" 554 | ) 555 | except KeyError: 556 | print( 557 | f"{Fore.YELLOW}Missing region ({country} → {un_region}): {Fore.RESET}" 558 | ) 559 | 560 | return 561 | -------------------------------------------------------------------------------- /csv_metadata_quality/experimental.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | import re 4 | 5 | import pandas as pd 6 | import py3langid as langid 7 | from colorama import Fore 8 | from pycountry import languages 9 | 10 | 11 | def correct_language(row, exclude): 12 | """Analyze the text used in the title, abstract, and citation fields to pre- 13 | dict the language being used and compare it with the item's dc.language.iso 14 | field. 15 | 16 | Function prints an error if the language field does not match the detected 17 | language and returns the value in the language field if it does match. 18 | """ 19 | 20 | # Initialize some variables at global scope so that we can set them in the 21 | # loop scope below and still be able to access them afterwards. 22 | language = "" 23 | sample_strings = [] 24 | title = None 25 | 26 | # Iterate over the labels of the current row's values. Before we transposed 27 | # the DataFrame these were the columns in the CSV, ie dc.title and dc.type. 
28 | for label in row.axes[0]: 29 | # Skip fields with missing values 30 | if pd.isna(row[label]): 31 | continue 32 | 33 | # Check if current row has multiple language values (separated by "||") 34 | match = re.match(r"^.*?language.*$", label) 35 | if match is not None: 36 | # Skip fields with multiple language values 37 | if "||" in row[label]: 38 | return 39 | 40 | language = row[label] 41 | 42 | # Extract title if it is present (note that we don't allow excluding 43 | # the title here because it complicates things). 44 | match = re.match(r"^.*?title.*$", label) 45 | if match is not None: 46 | title = row[label] 47 | # Append title to sample strings 48 | sample_strings.append(row[label]) 49 | 50 | # Extract abstract if it is present 51 | match = re.match(r"^.*?abstract.*$", label) 52 | if match is not None and label not in exclude: 53 | sample_strings.append(row[label]) 54 | 55 | # Extract citation if it is present 56 | match = re.match(r"^.*?[cC]itation.*$", label) 57 | if match is not None and label not in exclude: 58 | sample_strings.append(row[label]) 59 | 60 | # Make sure language is not blank and is valid ISO 639-1/639-3 before proceeding with language prediction 61 | if language != "": 62 | # Check language value like "es" 63 | if len(language) == 2: 64 | if not languages.get(alpha_2=language): 65 | return 66 | # Check language value like "spa" 67 | elif len(language) == 3: 68 | if not languages.get(alpha_3=language): 69 | return 70 | # Language value is something else like "Span", do not proceed 71 | else: 72 | return 73 | # Language is blank, do not proceed 74 | else: 75 | return 76 | 77 | # Concatenate all sample strings into one string 78 | sample_text = " ".join(sample_strings) 79 | 80 | # Restrict the langid detection space to reduce false positives 81 | langid.set_languages( 82 | ["ar", "de", "en", "es", "fr", "hi", "it", "ja", "ko", "pt", "ru", "vi", "zh"] 83 | ) 84 | langid_classification = langid.classify(sample_text) 85 | 86 | # langid returns an ISO 639-1 (alpha 2) representation of the detected language, but the current item's language field might be ISO 639-3 (alpha 3) so we should use a pycountry Language object to compare both represenations and give appropriate error messages that match the format used by in the input file. 87 | detected_language = languages.get(alpha_2=langid_classification[0]) 88 | if len(language) == 2 and language != detected_language.alpha_2: 89 | print( 90 | f"{Fore.YELLOW}Possibly incorrect language {language} (detected {detected_language.alpha_2}): {Fore.RESET}{title}" 91 | ) 92 | 93 | elif len(language) == 3 and language != detected_language.alpha_3: 94 | print( 95 | f"{Fore.YELLOW}Possibly incorrect language {language} (detected {detected_language.alpha_3}): {Fore.RESET}{title}" 96 | ) 97 | 98 | else: 99 | return 100 | -------------------------------------------------------------------------------- /csv_metadata_quality/fix.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | import logging 4 | import re 5 | from unicodedata import normalize 6 | 7 | import country_converter as coco 8 | import pandas as pd 9 | from colorama import Fore 10 | from ftfy import TextFixerConfig, fix_text 11 | 12 | from csv_metadata_quality.util import is_mojibake, is_nfc 13 | 14 | 15 | def whitespace(field, field_name): 16 | """Fix whitespace issues. 17 | 18 | Return string with leading, trailing, and consecutive whitespace trimmed. 
19 | """ 20 | 21 | # Skip fields with missing values 22 | if pd.isna(field): 23 | return 24 | 25 | # Initialize an empty list to hold the cleaned values 26 | values = [] 27 | 28 | # Try to split multi-value field on "||" separator 29 | for value in field.split("||"): 30 | # Strip leading and trailing whitespace 31 | value = value.strip() 32 | 33 | # Replace excessive whitespace (>2) with one space 34 | pattern = re.compile(r"\s{2,}") 35 | match = re.findall(pattern, value) 36 | 37 | if match: 38 | print( 39 | f"{Fore.GREEN}Removing excessive whitespace ({field_name}): {Fore.RESET}{value}" 40 | ) 41 | value = re.sub(pattern, " ", value) 42 | 43 | # Save cleaned value 44 | values.append(value) 45 | 46 | # Create a new field consisting of all values joined with "||" 47 | new_field = "||".join(values) 48 | 49 | return new_field 50 | 51 | 52 | def separators(field, field_name): 53 | """Fix for invalid and unnecessary multi-value separators, for example: 54 | 55 | value|value 56 | value|||value 57 | value||value|| 58 | 59 | Prints the field with the invalid multi-value separator. 60 | """ 61 | 62 | # Skip fields with missing values 63 | if pd.isna(field): 64 | return 65 | 66 | # Initialize an empty list to hold the cleaned values 67 | values = [] 68 | 69 | # Try to split multi-value field on "||" separator 70 | for value in field.split("||"): 71 | # Check if the value is blank and skip it 72 | if value == "": 73 | print( 74 | f"{Fore.GREEN}Fixing unnecessary multi-value separator ({field_name}): {Fore.RESET}{field}" 75 | ) 76 | 77 | continue 78 | 79 | # After splitting, see if there are any remaining "|" characters 80 | pattern = re.compile(r"\|") 81 | match = re.findall(pattern, value) 82 | 83 | if match: 84 | print( 85 | f"{Fore.GREEN}Fixing invalid multi-value separator ({field_name}): {Fore.RESET}{value}" 86 | ) 87 | 88 | value = re.sub(pattern, "||", value) 89 | 90 | # Save cleaned value 91 | values.append(value) 92 | 93 | # Create a new field consisting of all values joined with "||" 94 | new_field = "||".join(values) 95 | 96 | return new_field 97 | 98 | 99 | def unnecessary_unicode(field): 100 | """Remove and replace unnecessary Unicode characters. 101 | 102 | Removes unnecessary Unicode characters like: 103 | - Zero-width space (U+200B) 104 | - Replacement character (U+FFFD) 105 | 106 | Replaces unnecessary Unicode characters like: 107 | - Soft hyphen (U+00AD) → hyphen 108 | - No-break space (U+00A0) → space 109 | - Thin space (U+2009) → space 110 | 111 | Return string with characters removed or replaced. 
112 | """ 113 | 114 | # Skip fields with missing values 115 | if pd.isna(field): 116 | return 117 | 118 | # Check for zero-width space characters (U+200B) 119 | pattern = re.compile(r"\u200B") 120 | match = re.findall(pattern, field) 121 | 122 | if match: 123 | print(f"{Fore.GREEN}Removing unnecessary Unicode (U+200B): {Fore.RESET}{field}") 124 | field = re.sub(pattern, "", field) 125 | 126 | # Check for replacement characters (U+FFFD) 127 | pattern = re.compile(r"\uFFFD") 128 | match = re.findall(pattern, field) 129 | 130 | if match: 131 | print(f"{Fore.GREEN}Removing unnecessary Unicode (U+FFFD): {Fore.RESET}{field}") 132 | field = re.sub(pattern, "", field) 133 | 134 | # Check for no-break spaces (U+00A0) 135 | pattern = re.compile(r"\u00A0") 136 | match = re.findall(pattern, field) 137 | 138 | if match: 139 | print( 140 | f"{Fore.GREEN}Replacing unnecessary Unicode (U+00A0): {Fore.RESET}{field}" 141 | ) 142 | field = re.sub(pattern, " ", field) 143 | 144 | # Check for soft hyphens (U+00AD), sometimes preceeded with a normal hyphen 145 | pattern = re.compile(r"\u002D*?\u00AD") 146 | match = re.findall(pattern, field) 147 | 148 | if match: 149 | print( 150 | f"{Fore.GREEN}Replacing unnecessary Unicode (U+00AD): {Fore.RESET}{field}" 151 | ) 152 | field = re.sub(pattern, "-", field) 153 | 154 | # Check for thin spaces (U+2009) 155 | pattern = re.compile(r"\u2009") 156 | match = re.findall(pattern, field) 157 | 158 | if match: 159 | print( 160 | f"{Fore.GREEN}Replacing unnecessary Unicode (U+2009): {Fore.RESET}{field}" 161 | ) 162 | field = re.sub(pattern, " ", field) 163 | 164 | return field 165 | 166 | 167 | def duplicates(field, field_name): 168 | """Remove duplicate metadata values.""" 169 | 170 | # Skip fields with missing values 171 | if pd.isna(field): 172 | return 173 | 174 | # Try to split multi-value field on "||" separator 175 | values = field.split("||") 176 | 177 | # Initialize an empty list to hold the de-duplicated values 178 | new_values = [] 179 | 180 | # Iterate over all values 181 | for value in values: 182 | # Check if each value exists in our list of values already 183 | if value not in new_values: 184 | new_values.append(value) 185 | else: 186 | print( 187 | f"{Fore.GREEN}Removing duplicate value ({field_name}): {Fore.RESET}{value}" 188 | ) 189 | 190 | # Create a new field consisting of all values joined with "||" 191 | new_field = "||".join(new_values) 192 | 193 | return new_field 194 | 195 | 196 | def newlines(field, field_name): 197 | """Fix newlines. 198 | 199 | Single metadata values should not span multiple lines because this is not 200 | rendered properly in DSpace's XMLUI and even causes issues during import. 201 | 202 | Implementation note: this currently only detects Unix line feeds (0x0a). 203 | This is essentially when a user presses "Enter" to move to the next line. 204 | Other newlines like the Windows carriage return are already handled with 205 | the string stipping performed in the whitespace fixes. 206 | 207 | Confusingly, in Vim '\n' matches a line feed when searching, but you must 208 | use '\r' to *insert* a line feed, ie in a search and replace expression. 209 | 210 | Return string with newlines removed. 
211 | """ 212 | 213 | # Skip fields with missing values 214 | if pd.isna(field): 215 | return 216 | 217 | # Check for Unix line feed (LF) 218 | match = re.findall(r"\n", field) 219 | 220 | if match: 221 | print(f"{Fore.GREEN}Removing newline ({field_name}): {Fore.RESET}{field}") 222 | field = field.replace("\n", "") 223 | 224 | return field 225 | 226 | 227 | def comma_space(field, field_name): 228 | """Fix occurrences of commas missing a trailing space, for example: 229 | 230 | Orth,Alan S. 231 | 232 | This is a very common mistake in author and citation fields. 233 | 234 | Return string with a space added. 235 | """ 236 | 237 | # Skip fields with missing values 238 | if pd.isna(field): 239 | return 240 | 241 | # Check for comma followed by a word character 242 | match = re.findall(r",\w", field) 243 | 244 | if match: 245 | print( 246 | f"{Fore.GREEN}Adding space after comma ({field_name}): {Fore.RESET}{field}" 247 | ) 248 | field = re.sub(r",(\w)", r", \1", field) 249 | 250 | return field 251 | 252 | 253 | def normalize_unicode(field, field_name): 254 | """Fix occurrences of decomposed Unicode characters by normalizing them 255 | with NFC to their canonical forms, for example: 256 | 257 | Ouédraogo, Mathieu → Ouédraogo, Mathieu 258 | 259 | Return normalized string. 260 | """ 261 | 262 | # Skip fields with missing values 263 | if pd.isna(field): 264 | return 265 | 266 | # Check if the current string is using normalized Unicode (NFC) 267 | if not is_nfc(field): 268 | print(f"{Fore.GREEN}Normalizing Unicode ({field_name}): {Fore.RESET}{field}") 269 | field = normalize("NFC", field) 270 | 271 | return field 272 | 273 | 274 | def mojibake(field, field_name): 275 | """Attempts to fix mojibake (text that was encoded in one encoding and deco- 276 | ded in another, perhaps multiple times). See util.py. 277 | 278 | Return fixed string. 279 | """ 280 | 281 | # Skip fields with missing values 282 | if pd.isna(field): 283 | return field 284 | 285 | # We don't want ftfy to change “smart quotes” to "ASCII quotes" 286 | config = TextFixerConfig(uncurl_quotes=False) 287 | 288 | if is_mojibake(field): 289 | print(f"{Fore.GREEN}Fixing encoding issue ({field_name}): {Fore.RESET}{field}") 290 | 291 | return fix_text(field, config) 292 | else: 293 | return field 294 | 295 | 296 | def countries_match_regions(row, exclude): 297 | """Check for the scenario where an item has country coverage metadata, but 298 | does not have the corresponding region metadata. For example, an item that 299 | has country coverage "Kenya" should also have region "Eastern Africa" acc- 300 | ording to the UN M.49 classification scheme. 301 | 302 | See: https://unstats.un.org/unsd/methodology/m49/ 303 | 304 | Return fixed string. 305 | """ 306 | # Initialize some variables at global scope so that we can set them in the 307 | # loop scope below and still be able to access them afterwards. 308 | country_column_name = "" 309 | region_column_name = "" 310 | title_column_name = "" 311 | 312 | # Instantiate a CountryConverter() object here. According to the docs it is 313 | # more performant to do that as opposed to calling coco.convert() directly 314 | # because we don't need to re-load the country data with each iteration. 315 | cc = coco.CountryConverter() 316 | 317 | # Set logging to ERROR so country_converter's convert() doesn't print the 318 | # "not found in regex" warning message to the screen. 
319 | logging.basicConfig(level=logging.ERROR) 320 | 321 | # Iterate over the labels of the current row's values to get the names of 322 | # the title and citation columns. Then we check if the title is present in 323 | # the citation. 324 | for label in row.axes[0]: 325 | # Find the name of the country column 326 | match = re.match(r"^.*?country.*$", label) 327 | if match is not None: 328 | country_column_name = label 329 | 330 | # Find the name of the region column, but make sure it's not subregion! 331 | match = re.match(r"^.*?region.*$", label) 332 | if match is not None and "sub" not in label: 333 | region_column_name = label 334 | 335 | # Find the name of the title column 336 | match = re.match(r"^(dc|dcterms)\.title.*$", label) 337 | if match is not None: 338 | title_column_name = label 339 | 340 | # Make sure the user has not asked to exclude any metadata fields. If so, we 341 | # should return immediately. 342 | column_names = [country_column_name, region_column_name, title_column_name] 343 | if any(field in column_names for field in exclude): 344 | return row 345 | 346 | # Make sure we found the country and region columns 347 | if country_column_name != "" and region_column_name != "": 348 | # If we don't have any countries then we should return early before 349 | # suggesting regions. 350 | if row[country_column_name] is not None: 351 | countries = row[country_column_name].split("||") 352 | else: 353 | return row 354 | 355 | if row[region_column_name] is not None: 356 | regions = row[region_column_name].split("||") 357 | else: 358 | regions = [] 359 | 360 | # An empty list for our regions so we can keep track for all countries 361 | missing_regions = [] 362 | 363 | for country in countries: 364 | # Look up the UN M.49 regions for this country code. CoCo seems to 365 | # only list the direct region, ie Western Africa, rather than all 366 | # the parent regions ("Sub-Saharan Africa", "Africa", "World") 367 | un_region = cc.convert(names=country, to="UNRegion") 368 | 369 | # Add the new un_region to regions if it is not "not found" and if 370 | # it doesn't already exist in regions. 371 | if un_region != "not found" and un_region not in regions: 372 | if un_region not in missing_regions: 373 | try: 374 | print( 375 | f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}{row[title_column_name]}" 376 | ) 377 | except KeyError: 378 | # If there is no title column in the CSV we will print 379 | # the fix without the title instead of crashing. 380 | print( 381 | f"{Fore.YELLOW}Adding missing region ({un_region}): {Fore.RESET}<title field not present>" 382 | ) 383 | 384 | missing_regions.append(un_region) 385 | 386 | if len(missing_regions) > 0: 387 | # Add the missing regions back to the row, paying attention to whether 388 | # or not the row's region column is None (aka null) or just an empty 389 | # string (length would be 0). 390 | if row[region_column_name] is not None and len(row[region_column_name]) > 0: 391 | row[region_column_name] = ( 392 | row[region_column_name] + "||" + "||".join(missing_regions) 393 | ) 394 | else: 395 | row[region_column_name] = "||".join(missing_regions) 396 | 397 | return row 398 | 399 | 400 | def normalize_dois(field): 401 | """Normalize DOIs. 402 | 403 | DOIs are meant to be globally unique identifiers. 
They are case insensitive, 404 | but in order to compare them robustly they should be normalized to a common 405 | format: 406 | 407 | - strip leading and trailing whitespace 408 | - lowercase all ASCII characters 409 | - convert all variations to https://doi.org/10.xxxx/xxxx URI format 410 | 411 | Return string with normalized DOI. 412 | 413 | See: https://www.crossref.org/documentation/member-setup/constructing-your-dois/ 414 | """ 415 | 416 | # Skip fields with missing values 417 | if pd.isna(field): 418 | return 419 | 420 | # Try to split multi-value field on "||" separator 421 | values = field.split("||") 422 | 423 | # Initialize an empty list to hold the de-duplicated values 424 | new_values = [] 425 | 426 | # Iterate over all values (most items will only have one DOI) 427 | for value in values: 428 | # Strip leading and trailing whitespace 429 | new_value = value.strip() 430 | 431 | new_value = new_value.lower() 432 | 433 | # Convert to HTTPS 434 | pattern = re.compile(r"^http://") 435 | match = re.findall(pattern, new_value) 436 | 437 | if match: 438 | new_value = re.sub(pattern, "https://", new_value) 439 | 440 | # Convert dx.doi.org to doi.org 441 | pattern = re.compile(r"dx\.doi\.org") 442 | match = re.findall(pattern, new_value) 443 | 444 | if match: 445 | new_value = re.sub(pattern, "doi.org", new_value) 446 | 447 | # Convert www.doi.org to doi.org 448 | pattern = re.compile(r"www\.doi\.org") 449 | match = re.findall(pattern, new_value) 450 | 451 | if match: 452 | new_value = re.sub(pattern, "doi.org", new_value) 453 | 454 | # Convert erroneous %2f to / 455 | pattern = re.compile("%2f") 456 | match = re.findall(pattern, new_value) 457 | 458 | if match: 459 | new_value = re.sub(pattern, "/", new_value) 460 | 461 | # Replace values like doi: 10.11648/j.jps.20140201.14 462 | pattern = re.compile(r"^doi: 10\.") 463 | match = re.findall(pattern, new_value) 464 | 465 | if match: 466 | new_value = re.sub(pattern, "https://doi.org/10.", new_value) 467 | 468 | # Replace values like 10.3390/foods12010115 469 | pattern = re.compile(r"^10\.") 470 | match = re.findall(pattern, new_value) 471 | 472 | if match: 473 | new_value = re.sub(pattern, "https://doi.org/10.", new_value) 474 | 475 | if new_value != value: 476 | print(f"{Fore.GREEN}Normalized DOI: {Fore.RESET}{value}") 477 | 478 | new_values.append(new_value) 479 | 480 | new_field = "||".join(new_values) 481 | 482 | return new_field 483 | -------------------------------------------------------------------------------- /csv_metadata_quality/util.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | 4 | import json 5 | import os 6 | 7 | from ftfy.badness import is_bad 8 | 9 | 10 | def is_nfc(field): 11 | """Utility function to check whether a string is using normalized Unicode. 12 | Python's built-in unicodedata library has the is_normalized() function, but 13 | it was only introduced in Python 3.8. By using a simple utility function we 14 | are able to run on Python >= 3.6 again. 15 | 16 | See: https://docs.python.org/3/library/unicodedata.html 17 | 18 | Return boolean. 19 | """ 20 | 21 | from unicodedata import normalize 22 | 23 | return field == normalize("NFC", field) 24 | 25 | 26 | def is_mojibake(field): 27 | """Determines whether a string contains mojibake. 28 | 29 | We commonly deal with CSV files that were *encoded* in UTF-8, but decoded 30 | as something else like CP-1252 (Windows Latin). 
This manifests in the form 31 | of "mojibake", for example: 32 | 33 | - CIAT Publicaçao 34 | - CIAT Publicación 35 | 36 | This uses the excellent "fixes text for you" (ftfy) library to determine 37 | whether a string contains characters that have been encoded in one encoding 38 | and decoded in another. 39 | 40 | Inspired by this code snippet from Martijn Pieters on StackOverflow: 41 | https://stackoverflow.com/questions/29071995/identify-garbage-unicode-string-using-python 42 | 43 | Return boolean. 44 | """ 45 | if not is_bad(field): 46 | # Nothing weird, should be okay 47 | return False 48 | try: 49 | field.encode("sloppy-windows-1252") 50 | except UnicodeEncodeError: 51 | # Not CP-1252 encodable, probably fine 52 | return False 53 | else: 54 | # Encodable as CP-1252, Mojibake alert level high 55 | return True 56 | 57 | 58 | def load_spdx_licenses(): 59 | """Returns a Python list of SPDX short license identifiers.""" 60 | 61 | with open(os.path.join(os.path.dirname(__file__), "data/licenses.json")) as f: 62 | licenses = json.load(f) 63 | 64 | # List comprehension to extract the license ID for each license 65 | return [license["licenseId"] for license in licenses["licenses"]] 66 | -------------------------------------------------------------------------------- /csv_metadata_quality/version.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | VERSION = "0.7.0" 4 | -------------------------------------------------------------------------------- /data/abstract-check.csv: -------------------------------------------------------------------------------- 1 | id,dc.title,dcterms.abstract 2 | 1,Normal item,This is an abstract 3 | 2,Leading whitespace, This is an abstract 4 | 3,Trailing whitespace,This is an abstract 5 | 4,Consecutive whitespace,This is an abstract 6 | 5,Newline,"This 7 | is an abstract" 8 | 6,Newline with leading whitespace," This 9 | is an abstract" 10 | 7,Newline with trailing whitespace,"This 11 | is an abstract " 12 | 8,Newline with consecutive whitespace,"This 13 | is an abstract" 14 | 9,Multiple newlines,"This 15 | is 16 | an 17 | abstract" 18 | -------------------------------------------------------------------------------- /data/test-geography.csv: -------------------------------------------------------------------------------- 1 | dc.title,dcterms.issued,dcterms.type,dc.contributor.author,cg.coverage.country,cg.coverage.region 2 | No country,2022-09-01,Report,"Orth, Alan",, 3 | Matching country and region,2022-09-01,Report,"Orth, Alan",Kenya,Eastern Africa 4 | Missing region,2022-09-01,Report,"Orth, Alan",Kenya, 5 | Caribbean country with matching region,2022-09-01,Report,"Orth, Alan",Bahamas,Caribbean 6 | Caribbean country with no region,2022-09-01,Report,"Orth, Alan",Bahamas, 7 | Fake country with no region,2022-09-01,Report,"Orth, Alan",Yeah Baby, 8 | SE Asian country with matching region,2022-09-01,Report,"Orth, Alan",Cambodia,South-eastern Asia 9 | SE Asian country with no region,2022-09-01,Report,"Orth, Alan",Cambodia, 10 | Duplicate countries with matching region,2022-09-01,Report,"Orth, Alan",Kenya||Kenya,Eastern Africa 11 | Duplicate countries with missing regions,2022-09-01,Report,"Orth, Alan",Kenya||Kenya, 12 | Multiple countries with no regions,2022-09-01,Report,"Orth, Alan",Kenya||Bahamas, 13 | Multiple countries with mixed matching regions,2022-09-01,Report,"Orth, Alan",Kenya||Bahamas,Eastern Africa 14 | 
-------------------------------------------------------------------------------- /data/test.csv: -------------------------------------------------------------------------------- 1 | dc.title,dcterms.issued,dc.identifier.issn,dc.identifier.isbn,dcterms.language,dcterms.subject,cg.coverage.country,filename,dcterms.license,dcterms.type,dcterms.bibliographicCitation,cg.identifier.doi,cg.coverage.region,cg.coverage.subregion 2 | Leading space,2019-07-29,,,,,,,,,,,, 3 | Trailing space ,2019-07-29,,,,,,,,,,,, 4 | Excessive space,2019-07-29,,,,,,,,,,,, 5 | Miscellaenous ||whitespace | issues ,2019-07-29,,,,,,,,,,,, 6 | Duplicate||Duplicate,2019-07-29,,,,,,,,,,,, 7 | Invalid ISSN,2019-07-29,2321-2302,,,,,,,,,,, 8 | Invalid ISBN,2019-07-29,,978-0-306-40615-6,,,,,,,,,, 9 | Multiple valid ISSNs,2019-07-29,0378-5955||0024-9319,,,,,,,,,,, 10 | Multiple valid ISBNs,2019-07-29,,99921-58-10-7||978-0-306-40615-7,,,,,,,,,, 11 | Invalid date,2019-07-260,,,,,,,,,,,, 12 | Multiple dates,2019-07-26||2019-01-10,,,,,,,,,,,, 13 | Invalid multi-value separator,2019-07-29,0378-5955|0024-9319,,,,,,,,,,, 14 | Unnecessary Unicode​,2019-07-29,,,,,,,,,,,, 15 | Suspicious character||foreˆt,2019-07-29,,,,,,,,,,,, 16 | Invalid ISO 639-1 (alpha 2) language,2019-07-29,,,jp,,,,,,,,, 17 | Invalid ISO 639-3 (alpha 3) language,2019-07-29,,,chi,,,,,,,,, 18 | Invalid language,2019-07-29,,,Span,,,,,,,,, 19 | Invalid AGROVOC subject,2019-07-29,,,,LIVESTOCK||FOREST,,,,,,,, 20 | Newline (LF),2019-07-30,,,,"TANZA 21 | NIA",,,,,,,, 22 | Missing date,,,,,,,,,,,,, 23 | Invalid country,2019-08-01,,,,,KENYAA,,,,,,, 24 | Uncommon filename extension,2019-08-10,,,,,,file.pdf.lck,,,,,, 25 | Unneccesary unicode (U+002D + U+00AD),2019-08-10,,978-­92-­9043-­823-­6,,,,,,,,,, 26 | "Missing space,after comma",2019-08-27,,,,,,,,,,,, 27 | Incorrect ISO 639-1 language,2019-09-26,,,es,,,,,,,,, 28 | Incorrect ISO 639-3 language,2019-09-26,,,spa,,,,,,,,, 29 | Composéd Unicode,2020-01-14,,,,,,,,,,,, 30 | Decomposéd Unicode,2020-01-14,,,,,,,,,,,, 31 | Unnecessary multi-value separator,2021-01-03,0378-5955||,,,,,,,,,,, 32 | Invalid SPDX license identifier,2021-03-11,,,,,,,CC-BY,,,,, 33 | Duplicate Title,2021-03-17,,,,,,,,Report,,,, 34 | Duplicate Title,2021-03-17,,,,,,,,Report,,,, 35 | Mojibake,2021-03-18,,,,Publicaçao CIAT,,,,Report,,,, 36 | "DOI in citation, but missing cg.identifier.doi",2021-10-06,,,,,,,,,"Orth, A. 2021. DOI in citation, but missing cg.identifier.doi. doi: 10.1186/1743-422X-9-218",,, 37 | Title missing from citation,2021-12-05,,,,,,,,,"Orth, A. 2021. 
Title missing f rom citation.",,, 38 | Country missing region,2021-12-08,,,,,Kenya,,,,,,, 39 | Subregion field shouldn’t trigger region checks,2022-12-07,,,,,Kenya,,,,,,Eastern Africa,Baringo 40 | DOI with HTTP and dx.doi.org,2024-04-23,,,,,,,,,,http://dx.doi.org/10.1016/j.envc.2023.100794,, 41 | DOI with colon,2024-04-23,,,,,,,,,,doi: 10.11648/j.jps.20140201.14,, 42 | Upper case bare DOI,2024-04-23,,,,,,,,,,10.19103/AS.2018.0043.16,, 43 | DOI with %2f,2024-06-25,,,,,,,,,,https://doi.org/10.1016%2fj.envc.2023.100794,, 44 | -------------------------------------------------------------------------------- /data/test.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ilri/csv-metadata-quality/753f3340a32dbd6ade4d6798d34bccdfeba63dd8/data/test.xlsx -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "csv-metadata-quality" 3 | version = "0.7.0" 4 | description="A simple, but opinionated CSV quality checking and fixing pipeline for CSVs in the DSpace ecosystem." 5 | authors = [ 6 | { name = "Alan Orth", email = "alan.orth@gmail.com" } 7 | ] 8 | license= { file = "LICENSE.txt" } 9 | dependencies = [ 10 | "pandas[feather,performance]~=2.2.3", 11 | "python-stdnum~=1.20", 12 | "requests~=2.32.3", 13 | "requests-cache~=1.2.1", 14 | "colorama~=0.4", 15 | "ftfy~=6.3.0", 16 | "country-converter~=1.3", 17 | "pycountry~=24.6.1", 18 | "py3langid~=0.3", 19 | ] 20 | readme = "README.md" 21 | requires-python = ">= 3.10" 22 | 23 | classifiers = [ 24 | "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", 25 | "Natural Language :: English", 26 | "Operating System :: OS Independent", 27 | "Programming Language :: Python :: 3.10", 28 | "Programming Language :: Python :: 3.11", 29 | "Programming Language :: Python :: 3.12", 30 | "Programming Language :: Python :: 3.13", 31 | "Programming Language :: Python :: Implementation :: CPython", 32 | ] 33 | 34 | [project.urls] 35 | repository = "https://github.com/ilri/csv-metadata-quality" 36 | homepage = "https://github.com/ilri/csv-metadata-quality" 37 | 38 | [project.scripts] 39 | csv-metadata-quality = 'csv_metadata_quality.__main__:main' 40 | 41 | # So uv doesn't fall back to setuptools 42 | # See: https://packaging.python.org/en/latest/tutorials/packaging-projects/#choosing-build-backend 43 | [build-system] 44 | requires = ["hatchling"] 45 | build-backend = "hatchling.build" 46 | 47 | [dependency-groups] 48 | dev = [ 49 | "pytest~=8.3", 50 | "isort~=6.0", 51 | "csvkit~=2.0", 52 | "ipython~=8.31", 53 | ] 54 | 55 | # So hatch doesn't try to build other top-level directories like "data" 56 | [tool.hatch.build.targets.wheel] 57 | packages = ["csv_metadata_quality"] 58 | 59 | [tool.isort] 60 | profile = "black" 61 | line_length=88 62 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts= -rsxX -s -v --strict-markers --capture=sys 3 | filterwarnings = 4 | error::UserWarning 5 | ignore:.*U.* is deprecated:DeprecationWarning 6 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | 
"config:recommended" 5 | ], 6 | "pip_requirements": { 7 | "enabled": false 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv export --no-dev 3 | -e . 4 | attrs==25.1.0 \ 5 | --hash=sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e \ 6 | --hash=sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a 7 | bottleneck==1.4.2 \ 8 | --hash=sha256:037315c56605128a39f77d19af6a6019dc8c21a63694a4bfef3c026ed963be2e \ 9 | --hash=sha256:070d22f2f62ab81297380a89492cca931e4d9443fa4b84c2baeb52db09c3b1b4 \ 10 | --hash=sha256:122845e3106c85465551d4a9a3777841347cfedfbebb3aa985cca110e07030b1 \ 11 | --hash=sha256:125436df93751a226eab1732783aa8f6125e88e779587aa61be071fb66e41f9d \ 12 | --hash=sha256:1f61658ebdf5a178298544336b65020730bf86cc092dab5f6579a99a86bd888b \ 13 | --hash=sha256:1fc4e7645bd425c05e05acd5541e9e09cb4179e71164e862f082561bf4509eac \ 14 | --hash=sha256:26b5f0531f7044befaad95c20365dd666372e66bdacbfaf009ff65d60285534d \ 15 | --hash=sha256:2c9dbaf737b605b30c81611f2c1d197c2fd2e46c33f605876c1d332d3360c4fc \ 16 | --hash=sha256:2db287f6ecdbb1c998085eca9b717fec2bfc48a4ab6ae070a9820ba8ab59c90b \ 17 | --hash=sha256:2e2fe327dc2d0564e295a5857a252755103f8c6e05b07d3ff80a69afaa9f5065 \ 18 | --hash=sha256:48c6b9d9287c4102b803fcb01ae66ae7ef6b310b711b4b7b7e23bf952894dc05 \ 19 | --hash=sha256:4c6df9a60ec6ab88fec934ca864266ba95edd89c490af71dc9cd8afb2a54ebd9 \ 20 | --hash=sha256:6282fa925ac3768f66e3547f89a512376d3f9de7ef53bdd37aa29232fd864054 \ 21 | --hash=sha256:6b7790ca8658cd69e3cc0d0e4ff0e9829d60849bf7945fbd7344fbce05b2bbb8 \ 22 | --hash=sha256:7363b3c8ce6ca433779cd7e96bcb94c0e516dcacadff0011adcbf0b3ac86bc9d \ 23 | --hash=sha256:7c7d29c044a3511b36fd744503c3e697e279c273a8477a6d91a2831d04fd19e0 \ 24 | --hash=sha256:7ebbcbe5d4062e37507b9a81e2aacdb1fcccc6193f7feff124ef2b5a6a5eb740 \ 25 | --hash=sha256:89651ef18c06616850203bf8875c958c5d316ea48d8ba60d9b450199d39ae391 \ 26 | --hash=sha256:964f6ac4118ddab3bbbac79d4f726b093459be751baba73ee0aa364666e8068e \ 27 | --hash=sha256:99778329331d5fae8df19772a019e8b73ba4d9d1650f110cd995ab7657114db0 \ 28 | --hash=sha256:a74ddd0417f42eeaba37375f0fc065b28451e0fba45cb2f99e88880b10b3fa43 \ 29 | --hash=sha256:b6902ebf3e85315b481bc084f10c5770f8240275ad1e039ac69c7c8d2013b040 \ 30 | --hash=sha256:c1c885ad02a6a8fa1f7ee9099f29b9d4c03eb1da2c7ab25839482d5cce739021 \ 31 | --hash=sha256:c2fd34b9b490204f95288f0dd35d37042486a95029617246c88c0f94a0ab49fe \ 32 | --hash=sha256:c663cbba8f52011fd82ee08c6a85c93b34b19e0e7ebba322d2d67809f34e0597 \ 33 | --hash=sha256:e56a206fbf48e3b8054a964398bf1ed843e9625d3c6bdbeb7898cb48bf97441b \ 34 | --hash=sha256:e7a1b023de1de3d84b18826462718fba548fed41870df44354f9ab6a414ea82f \ 35 | --hash=sha256:eb0c611d15b0fd8f511d288e8964e4725b4b3b0d9d310880cf0ff6b8dd03c859 \ 36 | --hash=sha256:fa8e8e1799dea5483ce6669462660f9d9a95649f6f98a80d315b84ec89f449f4 37 | cattrs==24.1.2 \ 38 | --hash=sha256:67c7495b760168d931a10233f979b28dc04daf853b30752246f4f8471c6d68d0 \ 39 | --hash=sha256:8028cfe1ff5382df59dd36474a86e02d817b06eaf8af84555441bac915d2ef85 40 | certifi==2024.12.14 \ 41 | --hash=sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56 \ 42 | --hash=sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db 43 | charset-normalizer==3.4.1 \ 44 | 
--hash=sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd \ 45 | --hash=sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601 \ 46 | --hash=sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd \ 47 | --hash=sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d \ 48 | --hash=sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313 \ 49 | --hash=sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd \ 50 | --hash=sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa \ 51 | --hash=sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8 \ 52 | --hash=sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1 \ 53 | --hash=sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2 \ 54 | --hash=sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d \ 55 | --hash=sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b \ 56 | --hash=sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408 \ 57 | --hash=sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3 \ 58 | --hash=sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f \ 59 | --hash=sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a \ 60 | --hash=sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146 \ 61 | --hash=sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6 \ 62 | --hash=sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9 \ 63 | --hash=sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f \ 64 | --hash=sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545 \ 65 | --hash=sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176 \ 66 | --hash=sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b \ 67 | --hash=sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f \ 68 | --hash=sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b \ 69 | --hash=sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9 \ 70 | --hash=sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125 \ 71 | --hash=sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de \ 72 | --hash=sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11 \ 73 | --hash=sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35 \ 74 | --hash=sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f \ 75 | --hash=sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda \ 76 | --hash=sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a \ 77 | --hash=sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971 \ 78 | --hash=sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d \ 79 | --hash=sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f \ 80 | --hash=sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757 \ 81 | --hash=sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a \ 82 | --hash=sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886 \ 83 | --hash=sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77 \ 84 | --hash=sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76 \ 85 | 
--hash=sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247 \ 86 | --hash=sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85 \ 87 | --hash=sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb \ 88 | --hash=sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7 \ 89 | --hash=sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037 \ 90 | --hash=sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1 \ 91 | --hash=sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807 \ 92 | --hash=sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407 \ 93 | --hash=sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12 \ 94 | --hash=sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3 \ 95 | --hash=sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd \ 96 | --hash=sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00 \ 97 | --hash=sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616 98 | colorama==0.4.6 \ 99 | --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ 100 | --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 101 | country-converter==1.3 \ 102 | --hash=sha256:006958c83adeada455d2f178921fdd051def736259ff250fada912eaf3ca8cf1 \ 103 | --hash=sha256:f6a1a14d1f98112ca90a5198f645f4e60bb73840e98f3f733893ff5b617c2f38 104 | exceptiongroup==1.2.2 ; python_full_version < '3.11' \ 105 | --hash=sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b \ 106 | --hash=sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc 107 | ftfy==6.3.1 \ 108 | --hash=sha256:7c70eb532015cd2f9adb53f101fb6c7945988d023a085d127d1573dc49dd0083 \ 109 | --hash=sha256:9b3c3d90f84fb267fe64d375a07b7f8912d817cf86009ae134aa03e1819506ec 110 | idna==3.10 \ 111 | --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ 112 | --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 113 | llvmlite==0.44.0 \ 114 | --hash=sha256:07667d66a5d150abed9157ab6c0b9393c9356f229784a4385c02f99e94fc94d4 \ 115 | --hash=sha256:1d671a56acf725bf1b531d5ef76b86660a5ab8ef19bb6a46064a705c6ca80aad \ 116 | --hash=sha256:2fb7c4f2fb86cbae6dca3db9ab203eeea0e22d73b99bc2341cdf9de93612e930 \ 117 | --hash=sha256:319bddd44e5f71ae2689859b7203080716448a3cd1128fb144fe5c055219d516 \ 118 | --hash=sha256:40526fb5e313d7b96bda4cbb2c85cd5374e04d80732dd36a282d72a560bb6408 \ 119 | --hash=sha256:41e3839150db4330e1b2716c0be3b5c4672525b4c9005e17c7597f835f351ce2 \ 120 | --hash=sha256:46224058b13c96af1365290bdfebe9a6264ae62fb79b2b55693deed11657a8bf \ 121 | --hash=sha256:5f79a728e0435493611c9f405168682bb75ffd1fbe6fc360733b850c80a026db \ 122 | --hash=sha256:7202b678cdf904823c764ee0fe2dfe38a76981f4c1e51715b4cb5abb6cf1d9e8 \ 123 | --hash=sha256:9c58867118bad04a0bb22a2e0068c693719658105e40009ffe95c7000fcde88e \ 124 | --hash=sha256:9fbadbfba8422123bab5535b293da1cf72f9f478a65645ecd73e781f962ca614 \ 125 | --hash=sha256:aa0097052c32bf721a4efc03bd109d335dfa57d9bffb3d4c24cc680711b8b4fc \ 126 | --hash=sha256:ace564d9fa44bb91eb6e6d8e7754977783c68e90a471ea7ce913bff30bd62427 \ 127 | --hash=sha256:c0143a5ef336da14deaa8ec26c5449ad5b6a2b564df82fcef4be040b9cacfea9 \ 128 | --hash=sha256:c5d22c3bfc842668168a786af4205ec8e3ad29fb1bc03fd11fd48460d0df64c1 \ 129 | --hash=sha256:cccf8eb28f24840f2689fb1a45f9c0f7e582dd24e088dcf96e424834af11f791 \ 130 | 
--hash=sha256:d752f89e31b66db6f8da06df8b39f9b91e78c5feea1bf9e8c1fba1d1c24c065d \ 131 | --hash=sha256:d8489634d43c20cd0ad71330dde1d5bc7b9966937a263ff1ec1cebb90dc50955 \ 132 | --hash=sha256:eae7e2d4ca8f88f89d315b48c6b741dcb925d6a1042da694aa16ab3dd4cbd3a1 \ 133 | --hash=sha256:eed7d5f29136bda63b6d7804c279e2b72e08c952b7c5df61f45db408e0ee52f3 \ 134 | --hash=sha256:f01a394e9c9b7b1d4e63c327b096d10f6f0ed149ef53d38a09b3749dcf8c9610 135 | numba==0.61.0 \ 136 | --hash=sha256:074cd38c5b1f9c65a4319d1f3928165f48975ef0537ad43385b2bd908e6e2e35 \ 137 | --hash=sha256:0ebbd4827091384ab8c4615ba1b3ca8bc639a3a000157d9c37ba85d34cd0da1b \ 138 | --hash=sha256:152146ecdbb8d8176f294e9f755411e6f270103a11c3ff50cecc413f794e52c8 \ 139 | --hash=sha256:21c2fe25019267a608e2710a6a947f557486b4b0478b02e45a81cf606a05a7d4 \ 140 | --hash=sha256:43aa4d7d10c542d3c78106b8481e0cbaaec788c39ee8e3d7901682748ffdf0b4 \ 141 | --hash=sha256:44240e694d4aa321430c97b21453e46014fe6c7b8b7d932afa7f6a88cc5d7e5e \ 142 | --hash=sha256:46c5ae094fb3706f5adf9021bfb7fc11e44818d61afee695cdee4eadfed45e98 \ 143 | --hash=sha256:550d389573bc3b895e1ccb18289feea11d937011de4d278b09dc7ed585d1cdcb \ 144 | --hash=sha256:5cafa6095716fcb081618c28a8d27bf7c001e09696f595b41836dec114be2905 \ 145 | --hash=sha256:5f6c452dca1de8e60e593f7066df052dd8da09b243566ecd26d2b796e5d3087d \ 146 | --hash=sha256:6fb74e81aa78a2303e30593d8331327dfc0d2522b5db05ac967556a26db3ef87 \ 147 | --hash=sha256:74250b26ed6a1428763e774dc5b2d4e70d93f73795635b5412b8346a4d054574 \ 148 | --hash=sha256:764f0e47004f126f58c3b28e0a02374c420a9d15157b90806d68590f5c20cc89 \ 149 | --hash=sha256:888d2e89b8160899e19591467e8fdd4970e07606e1fbc248f239c89818d5f925 \ 150 | --hash=sha256:9cab9783a700fa428b1a54d65295122bc03b3de1d01fb819a6b9dbbddfdb8c43 \ 151 | --hash=sha256:9f25f7fef0206d55c1cfb796ad833cbbc044e2884751e56e798351280038484c \ 152 | --hash=sha256:b72bbc8708e98b3741ad0c63f9929c47b623cc4ee86e17030a4f3e301e8401ac \ 153 | --hash=sha256:b96fafbdcf6f69b69855273e988696aae4974115a815f6818fef4af7afa1f6b8 \ 154 | --hash=sha256:bf64c2d0f3d161af603de3825172fb83c2600bcb1d53ae8ea568d4c53ba6ac08 \ 155 | --hash=sha256:de5aa7904741425f28e1028b85850b31f0a245e9eb4f7c38507fb893283a066c \ 156 | --hash=sha256:ffe9fe373ed30638d6e20a0269f817b2c75d447141f55a675bfcf2d1fe2e87fb 157 | numexpr==2.10.2 \ 158 | --hash=sha256:0db5ff5183935d1612653559c319922143e8fa3019007696571b13135f216458 \ 159 | --hash=sha256:15f59655458056fdb3a621b1bb8e071581ccf7e823916c7568bb7c9a3e393025 \ 160 | --hash=sha256:3bf01ec502d89944e49e9c1b5cc7c7085be8ca2eb9dd46a0eafd218afbdbd5f5 \ 161 | --hash=sha256:3fc2b8035a0c2cdc352e58c3875cb668836018065cbf5752cb531015d9a568d8 \ 162 | --hash=sha256:4213a92efa9770bc28e3792134e27c7e5c7e97068bdfb8ba395baebbd12f991b \ 163 | --hash=sha256:5191ba8f2975cb9703afc04ae845a929e193498c0e8bcd408ecb147b35978470 \ 164 | --hash=sha256:57b59cbb5dcce4edf09cd6ce0b57ff60312479930099ca8d944c2fac896a1ead \ 165 | --hash=sha256:5b3f814437d5a10797f8d89d2037cca2c9d9fa578520fc911f894edafed6ea3e \ 166 | --hash=sha256:6b360eb8d392483410fe6a3d5a7144afa298c9a0aa3e9fe193e89590b47dd477 \ 167 | --hash=sha256:81d1dde7dd6166d8ff5727bb46ab42a6b0048db0e97ceb84a121334a404a800f \ 168 | --hash=sha256:83fcb11988b57cc25b028a36d285287d706d1f536ebf2662ea30bd990e0de8b9 \ 169 | --hash=sha256:9309f2e43fe6e4560699ef5c27d7a848b3ff38549b6b57194207cf0e88900527 \ 170 | --hash=sha256:97298b14f0105a794bea06fd9fbc5c423bd3ff4d88cbc618860b83eb7a436ad6 \ 171 | --hash=sha256:a37d6a51ec328c561b2ca8a2bef07025642eca995b8553a5267d0018c732976d \ 172 | 
--hash=sha256:a42963bd4c62d8afa4f51e7974debfa39a048383f653544ab54f50a2f7ec6c42 \ 173 | --hash=sha256:b0aff6b48ebc99d2f54f27b5f73a58cb92fde650aeff1b397c71c8788b4fff1a \ 174 | --hash=sha256:b5323a46e75832334f1af86da1ef6ff0add00fbacdd266250be872b438bdf2be \ 175 | --hash=sha256:b5b0e82d2109c1d9e63fcd5ea177d80a11b881157ab61178ddbdebd4c561ea46 \ 176 | --hash=sha256:ba85371c9a8d03e115f4dfb6d25dfbce05387002b9bc85016af939a1da9624f0 \ 177 | --hash=sha256:cb845b2d4f9f8ef0eb1c9884f2b64780a85d3b5ae4eeb26ae2b0019f489cd35e \ 178 | --hash=sha256:ce8cccf944339051e44a49a124a06287fe3066d0acbff33d1aa5aee10a96abb7 \ 179 | --hash=sha256:d7a3fc83c959288544db3adc70612475d8ad53a66c69198105c74036182d10dd \ 180 | --hash=sha256:d9a42f5c24880350d88933c4efee91b857c378aaea7e8b86221fff569069841e \ 181 | --hash=sha256:deb64235af9eeba59fcefa67e82fa80cfc0662e1b0aa373b7118a28da124d51d \ 182 | --hash=sha256:e2d0ae24b0728e4bc3f1d3f33310340d67321d36d6043f7ce26897f4f1042db0 \ 183 | --hash=sha256:ebb73b93f5c4d6994f357fa5a47a9f7a5485577e633b3c46a603cb01445bbb19 \ 184 | --hash=sha256:ebdbef5763ca057eea0c2b5698e4439d084a0505d9d6e94f4804f26e8890c45e \ 185 | --hash=sha256:ec04c9a3c050c175348801e27c18c68d28673b7bfb865ef88ce333be523bbc01 \ 186 | --hash=sha256:f9d7805ccb6be2d3b0f7f6fad3707a09ac537811e8e9964f4074d28cb35543db 187 | numpy==2.1.3 \ 188 | --hash=sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe \ 189 | --hash=sha256:02135ade8b8a84011cbb67dc44e07c58f28575cf9ecf8ab304e51c05528c19f0 \ 190 | --hash=sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48 \ 191 | --hash=sha256:0d30c543f02e84e92c4b1f415b7c6b5326cbe45ee7882b6b77db7195fb971e3a \ 192 | --hash=sha256:0fa14563cc46422e99daef53d725d0c326e99e468a9320a240affffe87852564 \ 193 | --hash=sha256:13138eadd4f4da03074851a698ffa7e405f41a0845a6b1ad135b81596e4e9958 \ 194 | --hash=sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17 \ 195 | --hash=sha256:15cb89f39fa6d0bdfb600ea24b250e5f1a3df23f901f51c8debaa6a5d122b2f0 \ 196 | --hash=sha256:17ee83a1f4fef3c94d16dc1802b998668b5419362c8a4f4e8a491de1b41cc3ee \ 197 | --hash=sha256:2312b2aa89e1f43ecea6da6ea9a810d06aae08321609d8dc0d0eda6d946a541b \ 198 | --hash=sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4 \ 199 | --hash=sha256:3522b0dfe983a575e6a9ab3a4a4dfe156c3e428468ff08ce582b9bb6bd1d71d4 \ 200 | --hash=sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6 \ 201 | --hash=sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4 \ 202 | --hash=sha256:4d1167c53b93f1f5d8a139a742b3c6f4d429b54e74e6b57d0eff40045187b15d \ 203 | --hash=sha256:4f2015dfe437dfebbfce7c85c7b53d81ba49e71ba7eadbf1df40c915af75979f \ 204 | --hash=sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f \ 205 | --hash=sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f \ 206 | --hash=sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56 \ 207 | --hash=sha256:576a1c1d25e9e02ed7fa5477f30a127fe56debd53b8d2c89d5578f9857d03ca9 \ 208 | --hash=sha256:6a4825252fcc430a182ac4dee5a505053d262c807f8a924603d411f6718b88fd \ 209 | --hash=sha256:72dcc4a35a8515d83e76b58fdf8113a5c969ccd505c8a946759b24e3182d1f23 \ 210 | --hash=sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed \ 211 | --hash=sha256:762479be47a4863e261a840e8e01608d124ee1361e48b96916f38b119cfda04a \ 212 | --hash=sha256:78574ac2d1a4a02421f25da9559850d59457bac82f2b8d7a44fe83a64f770098 \ 213 | 
--hash=sha256:825656d0743699c529c5943554d223c021ff0494ff1442152ce887ef4f7561a1 \ 214 | --hash=sha256:8637dcd2caa676e475503d1f8fdb327bc495554e10838019651b76d17b98e512 \ 215 | --hash=sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f \ 216 | --hash=sha256:973faafebaae4c0aaa1a1ca1ce02434554d67e628b8d805e61f874b84e136b09 \ 217 | --hash=sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f \ 218 | --hash=sha256:a38c19106902bb19351b83802531fea19dee18e5b37b36454f27f11ff956f7fc \ 219 | --hash=sha256:a6b46587b14b888e95e4a24d7b13ae91fa22386c199ee7b418f449032b2fa3b8 \ 220 | --hash=sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0 \ 221 | --hash=sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761 \ 222 | --hash=sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef \ 223 | --hash=sha256:b47fbb433d3260adcd51eb54f92a2ffbc90a4595f8970ee00e064c644ac788f5 \ 224 | --hash=sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e \ 225 | --hash=sha256:bc6f24b3d1ecc1eebfbf5d6051faa49af40b03be1aaa781ebdadcbc090b4539b \ 226 | --hash=sha256:c006b607a865b07cd981ccb218a04fc86b600411d83d6fc261357f1c0966755d \ 227 | --hash=sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43 \ 228 | --hash=sha256:c7662f0e3673fe4e832fe07b65c50342ea27d989f92c80355658c7f888fcc83c \ 229 | --hash=sha256:c80e4a09b3d95b4e1cac08643f1152fa71a0a821a2d4277334c88d54b2219a41 \ 230 | --hash=sha256:c894b4305373b9c5576d7a12b473702afdf48ce5369c074ba304cc5ad8730dff \ 231 | --hash=sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408 \ 232 | --hash=sha256:d89dd2b6da69c4fff5e39c28a382199ddedc3a5be5390115608345dec660b9e2 \ 233 | --hash=sha256:d9beb777a78c331580705326d2367488d5bc473b49a9bc3036c154832520aca9 \ 234 | --hash=sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57 \ 235 | --hash=sha256:e14e26956e6f1696070788252dcdff11b4aca4c3e8bd166e0df1bb8f315a67cb \ 236 | --hash=sha256:e6988e90fcf617da2b5c78902fe8e668361b43b4fe26dbf2d7b0f8034d4cafb9 \ 237 | --hash=sha256:e711e02f49e176a01d0349d82cb5f05ba4db7d5e7e0defd026328e5cfb3226d3 \ 238 | --hash=sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a \ 239 | --hash=sha256:ecc76a9ba2911d8d37ac01de72834d8849e55473457558e12995f4cd53e778e0 \ 240 | --hash=sha256:f55ba01150f52b1027829b50d70ef1dafd9821ea82905b63936668403c3b471e \ 241 | --hash=sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598 \ 242 | --hash=sha256:fa2d1337dc61c8dc417fbccf20f6d1e139896a30721b7f1e832b2bb6ef4eb6c4 243 | pandas==2.2.3 \ 244 | --hash=sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a \ 245 | --hash=sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d \ 246 | --hash=sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5 \ 247 | --hash=sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4 \ 248 | --hash=sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0 \ 249 | --hash=sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32 \ 250 | --hash=sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28 \ 251 | --hash=sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f \ 252 | --hash=sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348 \ 253 | --hash=sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18 \ 254 | 
--hash=sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468 \ 255 | --hash=sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5 \ 256 | --hash=sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667 \ 257 | --hash=sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645 \ 258 | --hash=sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13 \ 259 | --hash=sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3 \ 260 | --hash=sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d \ 261 | --hash=sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb \ 262 | --hash=sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3 \ 263 | --hash=sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039 \ 264 | --hash=sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8 \ 265 | --hash=sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd \ 266 | --hash=sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659 \ 267 | --hash=sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57 \ 268 | --hash=sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4 \ 269 | --hash=sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a \ 270 | --hash=sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9 \ 271 | --hash=sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42 \ 272 | --hash=sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2 \ 273 | --hash=sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc \ 274 | --hash=sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698 \ 275 | --hash=sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed \ 276 | --hash=sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015 \ 277 | --hash=sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24 \ 278 | --hash=sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319 279 | platformdirs==4.3.6 \ 280 | --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ 281 | --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb 282 | py3langid==0.3.0 \ 283 | --hash=sha256:0a875a031a58aaf9dbda7bb8285fd75e801a7bd276216ffabe037901d4b449ec \ 284 | --hash=sha256:38f022eec31cf9a2bf6f142acb2a9b350fd7d0d5ae7762b1392c6d3567401fd3 285 | pyarrow==19.0.0 \ 286 | --hash=sha256:239ca66d9a05844bdf5af128861af525e14df3c9591bcc05bac25918e650d3a2 \ 287 | --hash=sha256:2795064647add0f16563e57e3d294dbfc067b723f0fd82ecd80af56dad15f503 \ 288 | --hash=sha256:29cd86c8001a94f768f79440bf83fee23963af5e7bc68ce3a7e5f120e17edf89 \ 289 | --hash=sha256:2a0144a712d990d60f7f42b7a31f0acaccf4c1e43e957f7b1ad58150d6f639c1 \ 290 | --hash=sha256:2a1a109dfda558eb011e5f6385837daffd920d54ca00669f7a11132d0b1e6042 \ 291 | --hash=sha256:2b6d3ce4288793350dc2d08d1e184fd70631ea22a4ff9ea5c4ff182130249d9b \ 292 | --hash=sha256:2f672f5364b2d7829ef7c94be199bb88bf5661dd485e21d2d37de12ccb78a136 \ 293 | --hash=sha256:450a7d27e840e4d9a384b5c77199d489b401529e75a3b7a3799d4cd7957f2f9c \ 294 | --hash=sha256:4624c89d6f777c580e8732c27bb8e77fd1433b89707f17c04af7635dd9638351 \ 295 | --hash=sha256:4d8b0c0de0a73df1f1bf439af1b60f273d719d70648e898bc077547649bb8352 \ 296 | --hash=sha256:5418d4d0fab3a0ed497bad21d17a7973aad336d66ad4932a3f5f7480d4ca0c04 \ 297 | 
--hash=sha256:5e8a28b918e2e878c918f6d89137386c06fe577cd08d73a6be8dafb317dc2d73 \ 298 | --hash=sha256:62ef8360ff256e960f57ce0299090fb86423afed5e46f18f1225f960e05aae3d \ 299 | --hash=sha256:66732e39eaa2247996a6b04c8aa33e3503d351831424cdf8d2e9a0582ac54b34 \ 300 | --hash=sha256:8d47c691765cf497aaeed4954d226568563f1b3b74ff61139f2d77876717084b \ 301 | --hash=sha256:8e3a839bf36ec03b4315dc924d36dcde5444a50066f1c10f8290293c0427b46a \ 302 | --hash=sha256:9348a0137568c45601b031a8d118275069435f151cbb77e6a08a27e8125f59d4 \ 303 | --hash=sha256:a08e2a8a039a3f72afb67a6668180f09fddaa38fe0d21f13212b4aba4b5d2451 \ 304 | --hash=sha256:a218670b26fb1bc74796458d97bcab072765f9b524f95b2fccad70158feb8b17 \ 305 | --hash=sha256:a22a4bc0937856263df8b94f2f2781b33dd7f876f787ed746608e06902d691a5 \ 306 | --hash=sha256:a7bbe7109ab6198688b7079cbad5a8c22de4d47c4880d8e4847520a83b0d1b68 \ 307 | --hash=sha256:a92aff08e23d281c69835e4a47b80569242a504095ef6a6223c1f6bb8883431d \ 308 | --hash=sha256:b34d3bde38eba66190b215bae441646330f8e9da05c29e4b5dd3e41bde701098 \ 309 | --hash=sha256:b903afaa5df66d50fc38672ad095806443b05f202c792694f3a604ead7c6ea6e \ 310 | --hash=sha256:be686bf625aa7b9bada18defb3a3ea3981c1099697239788ff111d87f04cd263 \ 311 | --hash=sha256:c318eda14f6627966997a7d8c374a87d084a94e4e38e9abbe97395c215830e0c \ 312 | --hash=sha256:c3b78eff5968a1889a0f3bc81ca57e1e19b75f664d9c61a42a604bf9d8402aae \ 313 | --hash=sha256:c751c1c93955b7a84c06794df46f1cec93e18610dcd5ab7d08e89a81df70a849 \ 314 | --hash=sha256:ce42275097512d9e4e4a39aade58ef2b3798a93aa3026566b7892177c266f735 \ 315 | --hash=sha256:cf3bf0ce511b833f7bc5f5bb3127ba731e97222023a444b7359f3a22e2a3b463 \ 316 | --hash=sha256:e675a3ad4732b92d72e4d24009707e923cab76b0d088e5054914f11a797ebe44 \ 317 | --hash=sha256:e82c3d5e44e969c217827b780ed8faf7ac4c53f934ae9238872e749fa531f7c9 \ 318 | --hash=sha256:f094742275586cdd6b1a03655ccff3b24b2610c3af76f810356c4c71d24a2a6c \ 319 | --hash=sha256:f208c3b58a6df3b239e0bb130e13bc7487ed14f39a9ff357b6415e3f6339b560 \ 320 | --hash=sha256:f43f5aef2a13d4d56adadae5720d1fed4c1356c993eda8b59dace4b5983843c1 321 | pycountry==24.6.1 \ 322 | --hash=sha256:b61b3faccea67f87d10c1f2b0fc0be714409e8fcdcc1315613174f6466c10221 \ 323 | --hash=sha256:f1a4fb391cd7214f8eefd39556d740adcc233c778a27f8942c8dca351d6ce06f 324 | python-dateutil==2.9.0.post0 \ 325 | --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ 326 | --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 327 | python-stdnum==1.20 \ 328 | --hash=sha256:111008e10391d54fb2afad2a10df70d5cb0c6c0a7ec82fec6f022cb8712961d3 \ 329 | --hash=sha256:ad2a2cf2eb025de408210235f36b4ae31252de3186240ccaa8126e117cb82690 330 | pytz==2024.2 \ 331 | --hash=sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a \ 332 | --hash=sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725 333 | requests==2.32.3 \ 334 | --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ 335 | --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 336 | requests-cache==1.2.1 \ 337 | --hash=sha256:1285151cddf5331067baa82598afe2d47c7495a1334bfe7a7d329b43e9fd3603 \ 338 | --hash=sha256:68abc986fdc5b8d0911318fbb5f7c80eebcd4d01bfacc6685ecf8876052511d1 339 | six==1.17.0 \ 340 | --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ 341 | --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 342 | typing-extensions==4.12.2 ; python_full_version < '3.11' \ 343 | 
--hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ 344 | --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 345 | tzdata==2025.1 \ 346 | --hash=sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694 \ 347 | --hash=sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639 348 | url-normalize==1.4.3 \ 349 | --hash=sha256:d23d3a070ac52a67b83a1c59a0e68f8608d1cd538783b401bc9de2c0fac999b2 \ 350 | --hash=sha256:ec3c301f04e5bb676d333a7fa162fa977ad2ca04b7e652bfc9fac4e405728eed 351 | urllib3==2.3.0 \ 352 | --hash=sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df \ 353 | --hash=sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d 354 | wcwidth==0.2.13 \ 355 | --hash=sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859 \ 356 | --hash=sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5 357 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ilri/csv-metadata-quality/753f3340a32dbd6ade4d6798d34bccdfeba63dd8/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_check.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | import pandas as pd 4 | from colorama import Fore 5 | 6 | import csv_metadata_quality.check as check 7 | import csv_metadata_quality.experimental as experimental 8 | 9 | 10 | def test_check_invalid_issn(capsys): 11 | """Test checking invalid ISSN.""" 12 | 13 | value = "2321-2302" 14 | 15 | check.issn(value) 16 | 17 | captured = capsys.readouterr() 18 | assert captured.out == f"{Fore.RED}Invalid ISSN: {Fore.RESET}{value}\n" 19 | 20 | 21 | def test_check_valid_issn(): 22 | """Test checking valid ISSN.""" 23 | 24 | value = "0024-9319" 25 | 26 | result = check.issn(value) 27 | 28 | assert result is None 29 | 30 | 31 | def test_check_invalid_isbn(capsys): 32 | """Test checking invalid ISBN.""" 33 | 34 | value = "99921-58-10-6" 35 | 36 | check.isbn(value) 37 | 38 | captured = capsys.readouterr() 39 | assert captured.out == f"{Fore.RED}Invalid ISBN: {Fore.RESET}{value}\n" 40 | 41 | 42 | def test_check_valid_isbn(): 43 | """Test checking valid ISBN.""" 44 | 45 | value = "99921-58-10-7" 46 | 47 | result = check.isbn(value) 48 | 49 | assert result is None 50 | 51 | 52 | def test_check_missing_date(capsys): 53 | """Test checking missing date.""" 54 | 55 | value = None 56 | 57 | field_name = "dc.date.issued" 58 | 59 | check.date(value, field_name) 60 | 61 | captured = capsys.readouterr() 62 | assert captured.out == f"{Fore.RED}Missing date ({field_name}).{Fore.RESET}\n" 63 | 64 | 65 | def test_check_multiple_dates(capsys): 66 | """Test checking multiple dates.""" 67 | 68 | value = "1990||1991" 69 | 70 | field_name = "dc.date.issued" 71 | 72 | check.date(value, field_name) 73 | 74 | captured = capsys.readouterr() 75 | assert ( 76 | captured.out 77 | == f"{Fore.RED}Multiple dates not allowed ({field_name}): {Fore.RESET}{value}\n" 78 | ) 79 | 80 | 81 | def test_check_invalid_date(capsys): 82 | """Test checking invalid ISO8601 date.""" 83 | 84 | value = "1990-0" 85 | 86 | field_name = "dc.date.issued" 87 | 88 | check.date(value, field_name) 89 | 90 | captured = capsys.readouterr() 91 | assert ( 92 | captured.out == f"{Fore.RED}Invalid 
date ({field_name}): {Fore.RESET}{value}\n" 93 | ) 94 | 95 | 96 | def test_check_valid_date(): 97 | """Test checking valid ISO8601 date.""" 98 | 99 | value = "1990" 100 | 101 | field_name = "dc.date.issued" 102 | 103 | result = check.date(value, field_name) 104 | 105 | assert result is None 106 | 107 | 108 | def test_check_suspicious_characters(capsys): 109 | """Test checking for suspicious characters.""" 110 | 111 | value = "foreˆt" 112 | 113 | field_name = "dc.contributor.author" 114 | 115 | check.suspicious_characters(value, field_name) 116 | 117 | captured = capsys.readouterr() 118 | assert ( 119 | captured.out 120 | == f"{Fore.YELLOW}Suspicious character ({field_name}): {Fore.RESET}ˆt\n" 121 | ) 122 | 123 | 124 | def test_check_valid_iso639_1_language(): 125 | """Test valid ISO 639-1 (alpha 2) language.""" 126 | 127 | value = "ja" 128 | 129 | result = check.language(value) 130 | 131 | assert result is None 132 | 133 | 134 | def test_check_valid_iso639_3_language(): 135 | """Test valid ISO 639-3 (alpha 3) language.""" 136 | 137 | value = "eng" 138 | 139 | result = check.language(value) 140 | 141 | assert result is None 142 | 143 | 144 | def test_check_invalid_iso639_1_language(capsys): 145 | """Test invalid ISO 639-1 (alpha 2) language.""" 146 | 147 | value = "jp" 148 | 149 | check.language(value) 150 | 151 | captured = capsys.readouterr() 152 | assert ( 153 | captured.out == f"{Fore.RED}Invalid ISO 639-1 language: {Fore.RESET}{value}\n" 154 | ) 155 | 156 | 157 | def test_check_invalid_iso639_3_language(capsys): 158 | """Test invalid ISO 639-3 (alpha 3) language.""" 159 | 160 | value = "chi" 161 | 162 | check.language(value) 163 | 164 | captured = capsys.readouterr() 165 | assert ( 166 | captured.out == f"{Fore.RED}Invalid ISO 639-3 language: {Fore.RESET}{value}\n" 167 | ) 168 | 169 | 170 | def test_check_invalid_language(capsys): 171 | """Test invalid language.""" 172 | 173 | value = "Span" 174 | 175 | check.language(value) 176 | 177 | captured = capsys.readouterr() 178 | assert captured.out == f"{Fore.RED}Invalid language: {Fore.RESET}{value}\n" 179 | 180 | 181 | def test_check_invalid_agrovoc(capsys): 182 | """Test invalid AGROVOC subject. Invalid values *will not* be dropped.""" 183 | 184 | valid_agrovoc = "LIVESTOCK" 185 | invalid_agrovoc = "FOREST" 186 | value = f"{valid_agrovoc}||{invalid_agrovoc}" 187 | field_name = "dcterms.subject" 188 | drop = False 189 | 190 | new_value = check.agrovoc(value, field_name, drop) 191 | 192 | captured = capsys.readouterr() 193 | assert ( 194 | captured.out 195 | == f"{Fore.RED}Invalid AGROVOC ({field_name}): {Fore.RESET}{invalid_agrovoc}\n" 196 | ) 197 | assert new_value == value 198 | 199 | 200 | def test_check_invalid_agrovoc_dropped(capsys): 201 | """Test invalid AGROVOC subjects. 
Invalid values *will* be dropped.""" 202 | 203 | valid_agrovoc = "LIVESTOCK" 204 | invalid_agrovoc = "FOREST" 205 | value = f"{valid_agrovoc}||{invalid_agrovoc}" 206 | field_name = "dcterms.subject" 207 | drop = True 208 | 209 | new_value = check.agrovoc(value, field_name, drop) 210 | 211 | captured = capsys.readouterr() 212 | assert ( 213 | captured.out 214 | == f"{Fore.GREEN}Dropping invalid AGROVOC ({field_name}): {Fore.RESET}{invalid_agrovoc}\n" 215 | ) 216 | assert new_value == valid_agrovoc 217 | 218 | 219 | def test_check_valid_agrovoc(): 220 | """Test valid AGROVOC subject.""" 221 | 222 | value = "FORESTS" 223 | field_name = "dcterms.subject" 224 | drop = False 225 | 226 | result = check.agrovoc(value, field_name, drop) 227 | 228 | assert result == "FORESTS" 229 | 230 | 231 | def test_check_uncommon_filename_extension(capsys): 232 | """Test uncommon filename extension.""" 233 | 234 | value = "file.pdf.lck" 235 | 236 | check.filename_extension(value) 237 | 238 | captured = capsys.readouterr() 239 | assert ( 240 | captured.out 241 | == f"{Fore.YELLOW}Filename with uncommon extension: {Fore.RESET}{value}\n" 242 | ) 243 | 244 | 245 | def test_check_common_filename_extension(): 246 | """Test common filename extension.""" 247 | 248 | value = "file.pdf" 249 | 250 | result = check.filename_extension(value) 251 | 252 | assert result is None 253 | 254 | 255 | def test_check_incorrect_iso_639_1_language(capsys): 256 | """Test incorrect ISO 639-1 language, as determined by comparing the item's language field with the actual language predicted in the item's title.""" 257 | 258 | title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle" 259 | language = "es" 260 | exclude = [] 261 | 262 | # Create a dictionary to mimic Pandas series 263 | row = {"dc.title": title, "dc.language.iso": language} 264 | series = pd.Series(row) 265 | 266 | experimental.correct_language(series, exclude) 267 | 268 | captured = capsys.readouterr() 269 | assert ( 270 | captured.out 271 | == f"{Fore.YELLOW}Possibly incorrect language {language} (detected en): {Fore.RESET}{title}\n" 272 | ) 273 | 274 | 275 | def test_check_incorrect_iso_639_3_language(capsys): 276 | """Test incorrect ISO 639-3 language, as determined by comparing the item's language field with the actual language predicted in the item's title.""" 277 | 278 | title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle" 279 | language = "spa" 280 | exclude = [] 281 | 282 | # Create a dictionary to mimic Pandas series 283 | row = {"dc.title": title, "dc.language.iso": language} 284 | series = pd.Series(row) 285 | 286 | experimental.correct_language(series, exclude) 287 | 288 | captured = capsys.readouterr() 289 | assert ( 290 | captured.out 291 | == f"{Fore.YELLOW}Possibly incorrect language {language} (detected eng): {Fore.RESET}{title}\n" 292 | ) 293 | 294 | 295 | def test_check_correct_iso_639_1_language(): 296 | """Test correct ISO 639-1 language, as determined by comparing the item's language field with the actual language predicted in the item's title.""" 297 | 298 | title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle" 299 | language = "en" 300 | exclude = [] 301 | 302 | # Create a dictionary to mimic Pandas series 303 | row = {"dc.title": title, "dc.language.iso": language} 304 | series = pd.Series(row) 
305 | 306 | result = experimental.correct_language(series, exclude) 307 | 308 | assert result is None 309 | 310 | 311 | def test_check_correct_iso_639_3_language(): 312 | """Test correct ISO 639-3 language, as determined by comparing the item's language field with the actual language predicted in the item's title.""" 313 | 314 | title = "A randomised vaccine field trial in Kenya demonstrates protection against wildebeest-associated malignant catarrhal fever in cattle" 315 | language = "eng" 316 | exclude = [] 317 | 318 | # Create a dictionary to mimic Pandas series 319 | row = {"dc.title": title, "dc.language.iso": language} 320 | series = pd.Series(row) 321 | 322 | result = experimental.correct_language(series, exclude) 323 | 324 | assert result is None 325 | 326 | 327 | def test_check_valid_spdx_license_identifier(): 328 | """Test valid SPDX license identifier.""" 329 | 330 | license = "CC-BY-SA-4.0" 331 | 332 | result = check.spdx_license_identifier(license) 333 | 334 | assert result is None 335 | 336 | 337 | def test_check_invalid_spdx_license_identifier(capsys): 338 | """Test invalid SPDX license identifier.""" 339 | 340 | license = "CC-BY-SA" 341 | 342 | check.spdx_license_identifier(license) 343 | 344 | captured = capsys.readouterr() 345 | assert ( 346 | captured.out 347 | == f"{Fore.YELLOW}Non-SPDX license identifier: {Fore.RESET}{license}\n" 348 | ) 349 | 350 | 351 | def test_check_duplicate_item(capsys): 352 | """Test item with duplicate title, type, and date.""" 353 | 354 | item_title = "Title" 355 | item_type = "Report" 356 | item_date = "2021-03-17" 357 | 358 | d = { 359 | "dc.title": [item_title, item_title], 360 | "dcterms.type": [item_type, item_type], 361 | "dcterms.issued": [item_date, item_date], 362 | } 363 | df = pd.DataFrame(data=d) 364 | 365 | check.duplicate_items(df) 366 | 367 | captured = capsys.readouterr() 368 | assert ( 369 | captured.out 370 | == f"{Fore.YELLOW}Possible duplicate (dc.title): {Fore.RESET}{item_title}\n" 371 | ) 372 | 373 | 374 | def test_check_no_mojibake(): 375 | """Test string with no mojibake.""" 376 | 377 | field = "CIAT Publicaçao" 378 | field_name = "dcterms.isPartOf" 379 | 380 | result = check.mojibake(field, field_name) 381 | 382 | assert result is None 383 | 384 | 385 | def test_check_mojibake(capsys): 386 | """Test string with mojibake.""" 387 | 388 | field = "CIAT PublicaÃ§ao" 389 | field_name = "dcterms.isPartOf" 390 | 391 | check.mojibake(field, field_name) 392 | 393 | captured = capsys.readouterr() 394 | assert ( 395 | captured.out 396 | == f"{Fore.YELLOW}Possible encoding issue ({field_name}): {Fore.RESET}{field}\n" 397 | ) 398 | 399 | 400 | def test_check_doi_field(): 401 | """Test an item with a DOI field.""" 402 | 403 | doi = "https://doi.org/10.1186/1743-422X-9-218" 404 | citation = "Orth, A. 2021. Testing all the things. doi: 10.1186/1743-422X-9-218" 405 | 406 | # Emulate a column in a transposed dataframe (which is just a series), with 407 | # the citation and a DOI field. 408 | d = {"cg.identifier.doi": doi, "dcterms.bibliographicCitation": citation} 409 | series = pd.Series(data=d) 410 | exclude = [] 411 | 412 | result = check.citation_doi(series, exclude) 413 | 414 | assert result is None 415 | 416 | 417 | def test_check_doi_only_in_citation(capsys): 418 | """Test an item with a DOI in its citation, but no DOI field.""" 419 | 420 | citation = "Orth, A. 2021. Testing all the things. 
doi: 10.1186/1743-422X-9-218" 421 | exclude = [] 422 | 423 | # Emulate a column in a transposed dataframe (which is just a series), with 424 | # an empty DOI field and a citation containing a DOI. 425 | d = {"cg.identifier.doi": None, "dcterms.bibliographicCitation": citation} 426 | series = pd.Series(data=d) 427 | 428 | check.citation_doi(series, exclude) 429 | 430 | captured = capsys.readouterr() 431 | assert ( 432 | captured.out 433 | == f"{Fore.YELLOW}DOI in citation, but missing a DOI field: {Fore.RESET}{citation}\n" 434 | ) 435 | 436 | 437 | def test_title_in_citation(): 438 | """Test an item with its title in the citation.""" 439 | 440 | title = "Testing all the things" 441 | citation = "Orth, A. 2021. Testing all the things." 442 | exclude = [] 443 | 444 | # Emulate a column in a transposed dataframe (which is just a series), with 445 | # the title and citation. 446 | d = {"dc.title": title, "dcterms.bibliographicCitation": citation} 447 | series = pd.Series(data=d) 448 | 449 | result = check.title_in_citation(series, exclude) 450 | 451 | assert result is None 452 | 453 | 454 | def test_title_not_in_citation(capsys): 455 | """Test an item with its title missing from the citation.""" 456 | 457 | title = "Testing all the things" 458 | citation = "Orth, A. 2021. Testing all teh things." 459 | exclude = [] 460 | 461 | # Emulate a column in a transposed dataframe (which is just a series), with 462 | # the title and citation. 463 | d = {"dc.title": title, "dcterms.bibliographicCitation": citation} 464 | series = pd.Series(data=d) 465 | 466 | check.title_in_citation(series, exclude) 467 | 468 | captured = capsys.readouterr() 469 | assert ( 470 | captured.out 471 | == f"{Fore.YELLOW}Title is not present in citation: {Fore.RESET}{title}\n" 472 | ) 473 | 474 | 475 | def test_country_matches_region(): 476 | """Test an item with regions matching its country list.""" 477 | 478 | country = "Kenya" 479 | region = "Eastern Africa" 480 | exclude = [] 481 | 482 | # Emulate a column in a transposed dataframe (which is just a series) 483 | d = {"cg.coverage.country": country, "cg.coverage.region": region} 484 | series = pd.Series(data=d) 485 | 486 | result = check.countries_match_regions(series, exclude) 487 | 488 | assert result is None 489 | 490 | 491 | def test_country_not_matching_region(capsys): 492 | """Test an item with regions not matching its country list.""" 493 | 494 | title = "Testing an item with no matching region." 
495 | country = "Kenya" 496 | region = "" 497 | missing_region = "Eastern Africa" 498 | exclude = [] 499 | 500 | # Emulate a column in a transposed dataframe (which is just a series) 501 | d = { 502 | "dc.title": title, 503 | "cg.coverage.country": country, 504 | "cg.coverage.region": region, 505 | } 506 | series = pd.Series(data=d) 507 | 508 | check.countries_match_regions(series, exclude) 509 | 510 | captured = capsys.readouterr() 511 | assert ( 512 | captured.out 513 | == f"{Fore.YELLOW}Missing region ({country} → {missing_region}): {Fore.RESET}{title}\n" 514 | ) 515 | -------------------------------------------------------------------------------- /tests/test_fix.py: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-3.0-only 2 | 3 | import pandas as pd 4 | 5 | import csv_metadata_quality.fix as fix 6 | 7 | 8 | def test_fix_leading_whitespace(): 9 | """Test fixing leading whitespace.""" 10 | 11 | value = " Alan" 12 | 13 | field_name = "dc.contributor.author" 14 | 15 | assert fix.whitespace(value, field_name) == "Alan" 16 | 17 | 18 | def test_fix_trailing_whitespace(): 19 | """Test fixing trailing whitespace.""" 20 | 21 | value = "Alan " 22 | 23 | field_name = "dc.contributor.author" 24 | 25 | assert fix.whitespace(value, field_name) == "Alan" 26 | 27 | 28 | def test_fix_excessive_whitespace(): 29 | """Test fixing excessive whitespace.""" 30 | 31 | value = "Alan Orth" 32 | 33 | field_name = "dc.contributor.author" 34 | 35 | assert fix.whitespace(value, field_name) == "Alan Orth" 36 | 37 | 38 | def test_fix_invalid_separators(): 39 | """Test fixing invalid multi-value separators.""" 40 | 41 | value = "Alan|Orth" 42 | 43 | field_name = "dc.contributor.author" 44 | 45 | assert fix.separators(value, field_name) == "Alan||Orth" 46 | 47 | 48 | def test_fix_unnecessary_separators(): 49 | """Test fixing unnecessary multi-value separators.""" 50 | 51 | field = "Alan||Orth||" 52 | 53 | field_name = "dc.contributor.author" 54 | 55 | assert fix.separators(field, field_name) == "Alan||Orth" 56 | 57 | 58 | def test_fix_unnecessary_unicode(): 59 | """Test fixing unnecessary Unicode.""" 60 | 61 | value = "Alan​ Orth" 62 | 63 | assert fix.unnecessary_unicode(value) == "Alan Orth" 64 | 65 | 66 | def test_fix_duplicates(): 67 | """Test fixing duplicate metadata values.""" 68 | 69 | value = "Kenya||Kenya" 70 | 71 | field_name = "dc.contributor.author" 72 | 73 | assert fix.duplicates(value, field_name) == "Kenya" 74 | 75 | 76 | def test_fix_newlines(): 77 | """Test fixing newlines.""" 78 | 79 | value = """Ken 80 | ya""" 81 | field_name = "dcterms.subject" 82 | 83 | assert fix.newlines(value, field_name) == "Kenya" 84 | 85 | 86 | def test_fix_comma_space(): 87 | """Test adding space after comma.""" 88 | 89 | value = "Orth,Alan S." 90 | 91 | field_name = "dc.contributor.author" 92 | 93 | assert fix.comma_space(value, field_name) == "Orth, Alan S." 
94 | 95 | 96 | def test_fix_normalized_unicode(): 97 | """Test fixing a string that is already in its normalized (NFC) Unicode form.""" 98 | 99 | # string using the normalized canonical form of é 100 | value = "Ouédraogo, Mathieu" 101 | 102 | field_name = "dc.contributor.author" 103 | 104 | assert fix.normalize_unicode(value, field_name) == "Ouédraogo, Mathieu" 105 | 106 | 107 | def test_fix_decomposed_unicode(): 108 | """Test fixing a string that contains Unicode string.""" 109 | 110 | # string using the decomposed form of é 111 | value = "Ouédraogo, Mathieu" 112 | 113 | field_name = "dc.contributor.author" 114 | 115 | assert fix.normalize_unicode(value, field_name) == "Ouédraogo, Mathieu" 116 | 117 | 118 | def test_fix_mojibake(): 119 | """Test string with no mojibake.""" 120 | 121 | field = "CIAT Publicaçao" 122 | field_name = "dcterms.isPartOf" 123 | 124 | assert fix.mojibake(field, field_name) == "CIAT Publicaçao" 125 | 126 | 127 | def test_fix_country_not_matching_region(): 128 | """Test an item with regions not matching its country list.""" 129 | 130 | title = "Testing an item with no matching region." 131 | country = "Kenya" 132 | region = "" 133 | missing_region = "Eastern Africa" 134 | exclude = [] 135 | 136 | # Emulate a column in a transposed dataframe (which is just a series) 137 | d = { 138 | "dc.title": title, 139 | "cg.coverage.country": country, 140 | "cg.coverage.region": region, 141 | } 142 | series = pd.Series(data=d) 143 | 144 | result = fix.countries_match_regions(series, exclude) 145 | 146 | # Emulate the correct series we are expecting 147 | d_correct = { 148 | "dc.title": title, 149 | "cg.coverage.country": country, 150 | "cg.coverage.region": missing_region, 151 | } 152 | series_correct = pd.Series(data=d_correct) 153 | 154 | pd.testing.assert_series_equal(result, series_correct) 155 | 156 | 157 | def test_fix_normalize_dois(): 158 | """Test normalizing a DOI.""" 159 | 160 | value = "doi: 10.11648/j.jps.20140201.14" 161 | 162 | assert fix.normalize_dois(value) == "https://doi.org/10.11648/j.jps.20140201.14" 163 | --------------------------------------------------------------------------------
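Because the tests above call the check and fix helpers directly, they also double as a reference for the public signatures (a value plus a field name for most fixes). As a rough sketch only (the csv-metadata-quality console script declared in pyproject.toml is the intended entry point, and the sample values below are invented), the same helpers can be chained over a pandas Series:

import pandas as pd

import csv_metadata_quality.check as check
import csv_metadata_quality.fix as fix

field_name = "dc.contributor.author"
# Invented sample values mirroring cases exercised by the tests above.
authors = pd.Series([" Orth,Alan ", "Orth, Alan||Orth, Alan", "Orth,  Alan|Smith, Jane"])

# Chain a few of the fixes shown above: whitespace, comma spacing,
# multi-value separators, and duplicate values.
fixed = (
    authors.apply(fix.whitespace, args=(field_name,))
    .apply(fix.comma_space, args=(field_name,))
    .apply(fix.separators, args=(field_name,))
    .apply(fix.duplicates, args=(field_name,))
)
print(fixed.tolist())
# expected: ['Orth, Alan', 'Orth, Alan', 'Orth, Alan||Smith, Jane']

# check.issn() prints a warning and returns None; fixes return the corrected value.
check.issn("2321-2302")  # prints: Invalid ISSN: 2321-2302
print(fix.normalize_dois("doi: 10.11648/j.jps.20140201.14"))
# expected: https://doi.org/10.11648/j.jps.20140201.14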
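The country and region checks above compare each country against UN M.49 regions (Kenya with Eastern Africa, Bahamas with Caribbean, and so on, as in data/test-geography.csv), and the package depends on country-converter for that kind of lookup. Below is a minimal sketch of such a mapping; the "UNregion" classification name is an assumption here, since the exact call used inside the package is not part of this listing.

import country_converter as coco

# Assumption: the "UNregion" classification yields UN M.49 sub-region names,
# matching the region strings used in the tests and fixture data above.
countries = ["Kenya", "Bahamas", "Cambodia"]
regions = coco.convert(names=countries, to="UNregion")
print(dict(zip(countries, regions)))
# expected: {'Kenya': 'Eastern Africa', 'Bahamas': 'Caribbean',
#            'Cambodia': 'South-eastern Asia'}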