├── .circleci └── config.yml ├── .dockerignore ├── .gitignore ├── .travis.yml ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── Makefile ├── PULL_REQUEST_TEMPLATE.md ├── README.md ├── alembic.ini ├── bin └── ci │ └── deploy-dockerhub.sh ├── dev-requirements.txt ├── docker-entrypoint.sh ├── docs ├── RELEASE.md └── configuration.rst ├── etc └── tokenserver-dev.ini ├── loadtest ├── Makefile ├── README.txt ├── config │ ├── bench.ini │ ├── megabench.ini │ └── test.ini ├── loadtest.py ├── mock-oauth-cfn.yml └── populate-db.py ├── requirements.txt ├── setup.py └── tokenserver ├── __init__.py ├── assignment ├── __init__.py ├── memorynode.py └── sqlnode │ ├── __init__.py │ ├── migrations │ ├── README │ ├── env.py │ ├── script.py.mako │ └── versions │ │ ├── 17d209a72e2f_add_replaced_at_idx.py │ │ ├── 2b968b28bcdc_remove_node_column.py │ │ ├── 3d5af3924466_drop_clientstate_idx.py │ │ ├── 5d056c5b8f57_create_dyn_settings_table.py │ │ ├── 6569dd9a060_populate_nodeid_column_and_index.py │ │ ├── 75e8ca84b0bc_add_keys_changed_at_column.py │ │ ├── 846f28d1b6f_add_nodeid_column.py │ │ └── 9fb109457bd_make_users_node_column_nullable.py │ ├── schemas.py │ ├── sql.py │ └── sqliteschemas.py ├── run.py ├── scripts ├── __init__.py ├── add_node.py ├── allocate_user.py ├── count_users.py ├── process_account_events.py ├── purge_old_records.py ├── remove_node.py ├── unassign_node.py └── update_node.py ├── tests ├── __init__.py ├── assignment │ ├── __init__.py │ └── test_sqlnode.py ├── secrets ├── secrets2 ├── support.py ├── test_backend_sql.py ├── test_local_browserid_verifier.py ├── test_memorynode.ini ├── test_memorynode.py ├── test_node_type_classifier.py ├── test_oauth_verifier.py ├── test_process_account_events.py ├── test_purge_old_records.py ├── test_remote_browserid_verifier.py ├── test_service.py └── test_sql.ini ├── tweens.py ├── util.py ├── verifiers.py └── views.py /.circleci/config.yml: 
-------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | docker: 5 | - image: circleci/python 6 | steps: 7 | - checkout 8 | - setup_remote_docker 9 | 10 | - run: 11 | name: Create version.json 12 | command: | 13 | printf '{"commit":"%s","version":"%s","source":"https://github.com/%s/%s","build":"%s"}\n' \ 14 | "$CIRCLE_SHA1" \ 15 | "$CIRCLE_TAG" \ 16 | "$CIRCLE_PROJECT_USERNAME" \ 17 | "$CIRCLE_PROJECT_REPONAME" \ 18 | "$CIRCLE_BUILD_URL" | tee version.json 19 | - store_artifacts: 20 | path: version.json 21 | 22 | - run: 23 | name: Build deployment container image 24 | command: docker build -t app:build . 25 | - run: 26 | name: Test flake8 27 | command: docker run -it app:build test_flake8 28 | - run: 29 | name: Test nose 30 | command: docker run -it app:build test_nose 31 | - run: 32 | name: Test that the `pypy` entrypoint responds 33 | command: | 34 | set -e 35 | docker run -it app:build pypy --version 36 | - run: 37 | name: Test that the `server` entrypoint starts ok 38 | command: | 39 | set -e 40 | container_name=$(docker run -it -e MOZSVC_SQLURI="sqlite:////tmp/tokenserver.db" --rm -d app:build server) 41 | # Grab the logs now in case this container crashes and is removed. 42 | (docker logs --follow $container_name &) 43 | sleep 10 44 | # If the container above stops before running 10 seconds, the 45 | # `--rm` will remove it and this `docker inspect` will exit 46 | # non-zero, failing this step. 
47 | docker inspect -f '{{.State.Running}}' $container_name 48 | docker kill $container_name 49 | - store_test_results: 50 | path: test_results 51 | - run: 52 | name: Push to Dockerhub 53 | command: | 54 | if [ "${CIRCLE_BRANCH}" == "master" ]; then 55 | bin/ci/deploy-dockerhub.sh latest 56 | fi 57 | if [[ "${CIRCLE_BRANCH}" == feature* ]] || [[ "${CIRCLE_BRANCH}" == dockerpush* ]]; then 58 | bin/ci/deploy-dockerhub.sh "$CIRCLE_BRANCH" 59 | fi 60 | if [ -n "${CIRCLE_TAG}" ]; then 61 | bin/ci/deploy-dockerhub.sh "$CIRCLE_TAG" 62 | fi 63 | 64 | workflows: 65 | version: 2 66 | 67 | # workflow jobs are _not_ run in tag builds by default 68 | # we use filters to whitelist jobs that should be run for tags 69 | 70 | # workflow jobs are run in _all_ branch builds by default 71 | # we use filters to blacklist jobs that shouldn't be run for a branch 72 | 73 | # see: https://circleci.com/docs/2.0/workflows/#git-tag-job-execution 74 | 75 | build-test-push: 76 | jobs: 77 | - build: 78 | filters: 79 | tags: 80 | only: /.*/ 81 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | local 3 | *.egg-info 4 | *.swp 5 | \.coverage 6 | *~ 7 | nosetests.xml 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.swp 3 | tokenserver.egg-info 4 | build 5 | dist 6 | local 7 | version.json 8 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - "2.7" 5 | 6 | services: 7 | - mysql 8 | 9 | notifications: 10 | email: 11 | - rfkelly@mozilla.com 12 | irc: 13 | channels: 14 | - "irc.mozilla.org#services-dev" 15 | use_notice: false 16 | skip_join: false 17 | 18 
| before_install: 19 | # https://github.com/travis-ci/travis-ci/issues/7940 20 | - sudo rm -f /etc/boto.cfg 21 | 22 | install: 23 | - make install-dev 24 | 25 | before_script: 26 | - mysql -e 'create database tokenserver_test;' 27 | 28 | script: 29 | - make flake8 30 | - export BOTO_CONFIG=/dev/null # https://github.com/travis-ci/travis-ci/issues/7940 31 | - MOZSVC_SQLURI=pymysql://root@localhost/tokenserver_test make tests 32 | - make tests 33 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Community Participation Guidelines 2 | 3 | This repository is governed by Mozilla's code of conduct and etiquette guidelines. 4 | For more details, please read the 5 | [Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). 6 | 7 | ## How to Report 8 | For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. 
9 | 10 | 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pypy:2.7-jessie 2 | 3 | WORKDIR /app 4 | 5 | RUN addgroup -gid 1001 app && useradd -g app --shell /usr/sbin/nologin --uid 1001 app 6 | # run the server by default 7 | ENTRYPOINT ["/app/docker-entrypoint.sh"] 8 | CMD ["server"] 9 | 10 | COPY ./requirements.txt /app/requirements.txt 11 | COPY ./dev-requirements.txt /app/dev-requirements.txt 12 | 13 | # install dependencies, cleanup and add libstdc++ back in since 14 | # we the app needs to link to it 15 | RUN apt-get update && \ 16 | apt-get install -y build-essential ca-certificates libffi-dev libssl-dev libmysqlclient-dev && \ 17 | pip install -r dev-requirements.txt && \ 18 | apt-get remove -y build-essential gcc 19 | 20 | # Copy in the whole app after dependencies have been installed & cached 21 | COPY . /app 22 | RUN pypy ./setup.py develop 23 | 24 | # run as non priviledged user 25 | USER app 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. 
"Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. "Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. 
"Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. 
Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. Responsibilities 158 | ------------------- 159 | 160 | 3.1. 
Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. 
If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. 
Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. 
If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. * 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. 
Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. 
Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. Each version will be given a 331 | distinguishing version number. 332 | 333 | 10.2. Effect of New Versions 334 | 335 | You may distribute the Covered Software under the terms of the version 336 | of the License under which You originally received the Covered Software, 337 | or under the terms of any subsequent version published by the license 338 | steward. 339 | 340 | 10.3. Modified Versions 341 | 342 | If you create software not governed by this License, and you want to 343 | create a new license for such software, you may create and use a 344 | modified version of this License if you rename the license and remove 345 | any references to the name of the license steward (except to note that 346 | such modified license differs from this License). 347 | 348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary 349 | Licenses 350 | 351 | If You choose to distribute Source Code Form that is Incompatible With 352 | Secondary Licenses under the terms of this version of the License, the 353 | notice described in Exhibit B of this License must be attached. 354 | 355 | Exhibit A - Source Code Form License Notice 356 | ------------------------------------------- 357 | 358 | This Source Code Form is subject to the terms of the Mozilla Public 359 | License, v. 2.0. If a copy of the MPL was not distributed with this 360 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 
361 | 362 | If it is not possible or desirable to put the notice in a particular 363 | file, then You may include the notice in a location (such as a LICENSE 364 | file in a relevant directory) where a recipient would be likely to look 365 | for such a notice. 366 | 367 | You may add additional accurate notices of copyright ownership. 368 | 369 | Exhibit B - "Incompatible With Secondary Licenses" Notice 370 | --------------------------------------------------------- 371 | 372 | This Source Code Form is "Incompatible With Secondary Licenses", as 373 | defined by the Mozilla Public License, v. 2.0. 374 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | include *.rst 3 | include *.spec 4 | recursive-include etc *.ini *.conf 5 | recursive-include tokenserver *.json *.ini 6 | include tokenserver/tests/secrets 7 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VIRTUALENV = virtualenv 2 | VENV := $(shell echo $${VIRTUAL_ENV-local}) 3 | PTYPE = pypy 4 | PYTHON = $(VENV)/bin/python 5 | NOSE = $(VENV)/bin/nosetests 6 | DEV_STAMP = $(VENV)/.dev_env_installed.stamp 7 | INSTALL_STAMP = $(VENV)/.install.stamp 8 | TEMPDIR := $(shell mktemp -d) 9 | 10 | # Hackety-hack around OSX system python bustage. 11 | # The need for this should go away with a future osx/xcode update. 
12 | ARCHFLAGS = -Wno-error=unused-command-line-argument-hard-error-in-future 13 | CFLAGS = -Wno-error=write-strings 14 | 15 | INSTALL = ARCHFLAGS=$(ARCHFLAGS) CFLAGS=$(CFLAGS) $(VENV)/bin/pip install 16 | 17 | .IGNORE: clean distclean maintainer-clean 18 | .PHONY: all install install-dev virtualenv tests 19 | 20 | help: 21 | @echo "Since Python 2.7 is no longer supported, but is used by this project," 22 | @echo "please be sure to install pypy 2.7 and" 23 | @echo "the system appropriate `pypy-dev` package" 24 | @echo "" 25 | @echo "Please use 'make ' where is one of" 26 | @echo " install install dependencies and prepare environment" 27 | @echo " install-dev install dependencies and everything needed to run tests" 28 | @echo " build-requirements install all requirements and freeze them in requirements.txt" 29 | @echo " flake8 run the flake8 linter" 30 | @echo " tests run all the tests with all the supported python interpreters (same as travis)" 31 | @echo " version-file update the version.json file" 32 | @echo " clean remove *.pyc files and __pycache__ directory" 33 | @echo " distclean remove *.egg-info files and *.egg, build and dist directories" 34 | @echo " maintainer-clean remove the .tox and the .venv directories" 35 | @echo "Check the Makefile to know exactly what each target is doing." 36 | 37 | all: install 38 | install: $(INSTALL_STAMP) 39 | $(INSTALL_STAMP): $(PYTHON) setup.py 40 | $(INSTALL) -Ue . 41 | touch $(INSTALL_STAMP) 42 | 43 | install-dev: $(INSTALL_STAMP) $(DEV_STAMP) 44 | $(DEV_STAMP): $(PYTHON) dev-requirements.txt 45 | $(INSTALL) -Ur dev-requirements.txt 46 | touch $(DEV_STAMP) 47 | 48 | virtualenv: $(PYTHON) 49 | $(PYTHON): 50 | # The latest `pip` doesn't work with pypy 2.7 on some platforms. 
51 | # Pin to a working version; ref https://github.com/pypa/pip/issues/8653 52 | $(VIRTUALENV) -p $(PTYPE) --no-pip $(VENV) 53 | $(VENV)/bin/easy_install pip==20.1.1 54 | 55 | build-requirements: 56 | $(VIRTUALENV) -p $(PTYPE) --no-pip $(TEMPDIR) 57 | $(TEMPDIR)/bin/easy_install pip==20.1.1 58 | ARCHFLAGS=$(ARCHFLAGS) $(TEMPDIR)/bin/pip install -Ue . 59 | $(TEMPDIR)/bin/pip freeze | grep -v -- '^-e' > requirements.txt 60 | 61 | tests: install-dev 62 | # By default nose will skip tests in executable files, but that's annoying 63 | # when working in WSL with a checkout mounted from the native filesystem. 64 | $(VENV)/bin/nosetests --exe tokenserver/tests 65 | 66 | flake8: install-dev 67 | $(VENV)/bin/flake8 tokenserver 68 | 69 | clean: 70 | find . -name '*.pyc' -delete 71 | find . -name '__pycache__' -type d | xargs rm -fr 72 | rm -fr docs/_build/ 73 | 74 | distclean: clean 75 | rm -fr *.egg *.egg-info/ dist/ build/ 76 | 77 | maintainer-clean: distclean 78 | rm -fr local/ .tox/ 79 | 80 | NAME := tokenserver 81 | SOURCE := $(shell git config remote.origin.url | sed -e 's|git@|https://|g' | sed -e 's|github.com:|github.com/|g') 82 | VERSION := $(shell git describe --always --tag) 83 | COMMIT := $(shell git log --pretty=format:'%H' -n 1) 84 | version-file: 85 | echo '{"name":"$(NAME)","version":"$(VERSION)","source":"$(SOURCE)","commit":"$(COMMIT)"}' > version.json 86 | -------------------------------------------------------------------------------- /PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | Describe these changes. 4 | 5 | ## Testing 6 | 7 | How should reviewers test? 8 | 9 | ## Issue(s) 10 | 11 | Closes [link](link). 
12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/mozilla-services/tokenserver.png?branch=master)](https://travis-ci.org/mozilla-services/tokenserver) 2 | [![Docker Build Status](https://circleci.com/gh/mozilla-services/tokenserver/tree/master.svg?style=shield&circle-token=0fdb6d8d80e18f180132ea25cf9f75a38828591a)](https://circleci.com/gh/mozilla-services/tokenserver) 3 | 4 | # Firefox Sync TokenServer 5 | 6 | This service is responsible for allocating Firefox Sync users to one of several Sync Storage nodes. 7 | It provides the "glue" between [Firefox Accounts](https://github.com/mozilla/fxa/) and the 8 | [SyncStorage](https://github.com/mozilla-services/server-syncstorage) API, and handles: 9 | 10 | * Checking the user's credentials as provided by FxA 11 | * Sharding users across storage nodes in a way that evenly distributes server load 12 | * Re-assigning the user to a new storage node if their FxA encryption key changes 13 | * Cleaning up old data from e.g. deleted accounts 14 | 15 | The service was originallly conceived to be a general-purpose mechanism for connecting users 16 | to multiple different Mozilla-run services, and you can see some of the historical context 17 | for that original design [here](https://wiki.mozilla.org/Services/Sagrada/TokenServer) 18 | and [here](https://mozilla-services.readthedocs.io/en/latest/token/index.html). 19 | 20 | In practice today, it is only used for connecting to Firefox Sync. 
21 | 22 | ## How to run the server 23 | 24 | Like this: 25 | 26 | $ make install 27 | $ ./local/bin/pip install gunicorn 28 | $ ./local/bin/gunicorn --paste etc/tokenserver-dev.ini 29 | 30 | ## API 31 | 32 | Firfox Sync clients must first obtain user credentials from FxA, which can be either: 33 | 34 | * A BrowserID assertion with audience of `https://token.services.mozilla.com/` 35 | * An OAuth access token bearing the scope `https://identity.mozilla.com/apps/oldsync` 36 | 37 | They then provide this in the `Authorization` header of a `GET` request to the Tokenserver, 38 | which will respond with the URL of the user's sync storage node, and some short-lived credentials 39 | that can be used to access it. 40 | 41 | More detailed API documentation is available [here](https://mozilla-services.readthedocs.io/en/latest/token/apis.html). 42 | 43 | ### Using BrowserID 44 | 45 | To access the user's sync data using BrowserID, the client must obtain a BrowserID assertion 46 | with audience matching the tokenserver's public URL, as well as the user's Sync encryption key. 47 | They send the BrowserID assertion in the `Authorization` header, and the first half of the 48 | hex-encoded SHA256 digest of the encryption key in the `X-Client-State` header, like so: 49 | 50 | ``` 51 | GET /1.0/sync/1.5 52 | Host: token.services.mozilla.com 53 | Authorization: BrowserID 54 | X-Client-State: 55 | ``` 56 | 57 | ### Using OAuth 58 | 59 | To access the user's sync data using OAuth, the client must obtain an FxA OAuth access_token 60 | with scope `https://identity.mozilla.com/apps/oldsync`, and the corresponding encryption key 61 | as a JWK. 
They send the OAuth token in the `Authorization` header, and the `kid` field of the 62 | encryption key in the `X-KeyID` header, like so: 63 | 64 | ``` 65 | GET /1.0/sync/1.5 66 | Host: token.services.mozilla.com 67 | Authorization: Bearer 68 | X-KeyID: 69 | ``` 70 | 71 | ### Response 72 | 73 | The tokenserver will validate the provided credentials, and either look up the user's existing 74 | storage node allocation or assign them to a new one. It responds with the location of the 75 | storage node and a set of short-lived credentials that can be used to access it: 76 | 77 | ``` 78 | { 79 | 'id': , 80 | 'key': , 81 | 'api_endpoint': 'https://db42.sync.services.mozilla.com/1.5/12345', 82 | 'uid': 12345, 83 | 'duration': 300, 84 | } 85 | ``` 86 | 87 | ### Storage Token 88 | 89 | The value of `` is intended to be opaque to the client, but is in fact an encoded JSON blob 90 | signed using a secret key shared between the tokenserver and the storage nodes. This allows 91 | the tokenserver to securely communicate information about the user to their storage node. 92 | The fields contained therein include: 93 | 94 | * `uid`: A numeric userid that uniquely identifies this user, on this storage node, using this encryption key 95 | * `node`: The intended storage node on which these credentials can be used 96 | * `expires`: A timestamp for when the credentials expire 97 | * `fxa_uid`: The user's stable FxA user id, as a hex string 98 | * `fxa_kid`: The key-id of the JWK representing the user's sync encryption key 99 | 100 | ## Specifying the Data Store 101 | 102 | The data store is specified in the *[tokenserver]* section as `sqluri`. If you want to use a longer 103 | lived data store than `sqlite3`, you will need to specify the Data Source Name (DSN) as a Universal 104 | Resource Locator (URL) such as: `sqluri = mysql+pymysql://scott:tiger@localhost/tokenserver`. See the 105 | [SQLAlchemy Engine](https://docs.sqlalchemy.org/en/13/core/engines.html) specification for details. 
106 | 107 | TokenServer comes with support for `pymysql`, `mysqldb`, and `sqlite3` by default. Additional databases 108 | will require `pip install`. 109 | 110 | ## Data Model 111 | 112 | The core of the TokenServer's data model is a table named `users` that maps each user to their storage 113 | node, and that provides enough information to update that mapping over time. Each row in the table 114 | contains the following fields: 115 | 116 | * `uid`: Auto-incrementing numeric userid, created automatically for each row. 117 | * `service`: The service the user is accessing; in practice this is always `sync-1.5`. 118 | * `email`: Stable identifier for the user; in practice this is always `@api.accounts.firefox.com`. 119 | * `nodeid`: The storage node to which the user has been assigned. 120 | * `generation`: A monotonically increasing number provided by the FxA server, indicating 121 | the last time at which the user's login credentials were changed. 122 | * `client_state`: The hash of the user's sync encryption key. 123 | * `keys_changed_at`: A monotonically increasing timestamp provided by the FxA server, indicating 124 | the last time at which the user's encryption keys were changed. 125 | * `created_at`: Timestamp at which this node-assignment record was created. 126 | * `replaced_at`: Timestamp at which this node-assignment record was replaced by a newer assignment, if any. 127 | 128 | As you can see, this table contains some unnecessarily general names; these are a legacy of earlier plans 129 | to re-use Tokenserver for multiple Mozilla services and with multiple identity providers. 130 | 131 | The `generation` column is used to detect when the user's FxA credentials have been changed 132 | and to lock out clients that have not been updated with the latest credentials. 
133 | Tokenserver tracks the highest value of `generation` that it has ever seen for a user, 134 | and rejects BrowserID assertions in which the `generation` number is less than that high-water mark. 135 | Note that OAuth clients do not provide a `generation` number, because OAuth tokens get 136 | revoked immediately when the user's credentials are changed. 137 | 138 | The `client_state` column is used to detect when the user's encryption key changes. 139 | When it sees a new value for `client_state`, Tokenserver will replace the user's node assignment 140 | with a new one, so that data encrypted with the new key will be written into a different 141 | storage "bucket" on the storage nodes. 142 | 143 | The `keys_changed_at` column tracks the timestamp at which the user's encryption keys were 144 | last changed. BrowserID clients provide this as a field in the assertion, while OAuth clients 145 | provide it as part of the `X-KeyID` header. Tokenserver will check that changes in the value 146 | of `keys_changed_at` always correspond to a change in `client_state`, and will use this pair of 147 | values to construct the `fxa_kid` field that is communicated to the storage nodes. 148 | 149 | When replacing a user's node assignment, the previous row is not deleted immediately. 150 | Instead, it is marked as "replaced" by setting the `replaced_at` timestamp, and then a background 151 | job periodically purges replaced rows (including making a `DELETE` request to the storage node 152 | to clean up any old data stored under that `uid`). 153 | 154 | For this scheme to work as intended, it's expected that storage nodes will index user data by either: 155 | 156 | 1. The tuple `(fxa_uid, fxa_kid)`, which identifies a consistent set of sync data for a particular 157 | user, encrypted using a particular key. 158 | 2. The numeric `uid`, which changes whenever either of the above two values change.
159 | -------------------------------------------------------------------------------- /alembic.ini: -------------------------------------------------------------------------------- 1 | 2 | [alembic] 3 | # path to migration scripts 4 | script_location = tokenserver/assignment/sqlnode/migrations 5 | 6 | # Logging configuration 7 | [loggers] 8 | keys = root,sqlalchemy,alembic 9 | 10 | [handlers] 11 | keys = console 12 | 13 | [formatters] 14 | keys = generic 15 | 16 | [logger_root] 17 | level = WARN 18 | handlers = console 19 | qualname = 20 | 21 | [logger_sqlalchemy] 22 | level = WARN 23 | handlers = 24 | qualname = sqlalchemy.engine 25 | 26 | [logger_alembic] 27 | level = INFO 28 | handlers = 29 | qualname = alembic 30 | 31 | [handler_console] 32 | class = StreamHandler 33 | args = (sys.stderr,) 34 | level = NOTSET 35 | formatter = generic 36 | 37 | [formatter_generic] 38 | format = %(levelname)-5.5s [%(name)s] %(message)s 39 | datefmt = %H:%M:%S 40 | -------------------------------------------------------------------------------- /bin/ci/deploy-dockerhub.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # THIS IS MEANT TO BE RUN BY CI 4 | 5 | set -e 6 | 7 | # Usage: retry MAX CMD... 8 | # Retry CMD up to MAX times. If it fails MAX times, returns failure. 
9 | # Example: retry 3 docker push "$DOCKERHUB_REPO:$TAG" 10 | function retry() { 11 | max=$1 12 | shift 13 | count=1 14 | until "$@"; do 15 | count=$((count + 1)) 16 | if [[ $count -gt $max ]]; then 17 | return 1 18 | fi 19 | echo "$count / $max" 20 | done 21 | return 0 22 | } 23 | 24 | # configure docker creds 25 | retry 3 echo "$DOCKER_PASS" | docker login -u="$DOCKER_USER" --password-stdin 26 | 27 | # docker tag and push git branch to dockerhub 28 | if [ -n "$1" ]; then 29 | [ "$1" == master ] && TAG=latest || TAG="$1" 30 | docker tag app:build "$DOCKERHUB_REPO:$TAG" || 31 | (echo "Couldn't tag app:build as $DOCKERHUB_REPO:$TAG" && false) 32 | retry 3 docker push "$DOCKERHUB_REPO:$TAG" || 33 | (echo "Couldn't push $DOCKERHUB_REPO:$TAG" && false) 34 | echo "Pushed $DOCKERHUB_REPO:$TAG" 35 | fi 36 | -------------------------------------------------------------------------------- /dev-requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | flake8 3 | mock 4 | nose 5 | # Note: later versions of `responses` cause a `ValueError` to be returned 6 | # in `test_oauth_verifier.py` See commented content in 7 | # `test_verifier_config_dynamic_issuer_discovery()` 8 | responses==0.14.0 9 | webtest 10 | -------------------------------------------------------------------------------- /docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cd $(dirname $0) 4 | case "$1" in 5 | server) 6 | _SETTINGS_FILE=${TOKENSERVER_SETTINGS_FILE:-"/app/tokenserver/tests/test_sql.ini"} 7 | 8 | if [ ! -e $_SETTINGS_FILE ]; then 9 | echo "Could not find ini file: $_SETTINGS_FILE" 10 | exit 1 11 | fi 12 | 13 | echo "Starting gunicorn with config: $_SETTINGS_FILE" 14 | 15 | # the 'gevent' worker class causes gunicorn to fail to load 16 | # under Docker. Switching to 'sync' solves this. 
17 | exec gunicorn \ 18 | --paste "$_SETTINGS_FILE" \ 19 | --bind ${HOST-127.0.0.1}:${PORT-8000}\ 20 | --worker-class sync \ 21 | --timeout ${TOKENSERVER_TIMEOUT-600} \ 22 | --workers ${WEB_CONCURRENCY-5}\ 23 | --graceful-timeout ${TOKENSERVER_GRACEFUL_TIMEOUT-660}\ 24 | --max-requests ${TOKENSERVER_MAX_REQUESTS-20000}\ 25 | --log-config "$_SETTINGS_FILE" 26 | ;; 27 | 28 | pypy) 29 | exec "$@" 30 | ;; 31 | 32 | test_all) 33 | $0 test_flake8 34 | $0 test_nose 35 | ;; 36 | 37 | test_flake8) 38 | echo "test - flake8" 39 | flake8 tokenserver 40 | ;; 41 | 42 | test_nose) 43 | echo "test - nose" 44 | nosetests --verbose --nocapture tokenserver/tests 45 | ;; 46 | 47 | *) 48 | echo "Unknown CMD, $1" 49 | exit 1 50 | ;; 51 | esac 52 | -------------------------------------------------------------------------------- /docs/RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release Procedure 2 | 3 | These steps outline what is required for a new production release. 4 | 5 | ## Requirements 6 | 7 | * All releases must be from the `master` branch. 8 | * All releases must pass CI testing. 9 | * All releases must be accompanied by a CHANGELOG.md that indicates bug fixes, new features and breaking changes. (This may be auto-generated by the [clog-cli](https://github.com/clog-tool/clog-cli) tool) 10 | 11 | ## Versions 12 | 13 | Versioning should use a `{major}.{minor}.{patch}` version scheme. New 14 | `{major}` versions are only issued if backwards compatibility is 15 | impacted. `{ minor}` involves introduction of new features. `{patch}` 16 | versions involve bug fixes only. 17 | 18 | ## Release Process 19 | 20 | 1. switch to `master` branch 21 | 1. `git pull` to ensure local copy is completely up-to-date 22 | 1. `git diff origin/master` to ensure there are no local staged or 23 | uncommited changes. 24 | 1. run local testing to ensure no artifacts or other local changes that might break tests have been introduced. 25 | 1. 
create the release branch: `git checkout -b release/{major}.{minor}` (note `{version}` will be used as short-hand for `{major}.{minor}`) 26 | 1. edit `version` in `setup.py` to reflect current release version. 27 | 1. run `clog --setversion {version}`, verify changes were properly accounted for in `CHANGELOG.md`. 28 | 1. verify that edited files appear in `git status` 29 | 1. `git commit -m "chore: tag {version}"` to commit the new version and record of changes. 30 | 1. `git tag -s -m "chore: tag {version}" {version}` to create a signed tag of the current HEAD commit for release 31 | 1. `git push --set-upstream origin release/{version}` to push the tags to the release branch 32 | 1. submit a github Pull Request to merge the release branch to master 33 | 1. Verify that the new tag, with no release info, appears in the github releases page. 34 | 1. Click the `Draft a new release` button 35 | 1. Enter the `{version}` for `Tag version` 36 | 1. Copy the relevant version data from `CHANGELOG.md` into the release description. 37 | 1. Once the release branch PR is approved and merged, click `Publish Release`. 38 | 1. File a bug for stage deployment in Bugzilla under `Cloud Services` product, `Operations: Deployment Requests` component. It should be titled `Please deploy tokenserver {version} to STAGE` and include the relevant version data from `CHANGELOG.md` 39 | 40 | 41 | At this point, QA should take over, verify Stage and create the production deployment ticket. 42 | -------------------------------------------------------------------------------- /docs/configuration.rst: -------------------------------------------------------------------------------- 1 | Configuration 2 | ============= 3 | 4 | The tokenserver uses a typical Paste `.ini` file for configuration, and 5 | an example file is located at `./etc/tokenserver-dev.ini`.
6 | 7 | Relevant sections: 8 | 9 | - tokenserver 10 | - tokenserver.secrets 11 | - endpoints 12 | - browserid 13 | 14 | Example:: 15 | 16 | [tokenserver] 17 | backend = tokenserver.assignment.fixednode.DefaultNodeAssignmentBackend 18 | service_entry = example.com 19 | applications = sync-1.0, aitc-1.0 20 | secrets.backend = mozsvc.secrets.FixedSecrets 21 | secrets.secrets = abcdef123456 22 | 23 | [endpoints] 24 | aitc-1.0 = {node}/1.0/{uid} 25 | 26 | [browserid] 27 | backend = tokenserver.verifiers.LocalVerifier 28 | audiences = * 29 | ssl_certificate = /path/to/cert.pem 30 | 31 | 32 | tokenserver 33 | ~~~~~~~~~~~ 34 | **backend** 35 | The class used to assign a node to the user. 36 | 37 | Possible values: 38 | 39 | - :class:`tokenserver.assignment.memorynode.MemoryNodeAssignmentBackend` 40 | - :class:`tokenserver.assignment.sqlnode.SQLNodeAssignment` 41 | 42 | See :ref:`nodeassign` for more information. 43 | 44 | **service_entry** 45 | The node returned for all users when using :class:`MemoryNodeAssignmentBackend` 46 | 47 | **applications** 48 | The list of supported services, separated by commas. A service is composed 49 | of a name and a version. 50 | 51 | **secrets.backend** 52 | One of the classes from :class:`mozsvc.secrets` to be used for managing 53 | node-specific secret keys. 54 | 55 | **sqluri** -- for SQL backends only 56 | The SQL URI for the User DB 57 | 58 | **create_tables** -- for SQL backends only 59 | If True, creates the tables in the DB when they don't exist 60 | 61 | **pool_size** -- for MySQL only 62 | The size of the pool to be maintained, defaults to 5. This is the largest 63 | number of connections that will be kept persistently in the pool. Note 64 | that the pool begins with no connections; once this number of connections 65 | is requested, that number of connections will remain. 
pool_size can be 66 | set to 0 to indicate no size limit 67 | 68 | **pool_recycle** -- for MySQL only 69 | If set to non -1, number of seconds between connection recycling, which 70 | means upon checkout, if this timeout is surpassed the connection will be 71 | closed and replaced with a newly opened connection. Defaults to -1. 72 | 73 | **pool_timeout** -- for MySQL only 74 | The number of seconds to wait before giving up on returning a connection. 75 | Defaults to 30. 76 | 77 | **max_overflow** -- for MySQL only 78 | The maximum overflow size of the pool. When the number of checked-out 79 | connections reaches the size set in pool_size, additional connections will 80 | be returned up to this limit. When those additional connections are returned 81 | to the pool, they are disconnected and discarded. It follows then that the 82 | total number of simultaneous connections the pool will allow is pool_size + 83 | max_overflow, and the total number of "sleeping" connections the pool will 84 | allow is pool_size. max_overflow can be set to -1 to indicate no overflow 85 | limit; no limit will be placed on the total number of concurrent connections. 86 | Defaults to 10. 87 | 88 | 89 | tokenserver.secrets 90 | ~~~~~~~~~~~~~~~~~~~ 91 | Configures a "secrets management" class that is used to determine the 92 | master token-signing secret for each node. 93 | 94 | **backend** 95 | The class used to manage per-node secret keys. 96 | 97 | Possible values: 98 | 99 | - :class:`mozsvc.secrets.Secrets` 100 | - :class:`mozsvc.secrets.FixedSecrets` 101 | - :class:`mozsvc.secrets.DerivedSecrets` 102 | 103 | **filename** -- for Secrets class only 104 | A file listing each available node along with its secret keys. 105 | 106 | **secrets** -- for FixedSecrets class only 107 | A list of hex-encoded secret keys, which will be used for all 108 | nodes. 109 | 110 | **master_secrets** -- for DerivedSecrets class only 111 | A list of hex-encoded secret keys. 
Unique secrets for each node will 112 | be derived from these master secrets using HKDF. 113 | 114 | 115 | endpoint 116 | ~~~~~~~~ 117 | List of patterns for the api endpoints. The variable is the application name, 118 | the value is the pattern. When this section is not provided, and an SQL 119 | backend is provided, falls back to using the patterns table in the SQL DB. 120 | 121 | Patterns are used to find the api endpoint for a given user for a given service. 122 | 123 | For example, *aitc-1.0 = {node}/1.0/{uid}* means that the api end point for the 124 | user of id **1** for the aitc service will be something like: 125 | 126 | http://some.node/1.0/1 127 | 128 | Variables that get replaced: 129 | 130 | - node: the service node root url 131 | - uid: the user id for that service 132 | - service: the service name (name+version) 133 | 134 | 135 | browserid 136 | ~~~~~~~~~ 137 | **backend** 138 | The class used to verify a Browser-ID assertion 139 | 140 | Possible values: 141 | 142 | - :class:`tokenserver.verifiers.RemoteVerifier` 143 | - :class:`tokenserver.verifiers.LocalVerifier` 144 | 145 | See :ref:`verifiers` for more information. 146 | 147 | **audience** 148 | A whitelist of supported audiences. Ordinarily this should be 149 | set to the publicly-visible hostname of the server. A value of 150 | "*" will match any audience, and may be useful for testing purposes. 151 | 152 | **ssl_certificate** 153 | How to validate the SSL certificate of the server when fetching its 154 | information to verify client assertions. 155 | 156 | Possible values (defaults to **True**): 157 | 158 | True 159 | validate server's certificate using default Certificate Authorities 160 | False 161 | to disable server's certificate validation. 162 | this is not recommended since it would allow for man in the middle 163 | attacks 164 | /path/pointing/to/your/servers/certificate 165 | to validate against a custom CA bundle.
This is what you want to do if 166 | you use self-signed certificates 167 | -------------------------------------------------------------------------------- /etc/tokenserver-dev.ini: -------------------------------------------------------------------------------- 1 | # application configuration 2 | [global] 3 | logger_name = tokenserver 4 | debug = true 5 | 6 | # token server configuration 7 | [tokenserver] 8 | backend = tokenserver.assignment.memorynode.MemoryNodeAssignmentBackend 9 | applications = sync-1.5 10 | secrets_file = tokenserver/tests/secrets 11 | service_entry = https://example.com 12 | spanner_entry = https://spanner.example.com 13 | spanner_node_id = 800 14 | # this can be used to lock down the system to only existing accounts 15 | #allow_new_users = true 16 | migrate_new_user_percentage=0 17 | 18 | [endpoints] 19 | sync-1.5 = {node}/1.5/{uid} 20 | 21 | [browserid] 22 | backend = tokenserver.verifiers.LocalBrowserIdVerifier 23 | audiences = https://token.services.mozilla.com 24 | 25 | # Paster configuration for Pyramid 26 | [filter:catcherror] 27 | paste.filter_app_factory = mozsvc.middlewares:make_err_mdw 28 | 29 | [pipeline:main] 30 | pipeline = catcherror 31 | pyramidapp 32 | 33 | [app:pyramidapp] 34 | use = egg:tokenserver 35 | 36 | pyramid.reload_templates = true 37 | pyramid.debug_authorization = false 38 | pyramid.debug_notfound = false 39 | pyramid.debug_routematch = false 40 | pyramid.debug_templates = true 41 | pyramid.default_locale_name = en 42 | pyramid.includes = pyramid_debugtoolbar 43 | 44 | # need to do this programmatically 45 | mako.directories = cornice:templates 46 | 47 | [server:main] 48 | use = egg:Paste#http 49 | host = 0.0.0.0 50 | port = 5000 51 | 52 | [metlog] 53 | backend = mozsvc.metrics.MetlogPlugin 54 | enabled = true 55 | sender_class = metlog.senders.StdOutSender 56 | -------------------------------------------------------------------------------- /loadtest/Makefile: 
-------------------------------------------------------------------------------- 1 | SERVER_URL = https://token.stage.mozaws.net 2 | 3 | # Hackety-hack around OSX system python bustage. 4 | # The need for this should go away with a future osx/xcode update. 5 | ARCHFLAGS = -Wno-error=unused-command-line-argument-hard-error-in-future 6 | 7 | INSTALL = ARCHFLAGS=$(ARCHFLAGS) ../local/bin/pip install 8 | 9 | .PHONY: build test bench 10 | 11 | build: 12 | $(INSTALL) pexpect 13 | $(INSTALL) https://github.com/mozilla-services/loads/archive/master.zip 14 | $(INSTALL) -U 'requests<2.13' 15 | 16 | # Update https://mock-oauth-stage.dev.lcip.org with latest config. 17 | mock-oauth-stage: 18 | aws cloudformation deploy --template-file ./mock-oauth-cfn.yml --stack-name mock-oauth-stage --capabilities CAPABILITY_IAM 19 | 20 | # Run a quick sanity-check on the server's advertised config. 21 | config-check: 22 | ../local/bin/loads-runner --config=./config/test.ini --server-url=$(SERVER_URL) loadtest.NodeAssignmentTest.test_server_config 23 | 24 | # Run a single test from the local machine, for sanity-checking. 25 | test: 26 | ../local/bin/loads-runner --config=./config/test.ini --server-url=$(SERVER_URL) loadtest.NodeAssignmentTest.test_realistic 27 | 28 | # Run a fuller bench suite from the local machine. 29 | bench: 30 | ../local/bin/loads-runner --config=./config/bench.ini --server-url=$(SERVER_URL) loadtest.NodeAssignmentTest.test_realistic 31 | 32 | # Run a full bench, by submitting to broker in AWS. 33 | megabench: 34 | ../local/bin/loads-runner --config=./config/megabench.ini --user-id=$(USER) --server-url=$(SERVER_URL) loadtest.NodeAssignmentTest.test_realistic 35 | 36 | # Purge any currently-running loadtest runs. 
37 | purge: 38 | ../local/bin/loads-runner --config=./config/megabench.ini --purge-broker 39 | -------------------------------------------------------------------------------- /loadtest/README.txt: -------------------------------------------------------------------------------- 1 | This directory contains some very simple loadtests, written using 2 | the "loads" framework: 3 | 4 | https://github.com/mozilla/loads 5 | 6 | 7 | To run them, you will need the following dependencies: 8 | 9 | * Python development files (e.g. python-dev or python-devel package) 10 | * Virtualenv (e.g. python-virtualenv package) 11 | * ZeroMQ development files (e.g. libzmq-dev package) 12 | * (for megabench) ssh access to the mozilla loads cluster 13 | 14 | You'll also need to configure the server under test to talk to a mock 15 | FxA OAuth verifier. You can use this preconfigured one, which proxies 16 | valid tokens through to the FxA stage environment: 17 | 18 | https://mock-oauth-stage.dev.lcip.org 19 | 20 | Or you can deploy one for your own use using the `mock-oauth-cfn.yml` 21 | CloudFormation template, like this: 22 | 23 | $> aws cloudformation deploy \ 24 | --template-file=mock-oauth-cfn.yml \ 25 | --stack-name my-mock-oauth-stack \ 26 | --capabilities CAPABILITY_IAM \ 27 | --parameter-overrides \ 28 | DomainName=my-mock-oauth.dev.lcip.org 29 | 30 | Then do the following: 31 | 32 | $> make build # installs local environment with all dependencies 33 | $> make test # runs a single test, to check that everything's working 34 | $> make bench # runs a longer, higher-concurrency test. 
35 | $> make megabench # runs a really-long, really-high-concurrent test 36 | # using https://loads.services.mozilla.com 37 | 38 | To hit a specific server you can specify the SERVER_URL make variable, like 39 | this: 40 | 41 | $> make test SERVER_URL=https://token.stage.mozaws.net 42 | 43 | -------------------------------------------------------------------------------- /loadtest/config/bench.ini: -------------------------------------------------------------------------------- 1 | [loads] 2 | users = 20 3 | duration = 300 4 | -------------------------------------------------------------------------------- /loadtest/config/megabench.ini: -------------------------------------------------------------------------------- 1 | [loads] 2 | users = 20 3 | duration = 1800 4 | include_file = ./loadtest.py 5 | python_dep = PyBrowserID 6 | agents = 5 7 | detach = true 8 | observer = irc 9 | ssh = ubuntu@loads.services.mozilla.com 10 | broker = tcp://172.31.44.86:7780 11 | -------------------------------------------------------------------------------- /loadtest/config/test.ini: -------------------------------------------------------------------------------- 1 | [loads] 2 | hits = 1 3 | users = 1 4 | -------------------------------------------------------------------------------- /loadtest/loadtest.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import json 4 | import time 5 | import uuid 6 | import random 7 | import urlparse 8 | 9 | import browserid 10 | import browserid.jwt 11 | from browserid.tests.support import make_assertion 12 | 13 | from tokenserver.verifiers import DEFAULT_OAUTH_SCOPE 14 | 15 | from loads import TestCase 16 | 17 | 18 | ONE_YEAR = 60 * 60 * 24 * 365 19 | 20 | 21 | # We use a custom mockmyid site to synthesize valid assertions. 22 | # It's hosted in a static S3 bucket so we don't swamp the live mockmyid server. 
23 | MOCKMYID_DOMAIN = "mockmyid.s3-us-west-2.amazonaws.com" 24 | MOCKMYID_PRIVATE_KEY = browserid.jwt.DS128Key({ 25 | "algorithm": "DS", 26 | "x": "385cb3509f086e110c5e24bdd395a84b335a09ae", 27 | "y": "738ec929b559b604a232a9b55a5295afc368063bb9c20fac4e53a74970a4db795" 28 | "6d48e4c7ed523405f629b4cc83062f13029c4d615bbacb8b97f5e56f0c7ac9bc1" 29 | "d4e23809889fa061425c984061fca1826040c399715ce7ed385c4dd0d40225691" 30 | "2451e03452d3c961614eb458f188e3e8d2782916c43dbe2e571251ce38262", 31 | "p": "ff600483db6abfc5b45eab78594b3533d550d9f1bf2a992a7a8daa6dc34f8045a" 32 | "d4e6e0c429d334eeeaaefd7e23d4810be00e4cc1492cba325ba81ff2d5a5b305a" 33 | "8d17eb3bf4a06a349d392e00d329744a5179380344e82a18c47933438f891e22a" 34 | "eef812d69c8f75e326cb70ea000c3f776dfdbd604638c2ef717fc26d02e17", 35 | "q": "e21e04f911d1ed7991008ecaab3bf775984309c3", 36 | "g": "c52a4a0ff3b7e61fdf1867ce84138369a6154f4afa92966e3c827e25cfa6cf508b" 37 | "90e5de419e1337e07a2e9e2a3cd5dea704d175f8ebf6af397d69e110b96afb17c7" 38 | "a03259329e4829b0d03bbc7896b15b4ade53e130858cc34d96269aa89041f40913" 39 | "6c7242a38895c9d5bccad4f389af1d7a4bd1398bd072dffa896233397a", 40 | }) 41 | 42 | 43 | # There are three different kinds of test, one of which is randomly 44 | # selected for each run: 45 | # 46 | # - get a token for a previously-seen user 47 | # - get a token for a never-before-seen user 48 | # - fail to get a token using an invalid assertion 49 | # 50 | # The first is the default operation and by far the most likely. 51 | # The below options control what percentage of the requests are each 52 | # of the other types. 53 | 54 | PERCENT_NEW_USER = 0.3 # point-three of one percent; yes really 55 | PERCENT_BAD_USER = 1.0 56 | 57 | # There are two different authentication mechanisms that can be used, 58 | # either BrowserID assertions or FxA OAuth tokens. The below option 59 | # controls what proportion of requests are the later. 
60 | 61 | PERCENT_OAUTH = 5.0 # we expect very low OAuth traffic initially 62 | 63 | 64 | class NodeAssignmentTest(TestCase): 65 | """This tests the assertion verification + node retrieval. 66 | 67 | It sends a combination of existing-user, new-user, and invalid-assertion 68 | requests and does some basic sanity-checking on the results. 69 | """ 70 | 71 | server_url = 'https://token.stage.mozaws.net' 72 | 73 | def setUp(self): 74 | self.endpoint = urlparse.urljoin(self.server_url, '/1.0/sync/1.5') 75 | self.audience = self.server_url.rstrip('/') 76 | 77 | def test_server_config(self): 78 | """Sanity-check server config against local settings. 79 | 80 | This is a quick test that fetches config from the target server 81 | and checks it against our local settings. It will fail if there's 82 | a mis-match that would cause our loatests to fail. 83 | """ 84 | res = self.session.get(self.server_url + '/') 85 | self.assertEquals(res.status_code, 200) 86 | server_config = res.json() 87 | try: 88 | allowed_issuers = server_config['browserid']['allowed_issuers'] 89 | if allowed_issuers is not None: 90 | if MOCKMYID_DOMAIN not in allowed_issuers: 91 | msg = 'Server does not allow browserid assertions from {}' 92 | raise AssertionError(msg.format(MOCKMYID_DOMAIN)) 93 | except KeyError: 94 | pass 95 | if server_config['oauth']['default_issuer'] != MOCKMYID_DOMAIN: 96 | msg = 'OAuth default_issuer does not match {}' 97 | raise AssertionError(msg.format(MOCKMYID_DOMAIN)) 98 | if server_config['oauth']['scope'] != DEFAULT_OAUTH_SCOPE: 99 | msg = 'OAuth scope does not match {}' 100 | raise AssertionError(msg.format(DEFAULT_OAUTH_SCOPE)) 101 | 102 | def test_realistic(self): 103 | """Run a test scencario based on 'realistic' user behaviour. 104 | 105 | The 'realistic' part depends on correctly configuring the ratio 106 | of various kinds of events in the global variables above, to match 107 | observed user behaviour in production. 
108 | """ 109 | if self._flip_a_coin(PERCENT_BAD_USER): 110 | self._test_bad_auth() 111 | elif self._flip_a_coin(PERCENT_NEW_USER): 112 | self._test_new_user() 113 | else: 114 | self._test_old_user() 115 | 116 | def _make_assertion(self, email, **kwds): 117 | if "audience" not in kwds: 118 | kwds["audience"] = self.audience 119 | if "exp" not in kwds: 120 | kwds["exp"] = int((time.time() + ONE_YEAR) * 1000) 121 | if "issuer" not in kwds: 122 | kwds["issuer"] = MOCKMYID_DOMAIN 123 | if "issuer_keypair" not in kwds: 124 | kwds["issuer_keypair"] = (None, MOCKMYID_PRIVATE_KEY) 125 | return make_assertion(email, **kwds) 126 | 127 | def _make_oauth_token(self, user=None, status=200, **fields): 128 | # For mock oauth tokens, we bundle the desired status code 129 | # and response body into a JSON blob for the mock verifier 130 | # to echo back to us. 131 | body = {} 132 | if status < 400: 133 | if user is None: 134 | raise ValueError("Must specify user for valid oauth token") 135 | if "scope" not in fields: 136 | fields["scope"] = [DEFAULT_OAUTH_SCOPE] 137 | if "client_id" not in fields: 138 | fields["client_id"] = "x" 139 | if user is not None: 140 | parts = user.split("@", 1) 141 | if len(parts) == 1: 142 | body["user"] = user 143 | else: 144 | body["user"] = parts[0] 145 | body["issuer"] = parts[1] 146 | body.update(fields) 147 | return json.dumps({ 148 | "status": status, 149 | "body": body 150 | }) 151 | 152 | def _do_token_exchange_via_browserid(self, assertion, status=200): 153 | headers = {'Authorization': 'BrowserID %s' % assertion} 154 | res = self.session.get(self.endpoint, headers=headers) 155 | self.assertEquals(res.status_code, status) 156 | return res 157 | 158 | def _do_token_exchange_via_oauth(self, token, status=200): 159 | headers = {'Authorization': 'Bearer %s' % token} 160 | res = self.session.get(self.endpoint, headers=headers) 161 | self.assertEquals(res.status_code, status) 162 | return res 163 | 164 | def _do_token_exchange(self, email): 165 | if 
self._flip_a_coin(PERCENT_OAUTH): 166 | self._do_token_exchange_via_oauth(self._make_oauth_token(email)) 167 | else: 168 | self._do_token_exchange_via_browserid(self._make_assertion(email)) 169 | 170 | def _test_old_user(self): 171 | # Get a token for an "existing" user account. 172 | # There's no guarantee it will actually exist, but we pull from a 173 | # fixed pool of user ids so they should get created and persist 174 | # over time. 175 | uid = random.randint(1, 1000000) 176 | email = "user{uid}@{host}".format(uid=uid, host=MOCKMYID_DOMAIN) 177 | self._do_token_exchange(email) 178 | 179 | def _test_new_user(self): 180 | # Get a token for a never-before-seen user account. 181 | uid = str(uuid.uuid1()) 182 | email = "loadtest-{uid}@{host}".format(uid=uid, host=MOCKMYID_DOMAIN) 183 | self._do_token_exchange(email) 184 | 185 | def _test_bad_auth(self): 186 | if self._flip_a_coin(PERCENT_OAUTH): 187 | self._test_bad_oauth_token() 188 | else: 189 | self._test_bad_assertion() 190 | 191 | def _test_bad_assertion(self): 192 | uid = random.randint(1, 1000000) 193 | # Try to get a token using an invalid assertion. 194 | # Obviously, this should result in a 401. 
195 | if self._flip_a_coin(25): 196 | # expired assertion 197 | assertion = self._make_assertion( 198 | "{uid}@{host}".format(uid=uid, host=MOCKMYID_DOMAIN), 199 | exp=int(time.time() - ONE_YEAR) * 1000 200 | ) 201 | elif self._flip_a_coin(25): 202 | # email/issuer mismatch 203 | assertion = self._make_assertion( 204 | "{uid}@hotmail.com".format(uid=uid) 205 | ) 206 | elif self._flip_a_coin(25): 207 | # invalid issuer privkey 208 | assertion = self._make_assertion( 209 | "{uid}@{host}".format(uid=uid, host=MOCKMYID_DOMAIN), 210 | issuer="api.accounts.firefox.com" 211 | ) 212 | else: 213 | # invalid audience 214 | assertion = self._make_assertion( 215 | "{uid}@{host}".format(uid=uid, host=MOCKMYID_DOMAIN), 216 | audience="http://123done.org" 217 | ) 218 | self._do_token_exchange_via_browserid(assertion, 401) 219 | 220 | def _test_bad_oauth_token(self): 221 | uid = random.randint(1, 1000000) 222 | # Try to get a token using an invalid OAuth token. 223 | # Obviously, this should result in a 401. 224 | if self._flip_a_coin(50): 225 | # invalid token 226 | token = self._make_oauth_token(status=400, errno=108) 227 | else: 228 | # invalid scope 229 | token = self._make_oauth_token( 230 | user=str(uid), 231 | scope=["unrelated", "scopes"], 232 | ) 233 | self._do_token_exchange_via_oauth(token, 401) 234 | 235 | def _flip_a_coin(self, percent=50): 236 | # Return True on 'percent' percent of calls. 237 | return (random.random() * 100) < percent 238 | -------------------------------------------------------------------------------- /loadtest/mock-oauth-cfn.yml: -------------------------------------------------------------------------------- 1 | # 2 | # This is a CloudFormation script to deploy a tiny lambda+API-gateway 3 | # service that can mock out FxA OAuth verification responses. 4 | # 5 | # When deployed, this API will proxy all HTTP requests through to a live 6 | # FxA OAuth server except that `POST /v1/verify` will attempt to parse 7 | # the submitted token as JSON. 
If it succeeds, then it will use the `status`
# and `body` fields from that JSON to return a mocked response.
#
# The idea is to let this API be used as a stand-in for the real FxA OAuth
# server, and have it function correctly for manual testing with real accounts,
# but then also to be able to make fake OAuth tokens during a loadtest, like
# this:
#
#    requests.get("https://mock-oauth-stage.dev.lcip.org", json={
#        "token": json.dumps({
#            "status": 200,
#            "body": {
#                "user": "loadtest123456",
#                "scope": ["myscope"],
#                "client_id": "my_client_id",
#            }
#        })
#    })
#
# Or to be able to simulate OAuth token failures like this:
#
#    requests.get("https://mock-oauth-stage.dev.lcip.org", json={
#        "token": json.dumps({
#            "status": 400,
#            "body": {
#                "errno": "108",
#                "message": "invalid token",
#            }
#        })
#    })
#
# You'll notice that there's some javascript written inline in this yaml file.
# That does make it a little bit annoying to edit, but that's outweighed by
# the advantage of having a single file that can be deployed with a single
# command with no pre-processing palaver.
#
Parameters:
  ProxyTarget:
    Type: "String"
    Default: "oauth.stage.mozaws.net"
    Description: "The live OAuth server to which un-mocked requests should be proxied"
  MockIssuer:
    Type: "String"
    Default: "mockmyid.s3-us-west-2.amazonaws.com"
    Description: "The issuer domain to use for mock tokens"
  DomainName:
    Type: "String"
    Default: "mock-oauth-stage.dev.lcip.org"
    Description: "The domain name at which to expose the API"
  CertificateArn:
    Type: "String"
    Default: "arn:aws:acm:us-east-1:927034868273:certificate/675e0ac8-23af-4153-8295-acb28ccc9f0f"
    Description: "The certificate to use with $DomainName"
  HostedZoneName:
    Type: "String"
    Default: "lcip.org"
    Description: "The hosted zone in which to create a DNS record"
  Owner:
    Type: "String"
    Default: "rfkelly@mozilla.com"
    Description: "Email address of owner to tag resources with"

Resources:
  Handler:
    Type: "AWS::Lambda::Function"
    Properties:
      Description: "Mock FxA OAuth verifier"
      Handler: "index.handler"
      Role: !GetAtt HandlerRole.Arn
      Tags:
        - Key: "Owner"
          Value: !Ref Owner
      Runtime: "nodejs6.10"
      Code:
        ZipFile: !Sub |-

          const https = require('https');
          const url = require('url');

          // Forward the incoming API-gateway event to the live OAuth
          // server, relaying status, headers and body back to the caller.
          function proxy(event, context, callback) {
            const output = []
            const req = https.request({
              hostname: "${ProxyTarget}",
              // BUGFIX: this option was misspelled "post", so it was
              // silently ignored and only worked because https defaults
              // to port 443 anyway.
              port: 443,
              path: url.format({
                pathname: event.path,
                query: event.queryStringParameters
              }),
              method: event.httpMethod,
            }, res => {
              res.setEncoding('utf8');
              res.on('data', d => {
                output.push(d);
              })
              res.on('end', () => {
                callback(null, {
                  statusCode: res.statusCode,
                  headers: res.headers,
                  body: output.join('')
                });
              })
            });
            req.on('error', e => {
              callback(e);
            })
            if (event.body) {
              req.write(event.body, 'utf8');
            }
            req.end();
          }

          const HANDLERS = {
            'POST:/v1/verify': function(event, context, callback) {
              try {
                const token = JSON.parse(event.body).token;
                const mockResponse = JSON.parse(token);
                const mockStatus = mockResponse.status || 200;
                const mockBody = mockResponse.body || {};
                // Ensure that successful responses always claim to be from
                // the mock issuer. Otherwise you could use a mock token to
                // access any account, even accounts backed by
                // accounts.firefox.com!
                if (mockStatus < 400) {
                  mockBody.issuer = "${MockIssuer}";
                }
                // Return the mocked response from the token.
                return callback(null, {
                  statusCode: mockStatus,
                  headers: {
                    "content-type": "application/json"
                  },
                  body: JSON.stringify(mockBody)
                });
              } catch (e) {
                // If it's not a mock token, forward to real server.
                return proxy(event, context, callback);
              }
            }
          }

          exports.handler = (event, context, callback) => {
            const h = HANDLERS[event.httpMethod + ':' + event.path] || proxy;
            return h(event, context, callback);
          };

  HandlerRole:
    Type: "AWS::IAM::Role"
    Properties:
      AssumeRolePolicyDocument:
        Version: "2012-10-17"
        Statement:
          - Effect: "Allow"
            Principal:
              Service:
                - "lambda.amazonaws.com"
            Action:
              - "sts:AssumeRole"
      ManagedPolicyArns:
        - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"

  HandlerPermission:
    Type: "AWS::Lambda::Permission"
    Properties:
      Action: "lambda:invokeFunction"
      FunctionName: !GetAtt Handler.Arn
      Principal: "apigateway.amazonaws.com"
      SourceArn: !Sub "arn:aws:execute-api:${AWS::Region}:${AWS::AccountId}:${API}/*"

  API:
    Type: "AWS::ApiGateway::RestApi"
    Properties:
      Description: "Mock FxA OAuth API"
Name: !Sub "${AWS::StackName}-mock-fxa-oauth" 179 | FailOnWarnings: true 180 | 181 | APIResource: 182 | Type: "AWS::ApiGateway::Resource" 183 | Properties: 184 | RestApiId: !Ref API 185 | ParentId: !GetAtt API.RootResourceId 186 | PathPart: "{proxy+}" 187 | 188 | APIMethod: 189 | Type: "AWS::ApiGateway::Method" 190 | DependsOn: 191 | - HandlerPermission 192 | Properties: 193 | AuthorizationType: "NONE" 194 | HttpMethod: "ANY" 195 | ResourceId: !Ref APIResource 196 | RestApiId: !Ref API 197 | Integration: 198 | Type: "AWS_PROXY" 199 | IntegrationHttpMethod: "POST" 200 | Uri: !Sub "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${Handler.Arn}/invocations" 201 | 202 | APIDeployment: 203 | Type: "AWS::ApiGateway::Deployment" 204 | DependsOn: 205 | - APIMethod 206 | Properties: 207 | RestApiId: !Ref API 208 | StageName: "main" 209 | 210 | APIDomainName: 211 | Type: "AWS::ApiGateway::DomainName" 212 | Properties: 213 | DomainName: !Ref DomainName 214 | CertificateArn: !Ref CertificateArn 215 | 216 | APIDomainMapping: 217 | Type: "AWS::ApiGateway::BasePathMapping" 218 | Properties: 219 | DomainName: !Ref APIDomainName 220 | RestApiId: !Ref API 221 | Stage: "main" 222 | 223 | APIDNSRecord: 224 | Type : "AWS::Route53::RecordSet" 225 | Properties : 226 | HostedZoneName : !Sub "${HostedZoneName}." 227 | Name : !Sub "${DomainName}." 228 | Type : "A" 229 | AliasTarget: 230 | DNSName: !GetAtt APIDomainName.DistributionDomainName 231 | HostedZoneId: "Z2FDTNDATAQYW2" # Published ZoneId for CloudFront 232 | -------------------------------------------------------------------------------- /loadtest/populate-db.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python
# script to populate the database with records
import time
import random
from wimms.sql import SQLMetadata, _CREATE_USER_RECORD


def populate_db(sqluri, service, nodes, user_range, host="loadtest.local"):
    """Create a bunch of users for the given service.

    The resulting users will have an address in the form of <uid>@<host>,
    where uid is an int from 0 to :param user_range:.

    This function is useful to populate the database during the loadtest.
    It allows testing a specific behaviour: making sure that we are not
    reading the values from memory when retrieving the node information.

    :param sqluri: the sqluri string used to connect to the database
    :param service: the service to assign the users to.
    :param nodes: the list of available nodes for this service
    :param user_range: the number of users to create
    :param host: the hostname to use when generating users
    """
    # Fields shared by every created record; only email/node vary per user.
    params = {
        'service': service,
        'generation': 0,
        'client_state': '',
        'timestamp': int(time.time() * 1000),
    }
    # for each user in the range, assign them to a randomly-chosen node
    md = SQLMetadata(sqluri, create_tables=True)
    for idx in range(0, user_range):
        email = "%s@%s" % (idx, host)
        node = random.choice(nodes)
        md._safe_execute(_CREATE_USER_RECORD, email=email, node=node, **params)


def main():
    """Read the arguments from the command line and pass them to the
    populate_db function.
41 | 42 | Example use: 43 | 44 | python populate-db.py sqlite:////tmp/tokenserver aitc\ 45 | node1,node2,node3,node4,node5,node6 100 46 | """ 47 | import sys 48 | if len(sys.argv) < 5: 49 | raise ValueError('You need to specify (in this order) sqluri, ' 50 | 'service, nodes (comma separated) and user_range') 51 | # transform the values from the cli to python objects 52 | sys.argv[3] = sys.argv[3].split(',') # comma separated => list 53 | sys.argv[4] = int(sys.argv[4]) 54 | 55 | populate_db(*sys.argv[1:]) 56 | print("created {nb_users} users".format(nb_users=sys.argv[4])) 57 | 58 | 59 | if __name__ == '__main__': 60 | main() 61 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | alembic==1.0.9 2 | asn1crypto==0.24.0 3 | boto==2.49.0 4 | certifi==2019.3.9 5 | # Handle older cffi versions in circleci 6 | cffi==1.14.0; platform_python_implementation == "CPython" 7 | chardet==3.0.4 8 | configparser==3.7.4 9 | cornice==3.5.1 10 | cryptography==41.0.3 11 | enum34==1.1.6 12 | gevent==1.4.0 13 | greenlet==0.4.13 14 | gunicorn==19.10.0 15 | hawkauthlib==2.0.0 16 | hupper==1.6.1 17 | idna==2.8 18 | ipaddress==1.0.22 19 | konfig==1.1 20 | Mako==1.0.9 21 | MarkupSafe==1.1.1 22 | mozsvc==0.10 23 | mysqlclient==1.4.6 24 | Paste==3.0.8 25 | PasteDeploy==2.0.1 26 | plaster==1.0 27 | plaster-pastedeploy==0.7 28 | PyBrowserID==0.14.0 29 | pycparser==2.19 30 | PyFxA==0.7.7 31 | PyJWT==1.7.1 32 | PyMySQL==0.9.3 33 | pymysql-sa==1.0 34 | pyramid==1.10.4 35 | python-dateutil==2.8.0 36 | python-editor==1.0.4 37 | repoze.lru==0.7 38 | requests==2.22.0 39 | simplejson==3.16.0 40 | six==1.14.0 41 | SQLAlchemy==1.3.3 42 | testfixtures==6.7.0 43 | tokenlib==2.0.0 44 | translationstring==1.3 45 | urllib3==1.25.2 46 | venusian==1.2.0 47 | WebOb==1.8.5 48 | zope.deprecation==4.4.0 49 | zope.interface==4.6.0 50 | 
-------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | from setuptools import setup, find_packages 5 | 6 | 7 | def load_req(filename): 8 | """Load a pip style requirement file.""" 9 | reqs = [] 10 | with open(filename, "r") as file: 11 | for line in file.readlines(): 12 | line = line.strip() 13 | if line.startswith("-r"): 14 | content = load_req(line.split(' ')[1]) 15 | reqs.extend(content) 16 | continue 17 | reqs.append(line) 18 | return reqs 19 | 20 | 21 | requires = load_req("requirements.txt") 22 | tests_require = load_req("dev-requirements.txt") 23 | 24 | 25 | setup(name='tokenserver', 26 | version='1.6.1', 27 | packages=find_packages(), 28 | include_package_data=True, 29 | zip_safe=False, 30 | entry_points="""\ 31 | [paste.app_factory] 32 | main = tokenserver:main 33 | """, 34 | install_requires=requires, 35 | tests_require=tests_require, 36 | test_suite='tokenserver.tests') 37 | -------------------------------------------------------------------------------- /tokenserver/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | # This Source Code Form is subject to the terms of the Mozilla Public 3 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
5 | 6 | import sys 7 | import re 8 | import fnmatch 9 | 10 | 11 | runner = sys.argv[0] 12 | 13 | 14 | import logging 15 | from collections import defaultdict 16 | 17 | from tokenserver.assignment import INodeAssignment 18 | 19 | from mozsvc.config import get_configurator 20 | from mozsvc.plugin import load_and_register, load_from_settings 21 | 22 | 23 | logger = logging.getLogger('tokenserver') 24 | 25 | 26 | def includeme(config): 27 | settings = config.registry.settings 28 | config.include("cornice") 29 | config.include("mozsvc") 30 | config.include("tokenserver.tweens") 31 | config.scan("tokenserver.views") 32 | 33 | # initializes the assignment backend 34 | load_and_register("tokenserver", config) 35 | 36 | # initialize the browserid backend if it exists 37 | if "browserid.backend" in settings: 38 | load_and_register("browserid", config) 39 | 40 | # initialize the oauth backend if it exists 41 | if "oauth.backend" in settings: 42 | load_and_register("oauth", config) 43 | 44 | # initialize node-type classifier 45 | load_node_type_classifier(config) 46 | 47 | # load apps and set them up back in the setting 48 | key = 'tokenserver.applications' 49 | applications = defaultdict(list) 50 | for element in settings.get(key, '').split(','): 51 | element = element.strip() 52 | if element == '': 53 | continue 54 | element = element.split('-') 55 | if len(element) != 2: 56 | continue 57 | app, version = element 58 | applications[app].append(version) 59 | 60 | settings[key] = applications 61 | 62 | # load the secrets backend, with a b/w-compat hook 63 | # for the old 'secrets_file' setting. 
64 | secrets_file = settings.get('tokenserver.secrets_file') 65 | if secrets_file is not None: 66 | if 'tokenserver.secrets.backend' in settings: 67 | raise ValueError("can't use secrets_file with secrets.backend") 68 | if isinstance(secrets_file, basestring): 69 | secrets_file = secrets_file.split() 70 | settings['tokenserver.secrets.backend'] = 'mozsvc.secrets.Secrets' 71 | settings['tokenserver.secrets.filename'] = secrets_file 72 | secrets = load_from_settings('tokenserver.secrets', settings) 73 | settings['tokenserver.secrets'] = secrets 74 | 75 | # ensure the metrics_id_secret_key is an ascii string. 76 | id_key = settings.get('fxa.metrics_uid_secret_key') 77 | if id_key is None: 78 | logger.warning( 79 | 'fxa.metrics_uid_secret_key is not set. ' 80 | 'This will allow PII to be more easily identified') 81 | elif isinstance(id_key, unicode): 82 | settings['fxa.metrics_uid_secret_key'] = id_key.encode('ascii') 83 | 84 | read_endpoints(config) 85 | 86 | 87 | class LazyDict(dict): 88 | def __init__(self, callable): 89 | self.callable = callable 90 | self._loaded = False 91 | 92 | def __getitem__(self, name): 93 | if not self._loaded: 94 | self.callable(self) 95 | self._loaded = True 96 | return super(LazyDict, self).__getitem__(name) 97 | 98 | def __iter__(self): 99 | if not self._loaded: 100 | self.callable(self) 101 | self._loaded = True 102 | return super(LazyDict, self).__iter__() 103 | 104 | def keys(self): 105 | if not self._loaded: 106 | self.callable(self) 107 | self._loaded = True 108 | return super(LazyDict, self).keys() 109 | 110 | 111 | def load_endpoints(mapping, config): 112 | patterns = dict([(key.split('.', 1)[-1], value) 113 | for key, value in config.registry.settings.items() 114 | if key.startswith('endpoints.')]) 115 | mapping.update(patterns) 116 | 117 | if len(mapping) == 0: 118 | # otherwise, try to ask the assignment backend the list of 119 | # endpoints 120 | backend = config.registry.getUtility(INodeAssignment) 121 | 
mapping.update(backend.get_patterns()) 122 | 123 | 124 | def read_endpoints(config): 125 | """If there is a section "endpoints", load it the format is 126 | service-version = pattern, and a dict will be built with those. 127 | """ 128 | def _read(mapping): 129 | load_endpoints(mapping, config) 130 | 131 | config.registry['endpoints_patterns'] = LazyDict(_read) 132 | 133 | 134 | def load_node_type_classifier(config): 135 | """Load fnmatch-style patterns for classifying node type. 136 | 137 | Given entries in a config file like this: 138 | 139 | [tokenserver] 140 | node_type_patterns = 141 | foo:*.foo.com 142 | bar:*bar* 143 | default:* 144 | 145 | Returns a classifier function that will take a string argument and 146 | return the name of the first matching pattern, or None if no patterns 147 | matched to string. Patterns are matched in the order specified in the 148 | config file. 149 | """ 150 | settings = config.registry.settings 151 | patterns = settings.get('tokenserver.node_type_patterns', ()) 152 | if isinstance(patterns, basestring): 153 | raise ValueError( 154 | "Expected 'tokenserver.node_type_patterns' to be a list") 155 | patterns = [p.split(":", 1) for p in patterns] 156 | # For easy matching, compile all the patterns together into a single regex. 157 | # A good regex engine would turn this into a single FSA to efficiently test 158 | # all patterns simultaneously. Python's regex engine will do a left-to-right 159 | # backtracking search, which is also fine for our purposes. 160 | regexes = [] 161 | for label, pattern in patterns: 162 | regexes.append("(?P<{}>{})".format(label, fnmatch.translate(pattern))) 163 | try: 164 | regex = re.compile("|".join(regexes)) 165 | except re.error: 166 | raise ValueError("Invalid node_type_patterns") 167 | 168 | def classify(node): 169 | # N.B. `match` always matches from the start of the string. 
170 | m = regex.match(node) 171 | if m is None: 172 | return None 173 | return m.lastgroup 174 | 175 | settings['tokenserver.node_type_classifier'] = classify 176 | return classify 177 | 178 | 179 | def main(global_config, **settings): 180 | config = get_configurator(global_config, **settings) 181 | config.include(includeme) 182 | return config.make_wsgi_app() 183 | -------------------------------------------------------------------------------- /tokenserver/assignment/__init__.py: -------------------------------------------------------------------------------- 1 | from zope.interface import Interface 2 | 3 | 4 | class INodeAssignment(Interface): 5 | """Interface definition for backend node-assignment db.""" 6 | 7 | def should_allocate_to_spanner(self, email): 8 | """Determine if this user is routed to spanner 9 | 10 | """ 11 | 12 | def get_user(self, service, email): 13 | """Returns the user record for the given service and email. 14 | 15 | The returned object will be None if no service record exists for that 16 | email, otherwise it will be an object with the following fields: 17 | 18 | * email: the email address, as given to this method 19 | * uid: integer userid assigned to that email 20 | * node: service node assigned to that email, or None 21 | * generation: the last-seen generation number for that email 22 | * client_state: the last-seen client state string for that email 23 | * old_client_states: any previously--seen client state strings 24 | 25 | """ 26 | 27 | def allocate_user(self, service, email, generation=0, client_state='', 28 | keys_changed_at=0, node=None): 29 | """Create a new user record for the given service and email. 30 | 31 | The newly-created user record is returned in the format described 32 | for the get_user() method. 33 | """ 34 | 35 | def update_user(self, service, user, generation=None, client_state=None, 36 | keys_changed_at=None, node=None): 37 | """Update the user record for the given service. 
38 | 39 | This method can be used to update the last-seen generation number, 40 | client-state string or node assignment for a user. Changing the 41 | client-state or node will result in a new uid being generated. 42 | 43 | The given user object is modified in-place to reflect the changes 44 | stored on the backend. 45 | """ 46 | -------------------------------------------------------------------------------- /tokenserver/assignment/memorynode.py: -------------------------------------------------------------------------------- 1 | from zope.interface import implements 2 | 3 | from tokenserver.assignment import INodeAssignment 4 | from tokenserver.util import get_timestamp 5 | 6 | from mozsvc.exceptions import BackendError 7 | 8 | 9 | class MemoryNodeAssignmentBackend(object): 10 | """Simple in-memory INodeAssignment backend. 11 | 12 | This is useful for testing purposes and probably not much else. 13 | """ 14 | implements(INodeAssignment) 15 | 16 | def __init__(self, service_entry=None, **kw): 17 | self.service_entry = service_entry 18 | self._users = {} 19 | self._next_uid = 1 20 | self.settings = kw or {} 21 | 22 | def clear(self): 23 | self._users.clear() 24 | self._next_uid = 1 25 | 26 | def get_user(self, service, email): 27 | try: 28 | return self._users[(service, email)].copy() 29 | except KeyError: 30 | return None 31 | 32 | def allocate_user(self, service, email, generation=0, client_state='', 33 | keys_changed_at=0, node=None): 34 | if (service, email) in self._users: 35 | raise BackendError('user already exists: ' + email) 36 | if node is not None and node != self.service_entry: 37 | raise ValueError("unknown node: %s" % (node,)) 38 | user = { 39 | 'email': email, 40 | 'uid': self._next_uid, 41 | 'node': self.service_entry, 42 | 'generation': generation, 43 | 'keys_changed_at': keys_changed_at, 44 | 'client_state': client_state, 45 | 'old_client_states': {}, 46 | 'first_seen_at': get_timestamp(), 47 | } 48 | self._users[(service, email)] = user 49 | 
self._next_uid += 1 50 | return user.copy() 51 | 52 | def update_user(self, service, user, generation=None, client_state=None, 53 | keys_changed_at=None, node=None): 54 | if (service, user['email']) not in self._users: 55 | raise BackendError('unknown user: ' + user['email']) 56 | if node is not None and node != self.service_entry: 57 | raise ValueError("unknown node: %s" % (node,)) 58 | if generation is not None: 59 | user['generation'] = generation 60 | if keys_changed_at is not None: 61 | user['keys_changed_at'] = keys_changed_at 62 | if client_state is not None: 63 | user['old_client_states'][user['client_state']] = True 64 | user['client_state'] = client_state 65 | user['uid'] = self._next_uid 66 | self._next_uid += 1 67 | self._users[(service, user['email'])].update(user) 68 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/__init__.py: -------------------------------------------------------------------------------- 1 | from tokenserver.assignment.sqlnode.sql import SQLNodeAssignment # NOQA 2 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/README: -------------------------------------------------------------------------------- 1 | 2 | Database migrations for tokenserver. 3 | 4 | This directory contains alembic migrations tracking the database schema 5 | changes made in tokenserver: 6 | 7 | https://alembic.readthedocs.io/ 8 | 9 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/env.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | import os 3 | import sys 4 | 5 | # ensure that the containing module is on sys.path 6 | # this is a hack for using alembic in our built virtualenv. 
7 | mod_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..") 8 | if mod_dir not in sys.path: 9 | sys.path.append(mod_dir) 10 | 11 | from alembic import context 12 | from logging.config import fileConfig 13 | 14 | from sqlalchemy import create_engine 15 | from sqlalchemy.pool import NullPool 16 | 17 | from tokenserver.util import find_config_file 18 | from mozsvc.config import load_into_settings 19 | 20 | # this is the Alembic Config object, which provides 21 | # access to the values within the .ini file in use. 22 | config = context.config 23 | 24 | # Interpret the config file for Python logging. 25 | # This line sets up loggers basically. 26 | fileConfig(config.config_file_name) 27 | 28 | 29 | ini_file = find_config_file(config.get_main_option("token_ini")) 30 | settings = {} 31 | load_into_settings(ini_file, settings) 32 | 33 | def run_migrations_offline(): 34 | """Run migrations in 'offline' mode. 35 | 36 | This configures the context with just a URL 37 | and not an Engine, though an Engine is acceptable 38 | here as well. By skipping the Engine creation 39 | we don't even need a DBAPI to be available. 40 | 41 | Calls to context.execute() here emit the given string to the 42 | script output. 43 | 44 | """ 45 | url = settings["tokenserver.sqluri"] 46 | context.configure(url=url) 47 | 48 | with context.begin_transaction(): 49 | context.run_migrations() 50 | 51 | 52 | def run_migrations_online(): 53 | """Run migrations in 'online' mode. 54 | 55 | In this scenario we need to create an Engine 56 | and associate a connection with the context. 
57 | 58 | """ 59 | url = settings["tokenserver.sqluri"] 60 | engine = create_engine(url, poolclass=NullPool) 61 | connection = engine.connect() 62 | context.configure( 63 | connection=connection, 64 | ) 65 | 66 | try: 67 | with context.begin_transaction(): 68 | context.run_migrations() 69 | finally: 70 | connection.close() 71 | 72 | if context.is_offline_mode(): 73 | run_migrations_offline() 74 | else: 75 | run_migrations_online() 76 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | 9 | # revision identifiers, used by Alembic. 10 | revision = ${repr(up_revision)} 11 | down_revision = ${repr(down_revision)} 12 | 13 | from alembic import op 14 | import sqlalchemy as sa 15 | ${imports if imports else ""} 16 | 17 | def upgrade(): 18 | ${upgrades if upgrades else "pass"} 19 | 20 | 21 | def downgrade(): 22 | ${downgrades if downgrades else "pass"} 23 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/versions/17d209a72e2f_add_replaced_at_idx.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | """add replaced_at_idx 3 | 4 | Revision ID: 17d209a72e2f 5 | Revises: None 6 | Create Date: 2014-04-14 02:42:04.919012 7 | 8 | This adds an index on ("service", "replaced_at") to the users table. 9 | See https://bugzilla.mozilla.org/show_bug.cgi?id=984232 10 | 11 | """ 12 | 13 | # revision identifiers, used by Alembic. 
14 | revision = '17d209a72e2f' 15 | down_revision = None 16 | 17 | from alembic import op 18 | 19 | 20 | def upgrade(): 21 | op.create_index('replaced_at_idx', 'users', ['service', 'replaced_at']) 22 | 23 | 24 | def downgrade(): 25 | op.drop_index('replaced_at_idx', 'users') 26 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/versions/2b968b28bcdc_remove_node_column.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | """remove node column 3 | 4 | Revision ID: 2b968b28bcdc 5 | Revises: 9fb109457bd 6 | Create Date: 2014-06-27 09:41:22.944863 7 | 8 | """ 9 | 10 | # revision identifiers, used by Alembic. 11 | revision = '2b968b28bcdc' 12 | down_revision = '9fb109457bd' 13 | 14 | from alembic import op 15 | import sqlalchemy as sa 16 | 17 | 18 | def upgrade(): 19 | op.drop_column('users', 'node') 20 | 21 | 22 | def downgrade(): 23 | # Re-create the column, making it nullable so that it 24 | # can be safely inserted in the presence of existing data. 25 | # The previous migration knows how to make it non-nullable. 26 | op.add_column( 27 | 'users', 28 | sa.Column('node', sa.String(64), nullable=True) 29 | ) 30 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/versions/3d5af3924466_drop_clientstate_idx.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | """drop clientstate_idx 3 | 4 | Revision ID: 3d5af3924466 5 | Revises: 17d209a72e2f 6 | Create Date: 2014-04-14 02:47:55.094158 7 | 8 | This drops the "clientstate_idx" index from the users table. 9 | It was a unique index, but we no longer require them to be unique 10 | at this level. See https://bugzilla.mozilla.org/show_bug.cgi?id=988134 11 | 12 | """ 13 | 14 | # revision identifiers, used by Alembic. 
15 | revision = '3d5af3924466' 16 | down_revision = '17d209a72e2f' 17 | 18 | from alembic import op 19 | 20 | 21 | def upgrade(): 22 | op.drop_index('clientstate_idx', 'users') 23 | 24 | 25 | def downgrade(): 26 | op.create_unique_constraint('clientstate_idx', 'users', 27 | ['email', 'service', 'client_state']) 28 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/versions/5d056c5b8f57_create_dyn_settings_table.py: -------------------------------------------------------------------------------- 1 | """create dyn_settings table 2 | 3 | Revision ID: 5d056c5b8f57 4 | Revises: 75e8ca84b0bc 5 | Create Date: 2020-01-06 08:16:15.546054 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '5d056c5b8f57' 13 | down_revision = '75e8ca84b0bc' 14 | 15 | 16 | def upgrade(): 17 | op.create_table( 18 | 'dynamic_settings', 19 | sa.Column('setting', sa.String(100), primary_key=True), 20 | sa.Column('value', sa.String(255), nullable=False), 21 | sa.Column('description', sa.String(255)) 22 | ) 23 | 24 | 25 | def downgrade(): 26 | op.drop_table('dynamic_settings') 27 | pass 28 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/versions/6569dd9a060_populate_nodeid_column_and_index.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | """populate nodeid column and index 3 | 4 | Revision ID: 6569dd9a060 5 | Revises: 846f28d1b6f 6 | Create Date: 2014-04-14 05:26:44.146236 7 | 8 | This updates the values in the "nodeid" column to ensure that they match 9 | the value in the string-based "node" column, then indexes the column for fast 10 | node-based lookup. 
It should only be applied *after* all servers have been
11 | upgraded to properly write the value of the "nodeid" column; it's part 2 of 2
12 | in getting to the desired state without downtime.
13 | 
14 | See https://bugzilla.mozilla.org/show_bug.cgi?id=988643
15 | 
16 | """
17 | 
18 | # revision identifiers, used by Alembic.
19 | revision = '6569dd9a060'
20 | down_revision = '846f28d1b6f'
21 | 
22 | from alembic import op
23 | import sqlalchemy as sa
24 | 
25 | 
26 | def upgrade():
27 | # Populate nodeid with the proper id for each existing row.
28 | # XXX NOTE: MySQL-specific!
29 | op.execute("""
30 | UPDATE users, nodes
31 | SET users.nodeid = nodes.id
32 | WHERE users.node = nodes.node
33 | """.strip())
34 | # Set the column non-nullable so it doesn't mask bugs in the future.
35 | op.alter_column(
36 | 'users', 'nodeid',
37 | nullable=False,
38 | existing_type=sa.BigInteger(),
39 | existing_server_default=None,
40 | )
41 | # Index the nodeid column.
42 | op.create_index('node_idx', 'users', ['nodeid'])
43 | 
44 | 
45 | def downgrade():
46 | op.drop_index('node_idx', 'users')
47 | op.alter_column(
48 | 'users', 'nodeid',
49 | nullable=True,
50 | existing_type=sa.BigInteger(),
51 | existing_server_default=None,
52 | )
53 | 
--------------------------------------------------------------------------------
/tokenserver/assignment/sqlnode/migrations/versions/75e8ca84b0bc_add_keys_changed_at_column.py:
--------------------------------------------------------------------------------
 1 | # flake8: noqa
 2 | """add_keys_changed_at_column
 3 | 
 4 | Revision ID: 75e8ca84b0bc
 5 | Revises: 2b968b28bcdc
 6 | Create Date: 2019-10-14 12:09:18.257878
 7 | 
 8 | """
 9 | 
10 | # revision identifiers, used by Alembic.
11 | revision = '75e8ca84b0bc'
12 | down_revision = '2b968b28bcdc'
13 | 
14 | from alembic import op
15 | import sqlalchemy as sa
16 | 
17 | 
18 | def upgrade():
19 | # Create the column, making it nullable so that it can be
20 | # safely inserted in the presence of existing data.
21 | op.add_column(
22 | 'users',
23 | sa.Column('keys_changed_at', sa.BigInteger(), nullable=True)
24 | )
25 | pass
26 | 
27 | 
28 | def downgrade():
29 | op.drop_column('users', 'keys_changed_at')
30 | 
--------------------------------------------------------------------------------
/tokenserver/assignment/sqlnode/migrations/versions/846f28d1b6f_add_nodeid_column.py:
--------------------------------------------------------------------------------
 1 | # flake8: noqa
 2 | """add nodeid column
 3 | 
 4 | Revision ID: 846f28d1b6f
 5 | Revises: 3d5af3924466
 6 | Create Date: 2014-04-14 03:28:16.156769
 7 | 
 8 | This adds a new "nodeid" column to the users table, for referencing a row
 9 | in the "nodes" table. It doesn't make the column *useful* to the app since
10 | it needs to be properly populated from the existing data, which we can't do
11 | until the app is updated to write into it. It's part 1 of 2 in getting to
12 | the desired state without downtime.
13 | 
14 | See https://bugzilla.mozilla.org/show_bug.cgi?id=988643
15 | 
16 | """
17 | 
18 | # revision identifiers, used by Alembic.
19 | revision = '846f28d1b6f'
20 | down_revision = '3d5af3924466'
21 | 
22 | from alembic import op
23 | import sqlalchemy as sa
24 | 
25 | 
26 | def upgrade():
27 | # Create the column, making it nullable so that it can be
28 | # safely inserted in the presence of existing data.
29 | # The next migration will make it non-nullable.
30 | op.add_column( 31 | 'users', 32 | sa.Column('nodeid', sa.BigInteger(), nullable=True) 33 | ) 34 | 35 | 36 | def downgrade(): 37 | op.drop_column('users', 'nodeid') 38 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/migrations/versions/9fb109457bd_make_users_node_column_nullable.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | """make users.node column nullable 3 | 4 | Revision ID: 9fb109457bd 5 | Revises: 6569dd9a060 6 | Create Date: 2014-04-29 12:51:41.879429 7 | 8 | """ 9 | 10 | # revision identifiers, used by Alembic. 11 | revision = '9fb109457bd' 12 | down_revision = '6569dd9a060' 13 | 14 | from alembic import op 15 | import sqlalchemy as sa 16 | 17 | 18 | def upgrade(): 19 | op.alter_column( 20 | 'users', 'node', 21 | nullable=True, 22 | existing_type=sa.String(64), 23 | existing_server_default=None, 24 | ) 25 | 26 | 27 | def downgrade(): 28 | # Populate the column with denormalized data from the nodes table. 29 | # XXX NOTE: MySQL-specific! 30 | op.execute(""" 31 | UPDATE users, nodes 32 | SET users.node = nodes.node 33 | WHERE users.nodeid = nodes.id 34 | """.strip()) 35 | op.alter_column( 36 | 'users', 'node', 37 | nullable=False, 38 | existing_type=sa.String(64), 39 | existing_server_default=None, 40 | ) 41 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/schemas.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | Table schema for MySQL and sqlite. 6 | 7 | We have the following tables: 8 | 9 | services: lists the available services and their endpoint-url pattern. 
10 | nodes: lists the nodes available for each service. 11 | users: lists the user records for each service, along with their 12 | metadata and current node assignment. 13 | 14 | """ 15 | 16 | from sqlalchemy.ext.declarative import declared_attr 17 | from sqlalchemy import Column, Integer, String, BigInteger, Index 18 | 19 | 20 | bases = {} 21 | 22 | 23 | def _add(name, base): 24 | bases[name] = base 25 | 26 | 27 | def get_cls(name, base_cls): 28 | if name in base_cls.metadata.tables: 29 | return base_cls.metadata.tables[name] 30 | 31 | args = {'__tablename__': name} 32 | base = bases[name] 33 | return type(name, (base, base_cls), args).__table__ 34 | 35 | 36 | class _UsersBase(object): 37 | """This table associates email addresses with services via integer uids. 38 | 39 | A user is uniquely identified by their email. For each service they have 40 | a uid, an allocated node, and last-seen generation and client-state values. 41 | Rows are timestamped for easy cleanup of old records. 42 | """ 43 | uid = Column(BigInteger(), primary_key=True, autoincrement=True, 44 | nullable=False) 45 | service = Column(Integer(), nullable=False) 46 | email = Column(String(255), nullable=False) 47 | generation = Column(BigInteger(), nullable=False) 48 | client_state = Column(String(32), nullable=False) 49 | created_at = Column(BigInteger(), nullable=False) 50 | replaced_at = Column(BigInteger(), nullable=True) 51 | nodeid = Column(BigInteger(), nullable=False) 52 | keys_changed_at = Column(BigInteger(), nullable=True) 53 | 54 | @declared_attr 55 | def __table_args__(cls): 56 | return ( 57 | # Index used to slurp in all records for a (service, email) 58 | # pair, sorted by creation time. 59 | Index('lookup_idx', 'email', 'service', 'created_at'), 60 | # Index used for purging user_records that have been replaced. 61 | Index('replaced_at_idx', 'service', 'replaced_at'), 62 | # Index used for looking up all assignments on a node. 
63 | Index('node_idx', 'nodeid'), 64 | {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} 65 | ) 66 | 67 | 68 | _add('users', _UsersBase) 69 | 70 | 71 | class _ServicesBase(object): 72 | """This table lists all the available services and their endpoint patterns. 73 | 74 | Service names are expected to be "{app_name}-{app_version}" for example 75 | "sync-1.5". Endpoint patterns can use python formatting options on the 76 | keys {uid}, {node} and {service}. 77 | 78 | Having a table for these means that we can internally refer to each service 79 | by an integer key, which helps when indexing by service. 80 | """ 81 | id = Column(Integer(), primary_key=True, autoincrement=True, 82 | nullable=False) 83 | service = Column(String(30), unique=True) 84 | pattern = Column(String(128)) 85 | 86 | 87 | _add('services', _ServicesBase) 88 | 89 | 90 | class _NodesBase(object): 91 | """This table keeps tracks of all nodes available per service 92 | 93 | Each node has a root URL as well as metadata about its current availability 94 | and capacity. 95 | """ 96 | id = Column(BigInteger(), primary_key=True, autoincrement=True, 97 | nullable=False) 98 | service = Column(Integer(), nullable=False) 99 | node = Column(String(64), nullable=False) 100 | # The number of free slots currently available on this node. 101 | available = Column(Integer, default=0, nullable=False) 102 | # The number of users current assigned to this node. 103 | current_load = Column(Integer, default=0, nullable=False) 104 | # The maximum number of users that can be assigned to this node. 105 | capacity = Column(Integer, default=0, nullable=False) 106 | # Whether the node is currently in service. 
107 | downed = Column(Integer, default=0, nullable=False) 108 | backoff = Column(Integer, default=0, nullable=False) 109 | 110 | @declared_attr 111 | def __table_args__(cls): 112 | return ( 113 | Index('unique_idx', 'service', 'node', unique=True), 114 | {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} 115 | ) 116 | 117 | 118 | _add('nodes', _NodesBase) 119 | 120 | 121 | class _SettingsBase(object): 122 | """This table holds dynamic setting values for operations. 123 | 124 | Really, only useful for longer lived SQL nodes right now. 125 | 126 | """ 127 | setting = Column(String(100), primary_key=True, nullable=False) 128 | value = Column(String(255), nullable=False) 129 | description = Column(String(255), nullable=True) 130 | 131 | 132 | _add('dynamic_settings', _SettingsBase) 133 | -------------------------------------------------------------------------------- /tokenserver/assignment/sqlnode/sqliteschemas.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | """ 5 | Table schema for Sqlite 6 | """ 7 | from tokenserver.assignment.sqlnode.schemas import (_UsersBase, 8 | _NodesBase, 9 | _SettingsBase, 10 | Integer, 11 | Column, 12 | String, 13 | _add, 14 | declared_attr) 15 | 16 | from tokenserver.assignment.sqlnode.schemas import get_cls # NOQA 17 | 18 | 19 | __all__ = (get_cls,) 20 | 21 | 22 | class _SQLITENodesBase(_NodesBase): 23 | id = Column(Integer, primary_key=True) 24 | 25 | @declared_attr 26 | def __table_args__(cls): 27 | return () 28 | 29 | 30 | _add('nodes', _SQLITENodesBase) 31 | 32 | 33 | class _SQLITEUsersBase(_UsersBase): 34 | uid = Column(Integer, primary_key=True) 35 | 36 | @declared_attr 37 | def __table_args__(cls): 38 | return () 39 | 40 | 41 | _add('users', _SQLITEUsersBase) 42 | 43 | 44 | class _SQLITESettingsBase(_SettingsBase): 45 | setting = Column(String(100), primary_key=True) 46 | 47 | @declared_attr 48 | def __table_args__(cls): 49 | return() 50 | 51 | 52 | _add('dynamic_settings', _SQLITESettingsBase) 53 | -------------------------------------------------------------------------------- /tokenserver/run.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | # -*- coding: utf8 -*- 3 | # This Source Code Form is subject to the terms of the Mozilla Public 4 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 5 | # You can obtain one at http://mozilla.org/MPL/2.0/. 6 | """ 7 | Runs the Application. 
This script can be called by any wsgi runner that looks
 8 | for an 'application' variable
 9 | """
10 | import os
11 | from logging.config import fileConfig
12 | from ConfigParser import NoSectionError
13 | 
14 | from tokenserver.util import find_config_file
15 | 
16 | # setting up the egg cache to a place where apache can write
17 | os.environ['PYTHON_EGG_CACHE'] = '/tmp/python-eggs'
18 | 
19 | # setting up logging
20 | ini_file = find_config_file()
21 | try:
22 | fileConfig(ini_file)
23 | except NoSectionError:
24 | pass
25 | 
26 | # running the app using Paste
27 | from paste.deploy import loadapp
28 | 
29 | application = loadapp('config:%s' % ini_file)
30 | 
--------------------------------------------------------------------------------
/tokenserver/scripts/__init__.py:
--------------------------------------------------------------------------------
 1 | # This Source Code Form is subject to the terms of the Mozilla Public
 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 3 | # You can obtain one at http://mozilla.org/MPL/2.0/.
 4 | """
 5 | 
 6 | Admin/management scripts for TokenServer.
 7 | 
 8 | """
 9 | 
10 | import sys
11 | import logging
12 | 
13 | import tokenserver
14 | 
15 | 
16 | def run_script(main):
17 | """Simple wrapper for running scripts in __main__ section."""
18 | try:
19 | exitcode = main()
20 | except KeyboardInterrupt:
21 | exitcode = 1
22 | sys.exit(exitcode)
23 | 
24 | 
25 | def load_configurator(config_file):
26 | """Load a TokenServer configurator object from the given config file.
27 | 
28 | This is a lightweight wrapper around tokenserver.get_configurator(),
29 | which adds some configuration tweaks for running in a script instead of
30 | as a long-running application.
31 | """ 32 | config = tokenserver.get_configurator({"__file__": config_file}) 33 | config.include(tokenserver) 34 | config.commit() 35 | return config 36 | 37 | 38 | def configure_script_logging(opts=None): 39 | """Configure stdlib logging to produce output from the script. 40 | 41 | This basically configures logging to send messages to stderr, with 42 | formatting that's more for human readability than machine parsing. 43 | It also takes care of the --verbosity command-line option. 44 | """ 45 | if not opts or not opts.verbosity: 46 | loglevel = logging.WARNING 47 | elif opts.verbosity == 1: 48 | loglevel = logging.INFO 49 | else: 50 | loglevel = logging.DEBUG 51 | 52 | handler = logging.StreamHandler() 53 | handler.setFormatter(logging.Formatter("%(message)s")) 54 | handler.setLevel(loglevel) 55 | logger = logging.getLogger("") 56 | logger.addHandler(handler) 57 | logger.setLevel(loglevel) 58 | -------------------------------------------------------------------------------- /tokenserver/scripts/add_node.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | 6 | Script to add a new node to the system. 7 | 8 | This script takes a tokenserver config file, uses it to load the assignment 9 | backend, and then adds the named node to the system. 
10 | 11 | """ 12 | 13 | import os 14 | import logging 15 | import optparse 16 | 17 | import tokenserver.scripts 18 | from tokenserver.assignment import INodeAssignment 19 | 20 | 21 | logger = logging.getLogger("tokenserver.scripts.add_node") 22 | 23 | 24 | def add_node(config_file, service, node, capacity, **kwds): 25 | """Add the specific node to the system.""" 26 | logger.info("Adding node %s to service %s", node, service) 27 | logger.debug("Using config file %r", config_file) 28 | config = tokenserver.scripts.load_configurator(config_file) 29 | config.begin() 30 | try: 31 | backend = config.registry.getUtility(INodeAssignment) 32 | backend.add_node(service, node, capacity, **kwds) 33 | except Exception: 34 | logger.exception("Error while adding node") 35 | return False 36 | else: 37 | logger.info("Finished adding node %s", node) 38 | return True 39 | finally: 40 | config.end() 41 | 42 | 43 | def main(args=None): 44 | """Main entry-point for running this script. 45 | 46 | This function parses command-line arguments and passes them on 47 | to the add_node() function. 
48 | """ 49 | usage = "usage: %prog [options] config_file service node_name capacity" 50 | descr = "Add a new node to the tokenserver database" 51 | parser = optparse.OptionParser(usage=usage, description=descr) 52 | parser.add_option("", "--available", type="int", 53 | help="How many user slots the node has available") 54 | parser.add_option("", "--current-load", type="int", 55 | help="How many user slots the node has occupied") 56 | parser.add_option("", "--downed", action="store_true", 57 | help="Mark the node as down in the db") 58 | parser.add_option("", "--backoff", action="store_true", 59 | help="Mark the node as backed-off in the db") 60 | parser.add_option("-v", "--verbose", action="count", dest="verbosity", 61 | help="Control verbosity of log messages") 62 | 63 | opts, args = parser.parse_args(args) 64 | if len(args) != 4: 65 | parser.print_usage() 66 | return 1 67 | 68 | tokenserver.scripts.configure_script_logging(opts) 69 | 70 | config_file = os.path.abspath(args[0]) 71 | service = args[1] 72 | node_name = args[2] 73 | capacity = int(args[3]) 74 | 75 | kwds = {} 76 | if opts.available is not None: 77 | kwds["available"] = opts.available 78 | if opts.current_load is not None: 79 | kwds["current_load"] = opts.current_load 80 | if opts.backoff is not None: 81 | kwds["backoff"] = opts.backoff 82 | if opts.downed is not None: 83 | kwds["downed"] = opts.downed 84 | 85 | add_node(config_file, service, node_name, capacity, **kwds) 86 | return 0 87 | 88 | 89 | if __name__ == "__main__": 90 | tokenserver.scripts.run_script(main) 91 | -------------------------------------------------------------------------------- /tokenserver/scripts/allocate_user.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | """ 5 | 6 | Script to allocate a specific user to a node. 7 | 8 | This script takes a tokenserver config file, uses it to load the assignment 9 | backend, and then allocates the specified user to a node. A particular node 10 | may be specified, or the best available node used by default. 11 | 12 | The allocated node is printed to stdout. 13 | 14 | """ 15 | 16 | import os 17 | import logging 18 | import optparse 19 | 20 | import tokenserver.scripts 21 | from tokenserver.assignment import INodeAssignment 22 | 23 | 24 | logger = logging.getLogger("tokenserver.scripts.allocate_user") 25 | 26 | 27 | def allocate_user(config, service, email, node=None): 28 | logger.info("Allocating node for user %s", email) 29 | config.begin() 30 | try: 31 | backend = config.registry.getUtility(INodeAssignment) 32 | user = backend.get_user(service, email) 33 | if user is None: 34 | user = backend.allocate_user(service, email, node=node) 35 | else: 36 | backend.update_user(service, user, node=node) 37 | print user["node"] 38 | except Exception: 39 | logger.exception("Error while updating node") 40 | return False 41 | else: 42 | logger.info("Finished updating node %s", node) 43 | return True 44 | finally: 45 | config.end() 46 | 47 | 48 | def main(args=None): 49 | """Main entry-point for running this script. 50 | 51 | This function parses command-line arguments and passes them on 52 | to the allocate_user() function. 53 | """ 54 | usage = "usage: %prog [options] config_file service email [node_name]" 55 | descr = "Allocate a user to a node. You may specify a particular node, "\ 56 | "or omit to use the best available node." 
57 | parser = optparse.OptionParser(usage=usage, description=descr) 58 | parser.add_option("-v", "--verbose", action="count", dest="verbosity", 59 | help="Control verbosity of log messages") 60 | 61 | opts, args = parser.parse_args(args) 62 | if not 3 <= len(args) <= 4: 63 | parser.print_usage() 64 | return 1 65 | 66 | tokenserver.scripts.configure_script_logging(opts) 67 | 68 | config_file = os.path.abspath(args[0]) 69 | logger.debug("Using config file %r", config_file) 70 | config = tokenserver.scripts.load_configurator(config_file) 71 | 72 | service = args[1] 73 | email = args[2] 74 | if len(args) == 3: 75 | node_name = None 76 | else: 77 | node_name = args[3] 78 | 79 | allocate_user(config, service, email, node_name) 80 | return 0 81 | 82 | 83 | if __name__ == "__main__": 84 | tokenserver.scripts.run_script(main) 85 | -------------------------------------------------------------------------------- /tokenserver/scripts/count_users.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | 6 | Script to emit total-user-count metrics for exec dashboard. 7 | 8 | This script takes a tokenserver config file, uses it to load the assignment 9 | backend, and then outputs the reported user count. 
10 | 11 | """ 12 | 13 | import os 14 | import sys 15 | import time 16 | import json 17 | import socket 18 | import optparse 19 | from datetime import datetime, timedelta, tzinfo 20 | 21 | from tokenserver.assignment import INodeAssignment 22 | import tokenserver.scripts 23 | 24 | import logging 25 | logger = logging.getLogger("tokenserver.scripts.count_users") 26 | 27 | ZERO = timedelta(0) 28 | 29 | 30 | class UTC(tzinfo): 31 | 32 | def utcoffset(self, dt): 33 | return ZERO 34 | 35 | def tzname(self, dt): 36 | return "UTC" 37 | 38 | def dst(self, dt): 39 | return ZERO 40 | 41 | 42 | utc = UTC() 43 | 44 | 45 | def count_users(config_file, outfile, timestamp=None): 46 | logger.debug("Using config file %r", config_file) 47 | config = tokenserver.scripts.load_configurator(config_file) 48 | config.begin() 49 | try: 50 | if timestamp is None: 51 | ts = time.gmtime() 52 | midnight = (ts[0], ts[1], ts[2], 0, 0, 0, ts[6], ts[7], ts[8]) 53 | timestamp = int(time.mktime(midnight)) * 1000 54 | backend = config.registry.getUtility(INodeAssignment) 55 | logger.debug("Counting users created before %i", timestamp) 56 | count = backend.count_users(timestamp) 57 | logger.debug("Found %d users", count) 58 | # Output has heka-filter-compatible JSON object. 59 | ts_sec = timestamp / 1000 60 | output = { 61 | "hostname": socket.gethostname(), 62 | "pid": os.getpid(), 63 | "op": "sync_count_users", 64 | "total_users": count, 65 | "time": datetime.fromtimestamp(ts_sec, utc).isoformat(), 66 | "v": 0 67 | } 68 | json.dump(output, outfile) 69 | outfile.write("\n") 70 | finally: 71 | config.end() 72 | 73 | 74 | def main(args=None): 75 | """Main entry-point for running this script. 76 | 77 | This function parses command-line arguments and passes them on 78 | to the add_node() function. 
79 | """ 80 | usage = "usage: %prog [options] config_file" 81 | descr = "Count total users in the tokenserver database" 82 | parser = optparse.OptionParser(usage=usage, description=descr) 83 | parser.add_option("-t", "--timestamp", type="int", 84 | help="Max creation timestamp; default previous midnight") 85 | parser.add_option("-o", "--output", 86 | help="Output file; default stderr") 87 | parser.add_option("-v", "--verbose", action="count", dest="verbosity", 88 | help="Control verbosity of log messages") 89 | 90 | opts, args = parser.parse_args(args) 91 | if len(args) != 1: 92 | parser.print_usage() 93 | return 1 94 | 95 | tokenserver.scripts.configure_script_logging(opts) 96 | 97 | config_file = os.path.abspath(args[0]) 98 | if opts.output in (None, "-"): 99 | count_users(config_file, sys.stdout, opts.timestamp) 100 | else: 101 | with open(opts.output, "a") as outfile: 102 | count_users(config_file, outfile, opts.timestamp) 103 | 104 | return 0 105 | 106 | 107 | if __name__ == "__main__": 108 | tokenserver.scripts.run_script(main) 109 | -------------------------------------------------------------------------------- /tokenserver/scripts/process_account_events.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | 6 | Script to process account-related events from an SQS queue. 7 | 8 | This script polls an SQS queue for events indicating activity on an upstream 9 | account, as documented here: 10 | 11 | https://github.com/mozilla/fxa-auth-server/blob/master/docs/service_notifications.md 12 | 13 | The following event types are currently supported: 14 | 15 | * "delete": the account was deleted; we mark their records as retired 16 | so they'll be cleaned up by our garbage-collection process. 
17 | 
18 | * "reset": the account password was reset; we update our copy of their
19 | generation number to disconnect other devices.
20 | 
21 | * "passwordChange": the account password was changed; we update our copy
22 | of their generation number to disconnect other devices.
23 | 
24 | Note that this is a purely optional administrative task, highly specific to
25 | Mozilla's internal Firefox-Accounts-supported deployment.
26 | 
27 | """
28 | 
29 | import os
30 | import json
31 | import logging
32 | import optparse
33 | 
34 | import boto
35 | import boto.ec2
36 | import boto.sqs
37 | import boto.sqs.message
38 | import boto.utils
39 | 
40 | import tokenserver.scripts
41 | from tokenserver.assignment import INodeAssignment
42 | 
43 | 
44 | logger = logging.getLogger("tokenserver.scripts.process_account_deletions")
45 | 
46 | 
47 | def process_account_events(config_file, queue_name, aws_region=None,
48 | queue_wait_time=20):
49 | """Process account events from an SQS queue.
50 | 
51 | This function polls the specified SQS queue for account-related events,
52 | processing each as it is found. It polls indefinitely and does not return;
53 | to interrupt execution you'll need to e.g. SIGINT the process.
54 | """
55 | logger.info("Processing account events from %s", queue_name)
56 | logger.debug("Using config file %r", config_file)
57 | config = tokenserver.scripts.load_configurator(config_file)
58 | config.begin()
59 | try:
60 | # Connect to the SQS queue.
61 | # If no region is given, infer it from the instance metadata.
62 | if aws_region is None:
63 | logger.debug("Finding default region from instance metadata")
64 | aws_info = boto.utils.get_instance_metadata()
65 | aws_region = aws_info["placement"]["availability-zone"][:-1]
66 | logger.debug("Connecting to queue %r in %r", queue_name, aws_region)
67 | conn = boto.sqs.connect_to_region(aws_region)
68 | queue = conn.get_queue(queue_name)
69 | # We must force boto not to b64-decode the message contents, ugh.
70 | queue.set_message_class(boto.sqs.message.RawMessage)
71 | # Poll for messages indefinitely.
72 | while True:
73 | msg = queue.read(wait_time_seconds=queue_wait_time)
74 | if msg is None:
75 | continue
76 | process_account_event(config, msg.get_body())
77 | # This intentionally deletes the event even if it was some
78 | # unrecognized type. No point leaving a backlog.
79 | queue.delete_message(msg)
80 | except Exception:
81 | logger.exception("Error while processing account events")
82 | raise
83 | finally:
84 | config.end()
85 | 
86 | 
87 | def process_account_event(config, body):
88 | """Parse and process a single account event."""
89 | backend = config.registry.getUtility(INodeAssignment)
90 | # Try very hard not to error out if there's junk in the queue.
91 | email = None
92 | event_type = None
93 | generation = None
94 | try:
95 | body = json.loads(body)
96 | event = json.loads(body['Message'])
97 | event_type = event["event"]
98 | uid = event["uid"]
99 | # Older versions of the fxa-auth-server would send an email-like
100 | # identifier in the "uid" field, but that doesn't make sense for any
101 | # relier other than tokenserver. Newer versions send just the raw uid
102 | # in the "uid" field, and include the domain in a separate "iss" field.
103 | if "iss" in event:
104 | email = "%s@%s" % (uid, event["iss"])
105 | else:
106 | if "@" not in uid:
107 | raise ValueError("uid field does not contain issuer info")
108 | email = uid
109 | if event_type in ("reset", "passwordChange",):
110 | generation = event["generation"]
111 | except (ValueError, KeyError), e:
112 | logger.exception("Invalid account message: %s", e)
113 | else:
114 | if email is not None:
115 | if event_type == "delete":
116 | # Mark the user as retired.
117 | # Actual cleanup is done by a separate process.
118 | logger.info("Processing account delete for %r", email) 119 | backend.retire_user(email) 120 | elif event_type == "reset": 121 | logger.info("Processing account reset for %r", email) 122 | update_generation_number(config, backend, email, generation) 123 | elif event_type == "passwordChange": 124 | logger.info("Processing password change for %r", email) 125 | update_generation_number(config, backend, email, generation) 126 | else: 127 | logger.warning("Dropping unknown event type %r", 128 | event_type) 129 | 130 | 131 | def update_generation_number(config, backend, email, generation): 132 | """Update the maximum recorded generation number for the given user. 133 | 134 | When the FxA server sends us an update to the user's generation 135 | number, we want to update our high-water-mark in the DB in order to 136 | immediately lock out disconnected devices. However, since we don't 137 | know the new value of X-Client-State that goes with it, we can't just 138 | record the new generation number in the DB. If we did, the first 139 | device that tried to sync with the new generation number would appear 140 | to have an incorrect X-Client-State value, and would be rejected. 141 | 142 | Instead, we take advantage of the fact that it's a timestamp, and write 143 | it into the DB at one millisecond less than its current value. This 144 | ensures that we lock out any devices with an older generation number 145 | while avoiding errors with X-Client-State handling. 146 | 147 | This does leave a tiny edge-case where we can fail to lock out older 148 | devices, if the generation number changes twice in less than a 149 | millisecond. This is acceptably unlikely in practice, and we'll recover 150 | as soon as we see an updated generation number as part of a sync. 
151 | """ 152 | patterns = config.registry['endpoints_patterns'] 153 | for service in patterns: 154 | logger.debug("Recording generation change for service: %s", 155 | service) 156 | user = backend.get_user(service, email) 157 | if user is not None: 158 | backend.update_user(service, user, generation - 1) 159 | 160 | 161 | def main(args=None): 162 | """Main entry-point for running this script. 163 | 164 | This function parses command-line arguments and passes them on 165 | to the process_account_events() function. 166 | """ 167 | usage = "usage: %prog [options] config_file queue_name" 168 | parser = optparse.OptionParser(usage=usage) 169 | parser.add_option("", "--aws-region", 170 | help="aws region in which the queue can be found") 171 | parser.add_option("", "--queue-wait-time", type="int", default=20, 172 | help="Number of seconds to wait for jobs on the queue") 173 | parser.add_option("-v", "--verbose", action="count", dest="verbosity", 174 | help="Control verbosity of log messages") 175 | 176 | opts, args = parser.parse_args(args) 177 | if len(args) != 2: 178 | parser.print_usage() 179 | return 1 180 | 181 | tokenserver.scripts.configure_script_logging(opts) 182 | 183 | config_file = os.path.abspath(args[0]) 184 | queue_name = args[1] 185 | 186 | process_account_events(config_file, queue_name, 187 | opts.aws_region, opts.queue_wait_time) 188 | return 0 189 | 190 | 191 | if __name__ == "__main__": 192 | tokenserver.scripts.run_script(main) 193 | -------------------------------------------------------------------------------- /tokenserver/scripts/purge_old_records.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | 6 | Script to purge user records that have been replaced. 
7 | 8 | This script takes a tokenserver config file, uses it to load the assignment 9 | backend, and then purges any obsolete user records from that backend. 10 | Obsolete records are those that have been replaced by a newer record for 11 | the same user. 12 | 13 | Note that this is a purely optional administrative task, since replaced records 14 | are handled internally by the assignment backend. But it should help reduce 15 | overheads, improve performance etc if run regularly. 16 | 17 | """ 18 | 19 | import os 20 | import time 21 | import random 22 | import logging 23 | import optparse 24 | 25 | import requests 26 | import tokenlib 27 | import hawkauthlib 28 | 29 | import tokenserver.scripts 30 | from tokenserver.assignment import INodeAssignment 31 | from tokenserver.util import format_key_id 32 | 33 | 34 | logger = logging.getLogger("tokenserver.scripts.purge_old_records") 35 | 36 | 37 | def purge_old_records(config_file, grace_period=-1, max_per_loop=10, 38 | max_offset=0, request_timeout=60, settings=None): 39 | """Purge old records from the assignment backend in the given config file. 40 | 41 | This function iterates through each storage backend in the given config 42 | file and calls its purge_expired_items() method. The result is a 43 | gradual pruning of expired items from each database. 44 | 45 | `max_offset` is used to select a random offset into the list of purgeable 46 | records. With multiple tasks running concurrently, this will provide each 47 | a (likely) different set of records to work on. A cheap, imperfect 48 | randomization. 
49 | """ 50 | logger.info("Purging old user records") 51 | logger.debug("Using config file %r", config_file) 52 | config = tokenserver.scripts.load_configurator(config_file) 53 | config.begin() 54 | try: 55 | backend = config.registry.getUtility(INodeAssignment) 56 | patterns = config.registry['endpoints_patterns'] 57 | for service in patterns: 58 | previous_list = [] 59 | logger.debug("Purging old user records for service: %s", service) 60 | # Process batches of items, until we run out. 61 | while True: 62 | offset = random.randint(0, max_offset) 63 | kwds = { 64 | "grace_period": grace_period, 65 | "limit": max_per_loop, 66 | "offset": offset, 67 | } 68 | rows = list(backend.get_old_user_records(service, **kwds)) 69 | if not rows: 70 | logger.info("No more data for %s", service) 71 | break 72 | if rows == previous_list: 73 | raise Exception("Loop detected") 74 | previous_list = rows 75 | logger.info("Fetched %d rows at offset %d", len(rows), offset) 76 | counter = 0 77 | for row in rows: 78 | # Don't attempt to purge data from downed nodes. 79 | # Instead wait for them to either come back up or to be 80 | # completely removed from service. 
81 | if row.node is None: 82 | logger.info("Deleting user record for uid %s on %s", 83 | row.uid, row.node) 84 | if settings and not settings.dryrun: 85 | backend.delete_user_record(service, row.uid) 86 | elif not row.downed: 87 | logger.info("Purging uid %s on %s", row.uid, row.node) 88 | delete_service_data(config, service, row, 89 | timeout=request_timeout, 90 | settings=settings) 91 | if settings and not settings.dryrun: 92 | backend.delete_user_record(service, row.uid) 93 | counter += 1 94 | elif row.downed and settings and settings.force: 95 | logger.info( 96 | "Forcing tokenserver record delete: {}".format( 97 | row.uid 98 | ) 99 | ) 100 | if not settings.dryrun: 101 | backend.delete_user_record(service, row.uid) 102 | counter += 1 103 | if settings and settings.max_records: 104 | if counter >= settings.max_records: 105 | logger.info("Reached max_records, exiting") 106 | return True 107 | if len(rows) < max_per_loop: 108 | break 109 | except Exception as e: 110 | logger.exception("Error while purging old user records: {}".format(e)) 111 | return False 112 | else: 113 | logger.info("Finished purging old user records") 114 | return True 115 | finally: 116 | config.end() 117 | 118 | 119 | def delete_service_data(config, service, user, timeout=60, settings=None): 120 | """Send a data-deletion request to the user's service node. 121 | 122 | This is a little bit of hackery to cause the user's service node to 123 | remove any data it still has stored for the user. We simulate a DELETE 124 | request from the user's own account. 
125 | """ 126 | secrets = config.registry.settings['tokenserver.secrets'] 127 | pattern = config.registry['endpoints_patterns'][service] 128 | node_secrets = secrets.get(user.node) 129 | if not node_secrets: 130 | msg = "The node %r does not have any shared secret" % (user.node,) 131 | raise ValueError(msg) 132 | token = tokenlib.make_token({ 133 | "uid": user.uid, 134 | "node": user.node, 135 | "fxa_uid": user.email.split("@", 1)[0], 136 | "fxa_kid": format_key_id( 137 | user.keys_changed_at or user.generation, 138 | user.client_state.decode('hex') 139 | ), 140 | }, secret=node_secrets[-1]) 141 | secret = tokenlib.get_derived_secret(token, secret=node_secrets[-1]) 142 | endpoint = pattern.format(uid=user.uid, service=service, node=user.node) 143 | auth = HawkAuth(token, secret) 144 | if settings and settings.dryrun: 145 | return 146 | resp = requests.delete(endpoint, auth=auth, timeout=timeout) 147 | if resp.status_code >= 400 and resp.status_code != 404: 148 | resp.raise_for_status() 149 | 150 | 151 | class HawkAuth(requests.auth.AuthBase): 152 | """Hawk-signing auth helper class.""" 153 | 154 | def __init__(self, token, secret): 155 | self.token = token 156 | self.secret = secret 157 | 158 | def __call__(self, req): 159 | hawkauthlib.sign_request(req, self.token, self.secret) 160 | return req 161 | 162 | 163 | def main(args=None): 164 | """Main entry-point for running this script. 165 | 166 | This function parses command-line arguments and passes them on 167 | to the purge_old_records() function. 
168 | """ 169 | usage = "usage: %prog [options] config_file" 170 | parser = optparse.OptionParser(usage=usage) 171 | parser.add_option("", "--purge-interval", type="int", default=3600, 172 | help="Interval to sleep between purging runs") 173 | parser.add_option("", "--grace-period", type="int", default=86400, 174 | help="Number of seconds grace to allow on replacement") 175 | parser.add_option("", "--max-per-loop", type="int", default=10, 176 | help="Maximum number of items to fetch in one go") 177 | # N.B., if the number of purgeable rows is <<< max_offset then most 178 | # selects will return zero rows. Choose this value accordingly. 179 | parser.add_option("", "--max-offset", type="int", default=0, 180 | help="Use random offset from 0 to max_offset") 181 | parser.add_option("", "--request-timeout", type="int", default=60, 182 | help="Timeout for service deletion requests") 183 | parser.add_option("", "--oneshot", action="store_true", 184 | help="Do a single purge run and then exit") 185 | parser.add_option("", "--max-records", type="int", default=0, 186 | help="Max records to delete") 187 | parser.add_option("", "--dryrun", action="store_true", 188 | help="Don't do destructive things") 189 | parser.add_option("", "--force", action="store_true", 190 | help="force record to be deleted from TS db," 191 | " even if node is down") 192 | parser.add_option("-v", "--verbose", action="count", dest="verbosity", 193 | help="Control verbosity of log messages") 194 | 195 | opts, args = parser.parse_args(args) 196 | if len(args) != 1: 197 | parser.print_usage() 198 | return 1 199 | 200 | tokenserver.scripts.configure_script_logging(opts) 201 | 202 | config_file = os.path.abspath(args[0]) 203 | 204 | purge_old_records(config_file, 205 | grace_period=opts.grace_period, 206 | max_per_loop=opts.max_per_loop, 207 | max_offset=opts.max_offset, 208 | request_timeout=opts.request_timeout, 209 | settings=opts) 210 | if not opts.oneshot: 211 | while True: 212 | # Randomize sleep 
interval +/- thirty percent to desynchronize 213 | # instances of this script running on multiple webheads. 214 | sleep_time = opts.purge_interval 215 | sleep_time += random.randint(-0.3 * sleep_time, 0.3 * sleep_time) 216 | logger.debug("Sleeping for %d seconds", sleep_time) 217 | time.sleep(sleep_time) 218 | purge_old_records(config_file, 219 | grace_period=opts.grace_period, 220 | max_per_loop=opts.max_per_loop, 221 | max_offset=opts.max_offset, 222 | request_timeout=opts.request_timeout, 223 | settings=opts) 224 | return 0 225 | 226 | 227 | if __name__ == "__main__": 228 | tokenserver.scripts.run_script(main) 229 | -------------------------------------------------------------------------------- /tokenserver/scripts/remove_node.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | 6 | Script to remove a node from the system. 7 | 8 | This script takes a tokenserver config file, uses it to load the assignment 9 | backend, and then nukes any references to the named node - it is removed from 10 | the "nodes" table and any users currently assigned to that node have their 11 | assignments cleared. 
12 | 13 | """ 14 | 15 | import os 16 | import logging 17 | import optparse 18 | 19 | import tokenserver.scripts 20 | from tokenserver.assignment import INodeAssignment 21 | 22 | 23 | logger = logging.getLogger("tokenserver.scripts.remove_node") 24 | 25 | 26 | def remove_node(config_file, node): 27 | """Remove the named node from the system.""" 28 | logger.info("Removing node %s", node) 29 | logger.debug("Using config file %r", config_file) 30 | config = tokenserver.scripts.load_configurator(config_file) 31 | config.begin() 32 | try: 33 | backend = config.registry.getUtility(INodeAssignment) 34 | patterns = config.registry['endpoints_patterns'] 35 | found = False 36 | for service in patterns: 37 | logger.debug("Removing node for service: %s", service) 38 | try: 39 | backend.remove_node(service, node) 40 | except ValueError: 41 | logger.debug(" not found") 42 | else: 43 | found = True 44 | logger.debug(" removed") 45 | except Exception: 46 | logger.exception("Error while removing node") 47 | return False 48 | else: 49 | if not found: 50 | logger.info("Node %s was not found", node) 51 | else: 52 | logger.info("Finished removing node %s", node) 53 | return True 54 | finally: 55 | config.end() 56 | 57 | 58 | def main(args=None): 59 | """Main entry-point for running this script. 60 | 61 | This function parses command-line arguments and passes them on 62 | to the remove_node() function. 
63 | """ 64 | usage = "usage: %prog [options] config_file node_name" 65 | descr = "Remove a node from the tokenserver database" 66 | parser = optparse.OptionParser(usage=usage, description=descr) 67 | parser.add_option("-v", "--verbose", action="count", dest="verbosity", 68 | help="Control verbosity of log messages") 69 | 70 | opts, args = parser.parse_args(args) 71 | if len(args) != 2: 72 | parser.print_usage() 73 | return 1 74 | 75 | tokenserver.scripts.configure_script_logging(opts) 76 | 77 | config_file = os.path.abspath(args[0]) 78 | node_name = args[1] 79 | 80 | remove_node(config_file, node_name) 81 | return 0 82 | 83 | 84 | if __name__ == "__main__": 85 | tokenserver.scripts.run_script(main) 86 | -------------------------------------------------------------------------------- /tokenserver/scripts/unassign_node.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | 6 | Script to remove a node from the system. 7 | 8 | This script takes a tokenserver config file, uses it to load the assignment 9 | backend, and then clears any assignments to the named node. 
10 | 11 | """ 12 | 13 | import os 14 | import logging 15 | import optparse 16 | 17 | import tokenserver.scripts 18 | from tokenserver.assignment import INodeAssignment 19 | 20 | 21 | logger = logging.getLogger("tokenserver.scripts.unassign_node") 22 | 23 | 24 | def unassign_node(config_file, node): 25 | """Clear any assignments to the named node.""" 26 | logger.info("Unassignment node %s", node) 27 | logger.debug("Using config file %r", config_file) 28 | config = tokenserver.scripts.load_configurator(config_file) 29 | config.begin() 30 | try: 31 | backend = config.registry.getUtility(INodeAssignment) 32 | patterns = config.registry['endpoints_patterns'] 33 | found = False 34 | for service in patterns: 35 | logger.debug("Unassigning node for service: %s", service) 36 | try: 37 | backend.unassign_node(service, node) 38 | except ValueError: 39 | logger.debug(" not found") 40 | else: 41 | found = True 42 | logger.debug(" unassigned") 43 | except Exception: 44 | logger.exception("Error while unassigning node") 45 | return False 46 | else: 47 | if not found: 48 | logger.info("Node %s was not found", node) 49 | else: 50 | logger.info("Finished unassigning node %s", node) 51 | return True 52 | finally: 53 | config.end() 54 | 55 | 56 | def main(args=None): 57 | """Main entry-point for running this script. 58 | 59 | This function parses command-line arguments and passes them on 60 | to the unassign_node() function. 
61 | """ 62 | usage = "usage: %prog [options] config_file node_name" 63 | descr = "Clear all assignments to node in the tokenserver database" 64 | parser = optparse.OptionParser(usage=usage, description=descr) 65 | parser.add_option("-v", "--verbose", action="count", dest="verbosity", 66 | help="Control verbosity of log messages") 67 | 68 | opts, args = parser.parse_args(args) 69 | if len(args) != 2: 70 | parser.print_usage() 71 | return 1 72 | 73 | tokenserver.scripts.configure_script_logging(opts) 74 | 75 | config_file = os.path.abspath(args[0]) 76 | node_name = args[1] 77 | 78 | unassign_node(config_file, node_name) 79 | return 0 80 | 81 | 82 | if __name__ == "__main__": 83 | tokenserver.scripts.run_script(main) 84 | -------------------------------------------------------------------------------- /tokenserver/scripts/update_node.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | """ 5 | 6 | Script to update node status in the db. 7 | 8 | This script takes a tokenserver config file, uses it to load the assignment 9 | backend, and then writes the updated node status into the db. 
10 | 11 | """ 12 | 13 | import os 14 | import logging 15 | import optparse 16 | 17 | import tokenserver.scripts 18 | from tokenserver.assignment import INodeAssignment 19 | 20 | 21 | logger = logging.getLogger("tokenserver.scripts.update_node") 22 | 23 | 24 | def update_node(config_file, service, node, **kwds): 25 | """Update details of a node.""" 26 | logger.info("Updating node %s for service %s", node, service) 27 | logger.debug("Value: %r", kwds) 28 | logger.debug("Using config file %r", config_file) 29 | config = tokenserver.scripts.load_configurator(config_file) 30 | config.begin() 31 | try: 32 | backend = config.registry.getUtility(INodeAssignment) 33 | backend.update_node(service, node, **kwds) 34 | except Exception: 35 | logger.exception("Error while updating node") 36 | return False 37 | else: 38 | logger.info("Finished updating node %s", node) 39 | return True 40 | finally: 41 | config.end() 42 | 43 | 44 | def main(args=None): 45 | """Main entry-point for running this script. 46 | 47 | This function parses command-line arguments and passes them on 48 | to the update_node() function. 
49 | """ 50 | usage = "usage: %prog [options] config_file service node_name" 51 | descr = "Update node details in the tokenserver database" 52 | parser = optparse.OptionParser(usage=usage, description=descr) 53 | parser.add_option("", "--capacity", type="int", 54 | help="How many user slots the node has overall") 55 | parser.add_option("", "--available", type="int", 56 | help="How many user slots the node has available") 57 | parser.add_option("", "--current-load", type="int", 58 | help="How many user slots the node has occupied") 59 | parser.add_option("", "--downed", action="store_true", 60 | help="Mark the node as down in the db") 61 | parser.add_option("", "--backoff", action="store_true", 62 | help="Mark the node as backed-off in the db") 63 | parser.add_option("-v", "--verbose", action="count", dest="verbosity", 64 | help="Control verbosity of log messages") 65 | 66 | opts, args = parser.parse_args(args) 67 | if len(args) != 3: 68 | parser.print_usage() 69 | return 1 70 | 71 | tokenserver.scripts.configure_script_logging(opts) 72 | 73 | config_file = os.path.abspath(args[0]) 74 | service = args[1] 75 | node_name = args[2] 76 | 77 | kwds = {} 78 | if opts.capacity is not None: 79 | kwds["capacity"] = opts.capacity 80 | if opts.available is not None: 81 | kwds["available"] = opts.available 82 | if opts.current_load is not None: 83 | kwds["current_load"] = opts.current_load 84 | if opts.backoff is not None: 85 | kwds["backoff"] = opts.backoff 86 | if opts.downed is not None: 87 | kwds["downed"] = opts.downed 88 | 89 | update_node(config_file, service, node_name, **kwds) 90 | return 0 91 | 92 | 93 | if __name__ == "__main__": 94 | tokenserver.scripts.run_script(main) 95 | -------------------------------------------------------------------------------- /tokenserver/tests/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | if "MOZSVC_SQLURI" not in os.environ: 4 | os.environ["MOZSVC_SQLURI"] = 
"sqlite:////tmp/tokenserver.db" 5 | -------------------------------------------------------------------------------- /tokenserver/tests/assignment/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla-services/tokenserver/9128c9db356601696f91c6f1590253b44f99aafe/tokenserver/tests/assignment/__init__.py -------------------------------------------------------------------------------- /tokenserver/tests/secrets: -------------------------------------------------------------------------------- 1 | https://pxh32,12345:oqiwdhdoqwuhdwqouhwqdoiwqdhqwdih 2 | https://example.com,873675:2e197e218762e1g12e76e2187gjhg 3 | -------------------------------------------------------------------------------- /tokenserver/tests/secrets2: -------------------------------------------------------------------------------- 1 | https://pxh23234232,12345:oqiwdhdoqwuhdwqouhwqdoiwqdhqwdih 2 | https://example2.com,873675:2e197e218762e1g12e76e2187gjhg 3 | -------------------------------------------------------------------------------- /tokenserver/tests/support.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from browserid.verifiers.local import LocalVerifier 4 | from browserid.tests.support import (make_assertion, get_keypair) 5 | 6 | 7 | # very dummy verifier 8 | class DummyBrowserIdVerifier(LocalVerifier): 9 | def verify_certificate_chain(self, certs, *args, **kw): 10 | return certs[0] 11 | 12 | 13 | CERTS_LOCATION = os.path.join(os.path.dirname(__file__), 'certs') 14 | 15 | 16 | def load_key(hostname): 17 | return get_keypair(hostname)[1] 18 | 19 | 20 | def sign_data(hostname, data, key=None): 21 | # load the cert with the private key 22 | return load_key(hostname).sign(data) 23 | 24 | 25 | def get_assertion(email, audience="*", issuer='browserid.org', 26 | bad_issuer_cert=False, bad_email_cert=False, exp=None): 27 | """Creates a browserid assertion for 
the given email, audience and 28 | hostname. 29 | 30 | This function can also be used to create invalid assertions. This will be 31 | the case if you set the bad_issuer_cert or the bad_email cert arguments to 32 | True. 33 | """ 34 | kwargs = {'exp': exp} 35 | if bad_issuer_cert: 36 | kwargs['issuer_keypair'] =\ 37 | get_keypair(hostname="not-the-right-host.com") 38 | 39 | if bad_email_cert: 40 | kwargs['email_keypair'] =\ 41 | get_keypair(hostname="not-the-right-host.com") 42 | 43 | assertion = make_assertion(email, audience, issuer=issuer, **kwargs) 44 | return assertion.encode('ascii') 45 | -------------------------------------------------------------------------------- /tokenserver/tests/test_backend_sql.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | from pyramid import testing 5 | from pyramid.threadlocal import get_current_registry 6 | from mozsvc.config import load_into_settings 7 | from mozsvc.plugin import load_and_register 8 | from sqlalchemy.exc import IntegrityError 9 | 10 | from tokenserver.assignment import INodeAssignment 11 | from tokenserver import load_endpoints 12 | 13 | 14 | class TestSQLBackend(unittest.TestCase): 15 | 16 | def setUp(self): 17 | super(TestSQLBackend, self).setUp() 18 | 19 | # get the options from the config 20 | self.config = testing.setUp() 21 | self.ini = os.path.join(os.path.dirname(__file__), 22 | 'test_sql.ini') 23 | settings = {} 24 | load_into_settings(self.ini, settings) 25 | self.config.add_settings(settings) 26 | 27 | # instantiate the backend to test 28 | self.config.include("tokenserver") 29 | load_and_register("tokenserver", self.config) 30 | self.backend = self.config.registry.getUtility(INodeAssignment) 31 | 32 | # adding a service and a node with 100 slots 33 | try: 34 | self.backend.add_service("sync-1.1", "{node}/1.1/{uid}") 35 | except IntegrityError: 36 | # ignore if the service was already added by another test. 
37 | pass 38 | self.backend.add_node("sync-1.1", "https://phx12", 100) 39 | 40 | self._sqlite = self.backend._engine.driver == 'pysqlite' 41 | endpoints = {} 42 | load_endpoints(endpoints, self.config) 43 | get_current_registry()['endpoints_patterns'] = endpoints 44 | 45 | def tearDown(self): 46 | if self._sqlite: 47 | filename = self.backend.sqluri.split('sqlite://')[-1] 48 | if os.path.exists(filename): 49 | os.remove(filename) 50 | else: 51 | self.backend._safe_execute('delete from services') 52 | self.backend._safe_execute('delete from nodes') 53 | self.backend._safe_execute('delete from users') 54 | 55 | def test_get_node(self): 56 | user = self.backend.get_user("sync-1.1", "test1@example.com") 57 | self.assertEquals(user, None) 58 | 59 | user = self.backend.allocate_user("sync-1.1", "test1@example.com") 60 | self.assertEqual(user['email'], "test1@example.com") 61 | self.assertEqual(user['node'], "https://phx12") 62 | 63 | user = self.backend.get_user("sync-1.1", "test1@example.com") 64 | self.assertEqual(user['email'], "test1@example.com") 65 | self.assertEqual(user['node'], "https://phx12") 66 | 67 | def test_get_patterns(self): 68 | # patterns should have been populated 69 | patterns = get_current_registry()['endpoints_patterns'] 70 | self.assertDictEqual(patterns, {'sync-1.1': '{node}/1.1/{uid}'}) 71 | -------------------------------------------------------------------------------- /tokenserver/tests/test_local_browserid_verifier.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | 5 | import unittest 6 | 7 | from pyramid.config import Configurator 8 | 9 | from tokenserver.verifiers import LocalBrowserIdVerifier, IBrowserIdVerifier 10 | from browserid.tests.support import (make_assertion, 11 | patched_supportdoc_fetching) 12 | import browserid.errors 13 | 14 | 15 | class mockobj(object): 16 | pass 17 | 18 | 19 | class TestLocalBrowserIdVerifier(unittest.TestCase): 20 | 21 | DEFAULT_SETTINGS = { # noqa; identation below is non-standard 22 | "tokenserver.backend": 23 | "tokenserver.assignment.memorynode.MemoryNodeAssignmentBackend", 24 | "browserid.backend": 25 | "tokenserver.verifiers.LocalBrowserIdVerifier", 26 | "tokenserver.secrets.backend": 27 | "mozsvc.secrets.FixedSecrets", 28 | "tokenserver.secrets.secrets": 29 | "bruce-let-the-dogs-out", 30 | } 31 | 32 | def _make_config(self, settings={}): 33 | all_settings = self.DEFAULT_SETTINGS.copy() 34 | all_settings.update(settings) 35 | config = Configurator(settings=all_settings) 36 | config.include("tokenserver") 37 | config.commit() 38 | return config 39 | 40 | def test_verifier_config_loading_defaults(self): 41 | config = self._make_config() 42 | verifier = config.registry.getUtility(IBrowserIdVerifier) 43 | self.assertTrue(isinstance(verifier, LocalBrowserIdVerifier)) 44 | self.assertEquals(verifier.audiences, None) 45 | self.assertEquals(verifier.trusted_issuers, None) 46 | self.assertEquals(verifier.allowed_issuers, None) 47 | 48 | def test_verifier_config_loading_values(self): 49 | config = self._make_config({ # noqa; indentation below is non-standard 50 | "browserid.audiences": 51 | "https://testmytoken.com", 52 | "browserid.trusted_issuers": 53 | "example.com trustyidp.org", 54 | "browserid.allowed_issuers": 55 | "example.com trustyidp.org\nmockmyid.com", 56 | }) 57 | verifier = config.registry.getUtility(IBrowserIdVerifier) 58 | self.assertTrue(isinstance(verifier, LocalBrowserIdVerifier)) 59 | self.assertEquals(verifier.audiences, "https://testmytoken.com") 60 | 
self.assertEquals(verifier.trusted_issuers, 61 | ["example.com", "trustyidp.org"]) 62 | self.assertEquals(verifier.allowed_issuers, 63 | ["example.com", "trustyidp.org", "mockmyid.com"]) 64 | 65 | def test_verifier_rejects_unallowed_issuers(self): 66 | config = self._make_config({ # noqa; indentation below is non-standard 67 | "browserid.audiences": 68 | "https://testmytoken.com", 69 | "browserid.trusted_issuers": 70 | "accounts.firefox.com trustyidp.org", 71 | "browserid.allowed_issuers": 72 | "accounts.firefox.com mockmyid.com", 73 | }) 74 | with patched_supportdoc_fetching(): 75 | verifier = config.registry.getUtility(IBrowserIdVerifier) 76 | # The issuer is both trusted, and allowed. 77 | assertion = make_assertion(email="test@example.com", 78 | audience="https://testmytoken.com", 79 | issuer="accounts.firefox.com") 80 | self.assertEquals(verifier.verify(assertion)["email"], 81 | "test@example.com") 82 | # The issuer is allowed and is the primary. 83 | assertion = make_assertion(email="test@mockmyid.com", 84 | audience="https://testmytoken.com", 85 | issuer="mockmyid.com") 86 | self.assertEquals(verifier.verify(assertion)["email"], 87 | "test@mockmyid.com") 88 | # The issuer is allowed, but not trusted as a secondary. 89 | assertion = make_assertion(email="test@example.com", 90 | audience="https://testmytoken.com", 91 | issuer="mockmyid.com") 92 | with self.assertRaises(browserid.errors.InvalidSignatureError): 93 | verifier.verify(assertion) 94 | # The issuer is trsuted, but is not allowed. 95 | assertion = make_assertion(email="test@example.com", 96 | audience="https://testmytoken.com", 97 | issuer="trustyidp.org") 98 | with self.assertRaises(browserid.errors.InvalidIssuerError): 99 | verifier.verify(assertion) 100 | # The issuer is the primary, but is not allowed. 
101 | assertion = make_assertion(email="test@example.com", 102 | audience="https://testmytoken.com", 103 | issuer="example.com") 104 | with self.assertRaises(browserid.errors.InvalidIssuerError): 105 | verifier.verify(assertion) 106 | # Various tests for string pattern-matching edgecases. 107 | # All of these are primaries, but not allowed. 108 | assertion = make_assertion(email="test@mockmyid.org", 109 | audience="https://testmytoken.com", 110 | issuer="mockmyid.org") 111 | with self.assertRaises(browserid.errors.InvalidIssuerError): 112 | verifier.verify(assertion) 113 | assertion = make_assertion(email="test@mockmyid.co", 114 | audience="https://testmytoken.com", 115 | issuer="mockmyid.co") 116 | with self.assertRaises(browserid.errors.InvalidIssuerError): 117 | verifier.verify(assertion) 118 | -------------------------------------------------------------------------------- /tokenserver/tests/test_memorynode.ini: -------------------------------------------------------------------------------- 1 | # application configuration 2 | [global] 3 | logger_name = tokenserver 4 | debug = true 5 | 6 | # token server configuration 7 | [tokenserver] 8 | backend = tokenserver.assignment.memorynode.MemoryNodeAssignmentBackend 9 | service_entry = https://example.com 10 | applications = sync-1.1, sync-1.5 11 | secrets_file = tokenserver/tests/secrets tokenserver/tests/secrets2 12 | node = https://example.com 13 | token_duration = 3600 14 | node_type_patterns = 15 | example:*example* 16 | 17 | [endpoints] 18 | sync-1.1 = {node}/1.1/{uid} 19 | 20 | [browserid] 21 | backend = tokenserver.verifiers.RemoteBrowserIdVerifier 22 | audiences = http://tokenserver.services.mozilla.com 23 | 24 | [oauth] 25 | backend = tokenserver.verifiers.RemoteOAuthVerifier 26 | 27 | [fxa] 28 | metrics_uid_secret_key = 'super-sekrit' 29 | 30 | # Paster configuration for Pyramid 31 | [filter:catcherror] 32 | paste.filter_app_factory = mozsvc.middlewares:make_err_mdw 33 | 34 | [pipeline:main] 35 | pipeline 
= catcherror 36 | pyramidapp 37 | 38 | [app:pyramidapp] 39 | use = egg:tokenserver 40 | 41 | pyramid.reload_templates = true 42 | pyramid.debug_authorization = false 43 | pyramid.debug_notfound = false 44 | pyramid.debug_routematch = false 45 | pyramid.debug_templates = true 46 | pyramid.default_locale_name = en 47 | pyramid.includes = pyramid_debugtoolbar 48 | # An existing bug in gevent 1.4 under pypy 2.7.3 causes the 49 | # monkeypatch for CRLock to fail. This cascades to causing 50 | # the thread generated by the Class init for 51 | # tests/test_purge_old_records.py to hang while running 52 | # under docker. Running worker_class as `sync` resolves 53 | # those problems. 54 | pyramid.worker_class = sync 55 | 56 | # need to do this programmatically 57 | mako.directories = cornice:templates 58 | 59 | [server:main] 60 | use = egg:Paste#http 61 | host = 0.0.0.0 62 | port = 5000 63 | -------------------------------------------------------------------------------- /tokenserver/tests/test_memorynode.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | import os 5 | import unittest 6 | 7 | from pyramid import testing 8 | 9 | from tokenserver.assignment import INodeAssignment 10 | from mozsvc.config import load_into_settings 11 | 12 | DEFAULT_EMAIL = "alexis@mozilla.com" 13 | DEFAULT_NODE = "https://example.com" 14 | DEFAULT_SERVICE = "sync-1.0" 15 | 16 | 17 | class TestFixedBackend(unittest.TestCase): 18 | 19 | def setUp(self): 20 | self.config = testing.setUp() 21 | self.ini = os.path.join(os.path.dirname(__file__), 22 | 'test_memorynode.ini') 23 | settings = {} 24 | load_into_settings(self.ini, settings) 25 | self.config.add_settings(settings) 26 | self.config.include("tokenserver") 27 | self.backend = self.config.registry.getUtility(INodeAssignment) 28 | self.backend.clear() 29 | 30 | def tearDown(self): 31 | self.backend.clear() 32 | 33 | def test_read_config(self): 34 | user = self.backend.allocate_user(DEFAULT_SERVICE, DEFAULT_EMAIL) 35 | self.assertEqual(user['node'], DEFAULT_NODE) 36 | 37 | def test_assignation(self): 38 | user = self.backend.get_user(DEFAULT_SERVICE, DEFAULT_EMAIL) 39 | self.assertEquals(user, None) 40 | 41 | user = self.backend.allocate_user(DEFAULT_EMAIL, DEFAULT_SERVICE) 42 | self.assertEquals(user['uid'], 1) 43 | self.assertEquals(user['node'], DEFAULT_NODE) 44 | 45 | user = self.backend.get_user(DEFAULT_EMAIL, DEFAULT_SERVICE) 46 | self.assertEquals(user['uid'], 1) 47 | self.assertEquals(user['node'], DEFAULT_NODE) 48 | -------------------------------------------------------------------------------- /tokenserver/tests/test_node_type_classifier.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | 5 | import unittest 6 | 7 | from pyramid.config import Configurator 8 | 9 | 10 | class TestNodeTypeClassifier(unittest.TestCase): 11 | 12 | DEFAULT_SETTINGS = { # noqa; identation below is non-standard 13 | "tokenserver.backend": 14 | "tokenserver.assignment.memorynode.MemoryNodeAssignmentBackend", 15 | "oauth.backend": 16 | "tokenserver.verifiers.RemoteOAuthVerifier", 17 | "tokenserver.secrets.backend": 18 | "mozsvc.secrets.FixedSecrets", 19 | "tokenserver.secrets.secrets": 20 | "steve-let-the-dogs-out", 21 | } 22 | 23 | def _make_classifier(self, settings={}): 24 | all_settings = self.DEFAULT_SETTINGS.copy() 25 | all_settings.update(settings) 26 | config = Configurator(settings=all_settings) 27 | config.include("tokenserver") 28 | config.commit() 29 | return config.registry.settings['tokenserver.node_type_classifier'] 30 | 31 | def test_no_patterns(self): 32 | classifier = self._make_classifier() 33 | self.assertEquals(classifier(''), None) 34 | self.assertEquals(classifier('https://example.com'), None) 35 | 36 | def test_error_if_not_a_list(self): 37 | with self.assertRaises(ValueError): 38 | self._make_classifier({ 39 | 'tokenserver.node_type_patterns': 'foo:*.bar.com', 40 | 41 | }) 42 | 43 | def test_error_if_pattern_has_no_label(self): 44 | with self.assertRaises(ValueError): 45 | self._make_classifier({ 46 | 'tokenserver.node_type_patterns': [ 47 | ':*.bar.com', 48 | ], 49 | }) 50 | 51 | def test_error_if_duplicate_pattern_label(self): 52 | with self.assertRaises(ValueError): 53 | self._make_classifier({ 54 | 'tokenserver.node_type_patterns': [ 55 | 'foo:*.foo.com', 56 | 'foo:*.bar.com', 57 | ], 58 | }) 59 | 60 | def test_pattern_matching(self): 61 | classifier = self._make_classifier({ 62 | 'tokenserver.node_type_patterns': [ 63 | 'foo:*.foo.com', 64 | 'bar:*.bar.com', 65 | ], 66 | }) 67 | self.assertEquals(classifier(''), None) 68 | self.assertEquals(classifier('https://example.com'), None) 69 | self.assertEquals(classifier('https://example.foo.com'), 
MOCK_TOKEN = 'token'


class TestRemoteOAuthVerifier(unittest.TestCase):
    """Tests for configuration and behaviour of RemoteOAuthVerifier.

    Network access to the verifier's `_client.verify_token` method is
    stubbed out via _mock_verifier; HTTP calls made during config loading
    are intercepted with the `responses` library.
    """

    DEFAULT_SETTINGS = {
        "tokenserver.backend":
            "tokenserver.assignment.memorynode.MemoryNodeAssignmentBackend",
        "oauth.backend":
            "tokenserver.verifiers.RemoteOAuthVerifier",
        "tokenserver.secrets.backend":
            "mozsvc.secrets.FixedSecrets",
        "tokenserver.secrets.secrets":
            "steve-let-the-dogs-out",
    }

    def _make_config(self, settings=None):
        """Build a pyramid Configurator from DEFAULT_SETTINGS + overrides.

        `settings` defaults to None instead of a mutable `{}` default
        argument, which is shared between calls in Python.
        """
        all_settings = self.DEFAULT_SETTINGS.copy()
        all_settings.update(settings or {})
        config = Configurator(settings=all_settings)
        config.include("tokenserver")
        config.commit()
        return config

    @contextlib.contextmanager
    def _mock_verifier(self, verifier, response=None, exc=None):
        """Temporarily replace verifier._client.verify_token.

        While active, verify_token raises `exc` if given, otherwise returns
        `response`; the original method is restored on exit even if the
        body raises.
        """
        def replacement_verify_token_method(*args, **kwds):
            if exc is not None:
                raise exc
            if response is not None:
                return response
            raise RuntimeError("incomplete mock")
        orig_verify_token_method = verifier._client.verify_token
        verifier._client.verify_token = replacement_verify_token_method
        try:
            yield None
        finally:
            verifier._client.verify_token = orig_verify_token_method

    def test_verifier_config_loading_defaults(self):
        config = self._make_config()
        verifier = config.registry.getUtility(IOAuthVerifier)
        self.assertTrue(isinstance(verifier, RemoteOAuthVerifier))
        self.assertEqual(verifier.server_url,
                         "https://oauth.accounts.firefox.com/v1")
        self.assertEqual(verifier.default_issuer,
                         "api.accounts.firefox.com")
        self.assertEqual(verifier.scope,
                         "https://identity.mozilla.com/apps/oldsync")
        self.assertEqual(verifier.timeout, 30)

    def test_verifier_config_loading_values(self):
        config = self._make_config({
            "oauth.server_url":
                "https://oauth-test1.dev.lcip.org/",
            "oauth.default_issuer":
                "myissuer.com",
            "oauth.scope":
                "some.custom.scope",
            "oauth.timeout": 500
        })
        verifier = config.registry.getUtility(IOAuthVerifier)
        self.assertTrue(isinstance(verifier, RemoteOAuthVerifier))
        # The "/v1" suffix is appended to the configured server_url.
        self.assertEqual(verifier.server_url,
                         "https://oauth-test1.dev.lcip.org/v1")
        self.assertEqual(verifier.default_issuer, "myissuer.com")
        self.assertEqual(verifier.scope, "some.custom.scope")
        self.assertEqual(verifier.timeout, 500)

    @responses.activate
    def test_verifier_config_dynamic_issuer_discovery(self):
        """With no default_issuer, it is discovered from the /config endpoint."""
        responses.add(
            responses.GET,
            "https://oauth-server.my-self-hosted-setup.com/config",
            json={
                "browserid": {
                    "issuer": "authy.my-self-hosted-setup.com",
                },
            }
        )
        call_count = 1
        """
        # Note: if versions of responses > 0.14 are required, the following
        # code will need to be activated.
        try:
            import requests

            rr = requests.get(
                "https://oauth-server.my-self-hosted-setup.com/config")
            rr.json()
            call_count = 2
        except ValueError as e:
            # Not sure why, but for some reason, the above can fail with a
            # ValueError. I believe it's the result of trying to read the
            # JSON as a stream, when the stream is NONE or undefined. I'm
            # testing here to see if the above works at all before continuing.
            # If not, just bail, since this test won't work.
            # Why does this fail where other tests do the exact same thing and
            # work fine? Sunspots and evil gnomes, as far as I can tell.
            print("Skipping test since request/response returned {}", e)
            self.skipTest("broken response")
            return
        """
        config = self._make_config({
            "oauth.server_url":
                "https://oauth-server.my-self-hosted-setup.com/",
        })
        verifier = config.registry.getUtility(IOAuthVerifier)
        self.assertEqual(len(responses.calls), call_count)
        self.assertTrue(isinstance(verifier, RemoteOAuthVerifier))
        self.assertEqual(verifier.server_url,
                         "https://oauth-server.my-self-hosted-setup.com/v1")
        self.assertEqual(verifier.default_issuer,
                         "authy.my-self-hosted-setup.com")

    @responses.activate
    def test_verifier_config_handles_missing_default_issuer(self):
        """If /config has no issuer entry, default_issuer ends up None."""
        responses.add(
            responses.GET,
            "https://oauth-server.my-self-hosted-setup.com/config",
            json={
                "browserid": {
                    "oh no": "the issuer is not configured here"
                },
            }
        )
        config = self._make_config({
            "oauth.server_url":
                "https://oauth-server.my-self-hosted-setup.com/",
        })
        verifier = config.registry.getUtility(IOAuthVerifier)
        self.assertEqual(len(responses.calls), 1)
        self.assertTrue(isinstance(verifier, RemoteOAuthVerifier))
        self.assertEqual(verifier.server_url,
                         "https://oauth-server.my-self-hosted-setup.com/v1")
        self.assertEqual(verifier.default_issuer, None)

    def test_verifier_config_rejects_empty_scope(self):
        with self.assertRaises(ValueError):
            self._make_config({
                "oauth.scope": ""
            })

    def test_verifier_failure_cases(self):
        """Socket errors map to ConnectionError; fxa errors pass through."""
        config = self._make_config()
        verifier = config.registry.getUtility(IOAuthVerifier)
        with self._mock_verifier(verifier, exc=socket.error):
            with self.assertRaises(ConnectionError):
                verifier.verify(MOCK_TOKEN)
        err = fxa.errors.ScopeMismatchError(verifier.scope, 'wrong.scope')
        with self._mock_verifier(verifier, exc=err):
            with self.assertRaises(fxa.errors.ScopeMismatchError):
                verifier.verify(MOCK_TOKEN)

    def test_verifier_constructs_email_from_uid_and_reported_issuer(self):
        """An issuer in the response takes precedence over the default."""
        config = self._make_config({
            "oauth.default_issuer":
                "my.default.issuer.com",
        })
        verifier = config.registry.getUtility(IOAuthVerifier)
        mock_response = {"user": "UID", "issuer": "my.custom.issuer.com"}
        with self._mock_verifier(verifier, response=mock_response):
            self.assertEqual(verifier.verify(MOCK_TOKEN)["email"],
                             "UID@my.custom.issuer.com")

    def test_verifier_constructs_email_from_uid_and_default_issuer(self):
        config = self._make_config({
            "oauth.default_issuer":
                "my.custom.issuer.com",
        })
        verifier = config.registry.getUtility(IOAuthVerifier)
        with self._mock_verifier(verifier, response={"user": "UID"}):
            self.assertEqual(verifier.verify(MOCK_TOKEN)["email"],
                             "UID@my.custom.issuer.com")

    @responses.activate
    def test_verifier_fails_if_issuer_cannot_be_determined(self):
        """With no default and no reported issuer, verification is untrusted."""
        responses.add(
            responses.GET,
            "https://oauth-server.my-self-hosted-setup.com/config",
            json={},
        )
        config = self._make_config({
            "oauth.server_url":
                "https://oauth-server.my-self-hosted-setup.com/",
        })
        verifier = config.registry.getUtility(IOAuthVerifier)
        self.assertEqual(len(responses.calls), 1)
        with self._mock_verifier(verifier, response={"user": "UID"}):
            with self.assertRaises(fxa.errors.TrustError):
                verifier.verify(MOCK_TOKEN)

    @responses.activate
    def test_verifier_returns_generation(self):
        """A 'generation' in the response surfaces as the fxa-generation claim."""
        config = self._make_config()
        verifier = config.registry.getUtility(IOAuthVerifier)
        generation = 2
        with self._mock_verifier(
                verifier,
                response={"user": "UID", "generation": generation}):
            self.assertEqual(verifier.verify(MOCK_TOKEN)['idpClaims'].get(
                'fxa-generation'), generation)
4 | import json 5 | import os 6 | import unittest 7 | 8 | from pyramid import testing 9 | from testfixtures import LogCapture 10 | 11 | from mozsvc.config import load_into_settings 12 | from mozsvc.plugin import load_and_register 13 | 14 | from tokenserver.scripts.process_account_events import process_account_event 15 | from tokenserver.assignment import INodeAssignment 16 | 17 | 18 | SERVICE = "sync-1.1" 19 | PATTERN = "{node}/1.1/{uid}" 20 | EMAIL = "test@example.com" 21 | UID = "test" 22 | ISS = "example.com" 23 | 24 | 25 | def message_body(**kwds): 26 | return json.dumps({ 27 | "Message": json.dumps(kwds) 28 | }) 29 | 30 | 31 | class TestProcessAccountEvents(unittest.TestCase): 32 | 33 | def get_ini(self): 34 | return os.path.join(os.path.dirname(__file__), 35 | 'test_sql.ini') 36 | 37 | def setUp(self): 38 | self.config = testing.setUp() 39 | settings = {} 40 | load_into_settings(self.get_ini(), settings) 41 | self.config.add_settings(settings) 42 | self.config.include("tokenserver") 43 | load_and_register("tokenserver", self.config) 44 | self.backend = self.config.registry.getUtility(INodeAssignment) 45 | self.backend.add_service(SERVICE, PATTERN) 46 | self.backend.add_node(SERVICE, "https://phx12", 100) 47 | self.logs = LogCapture() 48 | 49 | def tearDown(self): 50 | self.logs.uninstall() 51 | testing.tearDown() 52 | if self.backend._engine.driver == 'pysqlite': 53 | filename = self.backend.sqluri.split('sqlite://')[-1] 54 | if os.path.exists(filename): 55 | os.remove(filename) 56 | else: 57 | self.backend._safe_execute('delete from services') 58 | self.backend._safe_execute('delete from nodes') 59 | self.backend._safe_execute('delete from users') 60 | 61 | def assertMessageWasLogged(self, msg): 62 | """Check that a metric was logged during the request.""" 63 | for r in self.logs.records: 64 | if msg in r.getMessage(): 65 | break 66 | else: 67 | assert False, "message %r was not logged" % (msg,) 68 | 69 | def clearLogs(self): 70 | del self.logs.records[:] 71 
| 72 | def test_delete_user(self): 73 | self.backend.allocate_user(SERVICE, EMAIL) 74 | user = self.backend.get_user(SERVICE, EMAIL) 75 | self.backend.update_user(SERVICE, user, client_state="abcdef") 76 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 77 | self.assertEquals(len(records), 2) 78 | self.assertTrue(records[0]["replaced_at"] is not None) 79 | 80 | process_account_event(self.config, message_body( 81 | event="delete", 82 | uid=UID, 83 | iss=ISS, 84 | )) 85 | 86 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 87 | self.assertEquals(len(records), 2) 88 | for row in records: 89 | self.assertTrue(row["replaced_at"] is not None) 90 | 91 | def test_delete_user_by_legacy_uid_format(self): 92 | self.backend.allocate_user(SERVICE, EMAIL) 93 | user = self.backend.get_user(SERVICE, EMAIL) 94 | self.backend.update_user(SERVICE, user, client_state="abcdef") 95 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 96 | self.assertEquals(len(records), 2) 97 | self.assertTrue(records[0]["replaced_at"] is not None) 98 | 99 | process_account_event(self.config, message_body( 100 | event="delete", 101 | uid=EMAIL, 102 | )) 103 | 104 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 105 | self.assertEquals(len(records), 2) 106 | for row in records: 107 | self.assertTrue(row["replaced_at"] is not None) 108 | 109 | def test_delete_user_who_is_not_in_the_db(self): 110 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 111 | self.assertEquals(len(records), 0) 112 | 113 | process_account_event(self.config, message_body( 114 | event="delete", 115 | uid=UID, 116 | iss=ISS 117 | )) 118 | 119 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 120 | self.assertEquals(len(records), 0) 121 | 122 | def test_reset_user(self): 123 | self.backend.allocate_user(SERVICE, EMAIL, generation=12) 124 | 125 | process_account_event(self.config, message_body( 126 | event="reset", 127 | uid=UID, 128 | iss=ISS, 129 | 
generation=43, 130 | )) 131 | 132 | user = self.backend.get_user(SERVICE, EMAIL) 133 | self.assertEquals(user["generation"], 42) 134 | 135 | def test_reset_user_by_legacy_uid_format(self): 136 | self.backend.allocate_user(SERVICE, EMAIL, generation=12) 137 | 138 | process_account_event(self.config, message_body( 139 | event="reset", 140 | uid=EMAIL, 141 | generation=43, 142 | )) 143 | 144 | user = self.backend.get_user(SERVICE, EMAIL) 145 | self.assertEquals(user["generation"], 42) 146 | 147 | def test_reset_user_who_is_not_in_the_db(self): 148 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 149 | self.assertEquals(len(records), 0) 150 | 151 | process_account_event(self.config, message_body( 152 | event="reset", 153 | uid=UID, 154 | iss=ISS, 155 | generation=43, 156 | )) 157 | 158 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 159 | self.assertEquals(len(records), 0) 160 | 161 | def test_password_change(self): 162 | self.backend.allocate_user(SERVICE, EMAIL, generation=12) 163 | 164 | process_account_event(self.config, message_body( 165 | event="passwordChange", 166 | uid=UID, 167 | iss=ISS, 168 | generation=43, 169 | )) 170 | 171 | user = self.backend.get_user(SERVICE, EMAIL) 172 | self.assertEquals(user["generation"], 42) 173 | 174 | def test_password_change_user_not_in_db(self): 175 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 176 | self.assertEquals(len(records), 0) 177 | 178 | process_account_event(self.config, message_body( 179 | event="passwordChange", 180 | uid=UID, 181 | iss=ISS, 182 | generation=43, 183 | )) 184 | 185 | records = list(self.backend.get_user_records(SERVICE, EMAIL)) 186 | self.assertEquals(len(records), 0) 187 | 188 | def test_malformed_events(self): 189 | 190 | # Unknown event type. 
191 | process_account_event(self.config, message_body( 192 | event="party", 193 | uid=UID, 194 | iss=ISS, 195 | generation=43, 196 | )) 197 | self.assertMessageWasLogged("Dropping unknown event type") 198 | self.clearLogs() 199 | 200 | # Missing event type. 201 | process_account_event(self.config, message_body( 202 | uid=UID, 203 | iss=ISS, 204 | generation=43, 205 | )) 206 | self.assertMessageWasLogged("Invalid account message") 207 | self.clearLogs() 208 | 209 | # Missing uid. 210 | process_account_event(self.config, message_body( 211 | event="delete", 212 | iss=ISS, 213 | )) 214 | self.assertMessageWasLogged("Invalid account message") 215 | self.clearLogs() 216 | 217 | # Missing generation for reset events. 218 | process_account_event(self.config, message_body( 219 | event="reset", 220 | uid=UID, 221 | iss=ISS, 222 | )) 223 | self.assertMessageWasLogged("Invalid account message") 224 | self.clearLogs() 225 | 226 | # Missing generation for passwordChange events. 227 | process_account_event(self.config, message_body( 228 | event="passwordChange", 229 | uid=UID, 230 | iss=ISS, 231 | )) 232 | self.assertMessageWasLogged("Invalid account message") 233 | self.clearLogs() 234 | 235 | # Missing issuer with nonemail uid 236 | process_account_event(self.config, message_body( 237 | event="delete", 238 | uid=UID, 239 | )) 240 | self.assertMessageWasLogged("Invalid account message") 241 | self.clearLogs() 242 | 243 | # Non-JSON garbage. 244 | process_account_event(self.config, "wat") 245 | self.assertMessageWasLogged("Invalid account message") 246 | self.clearLogs() 247 | 248 | # Non-JSON garbage in Message field. 249 | process_account_event(self.config, '{ "Message": "wat" }') 250 | self.assertMessageWasLogged("Invalid account message") 251 | self.clearLogs() 252 | 253 | # Badly-typed JSON value in Message field. 
254 | process_account_event(self.config, '{ "Message": "[1, 2, 3"] }') 255 | self.assertMessageWasLogged("Invalid account message") 256 | self.clearLogs() 257 | -------------------------------------------------------------------------------- /tokenserver/tests/test_purge_old_records.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | import os 6 | import re 7 | import threading 8 | import unittest 9 | import mock 10 | from wsgiref.simple_server import make_server 11 | 12 | import tokenlib 13 | import hawkauthlib 14 | import pyramid.testing 15 | 16 | from mozsvc.config import load_into_settings 17 | 18 | from tokenserver.assignment import INodeAssignment 19 | from tokenserver.scripts.purge_old_records import purge_old_records 20 | 21 | 22 | class TestPurgeOldRecordsScript(unittest.TestCase): 23 | """A testcase for proper functioning of the purge_old_records.py script. 24 | 25 | This is a tricky one, because we have to actually run the script and 26 | test that it does the right thing. We also run a mock downstream service 27 | so we can test that data-deletion requests go through ok. 28 | """ 29 | 30 | @classmethod 31 | def setUpClass(cls): 32 | cls.service_requests = [] 33 | cls.service_node = "http://localhost:8002" 34 | cls.service = make_server("localhost", 8002, cls._service_app) 35 | target = cls.service.serve_forever 36 | cls.service_thread = threading.Thread(target=target) 37 | # Note: If the following `start` causes the test thread to hang, 38 | # you may need to specify 39 | # `[app::pyramid.app] pyramid.worker_class = sync` in the test_*.ini 40 | # files 41 | cls.service_thread.start() 42 | # This silences nuisance on-by-default logging output. 
43 | cls.service.RequestHandlerClass.log_request = lambda *a: None 44 | 45 | def setUp(self): 46 | super(TestPurgeOldRecordsScript, self).setUp() 47 | 48 | # Make a stub tokenserver app in-line. 49 | self.config = pyramid.testing.setUp() 50 | self.ini_file = os.path.join(os.path.dirname(__file__), 'test_sql.ini') 51 | settings = {} 52 | load_into_settings(self.ini_file, settings) 53 | self.config.add_settings(settings) 54 | self.config.include("tokenserver") 55 | 56 | # Configure the node-assignment backend to talk to our test service. 57 | self.backend = self.config.registry.getUtility(INodeAssignment) 58 | self.backend.add_service("sync-1.1", "{node}/1.1/{uid}") 59 | self.backend.add_node("sync-1.1", self.service_node, 100) 60 | 61 | def tearDown(self): 62 | if self.backend._engine.driver == 'pysqlite': 63 | filename = self.backend.sqluri.split('sqlite://')[-1] 64 | if os.path.exists(filename): 65 | os.remove(filename) 66 | else: 67 | self.backend._safe_execute('delete from services') 68 | self.backend._safe_execute('delete from nodes') 69 | self.backend._safe_execute('delete from users') 70 | pyramid.testing.tearDown() 71 | del self.service_requests[:] 72 | 73 | @classmethod 74 | def tearDownClass(cls): 75 | cls.service.shutdown() 76 | cls.service_thread.join() 77 | 78 | @classmethod 79 | def _service_app(cls, environ, start_response): 80 | cls.service_requests.append(environ) 81 | start_response("200 OK", []) 82 | return "" 83 | 84 | def test_purging_of_old_user_records(self): 85 | # Make some old user records. 
86 | service = "sync-1.1" 87 | email = "test@mozilla.com" 88 | user = self.backend.allocate_user(service, email, client_state="aa", 89 | generation=123) 90 | self.backend.update_user(service, user, client_state="bb", 91 | generation=456, keys_changed_at=450) 92 | self.backend.update_user(service, user, client_state="cc", 93 | generation=789) 94 | user_records = list(self.backend.get_user_records(service, email)) 95 | self.assertEqual(len(user_records), 3) 96 | user = self.backend.get_user(service, email) 97 | self.assertEquals(user["client_state"], "cc") 98 | self.assertEquals(len(user["old_client_states"]), 2) 99 | mock_settings = mock.Mock() 100 | mock_settings.dryrun = False 101 | 102 | # The default grace-period should prevent any cleanup. 103 | self.assertTrue(purge_old_records( 104 | self.ini_file, settings=mock_settings)) 105 | user_records = list(self.backend.get_user_records(service, email)) 106 | self.assertEqual(len(user_records), 3) 107 | self.assertEqual(len(self.service_requests), 0) 108 | 109 | # With no grace period, we should cleanup two old records. 110 | self.assertTrue( 111 | purge_old_records( 112 | self.ini_file, grace_period=0, settings=mock_settings)) 113 | user_records = list(self.backend.get_user_records(service, email)) 114 | self.assertEqual(len(user_records), 1) 115 | self.assertEqual(len(self.service_requests), 2) 116 | 117 | # Check that the proper delete requests were made to the service. 118 | secrets = self.config.registry.settings["tokenserver.secrets"] 119 | node_secret = secrets.get(self.service_node)[-1] 120 | expected_kids = ["0000000000450-uw", "0000000000123-qg"] 121 | for i, environ in enumerate(self.service_requests): 122 | # They must be to the correct path. 123 | self.assertEquals(environ["REQUEST_METHOD"], "DELETE") 124 | self.assertTrue(re.match("/1.1/[0-9]+", environ["PATH_INFO"])) 125 | # They must have a correct request signature. 
126 | token = hawkauthlib.get_id(environ) 127 | secret = tokenlib.get_derived_secret(token, secret=node_secret) 128 | self.assertTrue(hawkauthlib.check_signature(environ, secret)) 129 | userdata = tokenlib.parse_token(token, secret=node_secret) 130 | self.assertTrue("uid" in userdata) 131 | self.assertTrue("node" in userdata) 132 | self.assertEqual(userdata["fxa_uid"], "test") 133 | self.assertEqual(userdata["fxa_kid"], expected_kids[i]) 134 | 135 | # Check that the user's current state is unaffected 136 | user = self.backend.get_user(service, email) 137 | self.assertEquals(user["client_state"], "cc") 138 | self.assertEquals(len(user["old_client_states"]), 0) 139 | 140 | def test_purging_is_not_done_on_downed_nodes(self): 141 | # Make some old user records. 142 | service = "sync-1.1" 143 | email = "test@mozilla.com" 144 | user = self.backend.allocate_user(service, email, client_state="aa") 145 | self.backend.update_user(service, user, client_state="bb") 146 | user_records = list(self.backend.get_user_records(service, email)) 147 | self.assertEqual(len(user_records), 2) 148 | mock_settings = mock.Mock() 149 | mock_settings.dryrun = False 150 | mock_settings.force = False 151 | 152 | # With the node down, we should not purge any records. 153 | self.backend.update_node(service, self.service_node, downed=1) 154 | self.assertTrue(purge_old_records( 155 | self.ini_file, grace_period=0, settings=mock_settings)) 156 | user_records = list(self.backend.get_user_records(service, email)) 157 | 158 | self.assertEqual(len(user_records), 2) 159 | self.assertEqual(len(self.service_requests), 0) 160 | # With the node back up, we should purge correctly. 
161 | self.backend.update_node(service, self.service_node, downed=0) 162 | self.assertTrue(purge_old_records( 163 | self.ini_file, grace_period=0, settings=mock_settings)) 164 | user_records = list(self.backend.get_user_records(service, email)) 165 | self.assertEqual(len(user_records), 1) 166 | self.assertEqual(len(self.service_requests), 1) 167 | -------------------------------------------------------------------------------- /tokenserver/tests/test_remote_browserid_verifier.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | import json 6 | import contextlib 7 | import unittest 8 | 9 | from pyramid.config import Configurator 10 | 11 | from tokenserver.verifiers import RemoteBrowserIdVerifier, IBrowserIdVerifier 12 | from browserid.tests.support import make_assertion 13 | import browserid.errors 14 | 15 | 16 | class mockobj(object): 17 | pass 18 | 19 | 20 | class TestRemoteBrowserIdVerifier(unittest.TestCase): 21 | 22 | DEFAULT_SETTINGS = { # noqa; identation below is non-standard 23 | "tokenserver.backend": 24 | "tokenserver.assignment.memorynode.MemoryNodeAssignmentBackend", 25 | "browserid.backend": 26 | "tokenserver.verifiers.RemoteBrowserIdVerifier", 27 | "tokenserver.secrets.backend": 28 | "mozsvc.secrets.FixedSecrets", 29 | "tokenserver.secrets.secrets": 30 | "steve-let-the-dogs-out", 31 | "browserid.backend": 32 | "tokenserver.verifiers.RemoteBrowserIdVerifier", 33 | } 34 | 35 | def _make_config(self, settings={}): 36 | all_settings = self.DEFAULT_SETTINGS.copy() 37 | all_settings.update(settings) 38 | config = Configurator(settings=all_settings) 39 | config.include("tokenserver") 40 | config.commit() 41 | return config 42 | 43 | @contextlib.contextmanager 44 | def _mock_verifier(self, verifier, exc=None, 
**response_attrs): 45 | def replacement_post_method(*args, **kwds): 46 | if exc is not None: 47 | raise exc 48 | response = mockobj() 49 | response.status_code = 200 50 | response.text = "" 51 | response.__dict__.update(response_attrs) 52 | return response 53 | orig_post_method = verifier.session.post 54 | verifier.session.post = replacement_post_method 55 | try: 56 | yield None 57 | finally: 58 | verifier.session.post = orig_post_method 59 | 60 | def test_verifier_config_loading_defaults(self): 61 | config = self._make_config() 62 | verifier = config.registry.getUtility(IBrowserIdVerifier) 63 | self.assertTrue(isinstance(verifier, RemoteBrowserIdVerifier)) 64 | self.assertEquals(verifier.verifier_url, 65 | "https://verifier.accounts.firefox.com/v2") 66 | self.assertEquals(verifier.audiences, None) 67 | self.assertEquals(verifier.trusted_issuers, None) 68 | self.assertEquals(verifier.allowed_issuers, None) 69 | 70 | def test_verifier_config_loading_values(self): 71 | config = self._make_config({ # noqa; indentation below is non-standard 72 | "browserid.verifier_url": 73 | "https://trustyverifier.notascam.com/endpoint/path", 74 | "browserid.audiences": 75 | "https://testmytoken.com", 76 | "browserid.trusted_issuers": 77 | "example.com trustyidp.org", 78 | "browserid.allowed_issuers": 79 | "example.com trustyidp.org\nmockmyid.com", 80 | }) 81 | verifier = config.registry.getUtility(IBrowserIdVerifier) 82 | self.assertTrue(isinstance(verifier, RemoteBrowserIdVerifier)) 83 | self.assertEquals(verifier.verifier_url, 84 | "https://trustyverifier.notascam.com/endpoint/path") 85 | self.assertEquals(verifier.audiences, "https://testmytoken.com") 86 | self.assertEquals(verifier.trusted_issuers, 87 | ["example.com", "trustyidp.org"]) 88 | self.assertEquals(verifier.allowed_issuers, 89 | ["example.com", "trustyidp.org", "mockmyid.com"]) 90 | 91 | def test_verifier_failure_cases(self): 92 | config = self._make_config({ # noqa; indentation below is non-standard 93 | 
"browserid.audiences": 94 | "https://testmytoken.com", 95 | }) 96 | verifier = config.registry.getUtility(IBrowserIdVerifier) 97 | assertion = make_assertion(email="test@example.com", 98 | audience="https://testmytoken.com") 99 | with self._mock_verifier(verifier, status_code=500): 100 | with self.assertRaises(browserid.errors.ConnectionError): 101 | verifier.verify(assertion) 102 | with self._mock_verifier(verifier, text="

Server Error

"): 103 | with self.assertRaises(browserid.errors.ConnectionError): 104 | verifier.verify(assertion) 105 | with self._mock_verifier(verifier, text='{"status": "error"}'): 106 | with self.assertRaises(browserid.errors.InvalidSignatureError): 107 | verifier.verify(assertion) 108 | with self._mock_verifier(verifier, text='{"status": "potato"}'): 109 | with self.assertRaises(browserid.errors.InvalidSignatureError): 110 | verifier.verify(assertion) 111 | 112 | def test_verifier_rejects_unallowed_issuers(self): 113 | config = self._make_config({ # noqa; indentation below is non-standard 114 | "browserid.audiences": 115 | "https://testmytoken.com", 116 | "browserid.allowed_issuers": 117 | "accounts.firefox.com mockmyid.com", 118 | }) 119 | verifier = config.registry.getUtility(IBrowserIdVerifier) 120 | assertion = make_assertion(email="test@example.com", 121 | audience="https://testmytoken.com") 122 | mock_response = { 123 | "status": "okay", 124 | "principal": { 125 | "email": "test@example.com", 126 | }, 127 | "audience": "https://testmytoken.com", 128 | "issuer": "login.persona.org", 129 | } 130 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 131 | with self.assertRaises(browserid.errors.InvalidIssuerError): 132 | verifier.verify(assertion) 133 | mock_response["issuer"] = "mockmyid.com" 134 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 135 | self.assertEquals(verifier.verify(assertion)["principal"]["email"], 136 | "test@example.com") 137 | mock_response["issuer"] = "accounts.firefox.com" 138 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 139 | self.assertEquals(verifier.verify(assertion)["principal"]["email"], 140 | "test@example.com") 141 | mock_response["issuer"] = "mockmyid.org" 142 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 143 | with self.assertRaises(browserid.errors.InvalidIssuerError): 144 | verifier.verify(assertion) 145 | mock_response["issuer"] = 
"http://mockmyid.com" 146 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 147 | with self.assertRaises(browserid.errors.InvalidIssuerError): 148 | verifier.verify(assertion) 149 | mock_response["issuer"] = "mockmyid.co" 150 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 151 | with self.assertRaises(browserid.errors.InvalidIssuerError): 152 | verifier.verify(assertion) 153 | mock_response["issuer"] = 42 154 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 155 | with self.assertRaises(browserid.errors.InvalidIssuerError): 156 | verifier.verify(assertion) 157 | mock_response["issuer"] = None 158 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 159 | with self.assertRaises(browserid.errors.InvalidIssuerError): 160 | verifier.verify(assertion) 161 | del mock_response["issuer"] 162 | with self._mock_verifier(verifier, text=json.dumps(mock_response)): 163 | with self.assertRaises(browserid.errors.InvalidIssuerError): 164 | verifier.verify(assertion) 165 | -------------------------------------------------------------------------------- /tokenserver/tests/test_sql.ini: -------------------------------------------------------------------------------- 1 | # application configuration 2 | [global] 3 | logger_name = tokenserver 4 | debug = true 5 | 6 | # token server configuration 7 | [tokenserver] 8 | backend = tokenserver.assignment.sqlnode.SQLNodeAssignment 9 | sqluri = ${MOZSVC_SQLURI} 10 | # for local docker container testing. 
11 | #sqluri=sqlite:////tmp/tokenserver.db 12 | create_tables = true 13 | applications = sync-1.1, sync-1.5 14 | token_duration = 3600 15 | secrets.backend = mozsvc.secrets.DerivedSecrets 16 | secrets.master_secrets = "abcdef" 17 | "123456" 18 | node_type_patterns = 19 | example:*example.com 20 | spanner_node_id = 800 21 | migrate_new_user_percentage=0 22 | 23 | [endpoints] 24 | sync-1.1 = {node}/1.1/{uid} 25 | 26 | [browserid] 27 | backend = tokenserver.verifiers.RemoteBrowserIdVerifier 28 | audiences = http://tokenserver.services.mozilla.com 29 | 30 | [oauth] 31 | backend = tokenserver.verifiers.RemoteOAuthVerifier 32 | jwks = {"keys":[{"kty":"RSA","n":"nW_losfifTdqolJzRvQEHYLzjf25eX7MriczYrUnbr25runIyz214WAuTeAECDpXGJo__J6brUugkLFaf_NGv-JpJ44QKUiZKcw7qB1N3sEy2WF3XbUR0W0w28pfA2WbwcTRb1j0mj0KPWltCFCK51_KeINMuCTDC9UyXUZjwpSQyJ6lYQVK_n2XR8K2qohOE8I3k03dRkZmZ_D6DLHUUD7hp6pdUpvp2Q6pl_AI59s1J3Z-tCgy_N7ja9QdXE8K6hFAjoF3p5ix46vo6M6HeUGVkVrjEa-Lh15dFkmf6_-8N0r9owwNxpNqkT2nzVdZY2LwLzzqqmgzfP0lbhziaw","e":"AQAB","dp":"aod_c9v-N82vmOppJQkIUjSOf_pkmrxJZZ9eJO-ebJd5OsxN_GLOFHa3AH0-vlUoiwFOsziB9yq33EkQT0r9BYcwXEvHJKX5smt17wmIskakLw2FWozSwNf9bgCPoIBh2NyVtcJ0p1SaO3IuIuQsQetfmwkqHbdKOYUnuNc0IuE","dq":"muc3N3YzJ87RLiBij6xfAliSxdMDg6zKBFXwPRHQJJ0cg6lbvnpnp8XJjjhmYov_2xmICi3C_LO6fwe8KyUOyiPkb0VbjWZtq4Iol9qkQ0iKTnGXkoTfBHVheGq5QoAhxiX7xExd4Gnog5KocrexFWuiZQ0Ul22Bji3gqJhwvcE","qi":"xguY_G6Ld0Rp7a_ZHAFnAr3Q5Dzhjhkp3vgCi1uNp2jmP3QYng-GvP2xaLcLA0HLBOc0ghgSJYcnmmOB6bxVkVc5R0Hg17-tLlOgQejCd5mQUeMmp_upAScPHzoEea-OM9O_mHtM5BuuroaLIJdhxYolRkKfwD35cwdMX2j9H_4","kid":"20191118-e43b24c6","alg":"RS256","use":"sig","fxa-createdAt":1574056800}]} 33 | [fxa] 34 | metrics_uid_secret_key = 'super-sekrit' 35 | 36 | # Paster configuration for Pyramid 37 | [filter:catcherror] 38 | paste.filter_app_factory = mozsvc.middlewares:make_err_mdw 39 | 40 | [pipeline:main] 41 | pipeline = catcherror 42 | pyramidapp 43 | 44 | [app:pyramidapp] 45 | use = egg:tokenserver 46 | 47 | pyramid.reload_templates = true 48 | 
pyramid.debug_authorization = false 49 | pyramid.debug_notfound = false 50 | pyramid.debug_routematch = false 51 | pyramid.debug_templates = true 52 | pyramid.default_locale_name = en 53 | pyramid.includes = pyramid_debugtoolbar 54 | # An existing bug in gevent 1.4 under pypy 2.7.3 causes the 55 | # monkeypatch for CRLock to fail. This cascades to causing 56 | # the thread generated by the Class init for 57 | # tests/test_purge_old_records.py to hang while running 58 | # under docker. Running worker_class as `sync` resolves 59 | # those problems. 60 | pyramid.worker_class = sync 61 | 62 | # need to do this programmatically 63 | mako.directories = cornice:templates 64 | 65 | [server:main] 66 | use = egg:Paste#http 67 | host = 0.0.0.0 68 | port = 5000 69 | 70 | # Begin logging configuration 71 | 72 | [loggers] 73 | keys = root, tokenserver 74 | 75 | [handlers] 76 | keys = console 77 | 78 | [formatters] 79 | keys = generic 80 | 81 | [logger_root] 82 | level = INFO 83 | handlers = console 84 | 85 | [logger_tokenserver] 86 | level = DEBUG 87 | handlers = 88 | qualname = tokenserver 89 | 90 | [handler_console] 91 | class = StreamHandler 92 | args = (sys.stderr,) 93 | level = NOTSET 94 | formatter = generic 95 | 96 | [formatter_generic] 97 | format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s 98 | 99 | # End logging configuration 100 | -------------------------------------------------------------------------------- /tokenserver/tweens.py: -------------------------------------------------------------------------------- 1 | # This Source Code Form is subject to the terms of the Mozilla Public 2 | # License, v. 2.0. If a copy of the MPL was not distributed with this file, 3 | # You can obtain one at http://mozilla.org/MPL/2.0/. 
def set_x_timestamp_header(handler, registry):
    """Tween factory that sets the X-Timestamp header on all responses.

    The header carries the server's current time as integer seconds
    since the epoch.  It is added both to responses returned normally
    by downstream handlers and to HTTPException responses raised by
    them (the exception is then re-raised unchanged).
    """

    def set_x_timestamp_header_tween(request):
        try:
            response = handler(request)
        # BUGFIX: was the Python-2-only `except HTTPException, response:`
        # form, which is a syntax error on Python 3; the `as` form works
        # on Python 2.6+ as well.
        except HTTPException as response:
            response.headers["X-Timestamp"] = str(int(time.time()))
            raise
        else:
            response.headers["X-Timestamp"] = str(int(time.time()))
            return response

    return set_x_timestamp_header_tween


def includeme(config):
    """Include all the TokenServer tweens into the given config."""
    config.add_tween("tokenserver.tweens.set_x_timestamp_header")
class _JSONError(exc.HTTPError):
    """An HTTPError whose body is a JSON document of validation errors."""

    def __init__(self, errors, status_code=400, status_message='error'):
        payload = {'status': status_message, 'errors': errors}
        # Call Response.__init__ directly rather than HTTPError's own
        # constructor, so the body is exactly our JSON document.
        Response.__init__(self, json.dumps(payload))
        self.status = status_code
        self.content_type = 'application/json'


def json_error(status_code=400, status_message='error', **kw):
    """Build a _JSONError response carrying a single cornice-style error."""
    errors = Errors(status=status_code)
    for field, default in (('location', 'body'),
                           ('name', ''),
                           ('description', '')):
        kw.setdefault(field, default)
    errors.add(**kw)
    return _JSONError(errors, status_code, status_message)


def find_config_file(*paths):
    """Locate the tokenserver ini file, checking candidates in order.

    The $TOKEN_INI environment variable wins, then any explicitly given
    paths, then the standard production locations.  Raises RuntimeError
    if none of the candidates exists on disk.
    """
    candidates = [os.environ.get('TOKEN_INI')]
    candidates.extend(paths)
    candidates.extend([
        '/data/tokenserver/token-prod.ini',
        '/etc/mozilla-services/token/production.ini',
    ])
    for candidate in candidates:
        if candidate is None:
            continue
        candidate = os.path.abspath(candidate)
        if os.path.exists(candidate):
            return candidate
    raise RuntimeError("Could not locate tokenserver ini file")


def get_timestamp():
    """Get current timestamp in milliseconds."""
    millis = time.time() * 1000
    return int(millis)


def parse_key_id(kid):
    """Parse an FxA key ID into its constituent timestamp and key hash."""
    timestamp_part, hash_part = kid.split("-", 1)
    return (int(timestamp_part), decode_bytes_b64(hash_part))


def format_key_id(keys_changed_at, key_hash):
    """Format an FxA key ID from a timestamp and key hash."""
    encoded_hash = encode_bytes_b64(key_hash)
    return "{:013d}-{}".format(keys_changed_at, encoded_hash)
def get_browserid_verifier(registry=None):
    """Return the registered browserid verifier.

    If no browserid verifier is registered, raises ComponentLookupError.
    """
    target = get_current_registry() if registry is None else registry
    return target.getUtility(IBrowserIdVerifier)


def get_oauth_verifier(registry=None):
    """Return the registered oauth verifier.

    If no oauth verifier is registered, raises ComponentLookupError.
    """
    target = get_current_registry() if registry is None else registry
    return target.getUtility(IOAuthVerifier)
class LocalBrowserIdVerifier(browserid.verifiers.local.LocalVerifier):
    """BrowserID verifier that validates assertions locally.

    A thin wrapper around PyBrowserID's LocalVerifier that adds support
    for restricting the set of acceptable issuers (``allowed_issuers``)
    and for configuring SSL certificate verification.
    """
    implements(IBrowserIdVerifier)

    def __init__(self, trusted_issuers=None, allowed_issuers=None, **kwargs):
        """LocalVerifier constructor, with the following extra config options:

        :param ssl_certificate: The path to an optional ssl certificate to
            use when doing SSL requests with the BrowserID server.
            Set to True (the default) to use default certificate authorities.
            Set to False to disable SSL verification.
        """
        # Space-separated strings are accepted for ini-file convenience.
        if isinstance(trusted_issuers, basestring):
            trusted_issuers = trusted_issuers.split()
        self.trusted_issuers = trusted_issuers
        if trusted_issuers is not None:
            kwargs["trusted_secondaries"] = trusted_issuers
        if isinstance(allowed_issuers, basestring):
            allowed_issuers = allowed_issuers.split()
        self.allowed_issuers = allowed_issuers
        if "ssl_certificate" in kwargs:
            verify = kwargs.pop("ssl_certificate")
            if not verify:
                self._emit_warning()
        else:
            verify = None
        kwargs["supportdocs"] = SupportDocumentManager(verify=verify)
        # Disable warning about evolving data formats, it's out of date.
        kwargs.setdefault("warning", False)
        super(LocalBrowserIdVerifier, self).__init__(**kwargs)

    # BUGFIX: this was previously `def _emit_warning():` with no arguments,
    # so calling it as `self._emit_warning()` raised TypeError.  It uses no
    # instance state, so make it a staticmethod.
    @staticmethod
    def _emit_warning():
        """Emit a scary warning to discourage unverified SSL access."""
        # BUGFIX: the continued string segments lacked separating spaces,
        # producing "certificatevalidation" and "pathto" in the message.
        msg = "browserid.ssl_certificate=False disables server's certificate "\
              "validation and poses a security risk. You should pass the path "\
              "to your self-signed certificate(s) instead. "\
              "For more information on the ssl_certificate parameter, see "\
              "http://docs.python-requests.org/en/latest/user/advanced/"\
              "#ssl-cert-verification"
        warnings.warn(msg, RuntimeWarning, stacklevel=2)

    def verify(self, assertion, audience=None):
        """Verify the assertion, then enforce the allowed-issuers whitelist.

        Raises InvalidIssuerError if the verified issuer is not in
        ``allowed_issuers`` (when that option is configured).
        """
        data = super(LocalBrowserIdVerifier, self).verify(assertion, audience)
        if self.allowed_issuers is not None:
            issuer = data.get('issuer')
            if issuer not in self.allowed_issuers:
                raise InvalidIssuerError("Issuer not allowed: %s" % (issuer,))
        return data
119 | if audiences is not None: 120 | assert isinstance(audiences, basestring) 121 | self.audiences = audiences 122 | if isinstance(trusted_issuers, basestring): 123 | trusted_issuers = trusted_issuers.split() 124 | self.trusted_issuers = trusted_issuers 125 | if isinstance(allowed_issuers, basestring): 126 | allowed_issuers = allowed_issuers.split() 127 | self.allowed_issuers = allowed_issuers 128 | if verifier_url is None: 129 | verifier_url = "https://verifier.accounts.firefox.com/v2" 130 | self.verifier_url = verifier_url 131 | if timeout is None: 132 | timeout = 30 133 | self.timeout = timeout 134 | self.session = requests.Session() 135 | self.session.verify = True 136 | 137 | def verify(self, assertion, audience=None): 138 | if audience is None: 139 | audience = self.audiences 140 | 141 | body = {'assertion': assertion, 'audience': audience} 142 | if self.trusted_issuers is not None: 143 | body['trustedIssuers'] = self.trusted_issuers 144 | headers = {'content-type': 'application/json'} 145 | try: 146 | response = self.session.post(self.verifier_url, 147 | data=json.dumps(body), 148 | headers=headers, 149 | timeout=self.timeout) 150 | except (socket.error, requests.RequestException) as e: 151 | msg = "Failed to POST %s. 
Reason: %s" % (self.verifier_url, str(e)) 152 | raise ConnectionError(msg) 153 | 154 | if response.status_code != 200: 155 | raise ConnectionError('server returned invalid response code') 156 | try: 157 | data = json.loads(response.text) 158 | except ValueError: 159 | raise ConnectionError("server returned invalid response body") 160 | 161 | if data.get('status') != "okay": 162 | reason = data.get('reason', 'unknown error') 163 | if "audience mismatch" in reason: 164 | raise AudienceMismatchError(data.get("audience"), audience) 165 | if "expired" in reason or "issued later than" in reason: 166 | raise ExpiredSignatureError(reason) 167 | raise InvalidSignatureError(reason) 168 | if self.allowed_issuers is not None: 169 | issuer = data.get('issuer') 170 | if issuer not in self.allowed_issuers: 171 | raise InvalidIssuerError("Issuer not allowed: %s" % (issuer,)) 172 | return data 173 | 174 | 175 | class RemoteOAuthVerifier(object): 176 | """A Verifier for FxA OAuth tokens that posts to a verifiaction service. 177 | 178 | This verifier uses the remote FxA OAuth verification service to accept 179 | OAuth tokens, and translates the returned data into something that 180 | approximates the information provided by BrowserID. 181 | 182 | In order to meet tokenserver's expectation that users are identified 183 | with an email address, it combines the FxA uid with the hostname of 184 | the corresponding FxA BrowserID issuer. For non-standard FxA hosting 185 | setups this might require it to dynamically discover the BrowserID 186 | issuer by querying the OAuth verifier's configuration. 
187 | """ 188 | implements(IOAuthVerifier) 189 | 190 | def __init__(self, server_url=None, default_issuer=None, timeout=30, 191 | scope=DEFAULT_OAUTH_SCOPE, jwks=None): 192 | if not scope: 193 | raise ValueError('Expected a non-empty "scope" argument') 194 | if jwks is not None: 195 | jwks = json.loads(jwks).get('keys', []) 196 | self._client = fxa.oauth.Client(server_url=server_url, jwks=jwks) 197 | 198 | self._client.timeout = timeout 199 | if default_issuer is None: 200 | # This server_url will have been normalized to end in /v1. 201 | server_url = self._client.server_url 202 | # Try to find the auth-server that matches the given oauth-server. 203 | # For well-known servers this avoids discovering it dynamically. 204 | for urls in fxa.constants.ENVIRONMENT_URLS.itervalues(): 205 | if urls['oauth'] == server_url: 206 | auth_url = urls['authentication'] 207 | default_issuer = urlparse.urlparse(auth_url).netloc 208 | break 209 | else: 210 | try: 211 | # For non-standard hosting setups, look it up dynamically. 212 | r = requests.get(server_url[:-3] + '/config') 213 | r.raise_for_status() 214 | try: 215 | default_issuer = r.json()['browserid']['issuer'] 216 | except KeyError: 217 | pass 218 | except ValueError as e: 219 | # some tests fail because requests returns a ValueError 220 | # "I/O operation on a closed file" 221 | # this is because response fails to read the empty stream. 222 | # treat it as an empty response. 
223 | import logging 224 | logging.getLogger().debug(e) 225 | pass 226 | self.default_issuer = default_issuer 227 | self.scope = scope 228 | 229 | @property 230 | def server_url(self): 231 | return self._client.server_url 232 | 233 | @property 234 | def timeout(self): 235 | return self._client.timeout 236 | 237 | def verify(self, token): 238 | try: 239 | userinfo = self._client.verify_token(token, self.scope) 240 | except (socket.error, requests.RequestException) as e: 241 | msg = 'Verification request to %s failed; reason: %s' 242 | msg %= (self.server_url, str(e)) 243 | raise ConnectionError(msg) 244 | issuer = userinfo.get('issuer', self.default_issuer) 245 | if not issuer or not isinstance(issuer, basestring): 246 | msg = 'Could not determine issuer from verifier response' 247 | raise fxa.errors.TrustError(msg) 248 | idpclaims = {} 249 | if userinfo.get('generation') is not None: 250 | idpclaims['fxa-generation'] = userinfo['generation'] 251 | return { 252 | 'email': userinfo['user'] + '@' + issuer, 253 | 'idpClaims': idpclaims, 254 | } 255 | 256 | 257 | # For backwards-compatibility with self-hosting setups 258 | # which might be referencing these via their old names. 259 | LocalVerifier = LocalBrowserIdVerifier 260 | RemoteVerifier = RemoteBrowserIdVerifier 261 | --------------------------------------------------------------------------------