├── .circleci
│   └── config.yml
├── .flake8
├── .gitignore
├── .pre-commit-config.yaml
├── CODE_OF_CONDUCT.md
├── LICENSE
├── Pipfile
├── README.md
├── containers
│   ├── Dockerfile
│   ├── README.md
│   ├── build-local.sh
│   ├── crl-storage-claim.yaml
│   ├── crlite-config.properties.example
│   ├── crlite-fetch
│   │   └── pod.yaml
│   ├── crlite-generate
│   │   └── pod.yaml
│   ├── crlite-publish-config.properties.example
│   ├── crlite-publish
│   │   └── pod.yaml
│   ├── crlite-signoff-config.properties.example
│   ├── crlite-signoff
│   │   └── pod.yaml
│   └── scripts
│       ├── crlite-fetch.sh
│       ├── crlite-generate.sh
│       ├── crlite-publish.sh
│       ├── crlite-signoff-tool.py
│       └── crlite-signoff.sh
├── docs
│   ├── .DS_Store
│   ├── figure1-information_flow.png
│   ├── figure2-filter_process.png
│   ├── figure3-filter_structure.png
│   └── figure4-certificate_identifier.png
├── go.mod
├── go
│   ├── cmd
│   │   ├── aggregate-crls
│   │   │   ├── aggregate-crls.go
│   │   │   ├── aggregate-crls_test.go
│   │   │   ├── crl-auditor.go
│   │   │   └── crl-auditor_test.go
│   │   ├── aggregate-known
│   │   │   └── aggregate-known.go
│   │   ├── ct-fetch
│   │   │   └── ct-fetch.go
│   │   └── get-mozilla-issuers
│   │       └── get-mozilla-issuers.go
│   ├── config
│   │   ├── config.go
│   │   └── config_test.go
│   ├── downloader
│   │   ├── download-auditor.go
│   │   ├── download-tracer.go
│   │   ├── download-tracer_test.go
│   │   ├── downloader.go
│   │   ├── downloader_test.go
│   │   ├── verifying-downloader.go
│   │   └── verifying-downloader_test.go
│   ├── engine
│   │   └── engine.go
│   ├── go.mod
│   ├── go.sum
│   ├── rootprogram
│   │   ├── issuers.go
│   │   └── issuers_test.go
│   ├── storage
│   │   ├── certdatabase.go
│   │   ├── certdatabase_test.go
│   │   ├── mockcache.go
│   │   ├── rediscache.go
│   │   ├── rediscache_test.go
│   │   ├── serialcachewriter.go
│   │   ├── types.go
│   │   └── types_test.go
│   ├── telemetry
│   │   └── telemetry.go
│   ├── types.go
│   └── types_test.go
├── moz_kinto_publisher
│   ├── example_enrolled.json
│   ├── main.py
│   ├── settings.py
│   └── test_publisher.py
├── pytest.ini
├── rust-create-cascade
│   ├── Cargo.toml
│   └── src
│       ├── cascade_helper.rs
│       ├── clubcard_helper.rs
│       └── main.rs
├── rust-query-crlite
│   ├── Cargo.toml
│   └── src
│       └── main.rs
├── setup.py
├── setup
│   └── list_all_active_ct_logs
├── test-via-docker.sh
├── version.json
└── workflow
    ├── 0-allocate_identifier
    ├── 0-set_credentials.inc
    ├── 1-upload_data_to_storage
    ├── 2-generate_mlbf
    ├── 3-upload_mlbf_to_storage
    └── __init__.py

/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 | 
3 | orbs:
4 |   python: circleci/python@1.3.4
5 |   docker: circleci/docker@2.5.0
6 | 
7 | commands:
8 |   build-and-publish-image:
9 |     description: Build and publish a single image
10 | 
11 |     parameters:
12 |       tag:
13 |         default: ${CIRCLE_SHA1}
14 |         type: string
15 |       path:
16 |         default: containers
17 |         type: string
18 | 
19 |     steps:
20 |       - docker/check
21 |       - run:
22 |           name: Generate version.json
23 |           command: |
24 |             # create a version.json per https://github.com/mozilla-services/Dockerflow/blob/master/docs/version_object.md
25 |             printf '{"commit":"%s","version":"%s","source":"https://github.com/%s/%s","build":"%s"}\n' \
26 |               "$CIRCLE_SHA1" \
27 |               "$CIRCLE_TAG" \
28 |               "$CIRCLE_PROJECT_USERNAME" \
29 |               "$CIRCLE_PROJECT_REPONAME" \
30 |               "$CIRCLE_BUILD_URL" > version.json
31 |       - docker/build:
32 |           image: ${DOCKER_IMAGE}
33 |           tag: <<parameters.tag>>
34 |           dockerfile: <<parameters.path>>/Dockerfile
35 |           step-name: build crlite container
36 |       - docker/push:
37 |           image: ${DOCKER_IMAGE}
38 |           tag: <<parameters.tag>>
39 |           step-name: push crlite container
40 | 
41 | jobs:
42 |   python-build-and-test:
43 |     executor: python/default
44 |     steps:
45 |       - checkout
46 |       - run: pip install pytest
47 | 
48 |       - run:
49 |           name: Install Package
50 |           command: |
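            # (Editable install: the pytest run below then imports this checkout in place.)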
pip install --editable . 52 | 53 | - run: 54 | name: Run Tests 55 | command: | 56 | pip install pytest 57 | mkdir test-results 58 | python -m pytest --junitxml=test-results/junit.xml 59 | 60 | - store_test_results: 61 | path: test-results 62 | - store_artifacts: 63 | path: test-results 64 | 65 | - run: 66 | name: Check format with Black 67 | command: | 68 | pip install "black==22.3.0" 69 | python -m black --check . 70 | 71 | golang-build-and-test: 72 | docker: 73 | # specify the version 74 | - image: cimg/go:1.23 75 | auth: 76 | username: ${DOCKER_LOGIN} 77 | password: ${DOCKER_PASSWORD} 78 | 79 | 80 | working_directory: /home/circleci/src/github.com/mozilla.com/crlite 81 | steps: 82 | - checkout 83 | 84 | # specify any bash command here prefixed with `run: ` 85 | - run: 86 | name: Download and build 87 | command: go get -v -t -d ./... 88 | working_directory: /home/circleci/src/github.com/mozilla.com/crlite/go 89 | - run: 90 | name: gofmt 91 | command: > 92 | if [ -n "$(gofmt -l .)" ]; then 93 | echo "Go code is not formatted:"; gofmt -d .; exit 1; 94 | fi 95 | 96 | working_directory: /home/circleci/src/github.com/mozilla.com/crlite/go 97 | - run: 98 | name: Test 99 | command: go test -v ./... 100 | working_directory: /home/circleci/src/github.com/mozilla.com/crlite/go 101 | 102 | - run: 103 | name: Test with race-finder 104 | command: go test -race -short ./... 105 | working_directory: /home/circleci/src/github.com/mozilla.com/crlite/go 106 | 107 | rust-create-cascade-build-and-test: 108 | docker: 109 | - image: cimg/rust:1.82.0 110 | environment: 111 | RUSTFLAGS: '-D warnings' 112 | working_directory: ~/crlite/rust-create-cascade 113 | steps: 114 | - checkout: 115 | path: ~/crlite/ 116 | - run: 117 | name: Version information 118 | command: rustc --version; cargo --version; rustup --version 119 | - run: 120 | name: rustfmt 121 | command: rustfmt --check src/*.rs 122 | - run: 123 | name: Run Tests 124 | command: cargo test -r 125 | 126 | publish-dev-image: 127 | executor: docker/docker 128 | steps: 129 | - setup_remote_docker 130 | - checkout 131 | - build-and-publish-image: 132 | tag: ${CIRCLE_SHA1} 133 | 134 | publish-tagged-image: 135 | executor: docker/docker 136 | steps: 137 | - setup_remote_docker 138 | - checkout 139 | - build-and-publish-image: 140 | tag: ${CIRCLE_TAG} 141 | 142 | publish-latest-image: 143 | executor: docker/docker 144 | steps: 145 | - setup_remote_docker 146 | - checkout 147 | - build-and-publish-image: 148 | tag: latest 149 | 150 | workflows: 151 | version: 2 152 | 153 | untagged-build: 154 | jobs: 155 | - python-build-and-test 156 | - golang-build-and-test 157 | - rust-create-cascade-build-and-test 158 | - publish-dev-image: 159 | filters: 160 | branches: 161 | only: dev 162 | requires: 163 | - python-build-and-test 164 | - golang-build-and-test 165 | - rust-create-cascade-build-and-test 166 | - publish-latest-image: 167 | filters: 168 | branches: 169 | only: main 170 | requires: 171 | - python-build-and-test 172 | - golang-build-and-test 173 | - rust-create-cascade-build-and-test 174 | 175 | tagged-build: 176 | jobs: 177 | - python-build-and-test: 178 | filters: 179 | branches: 180 | ignore: /.*/ 181 | tags: 182 | only: /^v.*/ 183 | - golang-build-and-test: 184 | filters: 185 | branches: 186 | ignore: /.*/ 187 | tags: 188 | only: /^v.*/ 189 | - rust-create-cascade-build-and-test: 190 | filters: 191 | branches: 192 | ignore: /.*/ 193 | tags: 194 | only: /^v.*/ 195 | - publish-tagged-image: 196 | filters: 197 | branches: 198 | ignore: /.*/ 199 | tags: 200 | 
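# (publish-tagged-image runs only for version tags matching ^v.*; all branch builds are excluded by these filters.)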
only: /^v.*/ 201 | requires: 202 | - python-build-and-test 203 | - golang-build-and-test 204 | - rust-create-cascade-build-and-test 205 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | # See http://pep8.readthedocs.io/en/latest/intro.html#configuration 3 | ignore = E121, E123, E126, E129, E133, E203, E226, E241, E242, E704, W503, E402, E741 4 | max-line-length = 99 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | all_CRLs 2 | certs_using_CRL.json 3 | CRL_servers.txt 4 | final_crl_nonrevoked.json 5 | final_crl_revoked.json 6 | mega_CRL.json 7 | moz-crlite-mlbf* 8 | 9 | # Byte-compiled / optimized / DLL files 10 | __pycache__/ 11 | *.py[cod] 12 | *$py.class 13 | 14 | # C extensions 15 | *.so 16 | 17 | # Distribution / packaging 18 | .Python 19 | env/ 20 | build/ 21 | develop-eggs/ 22 | dist/ 23 | downloads/ 24 | eggs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | wheels/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .coverage 50 | .coverage.* 51 | .cache 52 | nosetests.xml 53 | coverage.xml 54 | *,cover 55 | .hypothesis/ 56 | 57 | # pyenv 58 | .python-version 59 | 60 | # dotenv 61 | .env 62 | 63 | # virtualenv 64 | .venv 65 | venv/ 66 | ENV/ 67 | 68 | # Binaries for programs and plugins 69 | *.exe 70 | *.dll 71 | *.so 72 | *.dylib 73 | 74 | # Test binary, build with `go test -c` 75 | *.test 76 | 77 | # Output of the go coverage tool, specifically when used with LiteIDE 78 | *.out 79 | 80 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 81 | problems 82 | 83 | # vim swap files 84 | .*.swp 85 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: git://github.com/dnephin/pre-commit-golang 3 | rev: HEAD 4 | hooks: 5 | - id: go-fmt 6 | - id: validate-toml 7 | - repo: git://github.com/pre-commit/pre-commit-hooks 8 | rev: HEAD 9 | hooks: 10 | - id: check-ast 11 | - id: detect-private-key 12 | - id: detect-aws-credentials 13 | - id: check-merge-conflict 14 | - id: end-of-file-fixer 15 | - id: requirements-txt-fixer 16 | - id: trailing-whitespace 17 | - repo: https://gitlab.com/pycqa/flake8 18 | rev: HEAD 19 | hooks: 20 | - id: flake8 21 | - repo: local 22 | hooks: 23 | - id: pytest 24 | name: Python Tests 25 | language: system 26 | entry: python3 -m pytest 27 | pass_filenames: false 28 | files: '.py$' 29 | - repo: git://github.com/psf/black 30 | rev: 20.8b1 31 | hooks: 32 | - id: black 33 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Community Participation Guidelines 2 | 3 | This repository is governed by Mozilla's code of conduct and etiquette guidelines. 
4 | For more details, please read the
5 | [Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/).
6 | 
7 | ## How to Report
8 | For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page.
9 | 
10 | 
16 | 
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | name = "pypi"
3 | url = "https://pypi.org/simple"
4 | verify_ssl = true
5 | 
6 | [dev-packages]
7 | 
8 | [packages]
9 | bsdiff4 = ">=1.1"
10 | cryptography = ">=2.2"
11 | glog = ">=0.3"
12 | kinto-http = ">=10.9"
13 | python-decouple = ">=3.1"
14 | requests = {extras = ["socks"],version = ">=2.10.0"}
15 | 
16 | [requires]
17 | python_version = "3.7"
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Build Status](https://circleci.com/gh/mozilla/crlite.svg?style=shield)](https://circleci.com/gh/mozilla/crlite)
2 | ![Maturity Level: Beta](https://img.shields.io/badge/maturity-beta-blue.svg)
3 | ![Docker Version](https://img.shields.io/docker/v/mozilla/crlite)
4 | 
5 | CRLite uses a Bloom filter cascade [and whole-ecosystem analysis of the Web PKI](https://www.certificate-transparency.org/) to push the entire web's TLS revocation information to Firefox clients, replacing [OCSP](https://en.wikipedia.org/wiki/Online_Certificate_Status_Protocol) for most browser TLS connections, speeding up connection time while continuing to support PKI revocations. The system was [originally proposed at IEEE S&P 2017](http://www.ccs.neu.edu/home/cbw/static/pdf/larisch-oakland17.pdf).
6 | 
7 | For details about CRLite, [Mozilla Security Engineering has a blog post series](https://blog.mozilla.org/security/tag/crlite/), and [this repository has a FAQ](https://github.com/mozilla/crlite/wiki#faq).
8 | 
9 | There are also useful end-user tools for querying CRLite: [moz_crlite_query](https://github.com/mozilla/moz_crlite_query), to query the current CRLite filter for revocations, and a diagnostic tool, [crlite_status](https://github.com/jcjones/crlite_status), to monitor filter generation metrics.
10 | 
11 | 
12 | ## General Structure
13 | 
14 | CRLite is designed to run in Kubernetes, with the following services:
15 | 
16 | 1. [`containers/crlite-fetch`](https://github.com/mozilla/crlite/tree/main/containers/crlite-fetch), a constantly-running task that downloads new certificates from Certificate Transparency logs.
17 | 1. [`containers/crlite-generate`](https://github.com/mozilla/crlite/tree/main/containers/crlite-generate), a periodic (cron) job that produces a CRLite filter from the data in Redis and uploads the artifacts into Google Cloud Storage.
18 | 1. [`containers/crlite-publish`](https://github.com/mozilla/crlite/tree/main/containers/crlite-publish), a periodic (cron) job that publishes the results of a `crlite-generate` run to a Kinto instance.
19 | 1. [`containers/crlite-signoff`](https://github.com/mozilla/crlite/tree/main/containers/crlite-signoff), a periodic (cron) job that verifies and approves data that `crlite-publish` placed in a Kinto instance.
20 | 
21 | There are scripts in [`containers/`](https://github.com/mozilla/crlite/tree/main/containers) to build the Docker images locally using Docker; see `build-local.sh`. There are also builds at Docker Hub in the [`mozilla/crlite`](https://hub.docker.com/r/mozilla/crlite) project.
22 | 
23 | 
24 | ### Storage
25 | Storage consists of these parts:
26 | 
27 | 1. Redis, e.g. Google Cloud Memorystore, for ingestion of certificate metadata (serial numbers, expirations, issuers), used in filter generation.
28 | 1. A local disk, for persistent storage of certificate metadata and CRLs. This is defined in [`containers/crl-storage-claim.yaml`](https://github.com/mozilla/crlite/blob/main/containers/crl-storage-claim.yaml).
29 | 1. Google Cloud Storage, for storage of the artifacts when a job is completed.
30 | 
31 | 
32 | ### Information Flow
33 | 
34 | This tooling monitors Certificate Transparency logs and, upon scheduled execution, `crlite-generate` produces a new filter and uploads it to Cloud Storage.
35 | 
36 | ![Information flow](docs/figure1-information_flow.png)
37 | 
38 | The process for producing a CRLite filter is run by [`system/crlite-fullrun`](https://github.com/mozilla/crlite/blob/main/system/crlite-fullrun), which is described in block form in this diagram:
39 | 
40 | ![Process for building a CRLite Bloom filter](docs/figure2-filter_process.png)
41 | 
42 | The output Bloom filter cascade is built by the Python [`mozilla/filter-cascade`](https://github.com/mozilla/filter-cascade) tool and then read in Firefox by the Rust [`mozilla/rust-cascade`](https://github.com/mozilla/rust-cascade) package.
43 | 
44 | For complete details of the filter construction, see Section III.B of the [CRLite paper](http://www.ccs.neu.edu/home/cbw/static/pdf/larisch-oakland17.pdf).
45 | 
46 | ![Structure of the CRLite Bloom filter cascade](docs/figure3-filter_structure.png)
47 | 
48 | The keys used in the CRLite data structure consist of the SHA256 digest of the issuer's `Subject Public Key Information` field in DER-encoded form, followed by the certificate's serial number, unmodified, in DER-encoded form.
49 | 
50 | ![Structure of Certificate Identifiers](docs/figure4-certificate_identifier.png)
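A minimal Go sketch of that key construction (illustrative only: `crliteKey` is not a helper from this repository, and the dummy values in `main` stand in for real parsed certificates):

```go
package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"fmt"
	"math/big"
)

// crliteKey sketches the figure-4 identifier: SHA256 over the issuer's
// DER-encoded SubjectPublicKeyInfo, followed by the certificate serial.
func crliteKey(issuer, cert *x509.Certificate) []byte {
	spkiHash := sha256.Sum256(issuer.RawSubjectPublicKeyInfo)
	// Caveat: big.Int.Bytes() omits the leading 0x00 that DER adds to
	// serials with the high bit set, so a faithful implementation takes
	// the serial bytes directly from the DER INTEGER.
	return append(spkiHash[:], cert.SerialNumber.Bytes()...)
}

func main() {
	// Dummy values; real certificates come from x509.ParseCertificate.
	issuer := &x509.Certificate{RawSubjectPublicKeyInfo: []byte{0x30, 0x00}}
	cert := &x509.Certificate{SerialNumber: big.NewInt(0x01f2)}
	fmt.Println(hex.EncodeToString(crliteKey(issuer, cert)))
}
```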
51 | 
52 | 
53 | ## Local Installation
54 | 
55 | It's possible to run the tools locally, though you will need a local instance of Redis. First, install the tools and their dependencies:
56 | 
57 | ```sh
58 | go install github.com/mozilla/crlite/go/cmd/ct-fetch@latest
59 | go install github.com/mozilla/crlite/go/cmd/aggregate-crls@latest
60 | go install github.com/mozilla/crlite/go/cmd/aggregate-known@latest
61 | 
62 | pipenv install
63 | ```
64 | 
65 | 
66 | ### Configuration
67 | 
68 | You can configure via environment variables or via a config file. Environment variables are specified in the [`/containers/*.properties.example`](https://github.com/mozilla/crlite/tree/main/containers) files. To use a configuration file, `~/.ct-fetch.ini` (or any file selected on the CLI using `-config`), construct it like so:
69 | 
70 | ```
71 | certPath = /persistent/certdb/
72 | numThreads = 16
73 | cacheSize = 128
74 | ```
75 | 
76 | 
77 | #### Parameters
78 | 
79 | You'll want to set a collection of configuration parameters:
80 | 
81 | * `runForever [true/false]`
82 | * `logExpiredEntries [true/false]`
83 | * `numThreads 16`
84 | * `cacheSize [number of cache entries. An individual entry contains an issuer-day's worth of serial numbers, which could be as much as 64 MB of RAM, but is generally closer to 1 MB.]`
85 | * `outputRefreshMs [milliseconds]`
86 | 
87 | The log list is all the logs you want to sync, comma separated, as URLs:
88 | * `logList = https://ct.googleapis.com/icarus, https://oak.ct.letsencrypt.org/2021/`
89 | 
90 | To get all currently active logs from
91 | [certificate-transparency.org](https://certificate-transparency.org/):
92 | ```
93 | echo "logList = $(setup/list_all_active_ct_logs)" >> ~/.ct-fetch.ini
94 | ```
95 | 
96 | If running forever, set the delay on polling for new updates, per log. This will have some jitter added:
97 | * `pollingDelay` [minutes]
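For illustration only, a jittered delay along these lines would do; the parameter names mirror `pollingDelayMean` and `pollingDelayStdDev` from the example config, but whether ct-fetch uses exactly a Gaussian here is an assumption:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredDelay sketches a per-log polling delay: the configured mean with
// Gaussian jitter, clamped so it never goes negative.
func jitteredDelay(mean, stdDev time.Duration) time.Duration {
	d := mean + time.Duration(rand.NormFloat64()*float64(stdDev))
	if d < 0 {
		d = 0
	}
	return d
}

func main() {
	fmt.Println(jitteredDelay(60*time.Minute, 10*time.Minute))
}
```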
98 | 
99 | If not running forever, you can give limits or slice up CT log data:
100 | * `limit` [uint]
101 | * `offset` [uint]
102 | 
103 | You'll also need to configure credentials used for Google Cloud Storage:
104 | * `GOOGLE_APPLICATION_CREDENTIALS` [base64-encoded string of the service credentials JSON]
105 | 
106 | If you need to proxy the connection, perhaps via SSH, set `HTTPS_PROXY` to something like `socks5://localhost:32547/` as well.
107 | 
108 | 
109 | ### General Operation
110 | 
111 | [`containers/build-local.sh`](https://github.com/mozilla/crlite/tree/main/containers/build-local.sh) produces the Docker containers locally.
112 | 
113 | [`test-via-docker.sh`](https://github.com/mozilla/crlite/tree/main/test-via-docker.sh) executes a complete "run", syncing with CT and producing a filter. It's configured using a series of environment variables.
114 | 
115 | ### Starting the Local Dependencies
116 | 
117 | Redis can be provided in a variety of ways; the easiest is probably the Redis Docker image. For whatever reason, I have the best luck remapping ports so it runs on 6379:
118 | ```sh
119 | docker run -p 6379:7000 redis:4 --port 7000
120 | ```
121 | 
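To sanity-check that port mapping, a raw inline `PING` over TCP is enough; Redis understands inline commands, so no client library is needed (illustrative snippet):

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
)

func main() {
	// Redis accepts "inline commands", so a bare PING over TCP verifies
	// that the remapped port from the docker run above is reachable.
	conn, err := net.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Fprint(conn, "PING\r\n")
	reply, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(reply) // expect "+PONG"
}
```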
122 | 
123 | 
124 | ## Running from a Docker Container
125 | 
126 | To construct a container, see [`containers/README.md`](https://github.com/mozilla/crlite/tree/main/containers/README.md).
127 | 
128 | The crlite-fetch container runs forever, fetching CT updates:
129 | 
130 | ```sh
131 | docker run --rm -it \
132 |   -e "outputRefreshMs=1000" \
133 |   crlite:staging-fetch
134 | ```
135 | 
136 | The crlite-generate container constructs a new filter. To use local disk, set `persistentStorage` to `/persistent` and mount that volume in Docker. You should also mount the volume `/processing` to get the output files:
137 | 
138 | ```sh
139 | docker run --rm -it \
140 |   -e "persistentStorage=/persistent" \
141 |   -e "outputRefreshMs=1000" \
142 |   --mount type=bind,src=/tmp/ctlite_data,dst=/ctdata \
143 |   --mount type=bind,src=/tmp/crlite_results,dst=/processing \
144 |   crlite:staging-generate
145 | ```
146 | 
147 | See [`test-via-docker.sh`](https://github.com/mozilla/crlite/blob/main/test-via-docker.sh) for an example.
148 | 
149 | To run in a remote container, such as a Kubernetes pod, you'll need to set all the environment variables properly, and the container should otherwise work. See [`containers/crlite-config.properties.example`](https://github.com/mozilla/crlite/blob/main/containers/crlite-config.properties.example) for an example of the Kubernetes environment that can be imported using `kubectl create configmap`; see the `containers` README.md for details.
150 | 
151 | 
152 | ## Tools
153 | 
154 | *`ct-fetch`*
155 | Downloads all CT entry issuer-serial pairs, and associated metadata, to local persistent storage.
156 | 
157 | *`aggregate-crls`*
158 | Obtains all CRLs defined in all CT entries' certificates, verifies them, and collates their results
159 | into `*issuer SKI base64*.revoked` files.
160 | 
161 | *`aggregate-known`*
162 | Collates all CT entries' unexpired certificates into `*issuer SKI base64*.known` files.
163 | 
164 | 
165 | 
166 | ## Credits
167 | 
168 | * The CRLite research team: James Larisch, David Choffnes, Dave Levin, Bruce M. Maggs, Alan Mislove, and Christo Wilson.
169 | * Benton Case for [certificate-revocation-analysis](https://github.com/casebenton/certificate-revocation-analysis), which kicked off this effort.
170 | * Mark Goodwin for the original Python [`filter_cascade`](https://gist.githubusercontent.com/mozmark/c48275e9c07ccca3f8b530b88de6ecde/raw/19152f7f10925379420aa7721319a483273d867d/sample.py) and the [`filter-cascade`](https://github.com/mozilla/filter-cascade) project.
171 | * Dana Keeler and Mark Goodwin together for the Rust [`rust-cascade`](https://github.com/mozilla/rust-cascade).
172 | 
--------------------------------------------------------------------------------
/containers/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.18.1-bullseye as go-builder
2 | RUN mkdir /build
3 | ADD go /build/
4 | WORKDIR /build
5 | RUN mkdir bin gopath
6 | ENV GOPATH /build/gopath
7 | RUN go build -o bin/aggregate-crls /build/cmd/aggregate-crls
8 | RUN go build -o bin/aggregate-known /build/cmd/aggregate-known
9 | RUN go build -o bin/ct-fetch /build/cmd/ct-fetch
10 | 
11 | FROM rust:latest as rust-builder
12 | RUN mkdir /build
13 | 
14 | ADD rust-create-cascade /build/rust-create-cascade/
15 | WORKDIR /build/rust-create-cascade
16 | RUN cargo build --release --target-dir /build
17 | 
18 | ADD rust-query-crlite /build/rust-query-crlite/
19 | WORKDIR /build/rust-query-crlite
20 | RUN cargo build --release --target-dir /build
21 | 
22 | 
23 | FROM python:3.12.2-slim-bookworm
24 | RUN apt update \
25 |     && apt install -y ca-certificates \
26 |     && apt -y upgrade \
27 |     && apt-get autoremove --purge -y \
28 |     && apt-get -y clean \
29 |     && rm -rf /var/lib/apt/lists/*
30 | 
31 | RUN adduser --system --uid 10001 --group --home /app app
32 | 
33 | ENV crlite_log /var/log
34 | ENV crlite_processing /processing
35 | ENV crlite_persistent /persistent
36 | ENV crlite_workflow /app/workflow
37 | ENV crlite_bin /app
38 | 
39 | ENV certPath /persistent/certdb/
40 | 
41 | RUN mkdir /processing && chown app /processing && chmod 777 /processing
42 | VOLUME /var/log /processing /persistent
43 | 
44 | COPY --from=go-builder /build/bin /app/
45 | COPY --from=rust-builder /build/release/rust-create-cascade /app/
46 | COPY --from=rust-builder /build/release/rust-query-crlite /app/
47 | 
48 | COPY moz_kinto_publisher /app/moz_kinto_publisher
49 | COPY workflow /app/workflow
50 | COPY containers/scripts /app/scripts
51 | COPY setup.py version.json /app/
52 | 
53 | RUN pip3 install /app/
54 | 
55 | ENV TINI_VERSION v0.19.0
56 | ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
57 | RUN chmod +x /tini
58 | ENTRYPOINT ["/tini", "-g", "--"]
59 | 
60 | USER app
61 | WORKDIR /app
62 | 
63 | # For crlite-fetch
64 | ENV runForever true
65 | ENV logExpiredEntries false
66 | 
67 | EXPOSE 8080/tcp
68 | 
69 | # For crlite-generate
70 | ENV numThreads 16
71 | ENV cacheSize 64
--------------------------------------------------------------------------------
/containers/README.md:
-------------------------------------------------------------------------------- 1 | # Local 2 | 3 | See `./build-local.sh` 4 | 5 | Basic build: 6 | ``` 7 | docker build -t crlite:staging .. -f Dockerfile 8 | ``` 9 | 10 | To run the tools, you'll need Redis 4+: 11 | 12 | ``` 13 | docker run --rm -it -p 6379:7000 \ 14 | redis:4 --port 7000 15 | ``` 16 | 17 | Then you can execute the docker container, setting any environment vars you want: 18 | 19 | ``` 20 | docker run --rm -it \ 21 | -e "redisHost=10.0.0.115:6379" \ 22 | -e "outputRefreshMs=1000" \ 23 | crlite-fetch:staging 24 | ``` 25 | 26 | See the Running section for more environment variables. 27 | 28 | 29 | # Deploying 30 | 31 | ## Set up configuration 32 | customize `crlite-config.properties.example` to `crlite-config.properties` and the same for `-publish` and `-signoff` 33 | 34 | ``` 35 | kubectl delete configmap crlite-config && \ 36 | kubectl create configmap crlite-config \ 37 | --from-env-file=crlite-config.properties 38 | kubectl delete configmap crlite-publish-config && \ 39 | kubectl create configmap crlite-publish-config \ 40 | --from-env-file=crlite-publish-config.properties 41 | kubectl delete configmap crlite-signoff-config && \ 42 | kubectl create configmap crlite-signoff-config \ 43 | --from-env-file=crlite-signoff-config.properties 44 | ``` 45 | 46 | ## Create CRL storage 47 | ``` 48 | kubectl create -f crl-storage-claim.yaml 49 | ``` 50 | 51 | ## Always-on Deployment 52 | `kubectl create -f crlite-fetch` 53 | 54 | ## Periodic jobs 55 | 56 | ### Filter generation 57 | `kubectl create -f crlite-generate` 58 | 59 | ### Filter publication 60 | `kubectl create -f crlite-publish` 61 | `kubectl create -f crlite-signoff` 62 | -------------------------------------------------------------------------------- /containers/build-local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | 3 | VER=staging 4 | CT_MAPREDUCE_VER=v1.0.10 5 | 6 | cd $(dirname ${0}) 7 | docker build -t crlite:${VER} .. -f Dockerfile --build-arg ct_mapreduce_ver=${CT_MAPREDUCE_VER} 8 | -------------------------------------------------------------------------------- /containers/crl-storage-claim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: crlite-crls 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 30Gi 11 | -------------------------------------------------------------------------------- /containers/crlite-config.properties.example: -------------------------------------------------------------------------------- 1 | # Be sure not to use quotes for strings in this file 2 | 3 | # Generate with the script setup/list_all_active_ct_logs 4 | logList=https://ct.googleapis.com/logs/argon2019/, https://ct.googleapis.com/logs/argon2020/, https://ct.googleapis.com/logs/argon2021/, https://ct.googleapis.com/logs/argon2022/, https://ct.googleapis.com/logs/argon2023/ 5 | 6 | # This is a service account JSON encoded as base64. 
For example: 7 | # cat ~/gcp-credentials.json | base64 | pbcopy 8 | credentials_data=someBase64Data 9 | 10 | # Host for Redis in : format 11 | redisHost=127.0.0.1:6379 12 | redisTimeout=3s 13 | 14 | numThreads=16 15 | runForever=true 16 | outputRefreshPeriod=90s 17 | statsRefreshPeriod=5m 18 | pollingDelayMean=60m 19 | pollingDelayStdDev=10 20 | 21 | # The save period needs to be coordinated with the ct-fetch pod liveness probe, 22 | # as liveness health information won't be available until the first save. 23 | # The actual save period is this + a few seconds of jitter calculated in ct-fetch 24 | savePeriod=30s 25 | # The health address should be a [address]:port string, where address defaults to * 26 | healthAddr=:8080 27 | 28 | # The Google Cloud Storage bucket for artifact storage 29 | crlite_filter_bucket=crlite_filters_staging 30 | 31 | # Set if you want to provide StatsD metrics 32 | # statsdHost=localhost 33 | # statsdPort=8125 34 | -------------------------------------------------------------------------------- /containers/crlite-fetch/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | generation: 1 5 | labels: 6 | app: crlite-fetch 7 | name: crlite-fetch 8 | namespace: default 9 | spec: 10 | progressDeadlineSeconds: 600 11 | replicas: 1 12 | revisionHistoryLimit: 10 13 | selector: 14 | matchLabels: 15 | app: crlite-fetch 16 | strategy: 17 | rollingUpdate: 18 | maxSurge: 100% 19 | maxUnavailable: 100% 20 | type: RollingUpdate 21 | template: 22 | metadata: 23 | creationTimestamp: null 24 | labels: 25 | app: crlite-fetch 26 | spec: 27 | containers: 28 | - name: crlite-fetch 29 | image: docker.io/mozilla/crlite:latest 30 | command: [ "/bin/bash", "-xc", "/app/scripts/crlite-fetch.sh" ] 31 | envFrom: 32 | - configMapRef: 33 | name: crlite-config 34 | imagePullPolicy: Always 35 | ports: 36 | - name: liveness-port 37 | containerPort: 8080 38 | hostPort: 8080 39 | livenessProbe: 40 | httpGet: 41 | path: /health 42 | port: liveness-port 43 | initialDelaySeconds: 60 44 | periodSeconds: 60 45 | resources: 46 | requests: 47 | cpu: 1 48 | memory: 2Gi 49 | terminationMessagePath: /dev/termination-log 50 | terminationMessagePolicy: FallbackToLogsOnError 51 | dnsPolicy: ClusterFirst 52 | restartPolicy: Always 53 | schedulerName: default-scheduler 54 | securityContext: {} 55 | terminationGracePeriodSeconds: 120 56 | status: 57 | availableReplicas: 1 58 | readyReplicas: 1 59 | replicas: 1 60 | updatedReplicas: 1 61 | -------------------------------------------------------------------------------- /containers/crlite-generate/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | creationTimestamp: null 5 | name: crlite-generate 6 | spec: 7 | concurrencyPolicy: Forbid 8 | failedJobsHistoryLimit: 1 9 | jobTemplate: 10 | metadata: 11 | creationTimestamp: null 12 | name: crlite-generate 13 | spec: 14 | template: 15 | metadata: 16 | creationTimestamp: null 17 | spec: 18 | initContainers: 19 | - name: init 20 | image: busybox:latest 21 | command: ["/bin/chmod", "777", "/persistent"] 22 | volumeMounts: 23 | - mountPath: "/persistent" 24 | name: crlite-crls 25 | containers: 26 | - name: crlite-generate 27 | envFrom: 28 | - configMapRef: 29 | name: crlite-config 30 | image: docker.io/mozilla/crlite:latest 31 | command: [ "/bin/bash", "-xc", "/app/scripts/crlite-generate.sh" ] 32 | imagePullPolicy: Always 33 
| resources: 34 | requests: 35 | memory: 14Gi 36 | cpu: 1.0m 37 | terminationMessagePath: /dev/termination-log 38 | terminationMessagePolicy: FallbackToLogsOnError 39 | volumeMounts: 40 | - mountPath: "/persistent" 41 | name: crlite-crls 42 | volumes: 43 | - name: crlite-crls 44 | persistentVolumeClaim: 45 | claimName: crlite-crls 46 | dnsPolicy: ClusterFirst 47 | restartPolicy: Never 48 | backoffLimit: 0 49 | schedulerName: default-scheduler 50 | securityContext: {} 51 | terminationGracePeriodSeconds: 30 52 | schedule: 0 */6 * * * 53 | successfulJobsHistoryLimit: 3 54 | suspend: false 55 | status: {} 56 | -------------------------------------------------------------------------------- /containers/crlite-publish-config.properties.example: -------------------------------------------------------------------------------- 1 | # Be sure not to use quotes for strings in this file 2 | 3 | # Kinto information. RO and RW can be the same server, but don't have to be. 4 | KINTO_RO_SERVER_URL=https://firefox.settings.services.allizom.org/v1/ 5 | KINTO_RW_SERVER_URL=https://remote-settings.allizom.org/v1/ 6 | KINTO_AUTH_USER=kinto_publisher_example_user 7 | KINTO_AUTH_PASSWORD=kinto_publisher_example_password 8 | 9 | 10 | # If the variable KINTO_NOOP is set to anything at all, then the signoff will 11 | # operate as a no-op and simply print what it's doing. 12 | # KINTO_NOOP=dont_publish_if_this_is_set_to_anything 13 | -------------------------------------------------------------------------------- /containers/crlite-publish/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | creationTimestamp: null 5 | name: crlite-publish 6 | spec: 7 | concurrencyPolicy: Forbid 8 | failedJobsHistoryLimit: 1 9 | jobTemplate: 10 | metadata: 11 | creationTimestamp: null 12 | name: crlite-publish 13 | spec: 14 | template: 15 | metadata: 16 | creationTimestamp: null 17 | spec: 18 | containers: 19 | - name: crlite-publish 20 | envFrom: 21 | - configMapRef: 22 | name: crlite-config 23 | - configMapRef: 24 | name: crlite-publish-config 25 | image: docker.io/mozilla/crlite:latest 26 | command: [ "/bin/bash", "-xc", "/app/scripts/crlite-publish.sh" ] 27 | imagePullPolicy: Always 28 | terminationMessagePath: /dev/termination-log 29 | terminationMessagePolicy: FallbackToLogsOnError 30 | dnsPolicy: ClusterFirst 31 | restartPolicy: Never 32 | schedulerName: default-scheduler 33 | securityContext: {} 34 | terminationGracePeriodSeconds: 30 35 | schedule: 0 * * * * 36 | successfulJobsHistoryLimit: 3 37 | suspend: false 38 | status: {} 39 | -------------------------------------------------------------------------------- /containers/crlite-signoff-config.properties.example: -------------------------------------------------------------------------------- 1 | # Be sure not to use quotes for strings in this file 2 | 3 | # Kinto information. RO and RW can be the same server, but don't have to be. 4 | KINTO_RO_SERVER_URL=https://firefox.settings.services.allizom.org/v1/ 5 | KINTO_RW_SERVER_URL=https://remote-settings.allizom.org/v1/ 6 | KINTO_AUTH_USER=kinto_signer_example_user 7 | KINTO_AUTH_PASSWORD=kinto_signer_example_password 8 | 9 | # If the variable KINTO_NOOP is set to anything at all, then the signoff will 10 | # operate as a no-op and simply print what it's doing. 
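# (KINTO_NOOP here has the same semantics as in the publish config above.)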
11 | # KINTO_NOOP=dont_publish_if_this_is_set_to_anything 12 | 13 | 14 | # Hosts to verify as being unrevoked in the filter in the `crlite_filter_bucket` 15 | # config. The pod will connect to each, obtain the certificate in-use, and 16 | # evaluate the CRLite filter to ensure it's not revoked. 17 | # 18 | # Syntax: comma delimited list of accessible urls, containing lines of hosts as 19 | # host[:port] 20 | # and prefixing lines with # or ; to indicate comments. E.g.: 21 | # 22 | # example.com:8443 23 | # ; also the following, port 443 assumed 24 | # example.net 25 | # 26 | crlite_verify_host_file_urls=https://storage.googleapis.com/crlite-verification-domains/mozilla-services-domains.txt, https://storage.googleapis.com/crlite-verification-domains/moz-top500.txt 27 | -------------------------------------------------------------------------------- /containers/crlite-signoff/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | creationTimestamp: null 5 | name: crlite-signoff 6 | spec: 7 | concurrencyPolicy: Forbid 8 | failedJobsHistoryLimit: 1 9 | jobTemplate: 10 | metadata: 11 | creationTimestamp: null 12 | name: crlite-signoff 13 | spec: 14 | template: 15 | metadata: 16 | creationTimestamp: null 17 | spec: 18 | containers: 19 | - name: crlite-signoff 20 | envFrom: 21 | - configMapRef: 22 | name: crlite-config 23 | - configMapRef: 24 | name: crlite-signoff-config 25 | image: docker.io/mozilla/crlite:latest 26 | command: [ "/bin/bash", "-xc", "/app/scripts/crlite-signoff.sh" ] 27 | imagePullPolicy: Always 28 | terminationMessagePath: /dev/termination-log 29 | terminationMessagePolicy: FallbackToLogsOnError 30 | dnsPolicy: ClusterFirst 31 | restartPolicy: Never 32 | schedulerName: default-scheduler 33 | securityContext: {} 34 | terminationGracePeriodSeconds: 30 35 | schedule: 0 * * * * 36 | successfulJobsHistoryLimit: 3 37 | suspend: false 38 | status: {} 39 | -------------------------------------------------------------------------------- /containers/scripts/crlite-fetch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | ${crlite_bin:-~/go/bin}/ct-fetch -logtostderr -stderrthreshold=INFO 4 | 5 | exit 0 6 | -------------------------------------------------------------------------------- /containers/scripts/crlite-generate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | workflow=${crlite_workflow:-~/go/src/github.com/mozilla/crlite/workflow} 4 | 5 | source ${workflow}/0-set_credentials.inc 6 | 7 | ID=$(${workflow}/0-allocate_identifier \ 8 | --path ${crlite_processing:-/ct/processing/} \ 9 | --filter-bucket ${crlite_filter_bucket:-crlite_filters_staging}) 10 | 11 | echo "Allocated ${ID}" 12 | 13 | ulimit -a 14 | 15 | if [ ! 
-d ${ID}/log ] ; then 16 | mkdir ${ID}/log 17 | fi 18 | 19 | CRLS=${crlite_persistent:-/ct}/crls 20 | ${crlite_bin:-~/go/bin}/aggregate-crls -crlpath ${CRLS} \ 21 | -revokedpath ${ID}/revoked \ 22 | -enrolledpath ${ID}/enrolled.json \ 23 | -auditpath ${ID}/crl-audit.json \ 24 | -ccadb ${crlite_persistent:-/ct}/ccadb-intermediates.csv \ 25 | -stderrthreshold=INFO -alsologtostderr \ 26 | -log_dir ${ID}/log 27 | 28 | ${crlite_bin:-~/go/bin}/aggregate-known -knownpath ${ID}/known \ 29 | -enrolledpath ${ID}/enrolled.json \ 30 | -ctlogspath ${ID}/ct-logs.json \ 31 | -stderrthreshold=INFO -alsologtostderr \ 32 | -log_dir ${WORKDIR}/log 33 | 34 | # Mark the known and revoked directories as read-only. 35 | # rust-create-cascade assumes they will not change during its execution. 36 | chmod -R a-w "${ID}/known" 37 | chmod -R a-w "${ID}/revoked" 38 | 39 | echo "crlite-fullrun: list known folder" 40 | ls -latS ${ID}/known | head 41 | echo "crlite-fullrun: list revoked folder" 42 | ls -latS ${ID}/revoked | head 43 | echo "crlite-fullrun: disk usage" 44 | du -hc ${ID} 45 | 46 | if [ "x${DoNotUpload}x" == "xx" ] ; then 47 | echo "archiving crls" 48 | tar -czf ${ID}/crls.tar.gz -C ${CRLS} . 49 | echo "uploading source materials" 50 | ${workflow}/1-upload_data_to_storage ${ID} \ 51 | --filter-bucket ${crlite_filter_bucket:-crlite_filters_staging} 52 | fi 53 | 54 | ${workflow}/2-generate_mlbf ${ID} \ 55 | --filter-bucket ${crlite_filter_bucket:-crlite_filters_staging} \ 56 | --statsd-host ${statsdHost} \ 57 | --reason-set all \ 58 | --filter-type clubcard 59 | 60 | ${workflow}/2-generate_mlbf ${ID} \ 61 | --filter-bucket ${crlite_filter_bucket:-crlite_filters_staging} \ 62 | --statsd-host ${statsdHost} \ 63 | --reason-set priority \ 64 | --filter-type clubcard 65 | 66 | if [ "x${DoNotUpload}x" == "xx" ] ; then 67 | echo "uploading mlbf" 68 | ${workflow}/3-upload_mlbf_to_storage ${ID} \ 69 | --filter-bucket ${crlite_filter_bucket:-crlite_filters_staging} 70 | fi 71 | 72 | echo "crlite_processing" 73 | df ${crlite_processing:-/ct/processing} 74 | echo "crlite_persistent" 75 | df ${crlite_persistent:-/ct/} 76 | 77 | exit 0 78 | -------------------------------------------------------------------------------- /containers/scripts/crlite-publish.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | workflow=${crlite_workflow:-~/go/src/github.com/mozilla/crlite/workflow} 4 | 5 | source ${workflow}/0-set_credentials.inc 6 | 7 | if [ "x${DoNotUpload}x" != "xx" ] || [ "x${KINTO_NOOP}x" != "xx" ] ; then 8 | ARGS="--noop" 9 | echo "Setting argument ${ARGS}" 10 | fi 11 | 12 | mkdir /tmp/crlite 13 | 14 | moz_kinto_publisher/main.py \ 15 | --filter-bucket ${crlite_filter_bucket:-crlite_filters_staging} \ 16 | --download-path /tmp/crlite ${ARGS} 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /containers/scripts/crlite-signoff-tool.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import hashlib 5 | import subprocess 6 | import sys 7 | import tempfile 8 | from pathlib import Path 9 | 10 | import requests 11 | import glog as log 12 | 13 | from decouple import config 14 | 15 | from kinto_http import Client 16 | from kinto_http.patch_type import BasicPatch 17 | from kinto_http.exceptions import KintoException 18 | 19 | KINTO_RW_SERVER_URL = config( 20 | "KINTO_RW_SERVER_URL", default="https://remote-settings.allizom.org/v1/" 21 
| ) 22 | KINTO_AUTH_USER = config("KINTO_AUTH_USER", default="") 23 | KINTO_AUTH_PASSWORD = config("KINTO_AUTH_PASSWORD", default="") 24 | KINTO_BUCKET = config("KINTO_BUCKET", default="security-state-staging") 25 | KINTO_CRLITE_COLLECTION = config("KINTO_CRLITE_COLLECTION", default="cert-revocations") 26 | KINTO_INTERMEDIATES_COLLECTION = config( 27 | "KINTO_INTERMEDIATES_COLLECTION", default="intermediates" 28 | ) 29 | KINTO_NOOP = config("KINTO_NOOP", default=False, cast=lambda x: bool(x)) 30 | 31 | 32 | class SignoffClient(Client): 33 | def sign_collection(self, *, collection=None): 34 | try: 35 | resp = self.get_collection(id=collection) 36 | except KintoException as e: 37 | log.error(f"Couldn't determine {collection} review status: {e}") 38 | raise e 39 | 40 | original = resp.get("data") 41 | if original is None: 42 | raise KintoException("Malformed response from Kinto") 43 | 44 | status = original.get("status") 45 | if status is None: 46 | raise KintoException("Malformed response from Kinto") 47 | 48 | if status != "to-review": 49 | log.info("Collection is not marked for review. Skipping.") 50 | return 51 | 52 | try: 53 | resp = self.patch_collection( 54 | original=original, changes=BasicPatch({"status": "to-sign"}) 55 | ) 56 | except KintoException as e: 57 | log.error(f"Couldn't sign {collection}") 58 | raise e 59 | 60 | 61 | if __name__ == "__main__": 62 | OK = 0 63 | ERROR = 1 64 | 65 | parser = argparse.ArgumentParser() 66 | parser.add_argument( 67 | "collection", 68 | help="Collection to sign, either 'cert-revocations' or 'intermediates'", 69 | ) 70 | parser.add_argument( 71 | "--noop", default=KINTO_NOOP, action="store_true", help="Don't update Kinto" 72 | ) 73 | args = parser.parse_args() 74 | 75 | if args.collection == "cert-revocations": 76 | collection = KINTO_CRLITE_COLLECTION 77 | elif args.collection == "intermediates": 78 | collection = KINTO_INTERMEDIATES_COLLECTION 79 | else: 80 | log.error(f"Unknown collection {args.collection}") 81 | sys.exit(ERROR) 82 | 83 | if args.noop: 84 | log.info(f"Would sign off on {collection}, but noop requested") 85 | sys.exit(OK) 86 | 87 | auth = requests.auth.HTTPBasicAuth(KINTO_AUTH_USER, KINTO_AUTH_PASSWORD) 88 | rw_client = SignoffClient( 89 | server_url=KINTO_RW_SERVER_URL, 90 | auth=auth, 91 | bucket=KINTO_BUCKET, 92 | retry=5, 93 | ) 94 | 95 | try: 96 | rw_client.sign_collection(collection=collection) 97 | except KintoException as e: 98 | log.error(f"Kinto exception: {e}") 99 | sys.exit(ERROR) 100 | 101 | sys.exit(OK) 102 | -------------------------------------------------------------------------------- /containers/scripts/crlite-signoff.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | WORKFLOW="${crlite_workflow:-/app/workflow}" 4 | BIN="${crlite_bin:-/app}" 5 | SCRIPTS="${BIN}/scripts" 6 | OUTPUT="${crlite_processing:-/processing}/crlite_db" 7 | RUST_QUERY_CRLITE="${BIN}/rust-query-crlite" 8 | MAX_FILTER_SIZE=${max_filter_size:-10485760} 9 | 10 | source "${WORKFLOW}/0-set_credentials.inc" 11 | 12 | if [[ "${crlite_filter_bucket}" == "crlite-filters-prod" ]]; then 13 | INSTANCE="prod" 14 | elif [[ "${crlite_filter_bucket}" == "crlite-filters-stage" ]]; then 15 | INSTANCE="stage" 16 | elif [[ "${crlite_filter_bucket}" == "crlite-filters-dev" ]]; then 17 | INSTANCE="dev" 18 | else 19 | echo "Cannot map ${crlite_filter_bucket} to known instance" 20 | exit 1; 21 | fi 22 | 23 | # sign off on intermediates 24 | python3 "${SCRIPTS}/crlite-signoff-tool.py" intermediates 25 | 
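# Note: intermediates are signed off unconditionally; the cert-revocations
# signoff below is gated on both the filter-size check and the live
# verification-domains test.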
26 | # sign off on cert-revocations if the verification domains test passes and the 27 | # filter is less than MAX_FILTER_SIZE. 28 | FILTER_SIZE=$(stat --format=%s "${OUTPUT}/crlite.filter") 29 | echo "Filter is ${FILTER_SIZE} bytes." 30 | 31 | if (( FILTER_SIZE > MAX_FILTER_SIZE )) 32 | then 33 | echo "Cannot automatically sign off on a filter larger than max_filter_size=${MAX_FILTER_SIZE} bytes." 34 | exit 1; 35 | fi 36 | 37 | if ! "${RUST_QUERY_CRLITE}" -vvv --db "${OUTPUT}" --update "${INSTANCE}" signoff "${crlite_verify_host_file_urls}" 38 | then 39 | echo "Verification domains test failed" 40 | exit 1; 41 | fi 42 | 43 | echo "Running signoff tool" 44 | python3 "${SCRIPTS}/crlite-signoff-tool.py" cert-revocations 45 | -------------------------------------------------------------------------------- /docs/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/crlite/c4bacb508d696844ab4d527b71b892c86f267724/docs/.DS_Store -------------------------------------------------------------------------------- /docs/figure1-information_flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/crlite/c4bacb508d696844ab4d527b71b892c86f267724/docs/figure1-information_flow.png -------------------------------------------------------------------------------- /docs/figure2-filter_process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/crlite/c4bacb508d696844ab4d527b71b892c86f267724/docs/figure2-filter_process.png -------------------------------------------------------------------------------- /docs/figure3-filter_structure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/crlite/c4bacb508d696844ab4d527b71b892c86f267724/docs/figure3-filter_structure.png -------------------------------------------------------------------------------- /docs/figure4-certificate_identifier.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/crlite/c4bacb508d696844ab4d527b71b892c86f267724/docs/figure4-certificate_identifier.png -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/mozilla/crlite 2 | 3 | go 1.12 4 | -------------------------------------------------------------------------------- /go/cmd/aggregate-crls/crl-auditor.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/hex" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/url" 9 | "sync" 10 | "time" 11 | 12 | "github.com/golang/glog" 13 | "github.com/mozilla/crlite/go" 14 | "github.com/mozilla/crlite/go/downloader" 15 | "github.com/mozilla/crlite/go/rootprogram" 16 | ) 17 | 18 | var ( 19 | AuditKindFailedDownload CrlAuditEntryKind = "Failed Download" 20 | AuditKindFailedProcessLocal CrlAuditEntryKind = "Failed Process Local" 21 | AuditKindFailedVerify CrlAuditEntryKind = "Failed Verify" 22 | AuditKindOlderThanLast CrlAuditEntryKind = "Older Than Previous" 23 | AuditKindNoRevocations CrlAuditEntryKind = "Empty Revocation List" 24 | AuditKindOld CrlAuditEntryKind = "Not Fresh, Warning" 25 | AuditKindExpired CrlAuditEntryKind = "Expired, Allowed" 26 | AuditKindValid 
CrlAuditEntryKind = "Valid, Processed" 27 | ) 28 | 29 | type CrlAuditEntryKind string 30 | 31 | type CrlAuditEntry struct { 32 | Timestamp time.Time 33 | Url string `json:",omitempty"` 34 | Path string `json:",omitempty"` 35 | Age string `json:",omitempty"` 36 | Issuer downloader.DownloadIdentifier 37 | IssuerSubject string 38 | Kind CrlAuditEntryKind 39 | Errors []string `json:",omitempty"` 40 | DNSResults []string `json:",omitempty"` 41 | NumRevocations int `json:",omitempty"` 42 | SHA256Sum string `json:",omitempty"` 43 | } 44 | 45 | type CrlAuditor struct { 46 | mutex *sync.Mutex 47 | issuers *rootprogram.MozIssuers 48 | Entries []CrlAuditEntry 49 | } 50 | 51 | func NewCrlAuditor(issuers *rootprogram.MozIssuers) *CrlAuditor { 52 | return &CrlAuditor{ 53 | mutex: &sync.Mutex{}, 54 | issuers: issuers, 55 | Entries: []CrlAuditEntry{}, 56 | } 57 | } 58 | 59 | func (auditor *CrlAuditor) getSubject(identifier downloader.DownloadIdentifier) string { 60 | issuer, ok := identifier.(*types.Issuer) 61 | if !ok { 62 | return "" 63 | } 64 | subject, err := auditor.issuers.GetSubjectForIssuer(*issuer) 65 | if err != nil { 66 | glog.Warningf("Could not get subject for issuer %s: %v", issuer.ID(), err) 67 | return "" 68 | } 69 | return subject 70 | } 71 | 72 | func (auditor *CrlAuditor) GetEntries() []CrlAuditEntry { 73 | return auditor.Entries 74 | } 75 | 76 | func (auditor *CrlAuditor) WriteReport(fd io.Writer) error { 77 | enc := json.NewEncoder(fd) 78 | auditor.mutex.Lock() 79 | defer auditor.mutex.Unlock() 80 | return enc.Encode(auditor) 81 | } 82 | 83 | func (auditor *CrlAuditor) FailedDownload(issuer downloader.DownloadIdentifier, crlUrl *url.URL, dlTracer *downloader.DownloadTracer, err error) { 84 | auditor.mutex.Lock() 85 | defer auditor.mutex.Unlock() 86 | 87 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 88 | Timestamp: time.Now().UTC(), 89 | Kind: AuditKindFailedDownload, 90 | Url: crlUrl.String(), 91 | Issuer: issuer, 92 | IssuerSubject: auditor.getSubject(issuer), 93 | Errors: append(dlTracer.Errors(), err.Error()), 94 | DNSResults: dlTracer.DNSResults(), 95 | }) 96 | } 97 | 98 | func (auditor *CrlAuditor) FailedVerifyUrl(issuer downloader.DownloadIdentifier, crlUrl *url.URL, dlTracer *downloader.DownloadTracer, err error) { 99 | auditor.mutex.Lock() 100 | defer auditor.mutex.Unlock() 101 | 102 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 103 | Timestamp: time.Now().UTC(), 104 | Kind: AuditKindFailedVerify, 105 | Url: crlUrl.String(), 106 | Issuer: issuer, 107 | IssuerSubject: auditor.getSubject(issuer), 108 | Errors: append(dlTracer.Errors(), err.Error()), 109 | DNSResults: dlTracer.DNSResults(), 110 | }) 111 | } 112 | 113 | func (auditor *CrlAuditor) FailedOlderThanPrevious(issuer downloader.DownloadIdentifier, crlUrl *url.URL, dlTracer *downloader.DownloadTracer, previous time.Time, this time.Time) { 114 | auditor.mutex.Lock() 115 | defer auditor.mutex.Unlock() 116 | 117 | err := fmt.Sprintf("Previous: %s, This Run: %s", previous, this) 118 | 119 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 120 | Timestamp: time.Now().UTC(), 121 | Kind: AuditKindOlderThanLast, 122 | Url: crlUrl.String(), 123 | Issuer: issuer, 124 | IssuerSubject: auditor.getSubject(issuer), 125 | Errors: append(dlTracer.Errors(), err), 126 | DNSResults: dlTracer.DNSResults(), 127 | }) 128 | } 129 | 130 | func (auditor *CrlAuditor) Old(issuer downloader.DownloadIdentifier, crlUrl *url.URL, age time.Duration) { 131 | auditor.mutex.Lock() 132 | defer auditor.mutex.Unlock() 133 | 
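	// An old-but-still-usable CRL: record its age as a freshness warning
	// (kind "Not Fresh, Warning") without populating the Errors field.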
134 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 135 | Timestamp: time.Now().UTC(), 136 | Kind: AuditKindOld, 137 | Url: crlUrl.String(), 138 | Issuer: issuer, 139 | IssuerSubject: auditor.getSubject(issuer), 140 | Age: age.String(), 141 | }) 142 | } 143 | 144 | func (auditor *CrlAuditor) Expired(issuer downloader.DownloadIdentifier, crlUrl *url.URL, nextUpdate time.Time) { 145 | auditor.mutex.Lock() 146 | defer auditor.mutex.Unlock() 147 | 148 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 149 | Timestamp: time.Now().UTC(), 150 | Kind: AuditKindExpired, 151 | Url: crlUrl.String(), 152 | Issuer: issuer, 153 | IssuerSubject: auditor.getSubject(issuer), 154 | Errors: []string{fmt.Sprintf("Expired, NextUpdate was %s", nextUpdate)}, 155 | }) 156 | } 157 | 158 | func (auditor *CrlAuditor) FailedVerifyPath(issuer downloader.DownloadIdentifier, crlUrl *url.URL, crlPath string, err error) { 159 | auditor.mutex.Lock() 160 | defer auditor.mutex.Unlock() 161 | 162 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 163 | Timestamp: time.Now().UTC(), 164 | Kind: AuditKindFailedVerify, 165 | Url: crlUrl.String(), 166 | Path: crlPath, 167 | Issuer: issuer, 168 | IssuerSubject: auditor.getSubject(issuer), 169 | Errors: []string{err.Error()}, 170 | }) 171 | } 172 | func (auditor *CrlAuditor) FailedProcessLocal(issuer downloader.DownloadIdentifier, crlUrl *url.URL, crlPath string, err error) { 173 | auditor.mutex.Lock() 174 | defer auditor.mutex.Unlock() 175 | 176 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 177 | Timestamp: time.Now().UTC(), 178 | Kind: AuditKindFailedProcessLocal, 179 | Url: crlUrl.String(), 180 | Path: crlPath, 181 | Issuer: issuer, 182 | IssuerSubject: auditor.getSubject(issuer), 183 | Errors: []string{err.Error()}, 184 | }) 185 | } 186 | 187 | func (auditor *CrlAuditor) NoRevocations(issuer downloader.DownloadIdentifier, crlUrl *url.URL, crlPath string) { 188 | auditor.mutex.Lock() 189 | defer auditor.mutex.Unlock() 190 | 191 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 192 | Timestamp: time.Now().UTC(), 193 | Kind: AuditKindNoRevocations, 194 | Url: crlUrl.String(), 195 | Path: crlPath, 196 | Issuer: issuer, 197 | IssuerSubject: auditor.getSubject(issuer), 198 | }) 199 | } 200 | 201 | func (auditor *CrlAuditor) ValidAndProcessed(issuer downloader.DownloadIdentifier, crlUrl *url.URL, crlPath string, numRevocations int, age time.Duration, sha256 []byte) { 202 | auditor.mutex.Lock() 203 | defer auditor.mutex.Unlock() 204 | 205 | auditor.Entries = append(auditor.Entries, CrlAuditEntry{ 206 | Timestamp: time.Now().UTC(), 207 | Kind: AuditKindValid, 208 | Url: crlUrl.String(), 209 | Path: crlPath, 210 | Issuer: issuer, 211 | IssuerSubject: auditor.getSubject(issuer), 212 | Age: age.String(), 213 | SHA256Sum: hex.EncodeToString(sha256), 214 | NumRevocations: numRevocations, 215 | }) 216 | } 217 | -------------------------------------------------------------------------------- /go/cmd/aggregate-crls/crl-auditor_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "net/url" 8 | "testing" 9 | "time" 10 | 11 | "github.com/mozilla/crlite/go" 12 | "github.com/mozilla/crlite/go/downloader" 13 | "github.com/mozilla/crlite/go/rootprogram" 14 | ) 15 | 16 | func assertEmptyList(t *testing.T, a *CrlAuditor) { 17 | t.Helper() 18 | idx := 0 19 | for _ = range a.GetEntries() { 20 | idx += 1 21 | } 22 | if idx != 0 { 23 | 
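		// Each test starts with a fresh auditor, so any entry counted
		// at this point was recorded unexpectedly.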
t.Errorf("Expected no entries, found %d", idx) 24 | } 25 | } 26 | 27 | func assertValidEntry(t *testing.T, ent *CrlAuditEntry) { 28 | t.Helper() 29 | // Check mandatory fields 30 | if ent.Timestamp.IsZero() { 31 | t.Error("Timestamp should not be zero") 32 | } 33 | if len(ent.Url) == 0 && len(ent.Path) == 0 { 34 | t.Errorf("Either URL or Path must be set: %+v", ent) 35 | } 36 | if ent.Issuer.ID() == "" { 37 | t.Error("Issuer is mandatory") 38 | } 39 | if ent.IssuerSubject == "" { 40 | t.Error("IssuerSubject is mandatory") 41 | } 42 | if ent.Kind != AuditKindNoRevocations && ent.Kind != AuditKindOld && ent.Kind != AuditKindValid { 43 | if len(ent.Errors) == 0 { 44 | t.Error("Expecting an error message") 45 | } 46 | } 47 | if ent.Kind == AuditKindValid && ent.NumRevocations < 1 { 48 | t.Error("Valid kinds should have at least one revocation") 49 | } 50 | } 51 | 52 | func assertOnlyEntryInList(t *testing.T, a *CrlAuditor, entryKind CrlAuditEntryKind) *CrlAuditEntry { 53 | t.Helper() 54 | num := 0 55 | for _, entry := range a.GetEntries() { 56 | num += 1 57 | assertValidEntry(t, &entry) 58 | if entry.Kind == entryKind { 59 | return &entry 60 | } 61 | if num > 1 { 62 | t.Errorf("More than one entry in list") 63 | } 64 | } 65 | t.Fatalf("Entry type %v not in list of size %d", entryKind, num) 66 | return nil 67 | } 68 | 69 | func assertEntryUrlAndIssuer(t *testing.T, ent *CrlAuditEntry, issuer types.Issuer, 70 | issuersObj *rootprogram.MozIssuers, url *url.URL) { 71 | t.Helper() 72 | if ent.Url != url.String() { 73 | t.Errorf("Expected URL of %v got %v", url, ent.Url) 74 | } 75 | if ent.Issuer.ID() != issuer.ID() { 76 | t.Errorf("Expected Issuer of %v got %v", issuer, ent.Issuer) 77 | } 78 | expectedSubject, err := issuersObj.GetSubjectForIssuer(issuer) 79 | if err != nil { 80 | t.Error(err) 81 | } 82 | if ent.IssuerSubject != expectedSubject { 83 | t.Errorf("Expected Issuer Subject of %v got %v", expectedSubject, ent.IssuerSubject) 84 | } 85 | assertValidEntry(t, ent) 86 | } 87 | 88 | func assertEntryUrlPathAndIssuer(t *testing.T, ent *CrlAuditEntry, issuer types.Issuer, 89 | issuersObj *rootprogram.MozIssuers, url *url.URL, path string) { 90 | t.Helper() 91 | if ent.Path != path { 92 | t.Errorf("Expected Path of %v got %v", path, ent.Path) 93 | } 94 | if ent.Url != url.String() { 95 | t.Errorf("Expected URL of %v got %v", url, ent.Url) 96 | } 97 | if ent.Issuer.ID() != issuer.ID() { 98 | t.Errorf("Expected Issuer of %v got %v", issuer, ent.Issuer) 99 | } 100 | expectedSubject, err := issuersObj.GetSubjectForIssuer(issuer) 101 | if err != nil { 102 | t.Error(err) 103 | } 104 | if ent.IssuerSubject != expectedSubject { 105 | t.Errorf("Expected Issuer Subject of %v got %v", expectedSubject, ent.IssuerSubject) 106 | } 107 | assertValidEntry(t, ent) 108 | } 109 | 110 | type testOutReport struct { 111 | Entries []testCrlAuditEntry 112 | } 113 | type testCrlAuditEntry struct { 114 | Timestamp time.Time 115 | Url string 116 | Path string 117 | Age string 118 | Issuer string 119 | IssuerSubject string 120 | Kind CrlAuditEntryKind 121 | Errors []string 122 | DNSResults []string 123 | } 124 | 125 | func (ent *testCrlAuditEntry) assertOkay(t *testing.T) { 126 | if ent.Timestamp.IsZero() { 127 | t.Error("Timestamp should not be zero") 128 | } 129 | if len(ent.Url) == 0 && len(ent.Path) == 0 { 130 | t.Errorf("Either URL or Path must be set: %+v", ent) 131 | } 132 | if ent.Issuer == "" { 133 | t.Error("Issuer is mandatory") 134 | } 135 | if ent.Kind != AuditKindNoRevocations && ent.Kind != AuditKindOld { 136 
| if len(ent.Errors) == 0 { 137 | t.Error("Expecting an error message") 138 | } 139 | } 140 | } 141 | 142 | func assertAuditorReportHasEntries(t *testing.T, auditor *CrlAuditor, count int) { 143 | t.Helper() 144 | var b bytes.Buffer 145 | err := auditor.WriteReport(&b) 146 | if err != nil { 147 | t.Fatal(err) 148 | } 149 | 150 | dec := json.NewDecoder(&b) 151 | report := &testOutReport{} 152 | err = dec.Decode(report) 153 | if err != nil { 154 | t.Fatal(err) 155 | } 156 | 157 | if len(report.Entries) != count { 158 | t.Errorf("Expected %d audit report entries but found %d", count, len(report.Entries)) 159 | } 160 | for _, e := range report.Entries { 161 | e.assertOkay(t) 162 | } 163 | } 164 | 165 | func Test_FailedDownload(t *testing.T) { 166 | issuersObj := rootprogram.NewMozillaIssuers() 167 | auditor := NewCrlAuditor(issuersObj) 168 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 169 | url, _ := url.Parse("http://test/crl") 170 | 171 | assertEmptyList(t, auditor) 172 | 173 | auditor.FailedDownload(&issuer, url, downloader.NewDownloadTracer(), fmt.Errorf("bad error")) 174 | 175 | ent := assertOnlyEntryInList(t, auditor, AuditKindFailedDownload) 176 | assertEntryUrlAndIssuer(t, ent, issuer, issuersObj, url) 177 | } 178 | 179 | func Test_FailedVerify(t *testing.T) { 180 | issuersObj := rootprogram.NewMozillaIssuers() 181 | auditor := NewCrlAuditor(issuersObj) 182 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 183 | url, _ := url.Parse("http://test/crl") 184 | 185 | assertEmptyList(t, auditor) 186 | 187 | auditor.FailedVerifyUrl(&issuer, url, downloader.NewDownloadTracer(), fmt.Errorf("bad error")) 188 | 189 | ent := assertOnlyEntryInList(t, auditor, AuditKindFailedVerify) 190 | assertEntryUrlAndIssuer(t, ent, issuer, issuersObj, url) 191 | } 192 | 193 | func Test_FailedProcessLocal(t *testing.T) { 194 | issuersObj := rootprogram.NewMozillaIssuers() 195 | auditor := NewCrlAuditor(issuersObj) 196 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 197 | path := "crls/crl.pem" 198 | url, _ := url.Parse("http://test/crl") 199 | 200 | assertEmptyList(t, auditor) 201 | 202 | auditor.FailedProcessLocal(&issuer, url, path, fmt.Errorf("bad error")) 203 | 204 | ent := assertOnlyEntryInList(t, auditor, AuditKindFailedProcessLocal) 205 | assertEntryUrlPathAndIssuer(t, ent, issuer, issuersObj, url, path) 206 | } 207 | 208 | func Test_FailedVerifyLocal(t *testing.T) { 209 | issuersObj := rootprogram.NewMozillaIssuers() 210 | auditor := NewCrlAuditor(issuersObj) 211 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 212 | path := "crls/crl.pem" 213 | url, _ := url.Parse("http://test/crl") 214 | 215 | assertEmptyList(t, auditor) 216 | 217 | auditor.FailedVerifyPath(&issuer, url, path, fmt.Errorf("bad error")) 218 | 219 | ent := assertOnlyEntryInList(t, auditor, AuditKindFailedVerify) 220 | assertEntryUrlPathAndIssuer(t, ent, issuer, issuersObj, url, path) 221 | } 222 | 223 | func Test_FailedNoRevocations(t *testing.T) { 224 | issuersObj := rootprogram.NewMozillaIssuers() 225 | auditor := NewCrlAuditor(issuersObj) 226 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 227 | path := "crls/crl.pem" 228 | url, _ := url.Parse("http://test/crl") 229 | 230 | assertEmptyList(t, auditor) 231 | 232 | auditor.NoRevocations(&issuer, url, path) 233 | 234 | ent := assertOnlyEntryInList(t, auditor, AuditKindNoRevocations) 235 | assertEntryUrlPathAndIssuer(t, ent, issuer, issuersObj, 
url, path) 236 | } 237 | 238 | func Test_FailedOld(t *testing.T) { 239 | issuersObj := rootprogram.NewMozillaIssuers() 240 | auditor := NewCrlAuditor(issuersObj) 241 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 242 | url, _ := url.Parse("http://test/crl") 243 | 244 | assertEmptyList(t, auditor) 245 | 246 | age, err := time.ParseDuration("900h") 247 | if err != nil { 248 | t.Error(err) 249 | } 250 | 251 | auditor.Old(&issuer, url, age) 252 | 253 | ent := assertOnlyEntryInList(t, auditor, AuditKindOld) 254 | assertEntryUrlAndIssuer(t, ent, issuer, issuersObj, url) 255 | } 256 | 257 | func Test_FailedOlderThanPrevious(t *testing.T) { 258 | issuersObj := rootprogram.NewMozillaIssuers() 259 | auditor := NewCrlAuditor(issuersObj) 260 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 261 | url, _ := url.Parse("http://test/crl") 262 | 263 | assertEmptyList(t, auditor) 264 | 265 | auditor.FailedOlderThanPrevious(&issuer, url, downloader.NewDownloadTracer(), time.Now(), time.Now().AddDate(0, 0, -1)) 266 | 267 | ent := assertOnlyEntryInList(t, auditor, AuditKindOlderThanLast) 268 | assertEntryUrlAndIssuer(t, ent, issuer, issuersObj, url) 269 | } 270 | 271 | func Test_FailedExpired(t *testing.T) { 272 | issuersObj := rootprogram.NewMozillaIssuers() 273 | auditor := NewCrlAuditor(issuersObj) 274 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 275 | url, _ := url.Parse("http://test/crl") 276 | 277 | assertEmptyList(t, auditor) 278 | 279 | auditor.Expired(&issuer, url, time.Now().AddDate(0, 0, -1)) 280 | 281 | ent := assertOnlyEntryInList(t, auditor, AuditKindExpired) 282 | assertEntryUrlAndIssuer(t, ent, issuer, issuersObj, url) 283 | } 284 | 285 | func Test_Valid(t *testing.T) { 286 | issuersObj := rootprogram.NewMozillaIssuers() 287 | auditor := NewCrlAuditor(issuersObj) 288 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 289 | url, _ := url.Parse("http://test/crl") 290 | path := "/var/tmp/issuer.crl" 291 | 292 | assertEmptyList(t, auditor) 293 | 294 | age, err := time.ParseDuration("900h") 295 | if err != nil { 296 | t.Error(err) 297 | } 298 | 299 | auditor.ValidAndProcessed(&issuer, url, path, 42, age, []byte{0x42}) 300 | 301 | ent := assertOnlyEntryInList(t, auditor, AuditKindValid) 302 | assertEntryUrlPathAndIssuer(t, ent, issuer, issuersObj, url, path) 303 | } 304 | 305 | func Test_EmptyReport(t *testing.T) { 306 | issuersObj := rootprogram.NewMozillaIssuers() 307 | auditor := NewCrlAuditor(issuersObj) 308 | assertEmptyList(t, auditor) 309 | 310 | var b bytes.Buffer 311 | err := auditor.WriteReport(&b) 312 | if err != nil { 313 | t.Error(err) 314 | } 315 | 316 | expected := []byte("{\"Entries\":[]}\n") 317 | if !bytes.Equal(b.Bytes(), expected) { 318 | t.Errorf("Expected %v got %v", expected, b.Bytes()) 319 | } 320 | 321 | assertAuditorReportHasEntries(t, auditor, 0) 322 | } 323 | 324 | func Test_SeveralFailures(t *testing.T) { 325 | issuersObj := rootprogram.NewMozillaIssuers() 326 | auditor := NewCrlAuditor(issuersObj) 327 | issuer := issuersObj.NewTestIssuerFromSubjectString("Test Corporation SA") 328 | url, _ := url.Parse("http://test/crl") 329 | 330 | assertEmptyList(t, auditor) 331 | 332 | age, err := time.ParseDuration("900h") 333 | if err != nil { 334 | t.Error(err) 335 | } 336 | 337 | auditor.Old(&issuer, url, age) 338 | auditor.Old(&issuer, url, age) 339 | auditor.Old(&issuer, url, age) 340 | 341 | if len(auditor.GetEntries()) != 3 { 342 | t.Errorf("Expected 3 entries") 
343 | } 344 | for _, e := range auditor.GetEntries() { 345 | assertEntryUrlAndIssuer(t, &e, issuer, issuersObj, url) 346 | } 347 | 348 | path := "/var/tmp/issuer.crl" 349 | 350 | auditor.NoRevocations(&issuer, url, path) 351 | auditor.NoRevocations(&issuer, url, path) 352 | auditor.NoRevocations(&issuer, url, path) 353 | 354 | if len(auditor.GetEntries()) != 6 { 355 | t.Errorf("Expected 6 entries") 356 | } 357 | 358 | for i, e := range auditor.GetEntries() { 359 | if i < 3 { 360 | assertEntryUrlAndIssuer(t, &e, issuer, issuersObj, url) 361 | } else { 362 | assertEntryUrlPathAndIssuer(t, &e, issuer, issuersObj, url, path) 363 | } 364 | } 365 | 366 | assertAuditorReportHasEntries(t, auditor, 6) 367 | } 368 | -------------------------------------------------------------------------------- /go/cmd/aggregate-known/aggregate-known.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "encoding/json" 7 | "flag" 8 | "os" 9 | "os/signal" 10 | "path/filepath" 11 | "sync" 12 | "syscall" 13 | "time" 14 | 15 | "github.com/golang/glog" 16 | "github.com/mozilla/crlite/go" 17 | "github.com/mozilla/crlite/go/config" 18 | "github.com/mozilla/crlite/go/engine" 19 | "github.com/mozilla/crlite/go/rootprogram" 20 | "github.com/mozilla/crlite/go/storage" 21 | ) 22 | 23 | const ( 24 | permMode = 0644 25 | permModeDir = 0755 26 | ) 27 | 28 | var ( 29 | enrolledpath = flag.String("enrolledpath", "", "input enrolled issuers JSON") 30 | knownpath = flag.String("knownpath", "", "output directory for files") 31 | ctlogspath = flag.String("ctlogspath", "", "output file for ct-log JSON") 32 | ctconfig = config.NewCTConfig() 33 | ) 34 | 35 | type knownWorkUnit struct { 36 | issuer types.Issuer 37 | issuerDN string 38 | expDates []types.ExpDate 39 | } 40 | 41 | type knownWorker struct { 42 | savePath string 43 | certDB storage.CertDatabase 44 | } 45 | 46 | func (kw knownWorker) run(ctx context.Context, wg *sync.WaitGroup, workChan <-chan knownWorkUnit) { 47 | defer wg.Done() 48 | 49 | err := os.MkdirAll(kw.savePath, permModeDir) 50 | if err != nil && !os.IsExist(err) { 51 | glog.Fatalf("Could not make directory %s: %s", kw.savePath, err) 52 | } 53 | 54 | for tuple := range workChan { 55 | // Wrap in anonymous function to defer a writer.Flush & fd.Close per work unit 56 | func() { 57 | path := filepath.Join(kw.savePath, tuple.issuer.ID()) 58 | fd, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, permMode) 59 | if err != nil { 60 | glog.Fatalf("[%s] Could not open known certificates file: %s", tuple.issuer.ID(), err) 61 | } 62 | defer fd.Close() 63 | 64 | writer := bufio.NewWriter(fd) 65 | defer writer.Flush() 66 | 67 | var serialCount uint64 68 | 69 | for _, expDate := range tuple.expDates { 70 | select { 71 | case <-ctx.Done(): 72 | glog.Warningf("Signal on worker quit channel, quitting (count=%d).", serialCount) 73 | return 74 | default: 75 | } 76 | 77 | if expDate.IsExpiredAt(time.Now()) { 78 | if glog.V(1) { 79 | glog.Warningf("Date %s is expired now, skipping (issuer=%s)", expDate, tuple.issuer.ID()) 80 | } 81 | continue 82 | } 83 | 84 | // Sharded by expiry date, so this should be fairly small. 
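// Each (issuer, expiration date) shard is read with a single call, so
// memory use is bounded by the largest shard rather than by an issuer's
// full serial set. A rough sketch of the layout (illustrative, not the
// literal cache key format):
//
//   serials / <issuer.ID()> / <expDate.ID()> -> { serialA, serialB, ... }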
85 | knownSet, err := kw.certDB.ReadSerialsFromStorage(expDate, tuple.issuer) 86 | if err != nil { 87 | glog.Fatalf("[%s] Could not read serials with expDate=%s: %s", tuple.issuer.ID(), expDate.ID(), err) 88 | } 89 | knownSetLen := uint64(len(knownSet)) 90 | 91 | if knownSetLen == 0 { 92 | // This is almost certainly due to an hour-rollover since the loader ran, and expired all the next hour's 93 | // certs. 94 | glog.Warningf("No cached certificates for issuer=%s (%s) expDate=%s, but the loader thought there should be."+ 95 | " (current count this worker=%d)", tuple.issuerDN, tuple.issuer.ID(), expDate, serialCount) 96 | } 97 | 98 | serialCount += knownSetLen 99 | err = storage.WriteSerialList(writer, expDate, tuple.issuer, knownSet) 100 | if err != nil { 101 | glog.Fatalf("[%s] Could not write serials: %s", tuple.issuer.ID(), err) 102 | } 103 | } 104 | glog.Infof("[%s] %d total known serials for %s (shards=%d)", tuple.issuer.ID(), 105 | serialCount, tuple.issuerDN, len(tuple.expDates)) 106 | }() 107 | 108 | select { 109 | case <-ctx.Done(): 110 | return 111 | default: 112 | } 113 | } 114 | } 115 | 116 | func checkPathArg(strObj string, confOptionName string, ctconfig *config.CTConfig) { 117 | if strObj == "" { 118 | glog.Errorf("Flag %s is not set", confOptionName) 119 | ctconfig.Usage() 120 | os.Exit(2) 121 | } 122 | } 123 | 124 | func main() { 125 | ctconfig.Init() 126 | 127 | ctx := context.Background() 128 | ctx, cancelMain := context.WithCancel(ctx) 129 | 130 | // Try to handle SIGINT and SIGTERM gracefully 131 | sigChan := make(chan os.Signal, 1) 132 | signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) 133 | defer close(sigChan) 134 | go func() { 135 | sig := <-sigChan 136 | glog.Infof("Signal caught: %s..", sig) 137 | cancelMain() 138 | signal.Stop(sigChan) // Restore default behavior 139 | }() 140 | 141 | certDB, cache := engine.GetConfiguredStorage(ctx, ctconfig, false) 142 | defer glog.Flush() 143 | 144 | checkPathArg(*enrolledpath, "enrolledpath", ctconfig) 145 | checkPathArg(*knownpath, "knownpath", ctconfig) 146 | checkPathArg(*ctlogspath, "ctlogspath", ctconfig) 147 | 148 | if err := os.MkdirAll(*knownpath, permModeDir); err != nil { 149 | glog.Fatalf("Unable to make the output directory: %s", err) 150 | } 151 | 152 | engine.PrepareTelemetry("aggregate-known", ctconfig) 153 | 154 | mozIssuers := rootprogram.NewMozillaIssuers() 155 | if err := mozIssuers.LoadEnrolledIssuers(*enrolledpath); err != nil { 156 | glog.Fatalf("Failed to load enrolled issuers from disk: %s", err) 157 | } 158 | 159 | glog.Infof("%d issuers loaded", len(mozIssuers.GetIssuers())) 160 | 161 | glog.Infof("Committing DB changes since last run") 162 | commitToken, err := cache.AcquireCommitLock() 163 | if err != nil || commitToken == nil { 164 | glog.Fatalf("Failed to acquire commit lock: %s", err) 165 | } 166 | defer cache.ReleaseCommitLock(*commitToken) 167 | 168 | err = certDB.Commit(*commitToken) 169 | if err != nil { 170 | glog.Fatalf("Error in commit: %s", err) 171 | } 172 | 173 | logList, err := certDB.GetCTLogsFromStorage() 174 | if err != nil { 175 | glog.Fatalf("Error reading coverage metadata: %s", err) 176 | } 177 | 178 | ctLogFD, err := os.OpenFile(*ctlogspath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) 179 | if err != nil { 180 | glog.Fatalf("Error opening %s: %s", *ctlogspath, err) 181 | } 182 | 183 | enc := json.NewEncoder(ctLogFD) 184 | if err := enc.Encode(logList); err != nil { 185 | glog.Fatalf("Error marshaling ct-logs list %s: %s", *ctlogspath, err) 186 | } 187 | ctLogFD.Close() 
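// ctLogFD is closed promptly, rather than deferred, so the ct-logs
// coverage snapshot is completely written before the long-running
// aggregation below starts. Since the snapshot is taken after the
// Commit above, it describes the same database state the workers read.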
188 | 189 | glog.Infof("Listing issuers and their expiration dates...") 190 | issuerList, err := certDB.GetIssuerAndDatesFromStorage() 191 | if err != nil { 192 | glog.Fatal(err) 193 | } 194 | 195 | var count int64 196 | for _, iObj := range issuerList { 197 | if mozIssuers.IsIssuerInProgram(iObj.Issuer) { 198 | count = count + int64(len(iObj.ExpDates)) 199 | } 200 | } 201 | 202 | workChan := make(chan knownWorkUnit, count) 203 | for _, iObj := range issuerList { 204 | if !mozIssuers.IsIssuerInProgram(iObj.Issuer) { 205 | continue 206 | } 207 | 208 | issuerSubj, err := mozIssuers.GetSubjectForIssuer(iObj.Issuer) 209 | if err != nil { 210 | glog.Warningf("Couldn't get subject for issuer=%s that is in the root program: %s", 211 | iObj.Issuer.ID(), err) 212 | issuerSubj = "" 213 | } 214 | 215 | wu := knownWorkUnit{ 216 | issuer: iObj.Issuer, 217 | issuerDN: issuerSubj, 218 | expDates: iObj.ExpDates, 219 | } 220 | 221 | select { 222 | case workChan <- wu: 223 | default: 224 | glog.Fatalf("Channel overflow. Aborting at %+v", wu) 225 | } 226 | } 227 | // Signal that was the last work 228 | close(workChan) 229 | 230 | glog.Infof("Starting worker processes to handle %d work units", count) 231 | 232 | var wg sync.WaitGroup 233 | 234 | // Start the workers 235 | for t := 0; t < *ctconfig.NumThreads; t++ { 236 | wg.Add(1) 237 | worker := knownWorker{ 238 | savePath: *knownpath, 239 | certDB: certDB, 240 | } 241 | go worker.run(ctx, &wg, workChan) 242 | } 243 | 244 | wg.Wait() 245 | } 246 | -------------------------------------------------------------------------------- /go/cmd/get-mozilla-issuers/get-mozilla-issuers.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "flag" 6 | "os" 7 | 8 | "github.com/golang/glog" 9 | "github.com/mozilla/crlite/go/rootprogram" 10 | ) 11 | 12 | var ( 13 | outfile = flag.String("out", "", "output json dictionary of issuers") 14 | inccadb = flag.String("ccadb", "", "input CCADB CSV path") 15 | ) 16 | 17 | func main() { 18 | flag.Parse() 19 | 20 | var err error 21 | 22 | defer glog.Flush() 23 | 24 | mozIssuers := rootprogram.NewMozillaIssuers() 25 | 26 | if *inccadb != "" { 27 | err = mozIssuers.LoadFromDisk(*inccadb) 28 | } else { 29 | err = mozIssuers.Load() 30 | } 31 | 32 | if err != nil { 33 | glog.Fatal(err) 34 | } 35 | 36 | if *outfile == "" { 37 | enc := json.NewEncoder(os.Stdout) 38 | enc.SetIndent("", " ") 39 | if err = enc.Encode(mozIssuers.GetIssuers()); err != nil { 40 | glog.Fatal(err) 41 | } 42 | return 43 | } 44 | 45 | if err = mozIssuers.SaveIssuersList(*outfile); err != nil { 46 | glog.Fatal(err) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /go/config/config.go: -------------------------------------------------------------------------------- 1 | /* This Source Code Form is subject to the terms of the Mozilla Public 2 | * License, v. 2.0. If a copy of the MPL was not distributed with this 3 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 4 | 5 | package config 6 | 7 | import ( 8 | "flag" 9 | "fmt" 10 | "os" 11 | "os/user" 12 | "strconv" 13 | 14 | "github.com/golang/glog" 15 | "gopkg.in/ini.v1" 16 | ) 17 | 18 | type CTConfig struct { 19 | RemoteSettingsURL *string 20 | CTLogMetadata *string 21 | CertPath *string 22 | GoogleProjectId *string 23 | RedisHost *string 24 | RedisTimeout *string 25 | BatchSize *uint64 26 | NumThreads *int 27 | RunForever *bool 28 | LogExpiredEntries *bool 29 | SavePeriod *string 30 | PollingDelay *uint64 31 | StatsRefreshPeriod *string 32 | Config *string 33 | StatsDHost *string 34 | StatsDPort *int 35 | HealthAddr *string 36 | RemoteSettingsUpdateInterval *uint64 37 | } 38 | 39 | func confInt(p *int, section *ini.Section, key string, def int) { 40 | val, ok := os.LookupEnv(key) 41 | if ok { 42 | i, err := strconv.ParseInt(val, 10, 32) 43 | if err == nil { 44 | *p = int(i) 45 | return 46 | } 47 | } 48 | 49 | *p = def 50 | if section != nil { 51 | k := section.Key(key) 52 | if k != nil { 53 | v, err := k.Int() 54 | if err == nil { 55 | *p = v 56 | } 57 | } 58 | } 59 | } 60 | 61 | func confUint64(p *uint64, section *ini.Section, key string, def uint64) { 62 | // Final override is the environment variable 63 | val, ok := os.LookupEnv(key) 64 | if ok { 65 | u, err := strconv.ParseUint(val, 10, 64) 66 | if err == nil { 67 | *p = u 68 | return 69 | } 70 | } 71 | 72 | // Assume default 73 | *p = def 74 | if section != nil { 75 | k := section.Key(key) 76 | if k != nil { 77 | v, err := k.Uint64() 78 | if err == nil { 79 | *p = v 80 | } 81 | } 82 | } 83 | } 84 | 85 | func confBool(p *bool, section *ini.Section, key string, def bool) { 86 | // Final override is the environment variable 87 | val, ok := os.LookupEnv(key) 88 | if ok { 89 | b, err := strconv.ParseBool(val) 90 | if err == nil { 91 | *p = b 92 | return 93 | } 94 | } 95 | 96 | *p = def 97 | if section != nil { 98 | k := section.Key(key) 99 | if k != nil { 100 | v, err := k.Bool() 101 | if err == nil { 102 | *p = v 103 | } 104 | } 105 | } 106 | 107 | } 108 | 109 | func confString(p *string, section *ini.Section, key string, def string) { 110 | *p = def 111 | if section != nil { 112 | k := section.Key(key) 113 | if k != nil && len(k.String()) > 0 { 114 | *p = k.String() 115 | } 116 | } 117 | val, ok := os.LookupEnv(key) 118 | if ok { 119 | *p = val 120 | } 121 | } 122 | 123 | func NewCTConfig() *CTConfig { 124 | return &CTConfig{ 125 | BatchSize: new(uint64), 126 | RemoteSettingsURL: new(string), 127 | CTLogMetadata: new(string), 128 | NumThreads: new(int), 129 | LogExpiredEntries: new(bool), 130 | RunForever: new(bool), 131 | CertPath: new(string), 132 | GoogleProjectId: new(string), 133 | StatsDHost: new(string), 134 | StatsDPort: new(int), 135 | HealthAddr: new(string), 136 | RedisHost: new(string), 137 | RedisTimeout: new(string), 138 | SavePeriod: new(string), 139 | StatsRefreshPeriod: new(string), 140 | PollingDelay: new(uint64), 141 | RemoteSettingsUpdateInterval: new(uint64), 142 | } 143 | } 144 | 145 | func (c *CTConfig) Init() { 146 | var confFile string 147 | var flagBatchSize uint64 148 | flag.StringVar(&confFile, "config", "", "configuration .ini file") 149 | flag.Uint64Var(&flagBatchSize, "batchSize", 0, "limit on number of CT log entries to download per job") 150 | 151 | flag.Parse() 152 | 153 | if len(confFile) == 0 { 154 | userObj, err := user.Current() 155 | if err == nil { 156 | defPath := fmt.Sprintf("%s/.ct-fetch.ini", userObj.HomeDir) 157 | if _, err := os.Stat(defPath); err == nil { 158 | confFile = defPath 159 | } 160 | } 
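// Settings resolve lowest-to-highest: built-in default, then the .ini
// section, then an environment variable of the same name, then a CLI
// flag where one exists (currently only batchSize). For example, with
// "batchSize = 100" in the .ini file (invocations illustrative):
//
//   ct-fetch                               -> BatchSize 100 (ini beats the 4096 default)
//   batchSize=200 ct-fetch                 -> BatchSize 200 (env beats ini)
//   batchSize=200 ct-fetch -batchSize 300  -> BatchSize 300 (flag beats env)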
161 | } 162 | 163 | // First, check the config file, which might have come from a CLI parameter 164 | var section *ini.Section 165 | if len(confFile) > 0 { 166 | cfg, err := ini.Load(confFile) 167 | if err == nil { 168 | glog.Infof("Loaded config file from %s\n", confFile) 169 | section = cfg.Section("") 170 | } else { 171 | glog.Errorf("Could not load config file: %s\n", err) 172 | } 173 | } 174 | 175 | // Fill in values, where conf file < env vars 176 | confUint64(c.BatchSize, section, "batchSize", 4096) 177 | confString(c.RemoteSettingsURL, section, "remoteSettingsURL", "") 178 | confString(c.CTLogMetadata, section, "ctLogMetadata", "") 179 | confInt(c.NumThreads, section, "numThreads", 1) 180 | confBool(c.LogExpiredEntries, section, "logExpiredEntries", false) 181 | confBool(c.RunForever, section, "runForever", false) 182 | confUint64(c.PollingDelay, section, "pollingDelay", 600) 183 | confUint64(c.RemoteSettingsUpdateInterval, section, "remoteSettingsUpdateInterval", 3600) 184 | confString(c.SavePeriod, section, "savePeriod", "15m") 185 | confString(c.CertPath, section, "certPath", "") 186 | confString(c.GoogleProjectId, section, "googleProjectId", "") 187 | confString(c.RedisHost, section, "redisHost", "") 188 | confString(c.RedisTimeout, section, "redisTimeout", "5s") 189 | confString(c.StatsRefreshPeriod, section, "statsRefreshPeriod", "10m") 190 | confString(c.StatsDHost, section, "statsdHost", "") 191 | confInt(c.StatsDPort, section, "statsdPort", 8125) 192 | confString(c.HealthAddr, section, "healthAddr", ":8080") 193 | 194 | // Finally, CLI flags override 195 | if flagBatchSize > 0 { 196 | *c.BatchSize = flagBatchSize 197 | } 198 | } 199 | 200 | func (c *CTConfig) Usage() { 201 | flag.Usage() 202 | 203 | fmt.Println("") 204 | fmt.Println("Environment variable or config file directives:") 205 | fmt.Println("") 206 | fmt.Println("The certPath and redisHost variables are mandatory:") 207 | fmt.Println("certPath = path under which to store persistent certificate data") 208 | fmt.Println("redisHost = address:port of the Redis instance") 209 | fmt.Println("") 210 | fmt.Println("Options:") 211 | fmt.Println("remoteSettingsURL = The base URL for remote settings requests") 212 | fmt.Println("ctLogMetadata = A string containing a JSON array of CTLogMetadata objects, for debugging") 213 | fmt.Println("googleProjectId = Google Cloud Platform Project ID, used for stackdriver logging") 214 | fmt.Println("runForever = Run forever, pausing `pollingDelay` seconds between runs") 215 | fmt.Println("pollingDelay = Wait time in seconds between polls. Jitter will be added.") 216 | fmt.Println("logExpiredEntries = Add expired entries to the database") 217 | fmt.Println("numThreads = Use this many threads for normal operations") 218 | fmt.Println("savePeriod = Duration between state saves, e.g. 15m") 219 | fmt.Println("statsRefreshPeriod = Period between stats being dumped to stderr, only if statsdHost and statsdPort are not set") 220 | fmt.Println("statsdHost = host for StatsD information") 221 | fmt.Println("statsdPort = port for StatsD information") 222 | fmt.Println("redisTimeout = Timeout for operations from Redis, e.g. 10s") 223 | fmt.Println("healthAddr = Address to host the /health information HTTP endpoint, e.g.
localhost:8080") 224 | } 225 | -------------------------------------------------------------------------------- /go/config/config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "gopkg.in/ini.v1" 5 | "testing" 6 | ) 7 | 8 | func Test_Defaults(t *testing.T) { 9 | var section *ini.Section 10 | 11 | var u uint64 12 | confUint64(&u, section, "var", 99) 13 | if u != 99 { 14 | t.Errorf("Expected the default of 99, got %d", u) 15 | } 16 | 17 | var i int 18 | confInt(&i, section, "var", -99) 19 | if i != -99 { 20 | t.Errorf("Expected the default of -99, got %d", i) 21 | } 22 | 23 | var b bool 24 | confBool(&b, section, "var", true) 25 | if b != true { 26 | t.Errorf("Expected default of true") 27 | } 28 | 29 | confBool(&b, section, "var", false) 30 | if b != false { 31 | t.Errorf("Expected default of false") 32 | } 33 | 34 | var s string 35 | confString(&s, section, "var", "hotdog") 36 | if s != "hotdog" { 37 | t.Errorf("Expected the default of hotdog, got %s", s) 38 | } 39 | } 40 | 41 | func Test_SectionOverride(t *testing.T) { 42 | cfg := ini.Empty() 43 | section, err := cfg.NewSection("new section") 44 | if err != nil { 45 | t.Error(err) 46 | } 47 | 48 | _, _ = section.NewKey("signedint", "-42") 49 | 50 | var i int 51 | confInt(&i, section, "signedint", -99) 52 | if i != -42 { 53 | t.Errorf("Expected the config value of -42, got %d", i) 54 | } 55 | 56 | _, _ = section.NewKey("booltrue", "true") 57 | 58 | var b bool 59 | confBool(&b, section, "booltrue", false) 60 | if b != true { 61 | t.Error("Expected true") 62 | } 63 | 64 | _, _ = section.NewKey("boolfalse", "false") 65 | 66 | confBool(&b, section, "boolfalse", true) 67 | if b != false { 68 | t.Error("Expected false") 69 | } 70 | 71 | _, _ = section.NewKey("string", "sandwich") 72 | 73 | var s string 74 | confString(&s, section, "string", "doom") 75 | if s != "sandwich" { 76 | t.Errorf("Expected the value sandwich, got %s", s) 77 | } 78 | 79 | _, _ = section.NewKey("uint64", "935939539593953") 80 | 81 | var u uint64 82 | confUint64(&u, section, "uint64", 1234567890123456789) 83 | if u != 935939539593953 { 84 | t.Errorf("Expected the value 935939539593953, got %v", u) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /go/downloader/download-auditor.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "net/url" 5 | ) 6 | 7 | type DownloadIdentifier interface { 8 | ID() string 9 | } 10 | 11 | type DownloadAuditor interface { 12 | FailedDownload(identifier DownloadIdentifier, crlUrl *url.URL, dlTracer *DownloadTracer, err error) 13 | FailedVerifyUrl(identifier DownloadIdentifier, crlUrl *url.URL, dlTracer *DownloadTracer, err error) 14 | FailedVerifyPath(identifier DownloadIdentifier, crlUrl *url.URL, crlPath string, err error) 15 | } 16 | -------------------------------------------------------------------------------- /go/downloader/download-tracer.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "context" 5 | "net/http/httptrace" 6 | 7 | "github.com/golang/glog" 8 | ) 9 | 10 | type DownloadTracer struct { 11 | DNSDone []httptrace.DNSDoneInfo 12 | } 13 | 14 | func NewDownloadTracer() *DownloadTracer { 15 | return &DownloadTracer{ 16 | DNSDone: []httptrace.DNSDoneInfo{}, 17 | } 18 | } 19 | 20 | func (da *DownloadTracer) dnsDone(ddi httptrace.DNSDoneInfo) { 21 | 
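// Record each completed DNS lookup for this download; the stored
// DNSDoneInfo values are consumed later, with DNSResults() flattening
// the resolved addresses and Errors() collecting resolution failures.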
glog.V(1).Infof("DNS result: %+v", ddi) 22 | da.DNSDone = append(da.DNSDone, ddi) 23 | } 24 | 25 | func (da *DownloadTracer) Configure(ctx context.Context) context.Context { 26 | traceObj := &httptrace.ClientTrace{ 27 | DNSDone: da.dnsDone, 28 | } 29 | 30 | return httptrace.WithClientTrace(ctx, traceObj) 31 | } 32 | 33 | func (da *DownloadTracer) DNSResults() []string { 34 | results := []string{} 35 | for _, ddi := range da.DNSDone { 36 | for _, addr := range ddi.Addrs { 37 | results = append(results, addr.String()) 38 | } 39 | } 40 | return results 41 | } 42 | 43 | func (da *DownloadTracer) Errors() []string { 44 | results := []string{} 45 | for _, ddi := range da.DNSDone { 46 | if ddi.Err != nil { 47 | results = append(results, ddi.Err.Error()) 48 | } 49 | } 50 | return results 51 | } 52 | -------------------------------------------------------------------------------- /go/downloader/download-tracer_test.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "testing" 7 | ) 8 | 9 | func Test_DownloadTracerBlank(t *testing.T) { 10 | dla := NewDownloadTracer() 11 | if len(dla.DNSResults()) != 0 { 12 | t.Error("Should have no DNS results") 13 | } 14 | if len(dla.Errors()) != 0 { 15 | t.Error("Should have no errors") 16 | } 17 | } 18 | 19 | func Test_SingleLookup(t *testing.T) { 20 | dla := NewDownloadTracer() 21 | 22 | ctx := dla.Configure(context.Background()) 23 | 24 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com/", nil) 25 | if err != nil { 26 | t.Fatal(err) 27 | } 28 | client := &http.Client{} 29 | resp, err := client.Do(req) 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | defer resp.Body.Close() 34 | 35 | if len(dla.DNSResults()) == 0 { 36 | t.Error("Should have DNS results!") 37 | } 38 | if len(dla.Errors()) != 0 { 39 | t.Error("Should have no DNS errors!") 40 | } 41 | } 42 | 43 | func Test_SingleLookupError(t *testing.T) { 44 | dla := NewDownloadTracer() 45 | 46 | ctx := dla.Configure(context.Background()) 47 | 48 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.testing/", nil) 49 | if err != nil { 50 | t.Fatal(err) 51 | } 52 | client := &http.Client{} 53 | resp, err := client.Do(req) 54 | if err == nil || resp != nil { 55 | t.Fatal("Expected an error") 56 | } 57 | 58 | if len(dla.Errors()) == 0 { 59 | t.Error("Should have DNS errors!") 60 | } 61 | if len(dla.DNSResults()) != 0 { 62 | t.Error("Should have no DNS results!") 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /go/downloader/downloader.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "net/url" 9 | "os" 10 | "strconv" 11 | "time" 12 | 13 | "github.com/golang/glog" 14 | ) 15 | 16 | type DownloadAction int 17 | 18 | const ( 19 | Create DownloadAction = 0 20 | Resume DownloadAction = 1 21 | UpToDate DownloadAction = 2 22 | ) 23 | 24 | func GetSizeAndDateOfFile(path string) (int64, time.Time, error) { 25 | curFile, err := os.Open(path) 26 | if err != nil { 27 | return 0, time.Time{}, err 28 | } 29 | stat, err := curFile.Stat() 30 | if err != nil { 31 | return 0, time.Time{}, err 32 | } 33 | curFile.Close() 34 | 35 | return stat.Size(), stat.ModTime(), nil 36 | } 37 | 38 | func determineAction(client *http.Client, crlUrl url.URL, path string) (DownloadAction, int64, int64) { 39 | 
szOnDisk, localDate, err := GetSizeAndDateOfFile(path) 40 | if err != nil { 41 | glog.V(1).Infof("[%s] CREATE: File not on disk: %s", crlUrl.String(), err) 42 | return Create, 0, 0 43 | } 44 | req, err := http.NewRequest("HEAD", crlUrl.String(), nil) 45 | if err != nil { 46 | return Create, szOnDisk, 0 47 | } 48 | req.Header.Add("X-Automated-Tool", "https://github.com/mozilla/crlite") 49 | 50 | resp, err := client.Do(req) 51 | if err != nil { 52 | return Create, szOnDisk, 0 53 | } 54 | 55 | eTag := resp.Header.Get("Etag") 56 | lastMod, err := http.ParseTime(resp.Header.Get("Last-Modified")) 57 | if err != nil { 58 | glog.V(1).Infof("[%s] CREATE: Invalid last-modified: %s [%s]", crlUrl.String(), err, resp.Header.Get("Last-Modified")) 59 | return Create, szOnDisk, 0 60 | } 61 | szOnServer, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) 62 | if err != nil { 63 | glog.V(1).Infof("[%s] CREATE: No content length: %s [%s]", crlUrl.String(), err, resp.Header.Get("Content-Length")) 64 | return Create, szOnDisk, 0 65 | } 66 | 67 | if localDate.Before(lastMod) { 68 | glog.V(1).Infof("[%s] CREATE: Local Date is before last modified header date, assuming out-of-date", crlUrl.String()) 69 | return Create, szOnDisk, szOnServer 70 | } 71 | 72 | if szOnServer == szOnDisk { 73 | glog.V(1).Infof("[%s] UP TO DATE", crlUrl.String()) 74 | return UpToDate, szOnDisk, szOnServer 75 | } 76 | 77 | if szOnServer > szOnDisk { 78 | if resp.Header.Get("Accept-Ranges") == "bytes" { 79 | glog.V(1).Infof("[%s] RESUME: { Already on disk: %d %s, Last-Modified: %s, Etag: %s, Length: %d }", crlUrl.String(), szOnDisk, localDate.String(), lastMod.String(), eTag, szOnServer) 80 | return Resume, szOnDisk, szOnServer 81 | } 82 | 83 | glog.V(1).Infof("[%s] Accept-Ranges not supported, unable to resume", crlUrl.String()) 84 | } 85 | 86 | glog.V(1).Infof("[%s] CREATE: Fallthrough", crlUrl.String()) 87 | return Create, szOnDisk, szOnServer 88 | } 89 | 90 | func download(ctx context.Context, crlUrl url.URL, path string, timeout time.Duration) error { 91 | client := &http.Client{Timeout: timeout} 92 | 93 | action, offset, size := determineAction(client, crlUrl, path) 94 | 95 | if action == UpToDate { 96 | return nil 97 | } 98 | 99 | req, err := http.NewRequestWithContext(ctx, "GET", crlUrl.String(), nil) 100 | if err != nil { 101 | return err 102 | } 103 | 104 | req.Header.Add("X-Automated-Tool", "https://github.com/mozilla/crlite") 105 | if action == Resume { 106 | req.Header.Add("Range", fmt.Sprintf("bytes=%d-", offset)) 107 | } 108 | 109 | resp, err := client.Do(req) 110 | if err != nil { 111 | return err 112 | } 113 | defer resp.Body.Close() 114 | 115 | var outFileParams int 116 | // Depending on what the server responds with, we may have to go back to Create 117 | switch resp.StatusCode { 118 | case http.StatusPartialContent: 119 | outFileParams = os.O_APPEND | os.O_WRONLY 120 | action = Resume 121 | glog.V(1).Infof("[%s] Successfully resumed download at offset %d", crlUrl.String(), offset) 122 | case http.StatusOK: 123 | outFileParams = os.O_TRUNC | os.O_CREATE | os.O_WRONLY 124 | action = Create 125 | default: 126 | return fmt.Errorf("Non-OK status: %s", resp.Status) 127 | } 128 | 129 | outFile, err := os.OpenFile(path, outFileParams, 0644) 130 | if err != nil { 131 | return err 132 | } 133 | defer outFile.Close() 134 | 135 | if ctx.Err() != nil { 136 | return ctx.Err() 137 | } 138 | 139 | 140 | 141 | // and copy from reader, propagating errors 142 |
totalBytes, err := io.Copy(outFile, resp.Body) 143 | if err != nil { 144 | return err 145 | } 146 | 147 | if action == Create && size != 0 && totalBytes != size { 148 | glog.Warningf("[%s] Didn't seem to download the right number of bytes, expected=%d got %d", 149 | crlUrl.String(), size, totalBytes) 150 | } 151 | 152 | if action == Resume && size != 0 && totalBytes+offset != size { 153 | glog.Warningf("[%s] Didn't seem to download the right number of bytes, expected=%d got %d with %d already local", 154 | crlUrl.String(), size, totalBytes, offset) 155 | } 156 | 157 | lastModStr := resp.Header.Get("Last-Modified") 158 | // http.TimeFormat is 29 characters 159 | if len(lastModStr) < 16 { 160 | glog.Infof("[%s] No compliant reported last-modified time, file may expire early: [%s]", crlUrl.String(), lastModStr) 161 | return nil 162 | } 163 | 164 | lastMod, err := http.ParseTime(resp.Header.Get("Last-Modified")) 165 | if err != nil { 166 | glog.Warningf("[%s] Couldn't parse modified time: %s [%s]", crlUrl.String(), err, lastModStr) 167 | return nil 168 | } 169 | 170 | if err := os.Chtimes(path, lastMod, lastMod); err != nil { 171 | glog.Warningf("Couldn't set modified time: %s", err) 172 | } 173 | return nil 174 | } 175 | 176 | func DownloadFileSync(ctx context.Context, crlUrl url.URL, 177 | path string, maxRetries uint, timeout time.Duration) error { 178 | glog.V(1).Infof("Downloading %s from %s", path, crlUrl.String()) 179 | 180 | var err error 181 | var i uint 182 | 183 | for ; i <= maxRetries; i++ { 184 | select { 185 | case <-ctx.Done(): 186 | glog.Infof("Signal caught, stopping threads at next opportunity.") 187 | return nil 188 | default: 189 | err = download(ctx, crlUrl, path, timeout) 190 | if err == nil { 191 | return nil 192 | } 193 | } 194 | glog.Infof("Failed to download %s (%d/%d): %s", path, i, maxRetries, err) 195 | } 196 | return err 197 | } 198 | -------------------------------------------------------------------------------- /go/downloader/downloader_test.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io/ioutil" 8 | "net/http" 9 | "net/http/httptest" 10 | "net/url" 11 | "os" 12 | "path/filepath" 13 | "sync" 14 | "testing" 15 | "time" 16 | ) 17 | 18 | func Test_DownloadNotFound(t *testing.T) { 19 | ts := httptest.NewServer(http.NotFoundHandler()) 20 | defer ts.Close() 21 | 22 | tmpfile, err := ioutil.TempFile("", "Test_DownloadNotFound") 23 | if err != nil { 24 | t.Error(err) 25 | } 26 | defer os.Remove(tmpfile.Name()) 27 | 28 | url, _ := url.Parse(ts.URL) 29 | 30 | err = DownloadFileSync(context.TODO(), *url, tmpfile.Name(), 3, 0) 31 | if err.Error() != "Non-OK status: 404 Not Found" { 32 | t.Error(err) 33 | } 34 | } 35 | 36 | func Test_DownloadOK(t *testing.T) { 37 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 38 | fmt.Fprintln(w, "Hello, client") 39 | })) 40 | defer ts.Close() 41 | 42 | tmpfile, err := ioutil.TempFile("", "Test_DownloadNotFound") 43 | if err != nil { 44 | t.Error(err) 45 | } 46 | defer os.Remove(tmpfile.Name()) 47 | 48 | url, _ := url.Parse(ts.URL) 49 | 50 | err = DownloadFileSync(context.TODO(), *url, tmpfile.Name(), 1, 0) 51 | if err != nil { 52 | t.Error(err) 53 | } 54 | 55 | content, err := ioutil.ReadFile(tmpfile.Name()) 56 | if err != nil { 57 | t.Error(err) 58 | } 59 | 60 | if string(content) != "Hello, client\n" { 61 | t.Logf("File contents: %s", content) 62 | t.Error("File contents not 
correct") 63 | } 64 | } 65 | 66 | type SingleFailureHandler struct { 67 | mu sync.Mutex // guards failedOnce 68 | failedOnce bool 69 | t *testing.T 70 | } 71 | 72 | func (h *SingleFailureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 73 | if r.Method != http.MethodGet { 74 | return 75 | } 76 | 77 | h.mu.Lock() 78 | defer h.mu.Unlock() 79 | if !h.failedOnce { 80 | h.failedOnce = true 81 | w.WriteHeader(http.StatusBadRequest) 82 | fmt.Fprintf(w, "failure") 83 | return 84 | } 85 | fmt.Fprintln(w, "Hello, client") 86 | } 87 | 88 | func Test_DownloadFailureWithoutRetry(t *testing.T) { 89 | ts := httptest.NewServer(http.Handler(&SingleFailureHandler{t: t})) 90 | defer ts.Close() 91 | 92 | tmpfile, err := ioutil.TempFile("", "Test_DownloadFailureWithoutRetry") 93 | if err != nil { 94 | t.Error(err) 95 | } 96 | defer os.Remove(tmpfile.Name()) 97 | 98 | url, _ := url.Parse(ts.URL) 99 | 100 | err = DownloadFileSync(context.TODO(), *url, tmpfile.Name(), 0, 0) 101 | if err == nil { 102 | t.Error("Should have failed") 103 | } 104 | } 105 | 106 | func Test_DownloadFailureWithRetry(t *testing.T) { 107 | ts := httptest.NewServer(http.Handler(&SingleFailureHandler{t: t})) 108 | defer ts.Close() 109 | 110 | tmpfile, err := ioutil.TempFile("", "Test_DownloadFailureWithRetry") 111 | if err != nil { 112 | t.Error(err) 113 | } 114 | defer os.Remove(tmpfile.Name()) 115 | 116 | url, _ := url.Parse(ts.URL) 117 | 118 | err = DownloadFileSync(context.TODO(), *url, tmpfile.Name(), 1, 0) 119 | if err != nil { 120 | t.Error(err) 121 | } 122 | 123 | content, err := ioutil.ReadFile(tmpfile.Name()) 124 | if err != nil { 125 | t.Error(err) 126 | } 127 | 128 | if string(content) != "Hello, client\n" { 129 | t.Logf("File contents: %s", content) 130 | t.Error("File contents not correct") 131 | } 132 | } 133 | 134 | func Test_DownloadResumeNotSupported(t *testing.T) { 135 | testcontent := []byte("download resume not supported test file's content\n") 136 | 137 | // Prepare a partially-downloaded file 138 | alreadydownloaded := testcontent[:4] 139 | downloadedfile, err := ioutil.TempFile("", "Test_DownloadResumeNotSupported.down") 140 | if err != nil { 141 | t.Fatal(err) 142 | } 143 | defer os.Remove(downloadedfile.Name()) 144 | 145 | if _, err := downloadedfile.Write(alreadydownloaded); err != nil { 146 | t.Fatal(err) 147 | } 148 | if err := downloadedfile.Close(); err != nil { 149 | t.Fatal(err) 150 | } 151 | 152 | // Server always returns the whole file 153 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 154 | _, _ = w.Write(testcontent) 155 | })) 156 | defer ts.Close() 157 | 158 | url, _ := url.Parse(ts.URL) 159 | 160 | err = DownloadFileSync(context.TODO(), *url, downloadedfile.Name(), 1, 0) 161 | if err != nil { 162 | t.Error(err) 163 | } 164 | 165 | // Check results 166 | content, err := ioutil.ReadFile(downloadedfile.Name()) 167 | if err != nil { 168 | t.Error(err) 169 | } 170 | 171 | if bytes.Equal(content, testcontent) == false { 172 | t.Logf("File contents: %s", string(content)) 173 | t.Error("File contents not appended") 174 | } 175 | } 176 | 177 | func Test_DownloadResume(t *testing.T) { 178 | testcontent := []byte("download resume test file's content\n") 179 | 180 | dir, err := ioutil.TempDir("", "Test_DownloadResume") 181 | if err != nil { 182 | t.Fatal(err) 183 | } 184 | t.Logf("Dir: %s", dir) 185 | defer os.RemoveAll(dir) 186 | 187 | err = ioutil.WriteFile(filepath.Join(dir, "Test_DownloadNotFound.file"), testcontent, 0644) 188 | if err != nil { 189 | 
t.Fatal(err) 190 | } 191 | 192 | // Start the server 193 | ts := httptest.NewServer(http.FileServer(http.Dir(dir))) 194 | defer ts.Close() 195 | 196 | // Prepare a partially-downloaded file 197 | alreadydownloaded := testcontent[:4] 198 | downloadedfile, err := ioutil.TempFile("", "Test_DownloadNotFound.down") 199 | if err != nil { 200 | t.Fatal(err) 201 | } 202 | defer os.Remove(downloadedfile.Name()) 203 | 204 | if _, err := downloadedfile.Write(alreadydownloaded); err != nil { 205 | t.Fatal(err) 206 | } 207 | if err := downloadedfile.Close(); err != nil { 208 | t.Fatal(err) 209 | } 210 | 211 | // Download 212 | url, _ := url.Parse(ts.URL) 213 | url.Path = "Test_DownloadNotFound.file" 214 | 215 | err = DownloadFileSync(context.TODO(), *url, downloadedfile.Name(), 1, 0) 216 | if err != nil { 217 | t.Error(err) 218 | } 219 | 220 | // Check result 221 | content, err := ioutil.ReadFile(downloadedfile.Name()) 222 | if err != nil { 223 | t.Error(err) 224 | } 225 | 226 | if bytes.Equal(content, testcontent) == false { 227 | t.Logf("File contents: %s", string(content)) 228 | t.Error("File contents not appended") 229 | } 230 | } 231 | 232 | func Test_GetSizeAndDateOfFile(t *testing.T) { 233 | tmpfile, err := ioutil.TempFile("", "Test_GetSizeAndDateOfFile") 234 | if err != nil { 235 | t.Error(err) 236 | } 237 | defer os.Remove(tmpfile.Name()) 238 | 239 | size, date, err := GetSizeAndDateOfFile(tmpfile.Name()) 240 | if err != nil { 241 | t.Error(err) 242 | } 243 | 244 | if size != 0 { 245 | t.Error("Size should be 0") 246 | } 247 | 248 | if time.Since(date) > time.Second { 249 | t.Error("Timestamp more than a second ago") 250 | } 251 | 252 | // Check that dates shift 253 | earlyDate := time.Now().AddDate(-1, 0, 0) 254 | _ = os.Chtimes(tmpfile.Name(), earlyDate, earlyDate) 255 | 256 | size, date, err = GetSizeAndDateOfFile(tmpfile.Name()) 257 | if err != nil { 258 | t.Error(err) 259 | } 260 | 261 | if size != 0 { 262 | t.Error("Size should still be 0") 263 | } 264 | 265 | if earlyDate.Sub(date) > time.Second { 266 | t.Error("Timestamp more than a second off") 267 | } 268 | 269 | // Make it non-zero bytes, resetting the date 270 | err = ioutil.WriteFile(tmpfile.Name(), []byte("ten bytes\n"), 0644) 271 | if err != nil { 272 | t.Fatal(err) 273 | } 274 | 275 | size, date, err = GetSizeAndDateOfFile(tmpfile.Name()) 276 | if err != nil { 277 | t.Error(err) 278 | } 279 | 280 | if size != 10 { 281 | t.Errorf("Size should be 10: %d", size) 282 | } 283 | 284 | if time.Since(date) > time.Second { 285 | t.Error("Timestamp more than a second ago") 286 | } 287 | } 288 | -------------------------------------------------------------------------------- /go/downloader/verifying-downloader.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/url" 7 | "os" 8 | "time" 9 | 10 | "github.com/golang/glog" 11 | ) 12 | 13 | type DownloadVerifier interface { 14 | IsValid(path string) error 15 | } 16 | 17 | /* 18 | * Returns: Boolean of whether the data at finalPath is now valid, and any error. It is possible 19 | * that err != nil and yet finalPath is valid, so callers should rely on the boolean and merely 20 | * log the error as needed. 
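 *
 * A typical call looks like this (a sketch; the verifier, auditor,
 * issuer, and crlUrl values are illustrative):
 *
 *   ok, err := DownloadAndVerifyFileSync(ctx, verifier, auditor, issuer,
 *       *crlUrl, "/crls/issuer.crl", 3, 30*time.Second)
 *   if !ok {
 *       // no usable file at the path; err explains why
 *   } else if err != nil {
 *       // the file is usable, but this refresh failed; worth logging
 *   }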
21 | */ 22 | func DownloadAndVerifyFileSync(ctx context.Context, verifyFunc DownloadVerifier, auditor DownloadAuditor, 23 | identifier DownloadIdentifier, crlUrl url.URL, finalPath string, maxRetries uint, 24 | timeout time.Duration) (bool, error) { 25 | 26 | dlTracer := NewDownloadTracer() 27 | auditCtx := dlTracer.Configure(ctx) 28 | 29 | tmpPath := fmt.Sprintf("%s.tmp", finalPath) 30 | defer func() { 31 | removeErr := os.Remove(tmpPath) 32 | if removeErr != nil && !os.IsNotExist(removeErr) { 33 | glog.Warningf("[%s] Failed to remove invalid tmp file %s: %s", identifier.ID(), tmpPath, removeErr) 34 | } 35 | }() 36 | 37 | attemptFallbackToExistingFile := func(err error) (bool, error) { 38 | existingValidErr := verifyFunc.IsValid(finalPath) 39 | if existingValidErr == nil { 40 | // The existing file at finalPath is OK. 41 | return true, err 42 | } 43 | // We don't log to the auditor here since the local file being bad isn't necessarily this run's fault, 44 | // and it will be handled later in aggregate-crls if it is relevant at that stage. 45 | combinedError := fmt.Errorf("[%s] Couldn't verify already-on-disk path %s. Local error=%s, Caused by=%s", 46 | identifier.ID(), finalPath, existingValidErr, err) 47 | glog.Error(combinedError) 48 | return false, combinedError 49 | } 50 | 51 | dlErr := DownloadFileSync(auditCtx, crlUrl, tmpPath, maxRetries, timeout) 52 | if dlErr != nil { 53 | auditor.FailedDownload(identifier, &crlUrl, dlTracer, dlErr) 54 | glog.Warningf("[%s] Failed to download from %s to tmp file %s: %s", identifier.ID(), crlUrl.String(), tmpPath, dlErr) 55 | 56 | return attemptFallbackToExistingFile(dlErr) 57 | } 58 | 59 | dlValidErr := verifyFunc.IsValid(tmpPath) 60 | if dlValidErr != nil { 61 | auditor.FailedVerifyUrl(identifier, &crlUrl, dlTracer, dlValidErr) 62 | 63 | return attemptFallbackToExistingFile(dlValidErr) 64 | } 65 | 66 | renameErr := os.Rename(tmpPath, finalPath) 67 | if renameErr != nil { 68 | glog.Errorf("[%s] Couldn't rename %s to %s: %s", identifier.ID(), tmpPath, finalPath, renameErr) 69 | 70 | return attemptFallbackToExistingFile(renameErr) 71 | } 72 | 73 | return true, nil 74 | 75 | } 76 | -------------------------------------------------------------------------------- /go/downloader/verifying-downloader_test.go: -------------------------------------------------------------------------------- 1 | package downloader 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/ioutil" 7 | "net/http" 8 | "net/http/httptest" 9 | "net/url" 10 | "os" 11 | "strings" 12 | "testing" 13 | ) 14 | 15 | type testIdentifier struct{} 16 | 17 | func (ti testIdentifier) ID() string { 18 | return "test identifier" 19 | } 20 | 21 | type testVerifier struct{} 22 | 23 | func (tv *testVerifier) IsValid(path string) error { 24 | data, err := ioutil.ReadFile(path) 25 | if err != nil { 26 | return err 27 | } 28 | if len(data) == 0 { 29 | return fmt.Errorf("Empty file") 30 | } 31 | return nil 32 | } 33 | 34 | type testAuditor struct{} 35 | 36 | func (ta *testAuditor) FailedDownload(issuer DownloadIdentifier, crlUrl *url.URL, dlTracer *DownloadTracer, err error) { 37 | } 38 | func (ta *testAuditor) FailedVerifyUrl(issuer DownloadIdentifier, crlUrl *url.URL, dlTracer *DownloadTracer, err error) { 39 | } 40 | func (ta *testAuditor) FailedVerifyPath(issuer DownloadIdentifier, crlUrl *url.URL, crlPath string, err error) { 41 | } 42 | 43 | func Test_NotFoundNotLocal(t *testing.T) { 44 | ts := httptest.NewServer(http.NotFoundHandler()) 45 | defer ts.Close() 46 | 47 | tmpfile, err := 
ioutil.TempFile("", "Test_NotFoundNotLocal") 48 | if err != nil { 49 | t.Error(err) 50 | } 51 | defer os.Remove(tmpfile.Name()) 52 | 53 | testUrl, _ := url.Parse(ts.URL) 54 | 55 | ctx := context.TODO() 56 | 57 | dataAtPathIsValid, err := DownloadAndVerifyFileSync(ctx, &testVerifier{}, &testAuditor{}, 58 | &testIdentifier{}, *testUrl, 59 | tmpfile.Name(), 1, 0) 60 | 61 | if err == nil { 62 | t.Error("Expected error") 63 | } 64 | if dataAtPathIsValid { 65 | t.Error("Expected not dataAtPathIsValid") 66 | } 67 | if !strings.Contains(err.Error(), "Local error=Empty file, Caused by=Non-OK status: 404 Not Found") { 68 | t.Error(err) 69 | } 70 | 71 | _, statErr := os.Stat(fmt.Sprintf("%s.tmp", tmpfile.Name())) 72 | if statErr == nil { 73 | t.Error("tmpfile not cleaned up") 74 | } 75 | } 76 | 77 | func Test_NotFoundButIsLocal(t *testing.T) { 78 | ts := httptest.NewServer(http.NotFoundHandler()) 79 | defer ts.Close() 80 | 81 | tmpfile, err := ioutil.TempFile("", "Test_NotFoundButIsLocal") 82 | if err != nil { 83 | t.Error(err) 84 | } 85 | defer os.Remove(tmpfile.Name()) 86 | ioutil.WriteFile(tmpfile.Name(), []byte("Local File"), 0644) 87 | 88 | testUrl, _ := url.Parse(ts.URL) 89 | 90 | ctx := context.TODO() 91 | 92 | dataAtPathIsValid, err := DownloadAndVerifyFileSync(ctx, &testVerifier{}, &testAuditor{}, 93 | &testIdentifier{}, *testUrl, 94 | tmpfile.Name(), 1, 0) 95 | 96 | if err == nil { 97 | t.Error("Expected error") 98 | } 99 | if !dataAtPathIsValid { 100 | t.Error("Expected dataAtPathIsValid!") 101 | } 102 | if err.Error() != "Non-OK status: 404 Not Found" { 103 | t.Error(err) 104 | } 105 | 106 | _, statErr := os.Stat(fmt.Sprintf("%s.tmp", tmpfile.Name())) 107 | if statErr == nil { 108 | t.Error("tmpfile not cleaned up") 109 | } 110 | } 111 | 112 | func Test_FoundRemoteButNotLocal(t *testing.T) { 113 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 114 | fmt.Fprintln(w, "Hello, client") 115 | })) 116 | defer ts.Close() 117 | 118 | tmpfile, err := ioutil.TempFile("", "Test_FoundRemoteButNotLocal") 119 | if err != nil { 120 | t.Error(err) 121 | } 122 | defer os.Remove(tmpfile.Name()) 123 | 124 | testUrl, _ := url.Parse(ts.URL) 125 | 126 | ctx := context.TODO() 127 | 128 | dataAtPathIsValid, err := DownloadAndVerifyFileSync(ctx, &testVerifier{}, &testAuditor{}, 129 | &testIdentifier{}, *testUrl, 130 | tmpfile.Name(), 1, 0) 131 | 132 | if err != nil { 133 | t.Errorf("Expected no error but got %s", err) 134 | } 135 | if !dataAtPathIsValid { 136 | t.Error("Expected dataAtPathIsValid") 137 | } 138 | _, statErr := os.Stat(fmt.Sprintf("%s.tmp", tmpfile.Name())) 139 | if statErr == nil { 140 | t.Error("tmpfile not cleaned up") 141 | } 142 | } 143 | 144 | func Test_FoundRemoteAndAlsoLocal(t *testing.T) { 145 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 146 | fmt.Fprintln(w, "Hello, client") 147 | })) 148 | defer ts.Close() 149 | 150 | tmpfile, err := ioutil.TempFile("", "Test_FoundRemoteAndAlsoLocal") 151 | if err != nil { 152 | t.Error(err) 153 | } 154 | defer os.Remove(tmpfile.Name()) 155 | ioutil.WriteFile(tmpfile.Name(), []byte("Local File"), 0644) 156 | 157 | testUrl, _ := url.Parse(ts.URL) 158 | 159 | ctx := context.TODO() 160 | 161 | dataAtPathIsValid, err := DownloadAndVerifyFileSync(ctx, &testVerifier{}, &testAuditor{}, 162 | &testIdentifier{}, *testUrl, 163 | tmpfile.Name(), 1, 0) 164 | 165 | if err != nil { 166 | t.Errorf("Expected no error but got %s", err) 167 | } 168 | if !dataAtPathIsValid { 169 | 
t.Error("Expected dataAtPathIsValid") 170 | } 171 | _, statErr := os.Stat(fmt.Sprintf("%s.tmp", tmpfile.Name())) 172 | if statErr == nil { 173 | t.Error("tmpfile not cleaned up") 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /go/engine/engine.go: -------------------------------------------------------------------------------- 1 | /* This Source Code Form is subject to the terms of the Mozilla Public 2 | * License, v. 2.0. If a copy of the MPL was not distributed with this 3 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 4 | 5 | package engine 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | "time" 11 | 12 | "github.com/golang/glog" 13 | "github.com/hashicorp/go-metrics" 14 | "github.com/mozilla/crlite/go/config" 15 | "github.com/mozilla/crlite/go/storage" 16 | "github.com/mozilla/crlite/go/telemetry" 17 | ) 18 | 19 | func GetConfiguredStorage(ctx context.Context, ctconfig *config.CTConfig, roStorage bool) (storage.CertDatabase, storage.RemoteCache) { 20 | var err error 21 | var storageDB storage.CertDatabase 22 | 23 | redisTimeoutDuration, err := time.ParseDuration(*ctconfig.RedisTimeout) 24 | if err != nil { 25 | glog.Fatalf("Could not parse RedisTimeout: %v", err) 26 | } 27 | 28 | remoteCache, err := storage.NewRedisCache(*ctconfig.RedisHost, redisTimeoutDuration) 29 | if err != nil { 30 | glog.Fatalf("Unable to configure Redis cache for host %v", *ctconfig.RedisHost) 31 | } 32 | 33 | storageDB, err = storage.NewCertDatabase(remoteCache, *ctconfig.CertPath, roStorage) 34 | if err != nil { 35 | glog.Fatalf("Unable to construct cache and/or persistent storage: %v", err) 36 | } 37 | 38 | return storageDB, remoteCache 39 | } 40 | 41 | func PrepareTelemetry(utilName string, ctconfig *config.CTConfig) { 42 | metricsConf := metrics.DefaultConfig(utilName) 43 | metricsConf.EnableHostname = false 44 | metricsConf.EnableHostnameLabel = false 45 | metricsConf.EnableRuntimeMetrics = false 46 | metricsConf.EnableServiceLabel = false 47 | 48 | if len(*ctconfig.StatsDHost) > 0 { 49 | metricsSink, err := metrics.NewStatsdSink(fmt.Sprintf("%s:%d", *ctconfig.StatsDHost, *ctconfig.StatsDPort)) 50 | if err != nil { 51 | glog.Fatal(err) 52 | } 53 | 54 | _, err = metrics.NewGlobal(metricsConf, metricsSink) 55 | if err != nil { 56 | glog.Fatal(err) 57 | } 58 | 59 | glog.Infof("%s is starting. Statistics are being reported to the StatsD server at %s:%d", 60 | utilName, *ctconfig.StatsDHost, *ctconfig.StatsDPort) 61 | 62 | return 63 | } 64 | 65 | infoDumpPeriod, err := time.ParseDuration(*ctconfig.StatsRefreshPeriod) 66 | if err != nil { 67 | glog.Fatalf("Could not parse StatsRefreshPeriod: %v", err) 68 | } 69 | 70 | glog.Infof("%s is starting. 
Local statistics will emit every: %s", 71 | utilName, infoDumpPeriod) 72 | 73 | metricsSink := metrics.NewInmemSink(infoDumpPeriod, 5*infoDumpPeriod) 74 | telemetry.NewMetricsDumper(metricsSink, infoDumpPeriod) 75 | 76 | _, err = metrics.NewGlobal(metricsConf, metricsSink) 77 | if err != nil { 78 | glog.Fatal(err) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /go/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/mozilla/crlite/go 2 | 3 | require ( 4 | github.com/bluele/gcache v0.0.0-20190518031135-bc40bd653833 5 | github.com/go-redis/redis v6.15.5+incompatible 6 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b 7 | github.com/google/certificate-transparency-go v1.1.0 8 | github.com/google/renameio v1.0.1 9 | github.com/gopherjs/gopherjs v0.0.0-20190915194858-d3ddacdb130f // indirect 10 | github.com/hashicorp/go-immutable-radix v1.1.0 // indirect 11 | github.com/hashicorp/go-metrics v0.5.3 12 | github.com/hashicorp/go-uuid v1.0.1 // indirect 13 | github.com/hashicorp/golang-lru v0.5.3 // indirect 14 | github.com/jpillora/backoff v1.0.0 15 | github.com/onsi/ginkgo v1.10.2 // indirect 16 | github.com/onsi/gomega v1.7.0 // indirect 17 | github.com/smartystreets/assertions v1.0.1 // indirect 18 | github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect 19 | golang.org/x/net v0.33.0 // indirect 20 | gopkg.in/ini.v1 v1.48.0 21 | ) 22 | 23 | go 1.13 24 | -------------------------------------------------------------------------------- /go/rootprogram/issuers.go: -------------------------------------------------------------------------------- 1 | package rootprogram 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "encoding/base64" 7 | "encoding/csv" 8 | "encoding/json" 9 | "encoding/pem" 10 | "fmt" 11 | "io" 12 | "io/ioutil" 13 | "net/url" 14 | "os" 15 | "strings" 16 | "sync" 17 | "time" 18 | 19 | "github.com/golang/glog" 20 | "github.com/google/certificate-transparency-go/x509" 21 | 22 | "github.com/mozilla/crlite/go" 23 | "github.com/mozilla/crlite/go/downloader" 24 | ) 25 | 26 | const ( 27 | kMozCCADBReport = "https://ccadb.my.salesforce-sites.com/mozilla/MozillaIntermediateCertsCSVReport" 28 | ) 29 | 30 | type issuerCert struct { 31 | cert *x509.Certificate 32 | subjectDN string 33 | pemInfo string 34 | } 35 | 36 | type IssuerData struct { 37 | certs []issuerCert 38 | } 39 | 40 | type EnrolledIssuer struct { 41 | UniqueID string `json:"uniqueID"` 42 | PubKeyHash string `json:"pubKeyHash"` 43 | Subject string `json:"subject"` 44 | Pem string `json:"pem"` 45 | } 46 | 47 | type MozIssuers struct { 48 | issuerMap map[string]IssuerData 49 | CrlMap types.IssuerCrlMap 50 | mutex *sync.Mutex 51 | DiskPath string 52 | ReportUrl string 53 | modTime time.Time 54 | } 55 | 56 | func NewMozillaIssuers() *MozIssuers { 57 | return &MozIssuers{ 58 | issuerMap: make(map[string]IssuerData, 0), 59 | CrlMap: make(types.IssuerCrlMap, 0), 60 | mutex: &sync.Mutex{}, 61 | DiskPath: fmt.Sprintf("%s/mozilla_issuers.csv", os.TempDir()), 62 | ReportUrl: kMozCCADBReport, 63 | } 64 | } 65 | 66 | type verifier struct { 67 | } 68 | 69 | func (v *verifier) IsValid(path string) error { 70 | mi := NewMozillaIssuers() 71 | return mi.LoadFromDisk(path) 72 | } 73 | 74 | type loggingAuditor struct{} 75 | 76 | func (ta *loggingAuditor) FailedDownload(issuer downloader.DownloadIdentifier, crlUrl *url.URL, 77 | dlTracer *downloader.DownloadTracer, err error) { 78 | 
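// loggingAuditor satisfies downloader.DownloadAuditor but only logs;
// a failed CCADB fetch is not fatal, because Load() can fall back to
// the previously downloaded report already on disk.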
glog.Warningf("Failed download of %s: %s", crlUrl.String(), err) 79 | } 80 | func (ta *loggingAuditor) FailedVerifyUrl(issuer downloader.DownloadIdentifier, crlUrl *url.URL, 81 | dlTracer *downloader.DownloadTracer, err error) { 82 | glog.Warningf("Failed verify of %s: %s", crlUrl.String(), err) 83 | } 84 | func (ta *loggingAuditor) FailedVerifyPath(issuer downloader.DownloadIdentifier, crlUrl *url.URL, crlPath string, 85 | err error) { 86 | glog.Warningf("Failed verify of %s (local: %s): %s", crlUrl.String(), crlPath, err) 87 | } 88 | 89 | type identifier struct{} 90 | 91 | func (i *identifier) ID() string { 92 | return "Mozilla Issuers" 93 | } 94 | 95 | func (mi *MozIssuers) Load() error { 96 | ctx := context.Background() 97 | 98 | dataUrl, err := url.Parse(mi.ReportUrl) 99 | if err != nil { 100 | glog.Fatalf("Couldn't parse CCADB URL of %s: %s", mi.ReportUrl, err) 101 | return err 102 | } 103 | 104 | isAcceptable, err := downloader.DownloadAndVerifyFileSync(ctx, &verifier{}, &loggingAuditor{}, &identifier{}, 105 | *dataUrl, mi.DiskPath, 3, 300*time.Second) 106 | 107 | if !isAcceptable { 108 | return err 109 | } 110 | 111 | if err != nil { 112 | glog.Warningf("Error encountered loading CCADB data, but able to proceed with previous data. Error: %s", err) 113 | } 114 | 115 | return mi.LoadFromDisk(mi.DiskPath) 116 | } 117 | 118 | func (mi *MozIssuers) LoadFromDisk(aPath string) error { 119 | fd, err := os.Open(aPath) 120 | if err != nil { 121 | return err 122 | } 123 | defer fd.Close() 124 | 125 | fi, err := os.Stat(aPath) 126 | if err != nil { 127 | return err 128 | } 129 | mi.modTime = fi.ModTime() 130 | return mi.parseCCADB(fd) 131 | } 132 | 133 | func (mi *MozIssuers) DatasetAge() time.Duration { 134 | if mi.modTime.IsZero() { 135 | return 0 136 | } 137 | return time.Since(mi.modTime) 138 | } 139 | 140 | func (mi *MozIssuers) GetIssuers() []types.Issuer { 141 | mi.mutex.Lock() 142 | defer mi.mutex.Unlock() 143 | 144 | issuers := make([]types.Issuer, len(mi.issuerMap)) 145 | i := 0 146 | 147 | for _, value := range mi.issuerMap { 148 | cert := value.certs[0].cert 149 | issuers[i] = types.NewIssuer(cert) 150 | i++ 151 | } 152 | return issuers 153 | } 154 | 155 | func normalizePem(input string) string { 156 | // Some consumers of the file produced by `SaveIssuersList` mistakenly 157 | // assume that the PEM encoding of a certificate is unique. This causes 158 | // some problems as the CCADB report often includes a certificate with 159 | // an unusual PEM presentation one day and a different presentation 160 | // another. (Usually a 65 character line that is later reflowed to 161 | // width 64.) As a work-around, we'll normalize to the PEM format 162 | // produced by the go standard library modulo the trailing newline. We 163 | // omit the trailing newline to minimize differences with the entries 164 | // in the CCADB report at the time of writing. 
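// For illustration only (the input names here are hypothetical, not taken
// from CCADB): two presentations of one certificate, wrapped at 65 and at
// 64 characters respectively, should normalize to byte-identical strings:
//
//	a := normalizePem(pemWrappedAt65) // pem.Encode reflows the body to 64 columns
//	b := normalizePem(pemWrappedAt64)
//	// a == b, so byte-wise comparisons of saved PEMs stay stable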
165 | // 166 | var pemBuf strings.Builder 167 | block, rest := pem.Decode([]byte(input)) 168 | if len(rest) != 0 { 169 | glog.Warningf("Ignored %d bytes of trailing data while normalizing this PEM: %s", len(rest), input) 170 | } 171 | if block != nil { pem.Encode(&pemBuf, block) } // pem.Decode returns a nil block for undecodable input; encoding nil would panic 172 | 173 | output := pemBuf.String() 174 | output = strings.TrimRight(output, "\n") 175 | 176 | return output 177 | } 178 | 179 | func (mi *MozIssuers) SaveIssuersList(filePath string) error { 180 | mi.mutex.Lock() 181 | defer mi.mutex.Unlock() 182 | certCount := 0 183 | 184 | issuers := make([]EnrolledIssuer, 0, len(mi.issuerMap)) 185 | for _, val := range mi.issuerMap { 186 | for _, cert := range val.certs { 187 | pubKeyHash := sha256.Sum256(cert.cert.RawSubjectPublicKeyInfo) 188 | uniqueID := sha256.Sum256(append(cert.cert.RawSubject, cert.cert.RawSubjectPublicKeyInfo...)) 189 | issuers = append(issuers, EnrolledIssuer{ 190 | UniqueID: base64.URLEncoding.EncodeToString(uniqueID[:]), 191 | PubKeyHash: base64.URLEncoding.EncodeToString(pubKeyHash[:]), 192 | Subject: cert.subjectDN, 193 | Pem: normalizePem(cert.pemInfo), 194 | }) 195 | certCount++ 196 | } 197 | } 198 | 199 | glog.Infof("Saving %d issuers and %d certs", len(mi.issuerMap), certCount) 200 | fd, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) 201 | if err != nil { 202 | glog.Errorf("Error opening enrolled issuer %s: %s", filePath, err) 203 | return err 204 | } 205 | 206 | enc := json.NewEncoder(fd) 207 | 208 | if err := enc.Encode(issuers); err != nil { 209 | glog.Errorf("Error marshaling enrolled issuer %s: %s", filePath, err) 210 | } 211 | 212 | if err = fd.Close(); err != nil { 213 | glog.Errorf("Error storing enrolled issuer %s: %s", filePath, err) 214 | } 215 | 216 | return err 217 | } 218 | 219 | func (mi *MozIssuers) LoadEnrolledIssuers(filePath string) error { 220 | bytes, err := ioutil.ReadFile(filePath) 221 | if err != nil { 222 | return err 223 | } 224 | 225 | list := make([]EnrolledIssuer, 0) 226 | err = json.Unmarshal(bytes, &list) 227 | if err != nil { 228 | return err 229 | } 230 | 231 | for _, ei := range list { 232 | cert, err := decodeCertificateFromPem(ei.Pem) 233 | if err != nil { 234 | return err 235 | } 236 | mi.InsertIssuerFromCertAndPem(cert, ei.Pem, nil) 237 | } 238 | 239 | return nil 240 | } 241 | 242 | func (mi *MozIssuers) IsIssuerInProgram(aIssuer types.Issuer) bool { 243 | _, ok := mi.issuerMap[aIssuer.ID()] 244 | return ok 245 | } 246 | 247 | func (mi *MozIssuers) GetCertificateForIssuer(aIssuer types.Issuer) (*x509.Certificate, error) { 248 | mi.mutex.Lock() 249 | defer mi.mutex.Unlock() 250 | 251 | entry, ok := mi.issuerMap[aIssuer.ID()] 252 | if !ok { 253 | return nil, fmt.Errorf("Unknown issuer: %s", aIssuer.ID()) 254 | } 255 | return entry.certs[0].cert, nil 256 | } 257 | 258 | func (mi *MozIssuers) GetSubjectForIssuer(aIssuer types.Issuer) (string, error) { 259 | mi.mutex.Lock() 260 | defer mi.mutex.Unlock() 261 | 262 | entry, ok := mi.issuerMap[aIssuer.ID()] 263 | if !ok { 264 | return "", fmt.Errorf("Unknown issuer: %s", aIssuer.ID()) 265 | } 266 | return entry.certs[0].subjectDN, nil 267 | } 268 | 269 | func decodeCertificateFromPem(aPem string) (*x509.Certificate, error) { 270 | block, rest := pem.Decode([]byte(aPem)) 271 | 272 | if block == nil { 273 | return nil, fmt.Errorf("Not a valid PEM") 274 | } 275 | 276 | if len(rest) != 0 { 277 | return nil, fmt.Errorf("Extra PEM data") 278 | } 279 | 280 | return x509.ParseCertificate(block.Bytes) 281 | } 282 | 283 | func decodeCertificateFromRow(aColMap 
map[string]int, aRow []string, aLineNum int) (*x509.Certificate, error) { 284 | p := strings.Trim(aRow[aColMap["PEM"]], "'") 285 | 286 | cert, err := decodeCertificateFromPem(p) 287 | if err != nil { 288 | return nil, fmt.Errorf("%s at line %d", err, aLineNum) 289 | } 290 | return cert, nil 291 | } 292 | 293 | func decodeCrlsFromRow(aColMap map[string]int, aRow []string, aLineNum int) ([]string, error) { 294 | crls := []string{} 295 | fullCrlStr := aRow[aColMap["Full CRL Issued By This CA"]] 296 | fullCrlStr = strings.TrimSpace(fullCrlStr) 297 | if fullCrlStr != "" { 298 | fullCrlUrl, err := url.Parse(fullCrlStr) 299 | if err != nil { 300 | glog.Warningf("decodeCrlsFromRow: Line %d: Could not parse %q as URL: %v", aLineNum, fullCrlStr, err) 301 | } else if fullCrlUrl.Scheme != "http" && fullCrlUrl.Scheme != "https" { 302 | glog.Warningf("decodeCrlsFromRow: Line %d: Unknown URL scheme in %q", aLineNum, fullCrlUrl.String()) 303 | } else { 304 | crls = append(crls, fullCrlUrl.String()) 305 | } 306 | } 307 | 308 | partCrlJson := aRow[aColMap["JSON Array of Partitioned CRLs"]] 309 | partCrlJson = strings.Trim(strings.TrimSpace(partCrlJson), "[]") 310 | partCrls := strings.Split(partCrlJson, ",") 311 | for _, crl := range partCrls { 312 | crl = strings.Trim(strings.TrimSpace(crl), "\"") // entries of the JSON array are quoted strings; strip the quotes so url.Parse sees a scheme 313 | if crl == "" { 314 | continue 315 | } 316 | crlUrl, err := url.Parse(crl) 317 | if err != nil { 318 | glog.Warningf("decodeCrlsFromRow: Line %d: Could not parse %q as URL: %v", aLineNum, crl, err) 319 | } else if crlUrl.Scheme != "http" && crlUrl.Scheme != "https" { 320 | glog.Warningf("decodeCrlsFromRow: Line %d: Unknown URL scheme in %q", aLineNum, crlUrl.String()) 321 | } else { 322 | crls = append(crls, crlUrl.String()) 323 | } 324 | } 325 | 326 | return crls, nil 327 | } 328 | 329 | func (mi *MozIssuers) InsertIssuerFromCertAndPem(aCert *x509.Certificate, aPem string, crls []string) types.Issuer { 330 | issuer := types.NewIssuer(aCert) 331 | ic := issuerCert{ 332 | cert: aCert, 333 | subjectDN: aCert.Subject.String(), 334 | pemInfo: aPem, 335 | } 336 | 337 | crlSet, exists := mi.CrlMap[issuer.ID()] 338 | if !exists { 339 | crlSet = make(map[string]bool, 0) 340 | } 341 | for _, crl := range crls { 342 | crlSet[crl] = true 343 | } 344 | mi.CrlMap[issuer.ID()] = crlSet 345 | 346 | v, exists := mi.issuerMap[issuer.ID()] 347 | if exists { 348 | glog.V(1).Infof("[%s] Duplicate issuer ID: %v with %v", issuer.ID(), v, aCert.Subject.String()) 349 | v.certs = append(v.certs, ic) 350 | mi.issuerMap[issuer.ID()] = v 351 | return issuer 352 | } 353 | 354 | mi.issuerMap[issuer.ID()] = IssuerData{ 355 | certs: []issuerCert{ic}, 356 | } 357 | 358 | return issuer 359 | } 360 | 361 | func (mi *MozIssuers) NewTestIssuerFromSubjectString(aSub string) types.Issuer { 362 | issuer := types.NewIssuerFromString(aSub) 363 | ic := issuerCert{ 364 | subjectDN: aSub, 365 | } 366 | mi.issuerMap[issuer.ID()] = IssuerData{ 367 | certs: []issuerCert{ic}, 368 | } 369 | return issuer 370 | } 371 | 372 | func (mi *MozIssuers) parseCCADB(aStream io.Reader) error { 373 | mi.mutex.Lock() 374 | defer mi.mutex.Unlock() 375 | 376 | reader := csv.NewReader(aStream) 377 | columnMap := make(map[string]int) 378 | columns, err := reader.Read() 379 | if err != nil { 380 | return err 381 | } 382 | 383 | for index, attr := range columns { 384 | columnMap[attr] = index 385 | } 386 | 387 | lineNum := 1 388 | for { 389 | row, err := reader.Read() 390 | if err == io.EOF { 391 | break 392 | } 393 | if err != nil { 394 | return err 395 | } 396 | lineNum += 1 397 | 398 | cert, 
err := decodeCertificateFromRow(columnMap, row, lineNum) 399 | if err != nil { 400 | return err 401 | } 402 | 403 | crls, err := decodeCrlsFromRow(columnMap, row, lineNum) 404 | if err != nil { 405 | return err 406 | } 407 | 408 | _ = mi.InsertIssuerFromCertAndPem(cert, strings.Trim(row[columnMap["PEM"]], "'"), crls) 409 | lineNum += strings.Count(strings.Join(row, ""), "\n") 410 | } 411 | 412 | return nil 413 | } 414 | -------------------------------------------------------------------------------- /go/storage/mockcache.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "crypto/rand" 5 | "encoding/base64" 6 | "encoding/json" 7 | "fmt" 8 | "path/filepath" // used for glob-like matching in Keys 9 | "sort" 10 | "strings" 11 | "sync" 12 | "time" 13 | 14 | "github.com/golang/glog" 15 | "github.com/mozilla/crlite/go" 16 | ) 17 | 18 | type MockRemoteCache struct { 19 | mu sync.Mutex 20 | Data map[string][]string 21 | Expirations map[string]time.Time 22 | Duplicate int 23 | CommitLock *string 24 | Epoch uint64 25 | } 26 | 27 | func NewMockRemoteCache() *MockRemoteCache { 28 | return &MockRemoteCache{ 29 | Data: make(map[string][]string), 30 | Expirations: make(map[string]time.Time), 31 | Duplicate: 0, 32 | } 33 | } 34 | 35 | func (ec *MockRemoteCache) cleanupExpiry() { 36 | // ec.mu must be held 37 | now := time.Now() 38 | for key, timestamp := range ec.Expirations { 39 | if timestamp.Before(now) { 40 | delete(ec.Data, key) 41 | delete(ec.Expirations, key) 42 | } 43 | } 44 | } 45 | 46 | func (ec *MockRemoteCache) SetInsert(key string, entry string) (bool, error) { 47 | ec.mu.Lock() 48 | defer ec.mu.Unlock() 49 | count := len(ec.Data[key]) 50 | 51 | idx := sort.Search(count, func(i int) bool { 52 | return strings.Compare(entry, ec.Data[key][i]) <= 0 53 | }) 54 | 55 | var cmp int 56 | if idx < count { 57 | cmp = strings.Compare(entry, ec.Data[key][idx]) 58 | } 59 | 60 | if idx < count && cmp == 0 { 61 | glog.V(3).Infof("[%s] Entry already known: %s (pos=%d)", key, entry, idx) 62 | return false, nil 63 | } 64 | 65 | // Non-allocating insert, see https://github.com/golang/go/wiki/SliceTricks 66 | glog.V(3).Infof("[%s] Entry unknown: %s (pos=%d)", key, entry, idx) 67 | ec.Data[key] = append(ec.Data[key], "") 68 | copy(ec.Data[key][idx+1:], ec.Data[key][idx:]) 69 | ec.Data[key][idx] = entry 70 | return true, nil 71 | } 72 | 73 | func (ec *MockRemoteCache) setRemove(key string, entry string) error { 74 | ec.mu.Lock() 75 | defer ec.mu.Unlock() 76 | count := len(ec.Data[key]) 77 | 78 | idx := sort.Search(count, func(i int) bool { 79 | return strings.Compare(entry, ec.Data[key][i]) <= 0 80 | }) 81 | 82 | var cmp int 83 | if idx < count { 84 | cmp = strings.Compare(entry, ec.Data[key][idx]) 85 | } 86 | 87 | if idx < count && cmp == 0 { 88 | if count == 1 { 89 | delete(ec.Data, key) 90 | } else { 91 | // Remove in place: a swap-with-last delete would break the sorted order that sort.Search relies on 92 | ec.Data[key] = append(ec.Data[key][:idx], ec.Data[key][idx+1:]...) 93 | } 94 | return nil 95 | } 96 | 97 | return nil 98 | } 99 | 100 | func (ec *MockRemoteCache) SetRemove(key string, entries []string) error { 101 | for _, entry := range entries { 102 | err := ec.setRemove(key, entry) 103 | if err != nil { 104 | return err 105 | } 106 | } 107 | ec.mu.Lock() 108 | defer ec.mu.Unlock() 109 | ec.cleanupExpiry() 110 | return nil 111 | } 112 | 113 | func (ec *MockRemoteCache) SetContains(key string, entry string) (bool, error) { 114 | ec.mu.Lock() 115 | defer ec.mu.Unlock() 116 | ec.cleanupExpiry() 117 | count := 
len(ec.Data[key]) 118 | 119 | idx := sort.Search(count, func(i int) bool { 120 | return strings.Compare(entry, ec.Data[key][i]) <= 0 121 | }) 122 | 123 | var cmp int 124 | if idx < count { 125 | cmp = strings.Compare(entry, ec.Data[key][idx]) 126 | } 127 | 128 | if idx < count && cmp == 0 { 129 | return true, nil 130 | } 131 | 132 | return false, nil 133 | } 134 | 135 | func (ec *MockRemoteCache) SetList(key string) ([]string, error) { 136 | ec.mu.Lock() 137 | defer ec.mu.Unlock() 138 | ec.cleanupExpiry() 139 | return ec.Data[key], nil 140 | } 141 | 142 | func (ec *MockRemoteCache) SetToChan(key string, c chan<- string) error { 143 | ec.mu.Lock() 144 | defer ec.mu.Unlock() 145 | defer close(c) 146 | ec.cleanupExpiry() 147 | for i := 0; i < ec.Duplicate+1; i++ { 148 | for _, v := range ec.Data[key] { 149 | c <- v 150 | } 151 | } 152 | return nil 153 | } 154 | 155 | func (ec *MockRemoteCache) SetCardinality(key string) (int, error) { 156 | ec.mu.Lock() 157 | defer ec.mu.Unlock() 158 | return len(ec.Data[key]), nil 159 | } 160 | 161 | func (ec *MockRemoteCache) Exists(key string) (bool, error) { 162 | ec.mu.Lock() 163 | defer ec.mu.Unlock() 164 | ec.cleanupExpiry() 165 | _, ok := ec.Data[key] 166 | return ok, nil 167 | } 168 | 169 | func (ec *MockRemoteCache) ExpireAt(key string, expTime time.Time) error { 170 | ec.mu.Lock() 171 | defer ec.mu.Unlock() 172 | ec.Expirations[key] = expTime 173 | return nil 174 | } 175 | 176 | func (ec *MockRemoteCache) KeysToChan(pattern string, c chan<- string) error { 177 | ec.mu.Lock() 178 | defer ec.mu.Unlock() 179 | defer close(c) 180 | 181 | for key := range ec.Data { 182 | matched, err := filepath.Match(pattern, key) 183 | if err != nil { 184 | return err 185 | } 186 | if matched { 187 | c <- key 188 | } 189 | } 190 | 191 | return nil 192 | } 193 | 194 | func (ec *MockRemoteCache) StoreLogState(log *types.CTLogState) error { 195 | ec.mu.Lock() 196 | defer ec.mu.Unlock() 197 | encoded, err := json.Marshal(log) 198 | if err != nil { 199 | return err 200 | } 201 | 202 | ec.Data["log::"+log.ShortURL] = []string{string(encoded)} 203 | return nil 204 | } 205 | 206 | func (ec *MockRemoteCache) LoadLogState(shortUrl string) (*types.CTLogState, error) { 207 | ec.mu.Lock() 208 | defer ec.mu.Unlock() 209 | data, ok := ec.Data["log::"+shortUrl] 210 | if !ok { 211 | return nil, fmt.Errorf("Log state not found") 212 | } 213 | if len(data) != 1 { 214 | return nil, fmt.Errorf("Unexpected number of log states") 215 | } 216 | 217 | var log types.CTLogState 218 | if err := json.Unmarshal([]byte(data[0]), &log); err != nil { 219 | return nil, err 220 | } 221 | return &log, nil 222 | } 223 | 224 | func (ec *MockRemoteCache) LoadAllLogStates() ([]types.CTLogState, error) { 225 | ec.mu.Lock() 226 | defer ec.mu.Unlock() 227 | var logStates []types.CTLogState 228 | for key, value := range ec.Data { 229 | if strings.HasPrefix(key, "log::") { 230 | var log types.CTLogState 231 | if err := json.Unmarshal([]byte(value[0]), &log); err != nil { 232 | return nil, err 233 | } 234 | logStates = append(logStates, log) 235 | } 236 | } 237 | return logStates, nil 238 | } 239 | 240 | func (ec *MockRemoteCache) Migrate(logData *types.CTLogMetadata) error { 241 | ec.mu.Lock() 242 | defer ec.mu.Unlock() 243 | return nil 244 | } 245 | 246 | func (ec *MockRemoteCache) AcquireCommitLock() (*string, error) { 247 | ec.mu.Lock() 248 | defer ec.mu.Unlock() 249 | randomBytes := make([]byte, 16) 250 | if _, err := rand.Read(randomBytes); err != nil { 251 | return nil, err 252 | } 253 | commitLockToken := 
base64.URLEncoding.EncodeToString(randomBytes) 254 | if ec.CommitLock == nil { 255 | ec.CommitLock = &commitLockToken 256 | return &commitLockToken, nil 257 | } 258 | return nil, nil 259 | } 260 | 261 | func (ec *MockRemoteCache) ReleaseCommitLock(aToken string) { 262 | ec.mu.Lock() 263 | defer ec.mu.Unlock() 264 | hasLock := ec.CommitLock != nil && *ec.CommitLock == aToken 265 | if hasLock { 266 | ec.CommitLock = nil 267 | } 268 | } 269 | 270 | func (ec *MockRemoteCache) HasCommitLock(aToken string) (bool, error) { 271 | ec.mu.Lock() 272 | defer ec.mu.Unlock() 273 | return ec.CommitLock != nil && *ec.CommitLock == aToken, nil 274 | } 275 | 276 | func (ec *MockRemoteCache) GetEpoch() (uint64, error) { 277 | ec.mu.Lock() 278 | defer ec.mu.Unlock() 279 | return ec.Epoch, nil 280 | } 281 | 282 | func (ec *MockRemoteCache) NextEpoch() error { 283 | ec.mu.Lock() 284 | defer ec.mu.Unlock() 285 | ec.Epoch += 1 286 | return nil 287 | } 288 | 289 | func (ec *MockRemoteCache) Restore(aEpoch uint64, aLogStates []types.CTLogState) error { 290 | ec.mu.Lock() 291 | ec.Epoch = aEpoch 292 | ec.mu.Unlock() 293 | 294 | for key, _ := range ec.Data { 295 | if strings.HasPrefix(key, "log::") { 296 | delete(ec.Data, key) 297 | } 298 | } 299 | 300 | for _, logState := range aLogStates { 301 | err := ec.StoreLogState(&logState) 302 | if err != nil { 303 | return err 304 | } 305 | } 306 | 307 | return nil 308 | } 309 | 310 | func (ec *MockRemoteCache) AddPreIssuerAlias(aPreIssuer types.Issuer, aIssuer types.Issuer) error { 311 | key := fmt.Sprintf("preissuer::%s", aPreIssuer.ID()) 312 | added, err := ec.SetInsert(key, aIssuer.ID()) 313 | if err == nil && added { 314 | glog.Warningf("Added preissuer alias %s -> %s", aPreIssuer.ID(), aIssuer.ID()) 315 | } 316 | return err 317 | } 318 | 319 | func (ec *MockRemoteCache) GetPreIssuerAliases(aPreIssuer types.Issuer) ([]types.Issuer, error) { 320 | key := fmt.Sprintf("preissuer::%s", aPreIssuer.ID()) 321 | aliases, err := ec.SetList(key) 322 | if err != nil { 323 | return nil, err 324 | } 325 | 326 | issuerList := make([]types.Issuer, 0, len(aliases)) 327 | for _, alias := range aliases { 328 | issuerList = append(issuerList, types.NewIssuerFromString(alias)) 329 | } 330 | return issuerList, nil 331 | } 332 | -------------------------------------------------------------------------------- /go/storage/rediscache.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "crypto/rand" 5 | "encoding/base64" 6 | "encoding/json" 7 | "fmt" 8 | "net/url" 9 | "strconv" 10 | "strings" 11 | "time" 12 | 13 | "github.com/go-redis/redis" 14 | "github.com/golang/glog" 15 | 16 | "github.com/mozilla/crlite/go" 17 | ) 18 | 19 | const EMPTY_QUEUE string = "redis: nil" 20 | const NO_EXPIRATION time.Duration = 0 21 | 22 | // The commit lock is acquired in aggregate-known before cached serials are 23 | // written to disk. It is held until aggregate-known is done reading serials 24 | // from disk. We set a 4 hour expiry on the commit lock in case the 25 | // aggregate-known process is abruptly terminated. The commit process is 26 | // fault-tolerant and will not leave persistent storage in a bad state. The 27 | // lock expiry just ensures that the next aggregate-known process will get a 28 | // chance to run. 
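//
// A minimal sketch of the intended call pattern (the caller and variable
// names here are illustrative, not part of this package):
//
//	token, err := cache.AcquireCommitLock()
//	if err != nil || token == nil {
//		return // Redis failed, or another aggregate-known holds the lock
//	}
//	defer cache.ReleaseCommitLock(*token)
//	// ... write cached serials to disk, then read them back ...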
29 | const COMMIT_LOCK_KEY string = "lock::commit" 30 | const COMMIT_LOCK_EXPIRATION time.Duration = 4 * time.Hour 31 | 32 | const EPOCH_KEY string = "epoch" 33 | 34 | type RedisCache struct { 35 | client *redis.Client 36 | } 37 | 38 | func NewRedisCache(addr string, cacheTimeout time.Duration) (*RedisCache, error) { 39 | rdb := redis.NewClient(&redis.Options{ 40 | Addr: addr, 41 | MaxRetries: 10, 42 | MaxRetryBackoff: 5 * time.Second, 43 | ReadTimeout: cacheTimeout, 44 | WriteTimeout: cacheTimeout, 45 | }) 46 | 47 | statusr := rdb.Ping() 48 | if statusr.Err() != nil { 49 | return nil, statusr.Err() 50 | } 51 | 52 | rc := &RedisCache{rdb} 53 | err := rc.MemoryPolicyCorrect() 54 | if err != nil { 55 | glog.Warning(err) 56 | } 57 | 58 | return rc, nil 59 | } 60 | 61 | func (rc *RedisCache) MemoryPolicyCorrect() error { 62 | // maxmemory_policy should be `noeviction` 63 | confr := rc.client.Info("memory") 64 | if confr.Err() != nil { 65 | return confr.Err() 66 | } 67 | if strings.Contains(confr.Val(), "maxmemory_policy:noeviction") { 68 | return nil 69 | } 70 | return fmt.Errorf("Redis maxmemory_policy should be `noeviction`. Memory config is set to %s", 71 | confr.Val()) 72 | } 73 | 74 | func (rc *RedisCache) SetInsert(key string, entry string) (bool, error) { 75 | ir := rc.client.SAdd(key, entry) 76 | added, err := ir.Result() 77 | if err != nil && strings.HasPrefix(err.Error(), "OOM") { 78 | glog.Fatalf("Out of memory on Redis insert of entry %s into key %s, error %v", entry, key, err.Error()) 79 | } 80 | return added == 1, err 81 | } 82 | 83 | func (rc *RedisCache) SetRemove(key string, entries []string) error { 84 | batchSize := 1024 85 | for batchStart := 0; batchStart < len(entries); batchStart += batchSize { 86 | batchEnd := batchStart + batchSize 87 | if batchEnd > len(entries) { 88 | batchEnd = len(entries) 89 | } 90 | batch := entries[batchStart:batchEnd] 91 | _, err := rc.client.Pipelined(func(pipe redis.Pipeliner) error { 92 | for _, entry := range batch { 93 | err := pipe.SRem(key, entry).Err() 94 | if err != nil { 95 | return err 96 | } 97 | } 98 | return nil 99 | }) 100 | if err != nil { 101 | return err 102 | } 103 | } 104 | return nil 105 | } 106 | 107 | func (rc *RedisCache) SetContains(key string, entry string) (bool, error) { 108 | br := rc.client.SIsMember(key, entry) 109 | return br.Result() 110 | } 111 | 112 | func (rc *RedisCache) SetList(key string) ([]string, error) { 113 | slicer := rc.client.SMembers(key) 114 | return slicer.Result() 115 | } 116 | 117 | func (rc *RedisCache) SetToChan(key string, c chan<- string) error { 118 | defer close(c) 119 | scanres := rc.client.SScan(key, 0, "", 0) 120 | err := scanres.Err() 121 | if err != nil { 122 | return err 123 | } 124 | 125 | iter := scanres.Iterator() 126 | 127 | for iter.Next() { 128 | c <- iter.Val() 129 | } 130 | 131 | return iter.Err() 132 | } 133 | 134 | func (rc *RedisCache) SetCardinality(key string) (int, error) { 135 | v, err := rc.client.SCard(key).Result() 136 | return int(v), err 137 | } 138 | 139 | func (rc *RedisCache) Exists(key string) (bool, error) { 140 | ir := rc.client.Exists(key) 141 | count, err := ir.Result() 142 | return count == 1, err 143 | } 144 | 145 | func (rc *RedisCache) ExpireAt(key string, aExpTime time.Time) error { 146 | br := rc.client.ExpireAt(key, aExpTime) 147 | return br.Err() 148 | } 149 | 150 | func (rc *RedisCache) KeysToChan(pattern string, c chan<- string) error { 151 | defer close(c) 152 | scanres := rc.client.Scan(0, pattern, 0) 153 | err := scanres.Err() 154 | if err 
!= nil { 155 | return err 156 | } 157 | 158 | iter := scanres.Iterator() 159 | 160 | for iter.Next() { 161 | c <- iter.Val() 162 | } 163 | 164 | return iter.Err() 165 | } 166 | 167 | func shortUrlToLogKey(shortUrl string) string { 168 | return fmt.Sprintf("log::%s", strings.TrimRight(shortUrl, "/")) 169 | } 170 | 171 | func (ec *RedisCache) Migrate(logData *types.CTLogMetadata) error { 172 | logUrlObj, err := url.Parse(logData.URL) 173 | if err != nil { 174 | return err 175 | } 176 | 177 | shortUrl := logUrlObj.Host + strings.TrimRight(logUrlObj.Path, "/") 178 | newKey := shortUrlToLogKey(shortUrl) 179 | _, err = ec.client.Get(newKey).Bytes() 180 | if err != nil && err != redis.Nil { 181 | return err 182 | } 183 | haveNew := err != redis.Nil 184 | 185 | oldKey := newKey + "/" 186 | oldData, err := ec.client.Get(oldKey).Bytes() 187 | if err != nil && err != redis.Nil { 188 | return err 189 | } 190 | haveOld := err != redis.Nil 191 | 192 | // If we have both new and old data, then just delete old. 193 | if haveOld && haveNew { 194 | ec.client.Del(oldKey) 195 | return nil 196 | } 197 | 198 | // If we have old data but not new, migrate. 199 | if haveOld { 200 | var log types.CTLogState 201 | if err = json.Unmarshal(oldData, &log); err != nil { 202 | return err 203 | } 204 | if err = ec.StoreLogState(&log); err != nil { 205 | return err 206 | } 207 | ec.client.Del(oldKey) 208 | return nil 209 | } 210 | 211 | // No data. Nothing to do. 212 | return nil 213 | } 214 | 215 | func (ec *RedisCache) StoreLogState(log *types.CTLogState) error { 216 | encoded, err := json.Marshal(log) 217 | if err != nil { 218 | return err 219 | } 220 | 221 | return ec.client.Set(shortUrlToLogKey(log.ShortURL), encoded, NO_EXPIRATION).Err() 222 | } 223 | 224 | func (ec *RedisCache) LoadLogState(shortUrl string) (*types.CTLogState, error) { 225 | data, err := ec.client.Get(shortUrlToLogKey(shortUrl)).Bytes() 226 | if err != nil { 227 | return nil, err 228 | } 229 | 230 | var log types.CTLogState 231 | if err = json.Unmarshal(data, &log); err != nil { 232 | return nil, err 233 | } 234 | return &log, nil 235 | } 236 | 237 | func (ec *RedisCache) LoadAllLogStates() ([]types.CTLogState, error) { 238 | ctLogList := make([]types.CTLogState, 0) 239 | keyChan := make(chan string) 240 | go func() { 241 | err := ec.KeysToChan("log::*", keyChan) 242 | if err != nil { 243 | glog.Fatalf("Couldn't list CT logs from cache: %s", err) 244 | } 245 | }() 246 | 247 | for entry := range keyChan { 248 | data, err := ec.client.Get(entry).Bytes() 249 | if err != nil { 250 | return nil, fmt.Errorf("Couldn't parse CT logs metadata: %s", err) 251 | } 252 | 253 | ctLogList = append(ctLogList, types.CTLogState{}) 254 | if err := json.Unmarshal(data, &ctLogList[len(ctLogList)-1]); err != nil { 255 | return nil, fmt.Errorf("Couldn't parse CT logs metadata: %s", err) 256 | } 257 | } 258 | 259 | return ctLogList, nil 260 | } 261 | 262 | func (ec *RedisCache) AcquireCommitLock() (*string, error) { 263 | randomBytes := make([]byte, 16) 264 | if _, err := rand.Read(randomBytes); err != nil { 265 | return nil, err 266 | } 267 | commitLockToken := base64.URLEncoding.EncodeToString(randomBytes) 268 | 269 | // SETNX is a set-if-not-set primitive. Returns true if commitLockToken 270 | // is the new value associated with COMMIT_LOCK_KEY. Returns false or 271 | // an error otherwise. 
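// For reference, the call below is equivalent to the Redis command
// SET lock::commit <token> EX 14400 NX: it replies nil (set == false)
// whenever some other process already holds the key.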
272 | set, err := ec.client.SetNX(COMMIT_LOCK_KEY, commitLockToken, COMMIT_LOCK_EXPIRATION).Result() 273 | if err != nil || !set { 274 | return nil, err 275 | } 276 | return &commitLockToken, err 277 | } 278 | 279 | func (ec *RedisCache) ReleaseCommitLock(aToken string) { 280 | hasLock, err := ec.HasCommitLock(aToken) 281 | if err == nil && hasLock { 282 | ec.client.Del(COMMIT_LOCK_KEY) 283 | } 284 | } 285 | 286 | func (ec *RedisCache) HasCommitLock(aToken string) (bool, error) { 287 | lockHolder, err := ec.client.Get(COMMIT_LOCK_KEY).Result() 288 | if err == redis.Nil { // COMMIT_LOCK_KEY not set 289 | return false, nil 290 | } 291 | if err != nil { 292 | return false, err 293 | } 294 | return lockHolder == aToken, nil 295 | } 296 | 297 | func (ec *RedisCache) GetEpoch() (uint64, error) { 298 | epochStr, err := ec.client.Get(EPOCH_KEY).Result() 299 | if err == redis.Nil { // EPOCH_KEY not set 300 | return 0, nil 301 | } 302 | if err != nil { 303 | return 0, err 304 | } 305 | return strconv.ParseUint(epochStr, 10, 64) 306 | } 307 | 308 | func (ec *RedisCache) NextEpoch() error { 309 | return ec.client.Incr(EPOCH_KEY).Err() 310 | } 311 | 312 | func (ec *RedisCache) Restore(aEpoch uint64, aLogStates []types.CTLogState) error { 313 | commitToken, err := ec.AcquireCommitLock() 314 | if err != nil || commitToken == nil { 315 | return fmt.Errorf("Failed to acquire commit lock: %s", err) 316 | } 317 | defer ec.ReleaseCommitLock(*commitToken) 318 | 319 | logKeys, err := ec.client.Keys("log::*").Result() 320 | if err != nil { 321 | return err 322 | } 323 | 324 | for _, logKey := range logKeys { 325 | err = ec.client.Del(logKey).Err() 326 | if err != nil { 327 | return err 328 | } 329 | } 330 | 331 | for _, logState := range aLogStates { 332 | err := ec.StoreLogState(&logState) 333 | if err != nil { 334 | return err 335 | } 336 | } 337 | 338 | err = ec.client.Set(EPOCH_KEY, aEpoch, NO_EXPIRATION).Err() 339 | if err != nil { 340 | return err 341 | } 342 | 343 | return nil 344 | } 345 | 346 | func (ec *RedisCache) AddPreIssuerAlias(aPreIssuer types.Issuer, aIssuer types.Issuer) error { 347 | key := fmt.Sprintf("preissuer::%s", aPreIssuer.ID()) 348 | added, err := ec.SetInsert(key, aIssuer.ID()) 349 | if err == nil && added { 350 | glog.Warningf("Added preissuer alias %s -> %s", aPreIssuer.ID(), aIssuer.ID()) 351 | // This alias will be preserved for one week. During this time 352 | // any call to CertDatabase.Commit() will migrate serials from 353 | // the preissuer's bin to the issuer's bin. 
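// In Redis terms, the two calls here amount to roughly the following
// (IDs shortened for illustration):
//
//	SADD preissuer::<preissuerID> <issuerID>
//	EXPIREAT preissuer::<preissuerID> <now + 7 days>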
354 | ec.ExpireAt(key, time.Now().AddDate(0, 0, 7)) 355 | } 356 | return err 357 | } 358 | 359 | func (ec *RedisCache) GetPreIssuerAliases(aPreIssuer types.Issuer) ([]types.Issuer, error) { 360 | key := fmt.Sprintf("preissuer::%s", aPreIssuer.ID()) 361 | aliases, err := ec.SetList(key) 362 | if err != nil { 363 | return nil, err 364 | } 365 | 366 | issuerList := make([]types.Issuer, 0, len(aliases)) 367 | for _, alias := range aliases { 368 | issuerList = append(issuerList, types.NewIssuerFromString(alias)) 369 | } 370 | return issuerList, nil 371 | } 372 | -------------------------------------------------------------------------------- /go/storage/rediscache_test.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "os" 7 | "reflect" 8 | "testing" 9 | "time" 10 | 11 | "github.com/mozilla/crlite/go" 12 | ) 13 | 14 | var kRedisHost = "RedisHost" 15 | 16 | func getRedisCache(tb testing.TB) *RedisCache { 17 | setting, ok := os.LookupEnv(kRedisHost) 18 | if !ok { 19 | tb.Skipf("%s is not set, unable to run %s. Skipping.", kRedisHost, tb.Name()) 20 | } 21 | tb.Logf("Connecting to Redis instance at %s", setting) 22 | 23 | rc, err := NewRedisCache(setting, time.Second) 24 | if err != nil { 25 | tb.Fatalf("Couldn't construct RedisCache: %v", err) // Fatalf: returning a nil cache would only panic later 26 | } 27 | return rc 28 | } 29 | 30 | func Test_RedisPolicy(t *testing.T) { 31 | t.Parallel() 32 | rc := getRedisCache(t) 33 | if err := rc.MemoryPolicyCorrect(); err != nil { 34 | t.Error(err) 35 | } 36 | } 37 | 38 | func Test_RedisInvalidHost(t *testing.T) { 39 | t.Parallel() 40 | _, err := NewRedisCache("unknown_host:999999", time.Second) 41 | if err == nil { 42 | t.Error("Should have failed to construct invalid redis cache host") 43 | } 44 | } 45 | 46 | func Test_RedisInsertion(t *testing.T) { 47 | t.Parallel() 48 | rc := getRedisCache(t) 49 | defer rc.client.Del("key") 50 | 51 | firstExists, err := rc.Exists("key") 52 | if err != nil { 53 | t.Error(err) 54 | } 55 | if firstExists == true { 56 | t.Error("Key shouldn't exist yet") 57 | } 58 | 59 | firstInsert, err := rc.SetInsert("key", "FADEC00DEAD00DEAF00CAFE0") 60 | if err != nil { 61 | t.Error(err) 62 | } 63 | if firstInsert == false { 64 | t.Errorf("Should have inserted") 65 | } 66 | 67 | secondExists, err := rc.Exists("key") 68 | if err != nil { 69 | t.Error(err) 70 | } 71 | if secondExists == false { 72 | t.Error("Key should now exist") 73 | } 74 | 75 | doubleInsert, err := rc.SetInsert("key", "FADEC00DEAD00DEAF00CAFE0") 76 | if err != nil { 77 | t.Error(err) 78 | } 79 | if doubleInsert == true { 80 | t.Errorf("Shouldn't have re-inserted") 81 | } 82 | 83 | shouldntExist, err := rc.SetContains("key", "BEAC040FBAC040") 84 | if err != nil { 85 | t.Error(err) 86 | } 87 | if shouldntExist == true { 88 | t.Errorf("This serial should not have been saved") 89 | } 90 | 91 | shouldExist, err := rc.SetContains("key", "FADEC00DEAD00DEAF00CAFE0") 92 | if err != nil { 93 | t.Error(err) 94 | } 95 | if shouldExist == false { 96 | t.Errorf("This serial should have been saved") 97 | } 98 | 99 | err = rc.SetRemove("key", []string{"FADEC00DEAD00DEAF00CAFE0"}) 100 | if err != nil { 101 | t.Error(err) 102 | } 103 | 104 | shouldBeRemoved, err := rc.SetContains("key", "FADEC00DEAD00DEAF00CAFE0") 105 | if err != nil { 106 | t.Error(err) 107 | } 108 | if shouldBeRemoved == true { 109 | t.Errorf("This serial should have been removed") 110 | } 111 | } 112 | 113 | func Test_RedisSets(t *testing.T) { 114 | t.Parallel() 115 | rc 
:= getRedisCache(t) 116 | q := "setCache" 117 | defer rc.client.Del(q) 118 | 119 | sortedSerials := make([]string, 999) 120 | 121 | for i := 0; i < len(sortedSerials); i++ { 122 | sortedSerials[i] = fmt.Sprintf("%04X", i) 123 | } 124 | 125 | randomSerials := make([]string, len(sortedSerials)) 126 | copy(randomSerials[:], sortedSerials) 127 | 128 | rand.Shuffle(len(sortedSerials), func(i, j int) { 129 | randomSerials[i], randomSerials[j] = randomSerials[j], randomSerials[i] 130 | }) 131 | 132 | for _, s := range randomSerials { 133 | success, err := rc.SetInsert(q, s) 134 | if err != nil { 135 | t.Error(err) 136 | } 137 | if success != true { 138 | t.Errorf("Failed to insert %v", s) 139 | } 140 | } 141 | 142 | rand.Shuffle(len(randomSerials), func(i, j int) { 143 | randomSerials[i], randomSerials[j] = randomSerials[j], randomSerials[i] 144 | }) 145 | 146 | for _, s := range randomSerials { 147 | // check'em 148 | exists, err := rc.SetContains(q, s) 149 | if err != nil { 150 | t.Error(err) 151 | } 152 | if exists != true { 153 | t.Errorf("Should have existed! %s", s) 154 | } 155 | } 156 | 157 | list, err := rc.SetList(q) 158 | if err != nil { 159 | t.Error(err) 160 | } 161 | if len(list) != len(sortedSerials) { 162 | t.Errorf("Expected %d serials but got %d", len(sortedSerials), len(list)) 163 | } 164 | 165 | c := make(chan string) 166 | go func() { 167 | err = rc.SetToChan(q, c) 168 | if err != nil { 169 | t.Error(err) 170 | } 171 | }() 172 | counter := 0 173 | for v := range c { 174 | var found bool 175 | for _, s := range sortedSerials { 176 | if s == v { 177 | found = true 178 | break 179 | } 180 | } 181 | if !found { 182 | t.Errorf("Unexpected value from chan, got %s", v) 183 | } 184 | counter++ 185 | } 186 | if counter != len(sortedSerials) { 187 | t.Errorf("Expected %d values from the channel, got %d", len(sortedSerials), 188 | counter) 189 | } 190 | 191 | card, err := rc.SetCardinality(q) 192 | if err != nil { 193 | t.Error(err) 194 | } 195 | if card != counter { 196 | t.Errorf("Expected SetCardinality %d to equal the %d values received", card, counter) 197 | } 198 | } 199 | 200 | func Test_RedisExpiration(t *testing.T) { 201 | t.Parallel() 202 | rc := getRedisCache(t) 203 | defer rc.client.Del("expTest") 204 | 205 | success, err := rc.SetInsert("expTest", "a") 206 | if !success || err != nil { 207 | t.Errorf("Should have inserted: %v", err) 208 | } 209 | 210 | if exists, err := rc.Exists("expTest"); exists == false || err != nil { 211 | t.Errorf("Should exist: %v %v", exists, err) 212 | } 213 | 214 | anHourAgo := time.Now().Add(time.Hour * -1) 215 | if err := rc.ExpireAt("expTest", anHourAgo); err != nil { 216 | t.Error(err) 217 | } 218 | 219 | if exists, err := rc.Exists("expTest"); exists == true || err != nil { 220 | t.Errorf("Should not exist anymore: %v %v", exists, err) 221 | } 222 | } 223 | 224 | func expectNilLogState(t *testing.T, rc *RedisCache, url string) { 225 | obj, err := rc.LoadLogState(url) 226 | if obj != nil { 227 | t.Errorf("Expected a nil state, obtained %+v for %s", obj, url) 228 | } 229 | if err == nil { 230 | t.Error("Expected an error") 231 | } 232 | } 233 | 234 | func Test_RedisLogState(t *testing.T) { 235 | t.Parallel() 236 | rc := getRedisCache(t) 237 | rc.client.Del("log::short_url/location") 238 | defer rc.client.Del("log::short_url/location") 239 | 240 | log := &types.CTLogState{ 241 | ShortURL: "short_url/location", 242 | MaxEntry: 123456789, 243 | MaxTimestamp: uint64(time.Now().Unix()), 244 | } 245 | 246 | expectNilLogState(t, rc, log.ShortURL) 247 | 248 | err := 
rc.StoreLogState(log) 249 | if err != nil { 250 | t.Error(err) 251 | } 252 | 253 | obj, err := rc.LoadLogState(log.ShortURL) 254 | if err != nil { 255 | t.Error(err) 256 | } 257 | if !reflect.DeepEqual(log, obj) { 258 | t.Errorf("expected identical log objects: %+v %+v", log, obj) 259 | } 260 | 261 | expectNilLogState(t, rc, "") 262 | expectNilLogState(t, rc, fmt.Sprintf("%s/a", log.ShortURL)) 263 | } 264 | 265 | func expectLocked(t *testing.T, rc *RedisCache, aToken *string, aExpected bool) { 266 | locked, err := rc.HasCommitLock(*aToken) 267 | if err != nil { 268 | t.Errorf("Error in HasCommitLock: %v", err) 269 | } 270 | if aExpected != locked { 271 | t.Errorf("Locking error: locked (%t), expected (%t)", locked, aExpected) 272 | } 273 | } 274 | 275 | func Test_RedisCommitLock(t *testing.T) { 276 | rc := getRedisCache(t) 277 | 278 | invalidToken := "invalid token" 279 | // HasCommitLock should return false for invalid tokens 280 | expectLocked(t, rc, &invalidToken, false) 281 | 282 | // We should be able to acquire the lock 283 | token1, err := rc.AcquireCommitLock() 284 | if err != nil { 285 | t.Errorf("Error in AcquireCommitLock: %v", err) 286 | } 287 | if token1 == nil { 288 | t.Error("Should have lock") 289 | } 290 | expectLocked(t, rc, token1, true) 291 | 292 | // The lock should be exclusive 293 | token2, err := rc.AcquireCommitLock() 294 | if err != nil { 295 | t.Errorf("Error in AcquireCommitLock: %v", err) 296 | } 297 | if token2 != nil { 298 | t.Error("Lock should be exclusive") 299 | } 300 | expectLocked(t, rc, token1, true) 301 | 302 | // Other tokens should be able to acquire the lock after we 303 | // release it 304 | rc.ReleaseCommitLock(*token1) 305 | token4, err := rc.AcquireCommitLock() 306 | if err != nil { 307 | t.Errorf("Error in AcquireCommitLock: %v", err) 308 | } 309 | if token4 == nil { 310 | t.Error("Should have acquired lock") 311 | } 312 | expectLocked(t, rc, token1, false) 313 | expectLocked(t, rc, token4, true) 314 | 315 | // Cleanup 316 | rc.ReleaseCommitLock(*token4) 317 | expectLocked(t, rc, token4, false) 318 | } 319 | 320 | func Test_RedisEpoch(t *testing.T) { 321 | rc := getRedisCache(t) 322 | 323 | epoch, err := rc.GetEpoch() 324 | if err != nil { 325 | t.Error("Should have gotten epoch") 326 | } 327 | 328 | err = rc.NextEpoch() 329 | if err != nil { 330 | t.Error("Should have incremented epoch") 331 | } 332 | 333 | nextEpoch, err := rc.GetEpoch() 334 | if err != nil { 335 | t.Error("Should have gotten epoch") 336 | } 337 | 338 | if nextEpoch != epoch+1 { 339 | t.Error("Epoch should have been incremented by 1") 340 | } 341 | } 342 | 343 | func Test_RedisRestore(t *testing.T) { 344 | rc := getRedisCache(t) 345 | 346 | logState := types.CTLogState{ 347 | LogID: "szRxVrR4eNrC0aUI0PD7gVznDV4Ihwvq1xELJwoQ9qQ=", 348 | MMD: 86400, 349 | ShortURL: "ct.example.org/v1", 350 | MinEntry: 0, 351 | MaxEntry: 0, 352 | MinTimestamp: 0, 353 | MaxTimestamp: 0, 354 | LastUpdateTime: time.Now(), 355 | } 356 | 357 | otherLogState := types.CTLogState{ 358 | LogID: "tpj13e9osHbErk6uYPSZMMR-4ODf3TGGIoDWSVGA1hU=", 359 | MMD: 86400, 360 | ShortURL: "ct.example.com/v1", 361 | MinEntry: 0, 362 | MaxEntry: 0, 363 | MinTimestamp: 0, 364 | MaxTimestamp: 0, 365 | LastUpdateTime: time.Now(), 366 | } 367 | 368 | // Put an entry in the cache to test that logs that are 369 | // not passed to `Restore` are removed. 
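// (Sketch of the expectation: before Restore the cache holds
// log::ct.example.com/v1; afterwards only log::ct.example.org/v1 and
// epoch 31415 should remain.)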
370 | err := rc.StoreLogState(&otherLogState) 371 | if err != nil { 372 | t.Error(err) 373 | } 374 | 375 | _, err = rc.LoadLogState(otherLogState.ShortURL) 376 | if err != nil { 377 | t.Errorf("Entry for %s should be present", otherLogState.ShortURL) 378 | } 379 | 380 | storedEpoch := uint64(31415) 381 | err = rc.Restore(storedEpoch, []types.CTLogState{logState}) 382 | if err != nil { 383 | t.Error("Should have modified cache") 384 | } 385 | 386 | epoch, err := rc.GetEpoch() 387 | if err != nil || epoch != storedEpoch { 388 | t.Errorf("Expected epoch %d", storedEpoch) 389 | } 390 | 391 | _, err = rc.LoadLogState(logState.ShortURL) 392 | if err != nil { 393 | t.Errorf("Entry for %s should be present", logState.ShortURL) 394 | } 395 | 396 | _, err = rc.LoadLogState(otherLogState.ShortURL) 397 | if err == nil { 398 | t.Errorf("Entry for %s should not be present", otherLogState.ShortURL) 399 | } 400 | } 401 | 402 | func Test_RedisPreIssuerAlias(t *testing.T) { 403 | rc := getRedisCache(t) 404 | issuer1 := types.NewIssuerFromString(kIssuer1) 405 | issuer2 := types.NewIssuerFromString(kIssuer2) 406 | issuer3 := types.NewIssuerFromString(kIssuer3) 407 | aliases, err := rc.GetPreIssuerAliases(issuer1) 408 | if err != nil { 409 | t.Error(err) 410 | } 411 | if len(aliases) != 0 { 412 | t.Errorf("Expected 0 alias, found %d", len(aliases)) 413 | } 414 | err = rc.AddPreIssuerAlias(issuer1, issuer2) 415 | if err != nil { 416 | t.Error(err) 417 | } 418 | err = rc.AddPreIssuerAlias(issuer1, issuer3) 419 | if err != nil { 420 | t.Error(err) 421 | } 422 | aliases, err = rc.GetPreIssuerAliases(issuer1) 423 | if err != nil { 424 | t.Error(err) 425 | } 426 | if len(aliases) != 2 { 427 | t.Errorf("Expected 2 aliases, found %d", len(aliases)) 428 | } 429 | if !(kIssuer2 == aliases[0].ID() || kIssuer2 == aliases[1].ID()) { 430 | t.Errorf("Expected alias %s, found %s and %s", kIssuer2, aliases[0].ID(), aliases[1].ID()) 431 | } 432 | if !(kIssuer3 == aliases[0].ID() || kIssuer3 == aliases[1].ID()) { 433 | t.Errorf("Expected alias %s, found %s and %s", kIssuer3, aliases[0].ID(), aliases[1].ID()) 434 | } 435 | rc.client.FlushDB() 436 | } 437 | -------------------------------------------------------------------------------- /go/storage/serialcachewriter.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/golang/glog" 8 | "github.com/mozilla/crlite/go" 9 | ) 10 | 11 | const kSerials = "serials" 12 | 13 | type SerialCacheWriter struct { 14 | expDate types.ExpDate 15 | issuer types.Issuer 16 | cache RemoteCache 17 | expirySet bool 18 | } 19 | 20 | func NewSerialCacheWriter(aExpDate types.ExpDate, aIssuer types.Issuer, aCache RemoteCache) *SerialCacheWriter { 21 | return &SerialCacheWriter{ 22 | expDate: aExpDate, 23 | issuer: aIssuer, 24 | cache: aCache, 25 | expirySet: false, 26 | } 27 | } 28 | 29 | func (kc *SerialCacheWriter) id(params ...string) string { 30 | return fmt.Sprintf("%s%s::%s", kc.expDate.ID(), strings.Join(params, ""), kc.issuer.ID()) 31 | } 32 | 33 | func (kc *SerialCacheWriter) serialId(params ...string) string { 34 | return fmt.Sprintf("%s::%s", kSerials, kc.id(params...)) 35 | } 36 | 37 | // Returns true if this serial was unknown. Subsequent calls with the same serial 38 | // will return false, as it will be known then. 
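// A minimal usage sketch (construction of the inputs is elided here):
//
//	writer := NewSerialCacheWriter(expDate, issuer, cache)
//	novel, err := writer.Insert(serial)
//	if err == nil && novel {
//		// first sighting of this serial in this (expDate, issuer) bin
//	}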
39 | func (kc *SerialCacheWriter) Insert(aSerial types.Serial) (bool, error) { 40 | result, err := kc.cache.SetInsert(kc.serialId(), aSerial.BinaryString()) 41 | if err != nil { 42 | return false, err 43 | } 44 | 45 | if !kc.expirySet { 46 | kc.setExpiryFlag() 47 | kc.expirySet = true 48 | } 49 | 50 | if result { 51 | glog.V(3).Infof("[%s] Certificate unknown: %s", kc.id(), aSerial) 52 | } else { 53 | glog.V(3).Infof("[%s] Certificate already known: %s", kc.id(), aSerial) 54 | } 55 | return result, nil 56 | } 57 | 58 | func (kc *SerialCacheWriter) RemoveMany(aSerials []types.Serial) error { 59 | // Removing an element of a set may leave the set empty. Redis 60 | // automatically deletes empty sets, so assume that we need to reset 61 | // the ExpireAt time for this set on the next Insert call. 62 | kc.expirySet = false 63 | serialStrings := make([]string, len(aSerials)) 64 | for i := 0; i < len(aSerials); i++ { 65 | serialStrings[i] = aSerials[i].BinaryString() 66 | } 67 | return kc.cache.SetRemove(kc.serialId(), serialStrings) 68 | } 69 | 70 | func (kc *SerialCacheWriter) Count() int64 { 71 | count, err := kc.cache.SetCardinality(kc.serialId()) 72 | if err != nil { 73 | glog.Errorf("Couldn't determine count of %s, now at %d: %s", kc.id(), count, err) 74 | } 75 | return int64(count) 76 | } 77 | 78 | func (kc *SerialCacheWriter) Contains(aSerial types.Serial) (bool, error) { 79 | return kc.cache.SetContains(kc.serialId(), aSerial.BinaryString()) 80 | } 81 | 82 | func (kc *SerialCacheWriter) List() []types.Serial { 83 | // Redis' scan methods regularly provide duplicates. The duplication 84 | // happens at this level, pulling from SetToChan, so we make a hash-set 85 | // here to de-duplicate when the memory impacts are the most minimal. 86 | serials := make(map[string]struct{}) 87 | var count int 88 | 89 | strChan := make(chan string) 90 | go func() { 91 | err := kc.cache.SetToChan(kc.serialId(), strChan) 92 | if err != nil { 93 | glog.Fatalf("Error obtaining list of known certificates: %v", err) 94 | } 95 | }() 96 | 97 | for str := range strChan { 98 | serials[str] = struct{}{} 99 | count += 1 100 | } 101 | 102 | serialList := make([]types.Serial, 0, count) 103 | for str := range serials { 104 | bs, err := types.NewSerialFromBinaryString(str) 105 | if err != nil { 106 | glog.Errorf("Failed to populate serial str=[%s] %v", str, err) 107 | continue 108 | } 109 | serialList = append(serialList, bs) 110 | } 111 | 112 | return serialList 113 | } 114 | 115 | func (kc *SerialCacheWriter) setExpiryFlag() { 116 | expireTime := kc.expDate.ExpireTime() 117 | 118 | if err := kc.cache.ExpireAt(kc.serialId(), expireTime); err != nil { 119 | glog.Errorf("Couldn't set expiration time %v for serials %s: %v", expireTime, kc.id(), err) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /go/storage/types.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/mozilla/crlite/go" 7 | ) 8 | 9 | type RemoteCache interface { 10 | Exists(key string) (bool, error) 11 | SetInsert(key string, aEntry string) (bool, error) 12 | SetRemove(key string, aEntries []string) error 13 | SetContains(key string, aEntry string) (bool, error) 14 | SetList(key string) ([]string, error) 15 | SetToChan(key string, c chan<- string) error 16 | SetCardinality(key string) (int, error) 17 | ExpireAt(key string, aExpTime time.Time) error 18 | KeysToChan(pattern string, c chan<- string) error 19 | 
StoreLogState(aLogObj *types.CTLogState) error 20 | LoadLogState(aLogUrl string) (*types.CTLogState, error) 21 | LoadAllLogStates() ([]types.CTLogState, error) 22 | Migrate(logData *types.CTLogMetadata) error 23 | AcquireCommitLock() (*string, error) 24 | ReleaseCommitLock(aToken string) 25 | HasCommitLock(aToken string) (bool, error) 26 | GetEpoch() (uint64, error) 27 | NextEpoch() error 28 | Restore(aEpoch uint64, aLogStates []types.CTLogState) error 29 | AddPreIssuerAlias(aPreIssuer types.Issuer, aIssuer types.Issuer) error 30 | GetPreIssuerAliases(aPreIssuer types.Issuer) ([]types.Issuer, error) 31 | } 32 | -------------------------------------------------------------------------------- /go/storage/types_test.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "encoding/json" 5 | "encoding/pem" 6 | "math" 7 | "reflect" 8 | "testing" 9 | "time" 10 | 11 | "github.com/google/certificate-transparency-go/x509" 12 | "github.com/mozilla/crlite/go" 13 | ) 14 | 15 | // issuer:ca 16 | // subject: leadingZeros 17 | // serialNumber: 0x00AA 18 | // 19 | // ... requires hacking pycert.py 20 | 21 | const ( 22 | kLeadingZeroes = `-----BEGIN CERTIFICATE----- 23 | MIICozCCAYugAwIBAgICAKowDQYJKoZIhvcNAQELBQAwDTELMAkGA1UEAwwCY2Ew 24 | IhgPMjAxNzExMjcwMDAwMDBaGA8yMDIwMDIwNTAwMDAwMFowGDEWMBQGA1UEAwwN 25 | IGxlYWRpbmdaZXJvczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALqI 26 | UahEjhbWQf1utogGNhA9PBPZ6uQ1SrTs9WhXbCR7wcclqODYH72xnAabbhqG8mvi 27 | r1p1a2pkcQh6pVqnRYf3HNUknAJ+zUP8HmnQOCApk6sgw0nk27lMwmtsDu0Vgg/x 28 | fq1pGrHTAjqLKkHup3DgDw2N/WYLK7AkkqR9uYhheZCxV5A90jvF4LhIH6g304hD 29 | 7ycW2FW3ZlqqfgKQLzp7EIAGJMwcbJetlmFbt+KWEsB1MaMMkd20yvf8rR0l0wnv 30 | uRcOp2jhs3svIm9p47SKlWEd7ibWJZ2rkQhONsscJAQsvxaLL+Xxj5kXMbiz/kkj 31 | +nJRxDHVA6zaGAo17Y0CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAGGxF47xA91w0 32 | JvJ9kMGyiTqwtU7RaCXW+euVrFq8fFqE6+Gy+EnAQkNvzAjgHBoboodsost7xwuq 33 | JG/LoF6qUsztYVpGHtpElghTv6XXhMCh0zaoM0PrE5oXYY75di+ltEH1DJVf0xj0 34 | 30AK23vyZ+UsNwISUyzECxA10RUSAD697vFIqW9RrJG1fM6f3l/VRBLINqOafrNB 35 | z6brFHZzowdAKMBkog7ZQyiHEi1BqV8Vd8SKng2lQNw67RFgfB2Ltgbew2SiZMor 36 | ylxqvBshawlL7jExLaSnMgE0RvcvSjpDguO7QO84CtH2LDGYjBABfy9ShGWTsKHi 37 | Tqhe91GhlQ== 38 | -----END CERTIFICATE-----` 39 | ) 40 | 41 | func TestIssuerLazyInit(t *testing.T) { 42 | i := types.NewIssuer(&x509.Certificate{RawSubjectPublicKeyInfo: []byte{0xFF}}) 43 | 44 | if i.ID() != "qBAK5qoZQNC2Y7sxzUZhQuu9vVGHExuS2TgYmHgy64k=" { 45 | t.Errorf("Unexpected encoding: %s", i.ID()) 46 | } 47 | } 48 | 49 | func TestSerial(t *testing.T) { 50 | x := types.NewSerialFromHex("DEADBEEF") 51 | y, _ := types.NewSerialFromBinaryString(string([]byte{0xDE, 0xAD, 0xBE, 0xEF})) 52 | 53 | if !reflect.DeepEqual(x, y) { 54 | t.Errorf("Serials should match") 55 | } 56 | 57 | if x.BinaryString() != y.BinaryString() { 58 | t.Errorf("Should compare the same") 59 | } 60 | 61 | if y.String() != "deadbeef" { 62 | t.Errorf("Wrong encoding, got: %s but expected deadbeef", y.String()) 63 | } 64 | 65 | if x.String() != "deadbeef" { 66 | t.Errorf("Wrong encoding, got: %s but expected deadbeef", y.String()) 67 | } 68 | } 69 | 70 | func TestSerialFromCertWithLeadingZeroes(t *testing.T) { 71 | b, _ := pem.Decode([]byte(kLeadingZeroes)) 72 | 73 | cert, err := x509.ParseCertificate(b.Bytes) 74 | if err != nil { 75 | t.Error(err) 76 | } 77 | 78 | x := types.NewSerial(cert) 79 | // The Serial should be only the Value of the serialNumber field, so in this 80 | // case [00, AA]. 
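// (Background: DER encodes an INTEGER in two's complement, so the
// positive value 0xAA needs a leading 0x00 pad octet; CRLite compares
// serials by their exact bytes, so that pad must survive parsing.)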
81 | // The Stringification is the hexification, lowercase 82 | if x.String() != "00aa" { 83 | t.Errorf("Lost leading zeroes: %s != 00aa", x.String()) 84 | } 85 | } 86 | 87 | func TestSerialJson(t *testing.T) { 88 | serials := []types.Serial{types.NewSerialFromHex("ABCDEF"), types.NewSerialFromHex("001100")} 89 | data, err := json.Marshal(serials) 90 | if err != nil { 91 | t.Error(err) 92 | } 93 | 94 | var decoded []types.Serial 95 | err = json.Unmarshal(data, &decoded) 96 | if err != nil { 97 | t.Errorf("Decoding %s got error %v", string(data), err) 98 | } 99 | 100 | if !reflect.DeepEqual(serials, decoded) { 101 | t.Errorf("Should match %+v %+v", serials, decoded) 102 | } 103 | } 104 | 105 | func TestSerialBinaryStrings(t *testing.T) { 106 | serials := []types.Serial{ 107 | types.NewSerialFromHex("ABCDEF"), 108 | types.NewSerialFromHex("001100"), 109 | types.NewSerialFromHex("ABCDEF0100101010010101010100101010"), 110 | types.NewSerialFromHex("00ABCDEF01001010101010101010010101"), 111 | types.NewSerialFromHex("FFFFFFFFFFFFFF00F00FFFFFFFFFFFFFFF"), 112 | } 113 | 114 | for _, s := range serials { 115 | astr := s.BinaryString() 116 | 117 | decoded, err := types.NewSerialFromBinaryString(astr) 118 | if err != nil { 119 | t.Error(err) 120 | } 121 | if !reflect.DeepEqual(s, decoded) { 122 | t.Errorf("Expected to match %v != %v", s, decoded) 123 | } 124 | } 125 | } 126 | 127 | func TestLog(t *testing.T) { 128 | log := types.CTLogState{ 129 | ShortURL: "log.example.com/2525", 130 | MaxEntry: math.MaxInt64, 131 | MaxTimestamp: uint64(time.Date(2525, time.May, 20, 19, 21, 54, 39, time.UTC).Unix()), 132 | LastUpdateTime: time.Date(3000, time.December, 31, 23, 55, 59, 0, time.UTC), 133 | } 134 | 135 | expectedString := "[log.example.com/2525] MinEntry=0, MaxEntry=9223372036854775807, MaxTimestamp=17526223314, LastUpdateTime=3000-12-31 23:55:59 +0000 UTC" 136 | if log.String() != expectedString { 137 | t.Errorf("Expecting %s but got %s", expectedString, log.String()) 138 | } 139 | } 140 | 141 | func TestExpDate(t *testing.T) { 142 | testParsing := func(d string) types.ExpDate { 143 | expDate, err := types.NewExpDate(d) 144 | if err != nil { 145 | t.Error(err) 146 | } 147 | if expDate.ID() != d { 148 | t.Errorf("Expected ID of %s but got %s", d, expDate.ID()) 149 | } 150 | return expDate 151 | } 152 | 153 | hourless := testParsing("2004-01-19") 154 | if !hourless.IsExpiredAt(time.Date(2004, 01, 20, 0, 0, 0, 0, time.UTC)) { 155 | t.Errorf("Should have been expired: %s", hourless) 156 | } 157 | if hourless.IsExpiredAt(time.Date(2004, 01, 19, 23, 59, 59, 59, time.UTC)) { 158 | t.Errorf("Should have been valid: %s", hourless) 159 | } 160 | 161 | fourOclock := testParsing("2004-01-19-04") 162 | if !fourOclock.IsExpiredAt(time.Date(2004, 01, 19, 05, 0, 0, 0, time.UTC)) { 163 | t.Errorf("Should have been expired: %s", fourOclock) 164 | } 165 | if fourOclock.IsExpiredAt(time.Date(2004, 01, 19, 04, 59, 59, 0, time.UTC)) { 166 | t.Errorf("Should have been valid: %s", fourOclock) 167 | } 168 | 169 | elevenOclock := testParsing("2004-01-19-23") 170 | if !elevenOclock.IsExpiredAt(time.Date(2004, 01, 19, 24, 0, 0, 0, time.UTC)) { 171 | t.Errorf("Should have been expired: %s", elevenOclock) 172 | } 173 | if elevenOclock.IsExpiredAt(time.Date(2004, 01, 19, 23, 59, 59, 59, time.UTC)) { 174 | t.Errorf("Should have been valid: %s", elevenOclock) 175 | } 176 | } 177 | 178 | func TestExpDateFromTime(t *testing.T) { 179 | date := time.Date(2004, 01, 20, 4, 22, 19, 44, time.UTC) 180 | truncDate := time.Date(2004, 01, 20, 0, 
0, 0, 0, time.UTC) 181 | 182 | expDate := types.NewExpDateFromTime(date) 183 | if !expDate.IsExpiredAt(date) { 184 | t.Errorf("Should have expired at its own time") 185 | } 186 | 187 | if expDate.IsExpiredAt(truncDate.Add(-1 * time.Millisecond)) { 188 | t.Errorf("Should not be expired a moment earlier") 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /go/telemetry/telemetry.go: -------------------------------------------------------------------------------- 1 | package telemetry 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "os" 8 | "strings" 9 | "time" 10 | 11 | "github.com/golang/glog" 12 | "github.com/hashicorp/go-metrics" 13 | ) 14 | 15 | // InmemSignal is used to listen for a given signal, and when received, 16 | // to dump the current metrics from the InmemSink to an io.Writer 17 | type MetricsDumper struct { 18 | inm *metrics.InmemSink 19 | w io.Writer 20 | stopCh chan struct{} 21 | ticker *time.Ticker 22 | } 23 | 24 | func NewMetricsDumper(sink *metrics.InmemSink, period time.Duration) *MetricsDumper { 25 | obj := &MetricsDumper{ 26 | inm: sink, 27 | w: os.Stderr, 28 | stopCh: make(chan struct{}), 29 | ticker: time.NewTicker(period), 30 | } 31 | 32 | go obj.run() 33 | 34 | return obj 35 | } 36 | 37 | func (i *MetricsDumper) run() { 38 | for { 39 | select { 40 | case <-i.ticker.C: 41 | i.dumpStats() 42 | case <-i.stopCh: 43 | return 44 | } 45 | } 46 | } 47 | 48 | func (i *MetricsDumper) Stop() { 49 | close(i.stopCh) 50 | i.ticker.Stop() 51 | } 52 | 53 | // dumpStats is used to dump the data to output writer 54 | func (i *MetricsDumper) dumpStats() { 55 | buf := bytes.NewBuffer(nil) 56 | 57 | data := i.inm.Data() 58 | // Skip the last period which is still being aggregated 59 | for j := 0; j < len(data)-1; j++ { 60 | intv := data[j] 61 | intv.RLock() 62 | for _, val := range intv.Gauges { 63 | name := i.flattenLabels(val.Name, val.Labels) 64 | fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) 65 | } 66 | for name, vals := range intv.Points { 67 | for _, val := range vals { 68 | fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) 69 | } 70 | } 71 | for _, agg := range intv.Counters { 72 | name := i.flattenLabels(agg.Name, agg.Labels) 73 | fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) 74 | } 75 | for _, agg := range intv.Samples { 76 | name := i.flattenLabels(agg.Name, agg.Labels) 77 | fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) 78 | } 79 | intv.RUnlock() 80 | } 81 | 82 | // Write out the bytes 83 | _, err := i.w.Write(buf.Bytes()) 84 | if err != nil { 85 | glog.Warningf("Could not emit stats: %v", err) 86 | } 87 | } 88 | 89 | // Flattens the key for formatting along with its labels, removes spaces 90 | func (i *MetricsDumper) flattenLabels(name string, labels []metrics.Label) string { 91 | buf := bytes.NewBufferString(name) 92 | replacer := strings.NewReplacer(" ", "_", ":", "_") 93 | 94 | for _, label := range labels { 95 | _, _ = replacer.WriteString(buf, ".") 96 | _, _ = replacer.WriteString(buf, label.Value) 97 | } 98 | 99 | return buf.String() 100 | } 101 | -------------------------------------------------------------------------------- /go/types.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "encoding/asn1" 7 | "encoding/base64" 8 | "encoding/hex" 9 | "encoding/json" 10 | "fmt" 11 | 
"github.com/google/certificate-transparency-go/x509" 12 | "net/url" 13 | "sort" 14 | "strings" 15 | "time" 16 | ) 17 | 18 | const ( 19 | kExpirationFormat = "2006-01-02" 20 | kExpirationFormatWithHour = "2006-01-02-15" 21 | ) 22 | 23 | var ( 24 | kOidExtensionReasonCode = []int{2, 5, 29, 21} 25 | ) 26 | 27 | /* The CTLogMetadata struct contains the information that we receive 28 | * `ct-logs` Remote Settings collection. */ 29 | type CTLogMetadata struct { 30 | CRLiteEnrolled bool `json:"crlite_enrolled"` 31 | Description string `json:"description"` 32 | Key string `json:"key"` 33 | LogID string `json:"logID"` 34 | MMD int `json:"mmd"` 35 | URL string `json:"url"` 36 | } 37 | 38 | func (o *CTLogMetadata) MetricKey() string { 39 | metricKey := o.URL 40 | metricKey = strings.TrimPrefix(metricKey, "https://") 41 | metricKey = strings.TrimSuffix(metricKey, "/") 42 | metricKey = strings.ReplaceAll(metricKey, "/", ".") 43 | return metricKey 44 | } 45 | 46 | /* The CTLogState struct contains information necessary to describe a filter's 47 | * coverage of a CT log. */ 48 | type CTLogState struct { 49 | LogID string `db:"logID"` // The log's RFC 6962 LogID 50 | MMD uint64 `db:"mmd"` // The log's maximum merge delay in seconds 51 | ShortURL string `db:"url"` // URL to the log 52 | MinEntry uint64 `db:"minEntry"` // The smallest index we've downloaded 53 | MaxEntry uint64 `db:"maxEntry"` // The largest index we've downloaded 54 | MinTimestamp uint64 `db:"minTimestamp"` // Unix timestamp of the earliest entry we've downloaded 55 | MaxTimestamp uint64 `db:"maxTimestamp"` // Unix timestamp of the most recent entry we've downloaded 56 | LastUpdateTime time.Time `db:"lastUpdateTime"` // Date when we completed the last update 57 | } 58 | 59 | func (o *CTLogState) String() string { 60 | return fmt.Sprintf("[%s] MinEntry=%d, MaxEntry=%d, MaxTimestamp=%d, LastUpdateTime=%s", 61 | o.ShortURL, o.MinEntry, o.MaxEntry, o.MaxTimestamp, o.LastUpdateTime) 62 | } 63 | 64 | type Issuer struct { 65 | id *string 66 | spki SPKI 67 | } 68 | 69 | func NewIssuer(aCert *x509.Certificate) Issuer { 70 | obj := Issuer{ 71 | id: nil, 72 | spki: SPKI{aCert.RawSubjectPublicKeyInfo}, 73 | } 74 | return obj 75 | } 76 | 77 | func NewIssuerFromString(aStr string) Issuer { 78 | obj := Issuer{ 79 | id: &aStr, 80 | } 81 | return obj 82 | } 83 | 84 | func (o *Issuer) ID() string { 85 | if o.id == nil { 86 | encodedDigest := o.spki.Sha256DigestURLEncodedBase64() 87 | o.id = &encodedDigest 88 | } 89 | return *o.id 90 | } 91 | 92 | func (o *Issuer) MarshalJSON() ([]byte, error) { 93 | return json.Marshal(o.ID()) 94 | } 95 | 96 | func (o *Issuer) UnmarshalJSON(data []byte) error { 97 | return json.Unmarshal(data, &o.id) 98 | } 99 | 100 | type SPKI struct { 101 | spki []byte 102 | } 103 | 104 | func (o SPKI) ID() string { 105 | return base64.URLEncoding.EncodeToString(o.spki) 106 | } 107 | 108 | func (o SPKI) String() string { 109 | return hex.EncodeToString(o.spki) 110 | } 111 | 112 | func (o SPKI) Sha256DigestURLEncodedBase64() string { 113 | binaryDigest := sha256.Sum256(o.spki) 114 | encodedDigest := base64.URLEncoding.EncodeToString(binaryDigest[:]) 115 | return encodedDigest 116 | } 117 | 118 | type Serial struct { 119 | serial []byte 120 | } 121 | 122 | func (s Serial) String() string { 123 | return s.HexString() 124 | } 125 | 126 | func (s Serial) BinaryString() string { 127 | return string(s.serial) 128 | } 129 | 130 | func (s Serial) HexString() string { 131 | return hex.EncodeToString(s.serial) 132 | } 133 | 134 | func (s Serial) 
MarshalJSON() ([]byte, error) { 135 | return json.Marshal(s.HexString()) 136 | } 137 | 138 | func (s *Serial) UnmarshalJSON(data []byte) error { 139 | if data[0] != '"' || data[len(data)-1] != '"' { 140 | return fmt.Errorf("Expected surrounding quotes") 141 | } 142 | b, err := hex.DecodeString(string(data[1 : len(data)-1])) 143 | s.serial = b 144 | return err 145 | } 146 | 147 | type SerialList []Serial 148 | 149 | func (s SerialList) Len() int { return len(s) } 150 | func (s SerialList) Less(i, j int) bool { return bytes.Compare(s[i].serial, s[j].serial) < 0 } 151 | func (s SerialList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } 152 | func (s SerialList) Dedup() SerialList { 153 | if len(s) < 2 { 154 | return s 155 | } 156 | sort.Sort(s) 157 | end := 1 158 | for i := 1; i < len(s); i++ { 159 | if bytes.Equal(s[i].serial, s[i-1].serial) { 160 | continue 161 | } 162 | s[end] = s[i] 163 | end++ 164 | } 165 | return s[:end] 166 | } 167 | 168 | // A serial number with a revocation reason code 169 | type SerialAndReason struct { 170 | Serial Serial 171 | Reason uint8 172 | } 173 | 174 | type IssuerCrlMap map[string]map[string]bool 175 | 176 | func (self IssuerCrlMap) Merge(other IssuerCrlMap) { 177 | for issuer, crls := range other { 178 | selfCrls, pres := self[issuer] 179 | if !pres { 180 | selfCrls = make(map[string]bool) 181 | } 182 | for crl, _ := range crls { 183 | selfCrls[crl] = true 184 | } 185 | self[issuer] = selfCrls 186 | } 187 | } 188 | 189 | type IssuerCrlUrls struct { 190 | Issuer Issuer 191 | Urls []url.URL 192 | } 193 | 194 | type UrlPath struct { 195 | Url url.URL 196 | Path string 197 | } 198 | 199 | type IssuerCrlUrlPaths struct { 200 | Issuer Issuer 201 | IssuerDN string 202 | CrlUrlPaths []UrlPath 203 | } 204 | 205 | type TBSCertificateListWithRawSerials struct { 206 | Raw asn1.RawContent 207 | Version int `asn1:"optional,default:0"` 208 | Signature asn1.RawValue 209 | Issuer asn1.RawValue 210 | ThisUpdate time.Time 211 | NextUpdate time.Time `asn1:"optional"` 212 | RevokedCertificates []RevokedCertificateWithRawSerial `asn1:"optional"` 213 | } 214 | 215 | type Extension struct { 216 | Id asn1.ObjectIdentifier 217 | Critical bool `asn1:"optional"` 218 | Value []byte 219 | } 220 | 221 | type RevokedCertificateWithRawSerial struct { 222 | Raw asn1.RawContent 223 | SerialNumber asn1.RawValue 224 | RevocationTime time.Time 225 | Extensions []Extension `asn1:"optional"` 226 | } 227 | 228 | func (c RevokedCertificateWithRawSerial) Reason() (asn1.Enumerated, error) { 229 | seen := false 230 | reasonCode := asn1.Enumerated(0) 231 | for _, ext := range c.Extensions { 232 | if ext.Id.Equal(kOidExtensionReasonCode) { 233 | if seen { 234 | return reasonCode, fmt.Errorf("Repeated CRLReason extension") 235 | } 236 | _, err := asn1.Unmarshal(ext.Value, &reasonCode) 237 | if err != nil { 238 | return reasonCode, err 239 | } 240 | seen = true 241 | } 242 | } 243 | 244 | if reasonCode < 0 || reasonCode > 255 { 245 | return reasonCode, fmt.Errorf("Invalid reason code") 246 | } 247 | 248 | return reasonCode, nil 249 | } 250 | 251 | func (c RevokedCertificateWithRawSerial) SerialAndReason() (SerialAndReason, error) { 252 | reason, err := c.Reason() 253 | if err != nil { 254 | return SerialAndReason{}, err 255 | } 256 | 257 | return SerialAndReason{ 258 | NewSerialFromBytes(c.SerialNumber.Bytes), 259 | uint8(reason), 260 | }, nil 261 | } 262 | 263 | func DecodeRawTBSCertList(data []byte) (*TBSCertificateListWithRawSerials, error) { 264 | var tbsCertList TBSCertificateListWithRawSerials 265 | _, 
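// --- Added note (not part of the original sources): Serial values marshal
// to lowercase hex JSON strings (MarshalJSON/UnmarshalJSON above), and
// SerialList.Dedup sorts in place before dropping duplicates. A small
// illustrative sketch:
//
//	list := types.SerialList{
//		types.NewSerialFromHex("0102"),
//		types.NewSerialFromHex("0102"),
//		types.NewSerialFromHex("00aa"),
//	}
//	list = list.Dedup() // -> ["00aa", "0102"]
// ---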
err := asn1.Unmarshal(data, &tbsCertList) 266 | return &tbsCertList, err 267 | } 268 | 269 | func NewSerialFromBytes(b []byte) Serial { 270 | obj := Serial{ 271 | serial: b, 272 | } 273 | return obj 274 | } 275 | 276 | type tbsCertWithRawSerial struct { 277 | Raw asn1.RawContent 278 | Version asn1.RawValue `asn1:"optional,explicit,default:0,tag:0"` 279 | SerialNumber asn1.RawValue 280 | } 281 | 282 | func NewSerial(aCert *x509.Certificate) Serial { 283 | var tbsCert tbsCertWithRawSerial 284 | _, err := asn1.Unmarshal(aCert.RawTBSCertificate, &tbsCert) 285 | if err != nil { 286 | panic(err) 287 | } 288 | return NewSerialFromBytes(tbsCert.SerialNumber.Bytes) 289 | } 290 | 291 | func NewSerialFromHex(s string) Serial { 292 | b, err := hex.DecodeString(s) 293 | if err != nil { 294 | panic(err) 295 | } 296 | return Serial{ 297 | serial: b, 298 | } 299 | } 300 | 301 | func NewSerialFromBinaryString(s string) (Serial, error) { 302 | bytes := []byte(s) 303 | return NewSerialFromBytes(bytes), nil 304 | } 305 | 306 | type ExpDate struct { 307 | date time.Time 308 | lastGood time.Time 309 | hourResolution bool 310 | } 311 | 312 | func NewExpDateFromTime(t time.Time) ExpDate { 313 | truncTime := t.Truncate(time.Hour) 314 | return ExpDate{ 315 | date: truncTime, 316 | lastGood: truncTime.Add(-1 * time.Millisecond), 317 | hourResolution: true, 318 | } 319 | } 320 | 321 | func NewExpDate(s string) (ExpDate, error) { 322 | if len(s) > 10 { 323 | t, err := time.Parse(kExpirationFormatWithHour, s) 324 | if err == nil { 325 | lastGood := t.Add(1 * time.Hour) 326 | lastGood = lastGood.Add(-1 * time.Millisecond) 327 | return ExpDate{t, lastGood, true}, nil 328 | } 329 | } 330 | 331 | t, err := time.Parse(kExpirationFormat, s) 332 | if err == nil { 333 | lastGood := t.Add(24 * time.Hour) 334 | lastGood = lastGood.Add(-1 * time.Millisecond) 335 | return ExpDate{t, lastGood, false}, nil 336 | } 337 | return ExpDate{}, err 338 | } 339 | 340 | func (e ExpDate) IsExpiredAt(t time.Time) bool { 341 | return e.lastGood.Before(t) 342 | } 343 | 344 | func (e ExpDate) ExpireTime() time.Time { 345 | return e.date 346 | } 347 | 348 | func (e ExpDate) String() string { 349 | return e.ID() 350 | } 351 | 352 | func (e ExpDate) Unix() int64 { 353 | return e.date.Unix() 354 | } 355 | 356 | func (e ExpDate) ID() string { 357 | if e.hourResolution { 358 | return e.date.Format(kExpirationFormatWithHour) 359 | } 360 | return e.date.Format(kExpirationFormat) 361 | } 362 | 363 | type IssuerDate struct { 364 | Issuer Issuer 365 | ExpDates []ExpDate 366 | } 367 | 368 | func IsPreIssuer(issuer *x509.Certificate) bool { 369 | for _, eku := range issuer.ExtKeyUsage { 370 | if eku == x509.ExtKeyUsageCertificateTransparency { 371 | return true 372 | } 373 | } 374 | return false 375 | } 376 | -------------------------------------------------------------------------------- /go/types_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/pem" 6 | "testing" 7 | 8 | "github.com/google/certificate-transparency-go/x509" 9 | ) 10 | 11 | const ( 12 | crlEmptyBase64 = `MIH2AgEBMA0GCSqGSIb3DQEBCwUAMIGRMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExDzANBgNVBAcTBklydmluZTElMCMGA1UEChMcV2VzdGVybiBEaWdpdGFsIFRlY2hub2xvZ2llczE9MDsGA1UEAxM0V2VzdGVybiBEaWdpdGFsIFRlY2hub2xvZ2llcyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eRcNMTkwOTAzMDMyMDAxWhcNMTkwOTA3MDMyMDAxWqAwMC4wHwYDVR0jBBgwFoAUWRAanffYNzT9rdULrGiuAvegvYMwCwYDVR0UBAQCAgOG` 13 | crlFilledBase64 = 
`MIIe5AIBATANBgkqhkiG9w0BAQsFADBoMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UECxMURG9tYWluIFZhbGlkYXRlZCBTU0wxIjAgBgNVBAMTGUdlb1RydXN0IERWIFNTTCBTSEEyNTYgQ0EXDTE5MDkwMzE2MDAxNFoXDTE5MDkxMDE2MDAxNFowgh4UMCECEAEB6lGMaMDwB4npzZJzbHUXDTE3MTAzMDEyMjU1MFowIQIQAbv6Qg16m7kES5D25I9uLBcNMTcwNjE2MDY1MTM5WjAhAhAB+pw2+hTghWLebK06+RyVFw0xNjA2MTcxMjE0MDVaMCECEAIF2KTVkbRQNwHzId5xmuMXDTE3MDExODE4MDgwMlowIQIQAwRmHMi6N+4BUO72KcRJ6xcNMTYwNzA2MTExNzEyWjAhAhAFQJugUiQVX32uNjOoC+uzFw0xNzExMTYwOTIzMjJaMCECEAXMZT1a/wkvtceJBIBbD0gXDTE3MDIwMzE2MDgyMFowIQIQCHrvxrypRt7/LEH3926tcBcNMTYwOTI4MTA1MjEyWjAhAhAImhrv9G6J+Ex0s5vE+S7MFw0xNzAyMTAwODAxNTBaMCECEAj3jmEeIxSRKKZCfT91R/4XDTE2MDcxMzE1MTMwMlowIQIQCSbL/k8hi6nhuNTVIlHH+xcNMTcwMzMxMTMzOTMyWjAhAhAJbwFakHACtNrXX/weD6pDFw0xNjA4MTAwNTQ0MjhaMCECEAnyeqfTseniVcVVghXQoo8XDTE3MDcxMzE1MDE0NlowIQIQCqXPXZOvv6e/C4XuTZ/tWhcNMTcwMzIzMTU0MzUxWjAhAhALNVPKzF2luG9uL2THZsa6Fw0xNzExMDYxMDEwMDZaMCECEAwkpQdE/cCustn02VP2RlEXDTE3MDYxNTExMTEzOVowIQIQDEqCHh/xpUPeTq8HwNxKuBcNMTcxMDE5MTQ0MzQ0WjAhAhAMl8s2KklG9Oj2LsDQ/1hoFw0xNjA0MTMxMTMyMjNaMCECEAylUtszJVp/KzIz/jF8hAYXDTE3MDYxNTE0MTIzMlowIQIQDZrp2JTJ07IwhAi+okWDFBcNMTYxMTA5MDg1ODE0WjAhAhAPeFu0Br08y2rPzWGpdDFbFw0xNzA2MTkxNDQ2MzlaMCECEBA6yGF2Ue+30gbfBaUdtaQXDTE3MTExNjA5MzAxM1owIQIQEY3BB4li/5ZDUIRUaRRaDhcNMTYwNTE3MDkwMjUxWjAhAhATYqNkBmmfuKVA95Tt6pPXFw0xNzA3MDQwNzE4MjJaMCECEBOT8k4KIB6fY07eolBQSmkXDTE3MTAwNDEzNTQ0N1owIQIQE9TQmB5+Z+q2x9sORn+SSBcNMTcwODI1MDg0MDU5WjAhAhAVLZnaDLktwZDgY4F5zANMFw0xNzA3MjgxNTE2MzNaMCECEBVoh8IVzgv1F+4RRKMEKIwXDTE3MDYyMTE0MTg0OVowIQIQFaUtKwnKEEXOfnmxI1ImFRcNMTYwOTEzMTAzOTI5WjAhAhAVzcVG2Bf3sbwPgLyUsaYEFw0xNzA5MTEwNjE4MTZaMCECEBXRC5OHCUYwob0VoX1yEKUXDTE3MDMwNjE2MzEyMVowIQIQF08QTf0zELxjM05apL1rZRcNMTcwOTEzMTU1MjA1WjAhAhAXmrWRS0eBMdcvPy05P2LXFw0xNzA5MTgxODEzMTNaMCECEBe0MQOkgiE/ByWMPQgsM/MXDTE3MDIwODEwMjAyMFowIQIQF82jDxstjGuSK8/7W8h7fhcNMTYxMDI1MTUyMDQzWjAhAhAY484eLW8MHbnTW5rmQmBBFw0xNzA0MjcwNzI1MjlaMCECEBo/FSJkOCyHDKjHFidOxGcXDTE3MDMwNzE1MDgxNVowIQIQG0AfC9p1ZywwwBquj7OvJRcNMTYwOTMwMTAxNTI2WjAhAhAbxlXt/cJO5DLhwaavXo8LFw0xNjA5MTMxMDM1MTdaMCECEB14b4ULNdYKLxRD42/+zrQXDTE3MDEyNDE0MDUzMlowIQIQHarSH7Q5ICFDs/tXaGsV+BcNMTcwODE0MTY1MTEwWjAhAhAeMrMC+lcYh4+OCXPEFJTTFw0xNzExMDIxNjIzMTdaMCECEB5uzGxxvOShspcpTuRmZhYXDTE3MDEyNzE2NTcxNFowIQIQHtFo2J0nkR03avIyzN4tlRcNMTcwNjI3MTY0ODAxWjAhAhAfZIkEP+LSGQndOqpP8aagFw0xNzA5MTgwOTUyMzFaMCECEB/oMINtKLIhuQL1ET2DdWgXDTE3MTExMDE2MzIyNlowIQIQIAGFVjunEKcBAJs9PjBaHBcNMTgwMTMxMTQ0NDM1WjAhAhAgamqzGIw7u6Fvzw4fd5eVFw0xNzA3MTgxNTQzMDRaMCECECEaD0iyYijoIuye7HjDj04XDTE2MTIxMzA4NTAzOVowIQIQIdb5R6Ody7fiViq0+z+tmRcNMTYwOTIyMDgzNzQxWjAhAhAh17UJ+BHI8rgltZmDUTNlFw0xNzA1MzAxNTMyNDZaMCECECI8b4Q6duMCxUEekVb5HkkXDTE3MDkxMzA3NDU1N1owIQIQIl/aU1bn4lJC9oTzMoASrxcNMTcxMDAzMDkyOTI4WjAhAhAig110rNBSq0uFZxRzbojdFw0xNzA3MjcxMzAzNTFaMCECECKnTJRC/QhhHDHESyVzpQEXDTE3MDkxMjEzMjczM1owIQIQIw3KsH94NvexPLyMcHhmBxcNMTcwNzEwMTEyNTI5WjAhAhAjYE/BSClf6wXU+gWGOYP/Fw0xNzExMDkxMDUwMzJaMCECECPwxDIU0paN0+si7LerMzYXDTE3MDUxNzEyMzMyMFowIQIQJBA3cPKR6kAC4StlZI3JjxcNMTYwNjI0MTcxNTIxWjAhAhAkjmDjLwxK3sKcSgBKqH2qFw0xNzAyMTAwODAxNTBaMCECECX+6XJsIc6WieLKfXEh3CkXDTE2MTAwNzA5MDAwNFowIQIQJgw8YGRhbSQiHVB3lHfyQxcNMTcwODAzMTI1NDQ1WjAhAhAmOVJKrpd+SN/al27O8ODoFw0xNzA3MTEwMzQ4NTJaMCECECZ9X96J7kel3K7tlgWGrHwXDTE3MDYyMDA4MDQ0NVowIQIQKG1cgMfgQqvHnnJa9Isx1xcNMTcwMTAyMTMxMDEzWjAhAhApZvoyIJVbs38C0kU7ssyqFw0xNzExMjExMzA4MjVaMCECECnTyawK17qIrQMq4jGPfjAXDTE3MDczMTAxMDAyMFowIQIQKltq3iLb2FN/4Fke5P1EtBcNMTcwOTIwMTA1MTM0WjAhAhAqbU/qUP1hpmu73i/4F01oFw0xNzA2MTkxNTI5MDNaMCECECp2Ozwrrm2H/GwCGzey1fsXDTE4MTAxMDEyNDMwOVowIQIQLFECNNqb+3wZF8CXsIUHKxcNMTcwNzEzMTEwNjIzWjAhAhAsd5lv/MjCkNy8RnhYVK
EoFw0xNzExMTAxMDIzMTdaMCECECx9qu5rc1xTjiK3TdQ5eV4XDTE5MDIwOTE0MzEyNFowIQIQLJ67Wiz5ZY1RXhrrVwu2HxcNMTcwNjI3MTQ0MDU5WjAhAhAtiHSa6O5gr1Rw/iDvW+49Fw0xNzAyMDExNTU3NDVaMCECEC2utPEumDhEiRYco/y9YH4XDTE3MDkxMzE1NTE0N1owIQIQLcOVTSXkQ7AX3VdiKTR+SBcNMTcwODI1MTMxNTA1WjAhAhAt0ik6q5PDPC5G+AZFOjK4Fw0xNzExMDMwOTM5MjRaMCECEC3ZeGz7ucwmpOUkxNBOfLwXDTE3MDUyMTA4Mzc0MFowIQIQLhuZUA4iNkfIACrZyLdjehcNMTcwNjAyMTEwNzQ1WjAhAhAvpi/yhcREvoY4H4ey2wENFw0xNzA5MjAxMDUyNDdaMCECEC/n01KBwiJ6igfoKhpsZNIXDTE2MDkyODEzMDEwOVowIQIQMIdip7KB1ooE37XofVZl6RcNMTcwNTMwMTUzMzIwWjAhAhAwxeo9tlTPq1MJ7gkFiFpnFw0xNzA1MTAxNTU4MjFaMCECEDF9TLfVgSW8TZRKUebTwbkXDTE3MDIxMzE3MzQ0OFowIQIQMYB5hkSJIyK3OsUJ4i7XzhcNMTcwNzI3MTcxNzQ5WjAhAhAx1lxnPT9ifICQCt/xpR5bFw0xODA4MjIwOTE4MzhaMCECEDJGyI8eYNIqCEbOvUVGX7EXDTE3MDQxMTE2MDc0M1owIQIQMoLEOPb53vrooL/s5VXNqBcNMTYxMjA5MTQyNTQ4WjAhAhA0t0jA8eFfOUMhuxu+lnRNFw0xNzExMTYxMDAzMjdaMCECEDWn8tsuno8GpYoArmGUHQUXDTE3MDkyODA5MzIyNlowIQIQNat2HK5Isx2/NbrFxZMMOBcNMTcwMzI4MDkzNzA5WjAhAhA1u5iPTiL62+ulevjYNo8CFw0xNzA5MDExNDA0MzFaMCECEDXWKJgajzllNX2IfQ+agg4XDTE3MDkwMTEzMjIyOFowIQIQNowKW+PW6tgaIn1jf/tKOxcNMTcwNzI2MTgyMzMzWjAhAhA2rQXZ22x2ZwyR4kVTab+JFw0xNjA2MjQxNzE1NTNaMCECEDcUvC702FxNWTIiDIdFXD8XDTE3MDYwMjEwMzY1NlowIQIQOPKvoZBbR5SN5zMAbjKG4RcNMTcwMzI4MDgyMDEzWjAhAhA6BV8CDmVaOMJ1qJ5jKWQmFw0xNzA2MjAxMTA1MzZaMCECEDoSSr0FBP57xZyjDh9ZhcoXDTE3MDkwNTEwNDAxNlowIQIQOuJjfgLdZzbEcNQameS7vxcNMTcwODE2MTQyOTE2WjAhAhA7l2wheT20aMg8dzGJmEPRFw0xNjA3MTkwNzQ4MzhaMCECEDukyNpCv7fpjuXpSCPNId4XDTE2MDcyOTA5MzYyNlowIQIQPEWl8N+sz15SaWmm4xBLxxcNMTYwODE2MTQxMDQxWjAhAhA+CeSAKYbA4pCyKDE7c+vCFw0xNzAyMDgxNDEwNDRaMCECED55HFyFy6T/ZWwqP7eT1roXDTE3MDcwNjA4MzYyNlowIQIQPxBp4aORgvUTzcaL2PthShcNMTYxMjA4MTAyMTU1WjAhAhA/pIFcD3IUtWZU3U+JxqswFw0xNzA4MjExMzAzMTZaMCECED/Cn7DkqnJ6txvLvk7x0H8XDTE2MTAyNDEzNTIxMlowIQIQQDMtwJTGzYRhSTgobs9MRRcNMTYwODAzMDk1NzA1WjAhAhBBkjqVJvMKPFAGaKlJmWy4Fw0xNjExMDMwOTE3MzNaMCECEEJCWVMpdEuiLV6iUqdb8G4XDTE2MDcyMjA5MjU0OVowIQIQQsO4nIwtGdHSzzNlW7siYRcNMTcwMTMxMTUwNjMxWjAhAhBDH0pT86rww4OK13aF/sB6Fw0xNzA5MTIxNTU4NDlaMCECEEPH24enjK8tO8TrX3qzLSQXDTE3MDExNjE2MjIwMVowIQIQRCgwF2FJnn62peOAeiFJNhcNMTYwNzIwMTU1NjI4WjAhAhBFLRuebgTGI8+OyxXDj1+7Fw0xNzEyMTMxMzI5NDRaMCECEEYzlysPoTa+lKjPx2AeOI4XDTE3MTAxMTA5MzA1NVowIQIQRwg3fmHtbKirDxFrshdOVRcNMTcwNzI0MTU0MDI3WjAhAhBIMxmqTgPqZqkdprCJ2J66Fw0xNzA3MDYwODM2MjZaMCECEEia1zLbeFTFTUca0IXEqHEXDTE2MDgwMTEyNDExMlowIQIQSaSO/zqK0z9BhAzYFcm5UhcNMTcwMzAyMTY0NDIwWjAhAhBJ4t8nX+SKiZ3wBLZ6upmrFw0xNzAxMjcxNDI5MTlaMCECEEq/k0OSSHrm4hK6j9GyGecXDTE2MDkxMzEwMzkyMFowIQIQSuoEyddad+zdOIZeHL4hDRcNMTYxMDE5MTM1NDMwWjAhAhBMVMAFdU97kK+eRku5JnrsFw0xNzExMTAxNTEzMTRaMCECEEyDICINeMYCpmB0L/BOkCsXDTE3MTExNjE3Mzk0M1owIQIQTX0zXTwGu88gR7Pg6Sc0yBcNMTYxMTI0MTAwMTM0WjAhAhBOAVJB2OyWMoESAXB1VtSDFw0xNzExMDYxMDEwMjJaMCECEE5T4c+fYPRrT0o6ExKbXBgXDTE2MDgyNTA2Mjk0NlowIQIQTq6x0P/DS4kyvYorduAAmBcNMTcwMTAzMjIwMTA2WjAhAhBO18EM+2LCGps/WQXtxH+9Fw0xNzA0MDcwOTUzNDhaMCECEE8sDa69J2NujhMPO5S2QnUXDTE5MDQyNTIwMjY0NFowIQIQUCGQLj7OH4XJRG7o6RRNuxcNMTYwOTEzMTA0MTIwWjAhAhBQQDu4uid+wzGNOt8OeqnsFw0xNjExMTUxMjAzMzJaMCECEFF/3yrYoks8sPdXFqFmX+gXDTE3MDcyNzEzNDIxNVowIQIQUZurFZnj8c62gXjHtC9ViRcNMTgwNTAyMTE1MzEyWjAhAhBR+iuv9I71Dqw5LSWCvQ7MFw0xNzA2MDkxNDUwMTVaMCECEFIWvFDdSX2wmAqD2F1xwfMXDTE3MDUxMDE0MjQzMFowIQIQUnjq8KFrpmzHLFd6nmO23RcNMTcwNTEwMTQyNDMwWjAhAhBSjaS4a61ql0wWU5reQZyiFw0xNzA3MjMxMDEzNDBaMCECEFMkdjcjYVJH8pbZWKUeb4QXDTE2MDcyNTIwNDg0N1owIQIQU/TnS/Iatjk3Vt/C2+vmZBcNMTcwOTI2MTAwMzQ4WjAhAhBUM0Stu6kAMMqE8mwZSpFDFw0xODAyMDIxOTM5MjZaMCECEFUND0scR8u0ufOwbbFm+jUXDTE4MDEyMjE1NTEyOVowIQIQVTiIzVkNiOJnUazo9gWknBcNMTcwNjE5MTQ0NzU4WjAhAhBV2RspC+MyfXcFBAm6dzNfFw0xNzEwMjAxMTI0MzdaMCECEFdAzspYITdT64oDLcPpXYEXDTE2M
TEyOTA5MzY0NlowIQIQV3QCVz5FE9n4f743BODryhcNMTcxMDI3MDcxNjA0WjAhAhBX7/wwktZwJ6JzINuNnftrFw0xNzA3MjAxMDE5MzFaMCECEFgs6PN67QjXNgiwZRIv3ikXDTE3MDYyOTE2MDU0NVowIQIQWMoX/Jes9UOdBJIA7H7IxRcNMTYwNTMwMTM0NDEwWjAhAhBZCvPyQr/y12ZCdaV8FuOlFw0xNzA3MTMxNzE4MzRaMCECEFkikGbBvcZ1DfJoNHFXAqoXDTE3MTEyMDE1NTgwOFowIQIQWVRCxLbMRj3wOEe9BFPX+BcNMTcxMTE1MDkxOTAwWjAhAhBZ1PG+dZlxYaYDlAw5HwB3Fw0xNzA4MjUxMzUxMDZaMCECEFr9GxIeYRrShvPpS1Z9L1cXDTE3MTAxMjA4MDQxOVowIQIQWv3YIujv+DUgY8aCyLyDtRcNMTYxMTI1MTQ0MTE1WjAhAhBcELqTOBFsY/pbmTKaSHsNFw0xNzA4MjQxMzU0MjlaMCECEFyaFHBrgQ4oR5+YvmXzCOkXDTE3MDMxMjE5MjkzNVowIQIQXW6j1NOA7IJmAZU65AIgsRcNMTcwMjIzMTM1OTM2WjAhAhBdksCGtv+wUWUTAgY0O5imFw0xNjExMTAxNDQ2MTJaMCECEF7zJuEu8HzfgKrGky74lfgXDTE2MTIyNzA3NDcwMlowIQIQX8U3crKyWg65jnT6udSx+RcNMTgwOTIwMTgyOTQyWjAhAhBfyxVxgJAdn71A7R0/cKsxFw0xNzA5MjAxMDUzMDlaMCECEF/rRlmCxTAbUBB5rUOgZvYXDTE3MDQxODA5MDEyN1owIQIQX/Ojll0htexcRXXtfhmk5RcNMTcxMTA5MDgxMTU3WjAhAhBhDk8qV6irVduYn91s56kCFw0xNzA5MDUxMjU5MzZaMCECEGIAmz4SdCF3xI9Lp4B0C88XDTE3MDgxMDEwMTU1MVowIQIQYjTo6JNjcRhulhyimHMPqBcNMTYxMTIxMTE0MzMwWjAhAhBipzlbZsVhEmE54Pb/EyEDFw0xNzA5MjIwOTA2NTZaMCECEGMud4SORZsGOrzEW4XxbiQXDTE3MDMyODA4NDUwNlowIQIQZPjKrBcKFjRQPNXFoltxDxcNMTcwODA5MDcwOTA2WjAhAhBlaivxCIdsnoEk60Ozyv+MFw0xNzA2MjgwNjMzMzRaMCECEGXT4FeM4eEqv80sw1vGQnoXDTE3MTExNjA5MTkyMlowIQIQZp/PHAgfHOsVaJwCZbMKPBcNMTcwOTA2MTQwMTQ0WjAhAhBmq/peagWwbLJ00krdwL9nFw0xNjExMDkwODU2MjJaMCECEGbHlMJTBfXt3MdQOiuxg3AXDTE2MDYwOTA4NDg0NlowIQIQZshznoH/fcVqIvq1VdDvvBcNMTYxMTEwMTQ1OTI1WjAhAhBn9E2mp9cFWzpzrUKSa8pBFw0xNzA2MjEwODQwMDNaMCECEGgBHCw4qMICZ9Vp/rfu5wcXDTE3MTEyMjEzMzI1NVowIQIQaBSYj//jXI6b+EZTYQNuVhcNMTYwNTI0MDcxMjM1WjAhAhBoeHea/M8+2rxyBmU1ZtM+Fw0xNzEwMjQwOTA0MDVaMCECEGkFTyTQcdOIlIlLIrLYGy0XDTE3MDYyMDExMDQxMFowIQIQaRTmVf/ebsBCium01hW1rxcNMTkwMTAzMTAyMjMyWjAhAhBqA+wLHuBrn/dpg4Obg60kFw0xNzA0MjExMzIzMTRaMCECEGppMPuTa7yfVF9jXHKgWfwXDTE3MDIyODE1MDgyN1owIQIQapaA3BewexswlZeVtetMUhcNMTcwMzA2MTU1NTEwWjAhAhBqs9yFvXADlRGTngVpc7JHFw0xNzA4MjUxMzUxMjBaMCECEGtOmE1HjTzfEgbzSjsnyZ8XDTE2MTEyMzEzNTQ1OFowIQIQa3cjHTtiTnJGQZPWPYpolBcNMTcwMzIzMTQxMjMwWjAhAhBr06KUAeKyXMnkHZjAJuq4Fw0xNzAyMjgyMTAwMTFaMCECEGw/6ASh3tQU/iGRloZZGhUXDTE2MDQwNDE5NDI1OFowIQIQbEybte/g3HFX2wJalb4UxBcNMTYwNjI4MDgwMTQ0WjAhAhBsh/uBPTZ56i0M5zsHPSCIFw0xNzAxMDIxNjQ5NTdaMCECEG0zkOLZpupDFHg4pI2kCvYXDTE3MDgyNDE1MzY0OVowIQIQbaYRHLJxSaWJOhIARKUTXRcNMTkwNjI2MTMwNjE0WjAhAhBtu0wDSmAmna85LTpR4EffFw0xNzA0MjgxMjM5MTlaMCECEG64SU+D6gakFWcFNeNHGPMXDTE3MDIxNjE2MTYyMVowIQIQb6nowgKurzee0C7kL4rAmBcNMTcwMzEyMTkyOTIyWjAhAhBwALxYNKOak0loIpW+nI50Fw0xNzA2MjYwNjM4MjRaMCECEHHWSDu9g2AvFi095BCNXPoXDTE3MDEyNzEwMjExOVowIQIQciWdVqk9y8LDKUow1vT9JRcNMTYwNjI0MTcxNTQwWjAhAhByWWfpfBsRGmDUEKpngm0YFw0xNzEwMjYwOTE1MjhaMCECEHMMZZpaL+9O1Z22t9iHb/QXDTE2MTIwMTA5NDkwMFowIQIQc8x/E/szWFAMjA/CwXSfaBcNMTcwNzE4MTUwNTQ2WjAhAhB0td5VI43BFKZzS2MWuDKfFw0xNzA3MjUxNjQ0MjZaMCECEHZ2Cwwnc6g2J0yHAtfypHAXDTE5MDEyMjEzMjYzMFowIQIQd0dFd9z/GbnoZ3sdGeiZhhcNMTYwNTMwMTIzNTU1WjAhAhB3id40ys/EF0x+6a/FVatwFw0xNzA1MjIxMTUzMDFaMCECEHe4MvtagGJRvdUyXbK6zBcXDTE3MDMyODA4MTQ1NlowIQIQeGAwjlJ1ml5RXTZsYeVAmRcNMTYxMDA0MDgzNzUwWjAhAhB5M0uJa65mws4beJ2Tx6nJFw0xNzA3MjUxMzM4NThaMCECEHooVYDRqF+iTMwVImd71qYXDTE2MDgyMjEyMjgyOFowIQIQeprfA2AWvG64RJsae94VyBcNMTcwMzIzMTQzMTEwWjAhAhB+0mZAjxfyO27sro+MjkNjFw0xNzAzMDMxNDUwMDVaMCECEH+IbWOQNlOOzUeWTx7662gXDTE3MDUwODA4NDQyM1owIQIQf5rCvGsoADc+bIA6hz+pxxcNMTcwNjE5MTAxNzI2WjAhAhB/2TyZnVAFeDgfllucJktyFw0xNjA4MDQwNzU1NDRaMCECEH/2ozHaTNsAWjqwRDNyfe0XDTE2MDgyOTEyNTIzMlqgMDAuMB8GA1UdIwQYMBaAFEnsp8ip98W7LKok5/RDs7E86FT4MAsGA1UdFAQEAgIPdg==` 14 | kNoCtPrecertificateSigningEku = `-----BEGIN CERTIFICATE----- 15 | 
MIIDATCCAemgAwIBAgIRAKQkkrFx1T/dgB/Go/xBM5swDQYJKoZIhvcNAQELBQAw 16 | EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0xNjA4MTcyMDM2MDdaFw0xNzA4MTcyMDM2 17 | MDdaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw 18 | ggEKAoIBAQDAoJtjG7M6InsWwIo+l3qq9u+g2rKFXNu9/mZ24XQ8XhV6PUR+5HQ4 19 | jUFWC58ExYhottqK5zQtKGkw5NuhjowFUgWB/VlNGAUBHtJcWR/062wYrHBYRxJH 20 | qVXOpYKbIWwFKoXu3hcpg/CkdOlDWGKoZKBCwQwUBhWE7MDhpVdQ+ZljUJWL+FlK 21 | yQK5iRsJd5TGJ6VUzLzdT4fmN2DzeK6GLeyMpVpU3sWV90JJbxWQ4YrzkKzYhMmB 22 | EcpXTG2wm+ujiHU/k2p8zlf8Sm7VBM/scmnMFt0ynNXop4FWvJzEm1G0xD2t+e2I 23 | 5Utr04dOZPCgkm++QJgYhtZvgW7ZZiGTAgMBAAGjUjBQMA4GA1UdDwEB/wQEAwIF 24 | oDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMBsGA1UdEQQUMBKC 25 | EHRlc3QuZXhhbXBsZS5jb20wDQYJKoZIhvcNAQELBQADggEBADpqKQxrthH5InC7 26 | X96UP0OJCu/lLEMkrjoEWYIQaFl7uLPxKH5AmQPH4lYwF7u7gksR7owVG9QU9fs6 27 | 1fK7II9CVgCd/4tZ0zm98FmU4D0lHGtPARrrzoZaqVZcAvRnFTlPX5pFkPhVjjai 28 | /mkxX9LpD8oK1445DFHxK5UjLMmPIIWd8EOi+v5a+hgGwnJpoW7hntSl8kHMtTmy 29 | fnnktsblSUV4lRCit0ymC7Ojhe+gzCCwkgs5kDzVVag+tnl/0e2DloIjASwOhpbH 30 | KVcg7fBd484ht/sS+l0dsB4KDOSpd8JzVDMF8OZqlaydizoJO0yWr9GbCN1+OKq5 31 | EhLrEqU= 32 | -----END CERTIFICATE-----` 33 | kCtPrecertificateSigningEku = `-----BEGIN CERTIFICATE----- 34 | MIIC8DCCAlmgAwIBAgIBATANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJHQjEk 35 | MCIGA1UEChMbQ2VydGlmaWNhdGUgVHJhbnNwYXJlbmN5IENBMQ4wDAYDVQQIEwVX 36 | YWxlczEQMA4GA1UEBxMHRXJ3IFdlbjAeFw0xMjA2MDEwMDAwMDBaFw0yMjA2MDEw 37 | MDAwMDBaMFgxCzAJBgNVBAYTAkdCMScwJQYDVQQKEx5DZXJ0aWZpY2F0ZSBUcmFu 38 | c3BhcmVuY3kgUHJlQ0ExDjAMBgNVBAgTBVdhbGVzMRAwDgYDVQQHEwdFcncgV2Vu 39 | MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC98DtChb8KPYtaYs/diY2gE8+6 40 | zn1ciLl0zkhVFne0wcEY3eJIRX2MPkaspIEGy5EUwcbrSUp/5MbkGMD5E95/kKLH 41 | R1TvzQvhAJKAYQMeya2WxFMqxpo19H6Yh4UZvTam+Zs0lS0UV0LouQTn+7jurAA6 42 | me0VaJ8VYNXBd0oWpwIDAQABo4HMMIHJMB0GA1UdDgQWBBQH77NAIzT3nv4jgIOy 43 | 4g1c6hB9QDAPBgNVHRMBAf8EBTADAQH/MBgGA1UdJQEB/wQOMAwGCisGAQQB1nkC 44 | BAQwfQYDVR0jBHYwdIAUX52IDchz5lTU+A3Y5rDBJLRHw1WhWaRXMFUxCzAJBgNV 45 | BAYTAkdCMSQwIgYDVQQKExtDZXJ0aWZpY2F0ZSBUcmFuc3BhcmVuY3kgQ0ExDjAM 46 | BgNVBAgTBVdhbGVzMRAwDgYDVQQHEwdFcncgV2VuggEAMA0GCSqGSIb3DQEBBQUA 47 | A4GBAI4vVe78WhnJKvyTY9ZaL8F8wgA68VWhoL4AagKcCgAeJyKGFjSqDTh1gAz7 48 | vC7XmbM2ub5AoccbytTt7zwwcHJ4cBgB5dBXMfjvLVlfiVdoFUQ94kf0aZL6mZ3m 49 | BTkZf6YMN/zvaDJclyE3MC8Tj6bh9v4F3UlZAiDxfWGH9slu 50 | -----END CERTIFICATE-----` 51 | ) 52 | 53 | func Test_DecodeCRL(t *testing.T) { 54 | data, err := base64.StdEncoding.DecodeString(crlEmptyBase64) 55 | if err != nil { 56 | t.Error(err) 57 | } 58 | 59 | emptyList, err := DecodeRawTBSCertList(data) 60 | if err != nil { 61 | t.Error(err) 62 | } 63 | 64 | if len(emptyList.RevokedCertificates) > 0 { 65 | t.Error("Expected an empty list.") 66 | } 67 | 68 | data, err = base64.StdEncoding.DecodeString(crlFilledBase64) 69 | if err != nil { 70 | t.Error(err) 71 | } 72 | 73 | filledList, err := DecodeRawTBSCertList(data) 74 | if err != nil { 75 | t.Error(err) 76 | } 77 | 78 | if len(filledList.RevokedCertificates) != 220 { 79 | t.Errorf("Expected 220 entries, got %d.", len(filledList.RevokedCertificates)) 80 | } 81 | 82 | expectedSerial := NewSerialFromHex("0101ea518c68c0f00789e9cd92736c75") 83 | actualSerial := NewSerialFromBytes(filledList.RevokedCertificates[0].SerialNumber.Bytes) 84 | if expectedSerial.BinaryString() != actualSerial.BinaryString() { 85 | t.Errorf("Expected %s, but got %s", expectedSerial, actualSerial) 86 | } 87 | } 88 | 89 | func TestDetectPrecertSigningEKU(t *testing.T) { 90 | b, _ := pem.Decode([]byte(kCtPrecertificateSigningEku)) 91 | 92 
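// --- Added usage sketch (not part of the original sources): walking a
// decoded TBS certificate list and extracting each entry's serial and
// revocation reason, mirroring the API exercised by Test_DecodeCRL above:
//
//	tbs, err := DecodeRawTBSCertList(data)
//	if err != nil {
//		// handle the ASN.1 parse error
//	}
//	for _, rc := range tbs.RevokedCertificates {
//		sr, err := rc.SerialAndReason()
//		_, _ = sr, err // sr.Serial is the serial; sr.Reason the CRLReason code
//	}
// ---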
| cert, err := x509.ParseCertificate(b.Bytes) 93 | if err != nil { 94 | t.Error(err) 95 | } 96 | 97 | if !IsPreIssuer(cert) { 98 | t.Errorf("Failed to identify preissuer") 99 | } 100 | 101 | b, _ = pem.Decode([]byte(kNoCtPrecertificateSigningEku)) 102 | 103 | cert, err = x509.ParseCertificate(b.Bytes) 104 | if err != nil { 105 | t.Error(err) 106 | } 107 | 108 | if IsPreIssuer(cert) { 109 | t.Errorf("Bad result from IsPreIssuer") 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /moz_kinto_publisher/settings.py: -------------------------------------------------------------------------------- 1 | from decouple import config 2 | 3 | KINTO_RW_SERVER_URL = config( 4 | "KINTO_RW_SERVER_URL", default="https://remote-settings.allizom.org/v1/" 5 | ) 6 | KINTO_RO_SERVER_URL = config( 7 | "KINTO_RO_SERVER_URL", default="https://firefox.settings.services.allizom.org/v1/" 8 | ) 9 | KINTO_AUTH_USER = config("KINTO_AUTH_USER", default="") 10 | KINTO_AUTH_PASSWORD = config("KINTO_AUTH_PASSWORD", default="") 11 | KINTO_BUCKET = config("KINTO_BUCKET", default="security-state-staging") 12 | KINTO_CRLITE_COLLECTION = config("KINTO_CRLITE_COLLECTION", default="cert-revocations") 13 | KINTO_INTERMEDIATES_COLLECTION = config( 14 | "KINTO_INTERMEDIATES_COLLECTION", default="intermediates" 15 | ) 16 | KINTO_CTLOGS_COLLECTION = config("KINTO_CTLOGS_COLLECTION", default="ct-logs") 17 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | junit_family=xunit2 3 | -------------------------------------------------------------------------------- /rust-create-cascade/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-create-cascade" 3 | version = "0.1.0" 4 | 5 | [dependencies] 6 | base64 = "0.13" 7 | bincode = "1.3" 8 | clap = { version = "3.0", features = ["derive"] } 9 | hex = "0.4" 10 | log = "0.4" 11 | rand="0.7" 12 | rayon = "1.5" 13 | rust_cascade = { version = "1.5.0" , features = ["builder"] } 14 | statsd = "0.16.0" 15 | stderrlog = "0.5" 16 | tempfile = "3.10.1" 17 | clubcard = { version = "0.3", features = ["builder"] } 18 | clubcard-crlite = { version = "0.3", features = ["builder"] } 19 | serde_json = "1" 20 | -------------------------------------------------------------------------------- /rust-create-cascade/src/cascade_helper.rs: -------------------------------------------------------------------------------- 1 | /* This Source Code Form is subject to the terms of the Mozilla Public 2 | * License, v. 2.0. If a copy of the MPL was not distributed with this 3 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 4 | 5 | use crate::{ 6 | crlite_key, decode_serial, CheckableFilter, FilterBuilder, KnownSerialIterator, ReasonSet, 7 | RevokedSerialAndReasonIterator, Serial, 8 | }; 9 | use rust_cascade::{Cascade, CascadeBuilder, ExcludeSet, HashAlgorithm}; 10 | 11 | use log::*; 12 | use std::collections::HashSet; 13 | use std::fs::File; 14 | use std::io::prelude::Write; 15 | use std::path::Path; 16 | 17 | use rand::rngs::OsRng; 18 | use rand::RngCore; 19 | 20 | impl FilterBuilder for CascadeBuilder { 21 | type ExcludeSetType = ExcludeSet; 22 | type OutputType = Cascade; 23 | 24 | /// `include` finds revoked serials that are known and includes them in the filter cascade. 
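// --- Added note (not part of the original sources): the cascade build is
// two-pass. include() inserts every known+revoked key; exclude() hashes
// every known+non-revoked key into an ExcludeSet, which is collected back
// into the builder before finalize() resolves remaining false positives
// into deeper cascade layers. A minimal sketch using only the rust_cascade
// calls that appear in this file (capacities of 1 and 1 are assumed):
//
//     let mut b = CascadeBuilder::new(HashAlgorithm::MurmurHash3, vec![], 1, 1);
//     b.include(b"revoked-key".to_vec()).unwrap();
//     let mut ex = ExcludeSet::default();
//     b.exclude_threaded(&mut ex, b"ok-key".to_vec());
//     b.collect_exclude_set(&mut ex).unwrap();
//     let cascade = b.finalize().unwrap();
// ---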
25 |     fn include(
26 |         &mut self,
27 |         issuer: &[u8; 32],
28 |         revoked_serials_and_reasons: RevokedSerialAndReasonIterator,
29 |         known_serials: KnownSerialIterator,
30 |     ) {
31 |         let mut revoked_serial_set: HashSet<Serial> = revoked_serials_and_reasons.into();
32 | 
33 |         for (_expiry, serial) in known_serials {
34 |             if revoked_serial_set.contains(&serial) {
35 |                 let key = crlite_key(issuer.as_ref(), &decode_serial(&serial));
36 |                 CascadeBuilder::include(self, key)
37 |                     .expect("Capacity error. Did the file contents change?");
38 |                 // Ensure that we do not attempt to include this issuer+serial again.
39 |                 revoked_serial_set.remove(&serial);
40 |             }
41 |         }
42 |     }
43 | 
44 |     /// `exclude` finds known serials that are not revoked and excludes them from the filter
45 |     /// cascade. It returns an `ExcludeSet` which must be emptied into the builder using
46 |     /// `CascadeBuilder::collect_exclude_set` before `CascadeBuilder::finalize` is called.
47 |     fn exclude(
48 |         &self,
49 |         issuer: &[u8; 32],
50 |         revoked_serials_and_reasons: Option<RevokedSerialAndReasonIterator>,
51 |         known_serials: KnownSerialIterator,
52 |     ) -> ExcludeSet {
53 |         let mut exclude_set = ExcludeSet::default();
54 |         let revoked_serial_set: HashSet<Serial> = revoked_serials_and_reasons
55 |             .map(|iter| iter.into())
56 |             .unwrap_or_default();
57 | 
58 |         let non_revoked_serials =
59 |             known_serials.filter(|(_expiry, serial)| !revoked_serial_set.contains(serial));
60 |         for (_expiry, serial) in non_revoked_serials {
61 |             let key = crlite_key(issuer, &decode_serial(&serial));
62 |             CascadeBuilder::exclude_threaded(self, &mut exclude_set, key);
63 |         }
64 |         exclude_set
65 |     }
66 | 
67 |     fn collect_exclude_sets(&mut self, mut exclude_sets: Vec<ExcludeSet>) {
68 |         for mut exclude_set in exclude_sets.drain(..) {
69 |             self.collect_exclude_set(&mut exclude_set).unwrap();
70 |         }
71 |     }
72 | 
73 |     fn finalize(self) -> Cascade {
74 |         *CascadeBuilder::finalize(self).unwrap()
75 |     }
76 | }
77 | 
78 | impl CheckableFilter for Cascade {
79 |     fn check(
80 |         &self,
81 |         issuer: &[u8; 32],
82 |         revoked_serials_and_reasons: Option<RevokedSerialAndReasonIterator>,
83 |         known_serials: KnownSerialIterator,
84 |     ) {
85 |         let revoked_serial_set: HashSet<Serial> = revoked_serials_and_reasons
86 |             .map(|iter| iter.into())
87 |             .unwrap_or_default();
88 | 
89 |         for (_expiry, serial) in known_serials {
90 |             assert_eq!(
91 |                 Cascade::has(self, crlite_key(issuer, &decode_serial(&serial))),
92 |                 revoked_serial_set.contains(&serial)
93 |             );
94 |         }
95 |     }
96 | }
97 | 
98 | /// `create_cascade` runs through the full filter generation process and returns the
99 | /// serialized cascade.
100 | pub fn create_cascade(
101 |     out_file: &Path,
102 |     revoked: usize,
103 |     not_revoked: usize,
104 |     revoked_dir: &Path,
105 |     known_dir: &Path,
106 |     hash_alg: HashAlgorithm,
107 |     reason_set: ReasonSet,
108 | ) -> Vec<u8> {
109 |     let salt_len = match hash_alg {
110 |         HashAlgorithm::MurmurHash3 => 0,
111 |         HashAlgorithm::Sha256l32 => 16,
112 |         HashAlgorithm::Sha256 => 16,
113 |     };
114 | 
115 |     let mut salt = vec![0u8; salt_len];
116 |     if salt_len > 0 {
117 |         OsRng.fill_bytes(&mut salt);
118 |     }
119 | 
120 |     let mut builder = CascadeBuilder::new(hash_alg, salt, revoked, not_revoked);
121 | 
122 |     info!("Processing revoked serials");
123 |     FilterBuilder::include_all(&mut builder, revoked_dir, known_dir, reason_set);
124 | 
125 |     info!("Processing non-revoked serials");
126 |     FilterBuilder::exclude_all(&mut builder, revoked_dir, known_dir, reason_set);
127 | 
128 |     info!("Eliminating false positives");
129 |     let cascade = FilterBuilder::finalize(builder);
130 | 
131 |     info!("Testing serialization");
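    // Added note (not part of the original sources): the freshly built
    // cascade is serialized and immediately deserialized below as a
    // self-check, then re-verified against the full revoked/known data
    // sets, so a corrupt or empty filter is caught before it is written
    // to out_file.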
132 |     let cascade_bytes = cascade.to_bytes().expect("cannot serialize cascade");
133 |     info!("Cascade is {} bytes", cascade_bytes.len());
134 | 
135 |     if let Some(cascade) =
136 |         Cascade::from_bytes(cascade_bytes.clone()).expect("cannot deserialize cascade")
137 |     {
138 |         info!("\n{}", cascade);
139 | 
140 |         info!("Verifying cascade");
141 |         cascade.check_all(revoked_dir, known_dir, reason_set);
142 |     } else {
143 |         warn!("Produced empty cascade. Exiting.");
144 |         return vec![];
145 |     }
146 | 
147 |     info!("Writing cascade to {}", out_file.display());
148 |     let mut filter_writer = File::create(out_file).expect("cannot open file");
149 |     filter_writer
150 |         .write_all(&cascade_bytes)
151 |         .expect("can't write file");
152 | 
153 |     cascade_bytes
154 | }
155 | 
--------------------------------------------------------------------------------
/rust-create-cascade/src/clubcard_helper.rs:
--------------------------------------------------------------------------------
1 | /* This Source Code Form is subject to the terms of the Mozilla Public
2 |  * License, v. 2.0. If a copy of the MPL was not distributed with this
3 |  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4 | 
5 | use crate::{
6 |     decode_issuer, decode_serial, list_issuer_file_pairs, CheckableFilter, FilterBuilder,
7 |     KnownSerialIterator, ReasonSet, RevokedSerialAndReasonIterator, Serial,
8 | };
9 | use clubcard::{builder::*, Clubcard};
10 | 
11 | use clubcard_crlite::{builder::*, CRLiteClubcard, CRLiteCoverage, CRLiteKey, CRLiteQuery};
12 | 
13 | use log::*;
14 | use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
15 | use std::collections::HashSet;
16 | use std::fs::File;
17 | use std::io::prelude::Write;
18 | use std::io::BufReader;
19 | use std::path::Path;
20 | 
21 | fn clubcard_do_one_issuer(
22 |     clubcard: &ClubcardBuilder<4, CRLiteBuilderItem>,
23 |     issuer: &[u8; 32],
24 |     revoked_serials_and_reasons: RevokedSerialAndReasonIterator,
25 |     known_serials: KnownSerialIterator,
26 | ) -> ApproximateRibbon<4, CRLiteBuilderItem> {
27 |     let mut revoked_serial_set: HashSet<Serial> = revoked_serials_and_reasons.into();
28 | 
29 |     let mut ribbon_builder = clubcard.new_approx_builder(issuer.as_ref());
30 |     let mut universe_size = 0;
31 |     for (_expiry, serial) in known_serials {
32 |         universe_size += 1;
33 |         if revoked_serial_set.contains(&serial) {
34 |             let key = CRLiteBuilderItem::revoked(*issuer, decode_serial(&serial));
35 |             ribbon_builder.insert(key);
36 |             // Ensure that we do not attempt to include this issuer+serial again.
37 |             revoked_serial_set.remove(&serial);
38 |         }
39 |     }
40 |     ribbon_builder.set_universe_size(universe_size);
41 |     ribbon_builder.into()
42 | }
43 | 
44 | impl FilterBuilder for ClubcardBuilder<4, CRLiteBuilderItem> {
45 |     type ExcludeSetType = ExactRibbon<4, CRLiteBuilderItem>;
46 |     type OutputType = ClubcardBuilder<4, CRLiteBuilderItem>;
47 | 
48 |     fn include(
49 |         &mut self,
50 |         _issuer: &[u8; 32],
51 |         _revoked_serials_and_reasons: RevokedSerialAndReasonIterator,
52 |         _known_serials: KnownSerialIterator,
53 |     ) {
54 |         // The FilterBuilder trait assumes that include_all() performs a serial iteration over
55 |         // shards and defines include() as taking a &mut self reference. Clubcard shards can be
56 |         // built in parallel with only an &self reference. So we override include_all() and never
57 |         // call include().
58 |         unimplemented!();
59 |     }
60 | 
61 |     fn include_all(&mut self, revoked_dir: &Path, known_dir: &Path, reason_set: ReasonSet) {
62 |         let ribbons: Vec<ApproximateRibbon<4, CRLiteBuilderItem>> =
63 |             list_issuer_file_pairs(revoked_dir, known_dir)
64 |                 .par_iter()
65 |                 .map(|(issuer, maybe_revoked_file, known_file)| {
66 |                     let issuer_bytes =
67 |                         decode_issuer(issuer.to_str().expect("non-unicode issuer string"));
68 |                     let revoked_serials_and_reasons = match maybe_revoked_file {
69 |                         Some(revoked_file) => {
70 |                             RevokedSerialAndReasonIterator::new(revoked_file, reason_set)
71 |                         }
72 |                         None => RevokedSerialAndReasonIterator::empty(reason_set),
73 |                     };
74 |                     clubcard_do_one_issuer(
75 |                         self,
76 |                         &issuer_bytes,
77 |                         revoked_serials_and_reasons,
78 |                         KnownSerialIterator::new(known_file),
79 |                     )
80 |                 })
81 |                 .collect();
82 | 
83 |         ClubcardBuilder::collect_approx_ribbons(self, ribbons);
84 |     }
85 | 
86 |     fn exclude(
87 |         &self,
88 |         issuer: &[u8; 32],
89 |         revoked_serials_and_reasons: Option<RevokedSerialAndReasonIterator>,
90 |         known_serials: KnownSerialIterator,
91 |     ) -> Self::ExcludeSetType {
92 |         let mut ribbon_builder = ClubcardBuilder::new_exact_builder(self, issuer);
93 | 
94 |         let revoked_serial_set: HashSet<Serial> = revoked_serials_and_reasons
95 |             .map(|iter| iter.into())
96 |             .unwrap_or_default();
97 | 
98 |         for (_expiry, serial) in known_serials {
99 |             let serial_bytes = decode_serial(&serial);
100 |             let key = if revoked_serial_set.contains(&serial) {
101 |                 CRLiteBuilderItem::revoked(*issuer, serial_bytes)
102 |             } else {
103 |                 CRLiteBuilderItem::not_revoked(*issuer, serial_bytes)
104 |             };
105 |             ribbon_builder.insert(key);
106 |         }
107 | 
108 |         ribbon_builder.into()
109 |     }
110 | 
111 |     fn collect_exclude_sets(&mut self, exclude_sets: Vec<Self::ExcludeSetType>) {
112 |         self.collect_exact_ribbons(exclude_sets);
113 |     }
114 | 
115 |     fn finalize(self) -> Self::OutputType {
116 |         self
117 |     }
118 | }
119 | 
120 | impl CheckableFilter for CRLiteClubcard {
121 |     fn check(
122 |         &self,
123 |         issuer: &[u8; 32],
124 |         revoked_serials_and_reasons: Option<RevokedSerialAndReasonIterator>,
125 |         known_serials: KnownSerialIterator,
126 |     ) {
127 |         let revoked_serial_set: HashSet<Serial> = revoked_serials_and_reasons
128 |             .map(|iter| iter.into())
129 |             .unwrap_or_default();
130 | 
131 |         for (_expiry, serial) in known_serials {
132 |             let decoded_serial = decode_serial(&serial);
133 |             let key = CRLiteKey::new(issuer, &decoded_serial);
134 |             let query = CRLiteQuery::new(&key, None);
135 |             assert!(
136 |                 Clubcard::unchecked_contains(self.as_ref(), &query)
137 |                     == revoked_serial_set.contains(&serial)
138 |             );
139 |         }
140 |     }
141 | }
142 | 
143 | pub fn create_clubcard(
144 |     out_file: &Path,
145 |     revoked_dir: &Path,
146 |     known_dir: &Path,
147 |     coverage_path: &Path,
148 |     reason_set: ReasonSet,
149 | ) -> Vec<u8> {
150 |     let coverage = CRLiteCoverage::from_mozilla_ct_logs_json(BufReader::new(
151 |         std::fs::File::open(coverage_path).unwrap(),
152 |     ));
153 | 
154 |     let mut builder = ClubcardBuilder::new();
155 | 
156 |     info!("Processing revoked serials");
157 |     FilterBuilder::include_all(&mut builder, revoked_dir, known_dir, reason_set);
158 | 
159 |     info!("Processing non-revoked serials");
160 |     FilterBuilder::exclude_all(&mut builder, revoked_dir, known_dir, reason_set);
161 | 
162 |     info!("Building clubcard");
163 |     let clubcard: CRLiteClubcard = builder.finalize().build::<CRLiteQuery>(coverage, ()).into();
164 | 
165 |     info!("Generated {}", clubcard);
166 | 
167 |     info!("Testing serialization");
168 |     let clubcard_bytes = clubcard.to_bytes().expect("cannot serialize clubcard");
169 |     info!("Clubcard is {} bytes", clubcard_bytes.len());
170 | 
171 |     let clubcard =
172 | 
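    // Added usage sketch (not part of the original sources): querying a
    // deserialized clubcard for a single issuer/serial pair, mirroring
    // CheckableFilter::check above (issuer_spki_hash and serial_bytes are
    // placeholder names):
    //
    //     let key = CRLiteKey::new(&issuer_spki_hash, &serial_bytes);
    //     let query = CRLiteQuery::new(&key, None);
    //     let revoked = Clubcard::unchecked_contains(clubcard.as_ref(), &query);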
CRLiteClubcard::from_bytes(&clubcard_bytes).expect("cannot deserialize clubcard"); 173 | 174 | info!("Verifying clubcard"); 175 | clubcard.check_all(revoked_dir, known_dir, reason_set); 176 | 177 | info!("Writing clubcard to {}", out_file.display()); 178 | let mut filter_writer = File::create(out_file).expect("cannot open file"); 179 | filter_writer 180 | .write_all(&clubcard_bytes) 181 | .expect("can't write file"); 182 | 183 | clubcard_bytes 184 | } 185 | -------------------------------------------------------------------------------- /rust-query-crlite/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-query-crlite" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | base64 = "0.21" 8 | bincode = "1.3" 9 | byteorder = "1.2.7" 10 | clap = { version = "3.2", features = ["derive"] } 11 | clubcard = "0.3" 12 | clubcard-crlite = "0.3" 13 | der-parser = "9.0" 14 | hex = "0.4" 15 | log = "0.4" 16 | num-bigint = "0.4" 17 | pem = "1.0" 18 | reqwest = { version = "0.11", features = ["blocking", "json", "rustls-tls"] } 19 | rust_cascade = "1.4.0" 20 | rustls = { version = "0.21", features = ["dangerous_configuration"] } 21 | serde = { version = "1.0", features = ["derive"] } 22 | serde_json = "1.0" 23 | sha2 = "0.10.2" 24 | stderrlog = "0.5" 25 | x509-parser = { version = "0.16.0", features = ["verify"] } 26 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | 3 | setup( 4 | name="crlite", 5 | version="1.0.13", 6 | packages=["moz_kinto_publisher", "workflow"], 7 | install_requires=[ 8 | "cryptography>=2.2", 9 | "glog>=0.3", 10 | "google-api-core", 11 | "google-cloud-core", 12 | "google-cloud-storage", 13 | "kinto-http>=10.9", 14 | "python-decouple>=3.1", 15 | "requests[socks]>=2.10.0", 16 | ], 17 | ) 18 | -------------------------------------------------------------------------------- /setup/list_all_active_ct_logs: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for i in $(curl -s https://www.gstatic.com/ct/log_list/log_list.json | jq -r ' .logs | .[] | .url'); do 4 | curl --max-time 10 -s -I --fail -X GET "https://${i}ct/v1/get-sth" | grep -q "200" 5 | [ $? -eq 0 ] && echo -n "https://${i}, " 6 | done 7 | -------------------------------------------------------------------------------- /test-via-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | cd $(dirname ${0}) 4 | 5 | my_ip=$(ipconfig getifaddr en0) 6 | 7 | cd go 8 | go test ./... 9 | cd .. 10 | 11 | assureDir() { 12 | if [ ! 
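# Added note (not part of the original script): assureDir is a guarded
# mkdir; an equivalent one-liner would be `mkdir -p "${1}"`, which also
# creates missing parent directories and is a no-op when the directory
# already exists.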
-d ${1} ] ; then 13 | mkdir ${1} 14 | fi 15 | } 16 | 17 | assureDir /tmp/crlite 18 | assureDir /tmp/crlite/processing 19 | assureDir /tmp/crlite/persistent 20 | 21 | if [ "x${GOOGLE_APPLICATION_CREDENTIALS}x" == "xx" ]; then 22 | echo "You must set GOOGLE_APPLICATION_CREDENTIALS" 23 | exit 1 24 | fi 25 | 26 | echo "Ensure Redis is running at ${my_ip}:6379" 27 | 28 | docker run --rm -it -p 8080:8080/tcp \ 29 | -e "redisHost=${my_ip}:6379" \ 30 | -e "credentials_data=$(base64 ${GOOGLE_APPLICATION_CREDENTIALS})" \ 31 | -e "DoNotUpload=true" \ 32 | -e "outputRefreshMs=1000" \ 33 | -e "logList=https://ct.googleapis.com/logs/argon2021/, https://ct.googleapis.com/logs/argon2022/, https://ct.googleapis.com/logs/argon2023/" \ 34 | -e "limit=500" \ 35 | -e "runForever=false" \ 36 | crlite:staging-fetch 37 | 38 | docker run --rm -it \ 39 | -e "redisHost=${my_ip}:6379" \ 40 | -e "credentials_data=$(base64 ${GOOGLE_APPLICATION_CREDENTIALS})" \ 41 | -e "DoNotUpload=true" \ 42 | -e "outputRefreshMs=1000" \ 43 | --mount type=bind,src=/tmp/crlite/persistent,dst=/persistent \ 44 | --mount type=bind,src=/tmp/crlite/processing,dst=/processing \ 45 | crlite:staging-generate 46 | 47 | docker run --rm -it \ 48 | -e "redisHost=${my_ip}:6379" \ 49 | -e "credentials_data=$(base64 ${GOOGLE_APPLICATION_CREDENTIALS})" \ 50 | -e "DoNotUpload=true" \ 51 | -e "outputRefreshMs=1000" \ 52 | crlite:staging-publish 53 | -------------------------------------------------------------------------------- /version.json: -------------------------------------------------------------------------------- 1 | { 2 | "source" : "https://github.com/mozilla/crlite", 3 | "version": "devel", 4 | "commit" : "", 5 | "build" : "" 6 | } 7 | -------------------------------------------------------------------------------- /workflow/0-allocate_identifier: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | import argparse 5 | import __init__ as workflow 6 | 7 | from pathlib import Path 8 | 9 | from datetime import datetime 10 | 11 | parser = argparse.ArgumentParser() 12 | parser.add_argument("--path", help="Path to folder on disk to store certs", type=Path) 13 | parser.add_argument( 14 | "--filter-bucket", help="Google Cloud Storage filter bucket name", required=True 15 | ) 16 | 17 | args = parser.parse_args() 18 | 19 | if not args.path: 20 | parser.print_usage() 21 | sys.exit(0) 22 | 23 | dateprefix = datetime.utcnow().date().strftime("%Y%m%d") 24 | next_idx = 0 25 | 26 | all_identifiers = workflow.get_run_identifiers(args.filter_bucket) 27 | if all_identifiers: 28 | most_recent_identifier = all_identifiers.pop() 29 | (date_part, idx_part) = most_recent_identifier.split("-") 30 | if date_part == dateprefix: 31 | next_idx = int(idx_part) + 1 32 | 33 | allocatedName = "{}-{}".format(dateprefix, next_idx) 34 | allocatedPath = args.path / Path(allocatedName) 35 | 36 | allocatedPath.mkdir() 37 | 38 | timestamp_file = allocatedPath / Path("timestamp") 39 | timestamp_file.write_text( 40 | datetime.utcnow().isoformat(timespec="seconds"), encoding="utf-8" 41 | ) 42 | 43 | print(allocatedPath) 44 | -------------------------------------------------------------------------------- /workflow/0-set_credentials.inc: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | if [ "x${credentials_data}x" != "xx" ] ; then 4 | echo "Using the credentials_data environment variable for GOOGLE_APPLICATION_CREDENTIALS" 5 | echo 
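# Added note (not part of the original script): credentials_data is the
# base64-encoded service-account JSON; test-via-docker.sh produces it with
#
#     credentials_data=$(base64 ${GOOGLE_APPLICATION_CREDENTIALS})
#
# and this block decodes it back into a file for the Google Cloud clients.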
"${credentials_data}" | base64 --decode >> /tmp/credentials.json 6 | export GOOGLE_APPLICATION_CREDENTIALS=/tmp/credentials.json 7 | fi 8 | 9 | if [ "x${GOOGLE_APPLICATION_CREDENTIALS}x" == "xx" ] ; then 10 | echo "You'll need to provide GOOGLE_APPLICATION_CREDENTIALS somehow" 11 | echo "because all the scripts depend on it. Maybe you should also set" 12 | echo "DoNotUpload to something, to avoid uploading accidentally?" 13 | exit 1 14 | fi 15 | -------------------------------------------------------------------------------- /workflow/1-upload_data_to_storage: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from google.cloud import storage 4 | from google.api_core.retry import Retry 5 | from pathlib import Path 6 | from datetime import datetime 7 | 8 | import argparse 9 | import os 10 | import sys 11 | import glog as log 12 | 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument("--noop", help="Don't upload", action="store_true") 15 | parser.add_argument( 16 | "results_path", 17 | help="Path to folder to upload containing the mlbf folder", 18 | nargs=1, 19 | type=Path, 20 | ) 21 | parser.add_argument( 22 | "--filter-bucket", help="Google Cloud Storage filter bucket name", required=True 23 | ) 24 | 25 | 26 | @Retry(deadline=60) 27 | def uploadBlob(bucket, remoteFilePath, localFilePath): 28 | blob = bucket.blob(str(remoteFilePath)) 29 | blob.upload_from_filename(str(localFilePath)) 30 | 31 | 32 | def uploadFiles(files, localFolder, remoteFolder, bucket, *, args): 33 | log.info(f"Uploading {len(files)} files from {localFolder} to {remoteFolder}") 34 | for item in files: 35 | localFilePath = localFolder.joinpath(item) 36 | remoteFilePath = remoteFolder.joinpath(item) 37 | 38 | if localFilePath.is_symlink(): 39 | continue 40 | 41 | log.debug( 42 | f"Uploading {remoteFilePath} (size={localFilePath.stat().st_size}) " 43 | + f"from {localFilePath}" 44 | ) 45 | 46 | if args.noop: 47 | continue 48 | 49 | uploadBlob(bucket, remoteFilePath, localFilePath) 50 | 51 | 52 | def main(): 53 | args = parser.parse_args() 54 | 55 | if not args.results_path or len(args.results_path) != 1: 56 | parser.print_usage() 57 | sys.exit(0) 58 | 59 | storage_client = storage.Client() 60 | bucket = storage_client.get_bucket(args.filter_bucket) 61 | 62 | runIdPath = args.results_path[0].resolve() 63 | 64 | for path, dirs, files in os.walk(runIdPath): 65 | localFolder = Path(path) 66 | remoteFolder = localFolder.relative_to(runIdPath.parent) 67 | uploadFiles(files, localFolder, remoteFolder, bucket, args=args) 68 | 69 | 70 | if __name__ == "__main__": 71 | main() 72 | -------------------------------------------------------------------------------- /workflow/2-generate_mlbf: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from pathlib import Path 4 | from google.api_core import exceptions 5 | import glog as log 6 | 7 | import argparse 8 | import os 9 | import subprocess 10 | import sys 11 | import tempfile 12 | import __init__ as workflow 13 | 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument("identifier", help="Current working identifier", nargs=1) 16 | parser.add_argument("--nodiff", help="Avoid building a diff") 17 | parser.add_argument( 18 | "--filter-bucket", help="Google Cloud Storage filter bucket name", required=True 19 | ) 20 | parser.add_argument("--statsd-host", help="StatsD host", required=False) 21 | parser.add_argument( 22 | "--reason-set", 23 | 
help="Reason set [values: all, specified, priority]", 24 | required=False, 25 | ) 26 | parser.add_argument( 27 | "--delta-reason-set", 28 | help="Delta reason set. Mirrors --reason-set if omitted. [values: all, specified, priority]", 29 | required=False, 30 | ) 31 | 32 | parser.add_argument( 33 | "--filter-type", 34 | help="Filter type [values: cascade, clubcard]", 35 | required=False, 36 | ) 37 | 38 | 39 | def main(): 40 | args = parser.parse_args() 41 | 42 | if not args.identifier or len(args.identifier) != 1: 43 | parser.print_usage() 44 | sys.exit(0) 45 | 46 | runIdPath = args.identifier[0] 47 | 48 | exe = os.path.expanduser(f"~/rust-create-cascade") 49 | cmdline = [ 50 | exe, 51 | "-vv", 52 | "--known", 53 | os.path.join(runIdPath, "known"), 54 | "--revoked", 55 | os.path.join(runIdPath, "revoked"), 56 | "--ct-logs-json", 57 | os.path.join(runIdPath, "ct-logs.json"), 58 | "--clobber", 59 | ] 60 | 61 | if args.statsd_host: 62 | cmdline += ["--statsd-host", args.statsd_host] 63 | 64 | reason_set = args.reason_set if args.reason_set else "all" 65 | delta_reason_set = args.delta_reason_set if args.delta_reason_set else reason_set 66 | filter_type = args.filter_type if args.filter_type else "cascade" 67 | 68 | if filter_type == "cascade" and reason_set == "all": 69 | outdir = os.path.join(runIdPath, "mlbf") 70 | elif filter_type == "cascade": 71 | outdir = os.path.join(runIdPath, f"mlbf-{args.reason_set}") 72 | else: 73 | outdir = os.path.join(runIdPath, f"{args.filter_type}-{args.reason_set}") 74 | 75 | cmdline += [ 76 | "--outdir", 77 | outdir, 78 | "--reason-set", 79 | reason_set, 80 | "--delta-reason-set", 81 | delta_reason_set, 82 | "--filter-type", 83 | filter_type, 84 | ] 85 | 86 | if not args.nodiff and "GOOGLE_APPLICATION_CREDENTIALS" in os.environ: 87 | try: 88 | dest = Path(tempfile.mkdtemp()) 89 | prev_revset = dest / Path("prev_revset.bin") 90 | 91 | workflow.download_from_google_cloud( 92 | args.filter_bucket, 93 | "latest/revset.bin", 94 | prev_revset, 95 | ) 96 | 97 | cmdline = cmdline + ["--prev-revset", prev_revset] 98 | except exceptions.NotFound as e: 99 | log.error(f"Could not download existing filter: {e}") 100 | except Exception as e: 101 | log.error(f"Could not get any existing filter: {e}") 102 | 103 | log.info(f"Running {cmdline}") 104 | subprocess.run(cmdline, check=True) 105 | 106 | 107 | if __name__ == "__main__": 108 | main() 109 | -------------------------------------------------------------------------------- /workflow/3-upload_mlbf_to_storage: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from google.cloud import storage 4 | from google.api_core.retry import Retry 5 | from pathlib import Path 6 | 7 | import argparse 8 | import datetime 9 | import glog as log 10 | import os 11 | import sys 12 | 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument("--noop", help="Don't upload", action="store_true") 15 | parser.add_argument( 16 | "results_path", 17 | help="Path to folder to upload containing the mlbf folder", 18 | nargs=1, 19 | type=Path, 20 | ) 21 | parser.add_argument( 22 | "--filter-bucket", help="Google Cloud Storage filter bucket name", required=True 23 | ) 24 | 25 | 26 | @Retry(deadline=60) 27 | def uploadBlob(bucket, remoteFilePath, localFilePath): 28 | blob = bucket.blob(str(remoteFilePath)) 29 | blob.upload_from_filename(str(localFilePath)) 30 | 31 | 32 | def uploadFiles(files, localFolder, remoteFolder, bucket, *, args): 33 | log.info(f"Uploading {len(files)} files from 
33 |     log.info(f"Uploading {len(files)} files from {localFolder} to {remoteFolder}")
34 |     for item in files:
35 |         localFilePath = localFolder.joinpath(item)
36 |         remoteFilePath = remoteFolder.joinpath(item)
37 | 
38 |         if localFilePath.is_symlink():
39 |             continue
40 | 
41 |         log.debug(
42 |             f"Uploading {remoteFilePath} (size={localFilePath.stat().st_size}) "
43 |             + f"from {localFilePath}"
44 |         )
45 | 
46 |         if args.noop:
47 |             continue
48 | 
49 |         uploadBlob(bucket, remoteFilePath, localFilePath)
50 | 
51 | 
52 | def ensureFileOrAbort(runIdPath, path):
53 |     filePath = runIdPath / Path(path)
54 |     if not filePath.exists():
55 |         log.error(f"{filePath} does not exist, aborting.")
56 |         sys.exit(1)
57 | 
58 | 
59 | def main():
60 |     args = parser.parse_args()
61 | 
62 |     if not args.results_path or len(args.results_path) != 1:
63 |         parser.print_usage()
64 |         sys.exit(0)
65 | 
66 |     storage_client = storage.Client()
67 |     bucket = storage_client.get_bucket(args.filter_bucket)
68 | 
69 |     runIdPath = args.results_path[0].resolve()
70 | 
71 |     ensureFileOrAbort(runIdPath, Path("clubcard-all/filter"))
72 | 
73 |     for path, dirs, files in os.walk(runIdPath / "clubcard-all"):
74 |         localFolder = Path(path)
75 |         remoteFolder = localFolder.relative_to(runIdPath.parent)
76 |         uploadFiles(files, localFolder, remoteFolder, bucket, args=args)
77 |         # Add a copy to /latest
78 |         subFolder = localFolder.relative_to(runIdPath / "clubcard-all")
79 |         uploadFiles(files, localFolder, Path("latest") / subFolder, bucket, args=args)
80 | 
81 |     for path, dirs, files in os.walk(runIdPath / "clubcard-priority"):
82 |         localFolder = Path(path)
83 |         remoteFolder = localFolder.relative_to(runIdPath.parent)
84 |         uploadFiles(files, localFolder, remoteFolder, bucket, args=args)
85 | 
86 |     sentinel = bucket.blob(str(Path(runIdPath.name) / "completed"))
87 |     log.info(f"Saving 'completed' marker to {sentinel.name}")
88 |     if not args.noop:
89 |         sentinel.upload_from_string(
90 |             f"Upload completed at {datetime.datetime.now(datetime.UTC)}"
91 |         )
92 | 
93 | 
94 | if __name__ == "__main__":
95 |     main()
96 | 
--------------------------------------------------------------------------------
/workflow/__init__.py:
--------------------------------------------------------------------------------
1 | import glog as log
2 | import os
3 | import re
4 | import shutil
5 | import time
6 | from datetime import datetime, timedelta
7 | from google.api_core import exceptions, page_iterator
8 | from google.cloud import storage
9 | from pathlib import Path
10 | 
11 | 
12 | kIdentifierFormat = re.compile(r"(\d{8}-\d+)/?")
13 | 
14 | kTestBucket = "local_test"
15 | 
16 | 
17 | def get_test_dir(bucket_name):
18 |     if not bucket_name.startswith(kTestBucket + ":"):
19 |         raise ValueError(f"Expected bucket_name to start with '{kTestBucket}:'")
20 |     return Path(bucket_name[len(kTestBucket) + 1 :]) / "db"
21 | 
22 | 
23 | class FileNotFoundException(exceptions.NotFound):
24 |     pass
25 | 
26 | 
27 | def _item_to_value(iterator, item):
28 |     return item
29 | 
30 | 
31 | def list_google_storage_directories(bucket_name, *, prefix=None):
32 |     if bucket_name.startswith(kTestBucket):
33 |         for _, dirs, _ in os.walk(get_test_dir(bucket_name)):
34 |             return dirs
35 | 
36 |     extra_params = {"projection": "noAcl", "delimiter": "/"}
37 | 
38 |     if prefix is not None:
39 |         if not prefix.endswith("/"):
40 |             prefix += "/"
41 |         extra_params["prefix"] = prefix
42 | 
43 |     gcs = storage.Client()
44 | 
45 |     path = "/b/" + bucket_name + "/o"
46 | 
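    # Added note (not part of the original sources): GCS has no real
    # directories, so this lists objects with delimiter="/" and reads the
    # returned "prefixes" key (rather than the object items) to recover the
    # top-level "directory" names, e.g. "20240101-0/".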
47 |     iterator = page_iterator.HTTPIterator(
48 |         client=gcs,
49 |         api_request=gcs._connection.api_request,
50 |         path=path,
51 |         items_key="prefixes",
52 |         item_to_value=_item_to_value,
53 |         extra_params=extra_params,
54 |     )
55 | 
56 |     return [x for x in iterator]
57 | 
58 | 
59 | def normalize_identifier(s):
60 |     """The first part of the identifier is a date with no separators and is
61 |     obvious to sort. The second part is a number which is generally a
62 |     single digit, but in a degenerate case could end up with multiple, so
63 |     we pad it here.
64 |     """
65 |     parts = s.rstrip("/").split("-")
66 |     return f"{parts[0]}{int(parts[1]):06d}"
67 | 
68 | 
69 | def get_run_identifiers(bucket_name):
70 |     dirs = list_google_storage_directories(bucket_name)
71 |     identifiers = filter(lambda x: kIdentifierFormat.match(x), dirs)
72 |     identifiers = map(lambda x: kIdentifierFormat.match(x).group(1), identifiers)
73 |     return sorted(identifiers, key=normalize_identifier)
74 | 
75 | 
76 | def google_cloud_file_exists(bucket_name, remote):
77 |     if bucket_name.startswith(kTestBucket):
78 |         return (Path(get_test_dir(bucket_name)) / remote).exists()
79 | 
80 |     gcs = storage.Client()
81 |     bucket = gcs.get_bucket(bucket_name)
82 | 
83 |     blob = storage.blob.Blob(remote, bucket)
84 |     return blob.exists()
85 | 
86 | 
87 | def download_from_google_cloud_to_string(bucket_name, remote):
88 |     if bucket_name.startswith(kTestBucket):
89 |         return (Path(get_test_dir(bucket_name)) / remote).read_bytes()
90 | 
91 |     gcs = storage.Client()
92 |     bucket = gcs.get_bucket(bucket_name)
93 | 
94 |     blob = storage.blob.Blob(remote, bucket)
95 |     if not blob.exists():
96 |         raise FileNotFoundException(f"{remote} does not exist")
97 |     return blob.download_as_string()
98 | 
99 | 
100 | def download_from_google_cloud(bucket_name, remote, local):
101 |     if bucket_name.startswith(kTestBucket):
102 |         shutil.copy(Path(get_test_dir(bucket_name)) / remote, local)
103 |         return
104 | 
105 |     gcs = storage.Client()
106 |     bucket = gcs.get_bucket(bucket_name)
107 | 
108 |     blob = storage.blob.Blob(remote, bucket)
109 |     if not blob.exists():
110 |         raise FileNotFoundException(f"{remote} does not exist")
111 |     with open(local, "wb") as file_obj:
112 |         blob.download_to_file(file_obj)
113 |     log.info(f"Downloaded {blob.public_url} to {local}")
114 | 
115 | 
116 | def download_and_retry_from_google_cloud(
117 |     bucket_name, remote, local, *, timeout=timedelta(minutes=5)
118 | ):
119 |     time_start = datetime.now()
120 |     while True:
121 |         try:
122 |             return download_from_google_cloud(bucket_name, remote, local)
123 |         except FileNotFoundException as fnfe:
124 |             time_waiting = datetime.now() - time_start
125 |             if time_waiting >= timeout:
126 |                 raise fnfe
127 |             log.warning(
128 |                 f"File {remote} not found, retrying (waiting={time_waiting}, "
129 |                 + f"deadline={timeout-time_waiting})"
130 |             )
131 |             time.sleep(30)
132 | 
--------------------------------------------------------------------------------
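# --- Added usage sketch (not part of the original module above): how the
# workflow scripts drive these helpers; the bucket name and identifiers are
# illustrative only:
#
#     ids = get_run_identifiers("my-filter-bucket")  # e.g. ["20240101-0", "20240101-1"]
#     if ids:
#         latest = ids[-1]
#         download_and_retry_from_google_cloud(
#             "my-filter-bucket",
#             f"{latest}/clubcard-all/filter",
#             Path("/tmp/filter"),
#             timeout=timedelta(minutes=5),
#         )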