├── .envrc ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md ├── after.png ├── before.png ├── config.png └── workflows │ └── ci.yml ├── .gitignore ├── .golangci.yml ├── .mise.toml ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── Dockerfile.dockerignore ├── LICENSE ├── Makefile ├── README.md ├── cmd ├── rggr │ └── main.go ├── rghc │ └── main.go └── root.go ├── codecov.yml ├── docker-compose-gr.yml ├── docker-compose-hardcover.yml ├── go.mod ├── go.sum ├── gr ├── gen.go ├── generated.go ├── genqlient.yaml ├── queries.graphql └── schema.graphql ├── hardcover ├── doc.go ├── generated.go ├── genqlient.yaml ├── mock.go ├── queries.graphql └── schema.graphql ├── internal ├── cache.go ├── cache_test.go ├── controller.go ├── controller_test.go ├── doc.go ├── edges.go ├── edges_test.go ├── error.go ├── error_test.go ├── gr.go ├── gr_test.go ├── graphql.go ├── graphql_test.go ├── handler.go ├── handler_test.go ├── hardcover.go ├── hardcover_test.go ├── language.go ├── log.go ├── memory.go ├── mock.go ├── postgres.go ├── postgres_test.go ├── resources.go ├── schema.sql └── transport.go └── renovate.json /.envrc: -------------------------------------------------------------------------------- 1 | export GOTOOLCHAIN=go1.24.2 2 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | gr/schema.graphql linguist-generated=true 2 | hardcover/schema.graphql linguist-generated=true 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report incorrect or unexpected behavior 4 | title: "[BUG] " 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe your setup** 11 | * Which server are you using? 
12 |       - [ ] `api.bookinfo.pro` (shared GR instance) 13 |       - [ ] Self-hosted GR instance (`blampe/rreading-glasses:latest`) 14 |       - [ ] Self-hosted Hardcover instance (`blampe/rreading-glasses:hardcover`) 15 | 16 | * If using a self-hosted server, have you pulled the latest image? (`docker pull ...`) 17 |       - [ ] Yes 18 |       - [ ] No 19 | 20 | * If using a shared instance, have you waited a day for data to load? 21 |       - [ ] Yes 22 |       - [ ] No 23 | 24 | **Describe the bug** 25 | A clear and concise description of what the bug is. 26 | 27 | **Additional context** 28 | Please include any relevant author/book names and/or IDs. 29 | -------------------------------------------------------------------------------- /.github/after.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blampe/rreading-glasses/75c33ea57c6febcfe1403538762a029ac9de9240/.github/after.png -------------------------------------------------------------------------------- /.github/before.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blampe/rreading-glasses/75c33ea57c6febcfe1403538762a029ac9de9240/.github/before.png -------------------------------------------------------------------------------- /.github/config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blampe/rreading-glasses/75c33ea57c6febcfe1403538762a029ac9de9240/.github/config.png -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 |   push: 5 |     branches: 6 |       - main 7 |   pull_request: 8 |     branches: 9 |       - "*" 10 |   workflow_dispatch: 11 | 12 | jobs: 13 |   lint: 14 |     runs-on: ubuntu-latest 15 |     steps: 16 |       - uses: actions/checkout@v4 17 |       - uses: jdx/mise-action@v2 18 | 19 |       - run: make lint
20 | 21 | test: 22 | runs-on: ubuntu-latest 23 | services: 24 | postgres: 25 | image: postgres:17 26 | env: 27 | POSTGRES_USER: postgres 28 | POSTGRES_PASSWORD: "" 29 | POSTGRES_HOST_AUTH_METHOD: "trust" 30 | POSTGRES_DB: test 31 | options: >- 32 | --health-cmd pg_isready 33 | --health-interval 10s 34 | --health-timeout 5s 35 | --health-retries 5 36 | ports: 37 | - 5432:5432 38 | 39 | steps: 40 | - uses: actions/checkout@v4 41 | - uses: jdx/mise-action@v2 42 | 43 | - run: make test 44 | shell: bash 45 | env: 46 | HARDCOVER_API_KEY: ${{ secrets.HARDCOVER_API_KEY }} 47 | GR_HOST: ${{ secrets.GR_HOST }} 48 | GR_TEST_COOKIE: ${{ secrets.GR_TEST_COOKIE }} 49 | 50 | - uses: codecov/codecov-action@v5.4.3 51 | with: 52 | token: ${{ secrets.CODECOV_TOKEN }} 53 | 54 | ok: 55 | name: OK 56 | runs-on: ubuntu-latest 57 | needs: [lint, test] 58 | 59 | if: always() 60 | steps: 61 | - run: exit 1 62 | if: >- 63 | needs.lint.result != 'success' || 64 | needs.test.result != 'success' 65 | 66 | - run: exit 0 67 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | coverage.txt 2 | /bin 3 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | output: 2 | sort-results: true 3 | print-issued-lines: false 4 | 5 | linters: 6 | disable-all: true 7 | enable: 8 | # golangci-lint defaults: 9 | - errcheck 10 | - gosimple 11 | - govet 12 | - ineffassign 13 | - staticcheck 14 | - unused 15 | 16 | # Non-default linters: 17 | - errorlint 18 | - forbidigo 19 | - gocritic 20 | - gofumpt 21 | - nolintlint 22 | - revive 23 | - copyloopvar 24 | - intrange 25 | - musttag 26 | - makezero 27 | 28 | linters-settings: 29 | forbidigo: 30 | # Need to analyze types to match the exactly instead of just name. 
31 |     analyze-types: true 32 |     forbid: 33 |       # Don't use charmbracelet/log's global logger. 34 |       - p: '^log\.(Debug|Info|Warn|Error|Fatal)f?$' 35 |         pkg: github.com/charmbracelet/log 36 |         msg: "Don't use the global logger; use a local logger instead." 37 | 38 |   govet: 39 |     enable: 40 |       - nilness 41 |       - reflectvaluecompare 42 |       - sortslice 43 |       - unusedwrite 44 | 45 | issues: 46 |   max-issues-per-linter: 0 47 |   max-same-issues: 0 48 | 49 |   # Don't ignore some of the issues that golangci-lint considers okay. 50 |   exclude-use-default: false 51 | 52 |   exclude-rules: 53 |     # Don't warn on unused parameters. 54 |     # Parameter names are useful; replacing them with '_' is undesirable. 55 |     - linters: [revive] 56 |       text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _' 57 | 58 |     # staticcheck already has smarter checks for empty blocks. 59 |     # revive's empty-block linter has false positives. 60 |     # For example, as of writing this, the following is not allowed.
61 | # for foo() { } 62 | - linters: [revive] 63 | text: "empty-block: this block is empty, you can remove it" 64 | 65 | - linters: [musttag] 66 | path: _test.go$ 67 | -------------------------------------------------------------------------------- /.mise.toml: -------------------------------------------------------------------------------- 1 | [tools] 2 | go = "1.24.3" 3 | golangci-lint = "1.64.8" 4 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official email address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | [INSERT CONTACT METHOD]. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series of 86 | actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or permanent 93 | ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within the 113 | community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.1, available at 119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 120 | 121 | Community Impact Guidelines were inspired by 122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 123 | 124 | For answers to common questions about this code of conduct, see the FAQ at 125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 126 | [https://www.contributor-covenant.org/translations][translations]. 
127 | 128 | [homepage]: https://www.contributor-covenant.org 129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 130 | [Mozilla CoC]: https://github.com/mozilla/diversity 131 | [FAQ]: https://www.contributor-covenant.org/faq 132 | [translations]: https://www.contributor-covenant.org/translations 133 | 134 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=$BUILDPLATFORM golang:1.24.3-alpine AS build 2 | 3 | WORKDIR /app 4 | COPY go.mod go.sum ./ 5 | RUN go mod download 6 | COPY . . 7 | 8 | ARG RGPATH 9 | ARG TARGETOS 10 | ARG TARGETARCH 11 | RUN --mount=type=cache,target=/go/pkg/mod \ 12 | --mount=type=cache,target=/root/.cache/go-build \ 13 | CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \ 14 | go build -o /app/main -ldflags="-w -s" ${RGPATH} 15 | 16 | FROM gcr.io/distroless/static:nonroot AS app 17 | COPY --from=build /app/main /main 18 | COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 19 | 20 | EXPOSE 8788 21 | CMD ["/main"] 22 | -------------------------------------------------------------------------------- /Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | *.md 2 | *_test.go 3 | .git 4 | .github 5 | .gitignore 6 | .golangci.yml 7 | Dockerfile 8 | Dockerfile.dockerignore 9 | Makefile 10 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL = /bin/bash 2 | PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) 3 | 4 | .PHONY: all 5 | all: build lint test 6 | 7 | .PHONY: generate 8 | generate: go.mod $(wildcard *.go) $(wildcard */*.go) 9 | go generate ./... 
10 | 11 | .PHONY: build-hc 12 | build-hc: generate go.mod $(wildcard *.go) $(wildcard */*.go) 13 | go build -o $(PROJECT_ROOT)/bin/rghc ./cmd/rghc/... 14 | 15 | .PHONY: build-gr 16 | build-gr: generate go.mod $(wildcard *.go) $(wildcard */*.go) 17 | go build -o $(PROJECT_ROOT)/bin/rggr ./cmd/rggr/... 18 | 19 | .PHONY: build 20 | build: build-hc build-gr 21 | 22 | .PHONY: lint 23 | lint: 24 | golangci-lint run --fix --timeout 10m 25 | 26 | .PHONY: test 27 | test: 28 | go test -v -count=1 -race -coverpkg=./... -covermode=atomic -coverprofile=coverage.txt ./... 29 | 30 | .PHONY: release-hc 31 | release-hc: 32 | docker build -f Dockerfile \ 33 | --builder multiarch \ 34 | --platform linux/amd64,linux/arm64 \ 35 | --tag docker.io/blampe/rreading-glasses:hardcover \ 36 | --build-arg RGPATH=./cmd/rghc \ 37 | --push \ 38 | . 39 | 40 | .PHONY: release-gr 41 | release-gr: 42 | docker build -f Dockerfile \ 43 | --builder multiarch \ 44 | --platform linux/amd64,linux/arm64 \ 45 | --tag docker.io/blampe/rreading-glasses:latest \ 46 | --build-arg RGPATH=./cmd/rggr \ 47 | --push \ 48 | . 49 | 50 | .PHONY: release 51 | release: release-hc release-gr 52 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🤓 rreading-glasses [![Discord](https://img.shields.io/discord/1367649771237675078?label=Discord)](https://discord.gg/Xykjv87yYs) 2 | 3 | Corrective lenses for curmudgeonly readars in your life. 4 | 5 | This is a drop-in replacement for R——'s metadata service. It works with your 6 | existing R—— installation, it's backwards-compatible with your library, and it 7 | takes only seconds to enable or disable. You can use it permanently, or 8 | temporarily to help you add books the R—— service doesn't have yet. 
9 | 10 | Unlike R——'s proprietary service, this is much faster, handles large authors, 11 | has full coverage of G——R—— (or Hardcover!), and doesn't take months to load 12 | new books. A hosted instance is available at `https://api.bookinfo.pro` but it 13 | can also be self-hosted. 14 | 15 | ```mermaid 16 | graph LR; 17 | R[R——]-.->M[official metadata]; 18 | R--> api.bookinfo.pro; 19 | 20 | classDef dotted stroke-dasharray:2,text-decoration:line-through; 21 | class M dotted; 22 | ``` 23 | 24 | > [!IMPORTANT] 25 | > This is not an official project and is still in progress. Reach out 26 | > to me directly if you have questions or need help, please don't bother the R—— 27 | > team. 28 | 29 | As of May 2025 there are ~900 users of the shared instance. Here's what some 30 | of them have said so far: 31 | 32 | > Man this is wayyyyyy better than the inhouse metadata, thank you!! 33 | 34 | > This is fucking awesome, thank you!!! 35 | 36 | > I just added this, and omg it fixed so many issues i've been having! Thank 37 | > you! 38 | 39 | > Holy shit can I just say this is so much better. 40 | 41 | > This is fucking fantastic. Came across your link, thought hmm I bet this'll 42 | > be crap. Reinstalled R, added your site, instantly found the 5 upcoming books 43 | > that I couldn't. thank you! 44 | 45 | > Already had it pull in an extra book from an author that came out in September 46 | > that wasn't originally found! 47 | > Will definitely be a rreading glasses evangalist! haha 48 | 49 | > My arr instance has been switched over since yesterday, and it really has 50 | > cleaned up that instance. I've been getting a lot of use out of it. 51 | 52 | > it worked! thanks my man, my wife will be happy with this 53 | 54 | > Thanks so much for putting this together again, I can't tell you how much I appreciate it! 55 | 56 | ## Usage 57 | 58 | > [!CAUTION] 59 | > This **will** modify your library's metadata, but it won't modify files on 60 | > disk. 
In any case, __please__ back up your database _and confirm you know how 61 | > to restore it_ before experimenting with this. 62 | 63 | Navigate to `http(s)://<your-instance>/settings/development`. This page isn't 64 | shown in the UI, so you'll need to manually enter the URL. 65 | 66 | Update `Metadata Provider Source` with `https://api.bookinfo.pro` if you'd like 67 | to use the public instance. If you're self-hosting use your own address. 68 | 69 | Click `Save`. 70 | 71 | ![/settings/development](./.github/config.png) 72 | 73 | You can now search and add authors or works not available on the official 74 | service. 75 | 76 | If at any point you want to revert to the official service, simply delete the 77 | `Metadata Provider Source` and save your configuration again. Any works you 78 | added should be preserved. 79 | 80 | > [!IMPORTANT] 81 | > Metadata is periodically refreshed and in some cases existing files may 82 | > become unmapped (see note above about subtitles). You can correct this from 83 | > `Library > Unmapped Files`, or do a `Manual Import` from an author's page. 84 | 85 | ### Before / After 86 | 87 | ![before](./.github/before.png) 88 | 89 | ![after](./.github/after.png) 90 | 91 | ## Self-hosting 92 | 93 | An image is available at 94 | [`blampe/rreading-glasses`](https://hub.docker.com/r/blampe/rreading-glasses). 95 | It requires a Postgres backend, and its flags currently look like this: 96 | 97 | ``` 98 | Usage: rreading-glasses serve --upstream=STRING --hardcover-auth=STRING [flags] 99 | 100 | Run an HTTP server. 101 | 102 | Flags: 103 |   -h, --help                          Show context-sensitive help. 104 | 105 |   --postgres-host="localhost"         Postgres host ($POSTGRES_HOST). 106 |   --postgres-user="postgres"          Postgres user ($POSTGRES_USER). 107 |   --postgres-password=STRING          Postgres password ($POSTGRES_PASSWORD). 108 |   --postgres-password-file=POSTGRES-PASSWORD-FILE    File with the Postgres password ($POSTGRES_PASSWORD_FILE). 109 |   --postgres-port=5432                Postgres port ($POSTGRES_PORT).
110 | --postgres-database="rreading-glasses" Postgres database to use ($POSTGRES_DATABASE). 111 | --verbose increase log verbosity ($VERBOSE) 112 | --port=8788 Port to serve traffic on ($PORT). 113 | --rpm=60 Maximum upstream requests per minute ($RPM). 114 | --cookie=STRING Cookie to use for upstream HTTP requests ($COOKIE). 115 | --cookie-file=COOKIE-FILE File with the Cookie to use for upstream HTTP requests ($COOKIE_FILE). 116 | --proxy="" HTTP proxy URL to use for upstream requests ($PROXY). 117 | --upstream=STRING Upstream host (e.g. www.example.com) ($UPSTREAM). 118 | --hardcover-auth=STRING Hardcover Authorization header, e.g. 'Bearer ...' ($HARDCOVER_AUTH) 119 | --hardcover-auth-file=HARDCOVER-AUTH-FILE File containing the Hardcover Authorization header, e.g. 'Bearer ...' ($HARDCOVER_AUTH_FILE) 120 | ``` 121 | 122 | Two docker compose example files are included as a reference: 123 | `docker-compose-gr.yml` and `docker-compose-hardcover.yml`. 124 | 125 | 126 | ### G——R—— Cookie 127 | 128 | When using the G——R—— image ("latest" tag) it's highly recommended that you set 129 | the `cookie` flag for better performance, otherwise new author lookups will be 130 | throttled to 1 per minute. (These requests don't scrape metadata – they simply 131 | resolve canonical IDs. They are only needed the first time an author or book is 132 | fetched.) 133 | 134 | * Open a Private/Incognito window in your browser. 135 | * Go to G——R——. 136 | * Create an account or login to your existing account, checking the box to `Keep me signed in`. 137 | * Open Developer Tools (usually with `F12`) and go to the `Network` tab. 138 | * Refresh the page. 139 | * Right click on the first row of `g——r——.com`. 140 | * Select `Copy`/`Copy Value` > `Copy as cURL`. 141 | * Paste it into a plain text editor. 142 | 143 | ``` 144 | curl 'https://www.g——r——.com/' 145 | ... 146 | -H 'Cookie: ' 147 | ... 148 | ``` 149 | * Grab everything after `Cookie:` up to, but not including, the trailing `'`. 
150 | * If the last character of the string is a semi-colon (`;`), remove this as well. 151 | * Use this as the `--cookie` flag. 152 | 153 | #### Example G——R—— Docker Compose Snippet 154 | 155 | > \- --cookie=ccsid=foo; ...; lc-main=en_US 156 | 157 | ### Hardcover Auth 158 | 159 | When using Hardcover you must set the `hardcover-auth` parameter (this is optional with G——R——). 160 | 161 | * Create an account or login to [Hardcover](https://hardcover.app). 162 | * Click on User Icon and Settings. 163 | * Select `Hardcover API`. 164 | * Copy the entire token **including** `Bearer`. 165 | * Use this as the `--hardcover-auth` flag. 166 | 167 | #### Example Hardcover Docker Compose Snippet 168 | 169 | > \- --hardcover-auth=Bearer Q123AbC... 170 | 171 | ### Resource Requirements 172 | 173 | Resource requirements are minimal; a Raspberry Pi should suffice. Storage 174 | requirements will vary depending on the size of your library, but in most cases 175 | shouldn't exceed a few gigabytes for personal use. (The published image doesn't 176 | require any large data dumps and will gradually grow your database as it's 177 | queried over time.) 178 | 179 | ### Troubleshooting 180 | 181 | When in doubt, make sure you have the latest image pulled: `docker pull 182 | blampe/rreading-glasses:latest` or `blampe/rreading-glasses:hardcover`. 183 | 184 | If you suspect data inconsistencies, try removing R——'s `cache.db` file and 185 | then restart the app. 186 | 187 | You can also try deleting your Postgres database to ensure you don't have any 188 | bad data cached. 189 | 190 | If these steps don't resolve the problem, please create an issue! 191 | 192 | ## Key differences 193 | 194 | I have deviated slightly from the official service's behavior to make a couple 195 | of, in my opinion, quality of life improvements. 
These aren't due to technical 196 | limitations and can be changed, so I'm eager to hear if people think these are 197 | an improvement or if it would be better to match the official behavior more 198 | exactly. 199 | 200 | - Titles no longer automatically include subtitles _unless_ it's part of a 201 | series, or if multiple books have the same primary title. This de-clutters 202 | the UI, cleans up the directory layout, and improves import matching but 203 | __you may need to re-import some works with long subtitles__. I think the 204 | trade-off is worth it but others might disagree — let me know! 205 | 206 | - The "best" (original) edition is always preferred to make cover art more 207 | consistently high-quality. Additionally, books are no longer returned with 208 | every edition ever released, because that makes manual edition selection 209 | difficult to impossible. Instead, an alternative edition (e.g. translation) 210 | is only included once at least one user has searched for it. (This might 211 | change in the future to include all editions but de-duplicated by title.) 212 | 213 | ## Details 214 | 215 | This project implements an API-compatible, coalescing read-through cache for 216 | consumption by the R—— metadata client. It is not a fork of any prior work. 217 | 218 | The service is pluggable and can serve metadata from any number of sources: API 219 | clients, data dumps, OpenLibrary proxies, scrapers, or other means. The 220 | interface to implement is: 221 | 222 | ```go 223 | type Getter interface { 224 | GetWork(ctx context.Context, workID int64) (*WorkResource, error) 225 | GetAuthor(ctx context.Context, authorID int64) (*AuthorResource, error) 226 | GetBook(ctx context.Context, bookID int64) (*WorkResource, error) 227 | } 228 | ``` 229 | 230 | In other words, anything that understands how to map a G——R—— ID to a Resource 231 | can serve as a source of truth. 
This project then provides caching and API 232 | routes to make that source compatible with R——. 233 | 234 | There are currently two sources available: [Hardcover](https://hardcover.app) 235 | and G——R——. A summary of their differences is below. 236 | 237 | | | G——R—— | Hardcover | 238 | | -- | -- | ------------- | 239 | | Summary | A slightly faster provider which makes all of G——R—— available, including large authors and books not available by default in R——. | Slightly slower and makes _most_ of Hardcover's library available, as long as their metadata includes a G——R—— ID. This is a smaller data set, but it might be preferable due to having fewer "junk" books. | 240 | | New releases? | Supported | Supported | 241 | | Large authors? | Supported | Supported, but authors include only 20 (max) books by default for now. New books can be added by manually searching. | 242 | | Source code | Public | Public | 243 | | Performance | 3RPS (with query batching) | 1RPS (with query batching) | 244 | | Stability | Stable. Nearly identical behavior to official R—— metadata. | Experimental and probably more appropriate for new libraries. ID mappings are likely to not exactly match with existing libraries. Series data likely to be incomplete | 245 | | Hosted instance | `https://api.bookinfo.pro` | Coming soon! | 246 | | Self-hosted image | `blampe/rreading-glasses:latest` | `blampe/rreading-glasses:hardcover` | 247 | 248 | Please consider [supporting](https://hardcover.app/pricing) Hardcover if you 249 | use them as your source. It's $5/month and the work they are doing to break 250 | down the G——R—— monopoly is commendable. 251 | 252 | Postgres is used as a backend but only as a key-value store, unlike the 253 | official server which performs expensive joins in the request path. 254 | Additionally large authors (and books with many editions) are populated 255 | asynchronously. This allows the server to support arbitrarily large resources 256 | without issue. 
257 | 258 | ## Contributing 259 | 260 | This is primarily a personal project that fixes my own workflows. There are 261 | almost certainly edge cases I haven't accounted for, so contributions are very 262 | welcome! 263 | 264 | ### TODO 265 | 266 | - [ ] (Prod) Add Cloudflare client for CDN invalidation. 267 | - [ ] (QOL) Ignore works/editions without publisher to cut down on 268 | self-published ebook slop. 269 | - [ ] (QOL) Update R—— client to send `Accept-Encoding: gzip` headers. 270 | 271 | ## Disclaimer 272 | 273 | This software is provided "as is", without warranty of any kind, express or 274 | implied, including but not limited to the warranties of merchantability, 275 | fitness for a particular purpose and noninfringement. 276 | 277 | In no event shall the authors or copyright holders be liable for any claim, 278 | damages or other liability, whether in an action of contract, tort or 279 | otherwise, arising from, out of or in connection with the software or the use 280 | or other dealings in the software. 281 | 282 | This software is intended for educational and informational purposes only. It 283 | is not intended to, and does not, constitute legal, financial, or professional 284 | advice of any kind. The user of this software assumes all responsibility for 285 | its use or misuse. 286 | 287 | The user is free to use, modify, and distribute the software for any purpose, 288 | subject to the above disclaimers and conditions. 289 | -------------------------------------------------------------------------------- /cmd/rggr/main.go: -------------------------------------------------------------------------------- 1 | // Package main runs a metadata server using G——R—— as an upstream. 
2 | package main 3 | 4 | import ( 5 | "bytes" 6 | "context" 7 | "errors" 8 | "fmt" 9 | "log/slog" 10 | "net/http" 11 | "os" 12 | "os/signal" 13 | "time" 14 | 15 | "github.com/alecthomas/kong" 16 | "github.com/blampe/rreading-glasses/cmd" 17 | "github.com/blampe/rreading-glasses/internal" 18 | "github.com/go-chi/chi/v5/middleware" 19 | ) 20 | 21 | // cli contains our command-line flags. 22 | type cli struct { 23 | Serve server `cmd:"" help:"Run an HTTP server."` 24 | 25 | Bust cmd.Bust `cmd:"" help:"Bust cache entries."` 26 | } 27 | 28 | type server struct { 29 | cmd.PGConfig 30 | cmd.LogConfig 31 | 32 | Port int `default:"8788" env:"PORT" help:"Port to serve traffic on."` 33 | RPM int `default:"60" env:"RPM" help:"Maximum upstream requests per minute."` 34 | Cookie string `required:"" xor:"cookie" env:"COOKIE" help:"Cookie to use for upstream HTTP requests."` 35 | CookieFile []byte `required:"" type:"filecontent" xor:"cookie" env:"COOKIE_FILE" help:"File with the Cookie to use for upstream HTTP requests."` 36 | Proxy string `default:"" env:"PROXY" help:"HTTP proxy URL to use for upstream requests."` 37 | Upstream string `required:"" env:"UPSTREAM" help:"Upstream host (e.g. 
www.example.com)."` 38 | } 39 | 40 | func (s *server) Run() error { 41 | _ = s.LogConfig.Run() 42 | 43 | ctx := context.Background() 44 | cache, err := internal.NewCache(ctx, s.DSN()) 45 | if err != nil { 46 | return fmt.Errorf("setting up cache: %w", err) 47 | } 48 | 49 | if len(s.CookieFile) > 0 { 50 | s.Cookie = string(bytes.TrimSpace(s.CookieFile)) 51 | } 52 | 53 | upstream, err := internal.NewUpstream(s.Upstream, s.Cookie, s.Proxy) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | gql, err := internal.NewGRGQL(ctx, upstream, s.Cookie) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | getter, err := internal.NewGRGetter(cache, gql, upstream) 64 | if err != nil { 65 | return err 66 | } 67 | 68 | ctrl, err := internal.NewController(cache, getter) 69 | if err != nil { 70 | return err 71 | } 72 | h := internal.NewHandler(ctrl) 73 | mux := internal.NewMux(h) 74 | 75 | mux = middleware.RequestSize(1024)(mux) // Limit request bodies. 76 | mux = internal.Requestlogger{}.Wrap(mux) // Log requests. 77 | mux = middleware.RequestID(mux) // Include a request ID header. 78 | mux = middleware.Recoverer(mux) // Recover from panics. 79 | 80 | // TODO: The client doesn't send Accept-Encoding and doesn't handle 81 | // Content-Encoding responses. This would allow us to send compressed bytes 82 | // directly from the cache. 
83 | 84 | addr := fmt.Sprintf(":%d", s.Port) 85 | server := &http.Server{ 86 | Handler: mux, 87 | Addr: addr, 88 | ErrorLog: slog.NewLogLogger(slog.Default().Handler(), slog.LevelError), 89 | } 90 | 91 | go func() { 92 | slog.Info("listening on " + addr) 93 | err := server.ListenAndServe() 94 | if err != nil && !errors.Is(err, http.ErrServerClosed) { 95 | internal.Log(ctx).Error(err.Error()) 96 | os.Exit(1) 97 | } 98 | }() 99 | 100 | shutdown := make(chan os.Signal, 1) 101 | signal.Notify(shutdown, os.Interrupt) 102 | 103 | go func() { 104 | <-shutdown 105 | slog.Info("waiting for denormalization to finish") 106 | ctrl.Shutdown(ctx) 107 | slog.Info("shutting down http server") 108 | _ = server.Shutdown(ctx) 109 | }() 110 | 111 | ctrl.Run(ctx, 2*time.Second) 112 | 113 | slog.Info("au revoir!") 114 | 115 | return nil 116 | } 117 | 118 | func main() { 119 | kctx := kong.Parse(&cli{}) 120 | err := kctx.Run() 121 | if err != nil { 122 | internal.Log(context.Background()).Error("fatal", "err", err) 123 | os.Exit(1) 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /cmd/rghc/main.go: -------------------------------------------------------------------------------- 1 | // Package main runs a metadata server using Hardcover as an upstream. 2 | package main 3 | 4 | import ( 5 | "bytes" 6 | "context" 7 | "errors" 8 | "fmt" 9 | "log/slog" 10 | "net/http" 11 | "os" 12 | "os/signal" 13 | "time" 14 | 15 | "github.com/alecthomas/kong" 16 | "github.com/blampe/rreading-glasses/cmd" 17 | "github.com/blampe/rreading-glasses/internal" 18 | "github.com/go-chi/chi/v5/middleware" 19 | ) 20 | 21 | // cli contains our command-line flags. 
22 | type cli struct { 23 | Serve server `cmd:"" help:"Run an HTTP server."` 24 | 25 | Bust cmd.Bust `cmd:"" help:"Bust cache entries."` 26 | } 27 | 28 | type server struct { 29 | cmd.PGConfig 30 | cmd.LogConfig 31 | 32 | Port int `default:"8788" env:"PORT" help:"Port to serve traffic on."` 33 | RPM int `default:"60" env:"RPM" help:"Maximum upstream requests per minute."` 34 | Cookie string `env:"COOKIE" help:"Cookie to use for upstream HTTP requests."` 35 | Proxy string `default:"" env:"PROXY" help:"HTTP proxy URL to use for upstream requests."` 36 | Upstream string `required:"" env:"UPSTREAM" help:"Upstream host (e.g. www.example.com)."` 37 | 38 | HardcoverAuth string `required:"" env:"HARDCOVER_AUTH" xor:"hardcover-auth" help:"Hardcover Authorization header, e.g. 'Bearer ...'"` 39 | HardcoverAuthFile []byte `required:"" type:"filecontent" xor:"hardcover-auth" env:"HARDCOVER_AUTH_FILE" help:"File containing the Hardcover Authorization header, e.g. 'Bearer ...'"` 40 | } 41 | 42 | func (s *server) Run() error { 43 | _ = s.LogConfig.Run() 44 | 45 | ctx := context.Background() 46 | cache, err := internal.NewCache(ctx, s.DSN()) 47 | if err != nil { 48 | return fmt.Errorf("setting up cache: %w", err) 49 | } 50 | 51 | upstream, err := internal.NewUpstream(s.Upstream, s.Cookie, s.Proxy) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | if len(s.HardcoverAuthFile) > 0 { 57 | s.HardcoverAuth = string(bytes.TrimSpace(s.HardcoverAuthFile)) 58 | } 59 | 60 | hcTransport := internal.ScopedTransport{ 61 | Host: "api.hardcover.app", 62 | RoundTripper: &internal.HeaderTransport{ 63 | Key: "Authorization", 64 | Value: s.HardcoverAuth, 65 | RoundTripper: http.DefaultTransport, 66 | }, 67 | } 68 | 69 | hcClient := &http.Client{Transport: hcTransport} 70 | 71 | gql, err := internal.NewBatchedGraphQLClient("https://api.hardcover.app/v1/graphql", hcClient, time.Second) 72 | if err != nil { 73 | return err 74 | } 75 | 76 | getter, err := internal.NewHardcoverGetter(cache, gql, 
upstream) 77 | if err != nil { 78 | return err 79 | } 80 | 81 | ctrl, err := internal.NewController(cache, getter) 82 | if err != nil { 83 | return err 84 | } 85 | h := internal.NewHandler(ctrl) 86 | mux := internal.NewMux(h) 87 | 88 | mux = middleware.RequestSize(1024)(mux) // Limit request bodies. 89 | mux = internal.Requestlogger{}.Wrap(mux) // Log requests. 90 | mux = middleware.RequestID(mux) // Include a request ID header. 91 | mux = middleware.Recoverer(mux) // Recover from panics. 92 | 93 | // TODO: The client doesn't send Accept-Encoding and doesn't handle 94 | // Content-Encoding responses. This would allow us to send compressed bytes 95 | // directly from the cache. 96 | 97 | addr := fmt.Sprintf(":%d", s.Port) 98 | server := &http.Server{ 99 | Handler: mux, 100 | Addr: addr, 101 | ErrorLog: slog.NewLogLogger(slog.Default().Handler(), slog.LevelError), 102 | } 103 | 104 | go func() { 105 | slog.Info("listening on " + addr) 106 | err := server.ListenAndServe() 107 | if err != nil && !errors.Is(err, http.ErrServerClosed) { 108 | internal.Log(ctx).Error(err.Error()) 109 | os.Exit(1) 110 | } 111 | }() 112 | 113 | shutdown := make(chan os.Signal, 1) 114 | signal.Notify(shutdown, os.Interrupt) 115 | 116 | go func() { 117 | <-shutdown 118 | slog.Info("shutting down http server") 119 | _ = server.Shutdown(ctx) 120 | slog.Info("waiting for denormalization to finish") 121 | ctrl.Shutdown(ctx) 122 | }() 123 | 124 | ctrl.Run(ctx, 2*time.Second) 125 | 126 | slog.Info("au revoir!") 127 | 128 | return nil 129 | } 130 | 131 | func main() { 132 | kctx := kong.Parse(&cli{}) 133 | err := kctx.Run() 134 | if err != nil { 135 | internal.Log(context.Background()).Error("fatal", "err", err) 136 | os.Exit(1) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | // Package cmd contains helpers common to all CLI implementations. 
2 | package cmd 3 | 4 | import ( 5 | "bytes" 6 | "context" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "log/slog" 11 | "path/filepath" 12 | 13 | "github.com/KimMachineGun/automemlimit/memlimit" 14 | "github.com/blampe/rreading-glasses/internal" 15 | charm "github.com/charmbracelet/log" 16 | ) 17 | 18 | // PGConfig configured a PostGres connection. 19 | type PGConfig struct { 20 | PostgresHost string `default:"localhost" env:"POSTGRES_HOST" help:"Postgres host."` 21 | PostgresUser string `default:"postgres" env:"POSTGRES_USER" help:"Postgres user."` 22 | PostgresPassword string `xor:"db-auth" env:"POSTGRES_PASSWORD" help:"Postgres password."` 23 | PostgresPasswordFile []byte `type:"filecontent" xor:"db-auth" env:"POSTGRES_PASSWORD_FILE" help:"File with the Postgres password."` 24 | PostgresPort int `default:"5432" env:"POSTGRES_PORT" help:"Postgres port."` 25 | PostgresDatabase string `default:"rreading-glasses" env:"POSTGRES_DATABASE" help:"Postgres database to use."` 26 | } 27 | 28 | // DSN returns the database's DSN based on the provided flags. 29 | func (c *PGConfig) DSN() string { 30 | if len(c.PostgresPasswordFile) > 0 { 31 | c.PostgresPassword = string(bytes.TrimSpace(c.PostgresPasswordFile)) 32 | } 33 | 34 | // Allow unix sockets. 35 | if filepath.IsAbs(c.PostgresHost) { 36 | return fmt.Sprintf("postgres://%s:%s@/%s?host=%s", 37 | c.PostgresUser, 38 | c.PostgresPassword, 39 | c.PostgresDatabase, 40 | c.PostgresHost, 41 | ) 42 | } 43 | return fmt.Sprintf("postgres://%s:%s@%s:%d/%s", 44 | c.PostgresUser, 45 | c.PostgresPassword, 46 | c.PostgresHost, 47 | c.PostgresPort, 48 | c.PostgresDatabase, 49 | ) 50 | } 51 | 52 | // LogConfig configures logging. 53 | type LogConfig struct { 54 | Verbose bool `env:"VERBOSE" help:"increase log verbosity"` 55 | } 56 | 57 | // Run sets logging to DEBUG if verbose is enabled. 
58 | func (c *LogConfig) Run() error { 59 | if c.Verbose { 60 | internal.SetLogLevel(charm.DebugLevel) 61 | } 62 | return nil 63 | } 64 | 65 | // Bust allows manually busting entries from the CLI. 66 | type Bust struct { 67 | PGConfig 68 | LogConfig 69 | 70 | AuthorID int64 `arg:"" help:"author ID to cache bust"` 71 | } 72 | 73 | // Run busts a cache key. 74 | func (b *Bust) Run() error { 75 | _ = b.LogConfig.Run() 76 | ctx := context.Background() 77 | 78 | cache, err := internal.NewCache(ctx, b.DSN()) 79 | if err != nil { 80 | return err 81 | } 82 | 83 | a, ok := cache.Get(ctx, internal.AuthorKey(b.AuthorID)) 84 | if !ok { 85 | return nil 86 | } 87 | 88 | var author internal.AuthorResource 89 | err = json.Unmarshal(a, &author) 90 | if err != nil { 91 | return err 92 | } 93 | 94 | for _, w := range author.Works { 95 | for _, b := range w.Books { 96 | err = errors.Join(err, cache.Expire(ctx, internal.BookKey(b.ForeignID))) 97 | } 98 | err = errors.Join(err, cache.Expire(ctx, internal.WorkKey(w.ForeignID))) 99 | } 100 | err = errors.Join(err, cache.Expire(ctx, internal.AuthorKey(author.ForeignID))) 101 | 102 | return err 103 | } 104 | 105 | func init() { 106 | // Limit our memory to 90% of what's free. This affects cache sizes. 
107 | _, err := memlimit.SetGoMemLimitWithOpts( 108 | memlimit.WithRatio(0.9), 109 | memlimit.WithLogger(slog.Default()), 110 | memlimit.WithProvider( 111 | memlimit.ApplyFallback( 112 | memlimit.FromCgroup, 113 | memlimit.FromSystem, 114 | ), 115 | ), 116 | ) 117 | if err != nil { 118 | panic(err) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - gr/generated.go 3 | - hardcover/generated.go 4 | - hardcover/mock.go 5 | - internal/mock.go 6 | -------------------------------------------------------------------------------- /docker-compose-gr.yml: -------------------------------------------------------------------------------- 1 | # Docker Compose for rreading-glasses using Goodreads 2 | 3 | services: 4 | rreading-glasses: 5 | depends_on: 6 | rreading-glasses-db: 7 | condition: service_started 8 | image: blampe/rreading-glasses:latest 9 | container_name: rreading-glasses 10 | hostname: rreading-glasses 11 | entrypoint: ["/main", "serve"] 12 | command: 13 | - --upstream=www.goodreads.com 14 | - --verbose 15 | restart: always 16 | environment: 17 | COOKIE: # Your GR cookie. Only used for GR. 
18 | POSTGRES_HOST: rreading-glasses-db 19 | POSTGRES_DATABASE: rreading-glasses 20 | POSTGRES_USER: rreading-glasses 21 | POSTGRES_PASSWORD: # Generate a random string without special symbols 22 | ports: 23 | - "8788:8788" 24 | 25 | rreading-glasses-db: 26 | image: postgres:17 27 | container_name: postgres 28 | restart: always 29 | environment: 30 | POSTGRES_USER: rreading-glasses 31 | POSTGRES_PASSWORD: # Generated password from above 32 | POSTGRES_DB: rreading-glasses 33 | ports: 34 | - "5432:5432" 35 | volumes: 36 | - rreading_glasses_data:/var/lib/postgresql/data 37 | 38 | volumes: 39 | rreading_glasses_data: 40 | -------------------------------------------------------------------------------- /docker-compose-hardcover.yml: -------------------------------------------------------------------------------- 1 | # Docker Compose for rreading-glasses using Hardcover 2 | 3 | services: 4 | rreading-glasses: 5 | depends_on: 6 | rreading-glasses-db: 7 | condition: service_started 8 | image: blampe/rreading-glasses:hardcover 9 | container_name: rreading-glasses 10 | hostname: rreading-glasses 11 | entrypoint: ["/main", "serve"] 12 | command: 13 | - --upstream=hardcover.app 14 | - --verbose 15 | restart: unless-stopped 16 | environment: 17 | HARDCOVER_AUTH: # Only used for Hardcover. 
Starts with Bearer 18 | POSTGRES_HOST: rreading-glasses-db 19 | POSTGRES_DATABASE: rreading-glasses 20 | POSTGRES_USER: rreading-glasses 21 | POSTGRES_PASSWORD: # Generate a random string without special symbols 22 | ports: 23 | - "8788:8788" 24 | 25 | rreading-glasses-db: 26 | image: postgres:17 27 | container_name: rreading-glasses-db 28 | hostname: rreading-glasses-db 29 | restart: unless-stopped 30 | environment: 31 | POSTGRES_USER: rreading-glasses 32 | POSTGRES_PASSWORD: # Generated password from above 33 | POSTGRES_DB: rreading-glasses 34 | ports: 35 | - "5432:5432" 36 | volumes: 37 | - rreading_glasses_data:/var/lib/postgresql/data 38 | 39 | volumes: 40 | rreading_glasses_data: 41 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/blampe/rreading-glasses 2 | 3 | go 1.24.2 4 | 5 | require ( 6 | github.com/Khan/genqlient v0.8.1 7 | github.com/KimMachineGun/automemlimit v0.7.2 8 | github.com/alecthomas/kong v1.11.0 9 | github.com/antchfx/htmlquery v1.3.4 10 | github.com/charmbracelet/lipgloss v1.1.0 11 | github.com/charmbracelet/log v0.4.2 12 | github.com/dgraph-io/ristretto/v2 v2.2.0 13 | github.com/go-chi/chi/v5 v5.2.0 14 | github.com/google/uuid v1.6.0 15 | github.com/graphql-go/graphql v0.8.2-0.20241012201702-a546af7e957c 16 | github.com/jackc/pgx/v5 v5.7.5 17 | github.com/mattn/go-isatty v0.0.20 18 | github.com/microcosm-cc/bluemonday v1.0.27 19 | github.com/stretchr/testify v1.10.0 20 | github.com/vektah/gqlparser/v2 v2.5.22 21 | go.uber.org/mock v0.5.2 22 | go.uber.org/zap v1.27.0 23 | golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 24 | golang.org/x/net v0.40.0 25 | golang.org/x/oauth2 v0.23.0 26 | golang.org/x/sync v0.14.0 27 | golang.org/x/time v0.8.0 28 | ) 29 | 30 | require ( 31 | github.com/antchfx/xpath v1.3.3 // indirect 32 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect 33 | 
github.com/aymerick/douceur v0.2.0 // indirect 34 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 35 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect 36 | github.com/charmbracelet/x/ansi v0.8.0 // indirect 37 | github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect 38 | github.com/charmbracelet/x/term v0.2.1 // indirect 39 | github.com/davecgh/go-spew v1.1.1 // indirect 40 | github.com/dustin/go-humanize v1.0.1 // indirect 41 | github.com/go-logfmt/logfmt v0.6.0 // indirect 42 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 43 | github.com/gorilla/css v1.0.1 // indirect 44 | github.com/jackc/pgpassfile v1.0.0 // indirect 45 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 46 | github.com/jackc/puddle/v2 v2.2.2 // indirect 47 | github.com/kr/pretty v0.3.1 // indirect 48 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 49 | github.com/mattn/go-runewidth v0.0.16 // indirect 50 | github.com/muesli/termenv v0.16.0 // indirect 51 | github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect 52 | github.com/pmezard/go-difflib v1.0.0 // indirect 53 | github.com/rivo/uniseg v0.4.7 // indirect 54 | github.com/rogpeppe/go-internal v1.13.1 // indirect 55 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect 56 | golang.org/x/crypto v0.38.0 // indirect 57 | golang.org/x/mod v0.21.0 // indirect 58 | golang.org/x/sys v0.33.0 // indirect 59 | golang.org/x/text v0.25.0 // indirect 60 | golang.org/x/tools v0.25.0 // indirect 61 | gopkg.in/yaml.v3 v3.0.1 // indirect 62 | ) 63 | 64 | tool go.uber.org/mock/mockgen 65 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Khan/genqlient v0.8.1 h1:wtOCc8N9rNynRLXN3k3CnfzheCUNKBcvXmVv5zt6WCs= 2 | github.com/Khan/genqlient v0.8.1/go.mod 
h1:R2G6DzjBvCbhjsEajfRjbWdVglSH/73kSivC9TLWVjU= 3 | github.com/KimMachineGun/automemlimit v0.7.2 h1:DyfHI7zLWmZPn2Wqdy2AgTiUvrGPmnYWgwhHXtAegX4= 4 | github.com/KimMachineGun/automemlimit v0.7.2/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= 5 | github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= 6 | github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= 7 | github.com/alecthomas/kong v1.11.0 h1:y++1gI7jf8O7G7l4LZo5ASFhrhJvzc+WgF/arranEmM= 8 | github.com/alecthomas/kong v1.11.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU= 9 | github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= 10 | github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= 11 | github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= 12 | github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= 13 | github.com/antchfx/htmlquery v1.3.4 h1:Isd0srPkni2iNTWCwVj/72t7uCphFeor5Q8nCzj1jdQ= 14 | github.com/antchfx/htmlquery v1.3.4/go.mod h1:K9os0BwIEmLAvTqaNSua8tXLWRWZpocZIH73OzWQbwM= 15 | github.com/antchfx/xpath v1.3.3 h1:tmuPQa1Uye0Ym1Zn65vxPgfltWb/Lxu2jeqIGteJSRs= 16 | github.com/antchfx/xpath v1.3.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= 17 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= 18 | github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= 19 | github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= 20 | github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= 21 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 22 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 23 | 
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= 24 | github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= 25 | github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= 26 | github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= 27 | github.com/charmbracelet/log v0.4.2 h1:hYt8Qj6a8yLnvR+h7MwsJv/XvmBJXiueUcI3cIxsyig= 28 | github.com/charmbracelet/log v0.4.2/go.mod h1:qifHGX/tc7eluv2R6pWIpyHDDrrb/AG71Pf2ysQu5nw= 29 | github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= 30 | github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= 31 | github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= 32 | github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= 33 | github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= 34 | github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= 35 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 36 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 37 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 38 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 39 | github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= 40 | github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= 41 | github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= 42 | github.com/dgryski/go-farm 
v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= 43 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 44 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 45 | github.com/go-chi/chi/v5 v5.2.0 h1:Aj1EtB0qR2Rdo2dG4O94RIU35w2lvQSj6BRA4+qwFL0= 46 | github.com/go-chi/chi/v5 v5.2.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= 47 | github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= 48 | github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= 49 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= 50 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 51 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 52 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 53 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 54 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 55 | github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= 56 | github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= 57 | github.com/graphql-go/graphql v0.8.2-0.20241012201702-a546af7e957c h1:44ZJpsMJdHDa3fsY/2ZpSPpFCJoaaKgez7ILHRHnRSU= 58 | github.com/graphql-go/graphql v0.8.2-0.20241012201702-a546af7e957c/go.mod h1:nKiHzRM0qopJEwCITUuIsxk9PlVlwIiiI8pnJEhordQ= 59 | github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= 60 | github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= 61 | github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= 62 | github.com/jackc/pgpassfile v1.0.0/go.mod 
h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= 63 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= 64 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= 65 | github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= 66 | github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= 67 | github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= 68 | github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= 69 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 70 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 71 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 72 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 73 | github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= 74 | github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= 75 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 76 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 77 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= 78 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 79 | github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= 80 | github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= 81 | github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= 82 | github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= 83 | github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 
h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= 84 | github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= 85 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 86 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 87 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 88 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 89 | github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= 90 | github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 91 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 92 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 93 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 94 | github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= 95 | github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= 96 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 97 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 98 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 99 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 100 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 101 | github.com/vektah/gqlparser/v2 v2.5.22 h1:yaaeJ0fu+nv1vUMW0Hl+aS1eiv1vMfapBNjpffAda1I= 102 | github.com/vektah/gqlparser/v2 v2.5.22/go.mod h1:xMl+ta8a5M1Yo1A1Iwt/k7gSpscwSnHZdw7tfhEGfTM= 103 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= 104 | 
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= 105 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 106 | go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= 107 | go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= 108 | go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= 109 | go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 110 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 111 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 112 | golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= 113 | golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= 114 | golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= 115 | golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= 116 | golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= 117 | golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= 118 | golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= 119 | golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= 120 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 121 | golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 122 | golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 123 | golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= 124 | golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= 125 | golang.org/x/mod 
v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= 126 | golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= 127 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 128 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 129 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 130 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 131 | golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= 132 | golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= 133 | golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= 134 | golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= 135 | golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= 136 | golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= 137 | golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= 138 | golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= 139 | golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= 140 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 141 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 142 | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 143 | golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= 144 | golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 145 | golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 146 | golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 147 | 
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= 148 | golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= 149 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 150 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 151 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 152 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 153 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 154 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 155 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 156 | golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 157 | golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 158 | golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 159 | golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 160 | golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 161 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= 162 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 163 | golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= 164 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 165 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 166 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 167 | golang.org/x/term v0.8.0/go.mod 
h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= 168 | golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= 169 | golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= 170 | golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= 171 | golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= 172 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 173 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 174 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 175 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 176 | golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 177 | golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 178 | golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 179 | golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 180 | golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= 181 | golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= 182 | golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= 183 | golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= 184 | golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 185 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 186 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 187 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 188 | golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 189 | golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= 
190 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= 191 | golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= 192 | golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= 193 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 194 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 195 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 196 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 197 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 198 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 199 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 200 | -------------------------------------------------------------------------------- /gr/gen.go: -------------------------------------------------------------------------------- 1 | //go:generate go run github.com/Khan/genqlient@v0.7.0 2 | 3 | // Package gr contains our G——R—— GQL schema and helpers. 
4 | package gr 5 | -------------------------------------------------------------------------------- /gr/genqlient.yaml: -------------------------------------------------------------------------------- 1 | # Default genqlient config; for full documentation see: 2 | # https://github.com/Khan/genqlient/blob/main/docs/genqlient.yaml 3 | schema: schema.graphql 4 | operations: 5 | - queries.graphql 6 | package: gr 7 | generated: generated.go 8 | bindings: 9 | Int: 10 | type: int64 11 | -------------------------------------------------------------------------------- /gr/queries.graphql: -------------------------------------------------------------------------------- 1 | query GetBook($legacyId: Int!) { 2 | getBookByLegacyId(legacyId: $legacyId) { 3 | id 4 | legacyId 5 | description(stripped: true) 6 | bookGenres { 7 | genre { 8 | name 9 | } 10 | } 11 | bookSeries { 12 | series { 13 | id 14 | title 15 | webUrl 16 | } 17 | seriesPlacement 18 | } 19 | details { 20 | asin 21 | isbn13 22 | format 23 | numPages 24 | language { 25 | name 26 | } 27 | officialUrl 28 | publisher 29 | publicationTime 30 | } 31 | imageUrl 32 | primaryContributorEdge { 33 | node { 34 | id 35 | name 36 | legacyId 37 | webUrl 38 | profileImageUrl 39 | description 40 | } 41 | } 42 | stats { 43 | averageRating 44 | ratingsCount 45 | ratingsSum 46 | } 47 | title 48 | titlePrimary 49 | webUrl 50 | 51 | work { 52 | id 53 | legacyId 54 | details { 55 | webUrl 56 | publicationTime 57 | } 58 | bestBook { 59 | legacyId 60 | title 61 | titlePrimary 62 | } 63 | editions { 64 | edges { 65 | node { 66 | legacyId 67 | title 68 | details { 69 | language { 70 | name 71 | } 72 | } 73 | } 74 | } 75 | } 76 | } 77 | } 78 | } 79 | 80 | query GetAuthorWorks( 81 | $getWorksByContributorInput: GetWorksByContributorInput! 82 | $pagination: PaginationInput! 
83 | ) { 84 | getWorksByContributor( 85 | getWorksByContributorInput: $getWorksByContributorInput 86 | pagination: $pagination 87 | ) { 88 | edges { 89 | node { 90 | # legacyId - causes an error 91 | id 92 | bestBook { 93 | legacyId 94 | primaryContributorEdge { 95 | role 96 | node { 97 | legacyId 98 | } 99 | } 100 | secondaryContributorEdges { 101 | role 102 | } 103 | } 104 | } 105 | } 106 | pageInfo { 107 | hasNextPage 108 | nextPageToken 109 | } 110 | } 111 | } 112 | 113 | query GetEditions($workId: ID!, $pagination: PaginationInput!) { 114 | getEditions(id: $workId, pagination: $pagination) { 115 | edges { 116 | node { 117 | id 118 | legacyId 119 | } 120 | } 121 | pageInfo { 122 | hasNextPage 123 | nextPageToken 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /hardcover/doc.go: -------------------------------------------------------------------------------- 1 | // Package hardcover implements GraphQL clients for fetching book information 2 | // from the hardcover.app API. 3 | // 4 | //go:generate go run github.com/Khan/genqlient@v0.7.0 5 | package hardcover 6 | -------------------------------------------------------------------------------- /hardcover/genqlient.yaml: -------------------------------------------------------------------------------- 1 | schema: schema.graphql 2 | operations: 3 | - queries.graphql 4 | package: hardcover 5 | generated: generated.go 6 | bindings: 7 | Int: 8 | type: int64 9 | date: 10 | type: string 11 | jsonb: 12 | type: encoding/json.RawMessage 13 | json: 14 | type: encoding/json.RawMessage 15 | float8: 16 | type: float32 17 | numeric: 18 | type: float64 19 | -------------------------------------------------------------------------------- /hardcover/mock.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 
2 | // Source: hardcover_test.go 3 | // 4 | // Generated by this command: 5 | // 6 | // mockgen -typed -source hardcover_test.go -package hardcover -destination hardcover/mock.go . gql 7 | // 8 | 9 | // Package hardcover is a generated GoMock package. 10 | package hardcover 11 | 12 | import ( 13 | context "context" 14 | http "net/http" 15 | reflect "reflect" 16 | 17 | graphql "github.com/Khan/genqlient/graphql" 18 | gomock "go.uber.org/mock/gomock" 19 | ) 20 | 21 | // Mockgql is a mock of gql interface. 22 | type Mockgql struct { 23 | ctrl *gomock.Controller 24 | recorder *MockgqlMockRecorder 25 | } 26 | 27 | // MockgqlMockRecorder is the mock recorder for Mockgql. 28 | type MockgqlMockRecorder struct { 29 | mock *Mockgql 30 | } 31 | 32 | // NewMockgql creates a new mock instance. 33 | func NewMockgql(ctrl *gomock.Controller) *Mockgql { 34 | mock := &Mockgql{ctrl: ctrl} 35 | mock.recorder = &MockgqlMockRecorder{mock} 36 | return mock 37 | } 38 | 39 | // EXPECT returns an object that allows the caller to indicate expected use. 40 | func (m *Mockgql) EXPECT() *MockgqlMockRecorder { 41 | return m.recorder 42 | } 43 | 44 | // MakeRequest mocks base method. 45 | func (m *Mockgql) MakeRequest(ctx context.Context, req *graphql.Request, resp *graphql.Response) error { 46 | m.ctrl.T.Helper() 47 | ret := m.ctrl.Call(m, "MakeRequest", ctx, req, resp) 48 | ret0, _ := ret[0].(error) 49 | return ret0 50 | } 51 | 52 | // MakeRequest indicates an expected call of MakeRequest. 
53 | func (mr *MockgqlMockRecorder) MakeRequest(ctx, req, resp any) *MockgqlMakeRequestCall { 54 | mr.mock.ctrl.T.Helper() 55 | call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MakeRequest", reflect.TypeOf((*Mockgql)(nil).MakeRequest), ctx, req, resp) 56 | return &MockgqlMakeRequestCall{Call: call} 57 | } 58 | 59 | // MockgqlMakeRequestCall wrap *gomock.Call 60 | type MockgqlMakeRequestCall struct { 61 | *gomock.Call 62 | } 63 | 64 | // Return rewrite *gomock.Call.Return 65 | func (c *MockgqlMakeRequestCall) Return(arg0 error) *MockgqlMakeRequestCall { 66 | c.Call = c.Call.Return(arg0) 67 | return c 68 | } 69 | 70 | // Do rewrite *gomock.Call.Do 71 | func (c *MockgqlMakeRequestCall) Do(f func(context.Context, *graphql.Request, *graphql.Response) error) *MockgqlMakeRequestCall { 72 | c.Call = c.Call.Do(f) 73 | return c 74 | } 75 | 76 | // DoAndReturn rewrite *gomock.Call.DoAndReturn 77 | func (c *MockgqlMakeRequestCall) DoAndReturn(f func(context.Context, *graphql.Request, *graphql.Response) error) *MockgqlMakeRequestCall { 78 | c.Call = c.Call.DoAndReturn(f) 79 | return c 80 | } 81 | 82 | // Mocktransport is a mock of transport interface. 83 | type Mocktransport struct { 84 | ctrl *gomock.Controller 85 | recorder *MocktransportMockRecorder 86 | } 87 | 88 | // MocktransportMockRecorder is the mock recorder for Mocktransport. 89 | type MocktransportMockRecorder struct { 90 | mock *Mocktransport 91 | } 92 | 93 | // NewMocktransport creates a new mock instance. 94 | func NewMocktransport(ctrl *gomock.Controller) *Mocktransport { 95 | mock := &Mocktransport{ctrl: ctrl} 96 | mock.recorder = &MocktransportMockRecorder{mock} 97 | return mock 98 | } 99 | 100 | // EXPECT returns an object that allows the caller to indicate expected use. 101 | func (m *Mocktransport) EXPECT() *MocktransportMockRecorder { 102 | return m.recorder 103 | } 104 | 105 | // RoundTrip mocks base method. 
106 | func (m *Mocktransport) RoundTrip(arg0 *http.Request) (*http.Response, error) { 107 | m.ctrl.T.Helper() 108 | ret := m.ctrl.Call(m, "RoundTrip", arg0) 109 | ret0, _ := ret[0].(*http.Response) 110 | ret1, _ := ret[1].(error) 111 | return ret0, ret1 112 | } 113 | 114 | // RoundTrip indicates an expected call of RoundTrip. 115 | func (mr *MocktransportMockRecorder) RoundTrip(arg0 any) *MocktransportRoundTripCall { 116 | mr.mock.ctrl.T.Helper() 117 | call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoundTrip", reflect.TypeOf((*Mocktransport)(nil).RoundTrip), arg0) 118 | return &MocktransportRoundTripCall{Call: call} 119 | } 120 | 121 | // MocktransportRoundTripCall wrap *gomock.Call 122 | type MocktransportRoundTripCall struct { 123 | *gomock.Call 124 | } 125 | 126 | // Return rewrite *gomock.Call.Return 127 | func (c *MocktransportRoundTripCall) Return(arg0 *http.Response, arg1 error) *MocktransportRoundTripCall { 128 | c.Call = c.Call.Return(arg0, arg1) 129 | return c 130 | } 131 | 132 | // Do rewrite *gomock.Call.Do 133 | func (c *MocktransportRoundTripCall) Do(f func(*http.Request) (*http.Response, error)) *MocktransportRoundTripCall { 134 | c.Call = c.Call.Do(f) 135 | return c 136 | } 137 | 138 | // DoAndReturn rewrite *gomock.Call.DoAndReturn 139 | func (c *MocktransportRoundTripCall) DoAndReturn(f func(*http.Request) (*http.Response, error)) *MocktransportRoundTripCall { 140 | c.Call = c.Call.DoAndReturn(f) 141 | return c 142 | } 143 | -------------------------------------------------------------------------------- /hardcover/queries.graphql: -------------------------------------------------------------------------------- 1 | query GetBook($grBookID: String!) 
{ 2 | book_mappings( 3 | limit: 1 4 | where: { platform_id: { _eq: 1 }, external_id: { _eq: $grBookID } } 5 | ) { 6 | external_id 7 | edition { 8 | id 9 | title 10 | subtitle 11 | asin 12 | isbn_13 13 | edition_format 14 | pages 15 | audio_seconds 16 | language { 17 | language 18 | } 19 | publisher { 20 | name 21 | } 22 | release_date 23 | description 24 | identifiers 25 | book_id 26 | } 27 | book { 28 | id 29 | title 30 | subtitle 31 | description 32 | release_date 33 | cached_tags(path: "$.Genre") 34 | cached_image(path: "url") 35 | contributions { 36 | contributable_type 37 | contribution 38 | author { 39 | id 40 | name 41 | slug 42 | bio 43 | cached_image(path: "url") 44 | } 45 | } 46 | slug 47 | book_series { 48 | position 49 | series { 50 | id 51 | name 52 | description 53 | identifiers 54 | } 55 | } 56 | book_mappings { 57 | dto_external 58 | } 59 | rating 60 | ratings_count 61 | } 62 | } 63 | } 64 | 65 | query GetAuthor($id: Int!) { 66 | authors_by_pk(id: $id) { 67 | location 68 | slug 69 | } 70 | } 71 | 72 | query GetAuthorEditions($id: Int!, $limit: Int!, $offset: Int!) 
{ 73 | authors(limit: 1, where: { id: { _eq: $id } }) { 74 | location 75 | id 76 | slug 77 | contributions( 78 | limit: $limit 79 | offset: $offset 80 | order_by: { id: asc } 81 | where: { contributable_type: { _eq: "Book" } } 82 | ) { 83 | book { 84 | id 85 | title 86 | ratings_count 87 | book_mappings(limit: 1, where: { platform_id: { _eq: 1 } }) { 88 | book_id 89 | edition_id 90 | external_id 91 | } 92 | } 93 | } 94 | identifiers(path: "goodreads[0]") 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /internal/cache.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log/slog" 8 | "sync/atomic" 9 | "time" 10 | ) 11 | 12 | type cache[T any] interface { 13 | Get(ctx context.Context, key string) (T, bool) 14 | GetWithTTL(ctx context.Context, key string) (T, time.Duration, bool) 15 | Set(ctx context.Context, key string, value T, ttl time.Duration) 16 | Expire(ctx context.Context, key string) error 17 | } 18 | 19 | // LayeredCache implements a simple tiered cache. In practice we use an 20 | // in-memory cache backed by Postgres for persistent storage. Hits at lower 21 | // layers are automatically percolated up. Values are compressed with gzip at 22 | // rest. 23 | // 24 | // cache.ChainCache has inconsistent marshaling behavior, so we use our own 25 | // wrapper. Actually that package doesn't really buy us anything... 26 | type LayeredCache struct { 27 | hits atomic.Int64 28 | misses atomic.Int64 29 | 30 | wrapped []cache[[]byte] 31 | } 32 | 33 | var _ cache[[]byte] = (*LayeredCache)(nil) 34 | 35 | // GetWithTTL returns the cached value and its TTL. The boolean returned is 36 | // false if no value was found. 
37 | func (c *LayeredCache) GetWithTTL(ctx context.Context, key string) ([]byte, time.Duration, bool) { 38 | var val []byte 39 | var ttl time.Duration 40 | var ok bool 41 | 42 | for _, cc := range c.wrapped { 43 | val, ttl, ok = cc.GetWithTTL(ctx, key) 44 | if !ok { 45 | // Percolate the value back up if we eventually find it. 46 | defer func(cc cache[[]byte]) { 47 | if val == nil { 48 | return 49 | } 50 | cc.Set(ctx, key, val, ttl) 51 | }(cc) 52 | continue 53 | } 54 | 55 | _ = c.hits.Add(1) 56 | 57 | return val, ttl, true 58 | } 59 | 60 | _ = c.misses.Add(1) 61 | 62 | return nil, 0, false 63 | } 64 | 65 | // Get returns a cache value, if it exists, and a boolean if a value was found. 66 | func (c *LayeredCache) Get(ctx context.Context, key string) ([]byte, bool) { 67 | val, _, ok := c.GetWithTTL(ctx, key) 68 | return val, ok 69 | } 70 | 71 | // Expire expires a key from all layers of the cache. This removes it from 72 | // memory but keeps data persisted in Postgres without a TTL. 73 | func (c *LayeredCache) Expire(ctx context.Context, key string) error { 74 | var err error 75 | for _, cc := range c.wrapped { 76 | err = errors.Join(cc.Expire(ctx, key)) 77 | } 78 | return err 79 | } 80 | 81 | // Set a key/value in all layers of the cache. 82 | // TODO: Fuzz expiration 83 | func (c *LayeredCache) Set(ctx context.Context, key string, val []byte, ttl time.Duration) { 84 | if len(val) == 0 { 85 | Log(ctx).Warn("refusing to set empty value", "key", key) 86 | return 87 | } 88 | if ttl == 0 { 89 | Log(ctx).Warn("refusing to set zero ttl", "key", key) 90 | return 91 | } 92 | 93 | // TODO: We can offload the DB write to a background goroutine to speed 94 | // things up. 95 | for _, cc := range c.wrapped { 96 | cc.Set(ctx, key, val, ttl) 97 | } 98 | } 99 | 100 | // NewCache constructs a new layered cache. 
101 | func NewCache(ctx context.Context, dsn string) (*LayeredCache, error) { 102 | m := newMemoryCache() 103 | pg, err := newPostgres(ctx, dsn) 104 | if err != nil { 105 | return nil, err 106 | } 107 | c := &LayeredCache{wrapped: []cache[[]byte]{m, pg}} 108 | 109 | // Log cache stats every minute. 110 | go func() { 111 | for { 112 | time.Sleep(1 * time.Minute) 113 | hits, misses := c.hits.Load(), c.misses.Load() 114 | Log(ctx).LogAttrs(ctx, slog.LevelDebug, "cache stats", 115 | slog.Int64("hits", hits), 116 | slog.Int64("misses", misses), 117 | slog.Float64("ratio", float64(hits)/(float64(hits)+float64(misses))), 118 | ) 119 | } 120 | }() 121 | 122 | return c, nil 123 | } 124 | 125 | // WorkKey returns a cache key for a work ID. 126 | func WorkKey(workID int64) string { 127 | return fmt.Sprintf("w%d", workID) 128 | } 129 | 130 | // BookKey returns a cache key for a book (edition) ID. 131 | func BookKey(bookID int64) string { 132 | return fmt.Sprintf("b%d", bookID) 133 | } 134 | 135 | // AuthorKey returns a cache key for an author ID. 
136 | func AuthorKey(authorID int64) string { 137 | return fmt.Sprintf("a%d", authorID) 138 | } 139 | -------------------------------------------------------------------------------- /internal/cache_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestCache(t *testing.T) { 12 | ctx := context.Background() 13 | c0 := newMemoryCache() 14 | c1 := newMemoryCache() 15 | 16 | l := &LayeredCache{wrapped: []cache[[]byte]{c0, c1}} 17 | 18 | t.Run("miss", func(t *testing.T) { 19 | out, ok := l.Get(ctx, "miss") 20 | assert.False(t, ok) 21 | assert.Nil(t, out) 22 | }) 23 | 24 | t.Run("percolation", func(t *testing.T) { 25 | key := "c0-miss" 26 | val := []byte(key) 27 | 28 | // Only c1 starts with the entry, 29 | c1.Set(ctx, key, val, time.Hour) 30 | 31 | out, ok := l.Get(ctx, key) 32 | assert.True(t, ok) 33 | assert.Equal(t, val, out) 34 | 35 | // c0 now has it. 36 | out, ttl, ok := c0.GetWithTTL(ctx, key) 37 | assert.True(t, ok) 38 | assert.Equal(t, val, out) 39 | assert.Greater(t, ttl, time.Minute) 40 | }) 41 | 42 | t.Run("set-get", func(t *testing.T) { 43 | key := "set-get" 44 | val := []byte(key) 45 | 46 | l.Set(ctx, key, val, time.Hour) 47 | 48 | out, ok := c0.Get(ctx, key) 49 | assert.True(t, ok) 50 | assert.Equal(t, val, out) 51 | 52 | out, ok = c1.Get(ctx, key) 53 | assert.True(t, ok) 54 | assert.Equal(t, val, out) 55 | 56 | out, ok = l.Get(ctx, key) 57 | assert.True(t, ok) 58 | assert.Equal(t, val, out) 59 | }) 60 | } 61 | -------------------------------------------------------------------------------- /internal/controller_test.go: -------------------------------------------------------------------------------- 1 | //go:generate go run go.uber.org/mock/mockgen -typed -source controller.go -package internal -destination mock.go . 
getter 2 | 3 | package internal 4 | 5 | import ( 6 | "context" 7 | "encoding/json" 8 | "iter" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | "go.uber.org/mock/gomock" 15 | ) 16 | 17 | func TestIncrementalDenormalization(t *testing.T) { 18 | // Looking up foreign editions should update relevant works to include 19 | // those editions, and authors should be updated to reflect the new works. 20 | t.Parallel() 21 | 22 | ctx := context.Background() 23 | c := gomock.NewController(t) 24 | getter := NewMockgetter(c) 25 | 26 | work := workResource{ForeignID: 1} 27 | 28 | englishEdition := bookResource{ForeignID: 100, Language: "en"} 29 | frenchEdition := bookResource{ForeignID: 200, Language: "fr"} 30 | work.Books = []bookResource{englishEdition} 31 | 32 | authorID := int64(1000) 33 | author := AuthorResource{ForeignID: authorID, Works: []workResource{work}} 34 | 35 | work.Authors = []AuthorResource{author} 36 | 37 | initialAuthorBytes, err := json.Marshal(author) 38 | require.NoError(t, err) 39 | initialWorkBytes, err := json.Marshal(work) 40 | require.NoError(t, err) 41 | frenchEditionBytes, err := json.Marshal(workResource{ForeignID: work.ForeignID, Books: []bookResource{frenchEdition}}) 42 | require.NoError(t, err) 43 | englishEditionBytes, err := json.Marshal(workResource{ForeignID: work.ForeignID, Books: []bookResource{englishEdition}}) 44 | require.NoError(t, err) 45 | 46 | cache := newMemoryCache() 47 | 48 | ctrl, err := NewController(cache, getter) 49 | require.NoError(t, err) 50 | 51 | go func() { 52 | ctrl.Run(ctx, 0) 53 | }() 54 | 55 | // TODO: Generalize this into a test helper. 
56 | getter.EXPECT().GetAuthor(gomock.Any(), author.ForeignID).DoAndReturn(func(ctx context.Context, authorID int64) ([]byte, error) { 57 | cachedBytes, ok := ctrl.cache.Get(ctx, AuthorKey(authorID)) 58 | if ok { 59 | return cachedBytes, nil 60 | } 61 | return initialAuthorBytes, nil 62 | }).AnyTimes() 63 | 64 | getter.EXPECT().GetBook(gomock.Any(), englishEdition.ForeignID, nil).DoAndReturn(func(ctx context.Context, bookID int64, loadEditions editionsCallback) ([]byte, int64, int64, error) { 65 | cachedBytes, ok := ctrl.cache.Get(ctx, BookKey(bookID)) 66 | if ok { 67 | return cachedBytes, 0, 0, nil 68 | } 69 | return englishEditionBytes, work.ForeignID, authorID, nil 70 | }).AnyTimes() 71 | 72 | getter.EXPECT().GetBook(gomock.Any(), frenchEdition.ForeignID, nil).DoAndReturn(func(ctx context.Context, bookID int64, loadEditions editionsCallback) ([]byte, int64, int64, error) { 73 | cachedBytes, ok := ctrl.cache.Get(ctx, BookKey(bookID)) 74 | if ok { 75 | return cachedBytes, 0, 0, nil 76 | } 77 | return frenchEditionBytes, work.ForeignID, authorID, nil 78 | }).AnyTimes() 79 | 80 | getter.EXPECT().GetWork(gomock.Any(), work.ForeignID, nil).DoAndReturn(func(ctx context.Context, workID int64, loadEditions editionsCallback) ([]byte, int64, error) { 81 | cachedBytes, ok := ctrl.cache.Get(ctx, WorkKey(workID)) 82 | if ok { 83 | return cachedBytes, 0, nil 84 | } 85 | return initialWorkBytes, author.ForeignID, nil 86 | }).AnyTimes() 87 | 88 | getter.EXPECT().GetAuthorBooks(gomock.Any(), authorID).Return( 89 | func(yield func(int64) bool) { 90 | if !yield(englishEdition.ForeignID) { 91 | return 92 | } 93 | if !yield(frenchEdition.ForeignID) { 94 | return 95 | } 96 | }, 97 | ).AnyTimes() 98 | 99 | // Getting the author will initially return it with only the "best" original-language edition. 
100 | authorBytes, err := ctrl.GetAuthor(ctx, author.ForeignID) 101 | require.NoError(t, err) 102 | 103 | require.NoError(t, json.Unmarshal(authorBytes, &author)) 104 | 105 | assert.Len(t, author.Works, 1) 106 | assert.Equal(t, englishEdition.ForeignID, author.Works[0].Books[0].ForeignID) 107 | 108 | // Getting a foreign edition should add it to the work. 109 | _, err = ctrl.GetBook(ctx, frenchEdition.ForeignID) 110 | require.NoError(t, err) 111 | 112 | time.Sleep(100 * time.Millisecond) // Wait for the denormalization goroutine update things. 113 | 114 | workBytes, err := ctrl.GetWork(ctx, work.ForeignID) 115 | require.NoError(t, err) 116 | var w workResource 117 | require.NoError(t, json.Unmarshal(workBytes, &w)) 118 | assert.Len(t, w.Books, 2) 119 | 120 | // The work should have also been updated on the author. 121 | authorBytes, err = ctrl.GetAuthor(ctx, author.ForeignID) 122 | require.NoError(t, err) 123 | require.NoError(t, json.Unmarshal(authorBytes, &author)) 124 | assert.Len(t, author.Works, 1) 125 | assert.Len(t, author.Works[0].Books, 2) 126 | assert.Equal(t, englishEdition.ForeignID, author.Works[0].Books[0].ForeignID) 127 | assert.Equal(t, frenchEdition.ForeignID, author.Works[0].Books[1].ForeignID) 128 | 129 | // Force a cache miss to re-trigger denormalization. 130 | _ = ctrl.cache.Expire(ctx, BookKey(frenchEdition.ForeignID)) 131 | _, _ = ctrl.GetBook(ctx, frenchEdition.ForeignID) 132 | 133 | time.Sleep(100 * time.Millisecond) // Wait for the denormalization goroutine update things. 134 | 135 | workBytes, err = ctrl.GetWork(ctx, work.ForeignID) 136 | require.NoError(t, err) 137 | require.NoError(t, json.Unmarshal(workBytes, &w)) 138 | assert.Len(t, w.Books, 2) 139 | 140 | authorBytes, err = ctrl.GetAuthor(ctx, author.ForeignID) 141 | require.NoError(t, err) 142 | require.NoError(t, json.Unmarshal(authorBytes, &author)) 143 | assert.Len(t, author.Works[0].Books, 2) 144 | 145 | // Force an author cache miss to re-trigger denormalization. 
146 | _ = ctrl.cache.Expire(ctx, AuthorKey(author.ForeignID)) 147 | _, _ = ctrl.GetAuthor(ctx, author.ForeignID) 148 | 149 | time.Sleep(100 * time.Millisecond) // Wait for the denormalization goroutine update things. 150 | 151 | authorBytes, err = ctrl.GetAuthor(ctx, author.ForeignID) 152 | require.NoError(t, err) 153 | require.NoError(t, json.Unmarshal(authorBytes, &author)) 154 | assert.Len(t, author.Works[0].Books, 2) 155 | } 156 | 157 | func TestDenormalizeMissing(t *testing.T) { 158 | // Denormalizing relationships on objects that are missing should no-op. 159 | ctx := context.Background() 160 | 161 | authorID := int64(1) 162 | workID := int64(2) 163 | bookID := int64(3) 164 | 165 | cache := newMemoryCache() 166 | 167 | notFoundGetter := NewMockgetter(gomock.NewController(t)) 168 | notFoundGetter.EXPECT().GetAuthor(gomock.Any(), authorID).Return(nil, errNotFound).AnyTimes() 169 | notFoundGetter.EXPECT().GetWork(gomock.Any(), workID, nil).Return(nil, 0, errNotFound).AnyTimes() 170 | 171 | ctrl, err := NewController(cache, notFoundGetter) 172 | require.NoError(t, err) 173 | 174 | err = ctrl.denormalizeEditions(ctx, workID, bookID) 175 | assert.ErrorIs(t, err, errNotFound) 176 | 177 | err = ctrl.denormalizeWorks(ctx, authorID, workID) 178 | assert.ErrorIs(t, err, errNotFound) 179 | } 180 | 181 | func TestSubtitles(t *testing.T) { 182 | // Subtitles (i.e. FullTitle) are used in situations where multiple works 183 | // share the same primary title, or when the work belongs to a series.. 
184 | 185 | t.Parallel() 186 | 187 | ctx := context.Background() 188 | c := gomock.NewController(t) 189 | getter := NewMockgetter(c) 190 | 191 | workDupe1 := workResource{ 192 | ForeignID: 1, 193 | Title: "FOO", 194 | FullTitle: "Foo: First Work", 195 | Books: []bookResource{ 196 | {ForeignID: 1, Title: "Foo", FullTitle: "Foo: First Edition"}, 197 | {ForeignID: 2, Title: "Foo", FullTitle: ""}, 198 | }, 199 | } 200 | 201 | workDupe2 := workResource{ 202 | ForeignID: 2, 203 | Title: "Foo", 204 | FullTitle: "Foo: Second Work", 205 | Books: []bookResource{ 206 | {ForeignID: 10, Title: "Foo", FullTitle: "Foo: Second Edition"}, 207 | {ForeignID: 20, Title: "Foo", FullTitle: ""}, 208 | }, 209 | } 210 | 211 | workDupe3 := workResource{ 212 | ForeignID: 3, 213 | Title: "Foo", 214 | FullTitle: "Foo: Third Work", 215 | ShortTitle: "Foo", 216 | Books: []bookResource{ 217 | {ForeignID: 30, Title: "Foo", FullTitle: "Foo: Third Edition"}, 218 | {ForeignID: 40, Title: "Foo", FullTitle: ""}, 219 | }, 220 | } 221 | 222 | workDupe4 := workResource{ 223 | ForeignID: 4, 224 | Title: "Foo", 225 | FullTitle: "Foo: Fourth Work", 226 | ShortTitle: "Foo", 227 | Books: []bookResource{ 228 | {ForeignID: 50, Title: "Foo", FullTitle: "Foo: Fourth Edition"}, 229 | {ForeignID: 60, Title: "Foo", FullTitle: ""}, 230 | }, 231 | } 232 | 233 | workUnique := workResource{ 234 | ForeignID: 5, 235 | Title: "Bar", 236 | FullTitle: "Bar: Not Foo", 237 | Books: []bookResource{ 238 | {ForeignID: 70, Title: "Bar", FullTitle: "Bar: Not Foo"}, 239 | {ForeignID: 80, Title: "Bar", FullTitle: ""}, 240 | }, 241 | } 242 | 243 | workSeries := workResource{ 244 | ForeignID: 6, 245 | Title: "Baz", 246 | FullTitle: "Baz: The Baz Series #3", 247 | ShortTitle: "Baz", 248 | Books: []bookResource{ 249 | { 250 | ForeignID: 90, 251 | Title: "Baz", 252 | FullTitle: "Baz: The Baz Series #3", 253 | ShortTitle: "Baz", 254 | }, 255 | }, 256 | Series: []seriesResource{{ForeignID: 1234}}, 257 | } 258 | 259 | author := 
AuthorResource{ForeignID: 1000, Works: []workResource{ 260 | workDupe1, 261 | workDupe2, 262 | workUnique, 263 | workSeries, 264 | }} 265 | 266 | workDupe1.Authors = []AuthorResource{author} 267 | workDupe2.Authors = []AuthorResource{author} 268 | workDupe3.Authors = []AuthorResource{author} 269 | workDupe4.Authors = []AuthorResource{author} 270 | workUnique.Authors = []AuthorResource{author} 271 | workSeries.Authors = []AuthorResource{author} 272 | 273 | initialAuthorBytes, err := json.Marshal(author) 274 | require.NoError(t, err) 275 | initialWorkDupe1Bytes, err := json.Marshal(workDupe1) 276 | require.NoError(t, err) 277 | initialWorkDupe2Bytes, err := json.Marshal(workDupe2) 278 | require.NoError(t, err) 279 | initialWorkDupe3Bytes, err := json.Marshal(workDupe3) 280 | require.NoError(t, err) 281 | initialWorkDupe4Bytes, err := json.Marshal(workDupe4) 282 | require.NoError(t, err) 283 | initialWorkUniqueBytes, err := json.Marshal(workUnique) 284 | require.NoError(t, err) 285 | initialWorkSeriesBytes, err := json.Marshal(workSeries) 286 | require.NoError(t, err) 287 | 288 | cache := newMemoryCache() 289 | 290 | ctrl, err := NewController(cache, getter) 291 | require.NoError(t, err) 292 | 293 | getter.EXPECT().GetAuthor(gomock.Any(), author.ForeignID).DoAndReturn(func(ctx context.Context, authorID int64) ([]byte, error) { 294 | cachedBytes, ok := ctrl.cache.Get(ctx, AuthorKey(authorID)) 295 | if ok { 296 | return cachedBytes, nil 297 | } 298 | return initialAuthorBytes, nil 299 | }).AnyTimes() 300 | 301 | getter.EXPECT().GetWork(gomock.Any(), workDupe1.ForeignID, nil).DoAndReturn(func(ctx context.Context, workID int64, loadEditions editionsCallback) ([]byte, int64, error) { 302 | cachedBytes, ok := ctrl.cache.Get(ctx, WorkKey(workID)) 303 | if ok { 304 | return cachedBytes, 0, nil 305 | } 306 | return initialWorkDupe1Bytes, author.ForeignID, nil 307 | }).AnyTimes() 308 | 309 | getter.EXPECT().GetWork(gomock.Any(), workDupe2.ForeignID, nil).DoAndReturn(func(ctx 
context.Context, workID int64, loadEditions editionsCallback) ([]byte, int64, error) { 310 | cachedBytes, ok := ctrl.cache.Get(ctx, WorkKey(workID)) 311 | if ok { 312 | return cachedBytes, 0, nil 313 | } 314 | return initialWorkDupe2Bytes, author.ForeignID, nil 315 | }).AnyTimes() 316 | 317 | getter.EXPECT().GetWork(gomock.Any(), workDupe3.ForeignID, nil).DoAndReturn(func(ctx context.Context, workID int64, loadEditions editionsCallback) ([]byte, int64, error) { 318 | cachedBytes, ok := ctrl.cache.Get(ctx, WorkKey(workID)) 319 | if ok { 320 | return cachedBytes, 0, nil 321 | } 322 | return initialWorkDupe3Bytes, author.ForeignID, nil 323 | }).AnyTimes() 324 | 325 | getter.EXPECT().GetWork(gomock.Any(), workDupe4.ForeignID, nil).DoAndReturn(func(ctx context.Context, workID int64, loadEditions editionsCallback) ([]byte, int64, error) { 326 | cachedBytes, ok := ctrl.cache.Get(ctx, WorkKey(workID)) 327 | if ok { 328 | return cachedBytes, 0, nil 329 | } 330 | return initialWorkDupe4Bytes, author.ForeignID, nil 331 | }).AnyTimes() 332 | 333 | getter.EXPECT().GetWork(gomock.Any(), workUnique.ForeignID, nil).DoAndReturn(func(ctx context.Context, workID int64, loadEditions editionsCallback) ([]byte, int64, error) { 334 | cachedBytes, ok := ctrl.cache.Get(ctx, WorkKey(workID)) 335 | if ok { 336 | return cachedBytes, 0, nil 337 | } 338 | return initialWorkUniqueBytes, author.ForeignID, nil 339 | }).AnyTimes() 340 | 341 | getter.EXPECT().GetWork(gomock.Any(), workSeries.ForeignID, nil).DoAndReturn(func(ctx context.Context, workID int64, loadEditions editionsCallback) ([]byte, int64, error) { 342 | cachedBytes, ok := ctrl.cache.Get(ctx, WorkKey(workID)) 343 | if ok { 344 | return cachedBytes, 0, nil 345 | } 346 | return initialWorkSeriesBytes, author.ForeignID, nil 347 | }).AnyTimes() 348 | 349 | getter.EXPECT().GetAuthorBooks(gomock.Any(), author.ForeignID).Return(iter.Seq[int64](func(func(int64) bool) {})) 350 | 351 | err = ctrl.denormalizeWorks(ctx, author.ForeignID, 
workDupe1.ForeignID, workDupe2.ForeignID, workUnique.ForeignID) 352 | require.NoError(t, err) 353 | 354 | // Add these after the others have already had subtitles applied. We should 355 | // still apply a subtitle to this new work, instead of using its short 356 | // title. 357 | err = ctrl.denormalizeWorks(ctx, author.ForeignID, workDupe3.ForeignID) 358 | require.NoError(t, err) 359 | err = ctrl.denormalizeWorks(ctx, author.ForeignID, workDupe4.ForeignID) 360 | require.NoError(t, err) 361 | 362 | authorBytes, err := ctrl.GetAuthor(ctx, author.ForeignID) 363 | require.NoError(t, err) 364 | 365 | require.NoError(t, json.Unmarshal(authorBytes, &author)) 366 | 367 | assert.Equal(t, "Foo: First Work", author.Works[0].Title) 368 | assert.Equal(t, "Foo: Second Work", author.Works[1].Title) 369 | assert.Equal(t, "Foo: Third Work", author.Works[2].Title) 370 | assert.Equal(t, "Foo: Fourth Work", author.Works[3].Title) 371 | assert.Equal(t, "Bar", author.Works[4].Title) 372 | 373 | assert.Equal(t, "Foo: First Edition", author.Works[0].Books[0].Title) 374 | assert.Equal(t, "Foo", author.Works[0].Books[1].Title) 375 | 376 | assert.Equal(t, "Foo: Second Edition", author.Works[1].Books[0].Title) 377 | assert.Equal(t, "Foo", author.Works[1].Books[1].Title) 378 | 379 | assert.Equal(t, "Foo: Third Edition", author.Works[2].Books[0].Title) 380 | assert.Equal(t, "Foo", author.Works[2].Books[1].Title) 381 | 382 | assert.Equal(t, "Foo: Fourth Edition", author.Works[3].Books[0].Title) 383 | assert.Equal(t, "Foo", author.Works[3].Books[1].Title) 384 | 385 | assert.Equal(t, "Bar", author.Works[4].Books[0].Title) 386 | assert.Equal(t, "Bar", author.Works[4].Books[1].Title) 387 | 388 | assert.Equal(t, "Baz: The Baz Series #3", author.Works[5].Books[0].Title) 389 | } 390 | 391 | // TestSortedInvariant ensures we correct any lingering data not sorted 392 | // by ForeignID. This invairant is necessary for fast lookups and replacements 393 | // when updating works and editions. 
394 | func TestSortedInvariant(t *testing.T) { 395 | cache := newMemoryCache() 396 | 397 | t.Run("denormalizeWorks", func(t *testing.T) { 398 | c := gomock.NewController(t) 399 | getter := NewMockgetter(c) 400 | ctrl, err := NewController(cache, getter) 401 | require.NoError(t, err) 402 | 403 | author := AuthorResource{ 404 | ForeignID: 1, 405 | Works: []workResource{ 406 | {ForeignID: 1}, 407 | {ForeignID: 2}, 408 | {ForeignID: 1}, 409 | {ForeignID: 3}, 410 | }, 411 | } 412 | 413 | getter.EXPECT().GetWork(gomock.Any(), gomock.Any(), nil).DoAndReturn(func(ctx context.Context, id int64, loadEditions editionsCallback) ([]byte, int64, error) { 414 | bytes, err := json.Marshal(workResource{ForeignID: id, Books: []bookResource{{}}}) 415 | return bytes, 0, err 416 | }).AnyTimes() 417 | 418 | authorBytes, err := json.Marshal(author) 419 | require.NoError(t, err) 420 | 421 | cache.Set(t.Context(), AuthorKey(1), authorBytes, time.Hour) 422 | 423 | err = ctrl.denormalizeWorks(t.Context(), author.ForeignID, 3) 424 | require.NoError(t, err) 425 | 426 | authorBytes, ok := cache.Get(t.Context(), AuthorKey(author.ForeignID)) 427 | require.True(t, ok) 428 | 429 | err = json.Unmarshal(authorBytes, &author) 430 | require.NoError(t, err) 431 | assert.Equal(t, author.Works, []workResource{ 432 | {ForeignID: 1}, 433 | {ForeignID: 2}, 434 | {ForeignID: 3, Books: []bookResource{{}}}, 435 | }) 436 | }) 437 | 438 | t.Run("denormalizeEditions", func(t *testing.T) { 439 | c := gomock.NewController(t) 440 | getter := NewMockgetter(c) 441 | ctrl, err := NewController(cache, getter) 442 | require.NoError(t, err) 443 | 444 | work := workResource{ 445 | ForeignID: 1, 446 | Books: []bookResource{ 447 | {ForeignID: 10}, 448 | {ForeignID: 20}, 449 | {ForeignID: 10}, 450 | {ForeignID: 30}, 451 | }, 452 | } 453 | 454 | getter.EXPECT().GetWork(gomock.Any(), work.ForeignID, nil).DoAndReturn(func(ctx context.Context, id int64, loadEditions editionsCallback) ([]byte, int64, error) { 455 | workBytes, err 
:= json.Marshal(work) 456 | return workBytes, 0, err 457 | }) 458 | 459 | getter.EXPECT().GetBook(gomock.Any(), gomock.Any(), nil).DoAndReturn(func(ctx context.Context, id int64, loadEditions editionsCallback) ([]byte, int64, int64, error) { 460 | bytes, err := json.Marshal(workResource{ForeignID: work.ForeignID, Books: []bookResource{{ForeignID: id}}}) 461 | return bytes, 0, 0, err 462 | }).AnyTimes() 463 | 464 | workBytes, err := json.Marshal(work) 465 | require.NoError(t, err) 466 | 467 | cache.Set(t.Context(), WorkKey(1), workBytes, time.Hour) 468 | 469 | err = ctrl.denormalizeEditions(t.Context(), work.ForeignID, 10) 470 | require.NoError(t, err) 471 | 472 | workBytes, ok := cache.Get(t.Context(), WorkKey(work.ForeignID)) 473 | require.True(t, ok) 474 | 475 | err = json.Unmarshal(workBytes, &work) 476 | require.NoError(t, err) 477 | assert.Equal(t, work.Books, []bookResource{ 478 | {ForeignID: 10}, 479 | {ForeignID: 20}, 480 | {ForeignID: 30}, 481 | }) 482 | }) 483 | } 484 | -------------------------------------------------------------------------------- /internal/doc.go: -------------------------------------------------------------------------------- 1 | // Package internal runs a read-through cache server. 2 | package internal 3 | -------------------------------------------------------------------------------- /internal/edges.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "iter" 5 | "time" 6 | ) 7 | 8 | type edgeKind int 9 | 10 | const ( 11 | authorEdge edgeKind = 1 12 | workEdge edgeKind = 2 13 | ) 14 | 15 | // edge represents a parent/child relationship. 16 | type edge struct { 17 | kind edgeKind 18 | parentID int64 19 | childIDs []int64 20 | } 21 | 22 | // groupEdges collects edges of the same kind and parent together in order to 23 | // reduce the number of times we deserialize the parent during denormalization. 
24 | // 25 | // If an edge isn't seen after the wait duration then we yield the last edge we 26 | // saw. 27 | func groupEdges(edges chan edge, wait time.Duration) iter.Seq[edge] { 28 | return func(yield func(edge) bool) { 29 | var next edge 30 | var ok bool 31 | 32 | edge := <-edges 33 | for { 34 | select { 35 | case next, ok = <-edges: 36 | if !ok { 37 | // Channel is closed. 38 | _ = yield(edge) 39 | return 40 | } 41 | case <-time.After(wait): 42 | if !yield(edge) { 43 | return 44 | } 45 | // Wait until we see the next edge, then start over. 46 | edge = <-edges 47 | continue 48 | } 49 | 50 | // If the next edge is for the same parent and kind, then aggregate 51 | // its children into ours and move on. 52 | if edge.parentID == next.parentID && edge.kind == next.kind { 53 | edge.childIDs = append(edge.childIDs, next.childIDs...) 54 | continue 55 | } 56 | 57 | // Next edge is for a different parent, so yield our current edge. 58 | if !yield(edge) { 59 | return 60 | } 61 | 62 | edge = next 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /internal/edges_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "slices" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestGroupEdges(t *testing.T) { 12 | c := make(chan edge) 13 | go func() { 14 | c <- edge{kind: authorEdge, parentID: 100, childIDs: []int64{1}} 15 | c <- edge{kind: authorEdge, parentID: 100, childIDs: []int64{2, 3}} 16 | c <- edge{kind: workEdge, parentID: 100, childIDs: []int64{4}} 17 | c <- edge{kind: authorEdge, parentID: 100, childIDs: []int64{5, 6}} 18 | c <- edge{kind: authorEdge, parentID: 200, childIDs: []int64{7}} 19 | c <- edge{kind: authorEdge, parentID: 100, childIDs: []int64{8}} 20 | c <- edge{kind: authorEdge, parentID: 300, childIDs: []int64{9}} 21 | close(c) 22 | }() 23 | 24 | edges := slices.Collect(groupEdges(c, 
time.Second)) 25 | 26 | assert.Equal(t, edges[0], edge{kind: authorEdge, parentID: 100, childIDs: []int64{1, 2, 3}}) 27 | assert.Equal(t, edges[1], edge{kind: workEdge, parentID: 100, childIDs: []int64{4}}) 28 | assert.Equal(t, edges[2], edge{kind: authorEdge, parentID: 100, childIDs: []int64{5, 6}}) 29 | assert.Equal(t, edges[3], edge{kind: authorEdge, parentID: 200, childIDs: []int64{7}}) 30 | assert.Equal(t, edges[4], edge{kind: authorEdge, parentID: 100, childIDs: []int64{8}}) 31 | assert.Equal(t, edges[5], edge{kind: authorEdge, parentID: 300, childIDs: []int64{9}}) 32 | } 33 | -------------------------------------------------------------------------------- /internal/error.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net/http" 7 | ) 8 | 9 | var ( 10 | errNotFound = statusErr(http.StatusNotFound) 11 | errBadRequest = statusErr(http.StatusBadRequest) 12 | errTryAgain = statusErr(http.StatusTooManyRequests) // Will be retried. 
13 | 14 | errMissingIDs = errors.Join(fmt.Errorf(`missing "ids"`), errBadRequest) 15 | ) 16 | 17 | type statusErr int 18 | 19 | var _ error = (*statusErr)(nil) 20 | 21 | func (s statusErr) Status() int { 22 | return int(s) 23 | } 24 | 25 | func (s statusErr) Error() string { 26 | return fmt.Sprintf("HTTP %d: %s", s, http.StatusText(int(s))) 27 | } 28 | -------------------------------------------------------------------------------- /internal/error_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestError(t *testing.T) { 12 | err := errors.Join(fmt.Errorf("invalid request"), errBadRequest) 13 | 14 | var s statusErr 15 | ok := errors.As(err, &s) 16 | assert.True(t, ok) 17 | 18 | assert.ErrorContains(t, err, "invalid request") 19 | } 20 | -------------------------------------------------------------------------------- /internal/gr.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "encoding/hex" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "iter" 10 | "maps" 11 | "net/http" 12 | "slices" 13 | "strings" 14 | "time" 15 | 16 | "github.com/Khan/genqlient/graphql" 17 | q "github.com/antchfx/htmlquery" 18 | "github.com/blampe/rreading-glasses/gr" 19 | "github.com/microcosm-cc/bluemonday" 20 | "golang.org/x/net/html" 21 | ) 22 | 23 | var _stripTags = bluemonday.StrictPolicy() 24 | 25 | // GRGetter fetches information from a GR upstream. 26 | type GRGetter struct { 27 | cache cache[[]byte] 28 | gql graphql.Client 29 | upstream *http.Client 30 | } 31 | 32 | var _ getter = (*GRGetter)(nil) 33 | 34 | // NewGRGetter creates a new Getter backed by G——R——. 
35 | func NewGRGetter(cache cache[[]byte], gql graphql.Client, upstream *http.Client) (*GRGetter, error) { 36 | return &GRGetter{ 37 | cache: cache, 38 | gql: gql, 39 | upstream: upstream, 40 | }, nil 41 | } 42 | 43 | // NewGRGQL returns a new GraphQL client for use with GR. The provided 44 | // [http.Client] must be non-nil and is used for issuing requests. If a 45 | // non-empty cookie is given the requests are authorized and use are allowed 46 | // more RPS. 47 | func NewGRGQL(ctx context.Context, upstream *http.Client, cookie string) (graphql.Client, error) { 48 | // These credentials are public and easily obtainable. They are obscured here only to hide them from search results. 49 | defaultToken, err := hex.DecodeString("6461322d787067736479646b627265676a68707236656a7a716468757779") 50 | if err != nil { 51 | return nil, err 52 | } 53 | host, err := hex.DecodeString("68747470733a2f2f6b7862776d716f76366a676733646161616d62373434796375342e61707073796e632d6170692e75732d656173742d312e616d617a6f6e6177732e636f6d2f6772617068716c") 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | auth := &HeaderTransport{ 59 | Key: "X-Api-Key", 60 | Value: string(defaultToken), 61 | RoundTripper: errorProxyTransport{ 62 | RoundTripper: http.DefaultTransport, 63 | }, 64 | } 65 | // 3RPS seems to be the limit for all gql traffic, regardless of 66 | // credentials, but still occasionally gives 403s. So let's try 2RPS. 67 | rate := time.Second / 2.0 68 | 69 | // This path is disabled for now because unauth'd traffic is allowed the 70 | // same RPS as auth'd. The value of the cookie then is to simply allow more 71 | // HEAD requests when resolving authors. 72 | /* 73 | if cookie != "" { 74 | // Grab an authenticated token and continue to refresh it in the background. 
75 | token, err := getGRCreds(ctx, upstream) 76 | if err != nil { 77 | return nil, err 78 | } 79 | auth.Key = "Authorization" 80 | auth.Value = token 81 | 82 | go func() { 83 | for { 84 | time.Sleep(290 * time.Second) // TODO: Use cookie expiration time. 85 | token, err := getGRCreds(ctx, upstream) 86 | if err != nil { 87 | Log(ctx).Error("unable to refresh auth", "err", err) 88 | auth.Key = "X-Api-Key" 89 | auth.Value = string(defaultToken) 90 | continue 91 | } 92 | auth.Key = "Authorization" 93 | auth.Value = token 94 | } 95 | }() 96 | } 97 | */ 98 | 99 | return NewBatchedGraphQLClient(string(host), &http.Client{Transport: auth}, rate) 100 | } 101 | 102 | // GetWork returns a work with all known editions. Due to the way R—— works, if 103 | // an edition is missing here (like a translated edition) it's not fetchable. 104 | func (g *GRGetter) GetWork(ctx context.Context, workID int64, loadEditions editionsCallback) (_ []byte, authorID int64, _ error) { 105 | if workID == 146797269 { 106 | // This work always 500s for some reason. Ignore it. 
107 | return nil, 0, errNotFound 108 | } 109 | workBytes, ttl, ok := g.cache.GetWithTTL(ctx, WorkKey(workID)) 110 | if ok && ttl > 0 { 111 | return workBytes, 0, nil 112 | } 113 | 114 | if ok { 115 | var work workResource 116 | _ = json.Unmarshal(workBytes, &work) 117 | 118 | bookID := work.BestBookID 119 | if bookID != 0 { 120 | out, _, authorID, err := g.GetBook(ctx, bookID, loadEditions) 121 | return out, authorID, err 122 | } 123 | } 124 | 125 | url := fmt.Sprintf("/work/%d", workID) 126 | resp, err := g.upstream.Head(url) 127 | if err != nil { 128 | return nil, 0, fmt.Errorf("probleam getting HEAD: %w", err) 129 | } 130 | 131 | location := resp.Header.Get("location") 132 | if location == "" { 133 | return nil, 0, fmt.Errorf("missing location header") 134 | } 135 | 136 | bookID, err := pathToID(location) 137 | if err != nil { 138 | Log(ctx).Warn("likely auth error", "err", err, "head", url, "redirect", location) 139 | return nil, 0, fmt.Errorf("invalid redirect, likely auth error: %w", err) 140 | } 141 | 142 | Log(ctx).Debug("getting book", "bookID", bookID) 143 | 144 | out, _, authorID, err := g.GetBook(ctx, bookID, loadEditions) 145 | return out, authorID, err 146 | } 147 | 148 | // GetBook fetches a book (edition) from GR. 
149 | func (g *GRGetter) GetBook(ctx context.Context, bookID int64, loadEditions editionsCallback) (_ []byte, workID, authorID int64, _ error) { 150 | if workBytes, ttl, ok := g.cache.GetWithTTL(ctx, BookKey(bookID)); ok && ttl > 0 && loadEditions == nil { 151 | return workBytes, 0, 0, nil 152 | } 153 | 154 | resp, err := gr.GetBook(ctx, g.gql, bookID) 155 | if err != nil { 156 | return nil, 0, 0, fmt.Errorf("getting book: %w", err) 157 | } 158 | 159 | book := resp.GetBookByLegacyId 160 | 161 | genres := []string{} 162 | for _, g := range book.BookGenres { 163 | genres = append(genres, g.Genre.Name) 164 | } 165 | if len(genres) == 0 { 166 | genres = []string{"none"} 167 | } 168 | 169 | series := []seriesResource{} 170 | for _, s := range book.BookSeries { 171 | legacyID, _ := pathToID(s.Series.WebUrl) 172 | position, _ := pathToID(s.SeriesPlacement) 173 | series = append(series, seriesResource{ 174 | KCA: s.Series.Id, 175 | Title: s.Series.Title, 176 | ForeignID: legacyID, 177 | Description: "TODO", // Would need to scrape this. 178 | 179 | LinkItems: []seriesWorkLinkResource{{ 180 | PositionInSeries: s.SeriesPlacement, 181 | SeriesPosition: int(position), // TODO: What's the difference b/t placement? 182 | ForeignWorkID: book.Work.LegacyId, 183 | Primary: false, // TODO: How can we get this??? 184 | }}, 185 | }) 186 | } 187 | 188 | bookDescription := strings.TrimSpace(book.Description) 189 | if bookDescription == "" { 190 | bookDescription = "N/A" // Must be set? 191 | } 192 | 193 | bookRsc := bookResource{ 194 | KCA: resp.GetBookByLegacyId.Id, 195 | ForeignID: book.LegacyId, 196 | Asin: book.Details.Asin, 197 | Description: bookDescription, 198 | Isbn13: book.Details.Isbn13, 199 | Title: book.TitlePrimary, 200 | FullTitle: book.Title, 201 | ShortTitle: book.TitlePrimary, 202 | Language: iso639_3(book.Details.Language.Name), 203 | Format: book.Details.Format, 204 | EditionInformation: "", // TODO: Is this used anywhere? 
205 | Publisher: book.Details.Publisher, // TODO: Ignore books without publishers? 206 | ImageURL: book.ImageUrl, 207 | IsEbook: book.Details.Format == "Kindle Edition", // TODO: Flush this out. 208 | NumPages: book.Details.NumPages, 209 | RatingCount: book.Stats.RatingsCount, 210 | RatingSum: book.Stats.RatingsSum, 211 | AverageRating: book.Stats.AverageRating, 212 | URL: book.WebUrl, 213 | // TODO: Omitting release date is a way to essentially force R to hide 214 | // the book from the frontend while allowing the user to still add it 215 | // via search. Better UX depending on what you're after. 216 | } 217 | 218 | if book.Details.PublicationTime != 0 { 219 | bookRsc.ReleaseDate = releaseDate(book.Details.PublicationTime) 220 | } 221 | 222 | author := book.PrimaryContributorEdge.Node 223 | authorDescription := strings.TrimSpace(author.Description) 224 | if authorDescription == "" { 225 | authorDescription = "N/A" // Must be set? 226 | } 227 | 228 | // Unlike bookDescription we can't request this with (stripped: true) 229 | authorDescription = html.UnescapeString(_stripTags.Sanitize(authorDescription)) 230 | 231 | authorRsc := AuthorResource{ 232 | KCA: author.Id, 233 | Name: author.Name, 234 | ForeignID: author.LegacyId, 235 | URL: author.WebUrl, 236 | ImageURL: author.ProfileImageUrl, 237 | Description: authorDescription, 238 | Series: series, 239 | } 240 | 241 | work := book.Work 242 | workRsc := workResource{ 243 | Title: work.BestBook.TitlePrimary, 244 | FullTitle: work.BestBook.Title, 245 | ShortTitle: work.BestBook.TitlePrimary, 246 | KCA: work.Id, 247 | ForeignID: work.LegacyId, 248 | URL: work.Details.WebUrl, 249 | Series: series, 250 | Genres: genres, 251 | RelatedWorks: []int{}, 252 | BestBookID: work.BestBook.LegacyId, 253 | } 254 | 255 | if work.Details.PublicationTime != 0 { 256 | workRsc.ReleaseDate = releaseDate(work.Details.PublicationTime) 257 | } else if bookRsc.ReleaseDate != "" { 258 | workRsc.ReleaseDate = bookRsc.ReleaseDate 259 | } 260 | 
261 | bookRsc.Contributors = []contributorResource{{ForeignID: author.LegacyId, Role: "Author"}} 262 | authorRsc.Works = []workResource{workRsc} 263 | workRsc.Authors = []AuthorResource{authorRsc} 264 | workRsc.Books = []bookResource{bookRsc} // TODO: Add best book here as well? 265 | 266 | out, err := json.Marshal(workRsc) 267 | if err != nil { 268 | return nil, 0, 0, fmt.Errorf("marshaling work: %w", err) 269 | } 270 | 271 | // If a work isn't already cached with this ID, and this book is the "best" 272 | // edition, then write a cache entry using our edition as a starting point. 273 | // The controller will handle denormalizing this to the author. 274 | if _, ok := g.cache.Get(ctx, WorkKey(workRsc.ForeignID)); !ok && workRsc.BestBookID == bookID { 275 | g.cache.Set(ctx, WorkKey(workRsc.ForeignID), out, _workTTL) 276 | } 277 | 278 | // If this is the "best" edition for the work, load some additional 279 | // editions for it. 280 | if loadEditions != nil && workRsc.BestBookID == bookID { 281 | editions := map[editionDedupe]int64{} 282 | for _, e := range resp.GetBookByLegacyId.Work.Editions.Edges { 283 | key := editionDedupe{title: strings.ToUpper(e.Node.Title), language: iso639_3(e.Node.Details.Language.Name)} 284 | if _, ok := editions[key]; ok { 285 | continue // Already saw an edition similar to this one. 286 | } 287 | editions[key] = e.Node.LegacyId 288 | } 289 | loadEditions(slices.Collect(maps.Values(editions))...) 290 | } 291 | 292 | return out, workRsc.ForeignID, authorRsc.ForeignID, nil 293 | } 294 | 295 | // GetAuthor returns an author with all of their works and respective editions. 296 | // Due to the way R works, if a work isn't returned here it's not fetchable. 297 | // 298 | // On an initial load we return only one work on the author. The controller 299 | // handles asynchronously fetching all additional works. 
300 | func (g *GRGetter) GetAuthor(ctx context.Context, authorID int64) ([]byte, error) { 301 | var authorKCA string 302 | 303 | authorBytes, ok := g.cache.Get(ctx, AuthorKey(authorID)) 304 | 305 | if ok { 306 | // Use our cached value to recover the new KCA. 307 | var author AuthorResource 308 | _ = json.Unmarshal(authorBytes, &author) 309 | authorKCA = author.KCA 310 | if authorKCA != "" { 311 | Log(ctx).Debug("found cached author", "authorKCA", authorKCA, "authorID", authorID) 312 | } 313 | } 314 | 315 | var err error 316 | if authorKCA == "" { 317 | Log(ctx).Debug("resolving author ID", "authorID", authorID) 318 | authorKCA, err = g.legacyAuthorIDtoKCA(ctx, authorID) 319 | if err != nil { 320 | return nil, err 321 | } 322 | } 323 | 324 | if authorKCA == "" { 325 | Log(ctx).Warn("unable to resolve author UID", "hit", ok) 326 | return nil, fmt.Errorf("unable to resolve author %d", authorID) 327 | } 328 | 329 | works, err := gr.GetAuthorWorks(ctx, g.gql, gr.GetWorksByContributorInput{ 330 | Id: authorKCA, 331 | }, gr.PaginationInput{Limit: 20}) 332 | if err != nil { 333 | Log(ctx).Warn("problem getting author works", "err", err, "author", authorID, "authorKCA", authorKCA) 334 | return nil, fmt.Errorf("author works: %w", err) 335 | } 336 | 337 | if len(works.GetWorksByContributor.Edges) == 0 { 338 | Log(ctx).Warn("no works found") 339 | return nil, fmt.Errorf("not found") 340 | // TODO: Return a 404 here instead? 341 | } 342 | 343 | // Load books until we find one with our author. 
344 | for _, e := range works.GetWorksByContributor.Edges { 345 | id := e.Node.BestBook.LegacyId 346 | workBytes, _, _, err := g.GetBook(ctx, id, nil) 347 | if err != nil { 348 | Log(ctx).Warn("problem getting initial book for author", "err", err, "bookID", id, "authorID", authorID) 349 | continue 350 | } 351 | var w workResource 352 | err = json.Unmarshal(workBytes, &w) 353 | if err != nil { 354 | Log(ctx).Warn("problem unmarshaling work for author", "err", err, "bookID", id) 355 | _ = g.cache.Expire(ctx, BookKey(id)) 356 | continue 357 | } 358 | 359 | for _, a := range w.Authors { 360 | if a.ForeignID != authorID { 361 | continue 362 | } 363 | a.Works = []workResource{w} 364 | return json.Marshal(a) // Found it! 365 | } 366 | } 367 | 368 | return nil, errNotFound 369 | } 370 | 371 | // GetAuthorBooks enumerates all of the "best" editions for an author. This is 372 | // how we load large authors. 373 | func (g *GRGetter) GetAuthorBooks(ctx context.Context, authorID int64) iter.Seq[int64] { 374 | authorBytes, err := g.GetAuthor(ctx, authorID) 375 | if err != nil { 376 | Log(ctx).Warn("problem getting author for full load", "err", err) 377 | return func(yield func(int64) bool) {} // Empty iterator. 378 | } 379 | 380 | var author AuthorResource 381 | _ = json.Unmarshal(authorBytes, &author) 382 | 383 | return func(yield func(int64) bool) { 384 | after := "" 385 | for { 386 | works, err := gr.GetAuthorWorks(ctx, g.gql, gr.GetWorksByContributorInput{ 387 | Id: author.KCA, 388 | }, gr.PaginationInput{Limit: 20, After: after}) 389 | if err != nil { 390 | Log(ctx).Warn("problem getting author works", "err", err, "author", authorID, "authorKCA", author.KCA, "after", after) 391 | return 392 | } 393 | 394 | for _, w := range works.GetWorksByContributor.Edges { 395 | // Make sure it's actually our author and not a translator or something. 396 | if w.Node.BestBook.PrimaryContributorEdge.Node.LegacyId != authorID { 397 | continue // Wrong author. 
398 | } 399 | if w.Node.BestBook.PrimaryContributorEdge.Role != "Author" { 400 | continue // Skip things they didn't author. 401 | } 402 | if !yield(w.Node.BestBook.LegacyId) { 403 | return 404 | } 405 | } 406 | 407 | if !works.GetWorksByContributor.PageInfo.HasNextPage { 408 | return 409 | } 410 | after = works.GetWorksByContributor.PageInfo.NextPageToken 411 | } 412 | } 413 | } 414 | 415 | // legacyAuthorIDtoKCA is the once place where we still need to hit upstream, 416 | // because (AFAICT) the GQL APIs don't expose a way to map a legacy author ID 417 | // to a modern kca://author ID. So we load the author's works and lookup a book 418 | // from that, and that includes the KCA we need. 419 | // 420 | // We keep the author cached for longer to spare ourselves this lookup on 421 | // refreshes. 422 | func (g *GRGetter) legacyAuthorIDtoKCA(ctx context.Context, authorID int64) (string, error) { 423 | // per_page=1 is important, for some reason the default list includes works 424 | // by other authors! 425 | url := fmt.Sprintf("/author/list/%d?per_page=1", authorID) 426 | req, err := http.NewRequestWithContext(ctx, "GET", url, nil) 427 | if err != nil { 428 | Log(ctx).Debug("problem creating request", "err", err) 429 | return "", err 430 | } 431 | 432 | resp, err := g.upstream.Do(req) 433 | if err != nil { 434 | return "", err 435 | } 436 | defer func() { _ = resp.Body.Close() }() 437 | 438 | // TODO: If we get a 404 for the author we should cache a gravestone. 439 | // Do that in the controller. 
440 | 441 | doc, err := q.Parse(resp.Body) 442 | if err != nil { 443 | return "", fmt.Errorf("parsing response: %w", err) 444 | } 445 | 446 | bookID, err := scrapeBookID(doc) 447 | if err != nil { 448 | Log(ctx).Warn("problem getting book ID", "err", err) 449 | return "", err 450 | } 451 | 452 | workBytes, _, _, err := g.GetBook(ctx, bookID, nil) 453 | if err != nil { 454 | Log(ctx).Warn("problem getting book for author ID lookup", "err", err, "bookID", bookID) 455 | return "", err 456 | } 457 | 458 | var work workResource 459 | err = json.Unmarshal(workBytes, &work) 460 | if err != nil { 461 | Log(ctx).Warn("problem unmarshaling book", "bookID", bookID, "size", len(workBytes)) 462 | _ = g.cache.Expire(ctx, BookKey(bookID)) 463 | return "", errors.Join(errTryAgain, err) 464 | } 465 | 466 | Log(ctx).Debug( 467 | "resolved legacy author from work", 468 | "workID", work.ForeignID, 469 | "authors", len(work.Authors), 470 | "authorName", work.Authors[0].Name, 471 | "authorID", work.Authors[0].ForeignID, 472 | "authorKCA", work.Authors[0].KCA, 473 | "title", work.Title, 474 | ) 475 | 476 | return work.Authors[0].KCA, nil 477 | } 478 | 479 | // scrapeBookID expects `/author/list/{id}?per_page=1` as input. 480 | func scrapeBookID(doc *html.Node) (int64, error) { 481 | node, err := q.Query(doc, `//a[@class="bookTitle"]`) 482 | if err != nil { 483 | return 0, fmt.Errorf("problem scraping book ID: %w", err) 484 | } 485 | if node == nil { 486 | return 0, fmt.Errorf("no bookTitle link found") 487 | } 488 | 489 | path := q.SelectAttr(node, "href") 490 | return pathToID(path) 491 | } 492 | 493 | // releaseDate parses a G— float into a formatted time R— can work with. 494 | // 495 | // TODO: We might be able to omit the month/day and have R use just the year? 
496 | func releaseDate(t float64) string { 497 | ts := time.UnixMilli(int64(t)).UTC() 498 | 499 | if ts.Before(time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)) { 500 | return "" 501 | } 502 | 503 | return ts.Format(time.DateTime) 504 | } 505 | 506 | // editionDedupe is how we avoid grabbing unnecessary editions. If we've 507 | // already seen an edition with the same title and language, then we don't need 508 | // any more for the same title and language. 509 | type editionDedupe struct { 510 | title string 511 | language string 512 | } 513 | -------------------------------------------------------------------------------- /internal/gr_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "os" 10 | "strings" 11 | "sync" 12 | "testing" 13 | "time" 14 | 15 | "github.com/Khan/genqlient/graphql" 16 | "github.com/blampe/rreading-glasses/gr" 17 | "github.com/blampe/rreading-glasses/hardcover" 18 | "github.com/stretchr/testify/assert" 19 | "github.com/stretchr/testify/require" 20 | "github.com/vektah/gqlparser/v2/gqlerror" 21 | "go.uber.org/mock/gomock" 22 | ) 23 | 24 | func TestGetAuthorIntegrity(t *testing.T) { 25 | // Try to repro null "books" on author.Works 26 | // 1. load book first, then author? 27 | // 2. load author first? 28 | } 29 | 30 | func TestGRGetBookDataIntegrity(t *testing.T) { 31 | // The client is particularly sensitive to null values. 
32 | // For a given work resource, it MUST 33 | // - have non-null top-level books 34 | // - non-null ratingcount, averagerating 35 | // - have a contributor with a foreign id 36 | 37 | t.Parallel() 38 | 39 | ctx := context.Background() 40 | c := gomock.NewController(t) 41 | 42 | dupeEditionID := int64(123) 43 | 44 | upstream := hardcover.NewMocktransport(c) 45 | upstream.EXPECT().RoundTrip(gomock.Any()).DoAndReturn(func(r *http.Request) (*http.Response, error) { 46 | if r.Method == "HEAD" { 47 | resp := &http.Response{ 48 | StatusCode: http.StatusOK, 49 | Header: http.Header{}, 50 | } 51 | resp.Header.Add("location", "https://www.gr.com/book/show/6609765-out-of-my-mind") 52 | return resp, nil 53 | } 54 | if r.Method == "GET" { 55 | resp := &http.Response{ 56 | StatusCode: http.StatusOK, 57 | Header: http.Header{}, 58 | Body: io.NopCloser(strings.NewReader(``)), 59 | } 60 | return resp, nil 61 | } 62 | panic(r) 63 | }).AnyTimes() 64 | 65 | gql := hardcover.NewMockgql(c) 66 | gql.EXPECT().MakeRequest(gomock.Any(), 67 | gomock.AssignableToTypeOf(&graphql.Request{}), 68 | gomock.AssignableToTypeOf(&graphql.Response{})).DoAndReturn( 69 | func(ctx context.Context, req *graphql.Request, res *graphql.Response) error { 70 | if req.OpName == "GetBook" { 71 | if id := req.Variables.(interface{ GetLegacyId() int64 }).GetLegacyId(); id != 6609765 { 72 | panic(id) 73 | } 74 | gbr, ok := res.Data.(*gr.GetBookResponse) 75 | if !ok { 76 | panic(gbr) 77 | } 78 | gbr.GetBookByLegacyId = gr.GetBookGetBookByLegacyIdBook{ 79 | Id: "kca://book/amzn1.gr.book.v1.WY3sni8ilbLc2WGHV0N3SQ", 80 | LegacyId: 6609765, 81 | Description: "Melody is not like most people. She cannot walk or talk, but she has a photographic memory; she can remember every detail of everything she has ever experienced. 
She is smarter than most of the adults who try to diagnose her and smarter than her classmates in her integrated classroom - the very same classmates who dismiss her as mentally challenged because she cannot tell them otherwise. But Melody refuses to be defined by cerebral palsy. And she's determined to let everyone know it - somehow.", 82 | BookGenres: []gr.GetBookGetBookByLegacyIdBookBookGenresBookGenre{ 83 | {Genre: gr.GetBookGetBookByLegacyIdBookBookGenresBookGenreGenre{Name: "Young Adult"}}, 84 | }, 85 | BookSeries: []gr.GetBookGetBookByLegacyIdBookBookSeries{ 86 | { 87 | SeriesPlacement: "1", 88 | Series: gr.GetBookGetBookByLegacyIdBookBookSeriesSeries{ 89 | Id: "kca://series/amzn1.gr.series.v3.owomqLJFO4sueLJt", 90 | Title: "Out of My Mind", 91 | WebUrl: "https://www.gr.com/series/326523-out-of-my-mind", 92 | }, 93 | }, 94 | }, 95 | Details: gr.GetBookGetBookByLegacyIdBookDetails{ 96 | Asin: "141697170X", 97 | Isbn13: "9781416971702", 98 | Format: "Hardcover", 99 | NumPages: 295, 100 | Language: gr.GetBookGetBookByLegacyIdBookDetailsLanguage{ 101 | Name: "English", 102 | }, 103 | OfficialUrl: "", 104 | Publisher: "Atheneum Books for Young Readers", 105 | PublicationTime: 1268121600000, 106 | }, 107 | ImageUrl: "https://images-na.ssl-images-amazon.com/images/S/compressed.photo.gr.com/books/1347602096i/6609765.jpg", 108 | PrimaryContributorEdge: gr.GetBookGetBookByLegacyIdBookPrimaryContributorEdgeBookContributorEdge{ 109 | Node: gr.GetBookGetBookByLegacyIdBookPrimaryContributorEdgeBookContributorEdgeNodeContributor{ 110 | Id: "kca://author/amzn1.gr.author.v1.tnLKwFVJefdFsJ6d34fT6Q", 111 | Name: "Sharon M. Draper", 112 | 113 | LegacyId: 51942, 114 | WebUrl: "https://www.gr.com/author/show/51942.Sharon_M_Draper", 115 | ProfileImageUrl: "https://i.gr-assets.com/images/S/compressed.photo.gr.com/authors/1236906847i/51942._UX200_CR0,49,200,200_.jpg", 116 | Description: "Sharon M. Draper is a professional educator as well as an accomplished writer. 
She has been honored as the National Teacher of the Year, is a five-time winner of the Coretta Scott King Literary Award, and is a New York Times bestselling author. She lives in Cincinnati, Ohio.", 117 | }, 118 | }, 119 | Stats: gr.GetBookGetBookByLegacyIdBookStatsBookOrWorkStats{ 120 | AverageRating: 4.35, 121 | RatingsCount: 156543, 122 | RatingsSum: 680605, 123 | }, 124 | TitlePrimary: "Out of My Mind", 125 | WebUrl: "https://www.gr.com/book/show/6609765-out-of-my-mind", 126 | Work: gr.GetBookGetBookByLegacyIdBookWork{ 127 | Id: "kca://work/amzn1.gr.work.v1.DaUnQI3cWL066Bo8_EL8-A", 128 | LegacyId: 6803732, 129 | Details: gr.GetBookGetBookByLegacyIdBookWorkDetails{ 130 | WebUrl: "https://www.gr.com/work/6803732-out-of-my-mind", 131 | PublicationTime: 1268121600000, 132 | }, 133 | BestBook: gr.GetBookGetBookByLegacyIdBookWorkBestBook{ 134 | LegacyId: 6609765, 135 | }, 136 | Editions: gr.GetBookGetBookByLegacyIdBookWorkEditionsBooksConnection{ 137 | Edges: []gr.GetBookGetBookByLegacyIdBookWorkEditionsBooksConnectionEdgesBooksEdge{ 138 | { 139 | Node: gr.GetBookGetBookByLegacyIdBookWorkEditionsBooksConnectionEdgesBooksEdgeNodeBook{ 140 | LegacyId: 6609765, 141 | Title: "Out of My Mind", 142 | Details: gr.GetBookGetBookByLegacyIdBookWorkEditionsBooksConnectionEdgesBooksEdgeNodeBookDetails{ 143 | Language: gr.GetBookGetBookByLegacyIdBookWorkEditionsBooksConnectionEdgesBooksEdgeNodeBookDetailsLanguage{ 144 | Name: "english", 145 | }, 146 | }, 147 | }, 148 | }, 149 | { 150 | Node: gr.GetBookGetBookByLegacyIdBookWorkEditionsBooksConnectionEdgesBooksEdgeNodeBook{ 151 | LegacyId: dupeEditionID, // Should be ignored since this is a dupe. 
152 | Title: "OUT OF MY MIND", 153 | Details: gr.GetBookGetBookByLegacyIdBookWorkEditionsBooksConnectionEdgesBooksEdgeNodeBookDetails{ 154 | Language: gr.GetBookGetBookByLegacyIdBookWorkEditionsBooksConnectionEdgesBooksEdgeNodeBookDetailsLanguage{ 155 | Name: "english", 156 | }, 157 | }, 158 | }, 159 | }, 160 | }, 161 | }, 162 | }, 163 | } 164 | return nil 165 | 166 | } 167 | if req.OpName == "GetAuthorWorks" { 168 | gaw, ok := res.Data.(*gr.GetAuthorWorksResponse) 169 | if !ok { 170 | panic(gaw) 171 | } 172 | gaw.GetWorksByContributor = gr.GetAuthorWorksGetWorksByContributorContributorWorksConnection{ 173 | Edges: []gr.GetAuthorWorksGetWorksByContributorContributorWorksConnectionEdgesContributorWorksEdge{{ 174 | Node: gr.GetAuthorWorksGetWorksByContributorContributorWorksConnectionEdgesContributorWorksEdgeNodeWork{ 175 | Id: "kca://work/amzn1.gr.work.v1.DaUnQI3cWL066Bo8_EL8-A", 176 | BestBook: gr.GetAuthorWorksGetWorksByContributorContributorWorksConnectionEdgesContributorWorksEdgeNodeWorkBestBook{ 177 | LegacyId: 6609765, 178 | PrimaryContributorEdge: gr.GetAuthorWorksGetWorksByContributorContributorWorksConnectionEdgesContributorWorksEdgeNodeWorkBestBookPrimaryContributorEdgeBookContributorEdge{ 179 | Role: "Author", 180 | Node: gr.GetAuthorWorksGetWorksByContributorContributorWorksConnectionEdgesContributorWorksEdgeNodeWorkBestBookPrimaryContributorEdgeBookContributorEdgeNodeContributor{ 181 | LegacyId: 51942, 182 | }, 183 | }, 184 | SecondaryContributorEdges: []gr.GetAuthorWorksGetWorksByContributorContributorWorksConnectionEdgesContributorWorksEdgeNodeWorkBestBookSecondaryContributorEdgesBookContributorEdge{}, 185 | }, 186 | }, 187 | }}, 188 | } 189 | } 190 | return nil 191 | }).AnyTimes() 192 | 193 | cache := &LayeredCache{wrapped: []cache[[]byte]{newMemoryCache()}} 194 | getter, err := NewGRGetter(cache, gql, &http.Client{Transport: upstream}) 195 | require.NoError(t, err) 196 | 197 | ctrl, err := NewController(cache, getter) 198 | require.NoError(t, err) 
199 | 200 | go ctrl.Run(t.Context(), 0) 201 | t.Cleanup(func() { ctrl.Shutdown(t.Context()) }) 202 | 203 | t.Run("GetBook", func(t *testing.T) { 204 | bookBytes, err := ctrl.GetBook(ctx, 6609765) 205 | assert.NoError(t, err) 206 | 207 | var work workResource 208 | require.NoError(t, json.Unmarshal(bookBytes, &work)) 209 | 210 | assert.Equal(t, int64(6803732), work.ForeignID) 211 | require.Len(t, work.Authors, 1) 212 | require.Len(t, work.Authors[0].Works, 1) 213 | assert.Equal(t, int64(51942), work.Authors[0].ForeignID) 214 | 215 | require.Len(t, work.Books, 1) 216 | assert.Equal(t, int64(6609765), work.Books[0].ForeignID) 217 | }) 218 | 219 | t.Run("GetAuthor", func(t *testing.T) { 220 | authorBytes, err := ctrl.GetAuthor(ctx, 51942) 221 | assert.NoError(t, err) 222 | 223 | // author -> .Works.Authors.Works must not be null, but books can be 224 | 225 | var author AuthorResource 226 | require.NoError(t, json.Unmarshal(authorBytes, &author)) 227 | 228 | assert.Equal(t, int64(51942), author.ForeignID) 229 | require.Len(t, author.Works, 1) 230 | require.Len(t, author.Works[0].Authors, 1) 231 | require.Len(t, author.Works[0].Books, 1) 232 | }) 233 | 234 | t.Run("GetWork", func(t *testing.T) { 235 | // Make sure our cache is empty so we actually exercise the work refresh. 
236 | require.NoError(t, ctrl.cache.Expire(t.Context(), WorkKey(6803732))) 237 | require.NoError(t, ctrl.cache.Expire(t.Context(), BookKey(6609765))) 238 | 239 | workBytes, err := ctrl.GetWork(ctx, 6803732) 240 | assert.NoError(t, err) 241 | 242 | var work workResource 243 | require.NoError(t, json.Unmarshal(workBytes, &work)) 244 | 245 | require.Len(t, work.Authors, 1) 246 | assert.Equal(t, int64(51942), work.Authors[0].ForeignID) 247 | require.Len(t, work.Authors[0].Works, 1) 248 | 249 | require.Len(t, work.Books, 1) 250 | assert.Equal(t, int64(6609765), work.Books[0].ForeignID) 251 | }) 252 | } 253 | 254 | func TestReleaseDate(t *testing.T) { 255 | tests := []struct { 256 | given float64 257 | want string 258 | }{ 259 | { 260 | given: 715935600000, 261 | want: "1992-09-08 07:00:00", 262 | }, 263 | { 264 | // C#'s DateTime.Parse doesn't handle years before 1AD, so we omit 265 | // them. 266 | given: -73212048000000, 267 | want: "", 268 | }, 269 | { 270 | given: -62135596700000, 271 | want: "0001-01-01 00:01:40", 272 | }, 273 | { 274 | given: -62135596800000, 275 | want: "0001-01-01 00:00:00", 276 | }, 277 | { 278 | given: -62135596900000, 279 | want: "", 280 | }, 281 | } 282 | 283 | for _, tt := range tests { 284 | t.Run(fmt.Sprint(tt.given), func(t *testing.T) { 285 | got := releaseDate(tt.given) 286 | assert.Equal(t, tt.want, got) 287 | }) 288 | } 289 | } 290 | 291 | func TestBatchError(t *testing.T) { 292 | // If one of our results returns a 404, the other results should still succeed. 
293 | 294 | host := os.Getenv("GR_HOST") 295 | if host == "" { 296 | t.Skip("missing GR_HOST env var") 297 | return 298 | } 299 | 300 | upstream, err := NewUpstream(host, "", "") 301 | require.NoError(t, err) 302 | 303 | gql, err := NewGRGQL(t.Context(), upstream, "") 304 | require.NoError(t, err) 305 | 306 | var err1, err2 error 307 | 308 | wg := sync.WaitGroup{} 309 | wg.Add(1) 310 | go func() { 311 | defer wg.Done() 312 | _, err1 = gr.GetAuthorWorks(t.Context(), gql, gr.GetWorksByContributorInput{ 313 | Id: "kca://author/amzn1.gr.author.v1.lDq44Mxx0gBfWyqfZwEI1Q", 314 | }, gr.PaginationInput{Limit: 1}) 315 | }() 316 | 317 | wg.Add(1) 318 | go func() { 319 | defer wg.Done() 320 | _, err2 = gr.GetAuthorWorks(t.Context(), gql, gr.GetWorksByContributorInput{ 321 | Id: "kca://author", 322 | }, gr.PaginationInput{Limit: 1}) 323 | }() 324 | 325 | wg.Wait() 326 | 327 | assert.NoError(t, err1) 328 | 329 | gqlErr := &gqlerror.Error{} 330 | assert.ErrorAs(t, err2, &gqlErr) 331 | } 332 | 333 | func TestAuth(t *testing.T) { 334 | t.Parallel() 335 | 336 | // Sanity check that we're authorized for all relevant endpoints. 
337 | host := os.Getenv("GR_HOST") 338 | if host == "" { 339 | t.Skip("missing GR_HOST env var") 340 | return 341 | } 342 | 343 | cookie := os.Getenv("GR_TEST_COOKIE") 344 | if cookie == "" { 345 | t.Skip("missing GR_TEST_COOKIE") 346 | return 347 | } 348 | 349 | cache := &LayeredCache{wrapped: []cache[[]byte]{newMemoryCache()}} 350 | 351 | upstream, err := NewUpstream(host, cookie, "") 352 | require.NoError(t, err) 353 | 354 | gql, err := NewGRGQL(t.Context(), upstream, cookie) 355 | require.NoError(t, err) 356 | 357 | getter, err := NewGRGetter(cache, gql, upstream) 358 | require.NoError(t, err) 359 | ctrl, err := NewController(cache, getter) 360 | go ctrl.Run(t.Context(), time.Second) 361 | 362 | require.NoError(t, err) 363 | 364 | t.Run("GetAuthor", func(t *testing.T) { 365 | t.Parallel() 366 | _, err := ctrl.GetAuthor(t.Context(), 4178) 367 | assert.NoError(t, err) 368 | }) 369 | 370 | t.Run("GetBook", func(t *testing.T) { 371 | t.Parallel() 372 | _, err := ctrl.GetBook(t.Context(), 394535) 373 | assert.NoError(t, err) 374 | }) 375 | 376 | t.Run("GetWork", func(t *testing.T) { 377 | t.Parallel() 378 | _, err := ctrl.GetWork(t.Context(), 1930437) 379 | assert.NoError(t, err) 380 | }) 381 | 382 | t.Run("GetAuthorBooks", func(t *testing.T) { 383 | t.Parallel() 384 | iter := getter.GetAuthorBooks(t.Context(), 4178) 385 | gotBook := false 386 | for range iter { 387 | gotBook = true 388 | break 389 | } 390 | assert.True(t, gotBook) 391 | }) 392 | } 393 | -------------------------------------------------------------------------------- /internal/graphql.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "net/http" 9 | "strings" 10 | "sync" 11 | "time" 12 | 13 | "github.com/Khan/genqlient/graphql" 14 | "github.com/graphql-go/graphql/language/ast" 15 | "github.com/graphql-go/graphql/language/parser" 16 | 
"github.com/graphql-go/graphql/language/printer" 17 | "github.com/graphql-go/graphql/language/source" 18 | "github.com/graphql-go/graphql/language/visitor" 19 | "golang.org/x/exp/rand" 20 | ) 21 | 22 | // batchedgqlclient accumulates queries and executes them in batch in order to 23 | // make better use of RPS limits. 24 | type batchedgqlclient struct { 25 | mu sync.Mutex 26 | 27 | subscriptions map[string]*subscription 28 | qb *queryBuilder 29 | 30 | wrapped graphql.Client 31 | } 32 | 33 | // NewBatchedGraphQLClient creates a batching GraphQL client. Queries are 34 | // accumulated and executed regularly accurding to the given rate. 35 | func NewBatchedGraphQLClient(url string, client *http.Client, rate time.Duration) (graphql.Client, error) { 36 | wrapped := graphql.NewClient(url, client) 37 | 38 | c := &batchedgqlclient{ 39 | qb: newQueryBuilder(), 40 | subscriptions: map[string]*subscription{}, 41 | wrapped: wrapped, 42 | } 43 | 44 | go func() { 45 | for { 46 | time.Sleep(rate) 47 | c.flush(context.Background()) 48 | } 49 | }() 50 | return c, nil 51 | } 52 | 53 | // flush executes the aggregated queries and returns responses to listeners. 54 | func (c *batchedgqlclient) flush(ctx context.Context) { 55 | c.mu.Lock() 56 | defer c.mu.Unlock() 57 | 58 | if c.qb.op == nil || c.qb.fields == 0 { 59 | return // Nothing to do yet. 60 | } 61 | 62 | query, vars, err := c.qb.build() 63 | if err != nil { 64 | Log(ctx).Error("unable to build query", "err", err) 65 | return 66 | } 67 | 68 | data := map[string]any{} 69 | req := &graphql.Request{ 70 | Query: query, 71 | Variables: vars, 72 | OpName: c.qb.op.Name.Value, 73 | } 74 | resp := &graphql.Response{ 75 | Data: &data, 76 | } 77 | 78 | // Hold on to our subscribers before we reset the batcher. 79 | subscriptions := c.subscriptions 80 | 81 | // Issue the request in a separate goroutine so we can continue to 82 | // accumulate queries without needing to wait for the network call. 
83 | go func(qb *queryBuilder) { 84 | ctx, cancel := context.WithTimeout(ctx, 60*time.Second) 85 | defer cancel() 86 | 87 | err := c.wrapped.MakeRequest(ctx, req, resp) 88 | 89 | // Extract any field-level errors, and return them to their 90 | // subscribers. We can ignore the top-level err in this case, because 91 | // it's just the wrapped version of our response errors. 92 | if resp != nil && len(resp.Errors) > 0 { 93 | for _, e := range resp.Errors { 94 | sub, ok := subscriptions[e.Path.String()] 95 | if !ok { 96 | continue 97 | } 98 | sub.respC <- gqlStatusErr(e) 99 | // Remove our subscriber because we already responded. 100 | delete(subscriptions, e.Path.String()) 101 | } 102 | } else if err != nil { 103 | // For everything else return the status code to all our subscribers. 104 | Log(ctx).Warn("batched query error", "count", qb.fields, "err", err, "resp.Errors", resp.Errors) 105 | for _, sub := range subscriptions { 106 | sub.respC <- gqlStatusErr(err) 107 | } 108 | return 109 | } 110 | 111 | for id, sub := range subscriptions { 112 | // TODO: missing response. 113 | byt, err := json.Marshal(map[string]any{ 114 | sub.field: data[id], 115 | }) 116 | if err != nil { 117 | sub.respC <- err 118 | continue 119 | } 120 | 121 | sub.respC <- json.Unmarshal(byt, &sub.resp.Data) 122 | } 123 | }(c.qb) 124 | 125 | c.qb = newQueryBuilder() 126 | c.subscriptions = map[string]*subscription{} 127 | } 128 | 129 | // MakeRequest implements graphql.Client. 130 | func (c *batchedgqlclient) MakeRequest( 131 | ctx context.Context, 132 | req *graphql.Request, 133 | resp *graphql.Response, 134 | ) error { 135 | err := <-c.enqueue(ctx, req, resp).respC 136 | return err 137 | } 138 | 139 | // enqueue adds a query to the batch and returns a subscription whose result 140 | // channel resolves when the batch is executed. 
141 | func (c *batchedgqlclient) enqueue( 142 | ctx context.Context, 143 | req *graphql.Request, 144 | resp *graphql.Response, 145 | ) *subscription { 146 | c.mu.Lock() 147 | defer c.mu.Unlock() 148 | 149 | respC := make(chan error, 1) 150 | 151 | sub := &subscription{ 152 | ctx: ctx, 153 | resp: resp, 154 | respC: respC, 155 | } 156 | 157 | var vars map[string]any 158 | out, _ := json.Marshal(req.Variables) 159 | _ = json.Unmarshal(out, &vars) 160 | 161 | id, field, err := c.qb.add(req.Query, vars) 162 | if err != nil { 163 | respC <- err 164 | } 165 | 166 | c.subscriptions[id] = &subscription{ 167 | ctx: ctx, 168 | resp: resp, 169 | respC: respC, 170 | field: field, 171 | } 172 | 173 | return sub 174 | } 175 | 176 | // subscription holds information about a caller who is waiting for a query to 177 | // be resolved as part of a batch. 178 | type subscription struct { 179 | ctx context.Context 180 | resp *graphql.Response 181 | respC chan error 182 | field string 183 | } 184 | 185 | // gqlStatusErr translates errors into meaningful status codes. The client 186 | // normally returns error responses with a 200 OK status code and a populated 187 | // "Errors" field containing stringed errors. We want to instead surface e.g. 188 | // 404 errors directly. 189 | // 190 | // The error is returned unchanged if it doesn't include a status code. 191 | func gqlStatusErr(err error) error { 192 | errStr := err.Error() 193 | idx := strings.Index(errStr, "Request failed with status code") 194 | if idx == -1 { 195 | return err 196 | } 197 | code, _ := pathToID(errStr[idx:]) 198 | return errors.Join(err, statusErr(code)) 199 | } 200 | 201 | // queryBuilder accumulates queries into one query with multiple fields so they 202 | // can all be executed as part of one request. 203 | type queryBuilder struct { 204 | op *ast.OperationDefinition 205 | fields int 206 | vars map[string]any 207 | } 208 | 209 | // newQueryBuilder initializes a new QueryBuilder with an empty Document. 
210 | func newQueryBuilder() *queryBuilder { 211 | return &queryBuilder{ 212 | vars: make(map[string]any), 213 | } 214 | } 215 | 216 | var runes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") 217 | 218 | // randRunes returns a short random string of length n. 219 | func randRunes(n int) string { 220 | b := make([]rune, n) 221 | for i := range b { 222 | b[i] = runes[rand.Intn(len(runes))] 223 | } 224 | return string(b) 225 | } 226 | 227 | // add extends the current query with a new field. The field's alias and name 228 | // are returned so they can be recovered later. 229 | func (qb *queryBuilder) add(query string, vars map[string]any) (id string, field string, err error) { 230 | src := source.NewSource(&source.Source{ 231 | Body: []byte(query), 232 | }) 233 | 234 | parsedDoc, err := parser.Parse(parser.ParseParams{Source: src}) 235 | if err != nil { 236 | return "", "", fmt.Errorf("failed to parse query: %w", err) 237 | } 238 | 239 | id = randRunes(8) 240 | 241 | varRename := make(map[string]string) 242 | 243 | // TODO: Only handle one def 244 | for _, def := range parsedDoc.Definitions { 245 | opDef, ok := def.(*ast.OperationDefinition) 246 | if !ok { 247 | continue 248 | } 249 | 250 | if qb.op == nil { 251 | qb.op = opDef 252 | } 253 | 254 | // Visit the AST to rename vars and alias fields 255 | opts := visitor.VisitInParallel(&visitor.VisitorOptions{ 256 | Enter: func(p visitor.VisitFuncParams) (string, interface{}) { 257 | switch node := p.Node.(type) { 258 | case *ast.VariableDefinition: 259 | oldName := node.Variable.Name.Value 260 | newName := id + "_" + oldName 261 | varRename[oldName] = newName 262 | node.Variable.Name.Value = newName 263 | qb.vars[newName] = vars[oldName] 264 | case *ast.Variable: 265 | if newName, ok := varRename[node.Name.Value]; ok { 266 | node.Name.Value = newName 267 | } 268 | case *ast.Field: 269 | if len(p.Ancestors) == 3 { 270 | field = node.Name.Value 271 | node.Alias = &ast.Name{Value: id, Kind: "Name"} 272 | } 
273 | } 274 | return visitor.ActionNoChange, nil 275 | }, 276 | }) 277 | visitor.Visit(opDef, opts, nil) 278 | 279 | qb.fields++ 280 | 281 | if qb.op == opDef { 282 | continue 283 | } 284 | 285 | qb.op.SelectionSet.Selections = append(qb.op.SelectionSet.Selections, opDef.SelectionSet.Selections...) 286 | qb.op.VariableDefinitions = append(qb.op.VariableDefinitions, opDef.VariableDefinitions...) 287 | } 288 | 289 | return id, field, nil 290 | } 291 | 292 | // Build returns the merged query string and variables map. 293 | func (qb *queryBuilder) build() (string, map[string]any, error) { 294 | queryStr := printer.Print(qb.op) 295 | return fmt.Sprint(queryStr), qb.vars, nil 296 | } 297 | -------------------------------------------------------------------------------- /internal/graphql_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "os" 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/blampe/rreading-glasses/gr" 13 | "github.com/blampe/rreading-glasses/hardcover" 14 | "github.com/stretchr/testify/assert" 15 | "github.com/stretchr/testify/require" 16 | "github.com/vektah/gqlparser/v2/gqlerror" 17 | ) 18 | 19 | func TestQueryBuilderMultipleQueries(t *testing.T) { 20 | qb := newQueryBuilder() 21 | 22 | query1 := hardcover.GetBook_Operation 23 | vars1 := map[string]interface{}{"grBookIDs": []string{"1"}} 24 | 25 | query2 := hardcover.GetAuthorEditions_Operation 26 | vars2 := map[string]any{ 27 | "id": 1, 28 | "limit": 2, 29 | "offset": 3, 30 | } 31 | 32 | id1, _, err := qb.add(query1, vars1) 33 | require.NoError(t, err) 34 | 35 | id2, _, err := qb.add(query2, vars2) 36 | require.NoError(t, err) 37 | 38 | query, vars, err := qb.build() 39 | require.NoError(t, err) 40 | 41 | expected := fmt.Sprintf(`query GetBook($%s_grBookID: String!, $%s_id: Int!, $%s_limit: Int!, $%s_offset: Int!) 
{ 42 | %s: book_mappings(limit: 1, where: {platform_id: {_eq: 1}, external_id: {_eq: $%s_grBookID}}) { 43 | external_id 44 | edition { 45 | id 46 | title 47 | subtitle 48 | asin 49 | isbn_13 50 | edition_format 51 | pages 52 | audio_seconds 53 | language { 54 | language 55 | } 56 | publisher { 57 | name 58 | } 59 | release_date 60 | description 61 | identifiers 62 | book_id 63 | } 64 | book { 65 | id 66 | title 67 | subtitle 68 | description 69 | release_date 70 | cached_tags(path: "$.Genre") 71 | cached_image(path: "url") 72 | contributions { 73 | contributable_type 74 | contribution 75 | author { 76 | id 77 | name 78 | slug 79 | bio 80 | cached_image(path: "url") 81 | } 82 | } 83 | slug 84 | book_series { 85 | position 86 | series { 87 | id 88 | name 89 | description 90 | identifiers 91 | } 92 | } 93 | book_mappings { 94 | dto_external 95 | } 96 | rating 97 | ratings_count 98 | } 99 | } 100 | %s: authors(limit: 1, where: {id: {_eq: $%s_id}}) { 101 | location 102 | id 103 | slug 104 | contributions(limit: $%s_limit, offset: $%s_offset, order_by: {id: asc}, where: {contributable_type: {_eq: "Book"}}) { 105 | book { 106 | id 107 | title 108 | ratings_count 109 | book_mappings(limit: 1, where: {platform_id: {_eq: 1}}) { 110 | book_id 111 | edition_id 112 | external_id 113 | } 114 | } 115 | } 116 | identifiers(path: "goodreads[0]") 117 | } 118 | }`, id1, id2, id2, id2, id1, id1, id2, id2, id2, id2) 119 | 120 | assert.Equal(t, expected, query) 121 | 122 | assert.Len(t, vars, 4) 123 | assert.Contains(t, vars, id1+"_grBookID", id2+"_id", id2+"_limit", id2+"_offset") 124 | } 125 | 126 | func TestQueryBuilderSingleQuery(t *testing.T) { 127 | qb := newQueryBuilder() 128 | 129 | query := gr.GetAuthorWorks_Operation 130 | vars := map[string]interface{}{"grBookIDs": []string{"1"}} 131 | 132 | _, _, err := qb.add(query, vars) 133 | require.NoError(t, err) 134 | 135 | _, _, err = qb.build() 136 | require.NoError(t, err) 137 | } 138 | 139 | func TestBatching(t *testing.T) { 140 | 
apiKey := os.Getenv("HARDCOVER_API_KEY") 141 | if apiKey == "" { 142 | t.Skip("missing HARDCOVER_API_KEY") 143 | return 144 | } 145 | transport := &HeaderTransport{ 146 | Key: "Authorization", 147 | Value: "Bearer " + apiKey, 148 | RoundTripper: http.DefaultTransport, 149 | } 150 | 151 | client := &http.Client{Transport: transport} 152 | 153 | url := "https://api.hardcover.app/v1/graphql" 154 | 155 | gql, err := NewBatchedGraphQLClient(url, client, time.Second) 156 | require.NoError(t, err) 157 | 158 | start := time.Now() 159 | 160 | wg := sync.WaitGroup{} 161 | wg.Add(1) 162 | go func() { 163 | defer wg.Done() 164 | _, err := hardcover.GetBook(context.Background(), gql, "0156028352") 165 | if err != nil { 166 | panic(err) 167 | } 168 | }() 169 | 170 | wg.Add(1) 171 | go func() { 172 | defer wg.Done() 173 | _, err := hardcover.GetBook(context.Background(), gql, "0164005178") 174 | if err != nil { 175 | panic(err) 176 | } 177 | }() 178 | 179 | wg.Add(1) 180 | go func() { 181 | defer wg.Done() 182 | _, err := hardcover.GetBook(context.Background(), gql, "0340640138") 183 | if err != nil { 184 | panic(err) 185 | } 186 | }() 187 | 188 | wg.Add(1) 189 | go func() { 190 | defer wg.Done() 191 | _, err := hardcover.GetBook(context.Background(), gql, "missing") 192 | if err != nil { 193 | panic(err) 194 | } 195 | }() 196 | 197 | wg.Wait() 198 | 199 | assert.Less(t, time.Since(start), 4*time.Second) 200 | } 201 | 202 | func TestGQLStatusCode(t *testing.T) { 203 | err := &gqlerror.Error{Message: "womp"} 204 | assert.ErrorIs(t, err, gqlStatusErr(err)) 205 | 206 | err = &gqlerror.Error{Message: "Request failed with status code 403"} 207 | err403 := statusErr(403) 208 | assert.ErrorAs(t, gqlStatusErr(err), &err403) 209 | } 210 | -------------------------------------------------------------------------------- /internal/handler.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "cmp" 5 | "context" 6 | 
"encoding/json" 7 | "errors" 8 | "fmt" 9 | "net/http" 10 | "net/http/pprof" 11 | "net/url" 12 | "path" 13 | "regexp" 14 | "slices" 15 | "strconv" 16 | "sync" 17 | "time" 18 | ) 19 | 20 | // Handler is our HTTP Handler. It handles muxing, response headers, etc. and 21 | // offloads work to the controller. 22 | type Handler struct { 23 | ctrl *Controller 24 | http *http.Client 25 | } 26 | 27 | var _searchTTL = 24 * time.Hour 28 | 29 | // NewHandler creates a new handler. 30 | func NewHandler(ctrl *Controller) *Handler { 31 | h := &Handler{ 32 | ctrl: ctrl, 33 | http: &http.Client{}, 34 | } 35 | return h 36 | } 37 | 38 | // NewMux registers a handler's routes on a new mux. 39 | func NewMux(h *Handler) http.Handler { 40 | mux := http.NewServeMux() 41 | 42 | mux.HandleFunc("/work/{foreignID}", h.getWorkID) 43 | mux.HandleFunc("/book/{foreignEditionID}", h.getBookID) 44 | mux.HandleFunc("/book/bulk", h.bulkBook) 45 | mux.HandleFunc("/author/{foreignAuthorID}", h.getAuthorID) 46 | mux.HandleFunc("/author/changed", h.getAuthorChanged) 47 | 48 | mux.HandleFunc("/debug/pprof/", pprof.Index) 49 | mux.HandleFunc("/debug/pprof/profile/", pprof.Profile) 50 | mux.HandleFunc("/debug/pprof/symbol/", pprof.Symbol) 51 | mux.HandleFunc("/debug/pprof/trace/", pprof.Trace) 52 | 53 | // Default handler returns 404. 54 | mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 55 | http.NotFound(w, r) 56 | }) 57 | 58 | return mux 59 | } 60 | 61 | // TODO: The client retries on TooManyRequests, but will respect the 62 | // Retry-After (seconds) header. We should account for thundering herds. 63 | 64 | // bulkBook is sent as a POST request which isn't cachable. We immediately 65 | // redirect to GET with query params so it can be cached. 66 | // 67 | // The provided IDs are expected to be book (edition) IDs as returned by 68 | // auto_complete. 
69 | func (h *Handler) bulkBook(w http.ResponseWriter, r *http.Request) { 70 | ctx := r.Context() 71 | 72 | var ids []int64 73 | 74 | // If this is a POST, redirect to a GET with query params so the result can 75 | // be cached. 76 | if r.Method == http.MethodPost { 77 | err := json.NewDecoder(r.Body).Decode(&ids) 78 | if err != nil { 79 | h.error(w, errors.Join(err, errBadRequest)) 80 | return 81 | } 82 | if len(ids) == 0 { 83 | h.error(w, errMissingIDs) 84 | return 85 | } 86 | 87 | query := url.Values{} 88 | url := url.URL{Path: r.URL.Path} 89 | for _, id := range ids { 90 | query.Add("id", fmt.Sprint(id)) 91 | } 92 | 93 | url.RawQuery = query.Encode() 94 | 95 | Log(ctx).Debug("redirecting", "url", url.String()) 96 | http.Redirect(w, r, url.String(), http.StatusSeeOther) 97 | return 98 | } 99 | if r.Method != http.MethodGet { 100 | http.NotFound(w, r) 101 | return 102 | } 103 | 104 | // Parse query params. 105 | for _, idStr := range r.URL.Query()["id"] { 106 | id, err := pathToID(idStr) 107 | if err != nil { 108 | h.error(w, err) 109 | return 110 | } 111 | ids = append(ids, id) 112 | } 113 | if len(ids) == 0 { 114 | h.error(w, errMissingIDs) 115 | return 116 | } 117 | 118 | result := bulkBookResource{ 119 | Works: []workResource{}, 120 | Series: []seriesResource{}, 121 | Authors: []AuthorResource{}, 122 | } 123 | 124 | mu := sync.Mutex{} 125 | wg := sync.WaitGroup{} 126 | 127 | for _, id := range ids { 128 | wg.Add(1) 129 | 130 | go func(foreignBookID int64) { 131 | defer wg.Done() 132 | 133 | b, err := h.ctrl.GetBook(ctx, foreignBookID) 134 | if err != nil { 135 | if !errors.Is(err, errNotFound) { 136 | Log(ctx).Warn("getting book", "err", err, "bookID", foreignBookID) 137 | } 138 | return // Ignore the error. 139 | } 140 | 141 | var workRsc workResource 142 | err = json.Unmarshal(b, &workRsc) 143 | if err != nil { 144 | return // Ignore the error. 
145 | } 146 | 147 | mu.Lock() 148 | defer mu.Unlock() 149 | 150 | result.Works = append(result.Works, workRsc) 151 | result.Series = []seriesResource{} 152 | 153 | // Check if our result already includes this author. 154 | for _, a := range result.Authors { 155 | if a.ForeignID == workRsc.Authors[0].ForeignID { 156 | return // Nothing more to do. 157 | } 158 | } 159 | 160 | result.Authors = append(result.Authors, workRsc.Authors...) 161 | }(id) 162 | } 163 | 164 | wg.Wait() 165 | 166 | // Collect and de-dupe series -- is this even needed? 167 | seenSeries := map[int64]bool{} 168 | for _, a := range result.Authors { 169 | for _, s := range a.Series { 170 | if _, seen := seenSeries[s.ForeignID]; seen { 171 | continue 172 | } 173 | seenSeries[s.ForeignID] = true 174 | result.Series = append(result.Series, s) 175 | } 176 | } 177 | 178 | // Sort works by rating count. 179 | slices.SortFunc(result.Works, func(left, right workResource) int { 180 | return -cmp.Compare[int64](left.Books[0].RatingCount, right.Books[0].RatingCount) 181 | }) 182 | 183 | cacheFor(w, _searchTTL, true) 184 | _ = json.NewEncoder(w).Encode(result) 185 | } 186 | 187 | // getWorkID handles /work/{id} 188 | // 189 | // Upstream is /work/{workID} which redirects to /book/show/{bestBookID}. 190 | func (h *Handler) getWorkID(w http.ResponseWriter, r *http.Request) { 191 | ctx := r.Context() 192 | 193 | workID, err := pathToID(r.URL.Path) 194 | if err != nil { 195 | h.error(w, err) 196 | return 197 | } 198 | 199 | if r.Method == "DELETE" { 200 | _ = h.ctrl.cache.Expire(r.Context(), WorkKey(workID)) 201 | w.WriteHeader(http.StatusOK) 202 | return 203 | } 204 | 205 | out, err := h.ctrl.GetWork(ctx, workID) 206 | if err != nil { 207 | h.error(w, err) 208 | return 209 | } 210 | 211 | cacheFor(w, _workTTL, false) 212 | w.WriteHeader(http.StatusOK) 213 | _, _ = w.Write(out) 214 | } 215 | 216 | // cacheFor sets cache response headers. 
s-maxage controls CDN cache time; we 217 | // default to an hour expiry for clients. 218 | // 219 | // Set varyParams to true if the cache key should include query params. 220 | func cacheFor(w http.ResponseWriter, d time.Duration, varyParams bool) { 221 | w.Header().Add("Cache-Control", fmt.Sprintf("public, s-maxage=%d, max-age=3600", int(d.Seconds()))) 222 | w.Header().Add("Vary", "Content-Type,Accept-Encoding") // Ignore headers like User-Agent, etc. 223 | w.Header().Add("Content-Type", "application/json") 224 | // w.Header().Add("Content-Encoding", "gzip") // TODO: Negotiate this with the client. 225 | 226 | if !varyParams { 227 | // In most cases we ignore query params when serving cached responses, 228 | // except for the bulk endpoint and some redirects where these params 229 | // matter. 230 | w.Header().Add("No-Vary-Search", "params") 231 | } 232 | } 233 | 234 | // getBookID handles /book/{id}. 235 | // 236 | // Importantly, the client expects this to always return a redirect -- either 237 | // to an author or a work. The work returned is then expected to be "fat" with 238 | // all editions of the work attached to it. This is very large! 239 | // 240 | // (See BookInfoProxy GetEditionInfo.) 241 | // 242 | // Instead, we redirect to `/author/{authorID}?edition={id}` to return the 243 | // necessary structure with only the edition we care about. 
244 | func (h *Handler) getBookID(w http.ResponseWriter, r *http.Request) { 245 | ctx := r.Context() 246 | 247 | bookID, err := pathToID(r.URL.Path) 248 | if err != nil { 249 | h.error(w, err) 250 | return 251 | } 252 | 253 | if r.Method == "DELETE" { 254 | _ = h.ctrl.cache.Expire(r.Context(), BookKey(bookID)) 255 | w.WriteHeader(http.StatusOK) 256 | return 257 | } 258 | 259 | b, err := h.ctrl.GetBook(ctx, bookID) 260 | if err != nil { 261 | h.error(w, err) 262 | return 263 | } 264 | 265 | var workRsc workResource 266 | err = json.Unmarshal(b, &workRsc) 267 | if err != nil { 268 | h.error(w, err) 269 | return 270 | } 271 | 272 | cacheFor(w, _editionTTL, false) 273 | 274 | if len(workRsc.Authors) > 0 { 275 | http.Redirect(w, r, fmt.Sprintf("/author/%d?edition=%d", workRsc.Authors[0].ForeignID, bookID), http.StatusSeeOther) 276 | return 277 | } 278 | 279 | // This doesn't actually work -- the client gets a 280 | // System.NullReferenceException. But we should always have an author, so 281 | // we should never hit this. 282 | http.Redirect(w, r, fmt.Sprintf("/work/%d", workRsc.ForeignID), http.StatusSeeOther) 283 | } 284 | 285 | // getAuthorID handles /author/{id}. 286 | // 287 | // If an ?edition={bookID} query param is present, as with a /book/{id} 288 | // redirect, an author is returned with only that work/edition. 289 | func (h *Handler) getAuthorID(w http.ResponseWriter, r *http.Request) { 290 | ctx := r.Context() 291 | 292 | authorID, err := pathToID(r.URL.Path) 293 | if err != nil { 294 | h.error(w, err) 295 | return 296 | } 297 | 298 | if r.Method == "DELETE" { 299 | bytes, _ := h.ctrl.cache.Get(r.Context(), AuthorKey(authorID)) 300 | _ = h.ctrl.cache.Expire(r.Context(), AuthorKey(authorID)) 301 | go func() { 302 | // Expire all works/editions and then kick off a refresh. 
303 | var author AuthorResource 304 | _ = json.Unmarshal(bytes, &author) 305 | for _, w := range author.Works { 306 | for _, b := range w.Books { 307 | _ = h.ctrl.cache.Expire(context.Background(), BookKey(b.ForeignID)) 308 | } 309 | _ = h.ctrl.cache.Expire(context.Background(), WorkKey(w.ForeignID)) 310 | } 311 | _, _ = h.ctrl.GetAuthor(context.Background(), authorID) 312 | }() 313 | w.WriteHeader(http.StatusOK) 314 | return 315 | } 316 | 317 | out, err := h.ctrl.GetAuthor(r.Context(), authorID) 318 | if err != nil { 319 | h.error(w, err) 320 | return 321 | } 322 | 323 | // If a specific edition was requested, mutate the returned author to 324 | // include only that edition. This satisifies SearchByGRBookId. 325 | if edition := r.URL.Query().Get("edition"); edition != "" { 326 | bookID, err := pathToID(edition) 327 | if err != nil { 328 | h.error(w, err) 329 | return 330 | } 331 | var author AuthorResource 332 | err = json.Unmarshal(out, &author) 333 | if err != nil { 334 | h.error(w, err) 335 | return 336 | } 337 | 338 | var work workResource 339 | ww, err := h.ctrl.GetBook(ctx, bookID) 340 | if err != nil { 341 | h.error(w, err) 342 | return 343 | } 344 | 345 | err = json.Unmarshal(ww, &work) 346 | if err != nil { 347 | h.error(w, err) 348 | return 349 | } 350 | 351 | author.Works = []workResource{work} 352 | 353 | cacheFor(w, _authorTTL, true) 354 | _ = json.NewEncoder(w).Encode(author) 355 | return 356 | 357 | } 358 | 359 | cacheFor(w, _authorTTL, true) 360 | w.WriteHeader(http.StatusOK) 361 | _, _ = w.Write(out) 362 | } 363 | 364 | // getAuthorChanged handles the `/author/changed?since={datetime}` endpoint. 365 | // 366 | // Normally this would return IDs for _all_ authors updated since the given 367 | // timestamp -- not just the authors in your library. The query param makes 368 | // this uncachable and it's an expensive operation, so we return nothing and 369 | // force the client to no-op. 
370 | // 371 | // As a result, the client will periodically re-query `/author/{id}`: 372 | // - At least once every 30 days. 373 | // - Not more than every 12 hours. 374 | // - At least every 2 days if the author is "continuing" -- which always 375 | // seems to be the case? I don't think we're respecting end/death times 376 | // because they aren't returned by us. 377 | // - Every day if they released a book in the past 30 days, maybe to pick up 378 | // newer ratings? Unclear. 379 | // 380 | // These will hit cached entries, and the client will pick up newer data 381 | // gradually as entries become invalidated. 382 | func (h *Handler) getAuthorChanged(w http.ResponseWriter, _ *http.Request) { 383 | cacheFor(w, _searchTTL, false) 384 | w.WriteHeader(http.StatusOK) 385 | _, _ = w.Write([]byte(`{"Limitted": true, "Ids": []}`)) 386 | } 387 | 388 | // error writes an error message. The status code defaults to 500 unless the 389 | // error wraps a statusErr. 390 | func (*Handler) error(w http.ResponseWriter, err error) { 391 | status := http.StatusInternalServerError 392 | var s statusErr 393 | if errors.As(err, &s) { 394 | status = s.Status() 395 | } 396 | http.Error(w, err.Error(), status) 397 | } 398 | 399 | var _number = regexp.MustCompile("-?[0-9]+") 400 | 401 | func pathToID(p string) (int64, error) { 402 | p = path.Base(p) 403 | p = _number.FindString(p) 404 | i, err := strconv.ParseInt(p, 10, 64) 405 | if err != nil { 406 | return 0, errors.Join(err, errBadRequest) 407 | } 408 | if i <= 0 { 409 | return i, errors.Join(fmt.Errorf("expected %d to be positive", i), errBadRequest) 410 | } 411 | 412 | // The OL metadata server can send IDs over 1B which we don't want to handle. 
413 | // ID < 1 billion -> GR 414 | // ID > 1 billion -> OL 415 | if i > 1000000000 { 416 | return i, errors.Join(errBadRequest, errors.New("OpenLibrary IDs are not supported")) 417 | } 418 | 419 | return i, nil 420 | } 421 | -------------------------------------------------------------------------------- /internal/handler_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestPathToID(t *testing.T) { 10 | tests := []struct { 11 | given string 12 | want int64 13 | wantErr error 14 | }{ 15 | { 16 | given: "/book/show/27362503-it-ends-with-us", 17 | want: 27362503, 18 | }, 19 | { 20 | given: "/book/show/7244.The_Poisonwood_Bible", 21 | want: 7244, 22 | }, 23 | { 24 | given: "/work/1842237", 25 | want: 1842237, 26 | }, 27 | { 28 | given: "/book/show/15704307-saga-volume-1", 29 | want: 15704307, 30 | }, 31 | { 32 | given: "https://www.example.com/book/show/218467.Lucifer_s_Hammer", 33 | want: 218467, 34 | }, 35 | { 36 | given: "/author/-1234", 37 | want: -1234, 38 | wantErr: errBadRequest, 39 | }, 40 | { 41 | given: "/author/10000000000", 42 | want: 10000000000, 43 | wantErr: errBadRequest, 44 | }, 45 | } 46 | 47 | for _, tt := range tests { 48 | actual, err := pathToID(tt.given) 49 | assert.ErrorIs(t, err, tt.wantErr) 50 | assert.Equal(t, tt.want, actual) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /internal/hardcover.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "iter" 8 | "net/http" 9 | "strings" 10 | 11 | "github.com/Khan/genqlient/graphql" 12 | "github.com/blampe/rreading-glasses/hardcover" 13 | ) 14 | 15 | // HCGetter implements a Getter using the Hardcover API as its source. 
It 16 | // attempts to minimize upstread HEAD requests (to resolve book/work IDs) by 17 | // relying on HC's raw external data. 18 | type HCGetter struct { 19 | cache cache[[]byte] 20 | gql graphql.Client 21 | upstream *http.Client 22 | } 23 | 24 | var _ getter = (*HCGetter)(nil) 25 | 26 | // NewHardcoverGetter returns a new Getter backed by Hardcover. 27 | func NewHardcoverGetter(cache cache[[]byte], gql graphql.Client, upstream *http.Client) (*HCGetter, error) { 28 | return &HCGetter{cache: cache, gql: gql, upstream: upstream}, nil 29 | } 30 | 31 | // GetWork returns the canonical edition for a book. Hardcover's GR mappings 32 | // are entirely edition-based, with one edition representing the canonical 33 | // book/work. 34 | // 35 | // A GR Work ID should therefore be mapped to a HC Book ID. However the HC API 36 | // only allows us to query GR Book ID -> HC Edition ID. Therefore we perform a 37 | // HEAD request to the GR work to resolve it's canonical Book ID, and then 38 | // return that. 39 | func (g *HCGetter) GetWork(ctx context.Context, grWorkID int64, _ editionsCallback) ([]byte, int64, error) { 40 | workBytes, ttl, ok := g.cache.GetWithTTL(ctx, WorkKey(grWorkID)) 41 | if ok && ttl > 0 { 42 | return workBytes, 0, nil 43 | } 44 | 45 | // TODO: Loading the best book ID on a cache refresh will lose any other 46 | // editions previously attached to this work. Instead we should re-assemble 47 | // the book array by re-fetching the latest books from the cache. 48 | if ok { 49 | var work workResource 50 | _ = json.Unmarshal(workBytes, &work) 51 | 52 | bookID := work.BestBookID 53 | if bookID != 0 { 54 | out, _, authorID, err := g.GetBook(ctx, bookID, nil) 55 | return out, authorID, err 56 | } 57 | } 58 | Log(ctx).Debug("getting work", "grWorkID", grWorkID) 59 | 60 | // Sniff GR to resolve the work ID. 
61 | bookID, err := g.resolveRedirect(ctx, fmt.Sprintf("/work/%d", grWorkID)) 62 | if err != nil { 63 | return nil, 0, fmt.Errorf("problem getting HEAD: %w", err) 64 | } 65 | 66 | workBytes, _, authorID, err := g.GetBook(ctx, bookID, nil) 67 | return workBytes, authorID, err 68 | } 69 | 70 | // GetBook looks up a GR book (edition) in Hardcover's mappings. 71 | func (g *HCGetter) GetBook(ctx context.Context, grBookID int64, _ editionsCallback) ([]byte, int64, int64, error) { 72 | if workBytes, ok := g.cache.Get(ctx, BookKey(grBookID)); ok { 73 | return workBytes, 0, 0, nil 74 | } 75 | 76 | resp, err := hardcover.GetBook(ctx, g.gql, fmt.Sprint(grBookID)) 77 | if err != nil { 78 | return nil, 0, 0, fmt.Errorf("getting book: %w", err) 79 | } 80 | 81 | if len(resp.Book_mappings) == 0 { 82 | return nil, 0, 0, errNotFound 83 | } 84 | bm := resp.Book_mappings[0] 85 | 86 | tags := []struct { 87 | Tag string `json:"tag"` 88 | }{} 89 | genres := []string{} 90 | 91 | err = json.Unmarshal(bm.Book.Cached_tags, &tags) 92 | if err != nil { 93 | return nil, 0, 0, err 94 | } 95 | for _, t := range tags { 96 | genres = append(genres, t.Tag) 97 | } 98 | if len(genres) == 0 { 99 | genres = []string{"none"} 100 | } 101 | 102 | series := []seriesResource{} 103 | for _, s := range bm.Book.Book_series { 104 | series = append(series, seriesResource{ 105 | Title: s.Series.Name, 106 | ForeignID: s.Series.Id, 107 | Description: s.Series.Description, 108 | 109 | LinkItems: []seriesWorkLinkResource{{ 110 | PositionInSeries: fmt.Sprint(s.Position), 111 | SeriesPosition: int(s.Position), // TODO: What's the difference b/t placement? 112 | ForeignWorkID: -1, // TODO: Needs to be GR Work ID. 113 | Primary: false, // TODO: What is this? 114 | }}, 115 | }) 116 | } 117 | 118 | bookDescription := strings.TrimSpace(bm.Edition.Description) 119 | if bookDescription == "" { 120 | bookDescription = bm.Book.Description 121 | } 122 | if bookDescription == "" { 123 | bookDescription = "N/A" // Must be set. 
124 | } 125 | 126 | editionTitle := bm.Edition.Title 127 | editionFullTitle := editionTitle 128 | editionSubtitle := bm.Edition.Subtitle 129 | 130 | if editionSubtitle != "" { 131 | editionTitle = strings.ReplaceAll(editionTitle, ": "+editionSubtitle, "") 132 | editionFullTitle = editionTitle + ": " + editionSubtitle 133 | } 134 | 135 | bookRsc := bookResource{ 136 | ForeignID: grBookID, 137 | Asin: bm.Edition.Asin, 138 | Description: bookDescription, 139 | Isbn13: bm.Edition.Isbn_13, 140 | Title: editionTitle, 141 | FullTitle: editionFullTitle, 142 | ShortTitle: editionTitle, 143 | Language: bm.Edition.Language.Language, 144 | Format: bm.Edition.Edition_format, 145 | EditionInformation: "", // TODO: Is this used anywhere? 146 | Publisher: bm.Edition.Publisher.Name, // TODO: Ignore books without publishers? 147 | ImageURL: strings.ReplaceAll(string(bm.Book.Cached_image), `"`, ``), 148 | IsEbook: true, // TODO: Flush this out. 149 | NumPages: bm.Edition.Pages, 150 | RatingCount: bm.Book.Ratings_count, 151 | RatingSum: int64(float64(bm.Book.Ratings_count) * bm.Book.Rating), 152 | AverageRating: bm.Book.Rating, 153 | URL: "https://hardcover.app/books/" + bm.Book.Slug, 154 | ReleaseDate: bm.Edition.Release_date, 155 | 156 | // TODO: Grab release date from book if absent 157 | 158 | // TODO: Omitting release date is a way to essentially force R to hide 159 | // the book from the frontend while allowing the user to still add it 160 | // via search. Better UX depending on what you're after. 161 | } 162 | 163 | authorDescription := "N/A" // Must be set. 
164 | author := bm.Book.Contributions[0].Author 165 | if author.Bio != "" { 166 | authorDescription = author.Bio 167 | } 168 | 169 | workID := int64(0) 170 | grAuthorID := int64(0) 171 | for _, bmbm := range bm.Book.Book_mappings { 172 | var dto struct { 173 | RawData struct { 174 | Work struct { 175 | ID int64 `json:"id"` 176 | } `json:"work"` 177 | Authors struct { 178 | Author struct { 179 | ID string `json:"id"` 180 | } `json:"author"` 181 | } `json:"authors"` 182 | } `json:"raw_data"` 183 | } 184 | err := json.Unmarshal(bmbm.Dto_external, &dto) 185 | if err != nil { 186 | continue 187 | } 188 | if dto.RawData.Work.ID != 0 { 189 | workID = dto.RawData.Work.ID 190 | } 191 | if dto.RawData.Authors.Author.ID != "" { 192 | grAuthorID, _ = pathToID(dto.RawData.Authors.Author.ID) 193 | } 194 | if workID != 0 && grAuthorID != 0 { 195 | break 196 | } 197 | } 198 | if workID == 0 { 199 | Log(ctx).Warn("upstream doesn't have a work ID", "grBookID", grBookID) 200 | return nil, 0, 0, errNotFound 201 | } 202 | if grAuthorID == 0 { 203 | Log(ctx).Warn("upstream doesn't have an author ID", "grBookID", grBookID) 204 | return nil, 0, 0, errNotFound 205 | } 206 | 207 | authorRsc := AuthorResource{ 208 | KCA: fmt.Sprint(author.Id), 209 | Name: author.Name, 210 | ForeignID: grAuthorID, 211 | URL: "https://hardcover.app/authors/" + author.Slug, 212 | ImageURL: strings.ReplaceAll(string(author.Cached_image), `"`, ``), 213 | Description: authorDescription, 214 | Series: series, // TODO:: Doesn't fully work yet #17. 215 | } 216 | 217 | // If we haven't already cached this author do so now, because we don't 218 | // normally have a way to lookup GR Author ID -> HC Author. This will get 219 | // incrementally filled in by denormalizeWorks. 
220 | if _, ok := g.cache.Get(ctx, AuthorKey(grAuthorID)); !ok { 221 | authorBytes, _ := json.Marshal(authorRsc) 222 | g.cache.Set(ctx, AuthorKey(grAuthorID), authorBytes, _authorTTL) 223 | // Don't use 2x TTL so the next fetch triggers a refresh 224 | } 225 | 226 | workTitle := bm.Book.Title 227 | workFullTitle := workTitle 228 | workSubtitle := bm.Book.Subtitle 229 | 230 | if workSubtitle != "" { 231 | workTitle = strings.ReplaceAll(workTitle, ": "+workSubtitle, "") 232 | workFullTitle = workTitle + ": " + workSubtitle 233 | } 234 | 235 | workRsc := workResource{ 236 | Title: workTitle, 237 | FullTitle: workFullTitle, 238 | ShortTitle: workTitle, 239 | ForeignID: workID, 240 | URL: "https://hardcover.app/books/" + bm.Book.Slug, 241 | ReleaseDate: bm.Book.Release_date, 242 | Series: series, 243 | Genres: genres, 244 | RelatedWorks: []int{}, 245 | } 246 | 247 | bookRsc.Contributors = []contributorResource{{ForeignID: grAuthorID, Role: "Author"}} 248 | authorRsc.Works = []workResource{workRsc} 249 | workRsc.Authors = []AuthorResource{authorRsc} 250 | workRsc.Books = []bookResource{bookRsc} // TODO: Add best book here as well? 251 | 252 | out, err := json.Marshal(workRsc) 253 | if err != nil { 254 | return nil, 0, 0, fmt.Errorf("marshaling work") 255 | } 256 | 257 | // If a work isn't already cached with this ID, write one using our edition as a starting point. 258 | if _, ok := g.cache.Get(ctx, WorkKey(workRsc.ForeignID)); !ok { 259 | g.cache.Set(ctx, WorkKey(workRsc.ForeignID), out, _workTTL) 260 | } 261 | 262 | return out, workRsc.ForeignID, authorRsc.ForeignID, nil 263 | } 264 | 265 | // GetAuthorBooks returns all GR book (edition) IDs. 
266 | func (g *HCGetter) GetAuthorBooks(ctx context.Context, authorID int64) iter.Seq[int64] { 267 | noop := func(yield func(int64) bool) {} 268 | authorBytes, ok := g.cache.Get(ctx, AuthorKey(authorID)) 269 | if !ok { 270 | Log(ctx).Debug("skipping uncached author", "authorID", authorID) 271 | return noop 272 | } 273 | 274 | var author AuthorResource 275 | err := json.Unmarshal(authorBytes, &author) 276 | if err != nil { 277 | Log(ctx).Warn("problem unmarshaling author", "authorID", authorID) 278 | return noop 279 | } 280 | 281 | hcAuthorID, _ := pathToID(author.KCA) 282 | 283 | return func(yield func(int64) bool) { 284 | limit, offset := int64(20), int64(0) 285 | for { 286 | gae, err := hardcover.GetAuthorEditions(ctx, g.gql, hcAuthorID, limit, offset) 287 | if err != nil { 288 | Log(ctx).Warn("problem getting author editions", "err", err, "authorID", authorID) 289 | return 290 | } 291 | 292 | if len(gae.Authors) == 0 { 293 | Log(ctx).Warn("expected an author but got none", "authorID", authorID) 294 | return 295 | } 296 | 297 | hcAuthor := gae.Authors[0] 298 | for _, c := range hcAuthor.Contributions { 299 | if len(c.Book.Book_mappings) == 0 { 300 | Log(ctx).Debug("no mappings found") 301 | continue 302 | } 303 | 304 | grAuthorID, _ := pathToID(string(hcAuthor.Identifiers)) 305 | if grAuthorID != authorID { 306 | Log(ctx).Debug("skipping unrelated author", "want", authorID, "got", grAuthorID) 307 | continue 308 | } 309 | 310 | externalID := c.Book.Book_mappings[0].External_id 311 | grBookID, err := pathToID(externalID) 312 | if err != nil { 313 | Log(ctx).Warn("unexpected ID error", "err", err, "externalID", externalID) 314 | continue 315 | } 316 | 317 | if !yield(grBookID) { 318 | return 319 | } 320 | } 321 | 322 | // This currently returns a ton of stuff including translated works. So we 323 | // stop prematurely instead of loading all of it for now. 
324 | // offset += limit 325 | break 326 | } 327 | } 328 | } 329 | 330 | // GetAuthor looks up a GR author on Hardcover. The HC API doesn't track GR 331 | // author IDs, so we only become aware of the HC ID once one of the author's 332 | // books is queried in GetBook. 333 | func (g *HCGetter) GetAuthor(ctx context.Context, grAuthorID int64) ([]byte, error) { 334 | authorBytes, ok := g.cache.Get(ctx, AuthorKey(grAuthorID)) 335 | 336 | if !ok { 337 | // We don't yet have a HC author ID, so give up. 338 | return nil, errNotFound 339 | } 340 | 341 | // Nothing else to load for now -- works will be attached asynchronously by 342 | // the controller. 343 | return authorBytes, nil 344 | } 345 | 346 | // resolveRedirect performs a HEAD request against the given URL, which is 347 | // expected to return a redirect. An ID is extracted from the location header 348 | // and returned. For example this allows resolving a canonical book ID by 349 | // sniffing /work/{id}. 350 | func (g *HCGetter) resolveRedirect(ctx context.Context, url string) (int64, error) { 351 | head, _ := http.NewRequestWithContext(ctx, "HEAD", url, nil) 352 | resp, err := g.upstream.Do(head) 353 | if err != nil { 354 | return 0, fmt.Errorf("problem getting HEAD: %w", err) 355 | } 356 | 357 | location := resp.Header.Get("location") 358 | if location == "" { 359 | return 0, fmt.Errorf("missing location header") 360 | } 361 | 362 | id, err := pathToID(location) 363 | if err != nil { 364 | Log(ctx).Warn("likely auth error", "err", err, "head", url, "redirect", location) 365 | return 0, fmt.Errorf("invalid redirect, likely auth error: %w", err) 366 | } 367 | 368 | return id, nil 369 | } 370 | -------------------------------------------------------------------------------- /internal/hardcover_test.go: -------------------------------------------------------------------------------- 1 | //go:generate go run go.uber.org/mock/mockgen -typed -source hardcover_test.go -package hardcover -destination hardcover/mock.go 
. gql
package internal

import (
	"context"
	"encoding/json"
	"net/http"
	"testing"
	"time"

	"github.com/Khan/genqlient/graphql"
	"github.com/blampe/rreading-glasses/hardcover"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"
)

// gql exists only so mockgen can generate a mock graphql.Client.
//nolint:unused
type gql interface {
	graphql.Client
}

// transport exists only so mockgen can generate a mock http.RoundTripper.
//nolint:unused
type transport interface {
	http.RoundTripper
}

func TestGetBookDataIntegrity(t *testing.T) {
	// The client is particularly sensitive to null values.
	// For a given work resource, it MUST
	// - have non-null top-level books
	// - non-null ratingcount, averagerating
	// - have a contributor with a foreign id

	t.Parallel()

	ctx := context.Background()
	c := gomock.NewController(t)
	upstream := hardcover.NewMocktransport(c)

	// Stub the GraphQL client: dispatch on operation name and fill the
	// response structs with fixture data mirroring real HC payloads.
	gql := hardcover.NewMockgql(c)
	gql.EXPECT().MakeRequest(gomock.Any(),
		gomock.AssignableToTypeOf(&graphql.Request{}),
		gomock.AssignableToTypeOf(&graphql.Response{})).DoAndReturn(
		func(ctx context.Context, req *graphql.Request, res *graphql.Response) error {
			if req.OpName == "GetBook" {
				gbr, ok := res.Data.(*hardcover.GetBookResponse)
				if !ok {
					panic(gbr)
				}
				gbr.Book_mappings = []hardcover.GetBookBook_mappings{
					{
						Edition: hardcover.GetBookBook_mappingsEditionEditions{
							Id:             30405274,
							Title:          "Out of My Mind",
							Asin:           "",
							Isbn_13:        "9781416971702",
							Edition_format: "Hardcover",
							Pages:          295,
							Audio_seconds:  0,
							Language: hardcover.GetBookBook_mappingsEditionEditionsLanguageLanguages{
								Language: "English",
							},
							Publisher: hardcover.GetBookBook_mappingsEditionEditionsPublisherPublishers{
								Name: "Atheneum",
							},
							Release_date: "2010-01-01",
							Description:  "foo",
							// dto_external(path:"identifiers") seems to be more complete
							Identifiers: json.RawMessage(`{
								"asin": [],
								"lccn": [
									"2009018404"
								],
								"oclc": [
									"401713291"
								],
								"ocaid": [],
								"isbn_10": [
									"141697170X"
								],
								"isbn_13": [
									"9781416971702",
									"9781416980452"
								],
								"gr": [],
								"kindle_asin": [],
								"openlibrary": [
									"OL24378894M"
								],
								"inventaire_id": []
							}`),
							Book_id: 141397,
						},
						Book: hardcover.GetBookBook_mappingsBookBooks{
							Id:           141397,
							Title:        "Out of My Mind",
							Description:  "foo",
							Release_date: "2010-01-01",
							Cached_tags: json.RawMessage(`[
								{
									"tag": "Fiction",
									"tagSlug": "fiction",
									"category": "Genre",
									"categorySlug": "genre",
									"spoilerRatio": 0,
									"count": 29758
								},
								{
									"tag": "Young Adult",
									"tagSlug": "young-adult",
									"category": "Genre",
									"categorySlug": "genre",
									"spoilerRatio": 0,
									"count": 22645
								},
								{
									"tag": "Juvenile Fiction",
									"tagSlug": "juvenile-fiction",
									"category": "Genre",
									"categorySlug": "genre",
									"spoilerRatio": 0,
									"count": 3661
								},
								{
									"tag": "Juvenile Nonfiction",
									"tagSlug": "juvenile-nonfiction-6a8774e3-9173-46e1-87d7-ea5fa5eb20e8",
									"category": "Genre",
									"categorySlug": "genre",
									"spoilerRatio": 0,
									"count": 1561
								},
								{
									"tag": "Family",
									"tagSlug": "family",
									"category": "Genre",
									"categorySlug": "genre",
									"spoilerRatio": 0,
									"count": 847
								}
							]`),
							Cached_image: json.RawMessage("https://assets.hardcover.app/edition/30405274/d41534ce6075b53289d1c4d57a6dac34b974ce91.jpeg"),
							Contributions: []hardcover.GetBookBook_mappingsBookBooksContributions{
								{
									Contributable_type: "Book",
									Author: hardcover.GetBookBook_mappingsBookBooksContributionsAuthorAuthors{
										Id:           97020,
										Name:         "Sharon M. Draper",
										Slug:         "sharon-m-draper",
										Cached_image: json.RawMessage("https://assets.hardcover.app/books/97020/10748148-L.jpg"),
									},
								},
							},
							Slug: "out-of-my-mind",
							Book_series: []hardcover.GetBookBook_mappingsBookBooksBook_series{
								{
									Position: 1,
									Series: hardcover.GetBookBook_mappingsBookBooksBook_seriesSeries{
										Id:   6143,
										Name: "Out of My Mind",
										Identifiers: json.RawMessage(`{
											"gr": [
												"326523"
											]
										}`),
									},
								},
							},
							Rating:        4.111111111111111,
							Ratings_count: 63,
							// First mapping is intentionally empty to exercise the
							// getter's skip-and-continue behavior when raw_data is
							// missing.
							Book_mappings: []hardcover.GetBookBook_mappingsBookBooksBook_mappings{
								{
									Dto_external: json.RawMessage(`{}`),
								},
								{
									Dto_external: json.RawMessage(`{
										"raw_data": {
											"work": {
												"id": 6803732
											},
											"authors": {
												"author": {
													"id": "51942"
												}
											}
										}
									}`),
								},
							},
						},
					},
				}

				return nil

			}
			// NOTE(review): the getter calls hardcover.GetAuthorEditions;
			// confirm its OpName really is "GetAuthorWorks" -- if not, this
			// branch never fires and the mock returns an empty response.
			if req.OpName == "GetAuthorWorks" {
				gaw, ok := res.Data.(*hardcover.GetAuthorEditionsResponse)
				if !ok {
					panic(gaw)
				}
				gaw.Authors = []hardcover.GetAuthorEditionsAuthors{
					{
						Id:   97020,
						Slug: "sharon-m-draper",
						Contributions: []hardcover.GetAuthorEditionsAuthorsContributions{
							{
								Book: hardcover.GetAuthorEditionsAuthorsContributionsBookBooks{
									Id:            141397,
									Title:         "Out of My Mind",
									Ratings_count: 63,
									Book_mappings: []hardcover.GetAuthorEditionsAuthorsContributionsBookBooksBook_mappings{
										{
											Book_id:     141397,
											Edition_id:  30405274,
											External_id: "6609765",
										},
									},
								},
							},
						},
					},
				}
			}
			return nil
		}).AnyTimes()

	cache := newMemoryCache()
	getter, err := NewHardcoverGetter(cache, gql, &http.Client{Transport: upstream})
	require.NoError(t, err)

	ctrl, err := NewController(cache, getter)
	require.NoError(t, err)

	go ctrl.Run(context.Background(), 0) // Denormalize data in the background.

	t.Run("GetBook", func(t *testing.T) {
		bookBytes, err := ctrl.GetBook(ctx, 6609765)
		assert.NoError(t, err)

		var work workResource
		require.NoError(t, json.Unmarshal(bookBytes, &work))

		assert.Equal(t, int64(6803732), work.ForeignID)
		require.Len(t, work.Authors, 1)
		require.Len(t, work.Authors[0].Works, 1)
		assert.Equal(t, int64(51942), work.Authors[0].ForeignID)

		require.Len(t, work.Books, 1)
		assert.Equal(t, int64(6609765), work.Books[0].ForeignID)
	})

	// NOTE(review): a fixed sleep to wait for the background goroutine is
	// potentially flaky on slow machines; consider polling instead.
	time.Sleep(100 * time.Millisecond) // Wait for data denormalization.

	t.Run("GetAuthor", func(t *testing.T) {
		authorBytes, err := ctrl.GetAuthor(ctx, 51942)
		assert.NoError(t, err)

		// author -> .Works.Authors.Works must not be null, but books can be

		var author AuthorResource
		require.NoError(t, json.Unmarshal(authorBytes, &author))

		assert.Equal(t, int64(51942), author.ForeignID)
		require.Len(t, author.Works, 1)
		require.Len(t, author.Works[0].Authors, 1)
		require.Len(t, author.Works[0].Books, 1)
	})

	t.Run("GetWork", func(t *testing.T) {
		workBytes, err := ctrl.GetWork(ctx, 6803732)
		assert.NoError(t, err)

		var work workResource
		require.NoError(t, json.Unmarshal(workBytes, &work))

		require.Len(t, work.Authors, 1)
		assert.Equal(t, int64(51942), work.Authors[0].ForeignID)
		require.Len(t, work.Authors[0].Works, 1)

		require.Len(t, work.Books, 1)
		assert.Equal(t, int64(6609765), work.Books[0].ForeignID)
	})
}
--------------------------------------------------------------------------------
/internal/language.go:
--------------------------------------------------------------------------------
package internal

// _codes maps English language names to their ISO 639-3 codes.
var _codes =
map[string]string{ 4 | "English": "eng", 5 | "French": "fra", 6 | "Spanish": "spa", 7 | "German": "deu", 8 | "Italian": "ita", 9 | "Danish": "dan", 10 | "Dutch": "nld", 11 | "Japanese": "jpn", 12 | "Icelandic": "isl", 13 | "Chinese": "zho", 14 | "Russian": "rus", 15 | "Polish": "pol", 16 | "Vietnamese": "vie", 17 | "Swedish": "swe", 18 | "Norwegian": "nor", 19 | "Norwegian Bokmal": "nob", 20 | "Finnish": "fin", 21 | "Turkish": "tur", 22 | "Portuguese": "por", 23 | "Greek": "ell", 24 | "Korean": "kor", 25 | "Hungarian": "hun", 26 | "Hebrew": "heb", 27 | "Czech": "ces", 28 | "Hindi": "hin", 29 | "Thai": "tha", 30 | "Bulgarian": "bul", 31 | "Romanian": "ron", 32 | "Arabic": "ara", 33 | } 34 | 35 | func iso639_3(name string) (iso string) { 36 | iso, ok := _codes[name] 37 | if ok { 38 | return iso 39 | } 40 | return name 41 | } 42 | -------------------------------------------------------------------------------- /internal/log.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "log/slog" 8 | "net/http" 9 | "os" 10 | "time" 11 | 12 | "github.com/charmbracelet/lipgloss" 13 | charm "github.com/charmbracelet/log" 14 | "github.com/go-chi/chi/v5/middleware" 15 | "github.com/mattn/go-isatty" 16 | ) 17 | 18 | var _logHandler *charm.Logger 19 | 20 | // Log returns a logger scoped to the request ID if present in the context. 21 | func Log(ctx context.Context) *slog.Logger { 22 | return slog.Default().With("trace", ctx.Value(middleware.RequestIDKey)) 23 | } 24 | 25 | // SetLogLevel sets the log level. 26 | func SetLogLevel(l charm.Level) { 27 | _logHandler.SetLevel(l) 28 | } 29 | 30 | // Requestlogger logs some info about requests we handled. 31 | type Requestlogger struct{} 32 | 33 | // Wrap applies middleware. 
// Wrap returns a handler that records method/path/IP, tees the response body
// so it can be included in error logs, and emits one structured log line per
// request with status, duration, and bytes written.
func (Requestlogger) Wrap(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()

		attrs := []slog.Attr{
			slog.String("method", r.Method),
			slog.String("path", r.URL.Path),
			slog.String("ip", r.RemoteAddr),
		}

		start := time.Now()
		ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)

		// Tee the response into a buffer so 5xx handlers' error text can be
		// attached to the log line below.
		body := &bytes.Buffer{}
		ww.Tee(body)

		defer func() {
			status := ww.Status()
			duration := time.Since(start)

			attrs = append([]slog.Attr{
				slog.Int("status", status),
				slog.Duration("duration", duration),
				slog.Int("bytes", ww.BytesWritten()),
			}, attrs...)

			// 5xx logs as error (with the response body), most 4xx as warn;
			// 404/400 are expected noise and stay at info.
			level := slog.LevelInfo
			switch {
			case status >= 500:
				level = slog.LevelError
				attrs = append(attrs, slog.String("err", body.String()))
			case status >= 400 && status != http.StatusNotFound && status != http.StatusBadRequest:
				level = slog.LevelWarn
			default:
			}

			Log(ctx).LogAttrs(ctx, level,
				fmt.Sprintf("%s %s => HTTP %d (%v)", r.Method, r.URL.String(), ww.Status(), duration),
				attrs...)
		}()

		next.ServeHTTP(ww, r.WithContext(ctx))
	})
}

// set up our default log handler and formatting.
func init() {
	styles := charm.DefaultStyles()
	styles.Keys["err"] = lipgloss.NewStyle().Foreground(lipgloss.Color("204")).Bold(true)
	styles.Keys["status"] = lipgloss.NewStyle().Foreground(lipgloss.Color("86"))
	styles.Values["trace"] = lipgloss.NewStyle().Faint(true)

	_logHandler = charm.NewWithOptions(os.Stdout, charm.Options{
		ReportTimestamp: true,
		TimeFormat:      time.StampMilli,
		Level:           charm.InfoLevel,
	})
	_logHandler.SetStyles(styles)

	// Output JSON in containers.
	if !isatty.IsTerminal(os.Stdout.Fd()) {
		_logHandler.SetFormatter(
			charm.JSONFormatter,
		)
		_logHandler.SetTimeFormat(time.RFC3339)
	}

	logger := slog.New(_logHandler)
	slog.SetDefault(logger)
}
--------------------------------------------------------------------------------
/internal/memory.go:
--------------------------------------------------------------------------------
package internal

import (
	"context"
	"runtime/debug"
	"time"

	"github.com/dgraph-io/ristretto/v2"
)

var _ cache[[]byte] = (*memoryCache)(nil)

// newMemoryCache returns a new in-memory cache.
func newMemoryCache() cache[[]byte] {
	r, err := ristretto.NewCache(&ristretto.Config[string, []byte]{
		NumCounters: 5e7,                             // Track LRU for up to 50M keys.
		MaxCost:     3 * (debug.SetMemoryLimit(-1) / 4), // Use 75% of available memory.
		BufferItems: 64,                              // Number of keys per Get buffer.
	})
	if err != nil {
		panic(err)
	}

	return &memoryCache{r}
}

// memoryCache is a thin cache[[]byte] adapter over a ristretto cache.
type memoryCache struct {
	r *ristretto.Cache[string, []byte]
}

// Get returns the cached value and whether it was present.
func (c *memoryCache) Get(_ context.Context, key string) ([]byte, bool) {
	return c.r.Get(key)
}

// GetWithTTL returns the value along with its remaining TTL.
// NOTE(review): the TTL and value are fetched in two separate calls, so the
// entry could in principle be evicted between them -- confirm callers
// tolerate (nil, ttl, true).
func (c *memoryCache) GetWithTTL(ctx context.Context, key string) ([]byte, time.Duration, bool) {
	ttl, ok := c.r.GetTTL(key)
	bytes, _ := c.Get(ctx, key)
	return bytes, ttl, ok
}

// Set stores the value with the given TTL, using the value's size as its
// cache cost, and waits for the write to be applied.
func (c *memoryCache) Set(_ context.Context, key string, value []byte, ttl time.Duration) {
	_ = c.r.SetWithTTL(key, value, int64(len(value)), ttl)
	c.r.Wait() // Synchronous set.
}

// Expire removes the key from the cache immediately. It never returns an
// error; deletion of a missing key is a no-op.
func (c *memoryCache) Expire(_ context.Context, key string) error {
	c.r.Del(key)
	return nil
}
--------------------------------------------------------------------------------
/internal/mock.go:
--------------------------------------------------------------------------------
// Code generated by MockGen. DO NOT EDIT.
// Source: controller.go
//
// Generated by this command:
//
//	mockgen -typed -source controller.go -package internal -destination mock.go . getter
//

// Package internal is a generated GoMock package.
package internal

import (
	context "context"
	iter "iter"
	reflect "reflect"

	gomock "go.uber.org/mock/gomock"
)

// Mockgetter is a mock of getter interface.
type Mockgetter struct {
	ctrl     *gomock.Controller
	recorder *MockgetterMockRecorder
}

// MockgetterMockRecorder is the mock recorder for Mockgetter.
type MockgetterMockRecorder struct {
	mock *Mockgetter
}

// NewMockgetter creates a new mock instance.
func NewMockgetter(ctrl *gomock.Controller) *Mockgetter {
	mock := &Mockgetter{ctrl: ctrl}
	mock.recorder = &MockgetterMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *Mockgetter) EXPECT() *MockgetterMockRecorder {
	return m.recorder
}

// GetAuthor mocks base method.
func (m *Mockgetter) GetAuthor(ctx context.Context, authorID int64) ([]byte, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetAuthor", ctx, authorID)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetAuthor indicates an expected call of GetAuthor.
func (mr *MockgetterMockRecorder) GetAuthor(ctx, authorID any) *MockgetterGetAuthorCall {
	mr.mock.ctrl.T.Helper()
	call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthor", reflect.TypeOf((*Mockgetter)(nil).GetAuthor), ctx, authorID)
	return &MockgetterGetAuthorCall{Call: call}
}

// MockgetterGetAuthorCall wrap *gomock.Call
type MockgetterGetAuthorCall struct {
	*gomock.Call
}

// Return rewrite *gomock.Call.Return
func (c *MockgetterGetAuthorCall) Return(arg0 []byte, arg1 error) *MockgetterGetAuthorCall {
	c.Call = c.Call.Return(arg0, arg1)
	return c
}

// Do rewrite *gomock.Call.Do
func (c *MockgetterGetAuthorCall) Do(f func(context.Context, int64) ([]byte, error)) *MockgetterGetAuthorCall {
	c.Call = c.Call.Do(f)
	return c
}

// DoAndReturn rewrite *gomock.Call.DoAndReturn
func (c *MockgetterGetAuthorCall) DoAndReturn(f func(context.Context, int64) ([]byte, error)) *MockgetterGetAuthorCall {
	c.Call = c.Call.DoAndReturn(f)
	return c
}

// GetAuthorBooks mocks base method.
func (m *Mockgetter) GetAuthorBooks(ctx context.Context, authorID int64) iter.Seq[int64] {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetAuthorBooks", ctx, authorID)
	ret0, _ := ret[0].(iter.Seq[int64])
	return ret0
}

// GetAuthorBooks indicates an expected call of GetAuthorBooks.
func (mr *MockgetterMockRecorder) GetAuthorBooks(ctx, authorID any) *MockgetterGetAuthorBooksCall {
	mr.mock.ctrl.T.Helper()
	call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorBooks", reflect.TypeOf((*Mockgetter)(nil).GetAuthorBooks), ctx, authorID)
	return &MockgetterGetAuthorBooksCall{Call: call}
}

// MockgetterGetAuthorBooksCall wrap *gomock.Call
type MockgetterGetAuthorBooksCall struct {
	*gomock.Call
}

// Return rewrite *gomock.Call.Return
func (c *MockgetterGetAuthorBooksCall) Return(arg0 iter.Seq[int64]) *MockgetterGetAuthorBooksCall {
	c.Call = c.Call.Return(arg0)
	return c
}

// Do rewrite *gomock.Call.Do
func (c *MockgetterGetAuthorBooksCall) Do(f func(context.Context, int64) iter.Seq[int64]) *MockgetterGetAuthorBooksCall {
	c.Call = c.Call.Do(f)
	return c
}

// DoAndReturn rewrite *gomock.Call.DoAndReturn
func (c *MockgetterGetAuthorBooksCall) DoAndReturn(f func(context.Context, int64) iter.Seq[int64]) *MockgetterGetAuthorBooksCall {
	c.Call = c.Call.DoAndReturn(f)
	return c
}

// GetBook mocks base method.
func (m *Mockgetter) GetBook(ctx context.Context, bookID int64, loadEditions editionsCallback) ([]byte, int64, int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetBook", ctx, bookID, loadEditions)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(int64)
	ret2, _ := ret[2].(int64)
	ret3, _ := ret[3].(error)
	return ret0, ret1, ret2, ret3
}

// GetBook indicates an expected call of GetBook.
func (mr *MockgetterMockRecorder) GetBook(ctx, bookID, loadEditions any) *MockgetterGetBookCall {
	mr.mock.ctrl.T.Helper()
	call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBook", reflect.TypeOf((*Mockgetter)(nil).GetBook), ctx, bookID, loadEditions)
	return &MockgetterGetBookCall{Call: call}
}

// MockgetterGetBookCall wrap *gomock.Call
type MockgetterGetBookCall struct {
	*gomock.Call
}

// Return rewrite *gomock.Call.Return
func (c *MockgetterGetBookCall) Return(arg0 []byte, workID, authorID int64, arg3 error) *MockgetterGetBookCall {
	c.Call = c.Call.Return(arg0, workID, authorID, arg3)
	return c
}

// Do rewrite *gomock.Call.Do
func (c *MockgetterGetBookCall) Do(f func(context.Context, int64, editionsCallback) ([]byte, int64, int64, error)) *MockgetterGetBookCall {
	c.Call = c.Call.Do(f)
	return c
}

// DoAndReturn rewrite *gomock.Call.DoAndReturn
func (c *MockgetterGetBookCall) DoAndReturn(f func(context.Context, int64, editionsCallback) ([]byte, int64, int64, error)) *MockgetterGetBookCall {
	c.Call = c.Call.DoAndReturn(f)
	return c
}

// GetWork mocks base method.
func (m *Mockgetter) GetWork(ctx context.Context, workID int64, loadEditions editionsCallback) ([]byte, int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetWork", ctx, workID, loadEditions)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(int64)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// GetWork indicates an expected call of GetWork.
172 | func (mr *MockgetterMockRecorder) GetWork(ctx, workID, loadEditions any) *MockgetterGetWorkCall { 173 | mr.mock.ctrl.T.Helper() 174 | call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWork", reflect.TypeOf((*Mockgetter)(nil).GetWork), ctx, workID, loadEditions) 175 | return &MockgetterGetWorkCall{Call: call} 176 | } 177 | 178 | // MockgetterGetWorkCall wrap *gomock.Call 179 | type MockgetterGetWorkCall struct { 180 | *gomock.Call 181 | } 182 | 183 | // Return rewrite *gomock.Call.Return 184 | func (c *MockgetterGetWorkCall) Return(arg0 []byte, authorID int64, arg2 error) *MockgetterGetWorkCall { 185 | c.Call = c.Call.Return(arg0, authorID, arg2) 186 | return c 187 | } 188 | 189 | // Do rewrite *gomock.Call.Do 190 | func (c *MockgetterGetWorkCall) Do(f func(context.Context, int64, editionsCallback) ([]byte, int64, error)) *MockgetterGetWorkCall { 191 | c.Call = c.Call.Do(f) 192 | return c 193 | } 194 | 195 | // DoAndReturn rewrite *gomock.Call.DoAndReturn 196 | func (c *MockgetterGetWorkCall) DoAndReturn(f func(context.Context, int64, editionsCallback) ([]byte, int64, error)) *MockgetterGetWorkCall { 197 | c.Call = c.Call.DoAndReturn(f) 198 | return c 199 | } 200 | -------------------------------------------------------------------------------- /internal/postgres.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "context" 7 | "database/sql" 8 | _ "embed" // For schema. 9 | "errors" 10 | "fmt" 11 | "io" 12 | "time" 13 | 14 | _ "github.com/jackc/pgx/v5/stdlib" // pgx driver 15 | "go.uber.org/zap/buffer" 16 | ) 17 | 18 | //go:embed schema.sql 19 | var _schema string 20 | 21 | // _buffers reduces GC. 
22 | var _buffers = buffer.NewPool() 23 | 24 | var _ cache[[]byte] = (*pgcache)(nil) 25 | 26 | func newPostgres(ctx context.Context, dsn string) (*pgcache, error) { 27 | db, err := newDB(ctx, dsn) 28 | if err != nil { 29 | return nil, fmt.Errorf("creating db: %w", err) 30 | } 31 | return &pgcache{db: db}, nil 32 | } 33 | 34 | // newDB connects to our DB and applies our schema. 35 | func newDB(ctx context.Context, dsn string) (*sql.DB, error) { 36 | db, err := sql.Open("pgx", dsn) 37 | if err != nil { 38 | return nil, fmt.Errorf("dbinit: %w", err) 39 | } 40 | err = db.PingContext(ctx) 41 | if err != nil { 42 | return nil, fmt.Errorf("establishing db connection: %w", err) 43 | } 44 | 45 | _logHandler.Info("ensuring DB schema") 46 | _, err = db.ExecContext(ctx, _schema) 47 | if err != nil { 48 | return nil, fmt.Errorf("ensuring schema: %w", err) 49 | } 50 | 51 | return db, nil 52 | } 53 | 54 | // pgcache implements a cacher for use with layeredcache. 55 | type pgcache struct { 56 | db *sql.DB 57 | } 58 | 59 | func (pg *pgcache) Get(ctx context.Context, key string) ([]byte, bool) { 60 | val, _, ok := pg.GetWithTTL(ctx, key) 61 | return val, ok 62 | } 63 | 64 | func (pg *pgcache) GetWithTTL(ctx context.Context, key string) ([]byte, time.Duration, bool) { 65 | var compressed []byte 66 | var expires time.Time 67 | err := pg.db.QueryRowContext(ctx, `SELECT value, expires FROM cache WHERE key = $1;`, key).Scan(&compressed, &expires) 68 | if err != nil { 69 | return nil, 0, false 70 | } 71 | 72 | // TODO: The client doesn't support gzip content-encoding, which is 73 | // bade because we could just return compressed bytes as-is. 74 | buf := _buffers.Get() 75 | defer buf.Free() 76 | 77 | err = decompress(ctx, bytes.NewReader(compressed), buf) 78 | if err != nil { 79 | return nil, 0, false 80 | } 81 | 82 | // We can't return the buffer's underlying byte slice, so make a copy. 83 | // Still allocates but simpler than returning the raw buffer for now. 
84 | uncompressed := bytes.Clone(buf.Bytes()) 85 | 86 | // Treat expired entries as a miss to force a refresh, but still return 87 | // the cached data because it can help speed up the refresh. 88 | ttl := time.Until(expires) 89 | if ttl <= 0 { 90 | return uncompressed, 0, false 91 | } 92 | 93 | return uncompressed, ttl, true 94 | } 95 | 96 | func (pg *pgcache) Set(ctx context.Context, key string, val []byte, ttl time.Duration) { 97 | expires := time.Now().Add(ttl) 98 | 99 | buf := _buffers.Get() 100 | defer buf.Free() 101 | 102 | err := compress(bytes.NewReader(val), buf) 103 | if err != nil { 104 | Log(ctx).Error("problem compressing value", "err", err, "key", key) 105 | } 106 | _, err = pg.db.ExecContext(ctx, 107 | `INSERT INTO cache (key, value, expires) VALUES ($1, $2, $3) ON CONFLICT (key) DO UPDATE SET value = $4, expires = $5;`, 108 | key, buf.Bytes(), expires, buf.Bytes(), expires, 109 | ) 110 | if err != nil { 111 | Log(ctx).Error("problem setting cache", "err", err) 112 | } 113 | } 114 | 115 | // Expire can expire a row if provided the key as a tag. 
116 | func (pg *pgcache) Expire(ctx context.Context, key string) error { 117 | _, err := pg.db.ExecContext(ctx, `UPDATE cache SET expires = $1 WHERE key = $2;`, time.UnixMicro(0), key) 118 | return err 119 | } 120 | 121 | func compress(plaintext io.Reader, buf *buffer.Buffer) error { 122 | zw := gzip.NewWriter(buf) 123 | _, err := io.Copy(zw, plaintext) 124 | err = errors.Join(err, zw.Close()) 125 | return err 126 | } 127 | 128 | func decompress(ctx context.Context, compressed io.Reader, buf *buffer.Buffer) error { 129 | zr, err := gzip.NewReader(compressed) 130 | if err != nil && !errors.Is(err, io.EOF) { 131 | Log(ctx).Warn("problem unzipping", "err", err) 132 | return err 133 | } 134 | 135 | _, err = io.Copy(buf, zr) 136 | if err != nil && !errors.Is(err, io.EOF) { 137 | Log(ctx).Warn("problem decompressing", "err", err) 138 | return err 139 | } 140 | if err := zr.Close(); err != nil { 141 | Log(ctx).Warn("problem closing zip write", "err", err) 142 | } 143 | 144 | return nil 145 | } 146 | -------------------------------------------------------------------------------- /internal/postgres_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/rand/v2" 7 | "strings" 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | func TestPostgres(t *testing.T) { 17 | ctx := context.Background() 18 | 19 | cache, err := newPostgres(ctx, "postgres://postgres@localhost:5432/test") 20 | require.NoError(t, err) 21 | 22 | missing, ok := cache.Get(ctx, "missing") 23 | assert.False(t, ok) 24 | assert.Nil(t, missing) 25 | 26 | cache.Set(ctx, "expired", []byte{1}, 0) 27 | expired, ok := cache.Get(ctx, "expired") 28 | assert.False(t, ok) 29 | assert.Equal(t, []byte{1}, expired) 30 | 31 | cache.Set(ctx, "cached", []byte{2}, time.Hour) 32 | cached, ttl, ok := cache.GetWithTTL(ctx, "cached") 33 
| assert.True(t, ok) 34 | assert.Equal(t, []byte{2}, cached) 35 | assert.Greater(t, ttl, time.Minute) 36 | 37 | cache.Set(ctx, "cached", []byte{3}, time.Hour) 38 | updated, ok := cache.Get(ctx, "cached") 39 | assert.True(t, ok) 40 | assert.Equal(t, []byte{3}, updated) 41 | 42 | assert.NoError(t, cache.Expire(ctx, "cached")) 43 | } 44 | 45 | // TestPostgresCache randomly writes and reads values from the cache 46 | // concurrently to confirm things like our buffer pooling work correctly under 47 | // load. 48 | func TestPostgresCache(t *testing.T) { 49 | t.Parallel() 50 | 51 | dsn := "postgres://postgres@localhost:5432/test" 52 | ctx := context.Background() 53 | cache, err := NewCache(ctx, dsn) 54 | require.NoError(t, err) 55 | 56 | n := 400 57 | wg := sync.WaitGroup{} 58 | 59 | for i := range n { 60 | wg.Add(1) 61 | go func() { 62 | defer wg.Done() 63 | 64 | s := strings.Repeat(fmt.Sprint(i), i) 65 | sleep := time.Duration(rand.Float64() / 10.0 * float64(time.Second)) 66 | time.Sleep(sleep) 67 | cache.Set(ctx, fmt.Sprint(i), []byte(s), time.Minute) 68 | }() 69 | } 70 | wg.Wait() 71 | 72 | checkCache := func(cache *LayeredCache) { 73 | for i := range n { 74 | wg.Add(1) 75 | go func() { 76 | defer wg.Done() 77 | 78 | sleep := time.Duration(rand.Float64() / 10.0 * float64(time.Second)) 79 | time.Sleep(sleep) 80 | actual, ok := cache.Get(ctx, fmt.Sprint(i)) 81 | if i == 0 { 82 | // Empty value isn't set. 83 | require.False(t, ok) 84 | return 85 | } 86 | require.True(t, ok) 87 | expected := strings.Repeat(fmt.Sprint(i), i) 88 | assert.Equal(t, expected, string(actual)) 89 | }() 90 | 91 | } 92 | wg.Wait() 93 | } 94 | 95 | t.Run("warm in-memory cache", func(t *testing.T) { 96 | t.Parallel() 97 | checkCache(cache) 98 | }) 99 | 100 | t.Run("cold in-memory cache", func(t *testing.T) { 101 | t.Parallel() 102 | // Create a new cache. 
103 | coldCache, err := NewCache(ctx, dsn) 104 | require.NoError(t, err) 105 | checkCache(coldCache) 106 | }) 107 | 108 | t.Cleanup(func() { 109 | for i := range n { 110 | _ = cache.Expire(ctx, fmt.Sprint(i)) 111 | } 112 | }) 113 | } 114 | -------------------------------------------------------------------------------- /internal/resources.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | // TODO: These could be generated from the OpenAPI spec. 4 | // https://github.com/Readarr/Readarr/blob/develop/src/Readarr.Api.V1/openapi.json 5 | 6 | type bulkBookResource struct { 7 | Works []workResource `json:"Works"` 8 | Series []seriesResource `json:"Series"` 9 | Authors []AuthorResource `json:"Authors"` 10 | } 11 | 12 | type workResource struct { 13 | ForeignID int64 `json:"ForeignId"` 14 | Title string `json:"Title"` // This is what's ultimately displayed in the app. 15 | FullTitle string `json:"FullTitle"` // The title + subtitle. 16 | ShortTitle string `json:"ShortTitle"` // Just the title. 17 | URL string `json:"Url"` 18 | ReleaseDate string `json:"ReleaseDate,omitempty"` 19 | Genres []string `json:"Genres"` 20 | RelatedWorks []int `json:"RelatedWorks"` // ForeignId 21 | 22 | Books []bookResource `json:"Books"` 23 | Series []seriesResource `json:"Series"` 24 | Authors []AuthorResource `json:"Authors"` 25 | 26 | // New fields 27 | KCA string `json:"KCA"` 28 | BestBookID int64 `json:"BestBookId"` 29 | } 30 | 31 | // AuthorResource collects every edition of every work by an author. 32 | type AuthorResource struct { 33 | ForeignID int64 `json:"ForeignId"` 34 | Name string `json:"Name"` 35 | Description string `json:"Description"` 36 | ImageURL string `json:"ImageUrl"` 37 | URL string `json:"Url"` 38 | RatingCount int64 `json:"RatingCount"` 39 | AverageRating float32 `json:"AverageRating"` 40 | 41 | // Relations. 
42 | Works []workResource `json:"Works"` 43 | Series []seriesResource `json:"Series"` 44 | 45 | // New fields. 46 | KCA string `json:"KCA"` 47 | } 48 | 49 | type bookResource struct { 50 | ForeignID int64 `json:"ForeignId"` 51 | Asin string `json:"Asin"` 52 | Description string `json:"Description"` 53 | Isbn13 string `json:"Isbn13,omitempty"` 54 | Title string `json:"Title"` // This is what's ultimately displayed in the app. 55 | FullTitle string `json:"FullTitle"` // The title + subtitle. 56 | ShortTitle string `json:"ShortTitle"` // Just the title. 57 | Language string `json:"Language"` 58 | Format string `json:"Format"` 59 | EditionInformation string `json:"EditionInformation"` 60 | Publisher string `json:"Publisher"` 61 | ImageURL string `json:"ImageUrl"` 62 | IsEbook bool `json:"IsEbook"` 63 | NumPages int64 `json:"NumPages"` 64 | RatingCount int64 `json:"RatingCount"` 65 | AverageRating float64 `json:"AverageRating"` 66 | URL string `json:"Url"` 67 | ReleaseDate string `json:"ReleaseDate,omitempty"` 68 | 69 | Contributors []contributorResource `json:"Contributors"` 70 | 71 | // New fields 72 | KCA string `json:"KCA"` 73 | RatingSum int64 `json:"RatingSum"` 74 | } 75 | 76 | type seriesResource struct { 77 | ForeignID int64 `json:"ForeignId"` 78 | Title string `json:"Title"` 79 | Description string `json:"Description"` 80 | 81 | LinkItems []seriesWorkLinkResource `json:"LinkItems"` 82 | 83 | // New fields 84 | KCA string `json:"KCA"` 85 | } 86 | 87 | type seriesWorkLinkResource struct { 88 | ForeignWorkID int64 `json:"ForeignWorkId"` 89 | PositionInSeries string `json:"PositionInSeries"` 90 | SeriesPosition int `json:"SeriesPosition"` 91 | Primary bool `json:"Primary"` 92 | } 93 | 94 | type contributorResource struct { 95 | ForeignID int64 `json:"ForeignId"` 96 | Role string `json:"Role"` 97 | } 98 | -------------------------------------------------------------------------------- /internal/schema.sql: 
-------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS "cache" ( 2 | "key" TEXT NOT NULL PRIMARY KEY, 3 | "value" BYTEA NOT NULL, 4 | "expires" TIMESTAMPTZ NOT NULL DEFAULT NOW() + INTERVAL '7 day' 5 | ); 6 | CREATE INDEX IF NOT EXISTS cache_expires_idx ON "cache" (expires); 7 | 8 | -------------------------------------------------------------------------------- /internal/transport.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "log/slog" 5 | "net/http" 6 | "time" 7 | 8 | "golang.org/x/time/rate" 9 | ) 10 | 11 | // throttledTransport rate limits requests. 12 | type throttledTransport struct { 13 | http.RoundTripper 14 | *rate.Limiter 15 | } 16 | 17 | func (t throttledTransport) RoundTrip(r *http.Request) (*http.Response, error) { 18 | if err := t.Limiter.Wait(r.Context()); err != nil { 19 | return nil, err 20 | } 21 | resp, err := t.RoundTripper.RoundTrip(r) 22 | 23 | // Back off for a minute if we got a 403. 24 | // TODO: Return a Retry-After: (seconds) response header.. 25 | if resp != nil && resp.StatusCode == http.StatusForbidden { 26 | slog.Default().Warn("backing off after 403", "limit", t.Limiter.Limit(), "tokens", t.Limiter.Tokens()) 27 | orig := t.Limiter.Limit() 28 | t.Limiter.SetLimit(rate.Every(time.Hour / 60)) // 1RPM 29 | t.Limiter.SetLimitAt(time.Now().Add(time.Minute), orig) // Restore 30 | } 31 | 32 | return resp, err 33 | } 34 | 35 | // ScopedTransport restricts requests to a particular host. 36 | type ScopedTransport struct { 37 | Host string 38 | http.RoundTripper 39 | } 40 | 41 | // RoundTrip forces the request to stick to the given host, so redirects can't 42 | // send us elsewhere. Helpful to ensuring credentials don't leak to other 43 | // domains. 
44 | func (t ScopedTransport) RoundTrip(r *http.Request) (*http.Response, error) { 45 | r.URL.Scheme = "https" 46 | r.URL.Host = t.Host 47 | return t.RoundTripper.RoundTrip(r) 48 | } 49 | 50 | // cookieTransport transport adds a cookie to all requests. Best used with a 51 | // scopedTransport. 52 | type cookieTransport struct { 53 | cookies []*http.Cookie 54 | http.RoundTripper 55 | } 56 | 57 | func (t cookieTransport) RoundTrip(r *http.Request) (*http.Response, error) { 58 | for _, c := range t.cookies { 59 | r.AddCookie(c) 60 | } 61 | return t.RoundTripper.RoundTrip(r) 62 | } 63 | 64 | // HeaderTransport adds a header to all requests. Best used with a 65 | // scopedTransport. 66 | type HeaderTransport struct { 67 | Key string 68 | Value string 69 | http.RoundTripper 70 | } 71 | 72 | // RoundTrip always sets the header on the request. 73 | func (t *HeaderTransport) RoundTrip(r *http.Request) (*http.Response, error) { 74 | r.Header.Add(t.Key, t.Value) 75 | return t.RoundTripper.RoundTrip(r) 76 | } 77 | 78 | // errorProxyTransport returns a non-nil statusErr for all response codes 400 79 | // and above so we can return a response with the same code. 80 | type errorProxyTransport struct { 81 | http.RoundTripper 82 | } 83 | 84 | // RoundTrip wraps upstream 4XX and 5XX errors such that they are returned 85 | // directly to the client. 
86 | func (t errorProxyTransport) RoundTrip(r *http.Request) (*http.Response, error) { 87 | resp, err := t.RoundTripper.RoundTrip(r) 88 | if err != nil { 89 | return nil, err 90 | } 91 | if resp.StatusCode >= 400 { 92 | return nil, statusErr(resp.StatusCode) 93 | } 94 | return resp, nil 95 | } 96 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ] 6 | } 7 | --------------------------------------------------------------------------------