├── .dockerignore ├── .github └── workflows │ ├── binaries.yml │ ├── build-libheif.sh │ ├── docker.yml │ ├── download_pgo.yml │ ├── download_providers.yml │ └── main.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── README.md ├── SECURITY.md ├── api ├── _apimeta │ └── auth.go ├── _auth_cache │ └── auth_cache.go ├── _debug │ └── pprof.go ├── _responses │ ├── content.go │ ├── errors.go │ ├── meta.go │ └── redirect.go ├── _routers │ ├── 00-install-params.go │ ├── 01-install_metadata.go │ ├── 02-install-headers.go │ ├── 03-host_detection.go │ ├── 04-request-metrics.go │ ├── 97-optional-access-token.go │ ├── 97-require-access-token.go │ ├── 97-require-repo-admin.go │ ├── 97-require-server-auth.go │ ├── 98-use-rcontext.go │ └── 99-response-metrics.go ├── branched_route.go ├── custom │ ├── datastores.go │ ├── exports.go │ ├── federation.go │ ├── health.go │ ├── imports.go │ ├── media_attributes.go │ ├── purge.go │ ├── quarantine.go │ ├── tasks.go │ ├── usage.go │ └── version.go ├── r0 │ ├── download.go │ ├── identicon.go │ ├── logout.go │ ├── preview_url.go │ ├── public_config.go │ ├── thumbnail.go │ ├── upload_async.go │ ├── upload_sync.go │ └── versions.go ├── router.go ├── routes.go ├── unstable │ ├── info.go │ ├── local_copy.go │ └── public_usage.go ├── v1 │ ├── create.go │ ├── download.go │ └── thumbnail.go └── webserver.go ├── archival ├── dir_part_persister.go ├── entity_export.go └── v2archive │ ├── manifest.go │ ├── reader.go │ └── writer.go ├── assets ├── default-artwork.png └── providers.json ├── build-dist.sh ├── build.sh ├── cmd ├── archival │ ├── gdpr_export │ │ └── main.go │ └── gdpr_import │ │ └── main.go ├── homeserver_live_importers │ ├── _common │ │ ├── download.go │ │ └── init_import.go │ ├── import_dendrite │ │ └── main.go │ └── import_synapse │ │ └── main.go ├── homeserver_offline_exporters │ ├── _common │ │ ├── archive_reader.go │ │ └── init_export.go │ └── import_to_synapse │ │ └── main.go ├── homeserver_offline_importers │ ├── _common │ │ ├── archiver.go │ │ └── init_export.go │ ├── export_dendrite_for_import │ │ └── main.go │ └── export_synapse_for_import │ │ └── main.go ├── plugins │ └── plugin_antispam_ocr │ │ └── main.go ├── utilities │ ├── _common │ │ └── signing_key_export.go │ ├── combine_signing_keys │ │ └── main.go │ ├── compile_assets │ │ └── main.go │ ├── generate_signing_key │ │ └── main.go │ ├── s3_consistency_check │ │ └── main.go │ └── thumbnailer │ │ └── main.go └── workers │ └── media_repo │ ├── main.go │ └── reloads.go ├── common ├── assets │ └── process.go ├── config │ ├── access.go │ ├── conf_domain.go │ ├── conf_main.go │ ├── conf_min_shared.go │ ├── models_domain.go │ ├── models_main.go │ ├── util.go │ └── watch.go ├── context.go ├── errorcodes.go ├── errors.go ├── globals │ └── reload.go ├── import_cmdline │ └── ask_machine_id.go ├── logging │ └── logger.go ├── rcontext │ └── request_context.go ├── runtime │ └── init.go └── version │ └── version.go ├── config.sample.yaml ├── database ├── db.go ├── json_value.go ├── table_expiring_media.go ├── table_export_parts.go ├── table_exports.go ├── table_last_access.go ├── table_media.go ├── table_media_attributes.go ├── table_media_hold.go ├── table_reserved_media.go ├── table_restricted_media.go ├── table_tasks.go ├── table_thumbnails.go ├── table_url_previews.go ├── table_user_stats.go └── virtualtable_metadata.go ├── datastores ├── buffer.go ├── delete.go ├── download.go ├── info.go ├── kind.go ├── locate.go ├── pick.go ├── redirect.go ├── s3.go └── upload.go ├── dev 
├── docker-compose.yaml ├── element-config.json ├── homeserver.nginx.conf ├── redis.conf └── synapse-db │ └── .gitignore ├── docker └── run.sh ├── docs ├── admin.md ├── contrib │ └── delegation.md ├── grafana.json └── releasing.md ├── errcache ├── cache.go └── init.go ├── go.mod ├── go.sum ├── homeserver_interop ├── ImportDb.go ├── any_server │ └── signing_key.go ├── dendrite │ ├── db.go │ └── signing_key.go ├── internal │ └── signing_key_encode.go ├── mmr │ └── signing_key.go ├── signing_key.go └── synapse │ ├── api.go │ ├── db.go │ └── signing_key.go ├── limits ├── leaky_buckets.go └── rate_limiting.go ├── matrix ├── breakers.go ├── errors.go ├── http.go ├── requests.go ├── requests_admin.go ├── requests_auth.go ├── requests_info.go ├── requests_signing.go ├── responses.go ├── server_discovery.go ├── signing_key_cache.go └── xmatrix.go ├── metrics ├── metrics.go └── webserver.go ├── migrations ├── 10_add_background_tasks_table_down.sql ├── 10_add_background_tasks_table_up.sql ├── 11_add_reserved_ids_table_down.sql ├── 11_add_reserved_ids_table_up.sql ├── 12_user_id_indexes_down.sql ├── 12_user_id_indexes_up.sql ├── 13_add_export_tables_down.sql ├── 13_add_export_tables_up.sql ├── 14_add_blurhash_tables_down.sql ├── 14_add_blurhash_tables_up.sql ├── 15_add_language_url_previews_down.sql ├── 15_add_language_url_previews_up.sql ├── 16_add_media_attributes_table_down.sql ├── 16_add_media_attributes_table_up.sql ├── 17_add_user_stats_table_down.sql ├── 17_add_user_stats_table_up.sql ├── 18_populate_user_stats_table_down.sql ├── 18_populate_user_stats_table_up.sql ├── 19_create_expiring_media_table_down.sql ├── 19_create_expiring_media_table_up.sql ├── 1_create_tables_down.sql ├── 1_create_tables_up.sql ├── 20_create_id_hold_table_down.sql ├── 20_create_id_hold_table_up.sql ├── 21_not_null_end_ts_background_tasks_down.sql ├── 21_not_null_end_ts_background_tasks_up.sql ├── 22_add_thumb_creation_index_down.sql ├── 22_add_thumb_creation_index_up.sql ├── 23_add_datastore_locations_indexes_down.sql ├── 23_add_datastore_locations_indexes_up.sql ├── 24_add_timestamp_to_media_id_hold_down.sql ├── 24_add_timestamp_to_media_id_hold_up.sql ├── 25_try_create_expiring_media_table_down.sql ├── 25_try_create_expiring_media_table_up.sql ├── 26_add_datastore_id_indexes_down.sql ├── 26_add_datastore_id_indexes_up.sql ├── 27_drop_blurhashes_down.sql ├── 27_drop_blurhashes_up.sql ├── 28_add_task_error_column_down.sql ├── 28_add_task_error_column_up.sql ├── 29_create_media_restrictions_down.sql ├── 29_create_media_restrictions_up.sql ├── 2_add_animated_col_down.sql ├── 2_add_animated_col_up.sql ├── 3_add_quarantine_flag_down.sql ├── 3_add_quarantine_flag_up.sql ├── 4_add_hash_to_thumbnails_down.sql ├── 4_add_hash_to_thumbnails_up.sql ├── 5_make_thumbnail_hash_required_down.sql ├── 5_make_thumbnail_hash_required_up.sql ├── 6_track_last_accessed_times_down.sql ├── 6_track_last_accessed_times_up.sql ├── 7_add_datastore_down.sql ├── 7_add_datastore_up.sql ├── 8_sha256_indexes_down.sql ├── 8_sha256_indexes_up.sql ├── 9_origin_indexes_down.sql └── 9_origin_indexes_up.sql ├── notifier ├── tasks.go └── uploads.go ├── pgo_internal └── pgo.go ├── pgo_media_repo.pprof ├── pipelines ├── _steps │ ├── datastore_op │ │ └── put_and_return_stream.go │ ├── download │ │ ├── open_stream.go │ │ ├── try_download.go │ │ └── wait.go │ ├── meta │ │ └── flag_access.go │ ├── quarantine │ │ ├── logic.go │ │ └── thumbnail.go │ ├── quota │ │ └── check.go │ ├── thumbnails │ │ ├── generate.go │ │ └── pick_dimensions.go │ ├── upload │ │ ├── 
deduplicate.go │ │ ├── generate_media_id.go │ │ ├── limit.go │ │ ├── lock.go │ │ ├── quarantine.go │ │ ├── redis_async.go │ │ └── spam.go │ └── url_preview │ │ ├── preview.go │ │ ├── process.go │ │ └── upload_image.go ├── pipeline_create │ └── pipeline.go ├── pipeline_download │ └── pipeline.go ├── pipeline_preview │ └── pipeline.go ├── pipeline_thumbnail │ └── pipeline.go └── pipeline_upload │ ├── pipeline.go │ └── pipeline2.go ├── plugins ├── manager.go ├── mmr_plugin.go ├── plugin_common │ └── handshake.go └── plugin_interfaces │ └── antispam.go ├── pool ├── init.go └── queue.go ├── redislib ├── cache.go ├── connection.go ├── locking.go └── pubsub.go ├── restrictions └── auth.go ├── tasks ├── all.go ├── exec.go ├── schedule.go └── task_runner │ ├── 00-internal.go │ ├── datastore_migrate.go │ ├── export_data.go │ ├── import_data.go │ ├── purge.go │ ├── purge_held_media_ids.go │ ├── purge_previews.go │ ├── purge_remote_media.go │ ├── purge_thumbnails.go │ └── quarantine.go ├── templates ├── export_index.html └── view_export.html ├── templating ├── models.go └── templates.go ├── test ├── canonical_json_test.go ├── matrix_resolve_test.go ├── msc3916_downloads_suite_test.go ├── msc3916_misc_client_endpoints_suite_test.go ├── msc3916_thumbnails_suite_test.go ├── signing_anyserver_test.go ├── signing_dendrite_test.go ├── signing_keys_test.go ├── signing_mmr_test.go ├── signing_synapse_test.go ├── templates │ ├── minio-config.sh │ ├── mmr.config.yaml │ ├── synapse.homeserver.yaml │ └── synapse.log.config ├── test_internals │ ├── deps.go │ ├── deps_docker_context.go │ ├── deps_minio.go │ ├── deps_mmr.go │ ├── deps_network.go │ ├── deps_synapse.go │ ├── inline_dep_host_file.go │ ├── testcontainers_ext.go │ ├── util.go │ ├── util_client.go │ ├── util_client_api_types.go │ └── util_keyserver.go ├── upload_suite_test.go └── xmatrix_header_test.go ├── thumbnailing ├── i │ ├── 01-factories.go │ ├── apng.go │ ├── bmp.go │ ├── flac.go │ ├── gif.go │ ├── heif.go │ ├── jpegxl.go │ ├── jpg.go │ ├── mp3.go │ ├── mp4.go │ ├── ogg.go │ ├── png.go │ ├── svg.go │ ├── tiff.go │ ├── wav.go │ └── webp.go ├── m │ ├── audio_info.go │ └── thumbnail.go ├── thumbnail.go └── u │ ├── dimensions.go │ ├── encode.go │ ├── exif.go │ ├── framing.go │ ├── metadata.go │ └── sample.go ├── url_previewing ├── m │ ├── errors.go │ ├── preview_result.go │ └── url_payload.go ├── p │ ├── calculated.go │ ├── oembed.go │ └── opengraph.go └── u │ ├── http.go │ └── summarize.go └── util ├── arrays.go ├── canonical_json.go ├── config.go ├── encoding.go ├── http.go ├── identifiers.go ├── ids ├── snowflake.go └── unique.go ├── math.go ├── matrix_media_part.go ├── mime.go ├── mime_detect.go ├── mxc.go ├── random.go ├── readers ├── buffer_reads_reader.go ├── cancel_closer.go ├── error_limit_reader.go ├── maybe_closer.go ├── multipart_reader.go ├── nop_seek_closer.go ├── rewind_reader.go └── temp_file_closer.go ├── sfcache └── sfcache.go ├── strings.go ├── time.go ├── unpadded_base64.go └── urls.go /.dockerignore: -------------------------------------------------------------------------------- 1 | *.key 2 | /webui 3 | /.idea 4 | /bin 5 | /pkg 6 | /logs 7 | /vendor 8 | /config 9 | /gdpr-data 10 | /ipfs 11 | /dev/conduit-db 12 | /dev/psql 13 | /vcpkg 14 | /libheif 15 | 16 | # Generated files 17 | assets.bin.go 18 | 19 | media-repo*.yaml 20 | homeserver.yaml 21 | s3-probably-safe-to-delete.txt 22 | /test.test 23 | 24 | # Binaries for programs and plugins 25 | *.exe 26 | *.dll 27 | *.so 28 | *.dylib 29 | 30 | # Test binary, build with `go test -c` 
31 | *.test 32 | 33 | # Output of the go coverage tool, specifically when used with LiteIDE 34 | *.out 35 | 36 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 37 | .glide/ 38 | -------------------------------------------------------------------------------- /.github/workflows/build-libheif.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | sudo apt-get install -y git cmake make pkg-config libx265-dev libde265-dev libjpeg-dev libtool 4 | git clone https://github.com/strukturag/libheif.git 5 | cd libheif 6 | git checkout v1.19.5 7 | mkdir build 8 | cd build 9 | cmake --preset=release .. 10 | make 11 | sudo make install 12 | sudo ldconfig 13 | -------------------------------------------------------------------------------- /.github/workflows/download_pgo.yml: -------------------------------------------------------------------------------- 1 | name: "Update PGO performance profile" 2 | on: 3 | # schedule: 4 | # - cron: "0 0 * * 2" # Every Tuesday at 00:00 5 | workflow_dispatch: 6 | jobs: 7 | update_pgo: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | contents: write 11 | env: 12 | PGO_MERGE: ${{ secrets.PGO_MERGE }} 13 | steps: 14 | - uses: actions/checkout@v4 15 | - name: "Download new pgo_media_repo.pprof" 16 | run: "curl -sv --fail -X POST -H \"Authorization: Bearer ${PGO_MERGE}\" https://pgo-mmr.t2host.io/v1/merge?and_combine=true > pgo_media_repo.pprof" 17 | - uses: stefanzweifel/git-auto-commit-action@v5 18 | with: 19 | commit_message: "Update pgo_media_repo.pprof" 20 | -------------------------------------------------------------------------------- /.github/workflows/download_providers.yml: -------------------------------------------------------------------------------- 1 | name: "Update oEmbed providers" 2 | on: 3 | # schedule: 4 | # - cron: "0 0 * * 2" # Every Tuesday at 00:00 5 | workflow_dispatch: 6 | jobs: 7 | update_providers: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | contents: write 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: "Download new providers.json" 14 | run: "curl -s --fail https://oembed.com/providers.json > assets/providers.json" 15 | - uses: stefanzweifel/git-auto-commit-action@v5 16 | with: 17 | commit_message: "Update providers.json" 18 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Main 2 | on: 3 | push: 4 | jobs: 5 | build: 6 | name: 'Go Build' 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v4 10 | - uses: actions/setup-go@v5 11 | with: 12 | go-version: '1.22' 13 | - name: "Install libheif" 14 | run: "chmod +x ./.github/workflows/build-libheif.sh && ./.github/workflows/build-libheif.sh" 15 | - run: './build.sh' # verify the thing compiles 16 | static: 17 | name: 'Go Static' 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: actions/setup-go@v5 22 | with: 23 | go-version: '1.22' 24 | - name: "Install libheif" 25 | run: "chmod +x ./.github/workflows/build-libheif.sh && ./.github/workflows/build-libheif.sh" 26 | - name: "Prepare: compile assets" 27 | run: "GOBIN=$PWD/bin go install -v ./cmd/utilities/compile_assets" 28 | - name: "Run: compile assets" 29 | run: "$PWD/bin/compile_assets" 30 | - name: "Prepare: staticcheck" 31 | run: 'go install honnef.co/go/tools/cmd/staticcheck@latest' 32 | - run: 'go vet ./cmd/...' 
33 | - run: 'staticcheck ./cmd/...' 34 | test: 35 | name: 'Go Test' 36 | runs-on: ubuntu-latest 37 | steps: 38 | - uses: actions/checkout@v4 39 | - uses: actions/setup-go@v5 40 | with: 41 | go-version: '1.22' 42 | - name: "Prepare: compile assets" 43 | run: "GOBIN=$PWD/bin go install -v ./cmd/utilities/compile_assets" 44 | - name: "Run: compile assets" 45 | run: "$PWD/bin/compile_assets" 46 | - name: "Run: tests" 47 | run: "go test -c -v ./test && ./test.test '-test.v' -test.parallel 1" # cheat and work around working directory issues 48 | timeout-minutes: 30 49 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.key 2 | /webui 3 | /.idea 4 | /bin 5 | /pkg 6 | /logs 7 | /vendor 8 | /config 9 | /gdpr-data 10 | /ipfs 11 | /dev/conduit-db 12 | /dev/psql 13 | /dev/thumb_* 14 | /vcpkg 15 | /libheif 16 | 17 | # Generated files 18 | assets.bin.go 19 | 20 | media-repo*.yaml 21 | homeserver.yaml 22 | s3-probably-safe-to-delete.txt 23 | /test.test 24 | 25 | # Binaries for programs and plugins 26 | *.exe 27 | *.dll 28 | *.so 29 | *.dylib 30 | 31 | # Test binary, build with `go test -c` 32 | *.test 33 | 34 | # Output of the go coverage tool, specifically when used with LiteIDE 35 | *.out 36 | 37 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 38 | .glide/ 39 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing guidelines 2 | 3 | Everyone is welcome to contribute code to this project, provided that they are willing to license their contributions 4 | under the same license as the project itself. We follow a simple 'inbound=outbound' model for contributions: the act 5 | of submitting an 'inbound' contribution means that the contributor agrees to license the code under the same terms as 6 | the project's overall 'outbound' license - in our case, this is the MIT license (see [LICENSE](LICENSE)). 7 | 8 | ## How to contribute 9 | 10 | The preferred and easiest way to contribute changes is to fork it on GitHub, and then 11 | [create a pull request](https://help.github.com/articles/using-pull-requests/) to ask us to pull your changes into our repo. 12 | 13 | We use several CI systems for testing PRs and the project in general. After opening your pull request, the build status 14 | will be shown on GitHub. Please ensure your PR passes the builds before asking for review. 15 | 16 | This project does not currently have unit or integration tests, though it is expected that your changes work. Please test 17 | them locally and provide a detailed description on how they are supposed to work. 18 | 19 | ## Code style 20 | 21 | This project doesn't yet have a linter because GoLand's default formatting rules seem good enough. If your code looks 22 | sensible and roughly in the same shape as the code surrounding it, it will be fine. 23 | 24 | ## Changelog 25 | 26 | Please document relevant changes in the [CHANGELOG.md](CHANGELOG.md) file. We use keep-a-changelog's format, so some 27 | headers may need to be created. 28 | 29 | ## Conclusion 30 | 31 | That's it! This project can be difficult to jump into, but we do appreciate collaboration and open communication. 
We've 32 | adapted these contributing guidelines from [Synapse](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md) 33 | because we believe in Matrix's mission - we hope you do too and welcome you to our project! 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Travis Ralston 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | The most current release and releases within the last 6 weeks are supported. 
6 | 7 | ## Reporting a Vulnerability 8 | 9 | Please report vulnerabilities through [t2bot.io's Security Disclosure Policy](https://t2bot.io/docs/legal/security-disclosure-policy-v1/) 10 | -------------------------------------------------------------------------------- /api/_apimeta/auth.go: -------------------------------------------------------------------------------- 1 | package _apimeta 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/getsentry/sentry-go" 7 | 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/matrix" 10 | "github.com/t2bot/matrix-media-repo/util" 11 | ) 12 | 13 | type UserInfo struct { 14 | UserId string 15 | AccessToken string 16 | IsShared bool 17 | } 18 | 19 | type ServerInfo struct { 20 | ServerName string 21 | } 22 | 23 | type AuthContext struct { 24 | User UserInfo 25 | Server ServerInfo 26 | } 27 | 28 | func (a AuthContext) IsAuthenticated() bool { 29 | return a.User.UserId != "" || a.Server.ServerName != "" 30 | } 31 | 32 | func GetRequestUserAdminStatus(r *http.Request, rctx rcontext.RequestContext, user UserInfo) (bool, bool) { 33 | isGlobalAdmin := util.IsGlobalAdmin(user.UserId) || user.IsShared 34 | isLocalAdmin, err := matrix.IsUserAdmin(rctx, r.Host, user.AccessToken, r.RemoteAddr) 35 | if err != nil { 36 | sentry.CaptureException(err) 37 | rctx.Log.Debug("Error verifying local admin: ", err) 38 | return isGlobalAdmin, false 39 | } 40 | 41 | return isGlobalAdmin, isLocalAdmin 42 | } 43 | -------------------------------------------------------------------------------- /api/_responses/content.go: -------------------------------------------------------------------------------- 1 | package _responses 2 | 3 | import "io" 4 | 5 | type EmptyResponse struct{} 6 | 7 | type HtmlResponse struct { 8 | HTML string 9 | } 10 | 11 | type DownloadResponse struct { 12 | ContentType string 13 | Filename string 14 | SizeBytes int64 15 | Data io.ReadCloser 16 | TargetDisposition string 17 | } 18 | 19 | type StreamDataResponse struct { 20 | Stream io.Reader 21 | } 22 | 23 | func MakeQuarantinedImageResponse(stream io.ReadCloser) *DownloadResponse { 24 | return &DownloadResponse{ 25 | ContentType: "image/png", 26 | Filename: "not_allowed.png", 27 | SizeBytes: -1, 28 | Data: stream, 29 | TargetDisposition: "inline", 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /api/_responses/meta.go: -------------------------------------------------------------------------------- 1 | package _responses 2 | 3 | type DoNotCacheResponse struct { 4 | Payload interface{} 5 | } 6 | -------------------------------------------------------------------------------- /api/_responses/redirect.go: -------------------------------------------------------------------------------- 1 | package _responses 2 | 3 | type RedirectResponse struct { 4 | ToUrl string 5 | } 6 | 7 | func Redirect(url string) *RedirectResponse { 8 | return &RedirectResponse{ToUrl: url} 9 | } 10 | -------------------------------------------------------------------------------- /api/_routers/00-install-params.go: -------------------------------------------------------------------------------- 1 | package _routers 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "net/http" 7 | "regexp" 8 | 9 | "github.com/julienschmidt/httprouter" 10 | ) 11 | 12 | func localCompile(expr string) *regexp.Regexp { 13 | r, err := regexp.Compile(expr) 14 | if err != nil { 15 | panic(errors.New("error compiling expression: " + expr + " | " + err.Error())) 16 | } 
17 | return r 18 | } 19 | 20 | var ServerNameRegex = localCompile("[a-zA-Z0-9.:\\-_]+") 21 | 22 | //var NumericIdRegex = localCompile("[0-9]+") 23 | 24 | func GetParam(name string, r *http.Request) string { 25 | p := httprouter.ParamsFromContext(r.Context()) 26 | if p == nil { 27 | return "" 28 | } 29 | return p.ByName(name) 30 | } 31 | 32 | func ForceSetParam(name string, val string, r *http.Request) *http.Request { 33 | params := httprouter.ParamsFromContext(r.Context()) 34 | wasSet := false 35 | for _, p := range params { 36 | if p.Key == name { 37 | p.Value = val 38 | wasSet = true 39 | break 40 | } 41 | } 42 | if !wasSet { 43 | params = append(params, httprouter.Param{ 44 | Key: name, 45 | Value: val, 46 | }) 47 | } 48 | return r.WithContext(context.WithValue(r.Context(), httprouter.ParamsKey, params)) 49 | } 50 | -------------------------------------------------------------------------------- /api/_routers/02-install-headers.go: -------------------------------------------------------------------------------- 1 | package _routers 2 | 3 | import ( 4 | "net/http" 5 | ) 6 | 7 | type InstallHeadersRouter struct { 8 | next http.Handler 9 | } 10 | 11 | func NewInstallHeadersRouter(next http.Handler) *InstallHeadersRouter { 12 | return &InstallHeadersRouter{next: next} 13 | } 14 | 15 | func (i *InstallHeadersRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) { 16 | headers := w.Header() 17 | if headers.Get("Allow") != "" { 18 | headers.Set("Access-Control-Allow-Methods", headers.Get("Allow")) 19 | } 20 | headers.Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Authorization") 21 | headers.Set("Access-Control-Allow-Origin", "*") 22 | headers.Set("Content-Security-Policy", "sandbox; default-src 'none'; script-src 'none'; plugin-types application/pdf; style-src 'unsafe-inline'; media-src 'self'; object-src 'self';") 23 | headers.Set("Cross-Origin-Resource-Policy", "cross-origin") 24 | headers.Set("X-Content-Security-Policy", "sandbox;") 25 | headers.Set("X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex") 26 | headers.Set("Server", "matrix-media-repo") 27 | 28 | if i.next != nil { 29 | i.next.ServeHTTP(w, r) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /api/_routers/04-request-metrics.go: -------------------------------------------------------------------------------- 1 | package _routers 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | "github.com/t2bot/matrix-media-repo/metrics" 8 | ) 9 | 10 | type MetricsRequestRouter struct { 11 | next http.Handler 12 | } 13 | 14 | func NewMetricsRequestRouter(next http.Handler) *MetricsRequestRouter { 15 | return &MetricsRequestRouter{next: next} 16 | } 17 | 18 | func (m *MetricsRequestRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) { 19 | metrics.HttpRequests.With(prometheus.Labels{ 20 | "host": r.Host, 21 | "action": GetActionName(r), 22 | "method": r.Method, 23 | }).Inc() 24 | 25 | if m.next != nil { 26 | m.next.ServeHTTP(w, r) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /api/_routers/97-optional-access-token.go: -------------------------------------------------------------------------------- 1 | package _routers 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | 7 | "github.com/getsentry/sentry-go" 8 | "github.com/sirupsen/logrus" 9 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 10 | "github.com/t2bot/matrix-media-repo/api/_auth_cache" 
11 | "github.com/t2bot/matrix-media-repo/api/_responses" 12 | "github.com/t2bot/matrix-media-repo/common/config" 13 | "github.com/t2bot/matrix-media-repo/common/rcontext" 14 | "github.com/t2bot/matrix-media-repo/matrix" 15 | "github.com/t2bot/matrix-media-repo/util" 16 | ) 17 | 18 | func OptionalAccessToken(generator GeneratorWithUserFn) GeneratorFn { 19 | return func(r *http.Request, ctx rcontext.RequestContext) interface{} { 20 | accessToken := util.GetAccessTokenFromRequest(r) 21 | if accessToken == "" { 22 | return generator(r, ctx, _apimeta.UserInfo{ 23 | UserId: "", 24 | AccessToken: "", 25 | IsShared: false, 26 | }) 27 | } 28 | if config.Get().SharedSecret.Enabled && accessToken == config.Get().SharedSecret.Token { 29 | ctx = ctx.LogWithFields(logrus.Fields{"sharedSecretAuth": true}) 30 | return generator(r, ctx, _apimeta.UserInfo{ 31 | UserId: "@sharedsecret", 32 | AccessToken: accessToken, 33 | IsShared: true, 34 | }) 35 | } 36 | appserviceUserId := util.GetAppserviceUserIdFromRequest(r) 37 | userId, isGuest, err := _auth_cache.GetUserId(ctx, accessToken, appserviceUserId) 38 | if isGuest { 39 | return _responses.GuestAuthFailed() 40 | } 41 | if err != nil { 42 | if !errors.Is(err, matrix.ErrInvalidToken) { 43 | sentry.CaptureException(err) 44 | ctx.Log.Error("Error verifying token: ", err) 45 | return _responses.InternalServerError("unexpected error validating access token") 46 | } 47 | 48 | ctx.Log.Warn("Failed to verify token (non-fatal): ", err) 49 | userId = "" 50 | } 51 | 52 | ctx = ctx.LogWithFields(logrus.Fields{"authUserId": userId}) 53 | return generator(r, ctx, _apimeta.UserInfo{ 54 | UserId: userId, 55 | AccessToken: accessToken, 56 | IsShared: false, 57 | }) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /api/_routers/97-require-repo-admin.go: -------------------------------------------------------------------------------- 1 | package _routers 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | 7 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 8 | "github.com/t2bot/matrix-media-repo/api/_responses" 9 | "github.com/t2bot/matrix-media-repo/common/rcontext" 10 | "github.com/t2bot/matrix-media-repo/util" 11 | ) 12 | 13 | func RequireRepoAdmin(generator GeneratorWithUserFn) GeneratorFn { 14 | return func(r *http.Request, ctx rcontext.RequestContext) interface{} { 15 | return RequireAccessToken(func(r *http.Request, ctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 16 | if user.UserId == "" { 17 | panic(errors.New("safety check failed: Repo admin access check received empty user ID")) 18 | } 19 | 20 | if !user.IsShared && !util.IsGlobalAdmin(user.UserId) { 21 | return _responses.AuthFailed() 22 | } 23 | 24 | return generator(r, ctx, user) 25 | }, false)(r, ctx) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /api/_routers/97-require-server-auth.go: -------------------------------------------------------------------------------- 1 | package _routers 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | 7 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 8 | "github.com/t2bot/matrix-media-repo/api/_responses" 9 | "github.com/t2bot/matrix-media-repo/common" 10 | "github.com/t2bot/matrix-media-repo/common/rcontext" 11 | "github.com/t2bot/matrix-media-repo/matrix" 12 | ) 13 | 14 | type GeneratorWithServerFn = func(r *http.Request, ctx rcontext.RequestContext, server _apimeta.ServerInfo) interface{} 15 | 16 | func RequireServerAuth(generator 
GeneratorWithServerFn) GeneratorFn { 17 | return func(r *http.Request, ctx rcontext.RequestContext) interface{} { 18 | serverName, err := matrix.ValidateXMatrixAuth(r, true) 19 | if err != nil { 20 | ctx.Log.Debug("Error with X-Matrix auth: ", err) 21 | if errors.Is(err, matrix.ErrNoXMatrixAuth) { 22 | return &_responses.ErrorResponse{ 23 | Code: common.ErrCodeUnauthorized, 24 | Message: "no auth provided (required)", 25 | InternalCode: common.ErrCodeMissingToken, 26 | } 27 | } 28 | if errors.Is(err, matrix.ErrWrongDestination) { 29 | return &_responses.ErrorResponse{ 30 | Code: common.ErrCodeUnauthorized, 31 | Message: "no auth provided for this destination (required)", 32 | InternalCode: common.ErrCodeBadRequest, 33 | } 34 | } 35 | return &_responses.ErrorResponse{ 36 | Code: common.ErrCodeForbidden, 37 | Message: "invalid auth provided (required)", 38 | InternalCode: common.ErrCodeBadRequest, 39 | } 40 | } 41 | return generator(r, ctx, _apimeta.ServerInfo{ 42 | ServerName: serverName, 43 | }) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /api/_routers/99-response-metrics.go: -------------------------------------------------------------------------------- 1 | package _routers 2 | 3 | import ( 4 | "net/http" 5 | "strconv" 6 | 7 | "github.com/prometheus/client_golang/prometheus" 8 | "github.com/t2bot/matrix-media-repo/metrics" 9 | ) 10 | 11 | type MetricsResponseRouter struct { 12 | next http.Handler 13 | } 14 | 15 | func NewMetricsResponseRouter(next http.Handler) *MetricsResponseRouter { 16 | return &MetricsResponseRouter{next: next} 17 | } 18 | 19 | func (m *MetricsResponseRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) { 20 | metrics.HttpResponses.With(prometheus.Labels{ 21 | "host": r.Host, 22 | "action": GetActionName(r), 23 | "method": r.Method, 24 | "statusCode": strconv.Itoa(GetStatusCode(r)), 25 | }).Inc() 26 | metrics.HttpResponseTime.With(prometheus.Labels{ 27 | "host": r.Host, 28 | "action": GetActionName(r), 29 | "method": r.Method, 30 | }).Observe(GetRequestDuration(r)) 31 | 32 | if m.next != nil { 33 | m.next.ServeHTTP(w, r) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /api/branched_route.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "net/http" 5 | "strings" 6 | 7 | "github.com/t2bot/matrix-media-repo/api/_routers" 8 | ) 9 | 10 | type branch struct { 11 | string 12 | http.Handler 13 | } 14 | 15 | type splitBranch struct { 16 | segments []string 17 | handler http.Handler 18 | } 19 | 20 | func branchedRoute(branches []branch) http.Handler { 21 | sbranches := make([]splitBranch, len(branches)) 22 | for i, b := range branches { 23 | sbranches[i] = splitBranch{ 24 | segments: strings.Split(b.string, "/"), 25 | handler: b.Handler, 26 | } 27 | } 28 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 29 | catchAll := _routers.GetParam("branch", r) 30 | if catchAll[0] == '/' { 31 | catchAll = catchAll[1:] 32 | } 33 | params := strings.Split(catchAll, "/") 34 | for _, b := range sbranches { 35 | if b.segments[0][0] == ':' || b.segments[0] == params[0] { 36 | if len(b.segments) != len(params) { 37 | continue 38 | } 39 | for i, s := range b.segments { 40 | if s[0] == ':' { 41 | r = _routers.ForceSetParam(s[1:], params[i], r) 42 | } 43 | } 44 | b.handler.ServeHTTP(w, r) 45 | return 46 | } 47 | } 48 | notFoundFn(w, r) 49 | }) 50 | } 51 | 
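A minimal usage sketch for the branchedRoute helper above, added here for illustration only (it is not part of the repository). The file name, function name, route patterns, and handler values are hypothetical placeholders; the point is that each branch pattern is matched segment-by-segment against the ":branch" catch-all and any ":name" segments are copied into the request params before dispatch.

package api // hypothetical example, not repository code

import "net/http"

// buildExampleRouter sketches how branchedRoute could be wired up. The two
// handlers are assumed to exist elsewhere; the patterns are illustrative only.
func buildExampleRouter(downloadHandler http.Handler, thumbnailHandler http.Handler) http.Handler {
	return branchedRoute([]branch{
		{"download/:server/:mediaId", downloadHandler},   // ":server" and ":mediaId" are set as request params
		{"thumbnail/:server/:mediaId", thumbnailHandler}, // non-matching lengths fall through to notFoundFn
	})
}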
-------------------------------------------------------------------------------- /api/custom/federation.go: -------------------------------------------------------------------------------- 1 | package custom 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | "net/http" 7 | 8 | "github.com/getsentry/sentry-go" 9 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 10 | "github.com/t2bot/matrix-media-repo/api/_responses" 11 | "github.com/t2bot/matrix-media-repo/api/_routers" 12 | 13 | "github.com/sirupsen/logrus" 14 | "github.com/t2bot/matrix-media-repo/common/rcontext" 15 | "github.com/t2bot/matrix-media-repo/matrix" 16 | ) 17 | 18 | func GetFederationInfo(r *http.Request, rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 19 | serverName := _routers.GetParam("serverName", r) 20 | 21 | if !_routers.ServerNameRegex.MatchString(serverName) { 22 | return _responses.BadRequest("invalid server name") 23 | } 24 | 25 | rctx = rctx.LogWithFields(logrus.Fields{ 26 | "serverName": serverName, 27 | }) 28 | 29 | url, hostname, err := matrix.GetServerApiUrl(serverName) 30 | if err != nil { 31 | rctx.Log.Error(err) 32 | sentry.CaptureException(err) 33 | return _responses.InternalServerError(err.Error()) 34 | } 35 | 36 | versionUrl := url + "/_matrix/federation/v1/version" 37 | versionResponse, err := matrix.FederatedGet(rctx, versionUrl, hostname, serverName, matrix.NoSigningKey, false) 38 | if versionResponse != nil { 39 | defer versionResponse.Body.Close() 40 | } 41 | if err != nil { 42 | rctx.Log.Error(err) 43 | sentry.CaptureException(err) 44 | return _responses.InternalServerError(err.Error()) 45 | } 46 | if versionResponse == nil { 47 | return _responses.InternalServerError("version not found") 48 | } 49 | 50 | decoder := json.NewDecoder(io.LimitReader(versionResponse.Body, 1*1024*1024)) 51 | out := make(map[string]interface{}) 52 | err = decoder.Decode(&out) 53 | if err != nil { 54 | rctx.Log.Error(err) 55 | sentry.CaptureException(err) 56 | return _responses.InternalServerError(err.Error()) 57 | } 58 | 59 | resp := make(map[string]interface{}) 60 | resp["base_url"] = url 61 | resp["hostname"] = hostname 62 | resp["versions_response"] = out 63 | return &_responses.DoNotCacheResponse{Payload: resp} 64 | } 65 | -------------------------------------------------------------------------------- /api/custom/health.go: -------------------------------------------------------------------------------- 1 | package custom 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 7 | "github.com/t2bot/matrix-media-repo/api/_responses" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | ) 10 | 11 | type HealthzResponse struct { 12 | OK bool `json:"ok"` 13 | Status string `json:"status"` 14 | } 15 | 16 | func GetHealthz(r *http.Request, rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 17 | return &_responses.DoNotCacheResponse{ 18 | Payload: &HealthzResponse{ 19 | OK: true, 20 | Status: "Probably not dead", 21 | }, 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /api/custom/version.go: -------------------------------------------------------------------------------- 1 | package custom 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 7 | "github.com/t2bot/matrix-media-repo/api/_responses" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/common/version" 10 | ) 11 | 12 | func GetVersion(r *http.Request, 
rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 13 | unstableFeatures := make(map[string]bool) 14 | 15 | return &_responses.DoNotCacheResponse{ 16 | Payload: map[string]interface{}{ 17 | "Version": version.Version, 18 | "GitCommit": version.GitCommit, 19 | "unstable_features": unstableFeatures, 20 | }, 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /api/r0/logout.go: -------------------------------------------------------------------------------- 1 | package r0 2 | 3 | import ( 4 | "github.com/getsentry/sentry-go" 5 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 6 | "github.com/t2bot/matrix-media-repo/api/_responses" 7 | 8 | "net/http" 9 | 10 | "github.com/t2bot/matrix-media-repo/api/_auth_cache" 11 | "github.com/t2bot/matrix-media-repo/common/rcontext" 12 | ) 13 | 14 | func Logout(r *http.Request, rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 15 | err := _auth_cache.InvalidateToken(rctx, user.AccessToken, user.UserId) 16 | if err != nil { 17 | rctx.Log.Error(err) 18 | sentry.CaptureException(err) 19 | return _responses.InternalServerError("unable to logout") 20 | } 21 | return _responses.EmptyResponse{} 22 | } 23 | 24 | func LogoutAll(r *http.Request, rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 25 | err := _auth_cache.InvalidateAllTokens(rctx, user.AccessToken, user.UserId) 26 | if err != nil { 27 | rctx.Log.Error(err) 28 | sentry.CaptureException(err) 29 | return _responses.InternalServerError("unable to logout") 30 | } 31 | return _responses.EmptyResponse{} 32 | } 33 | -------------------------------------------------------------------------------- /api/r0/public_config.go: -------------------------------------------------------------------------------- 1 | package r0 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/pipelines/_steps/quota" 10 | ) 11 | 12 | type PublicConfigResponse struct { 13 | UploadMaxSize int64 `json:"m.upload.size,omitempty"` 14 | StorageMaxSize int64 `json:"org.matrix.msc4034.storage.size,omitempty"` 15 | StorageMaxFiles int64 `json:"org.matrix.msc4034.storage.max_files,omitempty"` 16 | } 17 | 18 | func PublicConfig(r *http.Request, rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 19 | uploadSize := rctx.Config.Uploads.ReportedMaxSizeBytes 20 | if uploadSize == 0 { 21 | uploadSize = rctx.Config.Uploads.MaxSizeBytes 22 | } 23 | 24 | if uploadSize < 0 { 25 | uploadSize = 0 // invokes the omitEmpty 26 | } 27 | 28 | storageSize := int64(0) 29 | limit, err := quota.Limit(rctx, user.UserId, quota.MaxBytes) 30 | if err != nil { 31 | rctx.Log.Warn("Non-fatal error getting per-user quota limit (max bytes): ", err) 32 | sentry.CaptureException(err) 33 | } else { 34 | storageSize = limit 35 | } 36 | if storageSize < 0 { 37 | storageSize = 0 // invokes the omitEmpty 38 | } 39 | 40 | maxFiles := int64(0) 41 | limit, err = quota.Limit(rctx, user.UserId, quota.MaxCount) 42 | if err != nil { 43 | rctx.Log.Warn("Non-fatal error getting per-user quota limit (max files count): ", err) 44 | sentry.CaptureException(err) 45 | } else { 46 | maxFiles = limit 47 | } 48 | 49 | return &PublicConfigResponse{ 50 | UploadMaxSize: uploadSize, 51 | StorageMaxSize: storageSize, 52 | StorageMaxFiles: maxFiles, 53 | } 54 | } 55 | 
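For illustration only (not repository code), a hedged sketch of the JSON shape that the PublicConfigResponse above serializes to. The struct is mirrored locally so the snippet is self-contained, the numeric value is made up, and zero-valued fields are dropped by the omitempty tags.

package main // standalone example mirroring the struct defined above

import (
	"encoding/json"
	"fmt"
)

type PublicConfigResponse struct {
	UploadMaxSize   int64 `json:"m.upload.size,omitempty"`
	StorageMaxSize  int64 `json:"org.matrix.msc4034.storage.size,omitempty"`
	StorageMaxFiles int64 `json:"org.matrix.msc4034.storage.max_files,omitempty"`
}

func main() {
	// Example values only: a 100 MiB upload cap with no storage quota configured.
	b, _ := json.Marshal(&PublicConfigResponse{UploadMaxSize: 104857600})
	fmt.Println(string(b)) // prints {"m.upload.size":104857600}
}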
-------------------------------------------------------------------------------- /api/r0/versions.go: -------------------------------------------------------------------------------- 1 | package r0 2 | 3 | import ( 4 | "net/http" 5 | "slices" 6 | 7 | "github.com/getsentry/sentry-go" 8 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 9 | "github.com/t2bot/matrix-media-repo/api/_responses" 10 | "github.com/t2bot/matrix-media-repo/matrix" 11 | 12 | "github.com/t2bot/matrix-media-repo/common/rcontext" 13 | ) 14 | 15 | func ClientVersions(r *http.Request, rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 16 | versions, err := matrix.ClientVersions(rctx, r.Host, user.UserId, user.AccessToken, r.RemoteAddr) 17 | if err != nil { 18 | rctx.Log.Error(err) 19 | sentry.CaptureException(err) 20 | return _responses.InternalServerError("unable to get versions") 21 | } 22 | 23 | // This is where we'd add our feature/version support as needed 24 | if versions.Versions == nil { 25 | versions.Versions = make([]string, 1) 26 | } 27 | 28 | // We add v1.11 by force, even though we can't reliably say the rest of the server implements it. This 29 | // is because server admins which point `/versions` at us are effectively opting in to whatever features 30 | // we need to advertise support for. In our case, it's at least Authenticated Media (MSC3916). 31 | if !slices.Contains(versions.Versions, "v1.11") { 32 | versions.Versions = append(versions.Versions, "v1.11") 33 | } 34 | 35 | return versions 36 | } 37 | -------------------------------------------------------------------------------- /api/unstable/public_usage.go: -------------------------------------------------------------------------------- 1 | package unstable 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/pipelines/_steps/quota" 10 | ) 11 | 12 | type PublicUsageResponse struct { 13 | StorageUsed int64 `json:"org.matrix.msc4034.storage.used,omitempty"` 14 | StorageFiles int64 `json:"org.matrix.msc4034.storage.files,omitempty"` 15 | } 16 | 17 | func PublicUsage(r *http.Request, rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 18 | storageUsed := int64(0) 19 | current, err := quota.Current(rctx, user.UserId, quota.MaxBytes) 20 | if err != nil { 21 | rctx.Log.Warn("Non-fatal error getting per-user quota usage (max bytes @ now): ", err) 22 | sentry.CaptureException(err) 23 | } else { 24 | storageUsed = current 25 | } 26 | 27 | fileCount, err := quota.Current(rctx, user.UserId, quota.MaxCount) 28 | if err != nil { 29 | rctx.Log.Warn("Non-fatal error getting per-user quota usage (files count @ now): ", err) 30 | sentry.CaptureException(err) 31 | } 32 | 33 | return &PublicUsageResponse{ 34 | StorageUsed: storageUsed, 35 | StorageFiles: fileCount, 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /api/v1/create.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/t2bot/matrix-media-repo/api/_apimeta" 8 | "github.com/t2bot/matrix-media-repo/api/_responses" 9 | "github.com/t2bot/matrix-media-repo/common/rcontext" 10 | "github.com/t2bot/matrix-media-repo/pipelines/pipeline_create" 11 | "github.com/t2bot/matrix-media-repo/util" 12 | ) 13 | 14 | type MediaCreatedResponse 
struct { 15 | ContentUri string `json:"content_uri"` 16 | ExpiresTs int64 `json:"unused_expires_at"` 17 | } 18 | 19 | func CreateMedia(r *http.Request, rctx rcontext.RequestContext, user _apimeta.UserInfo) interface{} { 20 | id, err := pipeline_create.Execute(rctx, r.Host, user.UserId, pipeline_create.DefaultExpirationTime) 21 | if err != nil { 22 | rctx.Log.Error("Unexpected error creating media ID:", err) 23 | sentry.CaptureException(err) 24 | return _responses.InternalServerError("unexpected error") 25 | } 26 | 27 | return &MediaCreatedResponse{ 28 | ContentUri: util.MxcUri(id.Origin, id.MediaId), 29 | ExpiresTs: id.ExpiresTs, 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /api/webserver.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "net" 7 | "net/http" 8 | "strconv" 9 | "sync" 10 | "time" 11 | 12 | "github.com/didip/tollbooth/v7" 13 | "github.com/getsentry/sentry-go" 14 | sentryhttp "github.com/getsentry/sentry-go/http" 15 | "github.com/sirupsen/logrus" 16 | "github.com/t2bot/matrix-media-repo/common/config" 17 | "github.com/t2bot/matrix-media-repo/limits" 18 | ) 19 | 20 | var srv *http.Server 21 | var waitGroup = &sync.WaitGroup{} 22 | var reload = false 23 | 24 | func Init() *sync.WaitGroup { 25 | address := net.JoinHostPort(config.Get().General.BindAddress, strconv.Itoa(config.Get().General.Port)) 26 | 27 | //defer func() { 28 | // if err := recover(); err != nil { 29 | // logrus.Fatal(err) 30 | // } 31 | //}() 32 | 33 | handler := buildRoutes() 34 | 35 | if config.Get().RateLimit.Enabled { 36 | logrus.Debug("Enabling rate limit") 37 | handler = tollbooth.LimitHandler(limits.GetRequestLimiter(), handler) 38 | } 39 | 40 | // Note: we bind Sentry here to ensure we capture *everything* 41 | sentryHandler := sentryhttp.New(sentryhttp.Options{}) 42 | srv = &http.Server{Addr: address, Handler: sentryHandler.Handle(handler)} 43 | reload = false 44 | 45 | go func() { 46 | //goland:noinspection HttpUrlsUsage 47 | logrus.WithField("address", address).Info("Started up. 
Listening at http://" + address) 48 | if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { 49 | sentry.CaptureException(err) 50 | logrus.Fatal(err) 51 | } 52 | 53 | // Only notify the main thread that we're done if we're actually done 54 | srv = nil 55 | if !reload { 56 | waitGroup.Done() 57 | } 58 | }() 59 | 60 | return waitGroup 61 | } 62 | 63 | func Reload() { 64 | reload = true 65 | 66 | // Stop the server first 67 | Stop() 68 | 69 | // Reload the web server, ignoring the wait group (because we don't care to wait here) 70 | Init() 71 | } 72 | 73 | func Stop() { 74 | if srv != nil { 75 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 76 | defer cancel() 77 | if err := srv.Shutdown(ctx); err != nil { 78 | panic(err) 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /archival/dir_part_persister.go: -------------------------------------------------------------------------------- 1 | package archival 2 | 3 | import ( 4 | "io" 5 | "os" 6 | "path" 7 | 8 | "github.com/t2bot/matrix-media-repo/archival/v2archive" 9 | ) 10 | 11 | func PersistPartsToDirectory(exportPath string) v2archive.PartPersister { 12 | _ = os.MkdirAll(exportPath, 0755) 13 | return func(part int, fileName string, data io.ReadCloser) error { 14 | defer data.Close() 15 | f, errf := os.Create(path.Join(exportPath, fileName)) 16 | if errf != nil { 17 | return errf 18 | } 19 | _, errf = io.Copy(f, data) 20 | if errf != nil { 21 | return errf 22 | } 23 | return nil 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /archival/v2archive/manifest.go: -------------------------------------------------------------------------------- 1 | package v2archive 2 | 3 | type ManifestVersionVal int 4 | 5 | const ( 6 | ManifestVersionV1 ManifestVersionVal = 1 7 | ManifestVersionV2 ManifestVersionVal = 2 8 | ) 9 | const ManifestVersion = ManifestVersionV2 10 | 11 | type Manifest struct { 12 | Version ManifestVersionVal `json:"version"` 13 | EntityId string `json:"entity_id"` 14 | CreatedTs int64 `json:"created_ts"` 15 | Media map[string]*ManifestRecord `json:"media"` 16 | 17 | // Deprecated: for v1 manifests, now called EntityId 18 | UserId string `json:"user_id,omitempty"` 19 | } 20 | 21 | type ManifestRecord struct { 22 | FileName string `json:"name"` 23 | ArchivedName string `json:"file_name"` 24 | SizeBytes int64 `json:"size_bytes"` 25 | ContentType string `json:"content_type"` 26 | S3Url string `json:"s3_url"` 27 | Sha256 string `json:"sha256"` 28 | Origin string `json:"origin"` 29 | MediaId string `json:"media_id"` 30 | CreatedTs int64 `json:"created_ts"` 31 | Uploader string `json:"uploader"` 32 | } 33 | -------------------------------------------------------------------------------- /assets/default-artwork.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/t2bot/matrix-media-repo/ed4f3181a080fbfe6c37fa7fd456c7dd9e97d8c6/assets/default-artwork.png -------------------------------------------------------------------------------- /build-dist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | rm -rfv $PWD/bin/* 6 | mkdir $PWD/bin/dist 7 | 8 | GOBIN=$PWD/bin go install -v ./cmd/utilities/compile_assets 9 | $PWD/bin/compile_assets 10 | 11 | arches=("amd64") 12 | oses=("windows" "linux") 13 | 14 | for os in "${oses[@]}" 15 | do 16 | for arch in "${arches[@]}" 
17 | do 18 | pth="$os-$arch" 19 | mkdir $PWD/bin/$pth 20 | GOOS=$os GOARCH=$arch GOBIN=$PWD/bin go build -o $PWD/bin/$pth -a -ldflags "-X github.com/t2bot/matrix-media-repo/common/version.Version=$(git describe --tags)" -v ./cmd/... 21 | GOOS=$os GOARCH=$arch GOBIN=$PWD/bin go build -pgo=pgo_media_repo.pprof -o $PWD/bin/$pth -a -ldflags "-X github.com/t2bot/matrix-media-repo/common/version.Version=$(git describe --tags)" -v ./cmd/workers/media_repo 22 | cd $PWD/bin/$pth 23 | if [ "$arch" == "amd64" ]; then 24 | arch="x64" 25 | fi 26 | if [ "$os" == "windows" ]; then 27 | for file in * ; do mv -v $file ../dist/${file%.*}-win-${arch}.exe; done; 28 | else 29 | for file in * ; do mv -v $file ../dist/${file}-${os}-${arch}; done; 30 | fi 31 | cd ../../ 32 | rm -rfv $PWD/bin/$pth 33 | done 34 | done 35 | 36 | rm -rfv $PWD/bin/dist/compile_assets* 37 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | GOBIN=$PWD/bin go install -v ./cmd/utilities/compile_assets 6 | $PWD/bin/compile_assets 7 | GOBIN=$PWD/bin go install -ldflags "-X github.com/t2bot/matrix-media-repo/common/version.Version=$(git describe --tags)" -v ./cmd/... 8 | GOBIN=$PWD/bin go install -pgo=pgo_media_repo.pprof -ldflags "-X github.com/t2bot/matrix-media-repo/common/version.Version=$(git describe --tags)" -v ./cmd/workers/media_repo 9 | -------------------------------------------------------------------------------- /cmd/archival/gdpr_export/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "os" 6 | 7 | "github.com/sirupsen/logrus" 8 | "github.com/t2bot/matrix-media-repo/archival" 9 | "github.com/t2bot/matrix-media-repo/common/assets" 10 | "github.com/t2bot/matrix-media-repo/common/config" 11 | "github.com/t2bot/matrix-media-repo/common/logging" 12 | "github.com/t2bot/matrix-media-repo/common/rcontext" 13 | "github.com/t2bot/matrix-media-repo/common/runtime" 14 | ) 15 | 16 | func main() { 17 | configPath := flag.String("config", "media-repo.yaml", "The path to the configuration") 18 | migrationsPath := flag.String("migrations", config.DefaultMigrationsPath, "The absolute path for the migrations folder") 19 | templatesPath := flag.String("templates", config.DefaultTemplatesPath, "The absolute path for the templates folder") 20 | entity := flag.String("entity", "", "The user ID or server name to export") 21 | destination := flag.String("destination", "./gdpr-data", "The directory for where export files should be placed") 22 | flag.Parse() 23 | 24 | // Override config path with config for Docker users 25 | configEnv := os.Getenv("REPO_CONFIG") 26 | if configEnv != "" { 27 | configPath = &configEnv 28 | } 29 | 30 | if *entity == "" { 31 | flag.Usage() 32 | os.Exit(1) 33 | return 34 | } 35 | 36 | config.Runtime.IsImportProcess = true // prevents us from creating media by accident 37 | config.Path = *configPath 38 | 39 | defer assets.Cleanup() 40 | assets.SetupMigrations(*migrationsPath) 41 | assets.SetupTemplates(*templatesPath) 42 | 43 | var err error 44 | err = logging.Setup( 45 | config.Get().General.LogDirectory, 46 | config.Get().General.LogColors, 47 | config.Get().General.JsonLogs, 48 | config.Get().General.LogLevel, 49 | ) 50 | if err != nil { 51 | panic(err) 52 | } 53 | 54 | logrus.Info("Starting up...") 55 | runtime.RunStartupSequence() 56 | 57 | logrus.Info("Starting export...") 
58 | ctx := rcontext.Initial() 59 | err = archival.ExportEntityData(ctx, "OOB", *entity, true, archival.PersistPartsToDirectory(*destination)) 60 | if err != nil { 61 | panic(err) 62 | } 63 | 64 | logrus.Infof("Export complete! Files for %s should be in %s", *entity, *destination) 65 | } 66 | -------------------------------------------------------------------------------- /cmd/homeserver_live_importers/import_dendrite/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/cmd/homeserver_live_importers/_common" 5 | "github.com/t2bot/matrix-media-repo/common/rcontext" 6 | "github.com/t2bot/matrix-media-repo/homeserver_interop/dendrite" 7 | ) 8 | 9 | func main() { 10 | cfg := _common.InitImportPsqlMatrixDownload("Dendrite") 11 | ctx := rcontext.Initial() 12 | 13 | ctx.Log.Debug("Connecting to homeserver database...") 14 | hsDb, err := dendrite.OpenDatabase(cfg.ConnectionString, cfg.ServerName) 15 | if err != nil { 16 | panic(err) 17 | } 18 | 19 | _common.PsqlMatrixDownloadCopy[dendrite.LocalMedia](ctx, cfg, hsDb, func(record *dendrite.LocalMedia) (*_common.MediaMetadata, error) { 20 | return &_common.MediaMetadata{ 21 | MediaId: record.MediaId, 22 | ContentType: record.ContentType, 23 | FileName: record.UploadName, 24 | UploaderUserId: record.UserId, 25 | SizeBytes: record.FileSizeBytes, 26 | }, nil 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /cmd/homeserver_live_importers/import_synapse/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/cmd/homeserver_live_importers/_common" 5 | "github.com/t2bot/matrix-media-repo/common/rcontext" 6 | "github.com/t2bot/matrix-media-repo/homeserver_interop/synapse" 7 | ) 8 | 9 | func main() { 10 | cfg := _common.InitImportPsqlMatrixDownload("Synapse") 11 | ctx := rcontext.Initial() 12 | 13 | ctx.Log.Debug("Connecting to homeserver database...") 14 | hsDb, err := synapse.OpenDatabase(cfg.ConnectionString) 15 | if err != nil { 16 | panic(err) 17 | } 18 | 19 | _common.PsqlMatrixDownloadCopy[synapse.LocalMedia](ctx, cfg, hsDb, func(record *synapse.LocalMedia) (*_common.MediaMetadata, error) { 20 | return &_common.MediaMetadata{ 21 | MediaId: record.MediaId, 22 | ContentType: record.ContentType, 23 | FileName: record.UploadName, 24 | UploaderUserId: record.UserId, 25 | SizeBytes: record.SizeBytes, 26 | }, nil 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /cmd/homeserver_offline_importers/_common/archiver.go: -------------------------------------------------------------------------------- 1 | package _common 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | 8 | "github.com/t2bot/matrix-media-repo/archival" 9 | "github.com/t2bot/matrix-media-repo/archival/v2archive" 10 | "github.com/t2bot/matrix-media-repo/common/rcontext" 11 | "github.com/t2bot/matrix-media-repo/homeserver_interop" 12 | ) 13 | 14 | func PsqlFlatFileArchive[M homeserver_interop.ImportDbMedia](ctx rcontext.RequestContext, cfg *ImportOptsPsqlFlatFile, db homeserver_interop.ImportDb[M], processFn func(record *M) (v2archive.MediaInfo, io.ReadCloser, error)) { 15 | ctx.Log.Info("Fetching all local media records from homeserver...") 16 | records, err := db.GetAllMedia() 17 | if err != nil { 18 | panic(err) 19 | } 20 | 21 | ctx.Log.Info(fmt.Sprintf("Exporting %d media records", 
len(records))) 22 | 23 | archiver, err := v2archive.NewWriter(ctx, "OOB", cfg.ServerName, cfg.PartSizeBytes, archival.PersistPartsToDirectory(cfg.ExportPath)) 24 | if err != nil { 25 | ctx.Log.Fatal(err) 26 | } 27 | 28 | missing := make([]string, 0) 29 | 30 | for _, r := range records { 31 | info, f, err := processFn(r) 32 | if err != nil { 33 | if os.IsNotExist(err) && cfg.SkipMissing { 34 | missing = append(missing, info.FileName) 35 | continue 36 | } 37 | ctx.Log.Fatal(err) 38 | } 39 | 40 | _, err = archiver.AppendMedia(f, info) 41 | if err != nil { 42 | ctx.Log.Fatal(err) 43 | } 44 | } 45 | 46 | err = archiver.Finish() 47 | if err != nil { 48 | ctx.Log.Fatal(err) 49 | } 50 | 51 | ctx.Log.Info("Done export") 52 | 53 | // Report missing files 54 | if len(missing) > 0 { 55 | for _, m := range missing { 56 | ctx.Log.Warn("Was not able to find " + m) 57 | } 58 | } 59 | 60 | ctx.Log.Info("Export completed") 61 | } 62 | -------------------------------------------------------------------------------- /cmd/homeserver_offline_importers/export_dendrite_for_import/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "os" 6 | "path" 7 | 8 | "github.com/t2bot/matrix-media-repo/archival/v2archive" 9 | "github.com/t2bot/matrix-media-repo/cmd/homeserver_offline_importers/_common" 10 | "github.com/t2bot/matrix-media-repo/common/rcontext" 11 | "github.com/t2bot/matrix-media-repo/homeserver_interop/dendrite" 12 | "github.com/t2bot/matrix-media-repo/util" 13 | ) 14 | 15 | func main() { 16 | cfg := _common.InitExportPsqlFlatFile("Dendrite", "media_api.base_path") 17 | ctx := rcontext.InitialNoConfig() 18 | 19 | ctx.Log.Debug("Connecting to homeserver database...") 20 | hsDb, err := dendrite.OpenDatabase(cfg.ConnectionString, cfg.ServerName) 21 | if err != nil { 22 | panic(err) 23 | } 24 | 25 | _common.PsqlFlatFileArchive[dendrite.LocalMedia](ctx, cfg, hsDb, func(r *dendrite.LocalMedia) (v2archive.MediaInfo, io.ReadCloser, error) { 26 | // For Base64Hash ABCCDD : 27 | // $importPath/A/B/CCDD/file 28 | 29 | mxc := util.MxcUri(cfg.ServerName, r.MediaId) 30 | 31 | ctx.Log.Info("Copying " + mxc) 32 | 33 | filePath := path.Join(cfg.ImportPath, r.Base64Hash[0:1], r.Base64Hash[1:2], r.Base64Hash[2:], "file") 34 | 35 | f, err := os.Open(filePath) 36 | if os.IsNotExist(err) && cfg.SkipMissing { 37 | ctx.Log.Warn("File does not appear to exist, skipping: " + filePath) 38 | return v2archive.MediaInfo{ 39 | FileName: filePath, 40 | }, nil, err 41 | } 42 | if err != nil { 43 | return v2archive.MediaInfo{}, nil, err 44 | } 45 | 46 | return v2archive.MediaInfo{ 47 | Origin: cfg.ServerName, 48 | MediaId: r.MediaId, 49 | FileName: r.UploadName, 50 | ContentType: r.ContentType, 51 | CreationTs: r.CreationTs, 52 | S3Url: "", 53 | UserId: r.UserId, 54 | }, f, nil 55 | }) 56 | } 57 | -------------------------------------------------------------------------------- /cmd/homeserver_offline_importers/export_synapse_for_import/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "os" 6 | "path" 7 | "strings" 8 | 9 | "github.com/t2bot/matrix-media-repo/archival/v2archive" 10 | "github.com/t2bot/matrix-media-repo/cmd/homeserver_offline_importers/_common" 11 | "github.com/t2bot/matrix-media-repo/common/rcontext" 12 | "github.com/t2bot/matrix-media-repo/homeserver_interop/synapse" 13 | "github.com/t2bot/matrix-media-repo/util" 14 | ) 15 | 16 | func main() { 17 | cfg :=
_common.InitExportPsqlFlatFile("Synapse", "media_store_path") 18 | ctx := rcontext.InitialNoConfig() 19 | 20 | ctx.Log.Debug("Connecting to homeserver database...") 21 | hsDb, err := synapse.OpenDatabase(cfg.ConnectionString) 22 | if err != nil { 23 | panic(err) 24 | } 25 | 26 | _common.PsqlFlatFileArchive[synapse.LocalMedia](ctx, cfg, hsDb, func(r *synapse.LocalMedia) (v2archive.MediaInfo, io.ReadCloser, error) { 27 | // For MediaID AABBCCDD : 28 | // $importPath/local_content/AA/BB/CCDD 29 | // 30 | // For a URL MediaID 2020-08-17_AABBCCDD: 31 | // $importPath/url_cache/2020-08-17/AABBCCDD 32 | 33 | mxc := util.MxcUri(cfg.ServerName, r.MediaId) 34 | 35 | ctx.Log.Info("Copying " + mxc) 36 | 37 | filePath := path.Join(cfg.ImportPath, "local_content", r.MediaId[0:2], r.MediaId[2:4], r.MediaId[4:]) 38 | if r.UrlCache != "" { 39 | dateParts := strings.Split(r.MediaId, "_") 40 | filePath = path.Join(cfg.ImportPath, "url_cache", dateParts[0], strings.Join(dateParts[1:], "_")) 41 | } 42 | 43 | f, err := os.Open(filePath) 44 | if os.IsNotExist(err) && cfg.SkipMissing { 45 | ctx.Log.Warn("File does not appear to exist, skipping: " + filePath) 46 | return v2archive.MediaInfo{ 47 | FileName: filePath, 48 | }, nil, err 49 | } 50 | if err != nil { 51 | return v2archive.MediaInfo{}, nil, err 52 | } 53 | 54 | return v2archive.MediaInfo{ 55 | Origin: cfg.ServerName, 56 | MediaId: r.MediaId, 57 | FileName: r.UploadName, 58 | ContentType: r.ContentType, 59 | CreationTs: r.CreatedTs, 60 | S3Url: "", 61 | UserId: r.UserId, 62 | }, f, nil 63 | }) 64 | } 65 | -------------------------------------------------------------------------------- /cmd/utilities/_common/signing_key_export.go: -------------------------------------------------------------------------------- 1 | package _common 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/sirupsen/logrus" 7 | "github.com/t2bot/matrix-media-repo/homeserver_interop" 8 | "github.com/t2bot/matrix-media-repo/homeserver_interop/dendrite" 9 | "github.com/t2bot/matrix-media-repo/homeserver_interop/mmr" 10 | "github.com/t2bot/matrix-media-repo/homeserver_interop/synapse" 11 | ) 12 | 13 | func EncodeSigningKeys(keys []*homeserver_interop.SigningKey, format string, file string) { 14 | var err error 15 | var b []byte 16 | switch format { 17 | case "synapse": 18 | b, err = synapse.EncodeAllSigningKeys(keys) 19 | case "dendrite": 20 | b, err = dendrite.EncodeAllSigningKeys(keys) 21 | case "mmr": 22 | b, err = mmr.EncodeAllSigningKeys(keys) 23 | default: 24 | logrus.Fatalf("Unknown output format '%s'. Try '%s -help' for information.", format, os.Args[0]) 25 | } 26 | if err != nil { 27 | logrus.Fatal(err) 28 | } 29 | 30 | f, err := os.Create(file) 31 | if err != nil { 32 | logrus.Fatal(err) 33 | } 34 | defer func(f *os.File) { 35 | _ = f.Close() 36 | }(f) 37 | 38 | _, err = f.Write(b) 39 | if err != nil { 40 | logrus.Fatal(err) 41 | } 42 | 43 | logrus.Infof("Done! 
Signing key written to '%s' in %s format", f.Name(), format) 44 | } 45 | -------------------------------------------------------------------------------- /cmd/utilities/combine_signing_keys/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "os" 6 | 7 | "github.com/sirupsen/logrus" 8 | "github.com/t2bot/matrix-media-repo/cmd/utilities/_common" 9 | "github.com/t2bot/matrix-media-repo/homeserver_interop" 10 | "github.com/t2bot/matrix-media-repo/homeserver_interop/any_server" 11 | "github.com/t2bot/matrix-media-repo/util" 12 | ) 13 | 14 | func main() { 15 | outputFormat := flag.String("format", "mmr", "The output format for the key. May be 'mmr', 'synapse', or 'dendrite'.") 16 | outputFile := flag.String("output", "./signing.key", "The output file for the key. Note that not all software will use multiple keys.") 17 | flag.Parse() 18 | 19 | keys := make(map[string]*homeserver_interop.SigningKey) 20 | keysArray := make([]*homeserver_interop.SigningKey, 0) 21 | for _, file := range flag.Args() { 22 | logrus.Infof("Reading %s", file) 23 | 24 | localKeys, err := decodeKeys(file) 25 | if err != nil { 26 | logrus.Fatal(err) 27 | } 28 | 29 | for _, key := range localKeys { 30 | if val, ok := keys[key.KeyVersion]; ok { 31 | logrus.Fatalf("Duplicate key version '%s' detected. Known='%s', duplicate='%s'", key.KeyVersion, util.EncodeUnpaddedBase64ToString(val.PrivateKey), util.EncodeUnpaddedBase64ToString(key.PrivateKey)) 32 | } 33 | 34 | keys[key.KeyVersion] = key 35 | keysArray = append(keysArray, key) 36 | } 37 | } 38 | 39 | _common.EncodeSigningKeys(keysArray, *outputFormat, *outputFile) 40 | } 41 | 42 | func decodeKeys(fileName string) ([]*homeserver_interop.SigningKey, error) { 43 | f, err := os.Open(fileName) 44 | if err != nil { 45 | return nil, err 46 | } 47 | defer f.Close() 48 | 49 | return any_server.DecodeAllSigningKeys(f) 50 | } 51 | -------------------------------------------------------------------------------- /cmd/utilities/generate_signing_key/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "os" 6 | 7 | "github.com/sirupsen/logrus" 8 | "github.com/t2bot/matrix-media-repo/cmd/utilities/_common" 9 | "github.com/t2bot/matrix-media-repo/homeserver_interop" 10 | "github.com/t2bot/matrix-media-repo/homeserver_interop/any_server" 11 | ) 12 | 13 | func main() { 14 | inputFile := flag.String("input", "", "When set to a file path, the signing key to convert to the output format. The key must have been generated in a format supported by -format. If the format supports multiple keys, only the first will be converted.") 15 | outputFormat := flag.String("format", "mmr", "The output format for the key. 
May be 'mmr', 'synapse', or 'dendrite'.") 16 | outputFile := flag.String("output", "./signing.key", "The output file for the key.") 17 | flag.Parse() 18 | 19 | var key *homeserver_interop.SigningKey 20 | var err error 21 | 22 | if *inputFile != "" { 23 | key, err = decodeKey(*inputFile) 24 | } else { 25 | key, err = homeserver_interop.GenerateSigningKey() 26 | } 27 | if err != nil { 28 | logrus.Fatal(err) 29 | } 30 | 31 | logrus.Infof("Key ID will be 'ed25519:%s'", key.KeyVersion) 32 | 33 | _common.EncodeSigningKeys([]*homeserver_interop.SigningKey{key}, *outputFormat, *outputFile) 34 | } 35 | 36 | func decodeKey(fileName string) (*homeserver_interop.SigningKey, error) { 37 | f, err := os.Open(fileName) 38 | if err != nil { 39 | return nil, err 40 | } 41 | defer f.Close() 42 | 43 | return any_server.DecodeSigningKey(f) 44 | } 45 | -------------------------------------------------------------------------------- /common/config/conf_min_shared.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | type MinimumRepoConfig struct { 4 | DataStores []DatastoreConfig `yaml:"datastores"` 5 | Archiving ArchivingConfig `yaml:"archiving"` 6 | Uploads UploadsConfig `yaml:"uploads"` 7 | Identicons IdenticonsConfig `yaml:"identicons"` 8 | Quarantine QuarantineConfig `yaml:"quarantine"` 9 | TimeoutSeconds TimeoutsConfig `yaml:"timeouts"` 10 | Features FeatureConfig `yaml:"featureSupport"` 11 | AccessTokens AccessTokenConfig `yaml:"accessTokens"` 12 | } 13 | 14 | func NewDefaultMinimumRepoConfig() MinimumRepoConfig { 15 | return MinimumRepoConfig{ 16 | DataStores: []DatastoreConfig{}, 17 | Archiving: ArchivingConfig{ 18 | Enabled: true, 19 | SelfService: false, 20 | TargetBytesPerPart: 209715200, // 200mb 21 | }, 22 | Uploads: UploadsConfig{ 23 | MaxSizeBytes: 104857600, // 100mb 24 | MinSizeBytes: 100, 25 | ReportedMaxSizeBytes: 0, 26 | MaxPending: 5, 27 | MaxAgeSeconds: 1800, // 30 minutes 28 | Quota: QuotasConfig{ 29 | Enabled: false, 30 | UserQuotas: []QuotaUserConfig{}, 31 | }, 32 | }, 33 | Identicons: IdenticonsConfig{ 34 | Enabled: true, 35 | }, 36 | Quarantine: QuarantineConfig{ 37 | ReplaceThumbnails: true, 38 | ReplaceDownloads: false, 39 | ThumbnailPath: "", 40 | AllowLocalAdmins: true, 41 | }, 42 | TimeoutSeconds: TimeoutsConfig{ 43 | UrlPreviews: 10, 44 | ClientServer: 30, 45 | Federation: 120, 46 | }, 47 | Features: FeatureConfig{}, 48 | AccessTokens: AccessTokenConfig{ 49 | MaxCacheTimeSeconds: 0, 50 | UseAppservices: false, 51 | Appservices: []AppserviceConfig{}, 52 | }, 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /common/config/util.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "gopkg.in/yaml.v3" 5 | ) 6 | 7 | func mapToObjYaml(input map[string]interface{}, ref interface{}) error { 8 | encoded, err := yaml.Marshal(input) 9 | if err != nil { 10 | return err 11 | } 12 | 13 | err = yaml.Unmarshal(encoded, ref) 14 | return err 15 | } 16 | 17 | func objToMapYaml(input interface{}) (map[string]interface{}, error) { 18 | encoded, err := yaml.Marshal(input) 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | m := make(map[string]interface{}) 24 | err = yaml.Unmarshal(encoded, &m) 25 | return m, err 26 | } 27 | -------------------------------------------------------------------------------- /common/context.go: -------------------------------------------------------------------------------- 1 | package 
common 2 | 3 | type MmrContextKey string 4 | 5 | const ( 6 | ContextLogger MmrContextKey = "mmr.logger" 7 | ContextIgnoreHost MmrContextKey = "mmr.ignore_host" 8 | ContextAction MmrContextKey = "mmr.action" 9 | ContextRequest MmrContextKey = "mmr.request" 10 | ContextRequestId MmrContextKey = "mmr.request_id" 11 | ContextRequestStartTime MmrContextKey = "mmr.request_start_time" 12 | ContextServerConfig MmrContextKey = "mmr.serverConfig" 13 | ContextDomainConfig MmrContextKey = "mmr.domain_config" 14 | ContextStatusCode MmrContextKey = "mmr.status_code" 15 | ) 16 | -------------------------------------------------------------------------------- /common/errorcodes.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | const ErrCodeInvalidHost = "M_INVALID_HOST" 4 | const ErrCodeNotFound = "M_NOT_FOUND" 5 | const ErrCodeUnknownToken = "M_UNKNOWN_TOKEN" 6 | const ErrCodeNoGuests = "M_GUEST_ACCESS_FORBIDDEN" 7 | const ErrCodeMissingToken = "M_MISSING_TOKEN" 8 | const ErrCodeMediaTooLarge = "M_MEDIA_TOO_LARGE" 9 | const ErrCodeMediaTooSmall = "M_MEDIA_TOO_SMALL" 10 | const ErrCodeTooLarge = "M_TOO_LARGE" 11 | const ErrCodeMethodNotAllowed = "M_METHOD_NOT_ALLOWED" 12 | const ErrCodeBadRequest = "M_BAD_REQUEST" 13 | const ErrCodeRateLimitExceeded = "M_LIMIT_EXCEEDED" 14 | const ErrCodeUnknown = "M_UNKNOWN" 15 | const ErrCodeForbidden = "M_FORBIDDEN" 16 | const ErrCodeUnauthorized = "M_UNAUTHORIZED" 17 | const ErrCodeQuotaExceeded = "M_QUOTA_EXCEEDED" 18 | const ErrCodeCannotOverwrite = "M_CANNOT_OVERWRITE_MEDIA" 19 | const ErrCodeNotYetUploaded = "M_NOT_YET_UPLOADED" 20 | -------------------------------------------------------------------------------- /common/errors.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | var ErrMediaNotFound = errors.New("media not found") 8 | var ErrMediaTooLarge = errors.New("media too large") 9 | var ErrInvalidHost = errors.New("invalid host") 10 | var ErrHostNotFound = errors.New("host not found") 11 | var ErrHostNotAllowed = errors.New("host not allowed") 12 | var ErrMediaQuarantined = errors.New("media quarantined") 13 | var ErrQuotaExceeded = errors.New("quota exceeded") 14 | var ErrWrongUser = errors.New("wrong user") 15 | var ErrExpired = errors.New("expired") 16 | var ErrAlreadyUploaded = errors.New("already uploaded") 17 | var ErrMediaNotYetUploaded = errors.New("media not yet uploaded") 18 | var ErrMediaDimensionsTooSmall = errors.New("media is too small dimensionally") 19 | var ErrRateLimitExceeded = errors.New("rate limit exceeded") 20 | var ErrRestrictedAuth = errors.New("authentication is required to download this media") 21 | -------------------------------------------------------------------------------- /common/globals/reload.go: -------------------------------------------------------------------------------- 1 | package globals 2 | 3 | var WebReloadChan = make(chan bool) 4 | var MetricsReloadChan = make(chan bool) 5 | var DatabaseReloadChan = make(chan bool) 6 | var DatastoresReloadChan = make(chan bool) 7 | var RecurringTasksReloadChan = make(chan bool) 8 | var AccessTokenReloadChan = make(chan bool) 9 | var CacheReplaceChan = make(chan bool) 10 | var PluginReloadChan = make(chan bool) 11 | var PoolReloadChan = make(chan bool) 12 | var ErrorCacheReloadChan = make(chan bool) 13 | var MatrixCachesReloadChan = make(chan bool) 14 | var PGOReloadChan = make(chan bool) 15 | var BucketsReloadChan = 
make(chan bool) 16 | -------------------------------------------------------------------------------- /common/import_cmdline/ask_machine_id.go: -------------------------------------------------------------------------------- 1 | package import_cmdline 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/t2bot/matrix-media-repo/common/version" 8 | "github.com/t2bot/matrix-media-repo/util/ids" 9 | "golang.org/x/term" 10 | ) 11 | 12 | func AskMachineId() { 13 | fmt.Printf("The importer runs as a MMR worker and needs to have a dedicated MACHINE_ID. See https://docs.t2bot.io/matrix-media-repo/%s/deployment/horizontal_scaling for details on what a MACHINE_ID is.", version.DocsVersion) 14 | if !term.IsTerminal(int(os.Stdin.Fd())) { 15 | fmt.Println("Please specify a MACHINE_ID environment variable.") 16 | os.Exit(2) 17 | return // for good measure 18 | } 19 | fmt.Println("If you don't use horizontal scaling, you can use '1' as the machine ID. Otherwise, please enter an unused machine ID in your environment.") 20 | fmt.Printf("Machine ID: ") 21 | var machineId int64 22 | if _, err := fmt.Scanf("%d", &machineId); err != nil { 23 | panic(err) 24 | } 25 | if err := ids.SetMachineId(machineId); err != nil { 26 | panic(err) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /common/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | "runtime/debug" 6 | 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | var GitCommit string 11 | var Version string 12 | 13 | // DocsVersion The version number used by docs.t2bot.io links throughout the application runtime 14 | const DocsVersion = "v1.3.3" 15 | 16 | func SetDefaults() { 17 | build, infoOk := debug.ReadBuildInfo() 18 | 19 | if GitCommit == "" { 20 | GitCommit = ".dev" 21 | if infoOk { 22 | for _, setting := range build.Settings { 23 | if setting.Key == "vcs.revision" { 24 | GitCommit = setting.Value 25 | break 26 | } 27 | } 28 | } 29 | } 30 | 31 | if Version == "" { 32 | Version = "unknown" 33 | } 34 | } 35 | 36 | func Print(usingLogger bool) { 37 | SetDefaults() 38 | 39 | if usingLogger { 40 | logrus.Info("Version: " + Version) 41 | logrus.Info("Commit: " + GitCommit) 42 | } else { 43 | fmt.Println("Version: " + Version) 44 | fmt.Println("Commit: " + GitCommit) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /database/json_value.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "database/sql/driver" 5 | "encoding/json" 6 | "errors" 7 | ) 8 | 9 | type AnonymousJson map[string]interface{} 10 | 11 | // Value implements driver.Valuer 12 | func (a *AnonymousJson) Value() (driver.Value, error) { 13 | return json.Marshal(a) 14 | } 15 | 16 | // Scan implements sql.Scanner 17 | func (a *AnonymousJson) Scan(value interface{}) error { 18 | if b, ok := value.([]byte); !ok { 19 | return errors.New("failed to assert jsonb is bytes") 20 | } else { 21 | return json.Unmarshal(b, &a) 22 | } 23 | } 24 | 25 | func (a *AnonymousJson) ApplyTo(val interface{}) error { 26 | if b, err := json.Marshal(a); err != nil { 27 | return err 28 | } else { 29 | if err = json.Unmarshal(b, &val); err != nil { 30 | return err 31 | } 32 | } 33 | return nil 34 | } 35 | 36 | func (a *AnonymousJson) ApplyFrom(val interface{}) error { 37 | if b, err := json.Marshal(val); err != nil { 38 | return err 39 | } else { 40 | if err = 
json.Unmarshal(b, &a); err != nil { 41 | return err 42 | } 43 | } 44 | return nil 45 | } 46 | -------------------------------------------------------------------------------- /database/table_last_access.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | ) 9 | 10 | type DbLastAccess struct { 11 | Sha256Hash string 12 | LastAccessTs int64 13 | } 14 | 15 | const upsertLastAccess = "INSERT INTO last_access (sha256_hash, last_access_ts) VALUES ($1, $2) ON CONFLICT (sha256_hash) DO UPDATE SET last_access_ts = $2;" 16 | 17 | type lastAccessTableStatements struct { 18 | upsertLastAccess *sql.Stmt 19 | } 20 | 21 | type lastAccessTableWithContext struct { 22 | statements *lastAccessTableStatements 23 | ctx rcontext.RequestContext 24 | } 25 | 26 | func prepareLastAccessTables(db *sql.DB) (*lastAccessTableStatements, error) { 27 | var err error 28 | var stmts = &lastAccessTableStatements{} 29 | 30 | if stmts.upsertLastAccess, err = db.Prepare(upsertLastAccess); err != nil { 31 | return nil, errors.New("error preparing upsertLastAccess: " + err.Error()) 32 | } 33 | 34 | return stmts, nil 35 | } 36 | 37 | func (s *lastAccessTableStatements) Prepare(ctx rcontext.RequestContext) *lastAccessTableWithContext { 38 | return &lastAccessTableWithContext{ 39 | statements: s, 40 | ctx: ctx, 41 | } 42 | } 43 | 44 | func (s *lastAccessTableWithContext) Upsert(sha256hash string, ts int64) error { 45 | _, err := s.statements.upsertLastAccess.ExecContext(s.ctx, sha256hash, ts) 46 | return err 47 | } 48 | -------------------------------------------------------------------------------- /database/table_media_hold.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | "github.com/t2bot/matrix-media-repo/util" 9 | ) 10 | 11 | type DbHeldMedia struct { 12 | Origin string 13 | MediaId string 14 | Reason string 15 | } 16 | 17 | type HeldReason string 18 | 19 | const ( 20 | ForCreateHeldReason HeldReason = "media_create" 21 | ) 22 | 23 | const insertHeldMedia = "INSERT INTO media_id_hold (origin, media_id, reason, held_ts) VALUES ($1, $2, $3, $4);" 24 | const deleteHeldMedia = "DELETE FROM media_id_hold WHERE reason = $1 AND held_ts <= $2;" 25 | 26 | type heldMediaTableStatements struct { 27 | insertHeldMedia *sql.Stmt 28 | deleteHeldMedia *sql.Stmt 29 | } 30 | 31 | type heldMediaTableWithContext struct { 32 | statements *heldMediaTableStatements 33 | ctx rcontext.RequestContext 34 | } 35 | 36 | func prepareHeldMediaTables(db *sql.DB) (*heldMediaTableStatements, error) { 37 | var err error 38 | var stmts = &heldMediaTableStatements{} 39 | 40 | if stmts.insertHeldMedia, err = db.Prepare(insertHeldMedia); err != nil { 41 | return nil, errors.New("error preparing insertHeldMedia: " + err.Error()) 42 | } 43 | if stmts.deleteHeldMedia, err = db.Prepare(deleteHeldMedia); err != nil { 44 | return nil, errors.New("error preparing deleteHeldMedia: " + err.Error()) 45 | } 46 | 47 | return stmts, nil 48 | } 49 | 50 | func (s *heldMediaTableStatements) Prepare(ctx rcontext.RequestContext) *heldMediaTableWithContext { 51 | return &heldMediaTableWithContext{ 52 | statements: s, 53 | ctx: ctx, 54 | } 55 | } 56 | 57 | func (s *heldMediaTableWithContext) TryInsert(origin string, mediaId string, reason HeldReason) 
error { 58 | _, err := s.statements.insertHeldMedia.ExecContext(s.ctx, origin, mediaId, reason, util.NowMillis()) 59 | return err 60 | } 61 | 62 | func (s *heldMediaTableWithContext) DeleteOlderThan(reason HeldReason, olderThanTs int64) error { 63 | _, err := s.statements.deleteHeldMedia.ExecContext(s.ctx, reason, olderThanTs) 64 | return err 65 | } 66 | -------------------------------------------------------------------------------- /database/table_user_stats.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | ) 9 | 10 | type DbUserStats struct { 11 | UserId string 12 | UploadedBytes int64 13 | } 14 | 15 | const selectUserStatsUploadedBytes = "SELECT uploaded_bytes FROM user_stats WHERE user_id = $1;" 16 | 17 | type userStatsTableStatements struct { 18 | selectUserStatsUploadedBytes *sql.Stmt 19 | } 20 | 21 | type userStatsTableWithContext struct { 22 | statements *userStatsTableStatements 23 | ctx rcontext.RequestContext 24 | } 25 | 26 | func prepareUserStatsTables(db *sql.DB) (*userStatsTableStatements, error) { 27 | var err error 28 | var stmts = &userStatsTableStatements{} 29 | 30 | if stmts.selectUserStatsUploadedBytes, err = db.Prepare(selectUserStatsUploadedBytes); err != nil { 31 | return nil, errors.New("error preparing selectUserStatsUploadedBytes: " + err.Error()) 32 | } 33 | 34 | return stmts, nil 35 | } 36 | 37 | func (s *userStatsTableStatements) Prepare(ctx rcontext.RequestContext) *userStatsTableWithContext { 38 | return &userStatsTableWithContext{ 39 | statements: s, 40 | ctx: ctx, 41 | } 42 | } 43 | 44 | func (s *userStatsTableWithContext) UserUploadedBytes(userId string) (int64, error) { 45 | row := s.statements.selectUserStatsUploadedBytes.QueryRowContext(s.ctx, userId) 46 | val := int64(0) 47 | err := row.Scan(&val) 48 | if errors.Is(err, sql.ErrNoRows) { 49 | err = nil 50 | val = 0 51 | } 52 | return val, err 53 | } 54 | -------------------------------------------------------------------------------- /datastores/delete.go: -------------------------------------------------------------------------------- 1 | package datastores 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | "path" 7 | 8 | "github.com/minio/minio-go/v7" 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/t2bot/matrix-media-repo/common/config" 11 | "github.com/t2bot/matrix-media-repo/common/rcontext" 12 | "github.com/t2bot/matrix-media-repo/metrics" 13 | ) 14 | 15 | func Remove(ctx rcontext.RequestContext, ds config.DatastoreConfig, location string) error { 16 | var err error 17 | if ds.Type == "s3" { 18 | var s3c *s3 19 | s3c, err = getS3(ds) 20 | if err != nil { 21 | return err 22 | } 23 | 24 | metrics.S3Operations.With(prometheus.Labels{"operation": "RemoveObject"}).Inc() 25 | err = s3c.client.RemoveObject(ctx.Context, s3c.bucket, location, minio.RemoveObjectOptions{}) 26 | } else if ds.Type == "file" { 27 | basePath := ds.Options["path"] 28 | err = os.Remove(path.Join(basePath, location)) 29 | if err != nil && os.IsNotExist(err) { 30 | return nil // not existing means it was deleted, as far as we care 31 | } 32 | } else { 33 | return errors.New("unknown datastore type - contact developer") 34 | } 35 | 36 | return err 37 | } 38 | 39 | func RemoveWithDsId(ctx rcontext.RequestContext, dsId string, location string) error { 40 | ds, ok := Get(ctx, dsId) 41 | if !ok { 42 | return errors.New("unknown datastore") 43 | } 44 | 
return Remove(ctx, ds, location) 45 | } 46 | -------------------------------------------------------------------------------- /datastores/download.go: -------------------------------------------------------------------------------- 1 | package datastores 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "os" 8 | "path" 9 | 10 | "github.com/minio/minio-go/v7" 11 | "github.com/prometheus/client_golang/prometheus" 12 | "github.com/t2bot/matrix-media-repo/common/config" 13 | "github.com/t2bot/matrix-media-repo/common/rcontext" 14 | "github.com/t2bot/matrix-media-repo/metrics" 15 | ) 16 | 17 | func Download(ctx rcontext.RequestContext, ds config.DatastoreConfig, dsFileName string) (io.ReadSeekCloser, error) { 18 | var err error 19 | var rsc io.ReadSeekCloser 20 | if ds.Type == "s3" { 21 | var s3c *s3 22 | s3c, err = getS3(ds) 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | metrics.S3Operations.With(prometheus.Labels{"operation": "GetObject"}).Inc() 28 | rsc, err = s3c.client.GetObject(ctx.Context, s3c.bucket, dsFileName, minio.GetObjectOptions{}) 29 | } else if ds.Type == "file" { 30 | basePath := ds.Options["path"] 31 | 32 | rsc, err = os.Open(path.Join(basePath, dsFileName)) 33 | } else { 34 | return nil, errors.New("unknown datastore type - contact developer") 35 | } 36 | 37 | return rsc, err 38 | } 39 | 40 | func DownloadOrRedirect(ctx rcontext.RequestContext, ds config.DatastoreConfig, dsFileName string) (io.ReadSeekCloser, error) { 41 | if ds.Type != "s3" { 42 | return Download(ctx, ds, dsFileName) 43 | } 44 | 45 | s3c, err := getS3(ds) 46 | if err != nil { 47 | return nil, err 48 | } 49 | 50 | if s3c.publicBaseUrl != "" { 51 | metrics.S3Operations.With(prometheus.Labels{"operation": "RedirectGetObject"}).Inc() 52 | return nil, redirect(fmt.Sprintf("%s%s", s3c.publicBaseUrl, dsFileName)) 53 | } 54 | 55 | return Download(ctx, ds, dsFileName) 56 | } 57 | 58 | func WouldRedirectWhenCached(ctx rcontext.RequestContext, ds config.DatastoreConfig) (bool, error) { 59 | if ds.Type != "s3" { 60 | return false, nil 61 | } 62 | 63 | s3c, err := getS3(ds) 64 | if err != nil { 65 | return false, err 66 | } 67 | 68 | return s3c.redirectWhenCached && s3c.publicBaseUrl != "", nil 69 | } 70 | -------------------------------------------------------------------------------- /datastores/kind.go: -------------------------------------------------------------------------------- 1 | package datastores 2 | 3 | type Kind string 4 | 5 | const ( 6 | LocalMediaKind Kind = "local_media" 7 | RemoteMediaKind Kind = "remote_media" 8 | ThumbnailsKind Kind = "thumbnails" 9 | ArchivesKind Kind = "archives" 10 | AllKind Kind = "all" 11 | ) 12 | 13 | func HasListedKind(have []string, want Kind) bool { 14 | for _, k := range have { 15 | k2 := Kind(k) 16 | if k2 == want || k2 == AllKind { 17 | return true 18 | } 19 | } 20 | return false 21 | } 22 | -------------------------------------------------------------------------------- /datastores/locate.go: -------------------------------------------------------------------------------- 1 | package datastores 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/common/config" 5 | "github.com/t2bot/matrix-media-repo/common/rcontext" 6 | ) 7 | 8 | func Get(ctx rcontext.RequestContext, dsId string) (config.DatastoreConfig, bool) { 9 | for _, c := range ctx.Config.DataStores { 10 | if c.Id == dsId { 11 | return c, true 12 | } 13 | } 14 | return config.DatastoreConfig{}, false 15 | } 16 | -------------------------------------------------------------------------------- 
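The locate and download helpers above are typically used together by callers: resolve the datastore config recorded for a piece of media, then either stream the bytes or surface a redirect (see redirect.go below). A minimal sketch, assuming a media record that carries a datastore ID and location, with illustrative names and abbreviated error handling:

    ds, ok := datastores.Get(ctx, record.DatastoreId)
    if !ok {
        return nil, errors.New("unknown datastore")
    }
    stream, err := datastores.DownloadOrRedirect(ctx, ds, record.Location)
    var redirectErr datastores.RedirectError
    if errors.As(err, &redirectErr) {
        // the HTTP layer is expected to redirect the client to redirectErr.RedirectUrl
    }
    // otherwise, serve the media from stream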
/datastores/pick.go: -------------------------------------------------------------------------------- 1 | package datastores 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/t2bot/matrix-media-repo/common/config" 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | "github.com/t2bot/matrix-media-repo/database" 9 | ) 10 | 11 | func Pick(ctx rcontext.RequestContext, kind Kind) (config.DatastoreConfig, error) { 12 | usable := make([]config.DatastoreConfig, 0) 13 | for _, conf := range ctx.Config.DataStores { 14 | if !HasListedKind(conf.MediaKinds, kind) { 15 | continue 16 | } 17 | usable = append(usable, conf) 18 | } 19 | 20 | if len(usable) == 0 { 21 | return config.DatastoreConfig{}, errors.New("unable to locate a usable datastore") 22 | } 23 | if len(usable) == 1 { 24 | return usable[0], nil 25 | } 26 | 27 | // Find the smallest datastore, by relative size 28 | dsSize := int64(-1) 29 | idx := 0 30 | db := database.GetInstance().MetadataView.Prepare(ctx) 31 | for i, ds := range usable { 32 | size, err := db.EstimateDatastoreSize(ds.Id) 33 | if err != nil { 34 | return config.DatastoreConfig{}, err 35 | } 36 | if dsSize < 0 || size < dsSize { 37 | dsSize, idx = size, i 38 | } 39 | } 40 | return usable[idx], nil 41 | } 42 | -------------------------------------------------------------------------------- /datastores/redirect.go: -------------------------------------------------------------------------------- 1 | package datastores 2 | 3 | import "errors" 4 | 5 | type RedirectError struct { 6 | error 7 | RedirectUrl string 8 | } 9 | 10 | func redirect(url string) RedirectError { 11 | return RedirectError{ 12 | error: errors.New("redirection"), 13 | RedirectUrl: url, 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /dev/element-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "default_hs_url": "http://localhost:8008", 3 | "brand": "Element", 4 | "dangerously_allow_unsafe_and_insecure_passwords": true, 5 | "enableLabs": true 6 | } -------------------------------------------------------------------------------- /dev/homeserver.nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80 default_server; 3 | server_name _; 4 | client_max_body_size 100000m; 5 | proxy_request_buffering off; 6 | 7 | location /_matrix/media { 8 | proxy_set_header Host localhost; 9 | proxy_pass http://host.docker.internal:8001; 10 | } 11 | 12 | location /_matrix/client/v1/media { 13 | proxy_set_header Host localhost; 14 | proxy_pass http://host.docker.internal:8001; 15 | } 16 | 17 | location /_matrix/federation/v1/media { 18 | proxy_set_header Host localhost; 19 | proxy_pass http://host.docker.internal:8001; 20 | } 21 | 22 | location /_matrix { 23 | proxy_pass http://media_repo_synapse:8008; 24 | } 25 | 26 | location / { 27 | proxy_pass http://media_repo_element:80; 28 | } 29 | } -------------------------------------------------------------------------------- /dev/synapse-db/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /docker/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | cd /data 3 | if [ !
-f media-repo.yaml ]; then 4 | cp /etc/media-repo.yaml.sample media-repo.yaml 5 | fi 6 | chown -R ${UID}:${GID} /data 7 | 8 | exec su-exec ${UID}:${GID} media_repo 9 | -------------------------------------------------------------------------------- /docs/releasing.md: -------------------------------------------------------------------------------- 1 | # Releasing MMR 2 | 3 | MMR is released whenever the changelog *feels* worthy of a release. 4 | 5 | ## Prerequisites 6 | 7 | 1. Ensure `CHANGELOG.md` is up-to-date and has consistent language. 8 | 2. Ensure tests pass on main branch. 9 | 10 | ## Release 11 | 12 | 1. Update `version/version.go#DocsVersion` to point to the about-to-be-released version. 13 | 2. In the [docs.t2bot.io repo](https://github.com/t2bot/docs.t2bot.io): 14 | 1. Rename `/content/matrix-media-repo/unstable` to `/content/matrix-media-repo/v1.3.4` (if releasing v1.3.4). 15 | 2. Run `npm run build` (after `npm install` if required). 16 | 3. Copy `/site/matrix-media-repo/v1.3.4` to `/old_versions/matrix-media-repo/v1.3.4`. 17 | 4. Rename `/content/matrix-media-repo/v1.3.4` back to `/content/matrix-media-repo/unstable`. 18 | 5. Commit and push changes. 19 | 3. Update the links and headers in `CHANGELOG.md`. 20 | 4. Commit any outstanding changes. 21 | 5. Create a git tag for `v1.3.4` and push both the tag and main branch. 22 | 6. On the GitHub releases page, create a new release for the pushed tag. The title is the tag name, and the content is the relevant changelog section. 23 | 7. Build binaries for Windows and Linux, then attach them to the GitHub release. 24 | 8. Publish the release. 25 | 9. Ensure the Docker image is built or building, then announce the release in the MMR Matrix room. 26 | -------------------------------------------------------------------------------- /errcache/cache.go: -------------------------------------------------------------------------------- 1 | package errcache 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/patrickmn/go-cache" 8 | ) 9 | 10 | type ErrCache struct { 11 | cache *cache.Cache 12 | mu sync.Mutex 13 | } 14 | 15 | func NewErrCache(expiration time.Duration) *ErrCache { 16 | return &ErrCache{cache: cache.New(expiration, expiration*2)} 17 | } 18 | 19 | func (e *ErrCache) Resize(expiration time.Duration) { 20 | e.mu.Lock() 21 | e.cache = cache.NewFrom(expiration, expiration*2, e.cache.Items()) 22 | e.mu.Unlock() 23 | } 24 | 25 | func (e *ErrCache) Get(key string) error { 26 | e.mu.Lock() 27 | if err, ok := e.cache.Get(key); ok { 28 | e.mu.Unlock() 29 | return err.(error) 30 | } 31 | e.mu.Unlock() 32 | return nil 33 | } 34 | 35 | func (e *ErrCache) Set(key string, err error) { 36 | e.mu.Lock() 37 | e.cache.Set(key, err, cache.DefaultExpiration) 38 | e.mu.Unlock() 39 | } 40 | -------------------------------------------------------------------------------- /errcache/init.go: -------------------------------------------------------------------------------- 1 | package errcache 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/t2bot/matrix-media-repo/common/config" 7 | ) 8 | 9 | var DownloadErrors *ErrCache 10 | 11 | func Init() { 12 | DownloadErrors = NewErrCache(time.Duration(config.Get().Downloads.FailureCacheMinutes) * time.Minute) 13 | } 14 | 15 | func AdjustSize() { 16 | DownloadErrors.Resize(time.Duration(config.Get().Downloads.FailureCacheMinutes) * time.Minute) 17 | } 18 | -------------------------------------------------------------------------------- /homeserver_interop/ImportDb.go: 
-------------------------------------------------------------------------------- 1 | package homeserver_interop 2 | 3 | type ImportDbMedia interface{} 4 | 5 | type ImportDb[M ImportDbMedia] interface { 6 | GetAllMedia() ([]*M, error) 7 | } 8 | -------------------------------------------------------------------------------- /homeserver_interop/any_server/signing_key.go: -------------------------------------------------------------------------------- 1 | package any_server 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | 7 | "github.com/t2bot/matrix-media-repo/homeserver_interop" 8 | "github.com/t2bot/matrix-media-repo/homeserver_interop/dendrite" 9 | "github.com/t2bot/matrix-media-repo/homeserver_interop/mmr" 10 | "github.com/t2bot/matrix-media-repo/homeserver_interop/synapse" 11 | ) 12 | 13 | func DecodeSigningKey(key io.ReadSeeker) (*homeserver_interop.SigningKey, error) { 14 | keys, err := DecodeAllSigningKeys(key) 15 | if err != nil { 16 | return nil, err 17 | } 18 | 19 | return keys[0], nil 20 | } 21 | 22 | func DecodeAllSigningKeys(key io.ReadSeeker) ([]*homeserver_interop.SigningKey, error) { 23 | var keys []*homeserver_interop.SigningKey 24 | var err error 25 | 26 | var errorStack error 27 | 28 | // Try Synapse first, as the most popular 29 | keys, err = synapse.DecodeAllSigningKeys(key) 30 | if err == nil { 31 | return keys, nil 32 | } 33 | errorStack = errors.Join(errors.New("synapse: unable to decode"), err, errorStack) 34 | 35 | // Rewind & try Dendrite 36 | if _, err = key.Seek(0, io.SeekStart); err != nil { 37 | return nil, err 38 | } 39 | keys, err = dendrite.DecodeAllSigningKeys(key) 40 | if err == nil { 41 | return keys, nil 42 | } 43 | errorStack = errors.Join(errors.New("dendrite: unable to decode"), err, errorStack) 44 | 45 | // Rewind & try MMR 46 | if _, err = key.Seek(0, io.SeekStart); err != nil { 47 | return nil, err 48 | } 49 | keys, err = mmr.DecodeAllSigningKeys(key) 50 | if err == nil { 51 | return keys, nil 52 | } 53 | errorStack = errors.Join(errors.New("mmr: unable to decode"), err, errorStack) 54 | 55 | // Fail case 56 | return nil, errors.Join(errors.New("unable to detect signing key format"), errorStack) 57 | } 58 | -------------------------------------------------------------------------------- /homeserver_interop/internal/signing_key_encode.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | 7 | "github.com/t2bot/matrix-media-repo/homeserver_interop" 8 | ) 9 | 10 | func EncodeNewlineAppendFormattedSigningKeys(keys []*homeserver_interop.SigningKey, encodeFn func(*homeserver_interop.SigningKey) ([]byte, error)) ([]byte, error) { 11 | buf := &bytes.Buffer{} 12 | for i, key := range keys { 13 | b, err := encodeFn(key) 14 | if err != nil { 15 | return nil, err 16 | } 17 | 18 | n, err := buf.Write(b) 19 | if err != nil { 20 | return nil, err 21 | } 22 | if n != len(b) { 23 | return nil, fmt.Errorf("wrote %d bytes but expected %d bytes", n, len(b)) 24 | } 25 | 26 | if b[len(b)-1] != '\n' && i != (len(keys)-1) { 27 | n, err = buf.Write([]byte{'\n'}) 28 | if err != nil { 29 | return nil, err 30 | } 31 | if n != 1 { 32 | return nil, fmt.Errorf("wrote %d bytes but expected %d bytes", n, 1) 33 | } 34 | } 35 | } 36 | return buf.Bytes(), nil 37 | } 38 | -------------------------------------------------------------------------------- /homeserver_interop/signing_key.go: -------------------------------------------------------------------------------- 1 | package homeserver_interop 
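// Illustrative flow, as seen from a consumer such as the generate_signing_key and
// combine_signing_keys tools above (flag parsing and file handling omitted; names are
// from this repository):
//
//	key, err := homeserver_interop.GenerateSigningKey()
//	if err != nil { /* handle */ }
//	encoded, err := mmr.EncodeAllSigningKeys([]*homeserver_interop.SigningKey{key})
//	// ...or use the synapse/dendrite encoders for those formats, then write the
//	// bytes to the signing key file the homeserver is configured to read.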
2 | 3 | import ( 4 | "crypto/ed25519" 5 | "crypto/rand" 6 | "fmt" 7 | "sort" 8 | "strings" 9 | ) 10 | 11 | type SigningKey struct { 12 | PrivateKey ed25519.PrivateKey 13 | KeyVersion string 14 | } 15 | 16 | func GenerateSigningKey() (*SigningKey, error) { 17 | keyVersion := makeKeyVersion() 18 | 19 | _, priv, err := ed25519.GenerateKey(nil) 20 | priv = priv[len(priv)-32:] 21 | if err != nil { 22 | return nil, err 23 | } 24 | 25 | return &SigningKey{ 26 | PrivateKey: priv, 27 | KeyVersion: keyVersion, 28 | }, nil 29 | } 30 | 31 | func makeKeyVersion() string { 32 | buf := make([]byte, 2) 33 | chars := strings.Split("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", "") 34 | for i := 0; i < len(chars); i++ { 35 | sort.Slice(chars, func(i int, j int) bool { 36 | c, err := rand.Read(buf) 37 | 38 | // "should never happen" clauses 39 | if err != nil { 40 | panic(err) 41 | } 42 | if c != len(buf) || c != 2 { 43 | panic(fmt.Errorf("crypto rand read %d bytes, expected %d", c, len(buf))) 44 | } 45 | 46 | return buf[0] < buf[1] 47 | }) 48 | } 49 | 50 | return strings.Join(chars[:6], "") 51 | } 52 | -------------------------------------------------------------------------------- /homeserver_interop/synapse/api.go: -------------------------------------------------------------------------------- 1 | package synapse 2 | 3 | const PrefixAdminApi = "/_synapse/admin" 4 | 5 | type SynUserStatRecord struct { 6 | DisplayName string `json:"displayname"` 7 | UserId string `json:"user_id"` 8 | MediaCount int64 `json:"media_count"` 9 | MediaLength int64 `json:"media_length"` 10 | } 11 | 12 | type SynUserStatsResponse struct { 13 | Users []*SynUserStatRecord `json:"users"` 14 | NextToken int64 `json:"next_token,omitempty"` 15 | Total int64 `json:"total"` 16 | } 17 | -------------------------------------------------------------------------------- /limits/leaky_buckets.go: -------------------------------------------------------------------------------- 1 | package limits 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/t2bot/go-leaky-bucket" 8 | "github.com/t2bot/matrix-media-repo/common/config" 9 | "github.com/t2bot/matrix-media-repo/common/rcontext" 10 | ) 11 | 12 | var buckets = make(map[string]*leaky.Bucket) 13 | var bucketLock = &sync.Mutex{} 14 | 15 | func GetBucket(ctx rcontext.RequestContext, subject string) (*leaky.Bucket, error) { 16 | if !config.Get().RateLimit.Enabled { 17 | return nil, nil 18 | } 19 | 20 | bucketLock.Lock() 21 | defer bucketLock.Unlock() 22 | 23 | bucket, ok := buckets[subject] 24 | if !ok { 25 | var err error 26 | bucket, err = leaky.NewBucket(config.Get().RateLimit.Buckets.Downloads.DrainBytesPerMinute, time.Minute, config.Get().RateLimit.Buckets.Downloads.CapacityBytes) 27 | if err != nil { 28 | return nil, err 29 | } 30 | bucket.OverflowLimit = config.Get().RateLimit.Buckets.Downloads.OverflowLimitBytes 31 | buckets[subject] = bucket 32 | } 33 | 34 | return bucket, nil 35 | } 36 | 37 | func ExpandBuckets() { 38 | bucketLock.Lock() 39 | defer bucketLock.Unlock() 40 | 41 | for _, bucket := range buckets { 42 | bucket.Capacity = config.Get().RateLimit.Buckets.Downloads.CapacityBytes 43 | bucket.DrainBy = config.Get().RateLimit.Buckets.Downloads.DrainBytesPerMinute 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /limits/rate_limiting.go: -------------------------------------------------------------------------------- 1 | package limits 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | "time" 7 | 8 | 
"github.com/didip/tollbooth/v7" 9 | "github.com/didip/tollbooth/v7/libstring" 10 | "github.com/didip/tollbooth/v7/limiter" 11 | "github.com/t2bot/matrix-media-repo/api/_responses" 12 | "github.com/t2bot/matrix-media-repo/common/config" 13 | ) 14 | 15 | var requestLimiter *limiter.Limiter 16 | 17 | func init() { 18 | requestLimiter = tollbooth.NewLimiter(0, nil) 19 | requestLimiter.SetIPLookups([]string{"X-Forwarded-For", "X-Real-IP", "RemoteAddr"}) 20 | requestLimiter.SetTokenBucketExpirationTTL(time.Hour) 21 | 22 | b, _ := json.Marshal(_responses.RateLimitReached()) 23 | requestLimiter.SetMessage(string(b)) 24 | requestLimiter.SetMessageContentType("application/json") 25 | } 26 | 27 | func GetRequestLimiter() *limiter.Limiter { 28 | requestLimiter.SetBurst(config.Get().RateLimit.BurstCount) 29 | requestLimiter.SetMax(config.Get().RateLimit.RequestsPerSecond) 30 | 31 | return requestLimiter 32 | } 33 | 34 | func GetRequestIP(r *http.Request) string { 35 | // Same implementation as tollbooth 36 | return libstring.RemoteIP(requestLimiter.GetIPLookups(), requestLimiter.GetForwardedForIndexFromBehind(), r) 37 | } 38 | -------------------------------------------------------------------------------- /matrix/errors.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | "github.com/t2bot/matrix-media-repo/common" 8 | ) 9 | 10 | type ErrorResponse struct { 11 | ErrorCode string `json:"errcode"` 12 | Message string `json:"error"` 13 | } 14 | 15 | func (e ErrorResponse) Error() string { 16 | return fmt.Sprintf("code=%s message=%s", e.ErrorCode, e.Message) 17 | } 18 | 19 | func filterError(err error) (error, error) { 20 | if err == nil { 21 | return nil, nil 22 | } 23 | 24 | // Unknown token errors should be filtered out explicitly to ensure we don't break on bad requests 25 | var httpErr *ErrorResponse 26 | if errors.As(err, &httpErr) { 27 | // We send back our own version of errors to ensure we can filter them out elsewhere 28 | if httpErr.ErrorCode == common.ErrCodeUnknownToken { 29 | return nil, ErrInvalidToken 30 | } else if httpErr.ErrorCode == common.ErrCodeNoGuests { 31 | return nil, ErrGuestToken 32 | } 33 | } 34 | 35 | return err, err 36 | } 37 | 38 | type ServerNotAllowedError struct { 39 | error 40 | ServerName string 41 | } 42 | 43 | func MakeServerNotAllowedError(serverName string) ServerNotAllowedError { 44 | return ServerNotAllowedError{ 45 | error: errors.New("server " + serverName + " is not allowed"), 46 | ServerName: serverName, 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /matrix/requests_auth.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/t2bot/matrix-media-repo/common/rcontext" 7 | ) 8 | 9 | var ErrInvalidToken = errors.New("missing or invalid access token") 10 | var ErrGuestToken = errors.New("token belongs to a guest") 11 | 12 | func GetUserIdFromToken(ctx rcontext.RequestContext, serverName string, accessToken string, appserviceUserId string, ipAddr string) (string, bool, error) { 13 | response := &userIdResponse{} 14 | err := doBreakerRequest(ctx, serverName, accessToken, appserviceUserId, ipAddr, "GET", "/_matrix/client/v3/account/whoami", response) 15 | if err != nil { 16 | return "", false, err 17 | } 18 | return response.UserId, response.IsGuest || response.IsGuest2, nil 19 | } 20 | 21 | func Logout(ctx 
rcontext.RequestContext, serverName string, accessToken string, appserviceUserId string, ipAddr string) error { 22 | response := &emptyResponse{} 23 | err := doBreakerRequest(ctx, serverName, accessToken, appserviceUserId, ipAddr, "POST", "/_matrix/client/v3/logout", response) 24 | if err != nil { 25 | return err 26 | } 27 | return nil 28 | } 29 | 30 | func LogoutAll(ctx rcontext.RequestContext, serverName string, accessToken string, appserviceUserId string, ipAddr string) error { 31 | response := &emptyResponse{} 32 | err := doBreakerRequest(ctx, serverName, accessToken, appserviceUserId, ipAddr, "POST", "/_matrix/client/v3/logout/all", response) 33 | if err != nil { 34 | return err 35 | } 36 | return nil 37 | } 38 | -------------------------------------------------------------------------------- /matrix/requests_info.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | import "github.com/t2bot/matrix-media-repo/common/rcontext" 4 | 5 | type ClientVersionsResponse struct { 6 | Versions []string `json:"versions"` 7 | UnstableFeatures map[string]bool `json:"unstable_features"` 8 | } 9 | 10 | func ClientVersions(ctx rcontext.RequestContext, serverName string, accessToken string, appserviceUserId string, ipAddr string) (*ClientVersionsResponse, error) { 11 | response := &ClientVersionsResponse{} 12 | err := doBreakerRequest(ctx, serverName, accessToken, appserviceUserId, ipAddr, "GET", "/_matrix/client/versions", response) 13 | if err != nil { 14 | return nil, err 15 | } 16 | return response, nil 17 | } 18 | -------------------------------------------------------------------------------- /matrix/responses.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | type emptyResponse struct { 4 | } 5 | 6 | type userIdResponse struct { 7 | UserId string `json:"user_id"` 8 | IsGuest bool `json:"org.matrix.msc3069.is_guest"` 9 | IsGuest2 bool `json:"is_guest"` 10 | } 11 | 12 | type whoisResponse struct { 13 | // We don't actually care about any of the fields here 14 | } 15 | 16 | type MediaListResponse struct { 17 | LocalMxcs []string `json:"local"` 18 | RemoteMxcs []string `json:"remote"` 19 | } 20 | 21 | type wellknownServerResponse struct { 22 | ServerAddr string `json:"m.server"` 23 | } 24 | -------------------------------------------------------------------------------- /matrix/signing_key_cache.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | import ( 4 | "crypto/ed25519" 5 | "os" 6 | "time" 7 | 8 | "github.com/patrickmn/go-cache" 9 | "github.com/t2bot/matrix-media-repo/homeserver_interop/mmr" 10 | ) 11 | 12 | type LocalSigningKey struct { 13 | Key ed25519.PrivateKey 14 | Version string 15 | } 16 | 17 | var localSigningKeyCache = cache.New(5*time.Minute, 10*time.Minute) 18 | 19 | func FlushSigningKeyCache() { 20 | localSigningKeyCache.Flush() 21 | } 22 | 23 | func getLocalSigningKey(fromPath string) (*LocalSigningKey, error) { 24 | if val, ok := localSigningKeyCache.Get(fromPath); ok { 25 | return val.(*LocalSigningKey), nil 26 | } 27 | 28 | f, err := os.Open(fromPath) 29 | defer f.Close() 30 | if err != nil { 31 | return nil, err 32 | } 33 | key, err := mmr.DecodeSigningKey(f) 34 | if err != nil { 35 | return nil, err 36 | } 37 | sk := &LocalSigningKey{ 38 | Key: key.PrivateKey, 39 | Version: key.KeyVersion, 40 | } 41 | localSigningKeyCache.Set(fromPath, sk, cache.DefaultExpiration) 42 | return sk, nil 43 | } 44 | 
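A brief sketch of how the cached key above might be consumed elsewhere in package matrix; getLocalSigningKey is unexported, so this only applies to callers inside the package, and the path and payload below are placeholders:

    sk, err := getLocalSigningKey("/data/mmr.signing.key")
    if err != nil {
        return err
    }
    signature := ed25519.Sign(sk.Key, requestJson) // e.g. when signing outbound federation requests
    _ = signature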
-------------------------------------------------------------------------------- /metrics/webserver.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "net" 7 | "net/http" 8 | "strconv" 9 | "time" 10 | 11 | "github.com/getsentry/sentry-go" 12 | 13 | "github.com/prometheus/client_golang/prometheus/promhttp" 14 | "github.com/sirupsen/logrus" 15 | "github.com/t2bot/matrix-media-repo/common/config" 16 | ) 17 | 18 | var srv *http.Server 19 | 20 | func internalHandler(res http.ResponseWriter, req *http.Request) { 21 | promhttp.Handler().ServeHTTP(res, req) 22 | } 23 | 24 | func Init() { 25 | if !config.Get().Metrics.Enabled { 26 | logrus.Info("Metrics disabled") 27 | return 28 | } 29 | rtr := http.NewServeMux() 30 | rtr.HandleFunc("/metrics", internalHandler) 31 | rtr.HandleFunc("/_media/metrics", internalHandler) 32 | 33 | address := net.JoinHostPort(config.Get().Metrics.BindAddress, strconv.Itoa(config.Get().Metrics.Port)) 34 | srv = &http.Server{Addr: address, Handler: rtr} 35 | go func() { 36 | //goland:noinspection HttpUrlsUsage 37 | logrus.WithField("address", address).Info("Started metrics listener. Listening at http://" + address) 38 | if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { 39 | sentry.CaptureException(err) 40 | logrus.Fatal(err) 41 | } 42 | }() 43 | } 44 | 45 | func Reload() { 46 | Stop() 47 | Init() 48 | } 49 | 50 | func Stop() { 51 | if srv != nil { 52 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 53 | defer cancel() 54 | if err := srv.Shutdown(ctx); err != nil { 55 | panic(err) 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /migrations/10_add_background_tasks_table_down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS background_tasks; 2 | -------------------------------------------------------------------------------- /migrations/10_add_background_tasks_table_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS background_tasks ( 2 | id SERIAL PRIMARY KEY, 3 | task TEXT NOT NULL, 4 | params JSON NOT NULL, 5 | start_ts BIGINT NOT NULL, 6 | end_ts BIGINT NULL 7 | ); 8 | -------------------------------------------------------------------------------- /migrations/11_add_reserved_ids_table_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS reserved_media_index; 2 | DROP TABLE IF EXISTS reserved_media; 3 | -------------------------------------------------------------------------------- /migrations/11_add_reserved_ids_table_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS reserved_media ( 2 | origin TEXT NOT NULL, 3 | media_id TEXT NOT NULL, 4 | reason TEXT NOT NULL 5 | ); 6 | CREATE UNIQUE INDEX IF NOT EXISTS reserved_media_index ON reserved_media (media_id, origin); 7 | -------------------------------------------------------------------------------- /migrations/12_user_id_indexes_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_user_id_media; 2 | -------------------------------------------------------------------------------- /migrations/12_user_id_indexes_up.sql: 
-------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS idx_user_id_media ON media(user_id); 2 | -------------------------------------------------------------------------------- /migrations/13_add_export_tables_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS export_parts_index; 2 | DROP TABLE IF EXISTS export_parts; 3 | DROP TABLE IF EXISTS exports; 4 | -------------------------------------------------------------------------------- /migrations/13_add_export_tables_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS exports ( 2 | export_id TEXT PRIMARY KEY NOT NULL, 3 | entity TEXT NOT NULL 4 | ); 5 | CREATE TABLE IF NOT EXISTS export_parts ( 6 | export_id TEXT NOT NULL, 7 | index INT NOT NULL, 8 | size_bytes BIGINT NOT NULL, 9 | file_name TEXT NOT NULL, 10 | datastore_id TEXT NOT NULL, 11 | location TEXT NOT NULL 12 | ); 13 | CREATE UNIQUE INDEX IF NOT EXISTS export_parts_index ON export_parts (export_id, index); 14 | -------------------------------------------------------------------------------- /migrations/14_add_blurhash_tables_down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS blurhashes; 2 | -------------------------------------------------------------------------------- /migrations/14_add_blurhash_tables_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS blurhashes ( 2 | sha256_hash TEXT PRIMARY KEY NOT NULL, 3 | blurhash TEXT NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /migrations/15_add_language_url_previews_down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE url_previews DROP COLUMN language_header; 2 | -------------------------------------------------------------------------------- /migrations/15_add_language_url_previews_up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE url_previews ADD COLUMN language_header TEXT NULL DEFAULT NULL; 2 | -------------------------------------------------------------------------------- /migrations/16_add_media_attributes_table_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_media_attributes_purpose; 2 | DROP INDEX IF EXISTS idx_media_attributes; 3 | DROP TABLE IF EXISTS media_attributes; 4 | -------------------------------------------------------------------------------- /migrations/16_add_media_attributes_table_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS media_attributes ( 2 | origin TEXT NOT NULL, 3 | media_id TEXT NOT NULL, 4 | purpose TEXT NOT NULL 5 | ); 6 | CREATE UNIQUE INDEX IF NOT EXISTS idx_media_attributes ON media_attributes (media_id, origin); 7 | CREATE INDEX IF NOT EXISTS idx_media_attributes_purpose ON media_attributes (purpose); 8 | -------------------------------------------------------------------------------- /migrations/17_add_user_stats_table_down.sql: -------------------------------------------------------------------------------- 1 | DROP TRIGGER IF EXISTS media_change_for_user ON media; 2 | DROP FUNCTION IF EXISTS track_update_user_media(); 3 | DROP TABLE IF EXISTS user_stats; 4 |
-------------------------------------------------------------------------------- /migrations/17_add_user_stats_table_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS user_stats ( 2 | user_id TEXT PRIMARY KEY NOT NULL, 3 | uploaded_bytes BIGINT NOT NULL 4 | ); 5 | CREATE OR REPLACE FUNCTION track_update_user_media() 6 | RETURNS TRIGGER 7 | LANGUAGE PLPGSQL 8 | AS 9 | $$ 10 | BEGIN 11 | IF TG_OP = 'UPDATE' THEN 12 | INSERT INTO user_stats (user_id, uploaded_bytes) VALUES (NEW.user_id, 0) ON CONFLICT (user_id) DO NOTHING; 13 | INSERT INTO user_stats (user_id, uploaded_bytes) VALUES (OLD.user_id, 0) ON CONFLICT (user_id) DO NOTHING; 14 | 15 | IF NEW.user_id <> OLD.user_id THEN 16 | UPDATE user_stats SET uploaded_bytes = user_stats.uploaded_bytes - OLD.size_bytes WHERE user_stats.user_id = OLD.user_id; 17 | UPDATE user_stats SET uploaded_bytes = user_stats.uploaded_bytes + NEW.size_bytes WHERE user_stats.user_id = NEW.user_id; 18 | ELSIF NEW.size_bytes <> OLD.size_bytes THEN 19 | UPDATE user_stats SET uploaded_bytes = user_stats.uploaded_bytes - OLD.size_bytes + NEW.size_bytes WHERE user_stats.user_id = NEW.user_id; 20 | END IF; 21 | RETURN NEW; 22 | ELSIF TG_OP = 'DELETE' THEN 23 | UPDATE user_stats SET uploaded_bytes = user_stats.uploaded_bytes - OLD.size_bytes WHERE user_stats.user_id = OLD.user_id; 24 | RETURN OLD; 25 | ELSIF TG_OP = 'INSERT' THEN 26 | INSERT INTO user_stats (user_id, uploaded_bytes) VALUES (NEW.user_id, NEW.size_bytes) ON CONFLICT (user_id) DO UPDATE SET uploaded_bytes = user_stats.uploaded_bytes + NEW.size_bytes; 27 | RETURN NEW; 28 | END IF; 29 | END; 30 | $$; 31 | DROP TRIGGER IF EXISTS media_change_for_user ON media; 32 | CREATE TRIGGER media_change_for_user AFTER INSERT OR UPDATE OR DELETE ON media FOR EACH ROW EXECUTE PROCEDURE track_update_user_media(); 33 | -------------------------------------------------------------------------------- /migrations/18_populate_user_stats_table_down.sql: -------------------------------------------------------------------------------- 1 | -- Nothing 2 | -------------------------------------------------------------------------------- /migrations/18_populate_user_stats_table_up.sql: -------------------------------------------------------------------------------- 1 | DO $$ 2 | BEGIN 3 | IF ((SELECT COUNT(*) FROM user_stats)) = 0 THEN 4 | INSERT INTO user_stats SELECT user_id, SUM(size_bytes) FROM media GROUP BY user_id; 5 | END IF; 6 | END $$; 7 | -------------------------------------------------------------------------------- /migrations/19_create_expiring_media_table_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_expiring_media; 2 | DROP INDEX IF EXISTS idx_expiring_media_user_id; 3 | DROP INDEX IF EXISTS idx_expiring_media_expires_ts; 4 | DROP TABLE IF EXISTS expiring_media; -------------------------------------------------------------------------------- /migrations/19_create_expiring_media_table_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS expiring_media ( 2 | origin TEXT NOT NULL, 3 | media_id TEXT NOT NULL, 4 | user_id TEXT NOT NULL, 5 | expires_ts BIGINT NOT NULL 6 | ); 7 | CREATE UNIQUE INDEX IF NOT EXISTS idx_expiring_media ON expiring_media (media_id, origin); 8 | CREATE INDEX IF NOT EXISTS idx_expiring_media_user_id ON expiring_media (user_id); 9 | CREATE INDEX IF NOT EXISTS idx_expiring_media_expires_ts ON 
expiring_media (expires_ts); -------------------------------------------------------------------------------- /migrations/1_create_tables_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS media_index; 2 | DROP INDEX IF EXISTS thumbnails_index; 3 | DROP INDEX IF EXISTS url_previews_index; 4 | DROP TABLE IF EXISTS url_previews; 5 | DROP TABLE IF EXISTS thumbnails; 6 | DROP TABLE IF EXISTS media; 7 | -------------------------------------------------------------------------------- /migrations/1_create_tables_up.sql: -------------------------------------------------------------------------------- 1 | -- MEDIA 2 | CREATE TABLE IF NOT EXISTS media ( 3 | origin TEXT NOT NULL, 4 | media_id TEXT NOT NULL, 5 | upload_name TEXT NOT NULL, 6 | content_type TEXT NOT NULL, 7 | user_id TEXT NOT NULL, 8 | sha256_hash TEXT NOT NULL, 9 | size_bytes BIGINT NOT NULL, 10 | location TEXT NOT NULL, 11 | creation_ts BIGINT NOT NULL 12 | ); 13 | CREATE UNIQUE INDEX IF NOT EXISTS media_index ON media (media_id, origin); 14 | 15 | -- THUMBNAILS 16 | CREATE TABLE IF NOT EXISTS thumbnails ( 17 | origin TEXT NOT NULL, 18 | media_id TEXT NOT NULL, 19 | width INT NOT NULL, 20 | height INT NOT NULL, 21 | method TEXT NOT NULL, 22 | content_type TEXT NOT NULL, 23 | size_bytes BIGINT NOT NULL, 24 | location TEXT NOT NULL, 25 | creation_ts BIGINT NOT NULL 26 | ); 27 | CREATE UNIQUE INDEX IF NOT EXISTS thumbnails_index ON thumbnails (media_id, origin, width, height, method); 28 | 29 | -- URL PREVIEWS 30 | CREATE TABLE IF NOT EXISTS url_previews ( 31 | url TEXT NOT NULL, 32 | error_code TEXT NOT NULL, 33 | bucket_ts BIGINT NOT NULL, 34 | site_url TEXT NOT NULL, 35 | site_name TEXT NOT NULL, 36 | resource_type TEXT NOT NULL, 37 | description TEXT NOT NULL, 38 | title TEXT NOT NULL, 39 | image_mxc TEXT NOT NULL, 40 | image_type TEXT NOT NULL, 41 | image_size BIGINT NOT NULL, 42 | image_width INT NOT NULL, 43 | image_height INT NOT NULL 44 | ); 45 | CREATE UNIQUE INDEX IF NOT EXISTS url_previews_index ON url_previews (url, error_code, bucket_ts); 46 | -------------------------------------------------------------------------------- /migrations/20_create_id_hold_table_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_media_id_hold; 2 | DROP TABLE IF EXISTS media_id_hold; -------------------------------------------------------------------------------- /migrations/20_create_id_hold_table_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS media_id_hold ( 2 | origin TEXT NOT NULL, 3 | media_id TEXT NOT NULL, 4 | reason TEXT NOT NULL 5 | ); 6 | CREATE UNIQUE INDEX IF NOT EXISTS idx_media_id_hold ON media_id_hold (media_id, origin); -------------------------------------------------------------------------------- /migrations/21_not_null_end_ts_background_tasks_down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE background_tasks ALTER COLUMN end_ts DROP NOT NULL; 2 | UPDATE background_tasks SET end_ts = NULL WHERE end_ts = 0; -------------------------------------------------------------------------------- /migrations/21_not_null_end_ts_background_tasks_up.sql: -------------------------------------------------------------------------------- 1 | UPDATE background_tasks SET end_ts = 0 WHERE end_ts IS NULL; 2 | ALTER TABLE background_tasks ALTER COLUMN end_ts SET NOT NULL;
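(Illustrative aside, not a file from this repository: after migration 21 above, end_ts is NOT NULL and an unfinished background task is stored with end_ts = 0, since the up migration rewrites existing NULLs to 0. A sketch of the resulting "unfinished tasks" query, using the column names from migration 10:)
-- hypothetical query; not part of the migrations
SELECT id, task, params, start_ts FROM background_tasks WHERE end_ts = 0;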
-------------------------------------------------------------------------------- /migrations/22_add_thumb_creation_index_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_creation_ts_thumbnails; 2 | -------------------------------------------------------------------------------- /migrations/22_add_thumb_creation_index_up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS idx_creation_ts_thumbnails ON thumbnails(creation_ts); 2 | -------------------------------------------------------------------------------- /migrations/23_add_datastore_locations_indexes_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_datastore_id_location_thumbnails; 2 | DROP INDEX IF EXISTS idx_datastore_id_location_media; 3 | -------------------------------------------------------------------------------- /migrations/23_add_datastore_locations_indexes_up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS idx_datastore_id_location_thumbnails ON thumbnails(datastore_id, location); 2 | CREATE INDEX IF NOT EXISTS idx_datastore_id_location_media ON media(datastore_id, location); 3 | -------------------------------------------------------------------------------- /migrations/24_add_timestamp_to_media_id_hold_down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE media_id_hold DROP COLUMN held_ts; 2 | -------------------------------------------------------------------------------- /migrations/24_add_timestamp_to_media_id_hold_up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE media_id_hold ADD COLUMN held_ts BIGINT NOT NULL DEFAULT 0; 2 | -------------------------------------------------------------------------------- /migrations/25_try_create_expiring_media_table_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_expiring_media; 2 | DROP INDEX IF EXISTS idx_expiring_media_user_id; 3 | DROP INDEX IF EXISTS idx_expiring_media_expires_ts; 4 | DROP TABLE IF EXISTS expiring_media; -------------------------------------------------------------------------------- /migrations/25_try_create_expiring_media_table_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS expiring_media ( 2 | origin TEXT NOT NULL, 3 | media_id TEXT NOT NULL, 4 | user_id TEXT NOT NULL, 5 | expires_ts BIGINT NOT NULL 6 | ); 7 | CREATE UNIQUE INDEX IF NOT EXISTS idx_expiring_media ON expiring_media (media_id, origin); 8 | CREATE INDEX IF NOT EXISTS idx_expiring_media_user_id ON expiring_media (user_id); 9 | CREATE INDEX IF NOT EXISTS idx_expiring_media_expires_ts ON expiring_media (expires_ts); -------------------------------------------------------------------------------- /migrations/26_add_datastore_id_indexes_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_datastore_id_thumbnails; 2 | DROP INDEX IF EXISTS idx_datastore_id_media; 3 | -------------------------------------------------------------------------------- /migrations/26_add_datastore_id_indexes_up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS 
idx_datastore_id_thumbnails ON thumbnails(datastore_id); 2 | CREATE INDEX IF NOT EXISTS idx_datastore_id_media ON media(datastore_id); 3 | -------------------------------------------------------------------------------- /migrations/27_drop_blurhashes_down.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS blurhashes ( 2 | sha256_hash TEXT PRIMARY KEY NOT NULL, 3 | blurhash TEXT NOT NULL 4 | ); 5 | -------------------------------------------------------------------------------- /migrations/27_drop_blurhashes_up.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS blurhashes; 2 | -------------------------------------------------------------------------------- /migrations/28_add_task_error_column_down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE background_tasks DROP COLUMN error; 2 | -------------------------------------------------------------------------------- /migrations/28_add_task_error_column_up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE background_tasks ADD COLUMN error TEXT NOT NULL DEFAULT ''; 2 | -------------------------------------------------------------------------------- /migrations/29_create_media_restrictions_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_restricted_media; 2 | DROP TABLE IF EXISTS restricted_media; -------------------------------------------------------------------------------- /migrations/29_create_media_restrictions_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS restricted_media (origin TEXT NOT NULL, media_id TEXT NOT NULL, condition_type TEXT NOT NULL, condition_value TEXT NOT NULL); 2 | CREATE UNIQUE INDEX IF NOT EXISTS idx_restricted_media ON restricted_media (origin, media_id); -------------------------------------------------------------------------------- /migrations/2_add_animated_col_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS thumbnails_index; 2 | CREATE UNIQUE INDEX IF NOT EXISTS thumbnails_index ON thumbnails (media_id, origin, width, height, method); 3 | ALTER TABLE thumbnails DROP COLUMN animated; 4 | -------------------------------------------------------------------------------- /migrations/2_add_animated_col_up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE thumbnails ADD COLUMN animated BOOL NOT NULL DEFAULT FALSE; 2 | DROP INDEX IF EXISTS thumbnails_index; 3 | CREATE UNIQUE INDEX IF NOT EXISTS thumbnails_index ON thumbnails (media_id, origin, width, height, method, animated); 4 | -------------------------------------------------------------------------------- /migrations/3_add_quarantine_flag_down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE media DROP COLUMN quarantined; 2 | -------------------------------------------------------------------------------- /migrations/3_add_quarantine_flag_up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE media ADD COLUMN quarantined BOOL NOT NULL DEFAULT FALSE; 2 | -------------------------------------------------------------------------------- /migrations/4_add_hash_to_thumbnails_down.sql: 
-------------------------------------------------------------------------------- 1 | ALTER TABLE thumbnails DROP COLUMN sha256_hash; 2 | -------------------------------------------------------------------------------- /migrations/4_add_hash_to_thumbnails_up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE thumbnails ADD COLUMN sha256_hash TEXT NULL DEFAULT NULL; 2 | -------------------------------------------------------------------------------- /migrations/5_make_thumbnail_hash_required_down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE thumbnails ALTER COLUMN sha256_hash DROP NOT NULL; 2 | UPDATE thumbnails SET sha256_hash = NULL WHERE sha256_hash = ''; -------------------------------------------------------------------------------- /migrations/5_make_thumbnail_hash_required_up.sql: -------------------------------------------------------------------------------- 1 | UPDATE thumbnails SET sha256_hash = '' WHERE sha256_hash IS NULL; 2 | ALTER TABLE thumbnails ALTER COLUMN sha256_hash SET NOT NULL; 3 | -------------------------------------------------------------------------------- /migrations/6_track_last_accessed_times_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS last_access_index; 2 | DROP TABLE IF EXISTS last_access; 3 | -------------------------------------------------------------------------------- /migrations/6_track_last_accessed_times_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS last_access ( 2 | sha256_hash TEXT NOT NULL, 3 | last_access_ts BIGINT NOT NULL 4 | ); 5 | CREATE UNIQUE INDEX IF NOT EXISTS last_access_index ON last_access (sha256_hash); -------------------------------------------------------------------------------- /migrations/7_add_datastore_down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE media DROP COLUMN datastore_id; 2 | ALTER TABLE thumbnails DROP COLUMN datastore_id; 3 | DROP INDEX IF EXISTS datastores_index; 4 | DROP TABLE IF EXISTS datastores; 5 | -------------------------------------------------------------------------------- /migrations/7_add_datastore_up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS datastores ( 2 | datastore_id TEXT NOT NULL, 3 | ds_type TEXT NOT NULL, 4 | uri TEXT NOT NULL 5 | ); 6 | CREATE UNIQUE INDEX IF NOT EXISTS datastores_index ON datastores (datastore_id); 7 | 8 | ALTER TABLE media ADD COLUMN datastore_id TEXT NOT NULL DEFAULT ''; 9 | ALTER TABLE thumbnails ADD COLUMN datastore_id TEXT NOT NULL DEFAULT ''; 10 | 11 | -------------------------------------------------------------------------------- /migrations/8_sha256_indexes_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_sha256_hash_media; 2 | DROP INDEX IF EXISTS idx_sha256_hash_thumbnails; 3 | -------------------------------------------------------------------------------- /migrations/8_sha256_indexes_up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS idx_sha256_hash_media ON media (sha256_hash); 2 | CREATE INDEX IF NOT EXISTS idx_sha256_hash_thumbnails ON thumbnails (sha256_hash); 3 | --------------------------------------------------------------------------------
/migrations/9_origin_indexes_down.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS idx_origin_media; 2 | DROP INDEX IF EXISTS idx_origin_thumbnails; 3 | DROP INDEX IF EXISTS idx_origin_user_id_media; 4 | -------------------------------------------------------------------------------- /migrations/9_origin_indexes_up.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS idx_origin_media ON media(origin); 2 | CREATE INDEX IF NOT EXISTS idx_origin_thumbnails ON thumbnails(origin); 3 | CREATE INDEX IF NOT EXISTS idx_origin_user_id_media ON media(origin, user_id); 4 | -------------------------------------------------------------------------------- /notifier/tasks.go: -------------------------------------------------------------------------------- 1 | package notifier 2 | 3 | import ( 4 | "strconv" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/sirupsen/logrus" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/database" 10 | "github.com/t2bot/matrix-media-repo/redislib" 11 | ) 12 | 13 | type TaskId int 14 | 15 | const tasksNotifyRedisChannel = "mmr:bg_tasks" 16 | 17 | func SubscribeToTasks() <-chan TaskId { 18 | ch := redislib.Subscribe(tasksNotifyRedisChannel) 19 | if ch == nil { 20 | return nil 21 | } 22 | 23 | retCh := make(chan TaskId) 24 | go func() { 25 | for val := range ch { 26 | if i, err := strconv.Atoi(val); err != nil { 27 | sentry.CaptureException(err) 28 | logrus.Error("Internal error handling tasks subscribe: ", err) 29 | } else { 30 | retCh <- TaskId(i) 31 | } 32 | } 33 | }() 34 | return retCh 35 | } 36 | 37 | func TaskScheduled(ctx rcontext.RequestContext, task *database.DbTask) error { 38 | return redislib.Publish(ctx, tasksNotifyRedisChannel, strconv.Itoa(task.TaskId)) 39 | } 40 | -------------------------------------------------------------------------------- /pgo_internal/pgo.go: -------------------------------------------------------------------------------- 1 | package pgo_internal 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/t2bot/pgo-fleet/embedded" 8 | ) 9 | 10 | func init() { 11 | pgo.ErrorFunc = func(err error) { 12 | sentry.CaptureException(err) 13 | } 14 | } 15 | 16 | func Enable(submitUrl string, submitKey string) { 17 | endpoint, err := pgo.NewCollectorEndpoint(submitUrl, submitKey) 18 | if err != nil { 19 | panic(err) 20 | } 21 | 22 | pgo.Enable(1*time.Hour, 5*time.Minute, endpoint) 23 | } 24 | 25 | func Disable() { 26 | pgo.Disable() 27 | } 28 | -------------------------------------------------------------------------------- /pgo_media_repo.pprof: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/t2bot/matrix-media-repo/ed4f3181a080fbfe6c37fa7fd456c7dd9e97d8c6/pgo_media_repo.pprof -------------------------------------------------------------------------------- /pipelines/_steps/download/open_stream.go: -------------------------------------------------------------------------------- 1 | package download 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | 7 | "github.com/t2bot/matrix-media-repo/common/config" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/database" 10 | "github.com/t2bot/matrix-media-repo/datastores" 11 | "github.com/t2bot/matrix-media-repo/redislib" 12 | "github.com/t2bot/matrix-media-repo/util/readers" 13 | ) 14 | 15 | func 
OpenStream(ctx rcontext.RequestContext, media *database.Locatable) (io.ReadSeekCloser, error) { 16 | reader, ds, err := doOpenStream(ctx, media, false) 17 | if err != nil { 18 | return nil, err 19 | } 20 | if reader != nil { 21 | ctx.Log.Debugf("Got %s from cache", media.Sha256Hash) 22 | return readers.NopSeekCloser(reader), nil 23 | } 24 | 25 | return datastores.Download(ctx, ds, media.Location) 26 | } 27 | 28 | func OpenOrRedirect(ctx rcontext.RequestContext, media *database.Locatable) (io.ReadSeekCloser, error) { 29 | reader, ds, err := doOpenStream(ctx, media, true) 30 | if err != nil { 31 | return nil, err 32 | } 33 | if reader != nil { 34 | ctx.Log.Debugf("Got %s from cache", media.Sha256Hash) 35 | return readers.NopSeekCloser(reader), nil 36 | } 37 | 38 | return datastores.DownloadOrRedirect(ctx, ds, media.Location) 39 | } 40 | 41 | func doOpenStream(ctx rcontext.RequestContext, media *database.Locatable, canRedirect bool) (io.ReadSeekCloser, config.DatastoreConfig, error) { 42 | ds, ok := datastores.Get(ctx, media.DatastoreId) 43 | if !ok { 44 | return nil, ds, errors.New("unable to locate datastore for media") 45 | } 46 | 47 | redirectWhenCached, err := datastores.WouldRedirectWhenCached(ctx, ds) 48 | if err != nil { 49 | ctx.Log.Warn("Unable to determine if cache would be ignored: ", err) 50 | redirectWhenCached = false 51 | } 52 | 53 | if !redirectWhenCached || !canRedirect { 54 | reader, err := redislib.TryGetMedia(ctx, media.Sha256Hash) 55 | if err != nil || reader != nil { 56 | ctx.Log.Debugf("Got %s from cache", media.Sha256Hash) 57 | return readers.NopSeekCloser(reader), config.DatastoreConfig{}, err 58 | } 59 | } else { 60 | ctx.Log.Debugf("Ignoring cache for %s", media.Sha256Hash) 61 | } 62 | 63 | return nil, ds, nil 64 | } 65 | -------------------------------------------------------------------------------- /pipelines/_steps/download/wait.go: -------------------------------------------------------------------------------- 1 | package download 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/common" 5 | "github.com/t2bot/matrix-media-repo/common/rcontext" 6 | "github.com/t2bot/matrix-media-repo/database" 7 | "github.com/t2bot/matrix-media-repo/notifier" 8 | ) 9 | 10 | func WaitForAsyncMedia(ctx rcontext.RequestContext, origin string, mediaId string) (*database.DbMedia, error) { 11 | db := database.GetInstance().ExpiringMedia.Prepare(ctx) 12 | record, err := db.Get(origin, mediaId) 13 | if err != nil { 14 | return nil, err 15 | } 16 | if record == nil || record.IsExpired() { 17 | return nil, nil // there's not going to be a record 18 | } 19 | 20 | ch, finish := notifier.GetUploadWaitChannel(origin, mediaId) 21 | defer finish() 22 | select { 23 | case <-ctx.Context.Done(): 24 | return nil, common.ErrMediaNotYetUploaded 25 | case val := <-ch: 26 | return val, nil 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /pipelines/_steps/meta/flag_access.go: -------------------------------------------------------------------------------- 1 | package meta 2 | 3 | import ( 4 | "github.com/getsentry/sentry-go" 5 | "github.com/t2bot/matrix-media-repo/common/rcontext" 6 | "github.com/t2bot/matrix-media-repo/database" 7 | "github.com/t2bot/matrix-media-repo/metrics" 8 | "github.com/t2bot/matrix-media-repo/util" 9 | ) 10 | 11 | func FlagAccess(ctx rcontext.RequestContext, sha256hash string, uploadTime int64) { 12 | if uploadTime > 0 { 13 | metrics.MediaAgeAccessed.Observe(float64(util.NowMillis()-uploadTime) / 1000.0) 14 | } 15 | if 
err := database.GetInstance().LastAccess.Prepare(ctx).Upsert(sha256hash, util.NowMillis()); err != nil { 16 | ctx.Log.Warnf("Non-fatal error while updating last access for '%s': %s", sha256hash, err.Error()) 17 | sentry.CaptureException(err) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /pipelines/_steps/quarantine/logic.go: -------------------------------------------------------------------------------- 1 | package quarantine 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/t2bot/matrix-media-repo/common" 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | ) 9 | 10 | func ReturnAppropriateThing(ctx rcontext.RequestContext, isDownload bool, recordOnly bool, width int, height int) (io.ReadCloser, error) { 11 | flag := ctx.Config.Quarantine.ReplaceDownloads 12 | if !isDownload { 13 | flag = ctx.Config.Quarantine.ReplaceThumbnails 14 | } 15 | if !flag || recordOnly { 16 | return nil, common.ErrMediaQuarantined 17 | } else { 18 | return MakeThumbnail(ctx, width, height) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /pipelines/_steps/upload/deduplicate.go: -------------------------------------------------------------------------------- 1 | package upload 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/common/rcontext" 5 | "github.com/t2bot/matrix-media-repo/database" 6 | ) 7 | 8 | func FindRecord(ctx rcontext.RequestContext, hash string, userId string, contentType string, fileName string) (*database.DbMedia, bool, error) { 9 | mediaDb := database.GetInstance().Media.Prepare(ctx) 10 | records, err := mediaDb.GetByHash(hash) 11 | if err != nil { 12 | return nil, false, err 13 | } 14 | var perfectMatch *database.DbMedia = nil 15 | var hashMatch *database.DbMedia = nil 16 | for _, r := range records { 17 | if hashMatch == nil { 18 | hashMatch = r 19 | } 20 | if r.UserId == userId && r.ContentType == contentType && r.UploadName == fileName { 21 | perfectMatch = r 22 | break 23 | } 24 | } 25 | if perfectMatch != nil { 26 | return perfectMatch, true, nil 27 | } else { 28 | return hashMatch, false, nil 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /pipelines/_steps/upload/generate_media_id.go: -------------------------------------------------------------------------------- 1 | package upload 2 | 3 | import ( 4 | "errors", 5 | 6 | "github.com/t2bot/matrix-media-repo/common/config" 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | "github.com/t2bot/matrix-media-repo/database" 9 | "github.com/t2bot/matrix-media-repo/util/ids" 10 | ) 11 | 12 | func GenerateMediaId(ctx rcontext.RequestContext, origin string) (string, error) { 13 | if config.Runtime.IsImportProcess { 14 | return "", errors.New("media IDs should not be generated from import processes") 15 | } 16 | heldDb := database.GetInstance().HeldMedia.Prepare(ctx) 17 | mediaDb := database.GetInstance().Media.Prepare(ctx) 18 | reservedDb := database.GetInstance().ReservedMedia.Prepare(ctx) 19 | var mediaId string 20 | var err error 21 | var exists bool 22 | attempts := 0 23 | for { 24 | attempts += 1 25 | if attempts > 10 { 26 | return "", errors.New("internal limit reached: unable to generate media ID") 27 | } 28 | 29 | mediaId, err = ids.NewUniqueId() 30 | if err != nil { return "", err } 31 | err = heldDb.TryInsert(origin, mediaId, database.ForCreateHeldReason) 32 | if err != nil { 33 | return "", err 34 | } 35 | 36 | // Check if there's a media table record for this media as well (there shouldn't
be) 37 | exists, err = mediaDb.IdExists(origin, mediaId) 38 | if err != nil { 39 | return "", err 40 | } 41 | if exists { 42 | continue 43 | } 44 | 45 | // Also check to see if the media ID is reserved due to a past action 46 | exists, err = reservedDb.IdExists(origin, mediaId) 47 | if err != nil { 48 | return "", err 49 | } 50 | if exists { 51 | continue 52 | } 53 | 54 | return mediaId, nil 55 | } 56 | return "", errors.New("internal limit reached: fell out of media ID generation loop") 57 | } 58 | -------------------------------------------------------------------------------- /pipelines/_steps/upload/limit.go: -------------------------------------------------------------------------------- 1 | package upload 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/t2bot/matrix-media-repo/common/rcontext" 7 | "github.com/t2bot/matrix-media-repo/util/readers" 8 | ) 9 | 10 | func LimitStream(ctx rcontext.RequestContext, r io.ReadCloser) io.ReadCloser { 11 | if ctx.Config.Uploads.MaxSizeBytes > 0 { 12 | return readers.LimitReaderWithOverrunError(r, ctx.Config.Uploads.MaxSizeBytes) 13 | } else { 14 | return r 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /pipelines/_steps/upload/lock.go: -------------------------------------------------------------------------------- 1 | package upload 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "time" 7 | 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/redislib" 10 | ) 11 | 12 | const maxLockAttemptTime = 30 * time.Second 13 | 14 | func LockForUpload(ctx rcontext.RequestContext, hash string) (func() error, error) { 15 | mutex := redislib.GetMutex(hash, 5*time.Minute) 16 | if mutex != nil { 17 | attemptDoneAt := time.Now().Add(maxLockAttemptTime) 18 | acquired := false 19 | for !acquired { 20 | if chErr := ctx.Context.Err(); chErr != nil { 21 | return nil, chErr 22 | } 23 | if err := mutex.LockContext(ctx.Context); err != nil { 24 | if time.Now().After(attemptDoneAt) { 25 | return nil, errors.New("failed to acquire upload lock: " + err.Error()) 26 | } else { 27 | ctx.Log.Warn("failed to acquire upload lock: ", err) 28 | } 29 | } else { 30 | acquired = true 31 | } 32 | } 33 | if !acquired { 34 | return nil, errors.New("failed to acquire upload lock: timeout") 35 | } 36 | ctx.Log.Debugf("Lock acquired until %s", mutex.Until().UTC()) 37 | return func() error { 38 | ctx.Log.Debug("Unlocking upload lock") 39 | // We use a background context here to prevent a cancelled context from keeping the lock open 40 | if ok, err := mutex.UnlockContext(context.Background()); !ok || err != nil { 41 | ctx.Log.Warn("Did not get quorum on unlock: ", err) 42 | return err 43 | } 44 | return nil 45 | }, nil 46 | } else { 47 | ctx.Log.Warn("Continuing upload without lock! 
Set up Redis to make this warning go away.") 48 | return func() error { return nil }, nil 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /pipelines/_steps/upload/quarantine.go: -------------------------------------------------------------------------------- 1 | package upload 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/common" 5 | "github.com/t2bot/matrix-media-repo/common/rcontext" 6 | "github.com/t2bot/matrix-media-repo/database" 7 | ) 8 | 9 | func CheckQuarantineStatus(ctx rcontext.RequestContext, hash string) error { 10 | q, err := database.GetInstance().Media.Prepare(ctx).IsHashQuarantined(hash) 11 | if err != nil { 12 | return err 13 | } 14 | if q { 15 | return common.ErrMediaQuarantined 16 | } 17 | return nil 18 | } 19 | -------------------------------------------------------------------------------- /pipelines/_steps/upload/redis_async.go: -------------------------------------------------------------------------------- 1 | package upload 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | "github.com/t2bot/matrix-media-repo/redislib" 9 | ) 10 | 11 | func PopulateCacheAsync(ctx rcontext.RequestContext, reader io.Reader, size int64, sha256hash string) chan struct{} { 12 | var err error 13 | opChan := make(chan struct{}) 14 | go func() { 15 | //goland:noinspection GoUnhandledErrorResult 16 | defer io.Copy(io.Discard, reader) // we need to flush the reader as we might end up blocking the upload 17 | defer close(opChan) 18 | 19 | err = redislib.StoreMedia(ctx, sha256hash, reader, size) 20 | if err != nil { 21 | ctx.Log.Debug("Not populating cache due to error: ", err) 22 | sentry.CaptureException(err) 23 | return 24 | } 25 | }() 26 | return opChan 27 | } 28 | -------------------------------------------------------------------------------- /pipelines/_steps/upload/spam.go: -------------------------------------------------------------------------------- 1 | package upload 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/t2bot/matrix-media-repo/common/rcontext" 7 | "github.com/t2bot/matrix-media-repo/plugins" 8 | ) 9 | 10 | type FileMetadata struct { 11 | Name string 12 | ContentType string 13 | UserId string 14 | Origin string 15 | MediaId string 16 | } 17 | 18 | type SpamResponse struct { 19 | Err error 20 | IsSpam bool 21 | } 22 | 23 | func CheckSpamAsync(ctx rcontext.RequestContext, reader io.Reader, metadata FileMetadata) chan SpamResponse { 24 | opChan := make(chan SpamResponse) 25 | go func() { 26 | //goland:noinspection GoUnhandledErrorResult 27 | defer io.Copy(io.Discard, reader) // we need to flush the reader as we might end up blocking the upload 28 | 29 | spam, err := plugins.CheckForSpam(reader, metadata.Name, metadata.ContentType, metadata.UserId, metadata.Origin, metadata.MediaId) 30 | go func() { 31 | // run async to avoid deadlock 32 | opChan <- SpamResponse{ 33 | Err: err, 34 | IsSpam: spam, 35 | } 36 | }() 37 | }() 38 | return opChan 39 | } 40 | -------------------------------------------------------------------------------- /pipelines/_steps/url_preview/preview.go: -------------------------------------------------------------------------------- 1 | package url_preview 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/t2bot/matrix-media-repo/common/rcontext" 7 | "github.com/t2bot/matrix-media-repo/pool" 8 | "github.com/t2bot/matrix-media-repo/url_previewing/m" 9 | "github.com/t2bot/matrix-media-repo/url_previewing/p" 10 | ) 11 | 12 | 
type generateResult struct { 13 | preview m.PreviewResult 14 | err error 15 | } 16 | 17 | func Preview(ctx rcontext.RequestContext, targetUrl *m.UrlPayload, languageHeader string) (m.PreviewResult, error) { 18 | ch := make(chan generateResult) 19 | defer close(ch) 20 | fn := func() { 21 | var preview m.PreviewResult 22 | err := m.ErrPreviewUnsupported 23 | 24 | // Try oEmbed first 25 | if ctx.Config.UrlPreviews.OEmbed { 26 | ctx.Log.Debug("Trying oEmbed previewer") 27 | preview, err = p.GenerateOEmbedPreview(targetUrl, languageHeader, ctx) 28 | } 29 | 30 | // Try OpenGraph if that failed 31 | if errors.Is(err, m.ErrPreviewUnsupported) { 32 | ctx.Log.Debug("Trying OpenGraph previewer") 33 | preview, err = p.GenerateOpenGraphPreview(targetUrl, languageHeader, ctx) 34 | } 35 | 36 | // Try scraping if that failed 37 | if errors.Is(err, m.ErrPreviewUnsupported) { 38 | ctx.Log.Debug("Trying built-in previewer") 39 | preview, err = p.GenerateCalculatedPreview(targetUrl, languageHeader, ctx) 40 | } 41 | 42 | ch <- generateResult{ 43 | preview: preview, 44 | err: err, 45 | } 46 | } 47 | 48 | if err := pool.UrlPreviewQueue.Schedule(fn); err != nil { 49 | return m.PreviewResult{}, err 50 | } 51 | res := <-ch 52 | return res.preview, res.err 53 | } 54 | -------------------------------------------------------------------------------- /pipelines/_steps/url_preview/process.go: -------------------------------------------------------------------------------- 1 | package url_preview 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/t2bot/matrix-media-repo/common" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/database" 10 | "github.com/t2bot/matrix-media-repo/url_previewing/m" 11 | ) 12 | 13 | func Process(ctx rcontext.RequestContext, previewUrl string, preview m.PreviewResult, err error, onHost string, userId string, languageHeader string, ts int64) (*database.DbUrlPreview, error) { 14 | previewDb := database.GetInstance().UrlPreviews.Prepare(ctx) 15 | 16 | if err != nil { 17 | if errors.Is(err, m.ErrPreviewUnsupported) { 18 | err = common.ErrMediaNotFound 19 | } 20 | 21 | if errors.Is(err, common.ErrMediaNotFound) { 22 | previewDb.InsertError(previewUrl, common.ErrCodeNotFound) 23 | } else { 24 | previewDb.InsertError(previewUrl, common.ErrCodeUnknown) 25 | } 26 | return nil, err 27 | } else { 28 | result := &database.DbUrlPreview{ 29 | Url: previewUrl, 30 | ErrorCode: "", 31 | BucketTs: ts, // already bucketed 32 | SiteUrl: preview.Url, 33 | SiteName: preview.SiteName, 34 | ResourceType: preview.Type, 35 | Description: preview.Description, 36 | Title: preview.Title, 37 | LanguageHeader: languageHeader, 38 | } 39 | 40 | // Step 7: Store the thumbnail, if needed 41 | UploadImage(ctx, preview.Image, onHost, userId, result) 42 | 43 | // Step 8: Insert the record 44 | err = previewDb.Insert(result) 45 | if err != nil { 46 | ctx.Log.Warn("Non-fatal error caching URL preview: ", err) 47 | sentry.CaptureException(err) 48 | } 49 | 50 | return result, nil 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /pipelines/_steps/url_preview/upload_image.go: -------------------------------------------------------------------------------- 1 | package url_preview 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | "github.com/t2bot/matrix-media-repo/database" 9 | "github.com/t2bot/matrix-media-repo/datastores" 10 | 
"github.com/t2bot/matrix-media-repo/pipelines/pipeline_upload" 11 | "github.com/t2bot/matrix-media-repo/thumbnailing" 12 | "github.com/t2bot/matrix-media-repo/url_previewing/m" 13 | "github.com/t2bot/matrix-media-repo/util" 14 | ) 15 | 16 | func UploadImage(ctx rcontext.RequestContext, image *m.PreviewImage, onHost string, userId string, forRecord *database.DbUrlPreview) { 17 | if image == nil || image.Data == nil { 18 | return 19 | } 20 | 21 | defer image.Data.Close() 22 | pr, pw := io.Pipe() 23 | tee := io.TeeReader(image.Data, pw) 24 | mediaChan := make(chan *database.DbMedia) 25 | defer close(mediaChan) 26 | go func() { 27 | media, err := pipeline_upload.Execute(ctx, onHost, "", io.NopCloser(tee), image.ContentType, image.Filename, userId, datastores.LocalMediaKind) 28 | if err != nil { 29 | _ = pw.CloseWithError(err) 30 | } else { 31 | _ = pw.Close() 32 | } 33 | go func() { 34 | defer func() { 35 | recover() // consume write-to-closed-channel errors 36 | }() 37 | mediaChan <- media 38 | }() 39 | }() 40 | 41 | w := 0 42 | h := 0 43 | g, r, err := thumbnailing.GetGenerator(pr, image.ContentType, false) 44 | _, _ = io.Copy(io.Discard, pr) 45 | if err != nil { 46 | ctx.Log.Warn("Non-fatal error handling URL preview thumbnail: ", err) 47 | sentry.CaptureException(err) 48 | return 49 | } 50 | if g != nil { 51 | _, w, h, err = g.GetOriginDimensions(r, image.ContentType, ctx) 52 | if err != nil { 53 | ctx.Log.Warn("Non-fatal error getting URL preview thumbnail dimensions: ", err) 54 | sentry.CaptureException(err) 55 | } 56 | } 57 | 58 | record := <-mediaChan 59 | if record == nil { 60 | return 61 | } 62 | 63 | forRecord.ImageMxc = util.MxcUri(record.Origin, record.MediaId) 64 | forRecord.ImageType = record.ContentType 65 | forRecord.ImageSize = record.SizeBytes 66 | forRecord.ImageWidth = w 67 | forRecord.ImageHeight = h 68 | } 69 | -------------------------------------------------------------------------------- /pipelines/pipeline_create/pipeline.go: -------------------------------------------------------------------------------- 1 | package pipeline_create 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/common/rcontext" 5 | "github.com/t2bot/matrix-media-repo/database" 6 | "github.com/t2bot/matrix-media-repo/pipelines/_steps/quota" 7 | "github.com/t2bot/matrix-media-repo/pipelines/_steps/upload" 8 | "github.com/t2bot/matrix-media-repo/util" 9 | ) 10 | 11 | const DefaultExpirationTime = 0 12 | 13 | func Execute(ctx rcontext.RequestContext, origin string, userId string, expirationTime int64) (*database.DbExpiringMedia, error) { 14 | // Step 1: Check quota 15 | if err := quota.Check(ctx, userId, quota.MaxPending); err != nil { 16 | return nil, err 17 | } 18 | 19 | // Step 2: Generate media ID 20 | mediaId, err := upload.GenerateMediaId(ctx, origin) 21 | if err != nil { 22 | return nil, err 23 | } 24 | 25 | // Step 3: Insert record of expiration 26 | if expirationTime == DefaultExpirationTime { 27 | expirationTime = ctx.Config.Uploads.MaxAgeSeconds * 1000 28 | } 29 | expiresTs := util.NowMillis() + expirationTime 30 | if err = database.GetInstance().ExpiringMedia.Prepare(ctx).Insert(origin, mediaId, userId, expiresTs); err != nil { 31 | return nil, err 32 | } 33 | 34 | // Step 4: Return database record 35 | return &database.DbExpiringMedia{ 36 | Origin: origin, 37 | MediaId: mediaId, 38 | UserId: userId, 39 | ExpiresTs: expiresTs, 40 | }, nil 41 | } 42 | -------------------------------------------------------------------------------- /pipelines/pipeline_upload/pipeline2.go: 
-------------------------------------------------------------------------------- 1 | package pipeline_upload 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/t2bot/matrix-media-repo/common" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | "github.com/t2bot/matrix-media-repo/database" 10 | "github.com/t2bot/matrix-media-repo/datastores" 11 | ) 12 | 13 | func ExecutePut(ctx rcontext.RequestContext, origin string, mediaId string, r io.ReadCloser, contentType string, fileName string, userId string) (*database.DbMedia, error) { 14 | // Step 1: Do we already have a media record for this? 15 | mediaDb := database.GetInstance().Media.Prepare(ctx) 16 | mediaRecord, err := mediaDb.GetById(origin, mediaId) 17 | if err != nil { 18 | return nil, err 19 | } 20 | if mediaRecord != nil { 21 | return nil, common.ErrAlreadyUploaded 22 | } 23 | 24 | // Step 2: Try to find the holding record 25 | expiringDb := database.GetInstance().ExpiringMedia.Prepare(ctx) 26 | record, err := expiringDb.Get(origin, mediaId) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | // Step 3: Is the record expired? 32 | if record == nil || record.IsExpired() { 33 | return nil, common.ErrExpired 34 | } 35 | 36 | // Step 4: Is the correct user uploading this media? 37 | if record.UserId != userId { 38 | return nil, common.ErrWrongUser 39 | } 40 | 41 | // Step 5: Do the upload 42 | newRecord, err := Execute(ctx, origin, mediaId, r, contentType, fileName, userId, datastores.LocalMediaKind) 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | // Step 6: Delete the holding record 48 | if err2 := expiringDb.Delete(origin, mediaId); err2 != nil { 49 | ctx.Log.Warn("Non-fatal error while deleting expiring media record: " + err2.Error()) 50 | sentry.CaptureException(err2) 51 | } 52 | 53 | return newRecord, err 54 | } 55 | -------------------------------------------------------------------------------- /plugins/manager.go: -------------------------------------------------------------------------------- 1 | package plugins 2 | 3 | import ( 4 | "encoding/base64" 5 | "io" 6 | 7 | "github.com/hashicorp/go-plugin" 8 | "github.com/sirupsen/logrus" 9 | "github.com/t2bot/matrix-media-repo/common/config" 10 | "github.com/t2bot/matrix-media-repo/plugins/plugin_interfaces" 11 | ) 12 | 13 | var pluginTypes = map[string]plugin.Plugin{ 14 | "antispam": &plugin_interfaces.AntispamPlugin{}, 15 | } 16 | 17 | var existingPlugins = make([]*mmrPlugin, 0) 18 | 19 | func ReloadPlugins() { 20 | for _, pl := range config.Get().Plugins { 21 | logrus.Info("Loading plugin: ", pl.Executable) 22 | mmr, err := newPlugin(pl.Executable, pl.Config) 23 | if err != nil { 24 | logrus.Errorf("failed to load plugin %s: %s", pl.Executable, err.Error()) 25 | continue 26 | } 27 | 28 | existingPlugins = append(existingPlugins, mmr) 29 | } 30 | } 31 | 32 | func StopPlugins() { 33 | if len(existingPlugins) == 0 { 34 | return 35 | } 36 | 37 | logrus.Info("Stopping plugin instances...") 38 | for _, pl := range existingPlugins { 39 | pl.Stop() 40 | } 41 | existingPlugins = make([]*mmrPlugin, 0) 42 | } 43 | 44 | func CheckForSpam(r io.Reader, filename string, contentType string, userId string, origin string, mediaId string) (bool, error) { 45 | b := make([]byte, 0) 46 | for _, pl := range existingPlugins { 47 | as, err := pl.Antispam() 48 | if err != nil { 49 | logrus.Warnf("error loading antispam plugin: %s", err.Error()) 50 | continue 51 | } 52 | 53 | if len(b) == 0 { 54 | b, err = io.ReadAll(r) 55 | if err != nil { 56 | return 
false, err 57 | } 58 | } 59 | 60 | b64 := base64.StdEncoding.EncodeToString(b) 61 | spam, err := as.CheckForSpam(b64, filename, contentType, userId, origin, mediaId) 62 | if err != nil { 63 | return false, err 64 | } 65 | if spam { 66 | return true, err 67 | } 68 | } 69 | return false, nil 70 | } 71 | -------------------------------------------------------------------------------- /plugins/mmr_plugin.go: -------------------------------------------------------------------------------- 1 | package plugins 2 | 3 | import ( 4 | "os/exec" 5 | 6 | "github.com/hashicorp/go-hclog" 7 | "github.com/hashicorp/go-plugin" 8 | "github.com/sirupsen/logrus" 9 | "github.com/t2bot/matrix-media-repo/plugins/plugin_common" 10 | "github.com/t2bot/matrix-media-repo/plugins/plugin_interfaces" 11 | ) 12 | 13 | type mmrPlugin struct { 14 | hcClient *plugin.Client 15 | rpcClient plugin.ClientProtocol 16 | config map[string]interface{} 17 | 18 | antispamPlugin plugin_interfaces.Antispam 19 | } 20 | 21 | func newPlugin(path string, config map[string]interface{}) (*mmrPlugin, error) { 22 | logger := hclog.New(&hclog.LoggerOptions{ 23 | Name: "plugin", 24 | Output: logrus.WithField("plugin", path).Writer(), 25 | Level: hclog.Debug, 26 | }) 27 | client := plugin.NewClient(&plugin.ClientConfig{ 28 | Cmd: exec.Command(path), 29 | Logger: logger, 30 | HandshakeConfig: plugin_common.HandshakeConfig, 31 | Plugins: pluginTypes, 32 | }) 33 | rpcClient, err := client.Client() 34 | if err != nil { 35 | client.Kill() 36 | return nil, err 37 | } 38 | return &mmrPlugin{ 39 | hcClient: client, 40 | rpcClient: rpcClient, 41 | config: config, 42 | }, nil 43 | } 44 | 45 | func (p *mmrPlugin) Antispam() (plugin_interfaces.Antispam, error) { 46 | if p.antispamPlugin != nil { 47 | return p.antispamPlugin, nil 48 | } 49 | 50 | raw, err := p.rpcClient.Dispense("antispam") 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | p.antispamPlugin = raw.(plugin_interfaces.Antispam) 56 | _ = p.antispamPlugin.HandleConfig(p.config) 57 | return p.antispamPlugin, nil 58 | } 59 | 60 | func (p *mmrPlugin) Stop() { 61 | p.antispamPlugin = nil 62 | p.hcClient.Kill() 63 | } 64 | -------------------------------------------------------------------------------- /plugins/plugin_common/handshake.go: -------------------------------------------------------------------------------- 1 | package plugin_common 2 | 3 | import ( 4 | "github.com/hashicorp/go-plugin" 5 | ) 6 | 7 | // dev note: HandshakeConfig is for UX, not security 8 | 9 | var HandshakeConfig = plugin.HandshakeConfig{ 10 | ProtocolVersion: 1, 11 | MagicCookieKey: "MEDIA_REPO_PLUGIN", 12 | MagicCookieValue: "hello world - I am a media repo", 13 | } 14 | -------------------------------------------------------------------------------- /pool/init.go: -------------------------------------------------------------------------------- 1 | package pool 2 | 3 | import ( 4 | "github.com/getsentry/sentry-go" 5 | "github.com/sirupsen/logrus" 6 | "github.com/t2bot/matrix-media-repo/common/config" 7 | ) 8 | 9 | var DownloadQueue *Queue 10 | var ThumbnailQueue *Queue 11 | var UrlPreviewQueue *Queue 12 | var TaskQueue *Queue 13 | 14 | func Init() { 15 | var err error 16 | if DownloadQueue, err = NewQueue(config.Get().Downloads.NumWorkers, "downloads"); err != nil { 17 | sentry.CaptureException(err) 18 | logrus.Error("Error setting up downloads queue") 19 | logrus.Fatal(err) 20 | } 21 | if ThumbnailQueue, err = NewQueue(config.Get().Thumbnails.NumWorkers, "thumbnails"); err != nil { 22 | sentry.CaptureException(err) 
23 | logrus.Error("Error setting up thumbnails queue") 24 | logrus.Fatal(err) 25 | } 26 | if UrlPreviewQueue, err = NewQueue(config.Get().UrlPreviews.NumWorkers, "url_previews"); err != nil { 27 | sentry.CaptureException(err) 28 | logrus.Error("Error setting up url previews queue") 29 | logrus.Fatal(err) 30 | } 31 | if TaskQueue, err = NewQueue(config.Get().Tasks.NumWorkers, "tasks"); err != nil { 32 | sentry.CaptureException(err) 33 | logrus.Error("Error setting up tasks queue") 34 | logrus.Fatal(err) 35 | } 36 | } 37 | 38 | func AdjustSize() { 39 | DownloadQueue.pool.Tune(config.Get().Downloads.NumWorkers) 40 | ThumbnailQueue.pool.Tune(config.Get().Thumbnails.NumWorkers) 41 | UrlPreviewQueue.pool.Tune(config.Get().UrlPreviews.NumWorkers) 42 | TaskQueue.pool.Tune(config.Get().Tasks.NumWorkers) 43 | } 44 | 45 | func Drain() { 46 | DownloadQueue.pool.Release() 47 | ThumbnailQueue.pool.Release() 48 | UrlPreviewQueue.pool.Release() 49 | TaskQueue.pool.Release() 50 | } 51 | -------------------------------------------------------------------------------- /pool/queue.go: -------------------------------------------------------------------------------- 1 | package pool 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/getsentry/sentry-go" 7 | "github.com/panjf2000/ants/v2" 8 | "github.com/sirupsen/logrus" 9 | "github.com/t2bot/matrix-media-repo/common/logging" 10 | ) 11 | 12 | type Queue struct { 13 | pool *ants.Pool 14 | } 15 | 16 | func NewQueue(workers int, name string) (*Queue, error) { 17 | p, err := ants.NewPool(workers, ants.WithOptions(ants.Options{ 18 | ExpiryDuration: 1 * time.Minute, // worker lifespan when unused 19 | PreAlloc: false, 20 | MaxBlockingTasks: 0, // no limit on tasks we can submit 21 | Nonblocking: false, 22 | PanicHandler: func(err interface{}) { 23 | logrus.Errorf("Panic from internal queue %s", name) 24 | logrus.Error(err) 25 | //goland:noinspection GoTypeAssertionOnErrors 26 | if e, ok := err.(error); ok { 27 | sentry.CaptureException(e) 28 | } 29 | }, 30 | Logger: &logging.SendToDebugLogger{}, 31 | DisablePurge: false, 32 | })) 33 | if err != nil { 34 | return nil, err 35 | } 36 | return &Queue{pool: p}, nil 37 | } 38 | 39 | func (p *Queue) Schedule(task func()) error { 40 | return p.pool.Submit(task) 41 | } 42 | -------------------------------------------------------------------------------- /redislib/connection.go: -------------------------------------------------------------------------------- 1 | package redislib 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/go-redsync/redsync/v4" 8 | rsredis "github.com/go-redsync/redsync/v4/redis" 9 | "github.com/go-redsync/redsync/v4/redis/goredis/v9" 10 | "github.com/redis/go-redis/v9" 11 | "github.com/t2bot/matrix-media-repo/common/config" 12 | ) 13 | 14 | var connectionLock = &sync.Once{} 15 | var ring *redis.Ring 16 | var rs *redsync.Redsync 17 | var pools = make([]rsredis.Pool, 0) 18 | var clients = make([]*redis.Client, 0) 19 | 20 | func makeConnection() { 21 | if ring != nil { 22 | return 23 | } 24 | 25 | connectionLock.Do(func() { 26 | conf := config.Get().Redis 27 | if !conf.Enabled { 28 | return 29 | } 30 | addresses := make(map[string]string) 31 | for _, c := range conf.Shards { 32 | addresses[c.Name] = c.Address 33 | 34 | client := redis.NewClient(&redis.Options{ 35 | DialTimeout: 10 * time.Second, 36 | DB: conf.DbNum, 37 | Addr: c.Address, 38 | }) 39 | clients = append(clients, client) 40 | pools = append(pools, goredis.NewPool(client)) 41 | } 42 | ring = redis.NewRing(&redis.RingOptions{ 43 | Addrs: 
addresses, 44 | DialTimeout: 10 * time.Second, 45 | DB: conf.DbNum, 46 | }) 47 | rs = redsync.New(pools...) 48 | }) 49 | } 50 | 51 | func Reconnect() { 52 | softStop() 53 | makeConnection() 54 | resubscribeAll() 55 | } 56 | 57 | func Stop() { 58 | softStop() 59 | resubscribeAll() // since we don't have a `ring`, it'll close everything 60 | } 61 | 62 | func softStop() { 63 | if ring != nil { 64 | _ = ring.Close() 65 | } 66 | for _, c := range clients { 67 | _ = c.Close() 68 | } 69 | ring = nil 70 | rs = nil 71 | pools = make([]rsredis.Pool, 0) 72 | clients = make([]*redis.Client, 0) 73 | connectionLock = &sync.Once{} 74 | } 75 | -------------------------------------------------------------------------------- /redislib/locking.go: -------------------------------------------------------------------------------- 1 | package redislib 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/go-redsync/redsync/v4" 7 | ) 8 | 9 | func GetMutex(key string, expiration time.Duration) *redsync.Mutex { 10 | makeConnection() 11 | if rs == nil { 12 | return nil 13 | } 14 | 15 | // Dev note: the prefix is to prevent key conflicts. Specifically, we create an upload mutex using 16 | // the sha256 hash of the file *and* populate the redis cache with that file at the same key - this 17 | // causes the mutex lock to fail unlocking because the value "changed". A prefix avoids that conflict. 18 | return rs.NewMutex("mutex-"+key, redsync.WithExpiry(expiration)) 19 | } 20 | -------------------------------------------------------------------------------- /redislib/pubsub.go: -------------------------------------------------------------------------------- 1 | package redislib 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/redis/go-redis/v9" 8 | "github.com/t2bot/matrix-media-repo/common/rcontext" 9 | ) 10 | 11 | var subscribeMutex = new(sync.Mutex) 12 | var subscribeChans = make(map[string][]chan string) 13 | 14 | type PubSubValue struct { 15 | Err error 16 | Str string 17 | } 18 | 19 | func Publish(ctx rcontext.RequestContext, channel string, payload string) error { 20 | makeConnection() 21 | if ring == nil { 22 | return nil 23 | } 24 | 25 | if ring.PoolStats().TotalConns == 0 { 26 | ctx.Log.Warn("Not broadcasting upload to Redis - no connections available") 27 | return nil 28 | } 29 | 30 | r := ring.Publish(ctx.Context, channel, payload) 31 | if r.Err() != nil { 32 | if r.Err() == redis.Nil { 33 | ctx.Log.Warn("Not broadcasting upload to Redis - no connections available") 34 | return nil 35 | } 36 | return r.Err() 37 | } 38 | return nil 39 | } 40 | 41 | func Subscribe(channel string) <-chan string { 42 | makeConnection() 43 | if ring == nil { 44 | return nil 45 | } 46 | 47 | ch := make(chan string) 48 | subscribeMutex.Lock() 49 | if _, ok := subscribeChans[channel]; !ok { 50 | subscribeChans[channel] = make([]chan string, 0) 51 | } 52 | subscribeChans[channel] = append(subscribeChans[channel], ch) 53 | subscribeMutex.Unlock() 54 | doSubscribe(channel, ch) 55 | return ch 56 | } 57 | 58 | func doSubscribe(channel string, ch chan<- string) { 59 | sub := ring.Subscribe(context.Background(), channel) 60 | go func(ch chan<- string) { 61 | recvCh := sub.Channel() 62 | for { 63 | val := <-recvCh 64 | if val != nil { 65 | ch <- val.Payload 66 | } else { 67 | break 68 | } 69 | } 70 | }(ch) 71 | } 72 | 73 | func resubscribeAll() { 74 | subscribeMutex.Lock() 75 | defer subscribeMutex.Unlock() 76 | for channel, chs := range subscribeChans { 77 | for _, ch := range chs { 78 | if ring == nil { 79 | close(ch) 80 | } else { 
81 | doSubscribe(channel, ch) 82 | } 83 | } 84 | } 85 | if ring == nil { 86 | subscribeChans = make(map[string][]chan string) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /restrictions/auth.go: -------------------------------------------------------------------------------- 1 | package restrictions 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/common/rcontext" 5 | "github.com/t2bot/matrix-media-repo/database" 6 | ) 7 | 8 | func DoesMediaRequireAuth(ctx rcontext.RequestContext, origin string, mediaId string) (bool, error) { 9 | restrictions, err := database.GetInstance().RestrictedMedia.Prepare(ctx).GetAllForId(origin, mediaId) 10 | if err != nil { 11 | return false, err 12 | } 13 | for _, restriction := range restrictions { 14 | if restriction.Condition == database.RestrictedRequiresAuth { 15 | return restriction.ConditionValue == "true", nil 16 | } 17 | } 18 | return false, nil 19 | } 20 | 21 | func SetMediaRequiresAuth(ctx rcontext.RequestContext, origin string, mediaId string) error { 22 | return database.GetInstance().RestrictedMedia.Prepare(ctx).Insert(origin, mediaId, database.RestrictedRequiresAuth, "true") 23 | } 24 | -------------------------------------------------------------------------------- /tasks/all.go: -------------------------------------------------------------------------------- 1 | package tasks 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/tasks/task_runner" 5 | ) 6 | 7 | func StartAll() { 8 | executeEnable() 9 | 10 | scheduleHourly(RecurringTaskPurgeRemoteMedia, task_runner.PurgeRemoteMedia) 11 | scheduleHourly(RecurringTaskPurgeThumbnails, task_runner.PurgeThumbnails) 12 | scheduleHourly(RecurringTaskPurgePreviews, task_runner.PurgePreviews) 13 | scheduleHourly(RecurringTaskPurgeHeldMediaIds, task_runner.PurgeHeldMediaIds) 14 | 15 | scheduleUnfinished() 16 | } 17 | 18 | func StopAll() { 19 | stopRecurring() 20 | } 21 | -------------------------------------------------------------------------------- /tasks/task_runner/00-internal.go: -------------------------------------------------------------------------------- 1 | package task_runner 2 | 3 | import ( 4 | "github.com/getsentry/sentry-go" 5 | "github.com/t2bot/matrix-media-repo/common/rcontext" 6 | "github.com/t2bot/matrix-media-repo/database" 7 | "github.com/t2bot/matrix-media-repo/util" 8 | ) 9 | 10 | func markDone(ctx rcontext.RequestContext, task *database.DbTask) { 11 | taskDb := database.GetInstance().Tasks.Prepare(ctx) 12 | if err := taskDb.SetEndTime(task.TaskId, util.NowMillis()); err != nil { 13 | ctx.Log.Warn("Error updating task as complete: ", err) 14 | sentry.CaptureException(err) 15 | } 16 | ctx.Log.Infof("Task '%s' completed", task.Name) 17 | } 18 | 19 | func markError(ctx rcontext.RequestContext, task *database.DbTask, errVal error) { 20 | taskDb := database.GetInstance().Tasks.Prepare(ctx) 21 | if err := taskDb.SetError(task.TaskId, errVal.Error()); err != nil { 22 | ctx.Log.Warn("Error updating task with error message: ", err) 23 | sentry.CaptureException(err) 24 | } 25 | ctx.Log.Debugf("Task '%s' flagged with error", task.Name) 26 | } 27 | -------------------------------------------------------------------------------- /tasks/task_runner/purge_held_media_ids.go: -------------------------------------------------------------------------------- 1 | package task_runner 2 | 3 | import ( 4 | "github.com/getsentry/sentry-go" 5 | "github.com/t2bot/matrix-media-repo/common/rcontext" 6 | 
"github.com/t2bot/matrix-media-repo/database" 7 | "github.com/t2bot/matrix-media-repo/util" 8 | ) 9 | 10 | func PurgeHeldMediaIds(ctx rcontext.RequestContext) { 11 | // dev note: don't use ctx for config lookup to avoid misreading it 12 | 13 | beforeTs := util.NowMillis() - int64(7*24*60*60*1000) // 7 days 14 | db := database.GetInstance().HeldMedia.Prepare(ctx) 15 | 16 | if err := db.DeleteOlderThan(database.ForCreateHeldReason, beforeTs); err != nil { 17 | ctx.Log.Error("Error deleting held media IDs: ", err) 18 | sentry.CaptureException(err) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /tasks/task_runner/purge_previews.go: -------------------------------------------------------------------------------- 1 | package task_runner 2 | 3 | import ( 4 | "github.com/getsentry/sentry-go" 5 | "github.com/t2bot/matrix-media-repo/common/config" 6 | "github.com/t2bot/matrix-media-repo/common/rcontext" 7 | "github.com/t2bot/matrix-media-repo/database" 8 | "github.com/t2bot/matrix-media-repo/util" 9 | ) 10 | 11 | func PurgePreviews(ctx rcontext.RequestContext) { 12 | // dev note: don't use ctx for config lookup to avoid misreading it 13 | 14 | if config.Get().UrlPreviews.ExpireDays <= 0 { 15 | return 16 | } 17 | 18 | beforeTs := util.NowMillis() - int64(config.Get().UrlPreviews.ExpireDays*24*60*60*1000) 19 | db := database.GetInstance().UrlPreviews.Prepare(ctx) 20 | 21 | // TODO: Fix https://github.com/t2bot/matrix-media-repo/issues/424 ("can't clean up preview media") 22 | if err := db.DeleteOlderThan(beforeTs); err != nil { 23 | ctx.Log.Error("Error deleting previews: ", err) 24 | sentry.CaptureException(err) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /tasks/task_runner/purge_remote_media.go: -------------------------------------------------------------------------------- 1 | package task_runner 2 | 3 | import ( 4 | "github.com/getsentry/sentry-go" 5 | "github.com/t2bot/matrix-media-repo/common/config" 6 | "github.com/t2bot/matrix-media-repo/common/rcontext" 7 | "github.com/t2bot/matrix-media-repo/database" 8 | "github.com/t2bot/matrix-media-repo/util" 9 | ) 10 | 11 | func PurgeRemoteMedia(ctx rcontext.RequestContext) { 12 | // dev note: don't use ctx for config lookup to avoid misreading it 13 | 14 | if config.Get().Downloads.ExpireDays <= 0 { 15 | return 16 | } 17 | 18 | beforeTs := util.NowMillis() - int64(config.Get().Downloads.ExpireDays*24*60*60*1000) 19 | _, err := PurgeRemoteMediaBefore(ctx, beforeTs) 20 | if err != nil { 21 | ctx.Log.Error("Error purging media: ", err) 22 | sentry.CaptureException(err) 23 | } 24 | } 25 | 26 | // PurgeRemoteMediaBefore returns (count affected, error) 27 | func PurgeRemoteMediaBefore(ctx rcontext.RequestContext, beforeTs int64) (int, error) { 28 | mediaDb := database.GetInstance().Media.Prepare(ctx) 29 | 30 | origins := util.GetOurDomains() 31 | 32 | records, err := mediaDb.GetOldExcluding(origins, beforeTs) 33 | if err != nil { 34 | return 0, err 35 | } 36 | 37 | removed, err := doPurge(ctx.AsBackground(), records, &purgeConfig{IncludeQuarantined: false}) 38 | if err != nil { 39 | return 0, err 40 | } 41 | 42 | return len(removed), nil 43 | } 44 | -------------------------------------------------------------------------------- /templating/models.go: -------------------------------------------------------------------------------- 1 | package templating 2 | 3 | type ViewExportPartModel struct { 4 | ExportID string 5 | Index int 6 | SizeBytes int64 7 
| SizeBytesHuman string 8 | FileName string 9 | } 10 | 11 | type ViewExportModel struct { 12 | ExportID string 13 | Entity string 14 | ExportParts []*ViewExportPartModel 15 | } 16 | 17 | type ExportIndexMediaModel struct { 18 | ExportID string 19 | ArchivedName string 20 | FileName string 21 | Origin string 22 | MediaID string 23 | SizeBytes int64 24 | SizeBytesHuman string 25 | UploadTs int64 26 | UploadDateHuman string 27 | Sha256Hash string 28 | ContentType string 29 | Uploader string 30 | } 31 | 32 | type ExportIndexModel struct { 33 | ExportID string 34 | Entity string 35 | Media []*ExportIndexMediaModel 36 | } 37 | -------------------------------------------------------------------------------- /templating/templates.go: -------------------------------------------------------------------------------- 1 | package templating 2 | 3 | import ( 4 | "fmt" 5 | "html/template" 6 | "path" 7 | "sync" 8 | 9 | "github.com/t2bot/matrix-media-repo/common/config" 10 | ) 11 | 12 | type templates struct { 13 | cached map[string]*template.Template 14 | } 15 | 16 | var instance *templates 17 | var singletonLock = &sync.Once{} 18 | 19 | func getInstance() *templates { 20 | if instance == nil { 21 | singletonLock.Do(func() { 22 | instance = &templates{ 23 | cached: make(map[string]*template.Template), 24 | } 25 | }) 26 | } 27 | return instance 28 | } 29 | 30 | func GetTemplate(name string) (*template.Template, error) { 31 | i := getInstance() 32 | if v, ok := i.cached[name]; ok { 33 | return v, nil 34 | } 35 | 36 | fname := fmt.Sprintf("%s.html", name) 37 | tmplPath := path.Join(config.Runtime.TemplatesPath, fname) 38 | t, err := template.New(fname).ParseFiles(tmplPath) 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | i.cached[name] = t 44 | return t, nil 45 | } 46 | -------------------------------------------------------------------------------- /test/matrix_resolve_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/t2bot/matrix-media-repo/matrix" 8 | ) 9 | 10 | func doResolve(t *testing.T, origin string, expectedAddress string, expectedHost string) { 11 | url, host, err := matrix.GetServerApiUrl(origin) 12 | assert.NoError(t, err, origin) 13 | assert.Equal(t, expectedAddress, url, origin) 14 | assert.Equal(t, expectedHost, host, origin) 15 | } 16 | 17 | func TestResolveMatrix(t *testing.T) { 18 | doResolve(t, "2.s.resolvematrix.dev:7652", "https://2.s.resolvematrix.dev:7652", "2.s.resolvematrix.dev") 19 | doResolve(t, "3b.s.resolvematrix.dev", "https://wk.3b.s.resolvematrix.dev:7753", "wk.3b.s.resolvematrix.dev:7753") 20 | doResolve(t, "3c.s.resolvematrix.dev", "https://srv.wk.3c.s.resolvematrix.dev:7754", "wk.3c.s.resolvematrix.dev") 21 | doResolve(t, "3d.s.resolvematrix.dev", "https://wk.3d.s.resolvematrix.dev:8448", "wk.3d.s.resolvematrix.dev") 22 | doResolve(t, "4.s.resolvematrix.dev", "https://srv.4.s.resolvematrix.dev:7855", "4.s.resolvematrix.dev") 23 | doResolve(t, "5.s.resolvematrix.dev", "https://5.s.resolvematrix.dev:8448", "5.s.resolvematrix.dev") 24 | doResolve(t, "3c.msc4040.s.resolvematrix.dev", "https://srv.wk.3c.msc4040.s.resolvematrix.dev:7053", "wk.3c.msc4040.s.resolvematrix.dev") 25 | doResolve(t, "4.msc4040.s.resolvematrix.dev", "https://srv.4.msc4040.s.resolvematrix.dev:7054", "4.msc4040.s.resolvematrix.dev") 26 | } 27 | -------------------------------------------------------------------------------- 
/test/signing_anyserver_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/t2bot/matrix-media-repo/homeserver_interop/any_server" 9 | "github.com/t2bot/matrix-media-repo/util" 10 | ) 11 | 12 | func TestAnyServerDecodeDendrite(t *testing.T) { 13 | raw := `-----BEGIN MATRIX PRIVATE KEY----- 14 | Key-ID: ed25519:1Pu3u3 15 | 16 | 1Pu3u3solToI2pTdsHA4wj05bANnzPwJoxPepw2he2s= 17 | -----END MATRIX PRIVATE KEY----- 18 | ` 19 | 20 | key, err := any_server.DecodeSigningKey(bytes.NewReader([]byte(raw))) 21 | assert.NoError(t, err) 22 | assert.Equal(t, "1Pu3u3", key.KeyVersion) 23 | assert.Equal(t, "1Pu3u3solToI2pTdsHA4wj05bANnzPwJoxPepw2he2u4Fq1IRsE7q7tI3C83BUUIPhcZpLSKQ8jU8yA/meWHdw", util.EncodeUnpaddedBase64ToString(key.PrivateKey)) 24 | } 25 | 26 | func TestAnyServerDecodeSynapse(t *testing.T) { 27 | raw := `ed25519 a_RVfN wdSWsTNSOmMuNA1Ej6JUyeNbiBEt5jexHmVs7mHKZVc` 28 | 29 | key, err := any_server.DecodeSigningKey(bytes.NewReader([]byte(raw))) 30 | assert.NoError(t, err) 31 | assert.Equal(t, "a_RVfN", key.KeyVersion) 32 | assert.Equal(t, "wdSWsTNSOmMuNA1Ej6JUyeNbiBEt5jexHmVs7mHKZVc3XC3Hf2tee4KxuO3diGtvSOQ8j/MjmSmEhX1qLV6dbQ", util.EncodeUnpaddedBase64ToString(key.PrivateKey)) 33 | } 34 | 35 | func TestAnyServerDecodeMMR(t *testing.T) { 36 | raw := `-----BEGIN MMR PRIVATE KEY----- 37 | Key-ID: ed25519:e5d0oC 38 | Version: 1 39 | 40 | PJt0OaIImDJk8P/PDb4TNQHgI/1AA1C+AaQaABxAcgc= 41 | -----END MMR PRIVATE KEY----- 42 | ` 43 | 44 | key, err := any_server.DecodeSigningKey(bytes.NewReader([]byte(raw))) 45 | assert.NoError(t, err) 46 | assert.Equal(t, "e5d0oC", key.KeyVersion) 47 | assert.Equal(t, "PJt0OaIImDJk8P/PDb4TNQHgI/1AA1C+AaQaABxAcgdOiF6RhfMvHtXNXwW0tCUjdexJ0+/UKOFVhjmtmYUK9Q", util.EncodeUnpaddedBase64ToString(key.PrivateKey)) 48 | } 49 | -------------------------------------------------------------------------------- /test/templates/minio-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/ash 2 | set -x 3 | wget https://dl.min.io/client/mc/release/linux-amd64/mc 4 | chmod +x mc 5 | mv mc /usr/local/bin/mc 6 | mc alias set local {{.ConsoleAddress}} admin test1234 7 | mc admin user svcacct add local admin --access-key mykey --secret-key mysecret 8 | mc mb local/mybucket 9 | echo 'This line marks WaitFor as done' 10 | -------------------------------------------------------------------------------- /test/templates/mmr.config.yaml: -------------------------------------------------------------------------------- 1 | repo: 2 | bindAddress: '0.0.0.0' 3 | port: 8000 4 | logDirectory: "-" 5 | logColors: false 6 | jsonLogs: false 7 | logLevel: "debug" 8 | trustAnyForwardedAddress: false 9 | useForwardedHost: true 10 | database: 11 | postgres: "{{.PgConnectionString}}" 12 | pool: 13 | maxConnections: 25 14 | maxIdleConnections: 5 15 | homeservers: 16 | {{range .Homeservers}} 17 | - name: "{{.ServerName}}" 18 | csApi: "{{.ClientServerApiUrl}}" 19 | backoffAt: 10 20 | adminApiKind: "synapse" 21 | signingKeyPath: "{{.SigningKeyPath}}" 22 | {{end}} 23 | redis: 24 | enabled: true 25 | databaseNumber: 0 26 | shards: 27 | - name: "server1" 28 | addr: "{{.RedisAddr}}" 29 | accessTokens: 30 | maxCacheTimeSeconds: 43200 31 | datastores: 32 | - type: s3 33 | id: "s3_internal" 34 | forKinds: [ "thumbnails", "remote_media", "local_media", "archives" ] 35 | opts: 36 | tempPath: "/tmp/mediarepo_s3_upload" 37 | 
endpoint: "{{.S3Endpoint}}" 38 | bucketName: "mybucket" 39 | accessKeyId: "mykey" 40 | accessSecret: "mysecret" 41 | ssl: false 42 | rateLimit: 43 | enabled: false # we've got tests which intentionally spam 44 | urlPreviews: 45 | enabled: true 46 | maxPageSizeBytes: 10485760 47 | previewUnsafeCertificates: false 48 | numWords: 50 49 | maxLength: 200 50 | numTitleWords: 30 51 | maxTitleLength: 150 52 | filePreviewTypes: 53 | - "image/*" 54 | numWorkers: 10 55 | disallowedNetworks: [] 56 | allowedNetworks: ["0.0.0.0/0"] 57 | expireAfterDays: 0 58 | defaultLanguage: "en-US,en" 59 | userAgent: "matrix-media-repo" 60 | oEmbed: true 61 | -------------------------------------------------------------------------------- /test/templates/synapse.homeserver.yaml: -------------------------------------------------------------------------------- 1 | server_name: "{{.ServerName}}" 2 | pid_file: /app/homeserver.pid 3 | listeners: 4 | - port: 8008 5 | tls: false 6 | type: http 7 | x_forwarded: true 8 | resources: 9 | - names: [client, federation] 10 | compress: false 11 | database: 12 | name: psycopg2 13 | args: 14 | cp_min: 5 15 | cp_max: 10 16 | sslmode: "disable" 17 | host: "{{.PgHost}}" 18 | port: {{.PgPort}} 19 | user: postgres 20 | password: test1234 21 | database: synapse 22 | log_config: "/data/log.config" 23 | media_store_path: /app/media_store 24 | registration_shared_secret: "l,jbms,sR_Z82JNP2,sv-~^5bXqFTV-T=j,,~=OKZ8I_Tardk;" 25 | report_stats: false 26 | macaroon_secret_key: "KV*8qANyBE28e*pZ-9RP+u86~i8+.j9IZEKU8Vb4+jdIoe~ncw" 27 | form_secret: "mQrUxtt6^F3uQ3nVrGdg7yAK64p*#Uf@2n=e9y8ggLbhy3-QIy" 28 | signing_key_path: "/data/signing.key" 29 | enable_media_repo: false 30 | enable_registration: true 31 | enable_registration_without_verification: true 32 | allow_guest_access: true 33 | trusted_key_servers: [] 34 | -------------------------------------------------------------------------------- /test/templates/synapse.log.config: -------------------------------------------------------------------------------- 1 | version: 1 2 | formatters: 3 | precise: 4 | format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' 5 | handlers: 6 | console: 7 | class: logging.StreamHandler 8 | formatter: precise 9 | loggers: 10 | synapse.storage.SQL: 11 | level: INFO 12 | root: 13 | level: INFO 14 | handlers: [console] 15 | disable_existing_loggers: false -------------------------------------------------------------------------------- /test/test_internals/deps_docker_context.go: -------------------------------------------------------------------------------- 1 | package test_internals 2 | 3 | import ( 4 | "archive/tar" 5 | "io" 6 | "io/fs" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/sabhiram/go-gitignore" 12 | ) 13 | 14 | func createDockerContext() (*os.File, error) { 15 | ignoreFile, err := ignore.CompileIgnoreFile(".dockerignore") 16 | if err != nil { 17 | return nil, err 18 | } 19 | 20 | tmpF, err := os.CreateTemp(os.TempDir(), "mmr-docker-context") 21 | if err != nil { 22 | return nil, err 23 | } 24 | err = tmpF.Chmod(0644) 25 | if err != nil { 26 | return nil, err 27 | } 28 | tarContext := tar.NewWriter(tmpF) 29 | 30 | err = filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error { 31 | if err != nil { 32 | return err 33 | } 34 | if d.IsDir() { 35 | return nil 36 | } 37 | info, err := d.Info() 38 | if err != nil { 39 | return err 40 | } 41 | 42 | if match := ignoreFile.MatchesPath(path); match { 43 | return nil 44 | } 45 | 
//fmt.Println("[Image Build] Including file: ", path) 46 | err = tarContext.WriteHeader(&tar.Header{ 47 | Name: strings.ReplaceAll(path, "\\", "/"), 48 | Mode: int64(info.Mode()), 49 | ModTime: info.ModTime(), 50 | Size: info.Size(), 51 | }) 52 | if err != nil { 53 | return err 54 | } 55 | f2, err := os.Open(path) 56 | if err != nil { 57 | return err 58 | } 59 | defer f2.Close() 60 | _, err = io.Copy(tarContext, f2) 61 | if err != nil { 62 | return err 63 | } 64 | return nil 65 | }) 66 | if err != nil { 67 | return nil, err 68 | } 69 | err = tarContext.Close() 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | err = tmpF.Close() 75 | if err != nil { 76 | return nil, err 77 | } 78 | 79 | return os.Open(tmpF.Name()) 80 | } 81 | -------------------------------------------------------------------------------- /test/test_internals/deps_network.go: -------------------------------------------------------------------------------- 1 | package test_internals 2 | 3 | import ( 4 | "context" 5 | "log" 6 | 7 | "github.com/t2bot/matrix-media-repo/util/ids" 8 | "github.com/testcontainers/testcontainers-go" 9 | ) 10 | 11 | type NetworkDep struct { 12 | ctx context.Context 13 | dockerNet testcontainers.Network 14 | 15 | NetId string 16 | } 17 | 18 | type netCustomizer struct { 19 | testcontainers.ContainerCustomizer 20 | network *NetworkDep 21 | } 22 | 23 | func (c *netCustomizer) Customize(req *testcontainers.GenericContainerRequest) error { 24 | if req.Networks == nil { 25 | req.Networks = make([]string, 0) 26 | } 27 | req.Networks = append(req.Networks, c.network.NetId) 28 | return nil 29 | } 30 | 31 | func MakeNetwork() (*NetworkDep, error) { 32 | ctx := context.Background() 33 | 34 | netId, err := ids.NewUniqueId() 35 | if err != nil { 36 | return nil, err 37 | } 38 | dockerNet, err := testcontainers.GenericNetwork(ctx, testcontainers.GenericNetworkRequest{ 39 | NetworkRequest: testcontainers.NetworkRequest{ 40 | Name: netId, 41 | }, 42 | ProviderType: testcontainers.ProviderDocker, 43 | }) 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | return &NetworkDep{ 49 | ctx: ctx, 50 | dockerNet: dockerNet, 51 | NetId: netId, 52 | }, nil 53 | } 54 | 55 | func (n *NetworkDep) ApplyToContainer() testcontainers.ContainerCustomizer { 56 | return &netCustomizer{network: n} 57 | } 58 | 59 | func (n *NetworkDep) Teardown() { 60 | if err := n.dockerNet.Remove(n.ctx); err != nil { 61 | log.Fatalf("Error cleaning up docker network '%s': %s", n.NetId, err.Error()) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /test/test_internals/testcontainers_ext.go: -------------------------------------------------------------------------------- 1 | package test_internals 2 | 3 | import "github.com/testcontainers/testcontainers-go" 4 | 5 | type EnvCustomizer struct { 6 | testcontainers.ContainerCustomizer 7 | varName string 8 | varValue string 9 | } 10 | 11 | func WithEnvironment(name string, value string) *EnvCustomizer { 12 | return &EnvCustomizer{ 13 | varName: name, 14 | varValue: value, 15 | } 16 | } 17 | 18 | func (c *EnvCustomizer) Customize(req *testcontainers.GenericContainerRequest) error { 19 | if req.Env == nil { 20 | req.Env = make(map[string]string) 21 | } 22 | req.Env[c.varName] = c.varValue 23 | return nil 24 | } 25 | -------------------------------------------------------------------------------- /test/test_internals/util.go: -------------------------------------------------------------------------------- 1 | package test_internals 2 | 3 | import 
( 4 | "bytes" 5 | "fmt" 6 | "image" 7 | "image/color" 8 | "io" 9 | "testing" 10 | 11 | "github.com/disintegration/imaging" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | var evenColor = color.RGBA{R: 255, G: 0, B: 0, A: 255} 16 | var oddColor = color.RGBA{R: 0, G: 255, B: 0, A: 255} 17 | var altColor = color.RGBA{R: 0, G: 0, B: 255, A: 255} 18 | 19 | func colorFor(x int, y int) color.Color { 20 | c := oddColor 21 | if (y%2.0) == 0 && (x%2.0) == 0 { 22 | c = altColor 23 | } else if (y%2.0) == 0 || (x%2.0) == 0 { 24 | c = evenColor 25 | } 26 | return c 27 | } 28 | 29 | func MakeTestImage(width int, height int) (string, io.Reader, error) { 30 | img := image.NewNRGBA(image.Rect(0, 0, width, height)) 31 | for x := 0; x < width; x++ { 32 | for y := 0; y < height; y++ { 33 | c := colorFor(x, y) 34 | img.Set(x, y, c) 35 | } 36 | } 37 | 38 | b := bytes.NewBuffer(make([]byte, 0)) 39 | err := imaging.Encode(b, img, imaging.PNG) 40 | if err != nil { 41 | return "", nil, err 42 | } 43 | 44 | return "image/png", b, nil 45 | } 46 | 47 | func AssertIsTestImage(t *testing.T, i io.Reader) { 48 | img, _, err := image.Decode(i) 49 | assert.NoError(t, err, "Error decoding image") 50 | width := img.Bounds().Max.X 51 | height := img.Bounds().Max.Y 52 | for x := 0; x < width; x++ { 53 | for y := 0; y < height; y++ { 54 | c := colorFor(x, y) 55 | if !assert.Equal(t, c, img.At(x, y), fmt.Sprintf("Wrong colour for pixel %d,%d", x, y)) { 56 | return // don't print thousands of errors 57 | } 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /test/test_internals/util_client_api_types.go: -------------------------------------------------------------------------------- 1 | package test_internals 2 | 3 | type MatrixErrorResponse struct { 4 | InjectedStatusCode int 5 | Code string `json:"errcode"` 6 | Message string `json:"error"` // optional 7 | } 8 | 9 | type MatrixUploadResponse struct { 10 | MxcUri string `json:"content_uri"` 11 | } 12 | 13 | type MatrixCreatedMediaResponse struct { 14 | *MatrixUploadResponse 15 | ExpiresTs int64 `json:"unused_expires_at"` 16 | } 17 | -------------------------------------------------------------------------------- /test/test_internals/util_keyserver.go: -------------------------------------------------------------------------------- 1 | package test_internals 2 | 3 | import ( 4 | "bytes" 5 | "crypto/ed25519" 6 | "encoding/json" 7 | "log" 8 | 9 | "github.com/t2bot/matrix-media-repo/database" 10 | "github.com/t2bot/matrix-media-repo/homeserver_interop" 11 | "github.com/t2bot/matrix-media-repo/homeserver_interop/mmr" 12 | "github.com/t2bot/matrix-media-repo/util" 13 | ) 14 | 15 | func MakeKeyServer(deps *ContainerDeps) (*HostedFile, *homeserver_interop.SigningKey) { 16 | // We'll use a pre-computed signing key for simplicity 17 | signingKey, err := mmr.DecodeSigningKey(bytes.NewReader([]byte(`-----BEGIN MMR PRIVATE KEY----- 18 | Key-ID: ed25519:e5d0oC 19 | Version: 1 20 | 21 | PJt0OaIImDJk8P/PDb4TNQHgI/1AA1C+AaQaABxAcgc= 22 | -----END MMR PRIVATE KEY----- 23 | `))) 24 | if err != nil { 25 | log.Fatal(err) 26 | } 27 | keyServerKey := signingKey 28 | // Create a /_matrix/key/v2/server response file (signed JSON) 29 | keyServer, writeFn, err := LazyServeFile("_matrix/key/v2/server", deps) 30 | if err != nil { 31 | log.Fatal(err) 32 | } 33 | serverKey := database.AnonymousJson{ 34 | "old_verify_keys": database.AnonymousJson{}, 35 | "server_name": keyServer.PublicHostname, 36 | "valid_until_ts": util.NowMillis() + (60 * 60 * 
1000), // +1hr 37 | "verify_keys": database.AnonymousJson{ 38 | "ed25519:e5d0oC": database.AnonymousJson{ 39 | "key": "TohekYXzLx7VzV8FtLQlI3XsSdPv1CjhVYY5rZmFCvU", 40 | }, 41 | }, 42 | } 43 | canonical, err := util.EncodeCanonicalJson(serverKey) 44 | signature := util.EncodeUnpaddedBase64ToString(ed25519.Sign(signingKey.PrivateKey, canonical)) 45 | serverKey["signatures"] = database.AnonymousJson{ 46 | keyServer.PublicHostname: database.AnonymousJson{ 47 | "ed25519:e5d0oC": signature, 48 | }, 49 | } 50 | b, err := json.Marshal(serverKey) 51 | if err != nil { 52 | log.Fatal(err) 53 | } 54 | err = writeFn(string(b)) 55 | if err != nil { 56 | log.Fatal(err) 57 | } 58 | 59 | return keyServer, keyServerKey 60 | } 61 | -------------------------------------------------------------------------------- /test/xmatrix_header_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "crypto/ed25519" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/t2bot/matrix-media-repo/common/config" 9 | "github.com/t2bot/matrix-media-repo/matrix" 10 | "github.com/t2bot/matrix-media-repo/util" 11 | ) 12 | 13 | func TestXMatrixAuthHeader(t *testing.T) { 14 | body := []byte(nil) 15 | 16 | config.AddDomainForTesting("localhost", nil) 17 | 18 | pub, priv, err := ed25519.GenerateKey(nil) 19 | if err != nil { 20 | t.Fatal(err) 21 | } 22 | 23 | header, err := matrix.CreateXMatrixHeader("localhost:8008", "localhost", "GET", "/_matrix/media/v3/download/example.org/abc", body, priv, "0") 24 | if err != nil { 25 | t.Fatal(err) 26 | } 27 | 28 | auths, err := util.GetXMatrixAuth([]string{header}) 29 | if err != nil { 30 | t.Fatal(err) 31 | } 32 | 33 | keys := make(matrix.ServerSigningKeys) 34 | keys["ed25519:0"] = pub 35 | err = matrix.ValidateXMatrixAuthHeader("GET", "/_matrix/media/v3/download/example.org/abc", body, auths, keys, "localhost") 36 | assert.NoError(t, err) 37 | } 38 | 39 | func TestXMatrixAuthDestinationMismatch(t *testing.T) { 40 | body := []byte(nil) 41 | 42 | config.AddDomainForTesting("localhost", nil) 43 | 44 | pub, priv, err := ed25519.GenerateKey(nil) 45 | if err != nil { 46 | t.Fatal(err) 47 | } 48 | 49 | header, err := matrix.CreateXMatrixHeader("localhost:8008", "localhost:1234", "GET", "/_matrix/media/v3/download/example.org/abc", body, priv, "0") 50 | if err != nil { 51 | t.Fatal(err) 52 | } 53 | 54 | auths, err := util.GetXMatrixAuth([]string{header}) 55 | if err != nil { 56 | t.Fatal(err) 57 | } 58 | 59 | keys := make(matrix.ServerSigningKeys) 60 | keys["ed25519:0"] = pub 61 | err = matrix.ValidateXMatrixAuthHeader("GET", "/_matrix/media/v3/download/example.org/abc", body, auths, keys, "localhost:1234") 62 | assert.ErrorIs(t, err, matrix.ErrWrongDestination) 63 | } 64 | -------------------------------------------------------------------------------- /thumbnailing/i/01-factories.go: -------------------------------------------------------------------------------- 1 | package i 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/t2bot/matrix-media-repo/common/rcontext" 7 | "github.com/t2bot/matrix-media-repo/thumbnailing/m" 8 | "github.com/t2bot/matrix-media-repo/util/readers" 9 | ) 10 | 11 | type Generator interface { 12 | supportedContentTypes() []string 13 | supportsAnimation() bool 14 | matches(img io.Reader, contentType string) bool 15 | GenerateThumbnail(img io.Reader, contentType string, width int, height int, method string, animated bool, ctx rcontext.RequestContext) (*m.Thumbnail, error) 16 | 
GetOriginDimensions(b io.Reader, contentType string, ctx rcontext.RequestContext) (bool, int, int, error) 17 | } 18 | 19 | type AudioGenerator interface { 20 | Generator 21 | GetAudioData(b io.Reader, nKeys int, ctx rcontext.RequestContext) (*m.AudioInfo, error) 22 | } 23 | 24 | var generators = make([]Generator, 0) 25 | 26 | func GetGenerator(img io.Reader, contentType string, needsAnimation bool) (Generator, io.Reader) { 27 | br := readers.NewBufferReadsReader(img) 28 | for _, g := range generators { 29 | if needsAnimation && !g.supportsAnimation() { 30 | continue 31 | } 32 | if g.matches(br, contentType) { 33 | return g, br.GetRewoundReader() 34 | } 35 | } 36 | if needsAnimation { 37 | // try again, this time without animation 38 | return GetGenerator(br.GetRewoundReader(), contentType, false) 39 | } 40 | return nil, br.GetRewoundReader() 41 | } 42 | 43 | func GetSupportedContentTypes() []string { 44 | a := make([]string, 0) 45 | for _, d := range generators { 46 | a = append(a, d.supportedContentTypes()...) 47 | } 48 | return a 49 | } 50 | -------------------------------------------------------------------------------- /thumbnailing/i/bmp.go: -------------------------------------------------------------------------------- 1 | package i 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | "github.com/t2bot/matrix-media-repo/thumbnailing/m" 9 | "github.com/t2bot/matrix-media-repo/util" 10 | "golang.org/x/image/bmp" 11 | ) 12 | 13 | type bmpGenerator struct { 14 | } 15 | 16 | func (d bmpGenerator) supportedContentTypes() []string { 17 | return []string{"image/bmp", "image/x-bmp"} 18 | } 19 | 20 | func (d bmpGenerator) supportsAnimation() bool { 21 | return false 22 | } 23 | 24 | func (d bmpGenerator) matches(img io.Reader, contentType string) bool { 25 | return util.ArrayContains(d.supportedContentTypes(), contentType) 26 | } 27 | 28 | func (d bmpGenerator) GetOriginDimensions(b io.Reader, contentType string, ctx rcontext.RequestContext) (bool, int, int, error) { 29 | i, err := bmp.DecodeConfig(b) 30 | if err != nil { 31 | return false, 0, 0, err 32 | } 33 | return true, i.Width, i.Height, nil 34 | } 35 | 36 | func (d bmpGenerator) GenerateThumbnail(b io.Reader, contentType string, width int, height int, method string, animated bool, ctx rcontext.RequestContext) (*m.Thumbnail, error) { 37 | src, err := bmp.Decode(b) 38 | if err != nil { 39 | return nil, errors.New("bmp: error decoding thumbnail: " + err.Error()) 40 | } 41 | 42 | return pngGenerator{}.GenerateThumbnailOf(src, width, height, method, ctx) 43 | } 44 | 45 | func init() { 46 | generators = append(generators, bmpGenerator{}) 47 | } 48 | -------------------------------------------------------------------------------- /thumbnailing/i/heif.go: -------------------------------------------------------------------------------- 1 | package i 2 | 3 | import ( 4 | "errors" 5 | "image" 6 | "io" 7 | 8 | _ "github.com/strukturag/libheif/go/heif" 9 | "github.com/t2bot/matrix-media-repo/common/rcontext" 10 | "github.com/t2bot/matrix-media-repo/thumbnailing/m" 11 | "github.com/t2bot/matrix-media-repo/util" 12 | ) 13 | 14 | type heifGenerator struct { 15 | } 16 | 17 | func (d heifGenerator) supportedContentTypes() []string { 18 | return []string{"image/heif", "image/heic"} 19 | } 20 | 21 | func (d heifGenerator) supportsAnimation() bool { 22 | return true 23 | } 24 | 25 | func (d heifGenerator) matches(img io.Reader, contentType string) bool { 26 | return 
util.ArrayContains(d.supportedContentTypes(), contentType) 27 | } 28 | 29 | func (d heifGenerator) GetOriginDimensions(b io.Reader, contentType string, ctx rcontext.RequestContext) (bool, int, int, error) { 30 | cfg, _, err := image.DecodeConfig(b) 31 | if err != nil { 32 | return false, 0, 0, err 33 | } 34 | return true, cfg.Width, cfg.Height, nil 35 | } 36 | 37 | func (d heifGenerator) GenerateThumbnail(b io.Reader, contentType string, width int, height int, method string, animated bool, ctx rcontext.RequestContext) (*m.Thumbnail, error) { 38 | src, _, err := image.Decode(b) 39 | if err != nil { 40 | return nil, errors.New("heif: error decoding thumbnail: " + err.Error()) 41 | } 42 | 43 | return pngGenerator{}.GenerateThumbnailOf(src, width, height, method, ctx) 44 | } 45 | 46 | func init() { 47 | generators = append(generators, heifGenerator{}) 48 | } 49 | -------------------------------------------------------------------------------- /thumbnailing/i/png.go: -------------------------------------------------------------------------------- 1 | package i 2 | 3 | import ( 4 | "errors" 5 | "image" 6 | _ "image/png" 7 | "io" 8 | 9 | "github.com/disintegration/imaging" 10 | "github.com/t2bot/matrix-media-repo/common/rcontext" 11 | "github.com/t2bot/matrix-media-repo/thumbnailing/m" 12 | "github.com/t2bot/matrix-media-repo/thumbnailing/u" 13 | ) 14 | 15 | type pngGenerator struct { 16 | } 17 | 18 | func (d pngGenerator) supportedContentTypes() []string { 19 | return []string{"image/png"} 20 | } 21 | 22 | func (d pngGenerator) supportsAnimation() bool { 23 | return false 24 | } 25 | 26 | func (d pngGenerator) matches(img io.Reader, contentType string) bool { 27 | return contentType == "image/png" 28 | } 29 | 30 | func (d pngGenerator) GetOriginDimensions(b io.Reader, contentType string, ctx rcontext.RequestContext) (bool, int, int, error) { 31 | i, _, err := image.DecodeConfig(b) 32 | if err != nil { 33 | return false, 0, 0, err 34 | } 35 | return true, i.Width, i.Height, nil 36 | } 37 | 38 | func (d pngGenerator) GenerateThumbnail(b io.Reader, contentType string, width int, height int, method string, animated bool, ctx rcontext.RequestContext) (*m.Thumbnail, error) { 39 | src, err := imaging.Decode(b) 40 | if err != nil { 41 | return nil, errors.New("png: error decoding thumbnail: " + err.Error()) 42 | } 43 | 44 | return d.GenerateThumbnailOf(src, width, height, method, ctx) 45 | } 46 | 47 | func (d pngGenerator) GenerateThumbnailOf(src image.Image, width int, height int, method string, ctx rcontext.RequestContext) (*m.Thumbnail, error) { 48 | thumb, err := u.MakeThumbnail(src, method, width, height) 49 | if err != nil || thumb == nil { 50 | return nil, err 51 | } 52 | 53 | pr, pw := io.Pipe() 54 | go func(pw *io.PipeWriter, p image.Image) { 55 | err = u.Encode(ctx, pw, p) 56 | if err != nil { 57 | _ = pw.CloseWithError(errors.New("png: error encoding thumbnail: " + err.Error())) 58 | } else { 59 | _ = pw.Close() 60 | } 61 | }(pw, thumb) 62 | 63 | return &m.Thumbnail{ 64 | Animated: false, 65 | ContentType: "image/png", 66 | Reader: pr, 67 | }, nil 68 | } 69 | 70 | func init() { 71 | generators = append(generators, pngGenerator{}) 72 | } 73 | -------------------------------------------------------------------------------- /thumbnailing/i/svg.go: -------------------------------------------------------------------------------- 1 | package i 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | "os" 7 | "os/exec" 8 | "path" 9 | 10 | "github.com/t2bot/matrix-media-repo/common/rcontext" 11 | 
"github.com/t2bot/matrix-media-repo/thumbnailing/m" 12 | ) 13 | 14 | type svgGenerator struct { 15 | } 16 | 17 | func (d svgGenerator) supportedContentTypes() []string { 18 | return []string{"image/svg+xml"} 19 | } 20 | 21 | func (d svgGenerator) supportsAnimation() bool { 22 | return false 23 | } 24 | 25 | func (d svgGenerator) matches(img io.Reader, contentType string) bool { 26 | return contentType == "image/svg+xml" 27 | } 28 | 29 | func (d svgGenerator) GetOriginDimensions(b io.Reader, contentType string, ctx rcontext.RequestContext) (bool, int, int, error) { 30 | return false, 0, 0, nil 31 | } 32 | 33 | func (d svgGenerator) GenerateThumbnail(b io.Reader, contentType string, width int, height int, method string, animated bool, ctx rcontext.RequestContext) (*m.Thumbnail, error) { 34 | dir, err := os.MkdirTemp(os.TempDir(), "mmr-svg") 35 | if err != nil { 36 | return nil, errors.New("svg: error creating temporary directory: " + err.Error()) 37 | } 38 | 39 | tempFile1 := path.Join(dir, "i.svg") 40 | tempFile2 := path.Join(dir, "o.png") 41 | 42 | defer os.Remove(tempFile1) 43 | defer os.Remove(tempFile2) 44 | defer os.Remove(dir) 45 | 46 | f, err := os.OpenFile(tempFile1, os.O_RDWR|os.O_CREATE, 0640) 47 | if err != nil { 48 | return nil, errors.New("svg: error creating temp svg file: " + err.Error()) 49 | } 50 | if _, err = io.Copy(f, b); err != nil { 51 | return nil, errors.New("svg: error writing temp svg file: " + err.Error()) 52 | } 53 | 54 | err = exec.Command("magick", "SVG:"+tempFile1, "-resize", "'4096x4096>'", tempFile2).Run() 55 | if err != nil { 56 | return nil, errors.New("svg: error converting svg file: " + err.Error()) 57 | } 58 | 59 | f, err = os.OpenFile(tempFile2, os.O_RDONLY, 0640) 60 | if err != nil { 61 | return nil, errors.New("svg: error reading temp png file: " + err.Error()) 62 | } 63 | defer f.Close() 64 | 65 | return pngGenerator{}.GenerateThumbnail(f, "image/png", width, height, method, false, ctx) 66 | } 67 | 68 | func init() { 69 | generators = append(generators, svgGenerator{}) 70 | } 71 | -------------------------------------------------------------------------------- /thumbnailing/i/tiff.go: -------------------------------------------------------------------------------- 1 | package i 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | "github.com/t2bot/matrix-media-repo/thumbnailing/m" 9 | "golang.org/x/image/tiff" 10 | ) 11 | 12 | type tiffGenerator struct { 13 | } 14 | 15 | func (d tiffGenerator) supportedContentTypes() []string { 16 | return []string{"image/tiff"} 17 | } 18 | 19 | func (d tiffGenerator) supportsAnimation() bool { 20 | return false 21 | } 22 | 23 | func (d tiffGenerator) matches(img io.Reader, contentType string) bool { 24 | return contentType == "image/tiff" 25 | } 26 | 27 | func (d tiffGenerator) GetOriginDimensions(b io.Reader, contentType string, ctx rcontext.RequestContext) (bool, int, int, error) { 28 | i, err := tiff.DecodeConfig(b) 29 | if err != nil { 30 | return false, 0, 0, err 31 | } 32 | return true, i.Width, i.Height, nil 33 | } 34 | 35 | func (d tiffGenerator) GenerateThumbnail(b io.Reader, contentType string, width int, height int, method string, animated bool, ctx rcontext.RequestContext) (*m.Thumbnail, error) { 36 | src, err := tiff.Decode(b) 37 | if err != nil { 38 | return nil, errors.New("tiff: error decoding thumbnail: " + err.Error()) 39 | } 40 | 41 | return pngGenerator{}.GenerateThumbnailOf(src, width, height, method, ctx) 42 | } 43 | 44 | func init() { 45 | 
generators = append(generators, tiffGenerator{}) 46 | } 47 | -------------------------------------------------------------------------------- /thumbnailing/i/webp.go: -------------------------------------------------------------------------------- 1 | package i 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | 7 | "github.com/t2bot/matrix-media-repo/common/rcontext" 8 | "github.com/t2bot/matrix-media-repo/thumbnailing/m" 9 | "golang.org/x/image/webp" 10 | ) 11 | 12 | type webpGenerator struct { 13 | } 14 | 15 | func (d webpGenerator) supportedContentTypes() []string { 16 | return []string{"image/webp"} 17 | } 18 | 19 | func (d webpGenerator) supportsAnimation() bool { 20 | return true 21 | } 22 | 23 | func (d webpGenerator) matches(img io.Reader, contentType string) bool { 24 | return contentType == "image/webp" 25 | } 26 | 27 | func (d webpGenerator) GetOriginDimensions(b io.Reader, contentType string, ctx rcontext.RequestContext) (bool, int, int, error) { 28 | i, err := webp.DecodeConfig(b) 29 | if err != nil { 30 | return false, 0, 0, err 31 | } 32 | return true, i.Width, i.Height, nil 33 | } 34 | 35 | func (d webpGenerator) GenerateThumbnail(b io.Reader, contentType string, width int, height int, method string, animated bool, ctx rcontext.RequestContext) (*m.Thumbnail, error) { 36 | src, err := webp.Decode(b) 37 | if err != nil { 38 | return nil, errors.New("webp: error decoding thumbnail: " + err.Error()) 39 | } 40 | 41 | return pngGenerator{}.GenerateThumbnailOf(src, width, height, method, ctx) 42 | } 43 | 44 | func init() { 45 | generators = append(generators, webpGenerator{}) 46 | } 47 | -------------------------------------------------------------------------------- /thumbnailing/m/audio_info.go: -------------------------------------------------------------------------------- 1 | package m 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type AudioInfo struct { 8 | KeySamples [][2]float64 9 | Duration time.Duration 10 | TotalSamples int 11 | Channels int 12 | } 13 | -------------------------------------------------------------------------------- /thumbnailing/m/thumbnail.go: -------------------------------------------------------------------------------- 1 | package m 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | type Thumbnail struct { 8 | Animated bool 9 | ContentType string 10 | Reader io.ReadCloser 11 | } 12 | -------------------------------------------------------------------------------- /thumbnailing/u/dimensions.go: -------------------------------------------------------------------------------- 1 | package u 2 | 3 | func AdjustProperties(srcWidth int, srcHeight int, desiredWidth int, desiredHeight int, wantAnimated bool, method string) (bool, int, int, string) { 4 | aspectRatio := float32(srcHeight) / float32(srcWidth) 5 | targetAspectRatio := float32(desiredHeight) / float32(desiredWidth) 6 | if aspectRatio == targetAspectRatio { 7 | // Super unlikely, but adjust to scale anyways 8 | method = "scale" 9 | } 10 | 11 | if srcWidth <= desiredWidth && srcHeight <= desiredHeight { 12 | if wantAnimated { 13 | return true, srcWidth, srcHeight, method 14 | } else { 15 | return false, desiredWidth, desiredHeight, method 16 | } 17 | } 18 | return true, desiredWidth, desiredHeight, method 19 | } 20 | -------------------------------------------------------------------------------- /thumbnailing/u/encode.go: -------------------------------------------------------------------------------- 1 | package u 2 | 3 | import ( 4 | "image" 5 | "io" 6 | 7 | "github.com/disintegration/imaging" 8 | 
"github.com/t2bot/matrix-media-repo/common/rcontext" 9 | ) 10 | 11 | type EncodeSource int 12 | 13 | const ( 14 | GenericSource EncodeSource = 0 15 | JpegSource EncodeSource = 1 16 | ) 17 | 18 | func Encode(ctx rcontext.RequestContext, w io.Writer, img image.Image, sourceFlags ...EncodeSource) error { 19 | // This function is broken out for later trials around encoding formats (webp, jpg, etc) 20 | 21 | if len(sourceFlags) > 0 { 22 | for _, f := range sourceFlags { 23 | if f == JpegSource { 24 | // Encode JPEG source with JPEG thumbnails to avoid returning larger thumbnails 25 | // than what we started with 26 | return imaging.Encode(w, img, imaging.JPEG) 27 | } 28 | } 29 | } 30 | 31 | return imaging.Encode(w, img, imaging.PNG) 32 | } 33 | -------------------------------------------------------------------------------- /thumbnailing/u/exif.go: -------------------------------------------------------------------------------- 1 | package u 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | 8 | "github.com/dsoprea/go-exif/v3" 9 | ) 10 | 11 | type ExifOrientation struct { 12 | RotateDegrees int // should be 0, 90, 180, or 270 13 | FlipVertical bool 14 | FlipHorizontal bool 15 | } 16 | 17 | func GetExifOrientation(img io.Reader) (*ExifOrientation, error) { 18 | rawExif, err := exif.SearchAndExtractExifWithReader(img) 19 | if err != nil { 20 | if errors.Is(err, exif.ErrNoExif) { 21 | return nil, nil 22 | } 23 | return nil, errors.New("exif: error reading possible exif data: " + err.Error()) 24 | } 25 | 26 | tags, _, err := exif.GetFlatExifData(rawExif, nil) 27 | if err != nil { 28 | return nil, errors.New("exif: error parsing exif data: " + err.Error()) 29 | } 30 | 31 | var tag exif.ExifTag 32 | for _, t := range tags { 33 | if t.TagName == "Orientation" { 34 | tag = t 35 | break 36 | } 37 | } 38 | if tag.TagName != "Orientation" { 39 | return nil, nil // not found 40 | } 41 | 42 | var orientation uint16 = 0 43 | vals, ok := tag.Value.([]uint16) 44 | if !ok || len(vals) <= 0 { 45 | orientation, ok = tag.Value.(uint16) 46 | if !ok { 47 | return nil, errors.New("exif: error parsing orientation: parse error (not an int)") 48 | } 49 | } else { 50 | orientation = vals[0] 51 | } 52 | 53 | // Some devices produce invalid exif data when they intend to mean "no orientation" 54 | if orientation == 0 { 55 | return nil, nil 56 | } 57 | 58 | if orientation < 1 || orientation > 8 { 59 | return nil, fmt.Errorf("orientation out of range: %d", orientation) 60 | } 61 | 62 | flipHorizontal := orientation < 5 && (orientation%2) == 0 63 | flipVertical := orientation > 4 && (orientation%2) != 0 64 | degrees := 0 65 | 66 | // TODO: There's probably a better way to represent this 67 | if orientation == 1 || orientation == 2 { 68 | degrees = 0 69 | } else if orientation == 3 || orientation == 4 { 70 | degrees = 180 71 | } else if orientation == 5 || orientation == 6 { 72 | degrees = 270 73 | } else if orientation == 7 || orientation == 8 { 74 | degrees = 90 75 | } 76 | 77 | return &ExifOrientation{degrees, flipVertical, flipHorizontal}, nil 78 | } 79 | -------------------------------------------------------------------------------- /thumbnailing/u/framing.go: -------------------------------------------------------------------------------- 1 | package u 2 | 3 | import ( 4 | "errors" 5 | "image" 6 | "io" 7 | 8 | "github.com/disintegration/imaging" 9 | "github.com/getsentry/sentry-go" 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | func MakeThumbnail(src image.Image, method string, width int, height int) (image.Image, 
error) { 14 | var result image.Image 15 | if method == "scale" { 16 | result = imaging.Fit(src, width, height, imaging.Linear) 17 | } else if method == "crop" { 18 | result = imaging.Fill(src, width, height, imaging.Center, imaging.Linear) 19 | } else { 20 | return nil, errors.New("unrecognized method: " + method) 21 | } 22 | return result, nil 23 | } 24 | 25 | func ExtractExifOrientation(r io.Reader) *ExifOrientation { 26 | orientation, err := GetExifOrientation(r) 27 | if err != nil { 28 | // assume no orientation if there was an error reading the exif header 29 | logrus.Warn("Non-fatal error reading exif headers:", err.Error()) 30 | sentry.CaptureException(err) 31 | orientation = nil 32 | } 33 | return orientation 34 | } 35 | 36 | func ApplyOrientation(src image.Image, orientation *ExifOrientation) image.Image { 37 | result := src 38 | if orientation != nil { 39 | // Rotate first 40 | if orientation.RotateDegrees == 90 { 41 | result = imaging.Rotate90(result) 42 | } else if orientation.RotateDegrees == 180 { 43 | result = imaging.Rotate180(result) 44 | } else if orientation.RotateDegrees == 270 { 45 | result = imaging.Rotate270(result) 46 | } // else we don't care to rotate 47 | 48 | // Flip second 49 | if orientation.FlipHorizontal { 50 | result = imaging.FlipH(result) 51 | } 52 | if orientation.FlipVertical { 53 | result = imaging.FlipV(result) 54 | } 55 | } 56 | 57 | return result 58 | } 59 | -------------------------------------------------------------------------------- /thumbnailing/u/metadata.go: -------------------------------------------------------------------------------- 1 | package u 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | "os" 7 | 8 | "github.com/dhowden/tag" 9 | "github.com/getsentry/sentry-go" 10 | "github.com/sirupsen/logrus" 11 | "github.com/t2bot/matrix-media-repo/util/readers" 12 | ) 13 | 14 | func GetID3Tags(b io.Reader) (tag.Metadata, io.ReadSeekCloser, error) { 15 | var f *os.File 16 | var err error 17 | 18 | tryCleanup := func() { 19 | if f != nil { 20 | if err = os.Remove(f.Name()); err != nil && !os.IsNotExist(err) { 21 | logrus.Warnf("Error deleting temp file '%s': %s", f.Name(), err.Error()) 22 | sentry.CaptureException(errors.New("id3: error deleting temp file: " + err.Error())) 23 | } 24 | } 25 | } 26 | 27 | f, err = os.CreateTemp(os.TempDir(), "mmr-id3") 28 | if err != nil { 29 | tryCleanup() 30 | return nil, nil, err 31 | } 32 | if _, err = io.Copy(f, b); err != nil { 33 | tryCleanup() 34 | return nil, nil, err 35 | } 36 | if err = f.Close(); err != nil { 37 | tryCleanup() 38 | return nil, nil, err 39 | } 40 | if f, err = os.OpenFile(f.Name(), os.O_WRONLY, 0644); err != nil { 41 | tryCleanup() 42 | return nil, nil, err 43 | } 44 | 45 | meta, _ := tag.ReadFrom(f) // we don't care about errors in this process 46 | if _, err = f.Seek(0, io.SeekStart); err != nil { 47 | tryCleanup() 48 | return nil, nil, err 49 | } 50 | 51 | return meta, readers.NewTempFileCloser("", f.Name(), f), nil 52 | } 53 | -------------------------------------------------------------------------------- /thumbnailing/u/sample.go: -------------------------------------------------------------------------------- 1 | package u 2 | 3 | import ( 4 | "errors" 5 | "math" 6 | 7 | "github.com/faiface/beep" 8 | ) 9 | 10 | func FastSampleAudio(stream beep.StreamSeekCloser, numSamples int) ([][2]float64, error) { 11 | everyNth := int(math.Round(float64(stream.Len()) / float64(numSamples))) 12 | samples := make([][2]float64, numSamples) 13 | totalRead := 0 14 | for i := range samples { 15 | pos := 
i * everyNth 16 | if stream.Position() != pos { 17 | err := stream.Seek(pos) 18 | if err != nil { 19 | return nil, errors.New("fast-sample: could not seek: " + err.Error()) 20 | } 21 | } 22 | 23 | sample := make([][2]float64, 1) 24 | n, _ := stream.Stream(sample) 25 | if stream.Err() != nil { 26 | return nil, errors.New("fast-sample: could not stream: " + stream.Err().Error()) 27 | } 28 | if n > 0 { 29 | samples[i] = sample[0] 30 | totalRead++ 31 | } else { 32 | break 33 | } 34 | } 35 | if totalRead != len(samples) { 36 | return samples[:totalRead], nil 37 | } 38 | return samples, nil 39 | } 40 | -------------------------------------------------------------------------------- /url_previewing/m/errors.go: -------------------------------------------------------------------------------- 1 | package m 2 | 3 | import "errors" 4 | 5 | var ErrPreviewUnsupported = errors.New("preview not supported by this previewer") 6 | -------------------------------------------------------------------------------- /url_previewing/m/preview_result.go: -------------------------------------------------------------------------------- 1 | package m 2 | 3 | import "io" 4 | 5 | type PreviewResult struct { 6 | Url string 7 | SiteName string 8 | Type string 9 | Description string 10 | Title string 11 | Image *PreviewImage 12 | } 13 | 14 | type PreviewImage struct { 15 | ContentType string 16 | Data io.ReadCloser 17 | Filename string 18 | } 19 | -------------------------------------------------------------------------------- /url_previewing/m/url_payload.go: -------------------------------------------------------------------------------- 1 | package m 2 | 3 | import ( 4 | "net/url" 5 | ) 6 | 7 | type UrlPayload struct { 8 | UrlString string 9 | ParsedUrl *url.URL 10 | } 11 | -------------------------------------------------------------------------------- /url_previewing/u/summarize.go: -------------------------------------------------------------------------------- 1 | package u 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | ) 7 | 8 | var surroundingWhitespace = regexp.MustCompile(`^[\s\p{Zs}]+|[\s\p{Zs}]+$`) 9 | var interiorWhitespace = regexp.MustCompile(`[\s\p{Zs}]{2,}`) 10 | var newlines = regexp.MustCompile(`[\r\n]`) 11 | 12 | func Summarize(text string, maxWords int, maxLength int) string { 13 | // Normalize the whitespace to be something useful (crush it to one giant line) 14 | text = surroundingWhitespace.ReplaceAllString(text, "") 15 | text = interiorWhitespace.ReplaceAllString(text, " ") 16 | text = newlines.ReplaceAllString(text, " ") 17 | 18 | words := strings.Split(text, " ") 19 | result := text 20 | if len(words) >= maxWords { 21 | result = strings.Join(words[:maxWords], " ") 22 | } 23 | 24 | if len(result) > maxLength { 25 | // First try trimming off the last word 26 | words = strings.Split(result, " ") 27 | newResult := "" 28 | for _, word := range words { 29 | if len(newResult+" "+word) > maxLength { 30 | break 31 | } 32 | newResult = newResult + " " + word 33 | } 34 | result = newResult 35 | } 36 | 37 | if len(result) > maxLength { 38 | // It's still too long, just trim the thing and add an ellipsis 39 | result = result[:maxLength] + "..." 
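// note: the appended ellipsis can push the result up to three characters past maxLength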
40 | } 41 | 42 | return result 43 | } 44 | -------------------------------------------------------------------------------- /util/arrays.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | func ArrayContains(a []string, v string) bool { 4 | for _, e := range a { 5 | if e == v { 6 | return true 7 | } 8 | } 9 | 10 | return false 11 | } 12 | -------------------------------------------------------------------------------- /util/canonical_json.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | ) 7 | 8 | func EncodeCanonicalJson(obj any) ([]byte, error) { 9 | b, err := json.Marshal(obj) 10 | if err != nil { 11 | return nil, err 12 | } 13 | 14 | // De-encode values 15 | b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) 16 | b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) 17 | b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) 18 | 19 | return b, nil 20 | } 21 | -------------------------------------------------------------------------------- /util/config.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/t2bot/matrix-media-repo/common/config" 7 | ) 8 | 9 | func IsServerOurs(server string) bool { 10 | hs := config.GetDomain(server) 11 | return hs != nil 12 | } 13 | 14 | func IsGlobalAdmin(userId string) bool { 15 | for _, admin := range config.Get().Admins { 16 | if admin == userId { 17 | return true 18 | } 19 | } 20 | 21 | return false 22 | } 23 | 24 | func IsHostIgnored(serverName string) bool { 25 | serverName = strings.ToLower(serverName) 26 | for _, host := range config.Get().Federation.IgnoredHosts { 27 | if strings.ToLower(host) == serverName { 28 | return true 29 | } 30 | } 31 | return false 32 | } 33 | 34 | func GetOurDomains() []string { 35 | vals := make([]string, 0) 36 | for _, d := range config.AllDomains() { 37 | vals = append(vals, d.Name) 38 | } 39 | return vals 40 | } 41 | -------------------------------------------------------------------------------- /util/encoding.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "io" 5 | "strings" 6 | "unicode/utf8" 7 | 8 | "github.com/saintfish/chardet" 9 | "golang.org/x/net/html/charset" 10 | ) 11 | 12 | func ToUtf8(text string, possibleContentType string) string { 13 | if utf8.ValidString(text) { 14 | return text 15 | } 16 | 17 | textCharset := "" 18 | 19 | if possibleContentType != "" { 20 | _, name, ok := charset.DetermineEncoding([]byte(text), possibleContentType) 21 | if ok { 22 | textCharset = name 23 | } 24 | } 25 | 26 | if textCharset == "" { 27 | detector := chardet.NewTextDetector() 28 | cs, err := detector.DetectBest([]byte(text)) 29 | if err != nil { 30 | return text // best we can do 31 | } 32 | textCharset = cs.Charset 33 | } 34 | 35 | r, err := charset.NewReader(strings.NewReader(text), textCharset) 36 | if err != nil { 37 | return text // best we can do 38 | } 39 | 40 | converted, err := io.ReadAll(r) 41 | if err != nil { 42 | return text // best we can do 43 | } 44 | 45 | return string(converted) 46 | } 47 | -------------------------------------------------------------------------------- /util/identifiers.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "errors" 5 | "strings" 6 | ) 7 | 8 | func 
SplitMxc(mxc string) (string, string, error) { 9 | if strings.Index(mxc, "mxc://") != 0 { 10 | return "", "", errors.New("not a valid mxc uri: missing protocol") 11 | } 12 | 13 | mxc = mxc[6:] // remove protocol 14 | mxc = strings.Split(mxc, "?")[0] // take off any query string 15 | 16 | parts := strings.Split(mxc, "/") 17 | if len(parts) != 2 { 18 | return "", "", errors.New("not a valid mxc uri: not in the format of mxc://origin/media_id") 19 | } 20 | 21 | return parts[0], parts[1], nil // origin, media id 22 | } 23 | 24 | func SplitUserId(userId string) (string, string, error) { 25 | if strings.Index(userId, "@") != 0 { 26 | return "", "", errors.New("not a valid user id: missing symbol") 27 | } 28 | 29 | userId = userId[1:] // remove symbol 30 | parts := strings.Split(userId, ":") 31 | 32 | if len(parts) < 2 { 33 | return "", "", errors.New("not a valid user id: not enough parts") 34 | } 35 | 36 | localpart := parts[0] 37 | domain := strings.Join(parts[1:], ":") 38 | 39 | return localpart, domain, nil 40 | } 41 | -------------------------------------------------------------------------------- /util/ids/snowflake.go: -------------------------------------------------------------------------------- 1 | package ids 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | "strconv" 7 | 8 | "github.com/bwmarrin/snowflake" 9 | ) 10 | 11 | func GetMachineId() int64 { 12 | if val, ok := os.LookupEnv("MACHINE_ID"); ok { 13 | if i, err := strconv.ParseInt(val, 10, 64); err == nil { 14 | return i 15 | } 16 | } 17 | return 0 18 | } 19 | 20 | var sfnode *snowflake.Node 21 | 22 | func makeSnowflake() (*snowflake.Node, error) { 23 | if sfnode != nil { 24 | return sfnode, nil 25 | } 26 | machineId := GetMachineId() 27 | node, err := snowflake.NewNode(machineId) 28 | if err != nil { 29 | return nil, err 30 | } 31 | sfnode = node 32 | return sfnode, nil 33 | } 34 | 35 | func SetMachineId(id int64) error { 36 | if err := os.Setenv("MACHINE_ID", strconv.FormatInt(id, 10)); err != nil { 37 | return err 38 | } 39 | sfnode = nil 40 | if GetMachineId() != id { 41 | return errors.New("unexpected error setting machine ID") 42 | } 43 | if _, err := makeSnowflake(); err != nil { 44 | return err 45 | } 46 | return nil 47 | } 48 | -------------------------------------------------------------------------------- /util/ids/unique.go: -------------------------------------------------------------------------------- 1 | package ids 2 | 3 | import ( 4 | "github.com/t2bot/matrix-media-repo/util" 5 | ) 6 | 7 | func NewUniqueId() (string, error) { 8 | r, err := util.GenerateRandomString(32) // pad out the snowflake 9 | if err != nil { 10 | return "", err 11 | } 12 | sf, err := makeSnowflake() 13 | if err != nil { 14 | return "", err 15 | } 16 | return r + sf.Generate().String(), nil 17 | } 18 | -------------------------------------------------------------------------------- /util/math.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | func MaxInt(a int, b int) int { 4 | if a > b { 5 | return a 6 | } 7 | return b 8 | } 9 | 10 | func MinInt(a int, b int) int { 11 | if a < b { 12 | return a 13 | } 14 | return b 15 | } 16 | 17 | func MinFloat32(a float32, b float32) float32 { 18 | if a < b { 19 | return a 20 | } 21 | return b 22 | } 23 | -------------------------------------------------------------------------------- /util/matrix_media_part.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "io" 5 | 
"mime/multipart" 6 | "net/http" 7 | "net/textproto" 8 | ) 9 | 10 | type MatrixMediaPart struct { 11 | Header textproto.MIMEHeader 12 | Body io.ReadCloser 13 | } 14 | 15 | func MatrixMediaPartFromResponse(r *http.Response) *MatrixMediaPart { 16 | return &MatrixMediaPart{ 17 | Header: textproto.MIMEHeader(r.Header), 18 | Body: r.Body, 19 | } 20 | } 21 | 22 | func MatrixMediaPartFromMimeMultipart(p *multipart.Part) *MatrixMediaPart { 23 | return &MatrixMediaPart{ 24 | Header: p.Header, 25 | Body: p, 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /util/mime.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "mime" 5 | "strings" 6 | ) 7 | 8 | func FixContentType(ct string) string { 9 | return strings.Split(ct, ";")[0] 10 | } 11 | 12 | func ExtensionForContentType(ct string) string { 13 | // custom overrides 14 | if ct == "image/png" { 15 | return ".png" 16 | } else if ct == "image/jpeg" { 17 | return ".jpg" 18 | } 19 | 20 | // Otherwise look it up 21 | exts, _ := mime.ExtensionsByType(ct) 22 | if len(exts) > 0 { 23 | return exts[0] 24 | } 25 | return ".bin" 26 | } 27 | 28 | func CanInline(ct string) bool { 29 | ct = FixContentType(ct) 30 | return ArrayContains(InlineContentTypes, ct) 31 | } 32 | 33 | var InlineContentTypes = []string{ 34 | // Types are inherited from https://github.com/matrix-org/synapse/pull/15988 35 | 36 | "text/css", 37 | "text/plain", 38 | "text/csv", 39 | "application/json", 40 | "application/ld+json", 41 | "image/jpeg", 42 | "image/gif", 43 | "image/png", 44 | "image/apng", 45 | "image/webp", 46 | "image/avif", 47 | "video/mp4", 48 | "video/webm", 49 | "video/ogg", 50 | "video/quicktime", 51 | "audio/mp4", 52 | "audio/webm", 53 | "audio/aac", 54 | "audio/mpeg", 55 | "audio/ogg", 56 | "audio/wave", 57 | "audio/wav", 58 | "audio/x-wav", 59 | "audio/x-pn-wav", 60 | "audio/flac", 61 | "audio/x-flac", 62 | } 63 | -------------------------------------------------------------------------------- /util/mime_detect.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "strings" 7 | 8 | "github.com/h2non/filetype" 9 | ) 10 | 11 | func DetectMimeType(r io.ReadSeeker) (string, error) { 12 | buf := make([]byte, 512) 13 | 14 | current, err := r.Seek(0, io.SeekCurrent) 15 | if err != nil { 16 | return "", err 17 | } 18 | restore := func() error { 19 | if _, err2 := r.Seek(current, io.SeekStart); err2 != nil { 20 | return err2 21 | } 22 | return nil 23 | } 24 | 25 | if _, err := r.Seek(0, io.SeekStart); err != nil { 26 | return "", err 27 | } 28 | if _, err := r.Read(buf); err != nil { 29 | return "", err 30 | } 31 | 32 | kind, err := filetype.Match(buf) 33 | if err != nil || kind == filetype.Unknown { 34 | // Try against http library upon error 35 | contentType := http.DetectContentType(buf) 36 | contentType = strings.Split(contentType, ";")[0] 37 | 38 | // http should return an octet-stream anyway, but just in case: 39 | if contentType == "" { 40 | contentType = "application/octet-stream" 41 | } 42 | 43 | return contentType, restore() 44 | } 45 | 46 | return kind.MIME.Value, restore() 47 | } 48 | -------------------------------------------------------------------------------- /util/mxc.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | func MxcUri(origin string, mediaId string) string { 4 | return "mxc://" + origin 

--------------------------------------------------------------------------------
/util/random.go:
--------------------------------------------------------------------------------
package util

import (
    "crypto/rand"
    "crypto/sha1"
    "encoding/hex"
)

func GenerateRandomString(nBytes int) (string, error) {
    b := make([]byte, nBytes)
    _, err := rand.Read(b)
    if err != nil {
        return "", err
    }

    hasher := sha1.New()
    hasher.Write(b)
    return hex.EncodeToString(hasher.Sum(nil)), nil
}

--------------------------------------------------------------------------------
/util/readers/buffer_reads_reader.go:
--------------------------------------------------------------------------------
package readers

import (
    "bytes"
    "errors"
    "io"
)

type BufferReadsReader struct {
    io.Reader
    r        io.Reader
    original io.Reader
    b        *bytes.Buffer
    pr       io.Reader
}

func NewBufferReadsReader(r io.Reader) *BufferReadsReader {
    buf := bytes.NewBuffer(make([]byte, 0))
    tee := io.TeeReader(r, buf)
    return &BufferReadsReader{
        r:        tee,
        b:        buf,
        original: r,
        pr:       nil,
    }
}

func (r *BufferReadsReader) Read(p []byte) (int, error) {
    if r.pr != nil {
        return 0, errors.New("cannot read from this stream anymore - use the created prefixed reader")
    }
    return r.r.Read(p)
}

func (r *BufferReadsReader) MakeRewoundReader() (io.Reader, error) {
    if r.pr != nil {
        return r.pr, errors.New("prefixed reader already created from this reader")
    }
    r.pr = io.MultiReader(r.b, r.original)
    return r.pr, nil
}

func (r *BufferReadsReader) GetRewoundReader() io.Reader {
    pr, _ := r.MakeRewoundReader()
    return pr
}

--------------------------------------------------------------------------------
/util/readers/cancel_closer.go:
--------------------------------------------------------------------------------
package readers

import "io"

type CancellableCloser interface {
    io.ReadCloser
}

type CancelCloser struct {
    io.ReadCloser
    cancel func()
}

type CancelSeekCloser struct {
    io.ReadSeekCloser
    cancel func()
}

func NewCancelCloser(r io.ReadCloser, cancel func()) CancellableCloser {
    if rsc, ok := r.(io.ReadSeekCloser); ok {
        return &CancelSeekCloser{
            ReadSeekCloser: rsc,
            cancel:         cancel,
        }
    } else {
        return &CancelCloser{
            ReadCloser: r,
            cancel:     cancel,
        }
    }
}

func (c *CancelCloser) Close() error {
    c.cancel()
    return c.ReadCloser.Close()
}

func (c *CancelSeekCloser) Close() error {
    c.cancel()
    return c.ReadSeekCloser.Close()
}

--------------------------------------------------------------------------------
/util/readers/error_limit_reader.go:
--------------------------------------------------------------------------------
package readers

import (
    "io"

    "github.com/t2bot/matrix-media-repo/common"
)

func LimitReaderWithOverrunError(r io.ReadCloser, n int64) io.ReadCloser {
    return &limitedReader{r: r, n: n}
}

type limitedReader struct {
    r io.ReadCloser
    n int64
}

func (r *limitedReader) Read(p []byte) (int, error) {
    if r.n <= 0 {
        if len(p) == 0 {
            return 0, nil
        }
        // See if we can read one more byte, indicating the stream is too big
        b := make([]byte, 1)
        n, _ := r.r.Read(b)
        if n > 0 {
            // An extra byte was available, so the stream exceeds the limit.
            p[0] = b[0]
            return n, common.ErrMediaTooLarge
        }
        // Nothing more could be read - the stream ends within the limit.
        return 0, io.EOF
    }

    // Reads pass through at the caller's requested size; the remaining budget
    // is re-checked on the next call to Read.
    n, err := r.r.Read(p)
    r.n -= int64(n)
    return n, err
}

func (r *limitedReader) Close() error {
    return r.r.Close()
}
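Example (illustrative only, not part of the repository): a minimal sketch of LimitReaderWithOverrunError capping a stream at 1 KiB and rejecting anything larger. The byte sizes are arbitrary, the import paths are assumed from the file locations above, and ErrMediaTooLarge is assumed to be a sentinel error, as its use above suggests.

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"

    "github.com/t2bot/matrix-media-repo/common"
    "github.com/t2bot/matrix-media-repo/util/readers"
)

func main() {
    payload := io.NopCloser(bytes.NewReader(make([]byte, 2048))) // pretend 2 KiB upload
    limited := readers.LimitReaderWithOverrunError(payload, 1024) // allow at most 1 KiB
    defer limited.Close()

    buf := make([]byte, 512)
    var total int64
    for {
        n, err := limited.Read(buf)
        total += int64(n)
        if errors.Is(err, common.ErrMediaTooLarge) {
            fmt.Println("rejected: stream exceeded the limit after", total, "bytes")
            return
        }
        if err == io.EOF {
            fmt.Println("accepted:", total, "bytes")
            return
        }
        if err != nil {
            panic(err)
        }
    }
}

The overrun check only fires once the configured budget is exhausted, which is why the loop reads in chunks rather than draining the stream in a single large call.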

--------------------------------------------------------------------------------
/util/readers/maybe_closer.go:
--------------------------------------------------------------------------------
package readers

import "io"

func MakeCloser(r io.Reader) io.ReadCloser {
    if rc, ok := r.(io.ReadCloser); ok {
        return rc
    }
    return io.NopCloser(r)
}

--------------------------------------------------------------------------------
/util/readers/multipart_reader.go:
--------------------------------------------------------------------------------
package readers

import (
    "bytes"
    "io"
    "mime/multipart"
    "net/textproto"
    "net/url"

    "github.com/alioygur/is"
)

type MultipartPart struct {
    ContentType string
    FileName    string
    Location    string
    Reader      io.ReadCloser
}

func NewMultipartReader(boundary string, parts ...*MultipartPart) io.ReadCloser {
    r, w := io.Pipe()
    go func() {
        mpw := multipart.NewWriter(w)
        err := mpw.SetBoundary(boundary)
        if err != nil {
            // We don't have a good error route, and don't expect this to fail anyways.
            panic(err)
        }

        for _, part := range parts {
            headers := textproto.MIMEHeader{}
            if part.ContentType != "" {
                headers.Set("Content-Type", part.ContentType)
            }
            if part.FileName != "" {
                if is.ASCII(part.FileName) {
                    headers.Set("Content-Disposition", "attachment; filename="+url.QueryEscape(part.FileName))
                } else {
                    headers.Set("Content-Disposition", "attachment; filename*=utf-8''"+url.QueryEscape(part.FileName))
                }
            }
            if part.Location != "" {
                headers.Set("Location", part.Location)
                part.Reader = io.NopCloser(bytes.NewReader(make([]byte, 0)))
            }

            partW, err := mpw.CreatePart(headers)
            if err != nil {
                _ = w.CloseWithError(err)
                return
            }
            if _, err = io.Copy(partW, part.Reader); err != nil {
                _ = w.CloseWithError(err)
                return
            }
            if err = part.Reader.Close(); err != nil {
                _ = w.CloseWithError(err)
                return
            }
        }

        if err := mpw.Close(); err != nil {
            _ = w.CloseWithError(err)
        }
        _ = w.Close()
    }()
    return MakeCloser(r)
}

--------------------------------------------------------------------------------
/util/readers/nop_seek_closer.go:
--------------------------------------------------------------------------------
package readers

import "io"

type nopSeekCloser struct {
    io.ReadSeeker
}

func (r nopSeekCloser) Close() error {
    return nil
}

func NopSeekCloser(r io.ReadSeeker) io.ReadSeekCloser {
    return nopSeekCloser{ReadSeeker: r}
}

--------------------------------------------------------------------------------
/util/readers/rewind_reader.go:
--------------------------------------------------------------------------------
package readers

import "io"

type RewindReader struct {
    io.ReadCloser
    r io.ReadSeeker
}

func NewRewindReader(r io.ReadSeeker) *RewindReader {
    return &RewindReader{
        r: r,
    }
}

func (r *RewindReader) Read(p []byte) (int, error) {
    return r.r.Read(p)
}

func (r *RewindReader) Close() error {
    _, err := r.r.Seek(0, io.SeekStart)
    return err
}
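Example (illustrative only, not part of the repository): a rough sketch of NewMultipartReader assembling a two-part body, one JSON part and one Location-only redirect part. The boundary string, URL, and import path are invented for the example.

package main

import (
    "fmt"
    "io"
    "strings"

    "github.com/t2bot/matrix-media-repo/util/readers"
)

func main() {
    body := readers.NewMultipartReader("example-boundary",
        &readers.MultipartPart{
            ContentType: "application/json",
            Reader:      io.NopCloser(strings.NewReader("{}")),
        },
        &readers.MultipartPart{
            Location: "https://cdn.example.org/redirect", // Location-only parts carry no body
        },
    )
    defer body.Close()

    // Print the assembled multipart payload.
    raw, err := io.ReadAll(body)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(raw))
}

Parts that set a Location header are given an empty body, matching how the writer above substitutes an empty reader for them.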

--------------------------------------------------------------------------------
/util/readers/temp_file_closer.go:
--------------------------------------------------------------------------------
package readers

import (
    "io"
    "os"
)

type TempFileCloser struct {
    io.ReadSeekCloser
    fname    string
    fpath    string
    upstream io.ReadSeekCloser
    closed   bool
}

func NewTempFileCloser(fpath string, fname string, upstream io.ReadSeekCloser) *TempFileCloser {
    return &TempFileCloser{
        fname:    fname,
        fpath:    fpath,
        upstream: upstream,
        closed:   false,
    }
}

func (c *TempFileCloser) Close() error {
    if c.closed {
        return nil
    }

    upstreamErr := c.upstream.Close()
    // don't return upstreamErr yet because we want to try to delete the temp file

    var err error
    if err = os.Remove(c.fname); err != nil && !os.IsNotExist(err) {
        return err
    }
    if c.fpath != "" {
        if err = os.Remove(c.fpath); err != nil && !os.IsNotExist(err) {
            return err
        }
    }
    c.closed = true
    return upstreamErr
}

func (c *TempFileCloser) Read(p []byte) (n int, err error) {
    return c.upstream.Read(p)
}

func (c *TempFileCloser) Seek(offset int64, whence int) (int64, error) {
    return c.upstream.Seek(offset, whence)
}

--------------------------------------------------------------------------------
/util/sfcache/sfcache.go:
--------------------------------------------------------------------------------
package sfcache

import (
    "sync"

    "github.com/t2bot/go-typed-singleflight"
)

type SingleflightCache[T comparable] struct {
    sf    *typedsf.Group[T]
    cache *sync.Map
}

func NewSingleflightCache[T comparable]() *SingleflightCache[T] {
    return &SingleflightCache[T]{
        sf:    new(typedsf.Group[T]),
        cache: new(sync.Map),
    }
}

func (c *SingleflightCache[T]) Do(key string, fn func() (T, error)) (T, error) {
    if v, ok := c.cache.Load(key); ok {
        // Safe cast because incorrect types are filtered out before storage
        return v.(T), nil
    }
    var zero T
    v, err, _ := c.sf.Do(key, fn)
    if err == nil && v != zero {
        c.cache.Store(key, v)
    }
    return v, err
}

func (c *SingleflightCache[T]) OverwriteCacheKey(key string, val T) {
    c.cache.Store(key, val)
}

func (c *SingleflightCache[T]) ForgetCacheKey(key string) {
    c.cache.Delete(key)
}

--------------------------------------------------------------------------------
/util/strings.go:
--------------------------------------------------------------------------------
package util

import (
    "strings"
)

func HasAnyPrefix(val string, prefixes []string) bool {
    for _, p := range prefixes {
        if strings.HasPrefix(val, p) {
            return true
        }
    }
    return false
}

--------------------------------------------------------------------------------
/util/time.go:
--------------------------------------------------------------------------------
package util

import (
    "strconv"
    "time"
)

func NowMillis() int64 {
    return time.Now().UnixNano() / 1000000
}

func FromMillis(m int64) time.Time {
    return time.Unix(0, m*int64(time.Millisecond))
}

func CalcBlockForDuration(timeoutMs string) (time.Duration, error) {
    blockFor := 20 * time.Second
    if timeoutMs != "" {
        parsed, err := strconv.Atoi(timeoutMs)
        if err != nil {
            return 0, err
        }
        if parsed > 0 {
            // Limit to 60 seconds
            if parsed > 60000 {
                parsed = 60000
            }
            blockFor = time.Duration(parsed) * time.Millisecond
        }
    }
    return blockFor, nil
}

func GetHourBucket(ts int64) int64 {
    return (ts / 3600000) * 3600000
}
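Example (illustrative only, not part of the repository): a minimal sketch of SingleflightCache deduplicating concurrent lookups for the same key. The key name and import path are made up for the example.

package main

import (
    "fmt"
    "sync"

    "github.com/t2bot/matrix-media-repo/util/sfcache"
)

func main() {
    cache := sfcache.NewSingleflightCache[string]()

    fetch := func() (string, error) {
        fmt.Println("expensive lookup running") // printed once per cache fill, not once per caller
        return "value-for-key", nil
    }

    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            v, err := cache.Do("media:example", fetch)
            fmt.Println(v, err)
        }()
    }
    wg.Wait()

    cache.ForgetCacheKey("media:example") // force the next Do to recompute
}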
"strconv" 5 | "time" 6 | ) 7 | 8 | func NowMillis() int64 { 9 | return time.Now().UnixNano() / 1000000 10 | } 11 | 12 | func FromMillis(m int64) time.Time { 13 | return time.Unix(0, m*int64(time.Millisecond)) 14 | } 15 | 16 | func CalcBlockForDuration(timeoutMs string) (time.Duration, error) { 17 | blockFor := 20 * time.Second 18 | if timeoutMs != "" { 19 | parsed, err := strconv.Atoi(timeoutMs) 20 | if err != nil { 21 | return 0, err 22 | } 23 | if parsed > 0 { 24 | // Limit to 60 seconds 25 | if parsed > 60000 { 26 | parsed = 60000 27 | } 28 | blockFor = time.Duration(parsed) * time.Millisecond 29 | } 30 | } 31 | return blockFor, nil 32 | } 33 | 34 | func GetHourBucket(ts int64) int64 { 35 | return (ts / 3600000) * 3600000 36 | } 37 | -------------------------------------------------------------------------------- /util/unpadded_base64.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "encoding/base64" 5 | ) 6 | 7 | func DecodeUnpaddedBase64String(val string) ([]byte, error) { 8 | return base64.RawStdEncoding.DecodeString(val) 9 | } 10 | 11 | func EncodeUnpaddedBase64ToString(val []byte) string { 12 | return base64.RawStdEncoding.EncodeToString(val) 13 | } 14 | -------------------------------------------------------------------------------- /util/urls.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | func MakeUrl(parts ...string) string { 4 | res := "" 5 | for i, p := range parts { 6 | if p[len(p)-1:] == "/" { 7 | res += p[:len(p)-1] 8 | } else if p[0] != '/' && i > 0 { 9 | res += "/" + p 10 | } else { 11 | res += p 12 | } 13 | } 14 | return res 15 | } 16 | --------------------------------------------------------------------------------