├── .drone.jsonnet ├── .flake8 ├── .gitignore ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── administration.md ├── api.yaml ├── conftest.py ├── contrib ├── auth-example.py ├── blinding.py ├── deb.oxen.io.gpg ├── pg-import.py ├── upgrade-tests │ ├── common.sh │ ├── dump-db.py │ ├── v0.1.10-expected.txt │ ├── v0.1.10-upgrade.sh │ ├── v0.2.0-expected.txt │ ├── v0.2.0-upgrade.sh │ ├── v0.3.0-pg-expected.txt │ └── v0.3.0-pg-upgrade.sh ├── uwsgi-sogs-proxied-socket.ini ├── uwsgi-sogs-proxied.ini └── uwsgi-sogs-standalone.ini ├── docs ├── generate-api-docs.py ├── make-docs.sh └── snippets │ ├── dm.get_inbox.md │ ├── dm.get_outbox.md │ ├── dm.md │ ├── dm.poll_inbox.md │ ├── dm.poll_outbox.md │ ├── general.get_caps.md │ ├── general.md │ ├── legacy.md │ ├── messages.md │ ├── onion_request.md │ ├── rooms.get_one_room.md │ ├── rooms.md │ ├── rooms.update_room.md │ ├── sidebar.md │ ├── uncategorized.md │ ├── users.md │ └── views.md ├── install-debs.md ├── install-uwsgi.md ├── pyproject.toml ├── setup.cfg ├── sogs.ini.filter-sample ├── sogs.ini.sample ├── sogs ├── __init__.py ├── __main__.py ├── cleanup.py ├── config.py ├── crypto.py ├── db.py ├── hashing.py ├── http.py ├── key_convert │ ├── __init__.py │ └── __main__.py ├── migrations │ ├── __init__.py │ ├── exc.py │ ├── file_message.py │ ├── fix_info_update_triggers.py │ ├── import_hacks.py │ ├── message_views.py │ ├── new_columns.py │ ├── new_tables.py │ ├── reactions.py │ ├── room_accessible.py │ ├── room_moderators.py │ ├── seqno_creation.py │ ├── seqno_etc.py │ ├── user_perm_futures.py │ ├── user_permissions.py │ └── v_0_1_x.py ├── model │ ├── __init__.py │ ├── exc.py │ ├── file.py │ ├── message.py │ ├── post.py │ ├── room.py │ └── user.py ├── mule.py ├── omq.py ├── postfork.py ├── routes │ ├── __init__.py │ ├── auth.py │ ├── converters.py │ ├── dm.py │ ├── exc.py │ ├── general.py │ ├── legacy.py │ ├── messages.py │ ├── onion_request.py │ ├── rooms.py │ ├── subrequest.py │ ├── users.py │ └── views.py ├── schema.pgsql ├── schema.sqlite ├── session_pb2.py ├── static │ ├── protobuf.min.js │ ├── session.proto │ └── view_room.js ├── templates │ ├── base.html │ ├── index.html │ ├── setup.html │ └── view_room.html ├── utils.py └── web.py ├── tests ├── auth.py ├── request.py ├── test_auth.py ├── test_blinding.py ├── test_dm.py ├── test_files.py ├── test_hashing.py ├── test_legacy.py ├── test_onion_requests.py ├── test_reactions.py ├── test_room_routes.py ├── test_rooms.py ├── test_routes_general.py ├── test_user_routes.py ├── user.py └── util.py └── update-protobuf.sh /.drone.jsonnet: -------------------------------------------------------------------------------- 1 | local docker_base = 'registry.oxen.rocks/lokinet-ci-'; 2 | local apt_get_quiet = 'apt-get -o=Dpkg::Use-Pty=0 -q'; 3 | 4 | local default_deps = [ 5 | 'python3', 6 | 'python3-pytest', 7 | 'python3-oxenmq', 8 | 'python3-oxenc', 9 | 'python3-pyonionreq', 10 | 'python3-coloredlogs', 11 | 'python3-uwsgidecorators', 12 | 'python3-flask', 13 | 'python3-cryptography', 14 | 'python3-pycryptodome', 15 | 'python3-nacl', 16 | 'python3-pil', 17 | 'python3-protobuf', 18 | 'python3-openssl', 19 | 'python3-qrcode', 20 | 'python3-better-profanity', 21 | 'python3-sqlalchemy', 22 | 'python3-sqlalchemy-utils', 23 | ]; 24 | 25 | local apt_get_quiet = 'apt-get -o=Dpkg::Use-Pty=0 -q'; 26 | 27 | local auto_distro = '$$(lsb_release -sc)'; 28 | 29 | local setup_commands(deps=default_deps, distro=auto_distro) = [ 30 | 'echo "Running on ${DRONE_STAGE_MACHINE}"', 31 | 'echo "man-db man-db/auto-update boolean false" | 
debconf-set-selections', 32 | apt_get_quiet + ' update', 33 | apt_get_quiet + ' install -y eatmydata', 34 | 'eatmydata ' + apt_get_quiet + ' install --no-install-recommends -y lsb-release', 35 | 'cp contrib/deb.oxen.io.gpg /etc/apt/trusted.gpg.d', 36 | 'echo deb http://deb.oxen.io ' + distro + ' main >/etc/apt/sources.list.d/oxen.list', 37 | 'eatmydata ' + apt_get_quiet + ' update', 38 | 'eatmydata ' + apt_get_quiet + ' dist-upgrade -y', 39 | 'eatmydata ' + apt_get_quiet + ' install --no-install-recommends -y ' + std.join(' ', deps), 40 | ]; 41 | 42 | 43 | // Regular build on a debian-like system: 44 | local debian_pipeline(name, 45 | image, 46 | arch='amd64', 47 | deps=default_deps, 48 | before_pytest=[], 49 | pytest_opts='', 50 | extra_cmds=[], 51 | services=[], 52 | allow_fail=false, 53 | distro=auto_distro) = { 54 | kind: 'pipeline', 55 | type: 'docker', 56 | name: name, 57 | platform: { arch: arch }, 58 | trigger: { branch: { exclude: ['debian/*', 'ubuntu/*'] } }, 59 | steps: [ 60 | { 61 | name: '🐍 pytest', 62 | image: image, 63 | pull: 'always', 64 | [if allow_fail then 'failure']: 'ignore', 65 | commands: setup_commands(deps=deps, distro=distro) + before_pytest + [ 66 | 'pytest-3 -vv --color=yes ' + pytest_opts, 67 | ] 68 | + extra_cmds, 69 | }, 70 | ], 71 | services: services, 72 | }; 73 | 74 | local pg_deps = ['python3-psycopg2', 'postgresql-client']; 75 | local pg_service(pg_tag='bullseye') = { 76 | name: 'pg', 77 | image: 'postgres:' + pg_tag, 78 | pull: 'always', 79 | environment: { POSTGRES_USER: 'ci', POSTGRES_PASSWORD: 'ci' }, 80 | }; 81 | local pg_wait = 'for i in $(seq 0 30); do if pg_isready -d ci -h pg -U ci -t 1; then break; elif [ "$i" = 30 ]; then echo "Timeout waiting for postgresql" >&2; exit 1; fi; sleep 1; done'; 82 | 83 | local debian_pg_pipeline(name, image, pg_tag='bullseye', distro=auto_distro) = debian_pipeline( 84 | name, 85 | image, 86 | deps=default_deps + pg_deps, 87 | services=[pg_service(pg_tag)], 88 | before_pytest=[pg_wait], 89 | pytest_opts='--pgsql "postgresql://ci:ci@pg/ci"', 90 | distro=distro 91 | ); 92 | 93 | local upgrade_deps = default_deps + ['git', 'curl', 'sqlite3', 'python3-prettytable']; 94 | local upgrade_test(name, from='v0.1.10', intermediates=[], pg=false, pg_convert=false) = { 95 | name: name, 96 | image: docker_base + 'debian-stable', 97 | pull: 'always', 98 | commands: setup_commands(deps=upgrade_deps 99 | + (if pg || pg_convert then pg_deps else []) 100 | + (if pg_convert then ['python3-pip'] else [])) 101 | + [if pg || pg_convert then pg_wait] 102 | + [ 103 | './contrib/upgrade-tests/' + from + '-upgrade.sh --delete-my-crap ' + std.join(' ', intermediates), 104 | './contrib/upgrade-tests/dump-db.py >upgraded-db.txt', 105 | 'diff --color=always -sub contrib/upgrade-tests/' + from + '-expected.txt upgraded-db.txt', 106 | ] + (if pg_convert then [ 107 | 'eatmydata pip3 install psycopg', 108 | 'PYTHONPATH=. 
./contrib/pg-import.py sogs.db postgresql://ci:ci@pg/ci --drop-tables --commit', 109 | 'SOGS_PGSQL=postgresql://ci:ci@pg/ci ./contrib/upgrade-tests/dump-db.py >converted-db.txt', 110 | 'diff --color=always -sub contrib/upgrade-tests/' + from + '-expected.txt converted-db.txt', 111 | ] else []), 112 | 113 | environment: if pg then { SOGS_PGSQL: 'postgresql://ci:ci@pg/ci' } else {}, 114 | }; 115 | 116 | 117 | [ 118 | { 119 | name: 'Lint checks', 120 | kind: 'pipeline', 121 | type: 'docker', 122 | platform: { arch: 'amd64' }, 123 | steps: [ 124 | { 125 | name: 'Formatting', 126 | image: docker_base + 'debian-stable', 127 | pull: 'always', 128 | commands: [ 129 | 'echo "Running on ${DRONE_STAGE_MACHINE}"', 130 | apt_get_quiet + ' install -y black', 131 | 'black --check --diff --color .', 132 | ], 133 | }, 134 | { 135 | name: 'Flake8', 136 | image: docker_base + 'debian-stable', 137 | pull: 'always', 138 | commands: [ 139 | 'echo "Running on ${DRONE_STAGE_MACHINE}"', 140 | apt_get_quiet + ' install -y flake8', 141 | 'flake8 .', 142 | ], 143 | }, 144 | ], 145 | }, 146 | 147 | debian_pipeline('Debian sid (amd64)', docker_base + 'debian-sid', distro='sid'), 148 | debian_pipeline('Debian stable (i386)', docker_base + 'debian-stable/i386'), 149 | debian_pipeline('Debian stable (amd64)', docker_base + 'debian-stable'), 150 | debian_pipeline('Ubuntu latest (amd64)', docker_base + 'ubuntu-rolling'), 151 | debian_pipeline('Ubuntu LTS (amd64)', docker_base + 'ubuntu-lts'), 152 | 153 | debian_pg_pipeline('PostgreSQL 15/sid', docker_base + 'debian-sid', pg_tag='15-bullseye', distro='sid'), 154 | debian_pg_pipeline('PostgreSQL 12/focal', docker_base + 'ubuntu-focal', pg_tag='12-bullseye'), 155 | 156 | // ARM builds (ARM64 and armhf) 157 | debian_pipeline('Debian sid (ARM64)', docker_base + 'debian-sid', arch='arm64', distro='sid'), 158 | debian_pipeline('Debian stable (armhf)', docker_base + 'debian-stable/arm32v7', arch='arm64'), 159 | 160 | // Import tests: 161 | { 162 | name: 'Upgrades (sqlite)', 163 | kind: 'pipeline', 164 | type: 'docker', 165 | platform: { arch: 'amd64' }, 166 | steps: [ 167 | upgrade_test('sqlite3: 0.1.10→now'), 168 | upgrade_test('sqlite3: 0.1.10→0.2.0→now', intermediates=['43380beaa2']), 169 | upgrade_test('sqlite3: 0.2.0→now', from='v0.2.0'), 170 | ], 171 | }, 172 | { 173 | name: 'Upgrades (pg)', 174 | kind: 'pipeline', 175 | type: 'docker', 176 | platform: { arch: 'amd64' }, 177 | services: [pg_service()], 178 | steps: [ 179 | upgrade_test('postgres: 0.1.10→now', pg=true), 180 | upgrade_test('postgres: 0.3.0→now', from='v0.3.0-pg', pg=true), 181 | upgrade_test('sqlite3-pg: 0.1.10→now→pg', pg_convert=true), 182 | ], 183 | }, 184 | 185 | // Macos: 186 | { 187 | kind: 'pipeline', 188 | type: 'exec', 189 | name: 'MacOS', 190 | platform: { os: 'darwin', arch: 'amd64' }, 191 | steps: [ 192 | { 193 | name: '🐍 pytest', 194 | commands: [ 195 | 'echo "Running on ${DRONE_STAGE_MACHINE}"', 196 | 'PYTHONPATH=. 
/opt/local/bin/python3 -mpytest -vv --color=yes', 197 | ], 198 | }, 199 | ], 200 | }, 201 | ] 202 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | per-file-ignores = 3 | sogs/web.py:F401,E402 4 | sogs/__main__.py:E402 5 | max-line-length = 100 6 | extend-ignore = E203 # See https://github.com/psf/black/issues/315 7 | exclude=sogs/session_pb2.py 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | *.db 3 | *.db-shm 4 | *.db-wal 5 | .DS_Store 6 | /uploads 7 | /key_x25519 8 | __pycache__ 9 | *.pyc 10 | /protobuf.*/ 11 | 12 | # This file is for local config which we never want committed: 13 | /sogs.ini 14 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include sogs/schema.sqlite 2 | include sogs/schema.pgsql 3 | include sogs/static/* 4 | include sogs/templates/* 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PySOGS (Session Community Server) 2 | 3 | PySOGS is the reference implementation of a Session Community server (formerly known as a Session 4 | Open Group). PySOGS is used to run the official Session Communities, and is the officially supported 5 | Session Community server. New features added to Session Communities are developed here in lockstep 6 | with the support added to the Session clients. 7 | 8 | ## Installation 9 | 10 | For most servers we provide and recommend using the .deb installation method on a VPS running Ubuntu 11 | 20.04 (or newer), or Debian 11 (or newer). 12 | 13 | Alternatively, advanced users and developers interested in running or working on the latest 14 | development code may prefer to run directly from the repository using uwsgi. 15 | 16 | While both methods are supported modes of operation, the latter requires more configuration and 17 | maintenance and some experience with running Python code and web applications. 18 | 19 | [Debian/Ubuntu Package Install](install-debs.md) 20 | 21 | [Manual Installation](install-uwsgi.md) 22 | 23 | ## Administration 24 | 25 | For how to administer a running PySOGS see [SOGS Administration](administration.md). 26 | 27 | ## License 28 | 29 | Copyright (c) 2021-2022 The Oxen Project 30 | 31 | PySOGS is licensed under the [GNU General Public License (GPL) v3](LICENSE.txt), or (at your option) 32 | any later version. 33 | -------------------------------------------------------------------------------- /administration.md: -------------------------------------------------------------------------------- 1 | # SOGS administration 2 | 3 | Once installed (either via [deb packages](install-debs.md) or [manually](install-uwsgi.md)) SOGS 4 | requires some configuration on the backend in order to add rooms and initial administrators. When 5 | using the deb-based installation the commands are available through the `sogs` command-line tool. 6 | 7 | Note: When running from a session-pysogs project source code directory you must run `python3 8 | -msogs` from the `session-pysogs` directory instead of the `sogs` command.
9 | 10 | The full list of available commands is available by running: 11 | 12 | ```bash 13 | sogs --help 14 | ``` 15 | 16 | (or `python3 -msogs --help` if running from the source code). 17 | 18 | We cover a few of the most common options here, to help get you started. 19 | 20 | ## Creating a room 21 | 22 | To add a room run: 23 | 24 | ```bash 25 | sogs --add-room TOKEN --name "NAME" 26 | ``` 27 | 28 | Replace `TOKEN` with the address to use in the room URL (which must consist of letters, numbers, 29 | underscores, or dashes), and replace `NAME` with the room name to display in Session. 30 | 31 | For example: 32 | 33 | ```bash 34 | sogs --add-room fishing --name "Fish Talk" 35 | ``` 36 | 37 | If you wish you may also provide a description (though this is not yet displayed in Session): 38 | 39 | ```bash 40 | sogs --add-room fishing --name "Fish Talk" --description "Three fish, four fish, yellow fish, green fish" 41 | ``` 42 | 43 | The add-room command will, on success, print the details of the new room, such as: 44 | 45 | ``` 46 | Created room fishing: 47 | 48 | fishing 49 | ======= 50 | Name: Fish Talk 51 | Description: Three fish, four fish, yellow fish, green fish 52 | URL: http://example.net/fishing?public_key=0ea1f6eeb5f16b44ddf0decf5a534ae437de272439e371a5ae04fdb1ba05e524 53 | Messages: 0 (0.0 MB) 54 | Attachments: 0 (0.0 MB) 55 | Active users: 0 (7d), 0 (14d) 0 (30d) 56 | Moderators: 0 admins (0 hidden), 0 moderators (0 hidden) 57 | ``` 58 | 59 | ## Add an administrator or moderator to a room 60 | 61 | To add an administrator or moderator of a room you use one of the following commands: 62 | 63 | ```bash 64 | sogs --rooms TOKEN --add-moderators SESSIONID --admin 65 | sogs --rooms TOKEN --add-moderators SESSIONID 66 | ``` 67 | 68 | The difference between an administrator and a moderator is that administrators are permitted to add 69 | and remove administrators/moderators from the room, while moderators cannot. (Aside from this, both 70 | have full moderation capabilities). 71 | 72 | Note that room moderators added from within Session are currently always added as administrators; 73 | this will change in a future Session update to support adding either type. 74 | 75 | You can also add one or more session IDs as *global* moderators/administrators by specifying a `+` 76 | for the room. Global moderators/administrators are considered to be moderators of every room on the 77 | server for both existing rooms and any new future rooms: 78 | 79 | ```bash 80 | sogs --rooms + --add-moderators SESSIONID --admin --visible 81 | ``` 82 | 83 | You can also add multiple moderators to multiple rooms at once by just adding more room tokens and 84 | session IDs on the command line. For example: 85 | 86 | ```bash 87 | sogs --rooms fishing boating --add-moderators SESSIONID_1 SESSIONID_2 SESSIONID_3 88 | ``` 89 | 90 | would add the three session IDs as moderators of both the `fishing` and `boating` rooms. 91 | 92 | ### Hidden vs visible moderators 93 | 94 | Moderators/admins can be either publicly visible (which is the default for room-specific 95 | moderators/admins) or hidden (which is the default for global server moderators/admins). 96 | 97 | A hidden moderator still has all the same moderation permissions as a visible moderator, but will 98 | not be displayed to regular (non-moderator) Session users as a room moderator (with a moderator 99 | badge, etc.). 
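For example, to add a hidden global admin (a hypothetical `SESSIONID`; the `--hidden`/`--visible` flags are described just below):

```bash
sogs --rooms + --add-moderators SESSIONID --admin --hidden
```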
100 | 101 | To explicitly control when adding moderators that they should be hidden or visible you can add the 102 | `--visible` or `--hidden` flags when adding a moderator. 103 | 104 | ## Listing rooms 105 | 106 | To list all current rooms use: 107 | 108 | ``` 109 | sogs -L 110 | ``` 111 | 112 | This includes details such as the number of messages, files, active users, and moderators. If you 113 | also want to list each of the individual moderators in each room add `-v` to the end of the command. 114 | 115 | ## More! 116 | 117 | For other commands, such as listing all global moderators, deleting rooms, and removing 118 | moderators, run: 119 | 120 | ``` 121 | sogs --help 122 | ``` 123 | 124 | for all available command-line options. 125 | -------------------------------------------------------------------------------- /contrib/deb.oxen.io.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxen-io/session-pysogs/9497ddbfffd34a5dabaf23cc9cca795a761521c7/contrib/deb.oxen.io.gpg -------------------------------------------------------------------------------- /contrib/upgrade-tests/common.sh: -------------------------------------------------------------------------------- 1 | 2 | set -o errexit 3 | 4 | if [ "$1" != "--delete-my-crap" ]; then 5 | echo " 6 | Warning: this script removes current database, files, and settings, and so should never be run on a 7 | live installation. 8 | 9 | Run with argument --delete-my-crap if that sounds okay" >&2 10 | 11 | exit 1 12 | fi 13 | 14 | shift 15 | 16 | if ! [ -d contrib/upgrade-tests ] || ! [ -e sogs/__init__.py ]; then 17 | echo "You need to run this as ./contrib/upgrade-test.sh from the top-level sogs directory" >&2 18 | exit 1 19 | fi 20 | 21 | export PYTHONPATH=. 22 | 23 | rm -rf rooms database.db files key_x25519 x25519_{public,private}_key.pem 24 | 25 | echo -e "[log]\nlevel = DEBUG" >sogs.ini 26 | echo -e "[rooms]\nactive_prune_threshold = 365000" >>sogs.ini 27 | if [ -n "$SOGS_PGSQL" ]; then 28 | echo -e "[db]\nurl = $SOGS_PGSQL" >>sogs.ini 29 | for table in rooms users messages message_history pinned_messages reactions user_reactions \ 30 | files room_users user_permission_overrides user_permission_futures user_ban_futures \ 31 | user_request_nonces inbox room_import_hacks file_id_hacks; do 32 | echo "DROP TABLE IF EXISTS $table CASCADE;" 33 | done | psql "$SOGS_PGSQL" 34 | else 35 | rm -f sogs.db{,-shm,-wal} 36 | fi 37 | 38 | for tag in "$@"; do 39 | if ! git rev-parse "$tag" >/dev/null; then 40 | echo "'$tag' doesn't look like a valid known git revision or tag!" 41 | exit 1 42 | fi 43 | done 44 | 45 | 46 | do_upgrades() { 47 | tags=("$@" "$(git rev-parse HEAD)") 48 | for tag in "${tags[@]}"; do 49 | echo "Upgrading to $tag..." 
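# Check out the target revision; -c silences git's detached-HEAD advice.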
50 | git -c advice.detachedHead=false checkout "$tag" 51 | 52 | # Use the dedicated --upgrade command if it has been added in this revision: 53 | if [ -e sogs/__main__.py ] && grep -q '^ *"--upgrade",$' sogs/__main__.py; then 54 | args=("--upgrade") 55 | else 56 | # Before it was added, any command would implicitly upgrade: 57 | args=("-L") 58 | fi 59 | 60 | if [ -n "$sogs_need_initialize" ]; then 61 | sogs_need_initialize= 62 | 63 | if [ -n "$sogs_key_conv" ]; then 64 | python3 -msogs.key_convert 65 | 66 | echo "Checking key_x25519 for proper conversion" 67 | diff --color=always \ 68 | <(python3 -c 'f = open("key_x25519", "rb"); print(f.read().hex())') \ 69 | <(echo a0101f8bca7fa1cedf9620f5b80810b18f5b0f1acbb219640876be9d78a6195f) 70 | fi 71 | 72 | # In 0.2.0 and up until close to 0.3.0, just running any command-line commands will do the 73 | # database import and/or upgrade. Starting in 0.3.0 you have to specify --initialize to make 74 | # this happen. 75 | if [ -e sogs/__main__.py ] && grep -q '^ *"--initialize",$' sogs/__main__.py; then 76 | if [ "${args[0]}" == "--upgrade" ]; then 77 | # If we support the --upgrade flag then --initialize and --upgrade are exclusive: 78 | args=("--initialize") 79 | else 80 | args+=("--initialize") 81 | fi 82 | fi 83 | fi 84 | 85 | python3 -msogs "${args[@]}" 86 | 87 | if [ -n "$sogs_fix_updates_count" ]; then 88 | # 0.2.0 had a bug in one of the room update triggers that would unnecessarily update 89 | # `message_sequence` (then named `updates`) on metadata updates, which the 0.1.x import 90 | # triggered when setting the image value. This was fixed before v0.3.0, but if our 91 | # first tag imports via such a problematic version then we need to undo the increment so 92 | # that the final message_sequence value remains comparable to a version that imported 93 | # directly into a newer release. 94 | if sed -ne '/^CREATE TRIGGER room_metadata_update/,/^END;/p' sogs/schema.sql* \ 95 | | grep -q 'SET updates = updates + 1'; then 96 | sqlite3 sogs.db 'UPDATE rooms SET updates = updates - 1 WHERE image IS NOT NULL' 97 | fi 98 | sogs_fix_updates_count= 99 | fi 100 | done 101 | 102 | # This should exit cleanly to indicate no needed migrations (if it doesn't, i.e. we still 103 | # require migrations after doing a migration then Something Getting Wrong in migrations). 104 | python3 -msogs --check-upgrades 105 | } 106 | -------------------------------------------------------------------------------- /contrib/upgrade-tests/dump-db.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from prettytable import PrettyTable 4 | import time 5 | import os 6 | 7 | if 'SOGS_PGSQL' in os.environ and os.environ['SOGS_PGSQL']: 8 | try: 9 | import psycopg as pg 10 | 11 | conn = pg.connect(os.environ['SOGS_PGSQL'], autocommit=True) 12 | except ModuleNotFoundError: 13 | import psycopg2 as pg 14 | 15 | conn = pg.connect(os.environ['SOGS_PGSQL']) 16 | conn.autocommit = True 17 | else: 18 | pg = None 19 | import sqlite3 20 | 21 | conn = sqlite3.connect('file:sogs.db?mode=ro', uri=True) 22 | 23 | cur = conn.cursor() 24 | 25 | 26 | # Sorting priorities for column names (because different import paths could end up with different 27 | # column orders in the table when we `SELECT *`). Lower = earlier. Anything not in here gets a 28 | # sort value of 100, and equal orders are sorted alphabetically. 
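# e.g. columns (write, id, path, token) print in the order: id, token, path, write.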
29 | column_priority = { 30 | 'id': 0, 31 | 'room': 1, 32 | 'token': 2, 33 | 'user': 3, 34 | 'session_id': 4, 35 | 'name': 4, 36 | 'description': 5, 37 | 'image': 6, 38 | 'admin': 101, 39 | 'moderator': 102, 40 | 'global_moderator': 103, 41 | 'room_moderator': 103, 42 | 'visible_mod': 103, 43 | 'read': 104, 44 | 'accessible': 105, 45 | 'write': 106, 46 | 'upload': 107, 47 | 'banned': 108, 48 | } 49 | 50 | 51 | def dump_rows(table, extra=None, where=None, order="id", skip=set()): 52 | print(f"{table}:") 53 | ob = order if isinstance(order, str) else ', '.join(order) 54 | extra = f', {extra}' if extra else '' 55 | cur.execute(f"SELECT * {extra} FROM {table} {'WHERE ' + where if where else ''} ORDER BY {ob}") 56 | cols = [x[0] for x in cur.description] 57 | 58 | indices = [i for i in range(len(cols)) if cols[i] not in skip] 59 | indices.sort(key=lambda i: (column_priority.get(cols[i], 100), cols[i])) 60 | 61 | table = PrettyTable() 62 | table.field_names = [cols[i] for i in indices] 63 | for r in cur: 64 | table.add_row( 65 | [ 66 | 'NULL' 67 | if r[i] is None 68 | else int(r[i]) 69 | if isinstance(r[i], bool) 70 | else f"{r[i]:.3f}" 71 | if isinstance(r[i], float) 72 | else r[i] 73 | for i in indices 74 | ] 75 | ) 76 | for c in cols: 77 | table.align[c] = 'l' if c in ('token', 'name', 'session_id', 'description', 'path') else 'r' 78 | return table.get_string() + "\n" 79 | 80 | 81 | print(dump_rows("rooms", skip={'created', 'info_updates'})) 82 | 83 | TableNotFoundError = pg.errors.UndefinedTable if pg else sqlite3.OperationalError 84 | try: 85 | print(dump_rows("room_import_hacks", order='room')) 86 | except TableNotFoundError: 87 | pass 88 | 89 | print(dump_rows("message_metadata")) 90 | 91 | print(dump_rows("pinned_messages", order=("room", "pinned_at"))) 92 | 93 | print( 94 | dump_rows( 95 | "files", 96 | extra=f"CASE WHEN expiry IS NULL THEN NULL ELSE uploaded > {time.time()-86400} END " 97 | "AS recent_upload", 98 | skip={'uploaded', 'expiry'}, 99 | ) 100 | ) 101 | 102 | try: 103 | print(dump_rows("file_id_hacks", order='file')) 104 | except TableNotFoundError: 105 | pass 106 | 107 | print(dump_rows("users", where="id != 0", skip={'created', 'last_active'})) 108 | 109 | print(dump_rows("room_users", order=('room', '"user"'))) 110 | 111 | print(dump_rows("user_permissions", where='"user" != 0', order=('room', '"user"'))) 112 | 113 | print(dump_rows("inbox")) 114 | -------------------------------------------------------------------------------- /contrib/upgrade-tests/v0.1.10-upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! [ -f contrib/upgrade-tests/common.sh ]; then 4 | echo "Wrong path: run from top-level sogs" >&2 5 | exit 1 6 | fi 7 | 8 | . contrib/upgrade-tests/common.sh 9 | 10 | set -o errexit 11 | 12 | for tag in "$@"; do 13 | if ! git rev-parse "$tag" >/dev/null; then 14 | echo "'$tag' doesn't look like a valid known git revision or tag!" 15 | exit 1 16 | fi 17 | done 18 | 19 | # Extract the SOGS 0.1.10 test database: 20 | if ! 
[ -f test-sogs-0-1-10.tar.xz ]; then 21 | curl -sSOL https://oxen.rocks/sogs-assets/test-sogs-0-1-10.tar.xz 22 | fi 23 | 24 | tar xf test-sogs-0-1-10.tar.xz 25 | 26 | # Update the timestamps to be relatively current (so that we are importing files that shouldn't be 27 | # expired): 28 | for roomdb in rooms/*.db; do 29 | sqlite3 $roomdb "update files set timestamp = timestamp - 1645500000 + cast(((julianday('now') - 2440587.5)*86400.0) AS INTEGER)" 30 | done 31 | 32 | sogs_key_conv=1 33 | sogs_fix_updates_count=1 34 | sogs_need_initialize=1 35 | do_upgrades "$@" 36 | -------------------------------------------------------------------------------- /contrib/upgrade-tests/v0.2.0-upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! [ -f contrib/upgrade-tests/common.sh ]; then 4 | echo "Wrong path: run from top-level sogs" >&2 5 | exit 1 6 | fi 7 | 8 | if [ -n "$SOGS_PGSQL" ]; then 9 | echo "Error: SOGS_PGSQL is not supported for the v0.2.0 upgrade" >&2 10 | exit 1 11 | fi 12 | 13 | . contrib/upgrade-tests/common.sh 14 | 15 | set -o errexit 16 | 17 | # Extract the SOGS 0.2.0 test database: 18 | if ! [ -f test-sogs-0-2-0.tar.xz ]; then 19 | curl -sSOL https://oxen.rocks/sogs-assets/test-sogs-0-2-0.tar.xz 20 | fi 21 | 22 | tar xf test-sogs-0-2-0.tar.xz 23 | 24 | # Update the timestamps to be relatively current (so that files aren't expired) 25 | sqlite3 sogs.db <&2 5 | exit 1 6 | fi 7 | 8 | if [ -z "$SOGS_PGSQL" ]; then 9 | echo "Error: must specify pg url via SOGS_PGSQL env variable" >&2 10 | exit 1 11 | fi 12 | 13 | . contrib/upgrade-tests/common.sh 14 | 15 | set -o errexit 16 | 17 | # Extract the SOGS 0.3.0 postgresql test database: 18 | if ! [ -f test-sogs-pg-f6dd80c04b.tar.xz ]; then 19 | curl -sSOL https://oxen.rocks/sogs-assets/test-sogs-pg-f6dd80c04b.tar.xz 20 | fi 21 | 22 | tar xf test-sogs-pg-f6dd80c04b.tar.xz 23 | 24 | psql -f sogstest.pgsql "$SOGS_PGSQL" 25 | 26 | # Update the timestamps to be relatively current (so that files aren't expired) 27 | psql "$SOGS_PGSQL" < section for the site (change USER 33 | # appropriately): 34 | # 35 | # ProxyPass / unix:/home/USER/session-pysogs/sogs.wsgi|uwsgi://uwsgi-session-pysogs/ 36 | # 37 | 38 | [uwsgi] 39 | chdir = /home/USER/session-pysogs 40 | uid = USER 41 | gid = www-data 42 | socket = sogs.wsgi 43 | chmod-socket = 660 44 | plugins = python3 45 | processes = 2 46 | enable-threads = true 47 | manage-script-name = true 48 | mount = /=sogs.web:app 49 | mule = sogs.mule:run 50 | log-4xx = true 51 | log-5xx = true 52 | disable-logging = true 53 | -------------------------------------------------------------------------------- /contrib/uwsgi-sogs-proxied.ini: -------------------------------------------------------------------------------- 1 | # uwsgi configuration for using with a front end reverse proxy that proxies full HTTP requests to a 2 | # localhost TCP port; this (or the unix socket alternative) is the recommended base configuration 3 | # for all but the simplest setups, but requires some extra initial setup. 4 | # 5 | # Note that, while this configuration listens for HTTP connections, it is meant to sit behind a 6 | # public reverse proxy server and is *not* suitable for use as a public-facing HTTP server; see 7 | # uwsgi-sogs-standalone.ini for a configuration suitable for service direct public requests. 8 | # 9 | # Configuration requires: 10 | # - change the chdir= to the path where you want to write the sogs data files (database, uploads, 11 | # keys, etc.) 
12 | # 13 | # - change uid= and gid= values to the system user/group names that the script should run as. DO 14 | # NOT RUN AS root! That path that you specify for `chdir=` should be owned by this user/group. 15 | # 16 | # - tweak the `processes` argument, if desired and you have more than 2 cores and expect a very busy 17 | # SOGS. 18 | # 19 | # - if using uwsgi in vassal mode then put the configuration file into /etc/uwsgi/vassals 20 | # 21 | # - set up the front-end reverse proxy. For nginx this means setting up a basic site configuration 22 | # like this: 23 | # 24 | # server { 25 | # server_name mysite.com; 26 | # 27 | # client_max_body_size 10M; 28 | # 29 | # location / { 30 | # proxy_set_header X-Real-IP $remote_addr; 31 | # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 32 | # proxy_set_header Host $host; 33 | # proxy_pass http://127.3.2.1:4242/; 34 | # } 35 | # 36 | # listen 80; 37 | # listen [::]:80; 38 | # } 39 | # 40 | # For Apache enable the `proxy_http` module (`sudo a2enmod proxy_http` on Debian/Ubuntu systems) 41 | # and add this directive inside the section for the site: 42 | # 43 | # ProxyPass / http://127.3.2.1:4242/ 44 | # 45 | 46 | [uwsgi] 47 | chdir = /home/USER/session-pysogs 48 | uid = USER 49 | gid = GROUP 50 | plugins = python3 51 | http-socket = 127.3.2.1:4242 52 | processes = 2 53 | enable-threads = true 54 | manage-script-name = true 55 | mount = /=sogs.web:app 56 | mule = sogs.mule:run 57 | log-4xx = true 58 | log-5xx = true 59 | disable-logging = true 60 | -------------------------------------------------------------------------------- /contrib/uwsgi-sogs-standalone.ini: -------------------------------------------------------------------------------- 1 | # uwsgi configuration for listening directly on port 80; this is the simplest setup, but is less 2 | # flexible and slightly less performant than proxying through a front-end server (e.g. nginx) and 3 | # cannot be used if anything else on the system always wants to serve any HTTP traffic. 4 | # 5 | # Note that this configuration is meant for setups where sogs will be directly serving public HTTP 6 | # requests, and is sub-optimal for handling proxied requests; for that see the 7 | # uwsgi-sogs-proxied.ini configuration instead. 8 | 9 | # Configuration requires: 10 | # - change the chdir= to the path where you want to write the sogs data files (database, uploads, 11 | # keys, etc.) 12 | # 13 | # - change uid= and gid= values to the system user/group names that the script should run as. DO 14 | # NOT RUN AS root! That path that you specify for `chdir=` should be owned by this user/group. 15 | # 16 | # - tweak the `processes` argument, if desired and you have more than 2 cores and expect a very busy 17 | # SOGS. 18 | # 19 | # - if using uwsgi in vassal mode then put the configuration file into /etc/uwsgi/vassals 20 | # 21 | [uwsgi] 22 | chdir = /home/USER/session-pysogs 23 | uid = USER 24 | gid = GROUP 25 | plugins = python3,http 26 | processes = 2 27 | enable-threads = true 28 | http = :80 29 | mount = /=sogs.web:app 30 | mule = sogs.mule:run 31 | log-4xx = true 32 | log-5xx = true 33 | disable-logging = true 34 | -------------------------------------------------------------------------------- /docs/make-docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if [ "$(basename $(pwd))" != "docs" ] || ! 
[ -e "make-docs.sh" ]; then 6 | echo "Error: you must run this from the docs directory" >&2 7 | exit 1 8 | fi 9 | 10 | rm -rf api 11 | 12 | docsify init --local api 13 | 14 | rm -f api/README.md 15 | 16 | if [ -n "$NPM_PACKAGES" ]; then 17 | npm_dir="$NPM_PACKAGES/lib/node_modules" 18 | elif [ -n "$NODE_PATH" ]; then 19 | npm_dir="$NODE_PATH" 20 | elif [ -d "$HOME/node_modules" ]; then 21 | npm_dir="$HOME/node_modules" 22 | elif [ -d "/usr/local/lib/node_modules" ]; then 23 | npm_dir="/usr/local/lib/node_modules" 24 | else 25 | echo "Can't determine your node_modules path; set NPM_PACKAGES or NODE_PATH appropriately" >&2 26 | exit 1 27 | fi 28 | 29 | cp $npm_dir/docsify/node_modules/prismjs/components/prism-{json,python,http}.min.js api/vendor 30 | cp $npm_dir/docsify-katex/dist/docsify-katex.js api/vendor 31 | cp $npm_dir/docsify-katex/node_modules/katex/dist/katex.min.css api/vendor 32 | 33 | PYTHONPATH=.. ./generate-api-docs.py -m -o api 34 | 35 | perl -ni.bak -e ' 36 | BEGIN { $first = 0; } 37 | if (m{^\s*\s*$}) { 38 | if (not $first) { 39 | $first = false; 40 | print qq{ 41 | \n}; 50 | } 51 | } else { 52 | s{.*}{Session PySOGS API}; 53 | s{(name="description" content=)"[^"]*"}{$1"Session PySOGS API documentation"}; 54 | if (m{^\s*}) { 55 | print qq{ 56 | 57 | 58 | 59 | 60 | 61 | }; 62 | } 63 | print; 64 | }' api/index.html 65 | 66 | -------------------------------------------------------------------------------- /docs/snippets/dm.get_inbox.md: -------------------------------------------------------------------------------- 1 | # Query Parameters 2 | 3 | The request takes an optional `limit` query parameter indicating the number of messages to 4 | return (up to 256). If omitted, at most 100 messages are returned. 5 | 6 | # Return value 7 | 8 | Returns a JSON array of up to `limit` (default 100) messages, with oldest messages first. Each 9 | element is a JSON object with keys: 10 | 11 | - `id` — the unique integer message id. 12 | - `posted_at` — unix timestamp (float) when the message was received by SOGS. 13 | - `expires_at` — unix timestamp (float) when SOGS will expire and delete the message. 14 | - `message` — the encrypted message body. 15 | - `sender` — the (blinded) Session ID of the sender of the message. 16 | - `recipient` — the (blinded) Session ID of the recpient of the message. 17 | -------------------------------------------------------------------------------- /docs/snippets/dm.get_outbox.md: -------------------------------------------------------------------------------- 1 | # Query Parameters 2 | 3 | The request takes an optional `limit` query parameter indicating the number of messages to 4 | return (up to 256). If omitted, at most 100 messages are returned. 5 | 6 | # Return value 7 | 8 | Returns a JSON array of up to `limit` (default 100) messages, with oldest messages first. Each 9 | element is a JSON object with keys: 10 | 11 | - `id` — the unique integer message id. 12 | - `posted_at` — unix timestamp (float) when the message was received by SOGS. 13 | - `expires_at` — unix timestamp (float) when SOGS will expire and delete the message. 14 | - `message` — the encrypted message body. 15 | - `sender` — the (blinded) Session ID of the sender of the message. 16 | - `recipient` — the (blinded) Session ID of the recpient of the message. 
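A hypothetical response with a single message (illustrative, abbreviated values only; field meanings as listed above):

```json
[
  {
    "id": 1234,
    "posted_at": 1645555555.123,
    "expires_at": 1646851555.123,
    "message": "vQMAh2nmeLJc...",
    "sender": "15d010...",
    "recipient": "1578f0..."
  }
]
```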
17 | -------------------------------------------------------------------------------- /docs/snippets/dm.md: -------------------------------------------------------------------------------- 1 | # Direct Messages 2 | 3 | These endpoints are used for sending and retrieving direct messages between blinded users. Such 4 | messages are only used for introductions; once both parties know each other's Session ID, messages 5 | should be done via regular Session one-to-one messaging. 6 | 7 | ## Encryption details 8 | 9 | SOGS itself does not have the ability to decrypt the message contents and thus cannot enforce 10 | any particular content; the following, however, is strongly recommended for Session client 11 | interoperability: 12 | 13 | Alice has master Session Ed25519 keypair $a$, $A$ and blinded keypair $ka$, $kA$. 14 | 15 | Bob has master Session Ed25519 keypair $b$, $B$ and blinded keypair $kb$, $kB$. 16 | 17 | Alice wants to send a message $M$ to Bob, knowing only $kB$ (i.e. the blinded Session ID after 18 | stripping the `0x15` prefix). 19 | 20 | Alice constructs a message using Session protobuf encoding, then concatenates her *unblinded* 21 | pubkey, $A$, to this message: 22 | 23 | $$ 24 | M \parallel A 25 | $$ 26 | 27 | Alice then constructs an encryption key: 28 | 29 | $$ 30 | E = H(a * kB \parallel kA \parallel kB) 31 | $$ 32 | 33 | where $H(\cdot)$ is 32-byte BLAKE2b, and $*$ denotes unclamped Ed25519 scalar-point multiplication 34 | (e.g. libsodium's `crypto_scalarmult_ed25519_noclamp`). 35 | 36 | The $M \parallel A$ plaintext value is then encrypted using XChaCha20-Poly1305 (e.g. using 37 | libsodium's `crypto_aead_xchacha20poly1305_ietf_encrypt` function), using a secure-random 38 | 24-byte nonce, no additional data, and encryption key $E$. 39 | 40 | The final data message is then constructed as: 41 | 42 | $$ 43 | \texttt{0x00} \parallel \textit{Ciphertext} \parallel \textit{Nonce} 44 | $$ 45 | 46 | where $\texttt{0x00}$ is a version byte (allowing for future alternative encryption formats) and 47 | the rest are the raw ciphertext and nonce bytes. 48 | 49 | Finally this is base64-encoded when sent to/retrieved from the SOGS. 50 | 51 | ## Decryption 52 | 53 | Decryption proceeds by reversing the steps above: 54 | 55 | 1. base64-decode the value. 56 | 57 | 2. Grab the version byte from the front and the 24-byte nonce from the back of the value. 58 | 59 | a) if the version byte is not $\texttt{0x00}$, abort: this message is from someone using a 60 | different encryption protocol. 61 | 62 | 3. Construct the encryption key by calculating: 63 | 64 | $$ 65 | E = H(b * kA \parallel kA \parallel kB) 66 | $$ 67 | 68 | where $kA$ is the sender's de-prefixed blinded Session ID, $b$ is the user's master Ed25519 key, 69 | and $kB$ is the user's blinded Ed25519 pubkey for this SOGS server. 70 | 71 | 4. Decrypt the remaining ciphertext using the nonce. 72 | 73 | 5. Unpack the plaintext value into $M$ and the sender's $A$ value (i.e. the last 32 bytes). 74 | 75 | 6. Derive the sender's actual Session ID by converting $A$ from an Ed25519 pubkey to a 76 | curve25519 pubkey and prepending 0x05. (E.g. using libsodium's 77 | `crypto_sign_ed25519_pk_to_curve25519` on $A$, then adding the `05` prefix to the front). 78 | 79 | This then leaves the receiving client with the true Session ID of the sender, and the message 80 | body (encoded according to typical Session message protobuf encoding).
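As a concrete sketch of the sending side, using PyNaCl's low-level libsodium bindings (the 32-byte private scalar `a`, pubkey `A`, and blinded pubkeys `kA`/`kB` are assumed to be already derived; blinded-key derivation is out of scope here):

```python
import base64, hashlib, os
from nacl.bindings import (
    crypto_aead_xchacha20poly1305_ietf_encrypt,
    crypto_scalarmult_ed25519_noclamp,
)

def encrypt_dm(M: bytes, a: bytes, A: bytes, kA: bytes, kB: bytes) -> bytes:
    # E = H(a * kB || kA || kB), where H is 32-byte BLAKE2b:
    shared = crypto_scalarmult_ed25519_noclamp(a, kB)
    E = hashlib.blake2b(shared + kA + kB, digest_size=32).digest()
    # Encrypt M || A (message plus the *unblinded* pubkey) with
    # XChaCha20-Poly1305, random 24-byte nonce, no additional data:
    nonce = os.urandom(24)
    ct = crypto_aead_xchacha20poly1305_ietf_encrypt(M + A, None, nonce, E)
    # 0x00 version byte || ciphertext || nonce, base64-encoded for transport:
    return base64.b64encode(b'\x00' + ct + nonce)
```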
81 | -------------------------------------------------------------------------------- /docs/snippets/dm.poll_inbox.md: -------------------------------------------------------------------------------- 1 | # URL Parameters 2 | 3 | - `msgid` — the numeric message ID of the last message. (Newer messages will always 4 | have a higher ID). 5 | 6 | # Query Parameters 7 | 8 | The request takes an optional `limit` query parameter indicating the number of messages to 9 | return (up to 256). If omitted, at most 100 messages are returned. 10 | 11 | # Return value 12 | 13 | This method, on success, returns *either* a 200 (OK) status code with a list of 1 or more new 14 | messages, or else returns a 304 (Not Modified) response with an empty body to indicate that 15 | there are no messages for this user newer than the given ID. 16 | 17 | If there are messages this returns a JSON array of up to `limit` (default 100) messages, with 18 | oldest messages first. Each element is exactly as described in the [all messages](#GET-inbox) 19 | endpoint. 20 | -------------------------------------------------------------------------------- /docs/snippets/dm.poll_outbox.md: -------------------------------------------------------------------------------- 1 | # URL Parameters 2 | 3 | - `msgid` — the numeric message ID of the last message. (Newer messages will always 4 | have a higher ID). 5 | 6 | # Query Parameters 7 | 8 | The request takes an optional `limit` query parameter indicating the number of messages to 9 | return (up to 256). If omitted, at most 100 messages are returned. 10 | 11 | # Return value 12 | 13 | This method, on success, returns *either* a 200 (OK) status code with a list of 1 or more new 14 | messages, or else returns a 304 (Not Modified) response with an empty body to indicate that 15 | there are no messages for this user newer than the given ID. 16 | 17 | If there are messages this returns a JSON array of up to `limit` (default 100) messages, with 18 | oldest messages first. Each element is exactly as described in the [all messages](#GET-inbox) 19 | endpoint. 20 | -------------------------------------------------------------------------------- /docs/snippets/general.get_caps.md: -------------------------------------------------------------------------------- 1 | # Example retrieving capabilities: 2 | 3 | ```http 4 | GET /capabilities HTTP/1.1 5 | Host: example.com 6 | ``` 7 | 8 | ```json 9 | { 10 | "capabilities": ["sogs", "batch"] 11 | } 12 | ``` 13 | 14 | # Example with capability check 15 | 16 | ```http 17 | GET /capabilities?required=magic,batch HTTP/1.1 18 | ``` 19 | 20 | ```json 21 | { 22 | "capabilities": ["sogs", "batch"], 23 | "missing": ["magic"] 24 | } 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/snippets/general.md: -------------------------------------------------------------------------------- 1 | # General 2 | 3 | These endpoints are for meta-operations such as request batches and querying the SOGS supported 4 | features. 5 | -------------------------------------------------------------------------------- /docs/snippets/legacy.md: -------------------------------------------------------------------------------- 1 | # Legacy SOGS Requests 2 | 3 | These endpoints are for backwards compatibility with older Session clients that do not understand 4 | the newer SOGS capabilities and endpoints. They are deprecated and their use is discouraged. 
5 | -------------------------------------------------------------------------------- /docs/snippets/messages.md: -------------------------------------------------------------------------------- 1 | # Messages 2 | 3 | These endpoints are the core of SOGS messaging, covering room message 4 | submission, retrieval, deletion, etc. 5 | -------------------------------------------------------------------------------- /docs/snippets/onion_request.md: -------------------------------------------------------------------------------- 1 | # Onion Requests 2 | 3 | These are the endpoints invoked when making an onion request routed through the Oxen Storage Server 4 | network. 5 | -------------------------------------------------------------------------------- /docs/snippets/rooms.get_one_room.md: -------------------------------------------------------------------------------- 1 | # Example: 2 | 3 | ```http 4 | GET /room/sudoku HTTP/1.1 5 | Host: example.com 6 | ``` 7 | 8 | ```json 9 | { 10 | "active_users": 8471519, 11 | "active_users_cutoff": 604800, 12 | "admins": [ 13 | "050123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" 14 | ], 15 | "created": 1645556525.154345, 16 | "description": "All the best sudoku discussion!", 17 | "info_updates": 341, 18 | "message_sequence": 45091759, 19 | "moderators": [ 20 | "05fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210", 21 | "05ffffeeeeddddccccbbbbaaaa9999888877776666555544443333222211110000" 22 | ], 23 | "name": "Sudoku Solvers Club", 24 | "read": true, 25 | "token": "sudoku", 26 | "upload": true, 27 | "write": true 28 | } 29 | ``` 30 | -------------------------------------------------------------------------------- /docs/snippets/rooms.md: -------------------------------------------------------------------------------- 1 | # Rooms 2 | 3 | These endpoints are related to retrieving and setting properties of SOGS discussion rooms. 4 | -------------------------------------------------------------------------------- /docs/snippets/rooms.update_room.md: -------------------------------------------------------------------------------- 1 | # Example 2 | 3 | FIXME - need more docs here! 4 | 5 | -------------------------------------------------------------------------------- /docs/snippets/sidebar.md: -------------------------------------------------------------------------------- 1 | - [General](general.md) 2 | - [Rooms](rooms.md) 3 | - [Messages](messages.md) 4 | - [User management](users.md) 5 | - [Direct messages](dm.md) 6 | - [Onion requests](onion_request.md) 7 | - [Web viewer](views.md) 8 | - [Deprecated legacy SOGS](legacy.md) 9 | -------------------------------------------------------------------------------- /docs/snippets/uncategorized.md: -------------------------------------------------------------------------------- 1 | # Uncategorized endpoints 2 | -------------------------------------------------------------------------------- /docs/snippets/users.md: -------------------------------------------------------------------------------- 1 | # User Management 2 | 3 | These endpoints relate to user-specific actions such as server bans and moderator permissions. 4 | -------------------------------------------------------------------------------- /docs/snippets/views.md: -------------------------------------------------------------------------------- 1 | # Publicly viewable URLs 2 | 3 | These endpoints are not meant for Session clients but rather for web browsers attempting to open 4 | a SOGS URL.
5 | -------------------------------------------------------------------------------- /install-debs.md: -------------------------------------------------------------------------------- 1 | # Debian/Ubuntu Package Installation 2 | 3 | We package and update .deb packages for PySOGS for Ubuntu 20.04 and newer, and for Debian 10 and 4 | newer. If starting out with a new server we recommend either the latest Debian stable release 5 | (currently Debian 11) or Ubuntu LTS release (currently 20.04). 6 | 7 | Our apt repository includes various dependencies and libraries, but most important are these two 8 | packages (only one of which may be installed at a time): 9 | 10 | ### sogs-standalone 11 | 12 | This is the package most simple SOGS setup will want to install. It installs a SOGS that listens on 13 | a public IP/port for HTTP connections. It does not support HTTPS connections (but since all 14 | messages to/from SOGS are separately encrypted, HTTPS is not particularly recommended anyway). 15 | 16 | ### sogs-proxied 17 | 18 | This package provides a more advanced SOGS configuration where SOGS itself will listen on an 19 | internal port and expects to have requests proxied to it from an ngnix or apache2 front-end server 20 | that listens on the public IP/port. The package will install basic site configuration files for 21 | either nginx or apache2, but extra configuration may be necessary. 22 | 23 | This package is required if you want your SOGS to be reached over HTTPS: the HTTPS handling is 24 | configured on the front-end server (i.e. in nginx or apache) using a tool such as `certbot`. (This 25 | package does not auto-configure such HTTPS certificates, but there are many online help pages on 26 | setting up such HTTPS support for a front-end web server). 27 | 28 | If you don't know what any of this means then stick with the `sogs-standalone` package. 29 | 30 | ## Installation 31 | 32 | To install the debian packages you need to set up the Oxen apt repository using the following 33 | commands (this only needs to be done once on the server, and you may have already done it if you are 34 | already using other Oxen deb packages on the server): 35 | 36 | ```bash 37 | # Install the Oxen apt repository public signing key: 38 | sudo curl -so /etc/apt/trusted.gpg.d/oxen.gpg https://deb.oxen.io/pub.gpg 39 | # Add the Oxen apt repository to your package configuration: 40 | echo "deb https://deb.oxen.io $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/oxen.list 41 | # Update package lists: 42 | sudo apt update 43 | ``` 44 | 45 | and then install **ONE** of the sogs packages: 46 | ```bash 47 | sudo apt install sogs-standalone 48 | ``` 49 | or 50 | ```bash 51 | sudo apt install sogs-proxied 52 | ``` 53 | 54 | This will install and activate the sogs service. 55 | 56 | During installation you will be prompted to enter the public SOGS URL. While you may use your bare 57 | IP address here, we recommend instead using a DNS hostname so that your SOGS site can be moved to a 58 | different server or ISP in the future: hostnames are easily updated to point to a new location, IP 59 | addresses are not. 60 | 61 | ## Configuring sogs 62 | 63 | SOGS has a few options for configuration; the default packages install a configuration file in 64 | /etc/sogs/sogs.ini that may be edited to tweak how your SOGS operates. Comments are provided in the 65 | file to describe each option. 
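For example, a minimal tweak (using the `base_url` option shown in the shipped sogs.ini; treat this as a sketch and substitute your own domain):

```ini
[net]
base_url = http://sogs.example.com
```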
66 | 67 | After changing settings in `sogs.ini` you need to restart sogs for the changes to take effect using: 68 | 69 | ```bash 70 | sudo systemctl restart sogs.service 71 | ``` 72 | (which works for both the standalone or the proxied version). 73 | 74 | The default sogs installation has no rooms or admins; in order to start using it you must use the 75 | command-line tools to set up initial rooms/users. See [SOGS Administration](administration.md) for 76 | details. 77 | 78 | ## Upgrading 79 | 80 | As we develop SOGS we routinely push package updates to the deb repository. To upgrade to the 81 | latest version simply run: 82 | 83 | ```bash 84 | sudo apt update 85 | sudo apt upgrade 86 | ``` 87 | 88 | Note that this installs all available system package updates (not just SOGS-related packages), which 89 | is generally a good thing as there may be security updates for the OS that should be installed as 90 | well. 91 | 92 | ## Upgrading from SOGS 0.1.x 93 | 94 | The deb packages automatically upgrading from the previous versions of sogs (which used the package 95 | name `session-open-group-server`). The procedure is exactly the same as above; during installation 96 | the existing private key will be converted to the format PySOGS expects, and the first time SOGS 97 | (i.e. the new PySOGS code) starts up it will notice that it has an empty database, will detect the 98 | old SOGS databases, and will import all data from them. (The old database files are preserved in 99 | case anything goes wrong). 100 | 101 | ## Backing up 102 | 103 | It is recommended that you make automatic, regular backups of your PySOGS data files. In particular 104 | you want to regularly back up everything in /var/lib/session-open-group-server and the main sogs 105 | configuration file, /etc/sogs/sogs.ini. 106 | -------------------------------------------------------------------------------- /install-uwsgi.md: -------------------------------------------------------------------------------- 1 | # Manual Installation Instructions 2 | 3 | ## Step 0: Do Not Run PySOGS as root 4 | 5 | Do not run pysogs as root. Some inexperienced system administrators think it is easier to just run 6 | everything as root, without realizing that it is a significant security issue. Just don't do it. 7 | 8 | Instead, use an existing regular user or, even better, create a new regular user just for SOGS. 9 | 10 | ## Step 1: Clone the PySOGS repo 11 | 12 | ```bash 13 | git clone https://github.com/oxen-io/session-pysogs -b stable pysogs 14 | cd pysogs 15 | ``` 16 | 17 | This clones the `stable` branch rather than the default `dev` branch. If you are comfortable with 18 | filing bug reports if problems come up and want to run the development version change `stable` to 19 | `dev`, but keep in mind that things on the dev branch may sometimes be untested and broken. 20 | 21 | ## Step 2: Install dependencies 22 | 23 | PySOGS has a handful of required Python modules and programs. 
There are multiple ways to install 24 | these, but the easiest on a recent Ubuntu/Debian system is to install them for the system version of 25 | Python using: 26 | 27 | ```bash 28 | sudo curl -so /etc/apt/trusted.gpg.d/oxen.gpg https://deb.oxen.io/pub.gpg 29 | echo "deb https://deb.oxen.io $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/oxen.list 30 | sudo apt update 31 | sudo apt install python3-{oxenmq,oxenc,pyonionreq,coloredlogs,uwsgidecorators,flask,cryptography,nacl,pil,protobuf,openssl,qrcode,better-profanity,sqlalchemy,sqlalchemy-utils} uwsgi-plugin-python3 32 | ``` 33 | 34 | If you want to use a postgresql database backend then you will also need the python3-psycopg2 35 | package. If unsure then stick with the default (sqlite3) database. 36 | 37 | 38 | ## Step 3: Adjust configuration 39 | 40 | ### sogs.ini 41 | 42 | Copy the `sogs.ini.sample` to `sogs.ini`: 43 | 44 | ```bash 45 | cp sogs.ini.sample sogs.ini 46 | ``` 47 | 48 | and edit it to change settings as desired. At a minimum you must uncomment and set the `base_url` 49 | setting to your SOGS URL; this can be a domain name or a public ip address. Using a domain name is 50 | recommended over a bare IP as it can later be moved to a new host or new ISP, while while a bare IP 51 | cannot. 52 | 53 | For example: 54 | ```ini 55 | base_url = http://sogs.example.com 56 | ``` 57 | 58 | ### uwsgi.ini 59 | 60 | SOGS requires uwsgi to manage processes; sample configurations are available in the contrib/ 61 | directory. For a simple setup listening directly on a public IP/port you can use the standalone 62 | sample configuration: 63 | 64 | ```bash 65 | cp contrib/uwsgi-sogs-standalone.ini uwsgi-sogs.ini 66 | ``` 67 | 68 | Edit `uwsgi-sogs.ini`, change relevant config settings including chdir, uid, gid. Other settings 69 | such as http port can also be altered if required. 70 | 71 | ```ini 72 | chdir = LOCATION_OF_CLONED_DIRECTORY 73 | uid = USER_RUNNING_SOGS 74 | gid = USER_RUNNING_SOGS 75 | http = :UNUSED_PORT 76 | ``` 77 | 78 | Do *not* change the `mount`, `enable-threads`, or `mule` configuration lines. 79 | 80 | ## Step 4: Run SOGS 81 | 82 | Once configured you can temporarily run PySOGS by running the following command while inside the git 83 | repository base directory: 84 | 85 | ```bash 86 | uwsgi uwsgi-sogs.ini 87 | ``` 88 | 89 | For a more permanent installation, however, you'll want to set up and enable a system service; you 90 | can use [the service file from the debian 91 | packaging](https://github.com/oxen-io/session-pysogs/blob/debian/sid/debian/sogs-standalone.service) 92 | as a starting point. 93 | 94 | ## Step 5: Adding rooms, admins 95 | 96 | In order to do anything useful you will want to add a room and admins to your SOGS installation 97 | (unless upgrading: see below). 98 | 99 | To interact with the SOGS database you want to run `python3 -msogs --help` from the session-pysogs 100 | directory which will give you a description of the available commands to control your SOGS 101 | installation. 102 | 103 | See [SOGS Administration](administration.md) for details, but note that where that document 104 | indicates using the `sogs` command you should instead use `python3 -msogs` from the `session-pysogs` 105 | directory. 106 | 107 | ### Step 6: Check web viewer functionality 108 | 109 | Navigating to your SOGS URL should display a web viewer of your open group, including any configured 110 | rooms. 
Navigating to the listed rooms will give you the full SOGS URL (and QR code) that is used to 111 | have a Session client connect to the open group. 112 | 113 | ## Extras 114 | 115 | ### Upgrading 116 | 117 | To upgrade simple stop your sogs service, `git pull` to update to the latest git repository code, 118 | and start sogs again. It's recommended that you also install regular OS updates. 119 | 120 | ### Upgrading from SOGS 0.1.x 121 | 122 | To upgrade from a 0.1.x version of (Rust) SOGS you will need to do two things: 123 | 124 | - Copy `database.db`, `x25519_private_key.pem`, `files`, and `rooms` from the old sogs data 125 | directory into the session-pysogs project directory. 126 | - Manually convert your keys from the old openssl format, using: 127 | ```bash 128 | python3 -msogs.key_convert -i x25519_private_key.pem -o key_x25519 129 | ``` 130 | 131 | The first time you start sogs after doing this it will see that it has no rooms but that 132 | `database.db` exists and will perform a full import. Note that you should leave the `files` 133 | directory in place after this import: existing, imported uploads are left in their existing 134 | locations until they expire. The other old data files are not used after a successful import. 135 | 136 | ### Backing up 137 | 138 | It is recommended that you make automatic, regular backups of your PySOGS data files. In particular 139 | you want to regularly back up `sogs.db` (which contains all the rooms and posts) and the `uploads` 140 | directory (which contains uploaded files and room images). You also want to make a one-time backup 141 | of `key_x25519` (your SOGS private key needed to process SOGS requests). 142 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 100 3 | skip-string-normalization = true 4 | skip-magic-trailing-comma = true 5 | target-version = ['py38'] 6 | include = '\.py$' 7 | exclude = 'sogs/.*_pb2.py' 8 | 9 | [build-system] 10 | requires = ["setuptools", "wheel"] 11 | build-backend = "setuptools.build_meta" 12 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | 3 | name = sogs 4 | version = attr: sogs.__version__ 5 | 6 | [options] 7 | 8 | zip_safe=False 9 | include_package_data=True 10 | packages=find: 11 | install_requires= 12 | flask 13 | coloredlogs 14 | cryptography 15 | PyNaCl 16 | Pillow 17 | protobuf 18 | pyOpenSSL 19 | qrcode 20 | better_profanity 21 | oxenmq 22 | oxenc 23 | pyonionreq 24 | sqlalchemy 25 | setup_requires= 26 | tomli 27 | -------------------------------------------------------------------------------- /sogs.ini.filter-sample: -------------------------------------------------------------------------------- 1 | ; This file describes the settings you can use for advanced filtering controls. 2 | ; 3 | ; Note that when configuring this, it does *not* go in a separate file but rather in your active 4 | ; sogs.ini configuration file. (Since everything goes here in a separate section, it doesn't matter 5 | ; where in sogs.ini you add it). 6 | 7 | 8 | ; 9 | ; Room-specific filtering 10 | ; 11 | ; To set filtration rules for a specific room you add a [room:TOKEN] section and then set the 12 | ; rules that should apply to this specific room. 
For example, to enable the profanity filter and 13 | ; disallow (only) cyrillic characters in the room with token 'sudoku' you would add: 14 | ; 15 | ;[room:sudoku] 16 | ;profanity_filter=yes 17 | ;profanity_silent=yes 18 | ;alphabet_filters=cyrillic 19 | ; 20 | ; This overrides the default from the main [messages] config section for any given keys, so it can 21 | ; be used to replace or change the rules that apply to a given room. Currently only the 22 | ; profanity_filter, profanity_silent, and alphabet_filters settings can be overridden in this way. 23 | 24 | ; 25 | ; Filtration responses 26 | ; 27 | ; When a message is filtered because of the profanity or alphabet filters SOGS can optionally 28 | ; send a reply in the room; this reply can either be visible to everyone, or just to the specific 29 | ; user. To enable such a reply, add a filter section here: the section name consists of 30 | ; 'filter:ROOM:TYPE' where ROOM and TYPE are the room token and filtration type, or '*' to match all 31 | ; rooms/types. 32 | ; 33 | ; Section names for all filtered messages: 34 | ;[filter:*:*] 35 | ; 36 | ; Section names for a particular filtration type: 37 | ;[filter:*:profanity] 38 | ;[filter:*:alphabet] 39 | ; 40 | ; The "type" can also be a specific language: 41 | ;[filter:*:arabic] 42 | ;[filter:*:cyrillic] 43 | ; etc. 44 | ; 45 | ; Room-specific filtration section names: 46 | ; 47 | ;[filter:fishing:*] 48 | ;[filter:sudoku:profanity] 49 | ; 50 | ; If using both '*' and specific values, the value from the more specific section will be used where 51 | ; present. 52 | ; 53 | ; Within this section there are currently three settings: 54 | ; 55 | ; - reply -- the body of a reply to send (see details below). If omitted or empty then no reply 56 | ;   will be sent. 57 | ; - profile_name -- the profile name to use in that reply. 58 | ; - public -- whether the reply should be seen by everyone or just the poster. The default is 'no' 59 | ;   (i.e. only the user will see the reply). 60 | ; 61 | ; The `reply` value should be specified on a single line of the config, and supports the following 62 | ; substitutions: 63 | ; 64 | ; \@ - the profile name, in @tag form, of the poster whose message was declined. 65 | ; \p - the profile name in plain text. 66 | ; \r - the name of the room 67 | ; \t - the token of the room 68 | ; \n - a line break 69 | ; \\ - a literal \ character 70 | ; 71 | ; You can also randomize among multiple responses by specifying multiple lines in the config: each 72 | ; additional line must be indented in the .ini file to be properly recognized. 73 | ; 74 | ; For example, if you use this config: 75 | ; 76 | 
99 | I think \@'s sudoku broke 😦 100 | 101 | ; then arabic/cyrillic/persian would be blocked everywhere, profanity would be blocked everywhere 102 | ; except the 'sailors' room, and when a message is blocked you would get a message such as one of 103 | ; the following depending on the room and the rule applied: 104 | ; 105 | ; 106 | ; (LanguagePolice) 107 | ; Hi @Foreignsailor1988, I'm afraid your message couldn't be sent: Salty Sailors is English-only! 108 | ; 109 | ; 110 | ; (Swear Jar) 111 | ; Whoa there @87yearoldgrandma! That language is too strong for the Cuddly Kittens group! Try the Sailors group instead. 112 | ; 113 | ; 114 | ; (Bot45); [one of the following would be sent randomly, visible to everyone in the group] 115 | ; @87yearoldgrandma got a little too enthusiastic today with their solve. Maybe someone can assist? 116 | ; 117 | ; Uh oh, I think @87yearoldgrandma has two 3s in the same row! 118 | ; 119 | ; I think @87yearoldgrandma's sudoku broke 😦 120 | -------------------------------------------------------------------------------- /sogs.ini.sample: -------------------------------------------------------------------------------- 1 | ; SOGS configuration. Copy this file to sogs.ini and edit as desired. It can be left mostly (or 2 | ; entirely) empty: defaults are shown in the comments. 3 | 4 | 5 | [db] 6 | 7 | ; The database connection string url for the sogs database where everything is stored. For sqlite 8 | ; this is `sqlite:///` followed by the path. E.g. `sqlite:///sogs.db` for sogs.db in the current 9 | ; working directory, or `sqlite:////path/to/sogs.db` for an absolute path. 10 | ; PostgreSQL is also supported; see the SQLAlchemy docs for the required connect string format. 11 | ;url = sqlite:///sogs.db 12 | 13 | 14 | [crypto] 15 | 16 | ; Path to the x25519 private key file; this is a 32-byte file containing the raw private key data. 17 | ; 18 | ;key_file = key_x25519 19 | 20 | 21 | [net] 22 | 23 | ; Base url for generating self-referring links, for example for the open group URL and QR code shown 24 | ; when accessing via a web browser. Can be http, https, and may or may not include a port. This 25 | ; should be the address of a front-end proxy server (e.g. nginx or apache2) that forwards uwsgi 26 | ; requests to pysogs. Using HTTPS is possible but not particularly recommended: onion requests are 27 | ; already encrypted and so the extra layer of HTTPS encryption does not make requests more secure. 28 | ; 29 | ;base_url = http://example.net 30 | 31 | 32 | ; Listening address for oxenmq requests. The socket uses curve encryption using the same x25519 key 33 | ; as the server itself. Can be specified as a multiline value to listen on multiple 34 | ; addresses/ports. 35 | ; 36 | ;omq_listen = tcp://*:22028 37 | 38 | 39 | ; Internal listening address (usually a unix socket path starting with ipc://) for inter-process 40 | ; communication between sogs processes. This is unauthenticated and so must not be a publicly 41 | ; accessible address! 42 | ; 43 | ;omq_internal = ipc://./omq.sock 44 | 45 | 46 | ; Whether we should show an index of public rooms on the root URL when visited by a 47 | ; browser. 48 | ; 49 | ;http_show_index = yes 50 | 51 | 52 | ; Whether we should show recent messages for public rooms on the sogs room page when visited by a 53 | ; browser. 54 | ; 55 | ;http_show_recent = yes 56 | 57 | 58 | [files] 59 | 60 | ; How long newly uploaded files should be stored before being cleaned up, in days. Note that 61 | ; changing this only affects new files.
This limit does not apply to room images and attachments in 62 | ; pinned messages, both of which do not expire. Can be set to 0 to never expire new uploads. 63 | ; 64 | ;expiry = 15 65 | 66 | 67 | ; The maximum size of files we accept, in bytes. Note that onion requests impose a maximum 68 | ; message size, and so this should not be larger than 10MB, but it can be reduced to reject 69 | ; larger files in open groups. 70 | ; 71 | ;max_size = 10000000 72 | 73 | 74 | [rooms] 75 | 76 | ; How many days we consider a user to be "active" in a room without having at least retrieved 77 | ; messages from the room. (This must be ≤ active_prune_threshold, below). 78 | ; 79 | ;active_threshold = 7 80 | 81 | 82 | ; How long we store user-room activity information, so that we can determine "active within the past 83 | ; x days" values other than the default. 84 | ; 85 | ;active_prune_threshold = 60 86 | 87 | 88 | [messages] 89 | 90 | ; How long we keep message edit/deletion history, in days. 91 | ; 92 | ;history_prune_threshold = 30 93 | 94 | 95 | ; Whether we should pass messages through a profanity filter, rejecting messages that contain 96 | ; profane words (and common permutations of those words). 97 | ; 98 | ;profanity_filter = no 99 | 100 | 101 | ; Whether the profanity filter should be "silent" or not. If enabled (the default), then messages 102 | ; containing banned words are accepted by the server but quarantined and never distributed to other 103 | ; users. If disabled then users will receive an error when attempting to post a message containing 104 | ; profanity. 105 | ; 106 | ;profanity_silent = yes 107 | 108 | 109 | ; Path to a file containing a custom profanity list for the profanity filter. If not specified then 110 | ; the default list of the Python `better_profanity` library is used, 111 | ; https://raw.githubusercontent.com/snguyenthanh/better_profanity/master/better_profanity/profanity_wordlist.txt 112 | ; To add/remove words download that file, modify as needed, and set this value to the path to that 113 | ; file. 114 | ; 115 | ;profanity_custom = 116 | 117 | ; Whether we should reject messages that use a particular alphabet. This is a space or 118 | ; comma-separated list of alphabet names; posts with characters in the given language ranges will be 119 | ; blocked (unless posted by a mod/admin). Currently supported are: arabic, cyrillic, and persian 120 | ; (note that persian is included within arabic). 121 | ; 122 | ; *This* setting is the default for all rooms, but you can also configure room-specific filtering 123 | ; (see sogs.ini.filter-sample for details). 124 | ; 125 | ;alphabet_filters = 126 | 127 | 128 | ; Whether the alphabet filter should silently drop messages (yes) or return an error (no). 129 | ; 130 | ;alphabet_silent = yes 131 | 132 | 133 | ; Whether the above filters should be applied to moderators and admins (=yes) or not (=no). The 134 | ; default is no, that is, mods' and admins' messages are not filtered. 135 | ; 136 | ;filter_mods = no 137 | 138 | 139 | ; The profanity and alphabet filters can be controlled with per-room settings (which override the 140 | ; global defaults set above) and can have automated responses sent by the SOGS server. For details 141 | ; and examples see the sogs.ini.filter-sample file. 142 | 143 | 144 | [web] 145 | 146 | ; If set this should be an absolute path where we look for templates for the web view pages. When 147 | ; unset (or empty) the default package resources (in sogs/templates) are used.
See also 148 | ; [net].http_show_recent. If you want to customize templates then make a copy of the sogs/templates 149 | ; directory and set this to the path of that copy. 150 | ; 151 | ;template_path = 152 | 153 | 154 | ; If set this should be an absolute path where we look for /static/* files for the web viewer. When 155 | ; unset/empty we use the default sogs/static resources. If you want to modify them locally without 156 | ; worrying about them conflicting with updates then make a copy of the sogs/static directory and set 157 | ; this to the full path of the copy. 158 | ; 159 | ;static_path = 160 | 161 | 162 | [log] 163 | 164 | ; The log level controlling which messages should be displayed. One of: CRITICAL, ERROR, WARNING, 165 | ; INFO, or DEBUG. 166 | ; 167 | ;level = WARNING 168 | -------------------------------------------------------------------------------- /sogs/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.3.8.dev0" 2 | -------------------------------------------------------------------------------- /sogs/cleanup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | 4 | from .web import app 5 | from .db import query 6 | from . import config, db 7 | 8 | # Cleanup interval, in seconds. 9 | INTERVAL = 10 10 | 11 | 12 | def cleanup(): 13 |     with app.app_context(): 14 |         try: 15 |             app.logger.debug("Pruning expired items") 16 |             files = prune_files() 17 |             msg_hist = prune_message_history() 18 |             dms = prune_expired_dms() 19 |             room_act = prune_room_activity() 20 |             perm_upd = apply_permission_updates() 21 |             exp_nonces = expire_nonce_history() 22 |             app.logger.debug( 23 |                 f"Pruned {files} files, {msg_hist} msg hist, {room_act} room activity, " 24 |                 f"{exp_nonces} nonces, {dms} inbox msgs; applied {perm_upd} perm updates." 25 |             ) 26 |             return (files, msg_hist, room_act, perm_upd, exp_nonces) 27 |         except Exception as e: 28 |             app.logger.warning(f"Periodic database cleanup failed: {e}") 29 |             return None 30 | 31 | 32 | def prune_files(): 33 |     now = time.time() 34 |     if db.have_returning: 35 |         to_remove = [ 36 |             row[0] for row in query("DELETE FROM files WHERE expiry < :exp RETURNING path", exp=now) 37 |         ] 38 |     else: 39 |         with db.transaction(): 40 |             to_remove = [ 41 |                 row[0] for row in query("SELECT path FROM files WHERE expiry < :exp", exp=now) 42 |             ] 43 |             # Delete inside the same transaction so the SELECT and DELETE stay atomic: 44 |             query("DELETE FROM files WHERE expiry < :exp", exp=now) 45 | 46 |     if not to_remove: 47 |         return 0 48 | 49 |     # The deletion is committed, so the files are gone from the DB: now remove them from disk.
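    # Unlink failures below are deliberately tolerated: the database rows are already
    # deleted at this point, so leaving a stray file on disk is preferable to aborting
    # the rest of the cleanup pass.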
50 |     unlink_count = 0 51 |     for path in to_remove: 52 |         try: 53 |             os.unlink(path) 54 |             unlink_count += 1 55 |         except FileNotFoundError: 56 |             pass 57 |         except Exception as e: 58 |             app.logger.error("Unable to remove expired upload '{}' from disk: {}".format(path, e)) 59 | 60 |     app.logger.info( 61 |         "Pruned {} expired/deleted files{}".format( 62 |             len(to_remove), 63 |             " ({} unlinked)".format(unlink_count) if unlink_count != len(to_remove) else "", 64 |         ) 65 |     ) 66 |     return len(to_remove) 67 | 68 | 69 | def prune_message_history(): 70 |     count = query( 71 |         "DELETE FROM message_history WHERE replaced < :t", 72 |         t=time.time() - config.MESSAGE_HISTORY_PRUNE_THRESHOLD, 73 |     ).rowcount 74 | 75 |     if count > 0: 76 |         app.logger.info("Pruned {} message edit/deletion records".format(count)) 77 |     return count 78 | 79 | 80 | def prune_expired_dms(): 81 |     count = query("DELETE FROM inbox WHERE expiry < :now", now=time.time()).rowcount 82 | 83 |     if count > 0: 84 |         app.logger.info(f"Removed {count} expired inbox/message requests") 85 |     return count 86 | 87 | 88 | def prune_room_activity(): 89 |     count = query( 90 |         "DELETE FROM room_users WHERE last_active < :t", 91 |         t=time.time() - config.ROOM_ACTIVE_PRUNE_THRESHOLD, 92 |     ).rowcount 93 | 94 |     if count > 0: 95 |         app.logger.info("Pruned {} old room activity records".format(count)) 96 | 97 |     query( 98 |         """ 99 |         UPDATE rooms SET active_users = ( 100 |             SELECT COUNT(*) FROM room_users WHERE room = rooms.id AND last_active >= :since) 101 |         """, 102 |         since=time.time() - config.ROOM_DEFAULT_ACTIVE_THRESHOLD, 103 |     ) 104 | 105 |     return count 106 | 107 | 108 | def expire_nonce_history(): 109 |     return query("DELETE FROM user_request_nonces WHERE expiry < :exp", exp=time.time()).rowcount 110 | 111 | 112 | def apply_permission_updates(): 113 |     with db.transaction(): 114 |         now = time.time() 115 |         # This update gets a bit complicated; basically what we want to do is: 116 |         # - if a future permission row is to be applied, then any `null` permission should not 117 |         #   change the current permission (i.e. preserve the current override value if an override 118 |         #   row exists, otherwise insert a null). 119 |         # - for non-null permissions if the permission being applied equals the room's default then 120 |         #   we want to update the override value to null regardless of what it is now, because that 121 |         #   is almost always what was intended. 122 |         # The `CASE WHEN f.perm IS NULL THEN o.perm ELSE NULLIF(f.perm, r.perm) END`s below make 123 |         # this happen: if our future is null we use the current override value (null if we have no 124 |         # override), otherwise if the future and room defaults are equal we specifically set null; 125 |         # otherwise we set the future value.
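        # Concrete example: if a room's default write permission is true and a future row
        # also applies write=true, NULLIF(true, true) yields NULL -- the redundant override
        # is cleared and the user simply follows the room default from then on.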
126 | num_perms = query( 127 | """ 128 | INSERT INTO user_permission_overrides (room, "user", read, write, upload) 129 | SELECT f.room, f."user", 130 | CASE WHEN f.read IS NULL THEN o.read ELSE NULLIF(f.read, r.read) END, 131 | CASE WHEN f.write IS NULL THEN o.write ELSE NULLIF(f.write, r.write) END, 132 | CASE WHEN f.upload IS NULL THEN o.upload ELSE NULLIF(f.upload, r.upload) END 133 | FROM user_permission_futures f 134 | JOIN rooms r ON f.room = r.id 135 | LEFT JOIN user_permission_overrides o ON f.room = o.room AND f."user" = o."user" 136 | WHERE at <= :now 137 | ORDER BY at 138 | ON CONFLICT (room, "user") DO UPDATE SET 139 | read = excluded.read, write = excluded.write, upload = excluded.upload 140 | """, 141 | now=now, 142 | ).rowcount 143 | 144 | if num_perms > 0: 145 | query("DELETE FROM user_permission_futures WHERE at <= :now", now=now) 146 | 147 | num_room_bans, num_user_bans = 0, 0 148 | for uid, rid, banned in query( 149 | 'SELECT "user", room, banned FROM user_ban_futures WHERE at <= :now ORDER BY at', 150 | now=now, 151 | ): 152 | if rid is None: 153 | query("UPDATE users SET banned = :b WHERE id = :u", u=uid, b=banned) 154 | num_user_bans += 1 155 | else: 156 | query( 157 | """ 158 | INSERT INTO user_permission_overrides (room, "user", banned) 159 | VALUES (:r, :u, :b) 160 | ON CONFLICT (room, "user") DO UPDATE SET banned = excluded.banned 161 | """, 162 | r=rid, 163 | u=uid, 164 | b=banned, 165 | ) 166 | num_room_bans += 1 167 | 168 | if num_room_bans > 0 or num_user_bans > 0: 169 | query("DELETE FROM user_ban_futures WHERE at <= :now", now=now) 170 | 171 | num_applied = num_perms + num_room_bans + num_user_bans 172 | 173 | if num_applied > 0: 174 | app.logger.info( 175 | f"Applied {num_applied} permission updates ({num_perms} perms; " 176 | f"{num_room_bans} room (un)bans; {num_user_bans} global (un)bans)" 177 | ) 178 | return num_applied 179 | -------------------------------------------------------------------------------- /sogs/crypto.py: -------------------------------------------------------------------------------- 1 | from . import config 2 | 3 | import os 4 | 5 | import nacl 6 | from nacl.public import PrivateKey 7 | from nacl.signing import SigningKey, VerifyKey 8 | from nacl.encoding import Base64Encoder, HexEncoder 9 | import nacl.bindings as sodium 10 | 11 | 12 | from cryptography.hazmat.primitives.ciphers.aead import AESGCM 13 | from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey, X25519PublicKey 14 | 15 | from .hashing import blake2b 16 | 17 | import secrets 18 | import hmac 19 | import functools 20 | 21 | import pyonionreq 22 | 23 | if [int(v) for v in nacl.__version__.split('.')] < [1, 4]: 24 | raise ImportError("SOGS requires nacl v1.4.0+") 25 | 26 | 27 | def persist_privkey(): 28 | """ 29 | Writes the current private key to disk if it is ephemeral. This is done automatically when a 30 | private key is generated in uwsgi application mode; for other interfaces it needs to be called 31 | manually if the key should be persisted. 32 | 33 | If the key was loaded from disk originally then this does nothing. 
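    When writing, the key file is created with mode 0400 (owner read-only).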
34 |     """ 35 |     global ephemeral_privkey 36 |     if ephemeral_privkey: 37 |         with open(os.open(config.KEY_FILE, os.O_CREAT | os.O_WRONLY, 0o400), 'wb') as f: 38 |             f.write(_privkey.encode()) 39 |         ephemeral_privkey = False 40 | 41 | 42 | ephemeral_privkey = True 43 | 44 | # generate seed as needed 45 | if os.path.exists(config.KEY_FILE): 46 |     with open(config.KEY_FILE, 'rb') as f: 47 |         _privkey = PrivateKey(f.read()) 48 |     ephemeral_privkey = False 49 | else: 50 |     _privkey = PrivateKey.generate() 51 | 52 | # Only save the key if we're running under uwsgi to avoid leaving key_x25519 files all over the 53 | # place wherever sogs is imported. 54 | if config.RUNNING_AS_APP: 55 |     persist_privkey() 56 | 57 | _privkey_bytes = _privkey.encode() 58 | 59 | server_pubkey = _privkey.public_key 60 | 61 | server_pubkey_bytes = server_pubkey.encode() 62 | server_pubkey_hash_bytes = blake2b(server_pubkey_bytes) 63 | 64 | server_pubkey_hex = server_pubkey.encode(HexEncoder).decode('ascii') 65 | server_pubkey_base64 = server_pubkey.encode(Base64Encoder).decode('ascii') 66 | 67 | _junk_parser = pyonionreq.junk.Parser(privkey=_privkey_bytes, pubkey=server_pubkey_bytes) 68 | parse_junk = _junk_parser.parse_junk 69 | 70 | 71 | def verify_sig_from_pk(data, sig, pk): 72 |     return VerifyKey(pk).verify(data, sig) 73 | 74 | 75 | server_signkey = SigningKey(_privkey_bytes) 76 | server_verifykey = server_signkey.verify_key 77 | 78 | server_verify = server_verifykey.verify 79 | server_sign = server_signkey.sign 80 | 81 | 82 | def server_encrypt(pk, data): 83 |     nonce = secrets.token_bytes(12) 84 |     pk = X25519PublicKey.from_public_bytes(pk) 85 |     sk = X25519PrivateKey.from_private_bytes(_privkey_bytes) 86 |     secret = hmac.digest(b'LOKI', sk.exchange(pk), 'SHA256') 87 |     return nonce + AESGCM(secret).encrypt(nonce, data, None) 88 | 89 | 90 | xed25519_sign = pyonionreq.xed25519.sign 91 | xed25519_verify = pyonionreq.xed25519.verify 92 | xed25519_pubkey = pyonionreq.xed25519.pubkey 93 | 94 | # AKA "k" for blinding crypto: 95 | blinding_factor = sodium.crypto_core_ed25519_scalar_reduce( 96 |     blake2b(server_pubkey_bytes, digest_size=64) 97 | ) 98 | 99 | 100 | @functools.lru_cache(maxsize=1024) 101 | def compute_blinded_abs_key(x_pk: bytes, *, k: bytes = blinding_factor): 102 |     """ 103 |     Computes the *positive* blinded Ed25519 pubkey from an unprefixed session X25519 pubkey (i.e. 32 104 |     bytes). The returned value will always have the sign bit (i.e. the most significant bit of the 105 |     last byte) set to 0; the actual derived key associated with this session id could have either 106 |     sign. 107 | 108 |     Input and result are in bytes, without the 0x05 or 0x15 prefix. 109 | 110 |     k allows you to compute for an alternative blinding factor, but should normally be omitted. 111 |     """ 112 |     A = xed25519_pubkey(x_pk) 113 |     kA = sodium.crypto_scalarmult_ed25519_noclamp(k, A) 114 | 115 |     if kA[31] & 0x80: 116 |         return kA[0:31] + bytes([kA[31] & 0x7F]) 117 |     return kA 118 | 119 | 120 | def compute_blinded_abs_id(session_id: str, *, k: bytes = blinding_factor): 121 |     """ 122 |     Computes the *positive* blinded id, as hex, from a prefixed, hex session id. This function is a 123 |     wrapper around compute_blinded_abs_key that handles prefixes and hex conversions. 124 | 125 |     k allows you to compute for an alternative blinding factor, but should normally be omitted.
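    For example, a '05'-prefixed session id (66 hex characters) yields a '15'-prefixed
    blinded id, also 66 hex characters.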
126 |     """ 127 |     return '15' + compute_blinded_abs_key(bytes.fromhex(session_id[2:]), k=k).hex() 128 | 129 | 130 | def blinded_abs(blinded_id: str): 131 |     """ 132 |     Takes a blinded hex pubkey (i.e. length 66, prefixed with 15) and returns the positive pubkey 133 |     alternative: that is, if the pubkey is already positive, it is returned as-is; otherwise the 134 |     returned value is a copy with the sign bit cleared. 135 |     """ 136 | 137 |     # Sign bit is the MSB of the last byte, which will be at [31] of the pubkey, hence [64] is 138 |     # the most significant nibble once we convert to hex and add 2 for the prefix: 139 |     msn = int(blinded_id[64], 16) 140 |     if msn & 0x8: 141 |         return blinded_id[0:64] + str(msn & 0x7) + blinded_id[65:] 142 |     return blinded_id 143 | 144 | 145 | def blinded_neg(blinded_id: str): 146 |     """ 147 |     Counterpart to blinded_abs that always returns the *negative* pubkey alternative. 148 |     """ 149 | 150 |     msn = int(blinded_id[64], 16) 151 |     if msn & 0x8: 152 |         return blinded_id 153 |     return blinded_id[0:64] + f"{msn | 0x8:x}" + blinded_id[65:] 154 | -------------------------------------------------------------------------------- /sogs/hashing.py: -------------------------------------------------------------------------------- 1 | import nacl.hashlib 2 | import hashlib 3 | 4 | 5 | def _multipart_hash(hasher, data): 6 |     if isinstance(data, bytes): 7 |         hasher.update(data) 8 |     else: 9 |         for part in data: 10 |             hasher.update(part) 11 | 12 |     return hasher.digest() 13 | 14 | 15 | def blake2b( 16 |     data, *, digest_size: int = 32, key: bytes = b'', salt: bytes = b'', person: bytes = b'' 17 | ): 18 |     """ 19 |     Calculates a Blake2B hash. 20 | 21 |     Parameters: 22 | 23 |     data -- can be bytes, or an iterable containing bytes or byte-like values. (The latter case is 24 |     particularly recommended to avoid needing to concatenate existing, potentially large, byte 25 |     values). 26 | 27 |     digest_size -- the digest size, in bytes, which affects not only the resulting length but also the 28 |     hash itself (i.e. shorter digest sizes are not substrings of longer hash sizes). 29 | 30 |     key -- a key, for a keyed hash, which can be up to 64 bytes. 31 | 32 |     salt -- a salt for generating distinct hashes for the same data. Can be up to 16 bytes; if 33 |     shorter than 16 it will be padded with null bytes. 34 | 35 |     person -- a personalization value, which works essentially like a second salt but is typically a 36 |     unique fixed string for a particular hash purpose. 37 | 38 |     Returns a bytes of length `digest_size`. 39 |     """ 40 | 41 |     return _multipart_hash( 42 |         nacl.hashlib.blake2b(digest_size=digest_size, key=key, salt=salt, person=person), data 43 |     ) 44 | 45 | 46 | def sha512(data): 47 |     """ 48 |     Calculates a SHA512 hash. 49 | 50 |     data -- can be bytes, or an iterable containing bytes or byte-like values. (The latter case is 51 |     particularly recommended to avoid needing to concatenate existing, potentially large, byte 52 |     values). 53 | 54 |     Returns a bytes of length 64.
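    For example, sha512([b'foo', b'bar']) returns the same digest as sha512(b'foobar'),
    since the parts are fed sequentially into a single hasher.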
55 | """ 56 | return _multipart_hash(hashlib.sha512(), data) 57 | -------------------------------------------------------------------------------- /sogs/http.py: -------------------------------------------------------------------------------- 1 | # success codes: 2 | OK = 200 3 | CREATED = 201 4 | 5 | # 3xx codes: 6 | NOT_MODIFIED = 304 7 | 8 | # error status codes: 9 | BAD_REQUEST = 400 10 | UNAUTHORIZED = 401 11 | FORBIDDEN = 403 12 | NOT_FOUND = 404 13 | NOT_ACCEPTABLE = 406 14 | PRECONDITION_FAILED = 412 15 | PAYLOAD_TOO_LARGE = 413 16 | TOO_EARLY = 425 17 | TOO_MANY_REQUESTS = 429 18 | INTERNAL_SERVER_ERROR = 500 19 | BAD_GATEWAY = 502 20 | INSUFFICIENT_STORAGE = 507 21 | 22 | 23 | # HTTP methods containing bodies 24 | BODY_METHODS = ('POST', 'PUT') 25 | -------------------------------------------------------------------------------- /sogs/key_convert/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxen-io/session-pysogs/9497ddbfffd34a5dabaf23cc9cca795a761521c7/sogs/key_convert/__init__.py -------------------------------------------------------------------------------- /sogs/key_convert/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | from OpenSSL import crypto as c 6 | from cryptography.hazmat.primitives import serialization as s 7 | import argparse 8 | 9 | parser = argparse.ArgumentParser( 10 | description="Convert old session-open-group-server key format to new key format" 11 | ) 12 | parser.add_argument( 13 | "--in", 14 | "-i", 15 | dest="in_", 16 | type=str, 17 | metavar="OLD_KEY_FILE", 18 | help="Path to the session-open-group-server key file", 19 | default='./x25519_private_key.pem', 20 | ) 21 | parser.add_argument( 22 | "--out", 23 | "-o", 24 | type=str, 25 | metavar="NEW_KEY_FILE", 26 | help="Path to the new sogs key to write", 27 | default='./key_x25519', 28 | ) 29 | parser.add_argument("--overwrite", "-W", action='store_true') 30 | 31 | args = parser.parse_args() 32 | 33 | with open(args.in_) as f: 34 | pkey_pem = f.read() 35 | 36 | if not args.overwrite and os.path.exists(args.out): 37 | print( 38 | f"Error: {args.out} already exists, not overwriting it without --overwrite flag!", 39 | file=sys.stderr, 40 | ) 41 | sys.exit(1) 42 | 43 | key = c.load_privatekey(c.FILETYPE_PEM, pkey_pem).to_cryptography_key() 44 | pubkey_hex = key.public_key().public_bytes(encoding=s.Encoding.Raw, format=s.PublicFormat.Raw).hex() 45 | 46 | print(f"Loaded private key; associated pubkey: {pubkey_hex}") 47 | 48 | with open(os.open(args.out, os.O_CREAT | os.O_WRONLY, 0o400), 'wb') as f: 49 | f.write( 50 | key.private_bytes( 51 | encoding=s.Encoding.Raw, 52 | format=s.PrivateFormat.Raw, 53 | encryption_algorithm=s.NoEncryption(), 54 | ) 55 | ) 56 | 57 | print("Wrote privkey to {}".format(args.out)) 58 | -------------------------------------------------------------------------------- /sogs/migrations/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import coloredlogs 3 | 4 | from .. import config 5 | 6 | from . 
import ( 7 |     file_message, 8 |     fix_info_update_triggers, 9 |     import_hacks, 10 |     message_views, 11 |     new_columns, 12 |     new_tables, 13 |     reactions, 14 |     room_accessible, 15 |     room_moderators, 16 |     seqno_creation, 17 |     seqno_etc, 18 |     user_permissions, 19 |     user_perm_futures, 20 |     v_0_1_x, 21 | ) 22 | 23 | logger = logging.getLogger(__name__) 24 | coloredlogs.install(milliseconds=True, isatty=True, logger=logger, level=config.LOG_LEVEL) 25 | 26 | 27 | def migrate(conn, *, check_only=False): 28 |     """ 29 |     Perform database migrations/updates/etc. If check_only is given then we only check whether 30 |     migrations are needed, raising a RuntimeError (without performing any migrations) if we find 31 |     any. 32 |     """ 33 | 34 |     from .. import db 35 | 36 |     any_changes = False 37 | 38 |     # NB: migration order here matters; some later migrations require earlier migrations 39 |     for migration in ( 40 |         v_0_1_x, 41 |         new_tables, 42 |         new_columns, 43 |         seqno_etc, 44 |         reactions, 45 |         seqno_creation, 46 |         message_views, 47 |         user_perm_futures, 48 |         room_accessible, 49 |         room_moderators, 50 |         user_permissions, 51 |         file_message, 52 |         fix_info_update_triggers, 53 |         import_hacks, 54 |     ): 55 |         changes = False 56 |         if check_only: 57 |             migration.migrate(conn, check_only=True) 58 |         else: 59 |             with db.transaction(conn): 60 |                 changes = migration.migrate(conn, check_only=False) 61 |         if changes: 62 |             db.metadata.clear() 63 |             db.metadata.reflect(bind=db.engine, views=True) 64 |             any_changes = True 65 | 66 |     return any_changes 67 | -------------------------------------------------------------------------------- /sogs/migrations/exc.py: -------------------------------------------------------------------------------- 1 | class DatabaseUpgradeRequired(RuntimeError): 2 |     """Thrown when using check_only=True in database migrations and an upgrade is required.""" 3 | 4 |     def __init__(self, desc): 5 |         super().__init__(f"Database upgrade required: {desc}") 6 | -------------------------------------------------------------------------------- /sogs/migrations/file_message.py: -------------------------------------------------------------------------------- 1 | from .exc import DatabaseUpgradeRequired 2 | import logging 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | 7 |     from .. import db 8 | 9 |     fix_fk = False 10 |     if 'message' in db.metadata.tables['files'].c: 11 |         if db.engine.name == "sqlite" and db.metadata.tables['files'].c['message'].references( 12 |             db.metadata.tables['rooms'].c['id'] 13 |         ): 14 |             fix_fk = True 15 |         else: 16 |             return False 17 | 18 |     logging.warning("DB migration: adding message/file association") 19 |     if check_only: 20 |         raise DatabaseUpgradeRequired("Add message/file association") 21 | 22 |     if db.engine.name == "sqlite" and fix_fk: 23 |         # Prior versions of this script created the column referencing rooms(id) instead of 24 |         # messages; we need to rewrite the schema to fix it. This schema updating feels janky, but 25 |         # is the officially documented method (https://www.sqlite.org/lang_altertable.html) 26 |         conn.execute("UPDATE files SET message = NULL") 27 |         schema_ver = conn.execute("PRAGMA schema_version").first()[0] 28 |         files_sql = conn.execute( 29 |             "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = 'files'" 30 |         ).first()[0] 31 |         broken = 'message INTEGER REFERENCES rooms(id)' 32 |         fixed = 'message INTEGER REFERENCES messages(id)' 33 |         if broken not in files_sql: 34 |             raise RuntimeError( 35 |                 "Didn't find expected schema in files table; cannot proceed with upgrade!"
36 | ) 37 | files_sql = files_sql.replace(broken, fixed) 38 | conn.execute("PRAGMA writable_schema=ON") 39 | conn.execute( 40 | "UPDATE sqlite_master SET sql = ? WHERE type = 'table' AND name = 'files'", (files_sql,) 41 | ) 42 | conn.execute(f"PRAGMA schema_version={schema_ver+1}") 43 | conn.execute("PRAGMA writable_schema=OFF") 44 | 45 | elif db.engine.name == "sqlite": 46 | conn.execute( 47 | "ALTER TABLE files ADD COLUMN message INTEGER REFERENCES messages(id)" 48 | " ON DELETE SET NULL" 49 | ) 50 | conn.execute("CREATE INDEX files_message ON files(message)") 51 | conn.execute("DROP TRIGGER IF EXISTS messages_after_delete") 52 | conn.execute( 53 | """ 54 | CREATE TRIGGER messages_after_delete AFTER UPDATE OF data ON messages 55 | FOR EACH ROW WHEN NEW.data IS NULL AND OLD.data IS NOT NULL 56 | BEGIN 57 | -- Unpin if we deleted a pinned message: 58 | DELETE FROM pinned_messages WHERE message = OLD.id; 59 | -- Expire the post's attachments immediately: 60 | UPDATE files SET expiry = 0.0 WHERE message = OLD.id; 61 | END 62 | """ 63 | ) 64 | conn.execute("DROP TRIGGER IF EXISTS room_metadata_pinned_add") 65 | conn.execute("DROP TRIGGER IF EXISTS room_metadata_pinned_update") 66 | conn.execute("DROP TRIGGER IF EXISTS room_metadata_pinned_remove") 67 | conn.execute( 68 | """ 69 | CREATE TRIGGER room_metadata_pinned_add AFTER INSERT ON pinned_messages 70 | FOR EACH ROW 71 | BEGIN 72 | UPDATE rooms SET info_updates = info_updates + 1 WHERE id = NEW.room; 73 | UPDATE files SET expiry = NULL WHERE message = NEW.message; 74 | END 75 | """ 76 | ) 77 | conn.execute( 78 | """ 79 | CREATE TRIGGER room_metadata_pinned_update AFTER UPDATE ON pinned_messages 80 | FOR EACH ROW 81 | BEGIN 82 | UPDATE rooms SET info_updates = info_updates + 1 WHERE id = NEW.room; 83 | UPDATE files SET expiry = NULL WHERE message = NEW.message; 84 | END 85 | """ 86 | ) 87 | conn.execute( 88 | """ 89 | CREATE TRIGGER room_metadata_pinned_remove AFTER DELETE ON pinned_messages 90 | FOR EACH ROW 91 | BEGIN 92 | UPDATE rooms SET info_updates = info_updates + 1 WHERE id = OLD.room; 93 | UPDATE files SET expiry = uploaded + 15.0 * 86400.0 WHERE message = OLD.message; 94 | END 95 | """ 96 | ) 97 | 98 | else: 99 | conn.execute( 100 | """ 101 | ALTER TABLE files ADD COLUMN message BIGINT REFERENCES messages ON DELETE SET NULL; 102 | 103 | CREATE INDEX files_message ON files(message); 104 | 105 | DROP TRIGGER IF EXISTS messages_after_delete ON messages; 106 | CREATE OR REPLACE FUNCTION trigger_messages_after_delete() 107 | RETURNS TRIGGER LANGUAGE PLPGSQL AS $$BEGIN 108 | -- Unpin if we deleted a pinned message: 109 | DELETE FROM pinned_messages WHERE message = OLD.id; 110 | -- Expire the posts attachments immediately: 111 | UPDATE files SET expiry = 0.0 WHERE message = OLD.id; 112 | RETURN NULL; 113 | END;$$; 114 | CREATE TRIGGER messages_after_delete AFTER UPDATE OF data ON messages 115 | FOR EACH ROW WHEN (NEW.data IS NULL AND OLD.data IS NOT NULL) 116 | EXECUTE PROCEDURE trigger_messages_after_delete(); 117 | 118 | 119 | DROP TRIGGER IF EXISTS room_metadata_pinned_add ON pinned_messages; 120 | DROP TRIGGER IF EXISTS room_metadata_pinned_remove ON pinned_messages; 121 | 122 | CREATE OR REPLACE FUNCTION trigger_room_metadata_pinned_add() 123 | RETURNS TRIGGER LANGUAGE PLPGSQL AS $$BEGIN 124 | UPDATE rooms SET info_updates = info_updates + 1 WHERE id = NEW.room; 125 | UPDATE files SET expiry = NULL WHERE message = NEW.message; 126 | RETURN NULL; 127 | END;$$; 128 | CREATE TRIGGER room_metadata_pinned_add AFTER INSERT OR UPDATE ON 
pinned_messages 129 | FOR EACH ROW 130 | EXECUTE PROCEDURE trigger_room_metadata_pinned_add(); 131 | 132 | CREATE OR REPLACE FUNCTION trigger_room_metadata_pinned_remove() 133 | RETURNS TRIGGER LANGUAGE PLPGSQL AS $$BEGIN 134 | UPDATE rooms SET info_updates = info_updates + 1 WHERE id = OLD.room; 135 | UPDATE files SET expiry = uploaded + 15.0*86400.0 WHERE message = OLD.message; 136 | RETURN NULL; 137 | END;$$; 138 | CREATE TRIGGER room_metadata_pinned_remove AFTER DELETE ON pinned_messages 139 | FOR EACH ROW 140 | EXECUTE PROCEDURE trigger_room_metadata_pinned_remove(); 141 | """ 142 | ) 143 | 144 | return True 145 | -------------------------------------------------------------------------------- /sogs/migrations/fix_info_update_triggers.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | from .. import db 7 | 8 | # Room info_updates triggers for global mods didn't fire for invisible global mods/admins, but 9 | # should (so that other mods/admins notice the change). 10 | has_bad_trigger = db.query( 11 | """ 12 | SELECT COUNT(*) FROM sqlite_master 13 | WHERE type = 'trigger' AND name = :trigger 14 | AND LOWER(sql) LIKE :bad 15 | """ 16 | if db.engine.name == "sqlite" 17 | else """ 18 | SELECT COUNT(*) FROM information_schema.triggers 19 | WHERE trigger_name = :trigger 20 | AND LOWER(action_condition) LIKE :bad 21 | """, 22 | trigger='room_metadata_global_mods_insert', 23 | bad='% new.visible_mod%', 24 | dbconn=conn, 25 | ).first()[0] 26 | 27 | if not has_bad_trigger: 28 | return False 29 | 30 | logging.warning("DB migration: fixing global hidden mod room triggers") 31 | if check_only: 32 | raise DatabaseUpgradeRequired("global hidden mod room triggers need to be recreated") 33 | 34 | if db.engine.name == "sqlite": 35 | conn.execute("DROP TRIGGER IF EXISTS room_metadata_global_mods_insert") 36 | conn.execute("DROP TRIGGER IF EXISTS room_metadata_global_mods_update") 37 | conn.execute("DROP TRIGGER IF EXISTS room_metadata_global_mods_delete") 38 | conn.execute( 39 | """ 40 | CREATE TRIGGER room_metadata_global_mods_insert AFTER INSERT ON users 41 | FOR EACH ROW WHEN (NEW.admin OR NEW.moderator) 42 | BEGIN 43 | UPDATE rooms SET info_updates = info_updates + 1; -- WHERE everything! 44 | END 45 | """ 46 | ) 47 | conn.execute( 48 | """ 49 | CREATE TRIGGER room_metadata_global_mods_update AFTER UPDATE ON users 50 | FOR EACH ROW WHEN (NEW.moderator != OLD.moderator OR NEW.admin != OLD.admin OR NEW.visible_mod != OLD.visible_mod) 51 | BEGIN 52 | UPDATE rooms SET info_updates = info_updates + 1; -- WHERE everything! 53 | END 54 | """ # noqa: E501 55 | ) 56 | conn.execute( 57 | """ 58 | CREATE TRIGGER room_metadata_global_mods_delete AFTER DELETE ON users 59 | FOR EACH ROW WHEN (OLD.moderator OR OLD.admin) 60 | BEGIN 61 | UPDATE rooms SET info_updates = info_updates + 1; -- WHERE everything! 
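                -- (Deliberately no WHERE clause: a global moderator change bumps
                -- info_updates in every room.)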
62 |             END 63 |             """ 64 |         ) 65 | 66 |     else:  # postgresql 67 |         conn.execute( 68 |             """ 69 |             DROP TRIGGER IF EXISTS room_metadata_global_mods_insert ON users; 70 |             DROP TRIGGER IF EXISTS room_metadata_global_mods_update ON users; 71 |             DROP TRIGGER IF EXISTS room_metadata_global_mods_delete ON users; 72 | 73 |             CREATE TRIGGER room_metadata_global_mods_insert AFTER INSERT ON users 74 |             FOR EACH ROW WHEN (NEW.admin OR NEW.moderator) 75 |             EXECUTE PROCEDURE trigger_room_metadata_info_update_all(); 76 | 77 |             CREATE TRIGGER room_metadata_global_mods_update AFTER UPDATE OF moderator, admin, visible_mod ON users 78 |             FOR EACH ROW WHEN (NEW.moderator != OLD.moderator OR NEW.admin != OLD.admin OR NEW.visible_mod != OLD.visible_mod) 79 |             EXECUTE PROCEDURE trigger_room_metadata_info_update_all(); 80 | 81 |             CREATE TRIGGER room_metadata_global_mods_delete AFTER DELETE ON users 82 |             FOR EACH ROW WHEN (OLD.moderator OR OLD.admin) 83 |             EXECUTE PROCEDURE trigger_room_metadata_info_update_all(); 84 |             """  # noqa: E501 85 |         ) 86 | 87 |     return True 88 | -------------------------------------------------------------------------------- /sogs/migrations/import_hacks.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 |     """ 7 |     The 0.1.x migration sets up a file_id_hacks table to map old ids to new ids; if it's present and 8 |     non-empty then we enable "hack" mode. (This should empty out over 15 days as attachments 9 |     expire). 10 | 11 |     We also have a room_import_hacks table that lets us map old message ids to new ids (because in 12 |     the old database message ids overlapped, but in the new database they are unique). Each entry consists 13 |     of a max id and an offset that lets us figure out the new (current database) id. For instance, 14 |     some range of messages in room xyz with old ids [1,5000] could get inserted as ids [4321, 9320], 15 |     so max would be 5000 and offset would be 4320: old message id 3333 will have new message id 16 |     3333+4320 = 7653. We read all the offsets once at startup and stash them in ROOM_IMPORT_HACKS. 17 |     """ 18 | 19 |     from .. import db 20 | 21 |     changed = False 22 | 23 |     # Older version of the table edit migration didn't drop the old table: 24 |     if 'old_room_import_hacks' in db.metadata.tables: 25 |         logging.warning("Dropping old_room_import_hacks temporary table") 26 |         if check_only: 27 |             raise DatabaseUpgradeRequired("old_room_import_hacks") 28 |         conn.execute('DROP TABLE old_room_import_hacks') 29 |         changed = True 30 | 31 |     if 'file_id_hacks' in db.metadata.tables: 32 |         # If the table exists but is empty (i.e. because all the attachments expired) then we should 33 |         # drop it.
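        # (When check_only is set we leave the table in place: dropping an empty hacks
        # table is an optimization, not a required migration, so it never triggers
        # DatabaseUpgradeRequired.)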
34 | if not check_only and conn.execute("SELECT COUNT(*) FROM file_id_hacks").first()[0] == 0: 35 | logging.warning("Dropping file_id_hacks old sogs import table (no longer required)") 36 | db.metadata.tables['file_id_hacks'].drop(db.engine) 37 | changed = True 38 | else: 39 | logging.warning("Keeping file_id_hacks old sogs import table (still required)") 40 | db.HAVE_FILE_ID_HACKS = True 41 | 42 | if 'room_import_hacks' in db.metadata.tables: 43 | rows = conn.execute( 44 | "SELECT room, old_message_id_max, message_id_offset FROM room_import_hacks" 45 | ) 46 | for (room, id_max, offset) in rows: 47 | db.ROOM_IMPORT_HACKS[room] = (id_max, offset) 48 | 49 | if not db.HAVE_FILE_ID_HACKS and 'room_import_hacks' not in db.metadata.tables: 50 | return changed 51 | 52 | # DB fix: the original import was missing a ON DELETE CASCADE on the rooms foreign key, 53 | # which prevents imported room deletion. 54 | 55 | if db.engine.name == 'sqlite': 56 | # SQLite can't add a foreign key, so we have to rename, recreate entirely, and copy 57 | # everything over. Ew. 58 | if db.HAVE_FILE_ID_HACKS: 59 | need_fix = False 60 | # Annoyingly, sqlalchemy doesn't pick up foreign key actions when reflecting 61 | # sqlite (probably because sqlite doesn't enforce foreign keys by default), so 62 | # we have to pragma query the info ourself: 63 | for fk in conn.execute('PRAGMA foreign_key_list("file_id_hacks")'): 64 | if fk['from'] == 'room' and fk['on_delete'] != 'CASCADE': 65 | need_fix = True 66 | if need_fix: 67 | logging.warning("Replacing file_id_hacks to add cascading foreign key") 68 | if check_only: 69 | raise DatabaseUpgradeRequired("file_id_hacks") 70 | conn.execute("ALTER TABLE file_id_hacks RENAME TO old_file_id_hacks") 71 | conn.execute( 72 | """ 73 | CREATE TABLE file_id_hacks ( 74 | room INTEGER NOT NULL REFERENCES rooms(id) ON DELETE CASCADE, 75 | old_file_id INTEGER NOT NULL, 76 | file INTEGER NOT NULL REFERENCES files(id) ON DELETE CASCADE, 77 | PRIMARY KEY(room, old_file_id) 78 | ) 79 | """ 80 | ) 81 | conn.execute( 82 | """ 83 | INSERT INTO file_id_hacks 84 | SELECT room, old_file_id, file FROM old_file_id_hacks 85 | """ 86 | ) 87 | 88 | changed = True 89 | 90 | if 'room_import_hacks' in db.metadata.tables: 91 | need_fix = False 92 | # Annoyingly, sqlalchemy doesn't pick up foreign key actions when reflecting 93 | # sqlite (probably because sqlite doesn't enforce foreign keys by default), so 94 | # we have to pragma query the info ourself: 95 | for fk in conn.execute('PRAGMA foreign_key_list("room_import_hacks")'): 96 | if fk['from'] == 'room' and fk['on_delete'] != 'CASCADE': 97 | need_fix = True 98 | if need_fix: 99 | logging.warning("Replacing room_import_hacks to add cascading foreign key") 100 | if check_only: 101 | raise DatabaseUpgradeRequired("room_import_hacks") 102 | conn.execute("ALTER TABLE room_import_hacks RENAME TO old_room_import_hacks") 103 | conn.execute( 104 | """ 105 | CREATE TABLE room_import_hacks ( 106 | room INTEGER PRIMARY KEY NOT NULL REFERENCES rooms(id) ON DELETE CASCADE, 107 | old_message_id_max INTEGER NOT NULL, 108 | message_id_offset INTEGER NOT NULL 109 | ) 110 | """ 111 | ) 112 | conn.execute( 113 | """ 114 | INSERT INTO room_import_hacks 115 | SELECT room, old_message_id_max, message_id_offset 116 | FROM old_room_import_hacks 117 | """ 118 | ) 119 | conn.execute('DROP TABLE old_room_import_hacks') 120 | 121 | changed = True 122 | 123 | else: # postgresql 124 | fix_fid = db.HAVE_FILE_ID_HACKS and any( 125 | f.ondelete != 'CASCADE' 126 | for f in 
db.metadata.tables['file_id_hacks'].c['room'].foreign_keys 127 |         ) 128 |         fix_room = 'room_import_hacks' in db.metadata.tables and any( 129 |             f.ondelete != 'CASCADE' 130 |             for f in db.metadata.tables['room_import_hacks'].c['room'].foreign_keys 131 |         ) 132 |         if fix_fid or fix_room: 133 |             if check_only: 134 |                 raise DatabaseUpgradeRequired("v0.1.x import hacks tables") 135 |             if fix_fid: 136 |                 conn.execute( 137 |                     """ 138 |                     ALTER TABLE file_id_hacks DROP CONSTRAINT file_id_hacks_room_fkey; 139 |                     ALTER TABLE file_id_hacks ADD CONSTRAINT 140 |                         file_id_hacks_room_fkey FOREIGN KEY (room) REFERENCES rooms(id) ON DELETE CASCADE; 141 |                     """ 142 |                 ) 143 |             if fix_room: 144 |                 conn.execute( 145 |                     """ 146 |                     ALTER TABLE room_import_hacks DROP CONSTRAINT room_import_hacks_room_fkey; 147 |                     ALTER TABLE room_import_hacks ADD CONSTRAINT 148 |                         room_import_hacks_room_fkey FOREIGN KEY (room) REFERENCES rooms(id) ON DELETE CASCADE; 149 |                     """ 150 |                 ) 151 |             changed = True 152 | 153 |     return changed 154 | -------------------------------------------------------------------------------- /sogs/migrations/message_views.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 |     from .. import db 7 | 8 |     if 'message_metadata' in db.metadata.tables and all( 9 |         x in db.metadata.tables['message_metadata'].c 10 |         for x in ('whisper_to', 'whisper_mods', 'filtered', 'seqno', 'seqno_data') 11 |     ): 12 |         query_bad_trigger = ( 13 |             """ 14 |             SELECT COUNT(*) FROM sqlite_master 15 |             WHERE type = 'trigger' AND name = 'message_details_deleter' 16 |                 AND sql LIKE :like_bad 17 |             """ 18 |             if db.engine.name == "sqlite" 19 |             else """ 20 |             SELECT COUNT(*) FROM information_schema.routines 21 |             WHERE routine_name = 'trigger_message_details_deleter' 22 |                 AND routine_definition LIKE :like_bad 23 |             """ 24 |         ) 25 |         if ( 26 |             db.query(query_bad_trigger, dbconn=conn, like_bad='%DELETE FROM reactions%').first()[0] 27 |             == 0 28 |         ): 29 |             return False 30 | 31 |     logging.warning("DB migration: recreating message_metadata/message_details views") 32 |     if check_only: 33 |         raise DatabaseUpgradeRequired("message views need to be recreated") 34 | 35 |     conn.execute("DROP VIEW IF EXISTS message_metadata") 36 |     conn.execute("DROP VIEW IF EXISTS message_details") 37 | 38 |     if db.engine.name == "sqlite": 39 |         conn.execute("DROP TRIGGER IF EXISTS message_details_deleter") 40 |         conn.execute( 41 |             """ 42 |             CREATE VIEW message_details AS 43 |             SELECT messages.*, uposter.session_id, uwhisper.session_id AS whisper_to 44 |             FROM messages 45 |                 JOIN users uposter ON messages."user" = uposter.id 46 |                 LEFT JOIN users uwhisper ON messages.whisper = uwhisper.id 47 |             """ 48 |         ) 49 |         conn.execute( 50 |             """ 51 |             CREATE TRIGGER message_details_deleter INSTEAD OF DELETE ON message_details 52 |             FOR EACH ROW WHEN OLD.data IS NOT NULL 53 |             BEGIN 54 |                 UPDATE messages SET data = NULL, data_size = NULL, signature = NULL 55 |                     WHERE id = OLD.id; 56 |                 DELETE FROM user_reactions WHERE reaction IN ( 57 |                     SELECT id FROM reactions WHERE message = OLD.id); 58 |             END 59 |             """ 60 |         ) 61 |         conn.execute( 62 |             """ 63 |             CREATE VIEW message_metadata AS 64 |             SELECT id, room, "user", session_id, posted, edited, seqno, seqno_data, seqno_reactions, seqno_creation, 65 |                 filtered, whisper_to, whisper_mods, 66 |                 length(data) AS data_unpadded, data_size, length(signature) as signature_length 67 |             FROM message_details 68 |             """  # noqa: E501 69 |         ) 70 | 71 |     else:  # postgresql 72 |         conn.execute( 73 |             """ 74 |             -- 
Effectively the same as `messages` except that it also includes the `session_id` from the users 75 | -- table of the user who posted it, and the session id of the whisper recipient (as `whisper_to`) if 76 | -- a directed whisper. 77 | CREATE VIEW message_details AS 78 | SELECT messages.*, uposter.session_id, uwhisper.session_id AS whisper_to 79 | FROM messages 80 | JOIN users uposter ON messages.user = uposter.id 81 | LEFT JOIN users uwhisper ON messages.whisper = uwhisper.id; 82 | 83 | -- Delete trigger on message_details which lets us use a DELETE that gets transformed into an UPDATE 84 | -- that sets data, size, signature to NULL on the matched messages. 85 | CREATE OR REPLACE FUNCTION trigger_message_details_deleter() 86 | RETURNS TRIGGER LANGUAGE PLPGSQL AS $$BEGIN 87 | IF OLD.data IS NOT NULL THEN 88 | UPDATE messages SET data = NULL, data_size = NULL, signature = NULL 89 | WHERE id = OLD.id; 90 | DELETE FROM user_reactions WHERE reaction IN ( 91 | SELECT id FROM reactions WHERE message = OLD.id); 92 | END IF; 93 | RETURN NULL; 94 | END;$$; 95 | DROP TRIGGER IF EXISTS message_details_deleter ON message_details; 96 | CREATE TRIGGER message_details_deleter INSTEAD OF DELETE ON message_details 97 | FOR EACH ROW 98 | EXECUTE PROCEDURE trigger_message_details_deleter(); 99 | 100 | -- View of `messages` that is useful for manually inspecting table contents by only returning the 101 | -- length (rather than raw bytes) for data/signature. 102 | CREATE VIEW message_metadata AS 103 | SELECT id, room, "user", session_id, posted, edited, seqno, seqno_data, seqno_reactions, seqno_creation, 104 | filtered, whisper_to, whisper_mods, 105 | length(data) AS data_unpadded, data_size, length(signature) as signature_length 106 | FROM message_details; 107 | """ # noqa: E501 108 | ) 109 | 110 | return True 111 | -------------------------------------------------------------------------------- /sogs/migrations/new_columns.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | """ 7 | New columns that might need to be added that don't require more complex migrations beyond simply 8 | adding the column. 9 | """ 10 | 11 | from .. 
import db 12 | 13 | new_table_cols = { 14 | 'messages': { 15 | 'whisper': 'INTEGER REFERENCES users(id)', 16 | 'whisper_mods': 'BOOLEAN NOT NULL DEFAULT FALSE', 17 | 'filtered': 'BOOLEAN NOT NULL DEFAULT FALSE', 18 | }, 19 | 'rooms': {'active_users': 'BIGINT NOT NULL DEFAULT 0'}, 20 | } 21 | 22 | added = False 23 | 24 | for table, cols in new_table_cols.items(): 25 | for name, definition in cols.items(): 26 | if name not in db.metadata.tables[table].c: 27 | logging.warning(f"DB migration: Adding new column {table}.{name}") 28 | if check_only: 29 | raise DatabaseUpgradeRequired(f"new column {table}.{name}") 30 | conn.execute(f"ALTER TABLE {table} ADD COLUMN {name} {definition}") 31 | added = True 32 | 33 | return added 34 | -------------------------------------------------------------------------------- /sogs/migrations/new_tables.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | # { table_name => { 'sqlite': ['query1', 'query2'], 'pgsql': "query1; query2" } } 6 | table_creations = { 7 | 'user_request_nonces': { 8 | 'sqlite': [ 9 | """ 10 | CREATE TABLE user_request_nonces ( 11 | user INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, 12 | nonce BLOB NOT NULL UNIQUE, 13 | expiry FLOAT NOT NULL DEFAULT ((julianday('now') - 2440587.5 + 1.0)*86400.0) /* now + 24h */ 14 | ) 15 | """, 16 | """ 17 | CREATE INDEX user_request_nonces_expiry ON user_request_nonces(expiry) 18 | """, 19 | ], 20 | 'pgsql': """ 21 | CREATE TABLE user_request_nonces ( 22 | "user" BIGINT NOT NULL REFERENCES users ON DELETE CASCADE, 23 | nonce BYTEA NOT NULL UNIQUE, 24 | expiry FLOAT NOT NULL DEFAULT (extract(epoch from now() + '24 hours')) 25 | ); 26 | CREATE INDEX user_request_nonces_expiry ON user_request_nonces(expiry) 27 | """, 28 | }, 29 | 'inbox': { 30 | 'sqlite': [ 31 | """ 32 | CREATE TABLE inbox ( 33 | id INTEGER PRIMARY KEY, 34 | recipient INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, 35 | sender INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, 36 | body BLOB NOT NULL, 37 | posted_at FLOAT DEFAULT ((julianday('now') - 2440587.5)*86400.0), 38 | expiry FLOAT DEFAULT ((julianday('now') - 2440587.5 + 15.0)*86400.0) /* now + 15 days */ 39 | ) 40 | """, 41 | """ 42 | CREATE INDEX inbox_recipient ON inbox(recipient) 43 | """, 44 | ], 45 | 'pgsql': """ 46 | CREATE TABLE inbox ( 47 | id BIGINT GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, 48 | recipient BIGINT NOT NULL REFERENCES users ON DELETE CASCADE, 49 | sender BIGINT NOT NULL REFERENCES users ON DELETE CASCADE, 50 | body BYTEA NOT NULL, 51 | posted_at FLOAT DEFAULT (extract(epoch from now())), 52 | expiry FLOAT DEFAULT (extract(epoch from now() + '15 days')) 53 | ); 54 | CREATE INDEX inbox_recipient ON inbox(recipient); 55 | """, 56 | }, 57 | 'needs_blinding': { 58 | 'sqlite': [ 59 | """ 60 | CREATE TABLE needs_blinding ( 61 | blinded_abs TEXT NOT NULL PRIMARY KEY, 62 | "user" BIGINT NOT NULL UNIQUE REFERENCES users ON DELETE CASCADE 63 | ) 64 | """ 65 | ], 66 | 'pgsql': """ 67 | CREATE TABLE needs_blinding ( 68 | blinded_abs TEXT NOT NULL PRIMARY KEY, 69 | "user" BIGINT NOT NULL UNIQUE REFERENCES users ON DELETE CASCADE 70 | ) 71 | """, 72 | }, 73 | } 74 | 75 | 76 | def migrate(conn, *, check_only): 77 | """Adds new tables that don't have any special migration requirement beyond creation""" 78 | 79 | from .. 
import db 80 | 81 | added = False 82 | 83 | for table, v in table_creations.items(): 84 | if table in db.metadata.tables: 85 | continue 86 | 87 | logging.warning(f"DB migration: Adding new table {table}") 88 | if check_only: 89 | raise DatabaseUpgradeRequired(f"new table {table}") 90 | 91 | if db.engine.name == 'sqlite': 92 | for query in v['sqlite']: 93 | conn.execute(query) 94 | else: 95 | conn.execute(v['pgsql']) 96 | 97 | added = True 98 | 99 | return added 100 | -------------------------------------------------------------------------------- /sogs/migrations/room_accessible.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | """Add the room.accessible permission flag, and associated column/view changes""" 7 | 8 | from .. import db 9 | 10 | if 'accessible' in db.metadata.tables['rooms'].c: 11 | return False 12 | 13 | logging.warning("DB migration: adding 'accessible' room permission columns") 14 | if check_only: 15 | raise DatabaseUpgradeRequired("Add accessible room permission columns") 16 | 17 | conn.execute("ALTER TABLE rooms ADD COLUMN accessible BOOLEAN NOT NULL DEFAULT TRUE") 18 | conn.execute("ALTER TABLE user_permission_overrides ADD COLUMN accessible BOOLEAN") 19 | 20 | # Gets recreated in the user_permissions migration: 21 | conn.execute("DROP VIEW IF EXISTS user_permissions") 22 | 23 | if db.engine.name == "sqlite": 24 | conn.execute("DROP TRIGGER IF EXISTS user_perms_empty_cleanup") 25 | conn.execute( 26 | """ 27 | CREATE TRIGGER user_perms_empty_cleanup AFTER UPDATE ON user_permission_overrides 28 | FOR EACH ROW WHEN NOT (NEW.banned OR NEW.moderator OR NEW.admin) 29 | AND COALESCE(NEW.accessible, NEW.read, NEW.write, NEW.upload) IS NULL 30 | BEGIN 31 | DELETE from user_permission_overrides WHERE room = NEW.room AND user = NEW.user; 32 | END 33 | """ 34 | ) 35 | 36 | else: 37 | conn.execute( 38 | """ 39 | DROP TRIGGER IF EXISTS user_perms_empty_cleanup ON user_permission_overrides; 40 | 41 | CREATE TRIGGER user_perms_empty_cleanup AFTER UPDATE ON user_permission_overrides 42 | FOR EACH ROW WHEN (NOT (NEW.banned OR NEW.moderator OR NEW.admin) 43 | AND COALESCE(NEW.accessible, NEW.read, NEW.write, NEW.upload) IS NULL) 44 | EXECUTE PROCEDURE trigger_user_perms_empty_cleanup(); 45 | """ 46 | ) 47 | 48 | return True 49 | -------------------------------------------------------------------------------- /sogs/migrations/room_moderators.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | """ 7 | Adds the room_moderators view, along with a couple other optimizations that came at the same 8 | time: 9 | - we drop the user_permissions view (to be recreated in the user_permissions migration code) 10 | - we drop the user_permission_overrides_public_mods index and recreate a tighter index 11 | """ 12 | 13 | from .. 
import db 14 | 15 | if 'room_moderators' in db.metadata.tables: 16 | return False 17 | 18 | logging.warning("DB migration: create room_moderators view") 19 | if check_only: 20 | raise DatabaseUpgradeRequired("Create room_moderators view") 21 | 22 | if db.engine.name == "sqlite": 23 | conn.execute( 24 | """ 25 | CREATE VIEW room_moderators AS 26 | SELECT session_id, mods.* FROM ( 27 | SELECT 28 | room, 29 | "user", 30 | MAX(visible_mod) & 1 AS visible_mod, 31 | MAX(admin) AS admin, 32 | MAX(room_moderator) AS room_moderator, 33 | MAX(global_moderator) AS global_moderator 34 | FROM ( 35 | SELECT 36 | room, 37 | "user", 38 | CASE WHEN visible_mod THEN 3 ELSE 2 END AS visible_mod, 39 | admin, 40 | TRUE AS room_moderator, 41 | FALSE AS global_moderator 42 | FROM user_permission_overrides WHERE moderator 43 | 44 | UNION ALL 45 | 46 | SELECT 47 | rooms.id AS room, 48 | users.id as "user", 49 | CASE WHEN visible_mod THEN 1 ELSE 0 END AS visible_mod, 50 | admin, 51 | FALSE as room_moderator, 52 | TRUE as global_moderator 53 | FROM users CROSS JOIN rooms WHERE moderator 54 | ) m GROUP BY "user", room 55 | ) mods JOIN users on "user" = users.id 56 | """ 57 | ) 58 | else: # postgres 59 | conn.execute( 60 | """ 61 | CREATE VIEW room_moderators AS 62 | SELECT session_id, mods.* FROM ( 63 | SELECT 64 | room, 65 | "user", 66 | CAST(MAX(visible_mod) & 1 AS BOOLEAN) AS visible_mod, 67 | bool_or(admin) AS admin, 68 | bool_or(room_moderator) AS room_moderator, 69 | bool_or(global_moderator) AS global_moderator 70 | FROM ( 71 | SELECT 72 | room, 73 | "user", 74 | CASE WHEN visible_mod THEN 3 ELSE 2 END AS visible_mod, 75 | admin, 76 | TRUE AS room_moderator, 77 | FALSE AS global_moderator 78 | FROM user_permission_overrides WHERE moderator 79 | 80 | UNION ALL 81 | 82 | SELECT 83 | rooms.id AS room, 84 | users.id as "user", 85 | CASE WHEN visible_mod THEN 1 ELSE 0 END AS visible_mod, 86 | admin, 87 | FALSE as room_moderator, 88 | TRUE as global_moderator 89 | FROM users CROSS JOIN rooms WHERE moderator 90 | ) m GROUP BY "user", room 91 | ) mods JOIN users on "user" = users.id 92 | """ 93 | ) 94 | 95 | conn.execute("DROP VIEW IF EXISTS user_permissions") 96 | conn.execute("DROP INDEX IF EXISTS user_permission_overrides_public_mods") 97 | conn.execute( 98 | "CREATE INDEX IF NOT EXISTS user_permission_overrides_mods " 99 | "ON user_permission_overrides(room) WHERE moderator" 100 | ) 101 | 102 | return True 103 | -------------------------------------------------------------------------------- /sogs/migrations/seqno_creation.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | """ 7 | Adds a seqno_creation column to track the seqno when a message was created so that we can skip 8 | deleted messages entirely (i.e. omit the tombstone) when polling from a seqno before the message 9 | was created. 10 | """ 11 | 12 | from .. 
import db 12 | 13 | if 'seqno_creation' in db.metadata.tables['messages'].c: 14 | return False 15 | 16 | if check_only: 17 | raise DatabaseUpgradeRequired("message creation seqno") 18 | 19 | logging.warning("Adding messages.seqno_creation column") 20 | if db.engine.name == 'sqlite': 21 | conn.execute("ALTER TABLE messages ADD COLUMN seqno_creation INTEGER NOT NULL DEFAULT 0") 22 | conn.execute("DROP TRIGGER IF EXISTS messages_insert_counter") 23 | conn.execute( 24 | """ 25 | CREATE TRIGGER messages_insert_counter AFTER INSERT ON messages 26 | FOR EACH ROW 27 | BEGIN 28 | UPDATE rooms SET message_sequence = message_sequence + 1 WHERE id = NEW.room; 29 | UPDATE messages SET seqno_data = (SELECT message_sequence FROM rooms WHERE id = NEW.room) WHERE id = NEW.id; 30 | UPDATE messages SET seqno_creation = seqno_data WHERE id = NEW.id; 31 | END 32 | """ # noqa: E501 33 | ) 34 | else: # postgresql 35 | conn.execute( 36 | """ 37 | ALTER TABLE messages ADD COLUMN seqno_creation BIGINT NOT NULL DEFAULT 0; 38 | 39 | CREATE OR REPLACE FUNCTION trigger_messages_insert_counter() 40 | RETURNS TRIGGER LANGUAGE PLPGSQL AS $$ 41 | DECLARE 42 | new_seqno BIGINT := increment_room_sequence(NEW.room); 43 | BEGIN 44 | UPDATE messages SET seqno_data = new_seqno, seqno_creation = new_seqno WHERE id = NEW.id; 45 | RETURN NULL; 46 | END;$$; 47 | DROP TRIGGER IF EXISTS messages_insert_counter ON messages; 48 | CREATE TRIGGER messages_insert_counter AFTER INSERT ON messages 49 | FOR EACH ROW EXECUTE PROCEDURE trigger_messages_insert_counter(); 50 | """ 51 | ) 52 | 53 | # Drop these to be recreated (with the new column) in the message_views migration. 54 | conn.execute("DROP VIEW IF EXISTS message_metadata") 55 | conn.execute("DROP VIEW IF EXISTS message_details") 56 | 57 | return True 58 | -------------------------------------------------------------------------------- /sogs/migrations/seqno_etc.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | """ 7 | Rename rooms.updates/messages.updated to rooms.message_sequence/messages.seqno for better 8 | disambiguation with rooms.info_updates. 9 | 10 | This also does various other changes/fixes that came at the same time as the column rename: 11 | 12 | - remove "updated" from and add "pinned_by"/"pinned_at" to pinned_messages 13 | - recreate the pinned_messages table and triggers because we need several changes: 14 | - add trigger to unpin a message when the message is deleted 15 | - remove "updates" (now message_sequence) updates from room metadata update trigger 16 | - add AFTER UPDATE trigger to properly update room metadata counter when re-pinning an 17 | existing pinned message 18 | - fix user_permissions view to return true for read/write/upload for moderators 19 | """ 20 | 21 | from .. 
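import db

One of the fixes listed above, dropping the message views before the renames, works around a real sqlite limitation. A self-contained sketch reproducing it with a scratch in-memory database (table and view names here are made up; it mirrors the failure mode the comment in the code below describes):

```python
# Scratch sketch of the sqlite failure mode that forces the DROP VIEW dance:
# renaming a column still referenced through a view-of-a-view errors out.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE t (updated INTEGER);
    CREATE VIEW v1 AS SELECT updated FROM t;
    CREATE VIEW v2 AS SELECT updated FROM v1;
    """
)
try:
    conn.execute("ALTER TABLE t RENAME COLUMN updated TO seqno")
except sqlite3.OperationalError as e:
    print("rename blocked:", e)
    # ...so drop the dependent views first, then rename (the views get
    # recreated afterwards, as the message_views migration does for sogs):
    conn.executescript("DROP VIEW v2; DROP VIEW v1")
    conn.execute("ALTER TABLE t RENAME COLUMN updated TO seqno")
```

from ..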
import db 21 | 22 | 23 | if 'seqno' in db.metadata.tables['messages'].c: 24 | return False 25 | 26 | if check_only: 27 | raise DatabaseUpgradeRequired("sequence column renames & pinned_messages table") 28 | 29 | # We can't insert the required pinned_messages because we don't have the pinned_by user, but 30 | # that isn't a big deal since we didn't have any endpoints for pinned messages before this 31 | # anyway, so we just recreate the whole thing (along with triggers, which we also need to 32 | # update/fix) 33 | logging.warning("Recreating pinned_messages table") 34 | conn.execute("DROP TABLE pinned_messages") 35 | if db.engine.name == 'sqlite': 36 | conn.execute( 37 | """ 38 | CREATE TABLE pinned_messages ( 39 | room INTEGER NOT NULL REFERENCES rooms(id) ON DELETE CASCADE, 40 | message INTEGER NOT NULL REFERENCES messages(id) ON DELETE CASCADE, 41 | pinned_by INTEGER NOT NULL REFERENCES users(id), 42 | pinned_at FLOAT NOT NULL DEFAULT ((julianday('now') - 2440587.5)*86400.0), /* unix epoch when pinned */ 43 | PRIMARY KEY(room, message) 44 | ) 45 | """ # noqa: E501 46 | ) 47 | conn.execute( 48 | """ 49 | CREATE TRIGGER room_metadata_pinned_add AFTER INSERT ON pinned_messages 50 | FOR EACH ROW 51 | BEGIN 52 | UPDATE rooms SET info_updates = info_updates + 1 WHERE id = NEW.room; 53 | END 54 | """ 55 | ) 56 | conn.execute( 57 | """ 58 | CREATE TRIGGER room_metadata_pinned_update AFTER UPDATE ON pinned_messages 59 | FOR EACH ROW 60 | BEGIN 61 | UPDATE rooms SET info_updates = info_updates + 1 WHERE id = NEW.room; 62 | END 63 | """ 64 | ) 65 | conn.execute( 66 | """ 67 | CREATE TRIGGER room_metadata_pinned_remove AFTER DELETE ON pinned_messages 68 | FOR EACH ROW 69 | BEGIN 70 | UPDATE rooms SET info_updates = info_updates + 1 WHERE id = OLD.room; 71 | END 72 | """ 73 | ) 74 | 75 | else: # postgresql 76 | 77 | conn.execute( 78 | """ 79 | CREATE TABLE pinned_messages ( 80 | room BIGINT NOT NULL REFERENCES rooms ON DELETE CASCADE, 81 | message BIGINT NOT NULL REFERENCES messages ON DELETE CASCADE, 82 | pinned_by BIGINT NOT NULL REFERENCES users, 83 | pinned_at FLOAT NOT NULL DEFAULT (extract(epoch from now())), 84 | PRIMARY KEY(room, message) 85 | ); 86 | 87 | 88 | CREATE TRIGGER room_metadata_pinned_add AFTER INSERT OR UPDATE ON pinned_messages 89 | FOR EACH ROW 90 | EXECUTE PROCEDURE trigger_room_metadata_info_update_new(); 91 | 92 | CREATE TRIGGER room_metadata_pinned_remove AFTER DELETE ON pinned_messages 93 | FOR EACH ROW 94 | EXECUTE PROCEDURE trigger_room_metadata_info_update_old(); 95 | """ 96 | ) 97 | 98 | logging.warning("Applying message_sequence renames") 99 | conn.execute("ALTER TABLE rooms RENAME COLUMN updates TO message_sequence") 100 | 101 | # The message_views migration will create these for us, and we need to drop them because: 102 | # 1) postgresql doesn't rename the view's output columns to match the new table column 103 | # 2) sqlite breaks if attempting to rename a column that is referenced in a view-of-a-view 104 | conn.execute("DROP VIEW message_metadata") 105 | conn.execute("DROP VIEW message_details") 106 | 107 | conn.execute("ALTER TABLE messages RENAME COLUMN updated TO seqno") 108 | 109 | # Gets recreated in the user_permissions migration: 110 | logging.warning("Dropping user_permissions view") 111 | conn.execute("DROP VIEW IF EXISTS user_permissions") 112 | 113 | return True 114 | -------------------------------------------------------------------------------- /sogs/migrations/user_perm_futures.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | """ 7 | Break up user_permission_futures to not be (room,user) unique, and to move ban futures to a 8 | separate table. 9 | """ 10 | 11 | from .. import db 12 | 13 | if 'user_ban_futures' in db.metadata.tables: 14 | return False 15 | 16 | logging.warning("Updating user_permission_futures") 17 | if check_only: 18 | raise DatabaseUpgradeRequired("user_permission_futures/user_ban_futures conversion") 19 | 20 | if db.engine.name == 'sqlite': 21 | # Under sqlite we have to drop and recreate the whole thing. (Since we didn't have a 22 | # release out that was using futures yet, we don't bother trying to migrate data). 23 | conn.execute("DROP TABLE user_permission_futures") 24 | conn.execute( 25 | """ 26 | CREATE TABLE user_permission_futures ( 27 | room INTEGER NOT NULL REFERENCES rooms ON DELETE CASCADE, 28 | user INTEGER NOT NULL REFERENCES users ON DELETE CASCADE, 29 | at FLOAT NOT NULL, /* when the change should take effect (unix epoch) */ 30 | read BOOLEAN, /* Set this value @ at, if non-null */ 31 | write BOOLEAN, /* Set this value @ at, if non-null */ 32 | upload BOOLEAN /* Set this value @ at, if non-null */ 33 | ) 34 | """ 35 | ) 36 | conn.execute("CREATE INDEX user_permission_futures_at ON user_permission_futures(at)") 37 | conn.execute( 38 | """ 39 | CREATE INDEX user_permission_futures_room_user ON user_permission_futures(room, user) 40 | """ 41 | ) 42 | 43 | conn.execute( 44 | """ 45 | CREATE TABLE user_ban_futures ( 46 | room INTEGER REFERENCES rooms ON DELETE CASCADE, 47 | user INTEGER NOT NULL REFERENCES users ON DELETE CASCADE, 48 | at FLOAT NOT NULL, /* when the change should take effect (unix epoch) */ 49 | banned BOOLEAN NOT NULL /* if true then ban at `at`, if false then unban */ 50 | ); 51 | """ 52 | ) 53 | conn.execute("CREATE INDEX user_ban_futures_at ON user_ban_futures(at)") 54 | conn.execute("CREATE INDEX user_ban_futures_room_user ON user_ban_futures(room, user)") 55 | 56 | else: # postgresql 57 | conn.execute( 58 | """ 59 | CREATE TABLE user_ban_futures ( 60 | room INTEGER REFERENCES rooms ON DELETE CASCADE, 61 | "user" INTEGER NOT NULL REFERENCES users ON DELETE CASCADE, 62 | at FLOAT NOT NULL, /* when the change should take effect (unix epoch) */ 63 | banned BOOLEAN NOT NULL /* if true then ban at `at`, if false then unban */ 64 | ); 65 | CREATE INDEX user_ban_futures_at ON user_ban_futures(at); 66 | CREATE INDEX user_ban_futures_room_user ON user_ban_futures(room, "user"); 67 | 68 | INSERT INTO user_ban_futures (room, "user", at, banned) 69 | SELECT room, "user", at, banned FROM user_permission_futures WHERE banned is NOT NULL; 70 | 71 | DELETE FROM user_permission_futures WHERE read IS NULL AND write IS NULL AND uploads IS NULL; 72 | 73 | ALTER TABLE user_permission_futures DROP CONSTRAINT IF EXISTS user_permission_futures_pkey; 74 | ALTER TABLE user_permission_futures DROP COLUMN banned; 75 | """ 76 | ) 77 | 78 | return True 79 | -------------------------------------------------------------------------------- /sogs/migrations/user_permissions.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .exc import DatabaseUpgradeRequired 3 | 4 | 5 | def migrate(conn, *, check_only): 6 | """ 7 | Recreates the user_permissions view if it doesn't exist; this is the common code for both 8 | room_accessible and seqno_etc as 
both drop the view (when migrating) to be recreated here. 9 | """ 10 | 11 | from .. import db 12 | 13 | if 'user_permissions' in db.metadata.tables: 14 | return False 15 | 16 | logging.warning("DB migration: recreating user_permissions view") 17 | if check_only: 18 | raise DatabaseUpgradeRequired("Recreate user_permissions view") 19 | 20 | conn.execute( 21 | """ 22 | CREATE VIEW user_permissions AS 23 | SELECT 24 | rooms.id AS room, 25 | users.id AS "user", 26 | users.session_id, 27 | CASE WHEN users.banned THEN TRUE ELSE COALESCE(user_permission_overrides.banned, FALSE) END AS banned, 28 | CASE WHEN users.moderator THEN TRUE ELSE COALESCE(user_permission_overrides.read, rooms.read) END AS read, 29 | CASE WHEN users.moderator THEN TRUE ELSE COALESCE(user_permission_overrides.accessible, rooms.accessible) END AS accessible, 30 | CASE WHEN users.moderator THEN TRUE ELSE COALESCE(user_permission_overrides.write, rooms.write) END AS write, 31 | CASE WHEN users.moderator THEN TRUE ELSE COALESCE(user_permission_overrides.upload, rooms.upload) END AS upload, 32 | CASE WHEN users.moderator THEN TRUE ELSE COALESCE(user_permission_overrides.moderator, FALSE) END AS moderator, 33 | CASE WHEN users.admin THEN TRUE ELSE COALESCE(user_permission_overrides.admin, FALSE) END AS admin, 34 | -- room_moderator will be TRUE if the user is specifically listed as a moderator of the room 35 | COALESCE(user_permission_overrides.moderator, FALSE) AS room_moderator, 36 | -- global_moderator will be TRUE if the user is a global moderator/admin (note that this is 37 | -- *not* exclusive of room_moderator: a moderator/admin could be listed in both). 38 | users.moderator as global_moderator, 39 | -- visible_mod will be TRUE if this mod is a publicly viewable moderator of the room 40 | CASE 41 | WHEN user_permission_overrides.moderator THEN user_permission_overrides.visible_mod 42 | WHEN users.moderator THEN users.visible_mod 43 | ELSE FALSE 44 | END AS visible_mod 45 | FROM 46 | users CROSS JOIN rooms LEFT OUTER JOIN user_permission_overrides ON 47 | (users.id = user_permission_overrides."user" AND rooms.id = user_permission_overrides.room) 48 | """ # noqa E501 49 | ) 50 | 51 | return True 52 | -------------------------------------------------------------------------------- /sogs/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .. import config 2 | 3 | if config.PROFANITY_FILTER: 4 | import better_profanity 5 | 6 | if config.PROFANITY_CUSTOM: 7 | better_profanity.profanity.load_censor_words_from_file(config.PROFANITY_CUSTOM) 8 | else: 9 | better_profanity.profanity.load_censor_words() 10 | 11 | 12 | # Set of free-form strings that indicate the capabilities of this sogs server. As new features are 13 | # added that a Session client might want to know about, a string should be added here to allow 14 | # Session clients to identify the server's capabilities and act accordingly. 
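The capability set defined just below is what the `/capabilities` endpoint (sogs/routes/general.py) serves, including its `required=` precondition check. A hypothetical client-side use of that handshake (the server URL is a stand-in):

```python
# Hypothetical client check against GET /capabilities; the base URL is made up.
import requests

r = requests.get(
    "http://example.oxen/capabilities", params={"required": "sogs,blind"}
)
if r.status_code == 412:  # Precondition Failed: something we need is missing
    print("server lacks capabilities:", r.json()["missing"])
else:
    print("server capabilities:", r.json()["capabilities"])
```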
15 | capabilities = { 16 | 'sogs', # Basic sogs capabilities 17 | 'reactions', # Reactions, added in 0.3.1 18 | # 'newcap', # Add here 19 | } 20 | 21 | if config.REQUIRE_BLIND_KEYS: 22 | # indicate blinding required if configured to do so 23 | capabilities.add('blind') 24 | -------------------------------------------------------------------------------- /sogs/model/exc.py: -------------------------------------------------------------------------------- 1 | class NotFound(LookupError): 2 | """Base class for NoSuchRoom, NoSuchFile, etc.""" 3 | 4 | pass 5 | 6 | 7 | class NoSuchRoom(NotFound): 8 | """Thrown when trying to construct a Room from a token that doesn't exist""" 9 | 10 | def __init__(self, token): 11 | self.token = token 12 | super().__init__(f"No such room: {token}") 13 | 14 | 15 | class NoSuchFile(NotFound): 16 | """Thrown when trying to construct a File from an id that doesn't exist""" 17 | 18 | def __init__(self, id): 19 | self.id = id 20 | super().__init__(f"No such file: {id}") 21 | 22 | 23 | class NoSuchUser(NotFound): 24 | """Thrown when attempting to retrieve a user that doesn't exist and auto-vivification of the 25 | user is disabled""" 26 | 27 | def __init__(self, session_id): 28 | self.session_id = session_id 29 | super().__init__(f"No such user: {session_id}") 30 | 31 | 32 | class NoSuchPost(NotFound): 33 | """Thrown when attempting to retrieve or reference a post that doesn't exist""" 34 | 35 | def __init__(self, id): 36 | self.id = id 37 | super().__init__(f"No such post: {id}") 38 | 39 | 40 | class AlreadyExists(RuntimeError): 41 | """ 42 | Thrown when attempting to create a record (e.g. a Room) that already exists. 43 | 44 | e.type is the type object (e.g. sogs.model.Room) that could not be constructed, if applicable. 45 | e.value is the unique value that already exists (e.g. the room token), if applicable. 46 | """ 47 | 48 | def __init__(self, msg, type=None, value=None): 49 | super().__init__(msg) 50 | self.type = type 51 | self.value = value 52 | 53 | 54 | class BadPermission(RuntimeError): 55 | """Thrown when attempting to perform an action that the given user does not have permission to 56 | do; for example, attempting to delete someone else's posts when not a moderator.""" 57 | 58 | def __init__(self, msg=None): 59 | super().__init__("Permission denied" if msg is None else msg) 60 | 61 | 62 | class InvalidData(RuntimeError): 63 | """Thrown if something in model was fed invalid data, for example a signature of an invalid 64 | size, or an unparseable entity.""" 65 | 66 | 67 | class PostRejected(RuntimeError): 68 | """ 69 | Thrown when a post is refused for some reason other than a permission error (e.g. the post 70 | contains bad words) 71 | """ 72 | 73 | def __init__(self, msg=None): 74 | super().__init__("Post rejected" if msg is None else msg) 75 | 76 | 77 | class PostRateLimited(PostRejected): 78 | """Thrown when attempting to post too frequently in a room""" 79 | 80 | def __init__(self, msg=None): 81 | super().__init__("Rate limited" if msg is None else msg) 82 | -------------------------------------------------------------------------------- /sogs/model/file.py: -------------------------------------------------------------------------------- 1 | from ..db import query 2 | from .. import config, utils 3 | from .exc import NoSuchFile, NoSuchRoom, NoSuchUser 4 | import time 5 | from typing import List 6 | 7 | 8 | class File: 9 | """ 10 | Class representing a file stored in the database. 11 | 12 | Properties: 13 | id - the numeric file id, i.e. 
primary key 14 | room - the Room that this file belongs to (only retrieved on demand). 15 | uploader - the User that uploaded this file (only retrieved on demand). 16 | post_id - the id of the post to which this file is attached, None if unattached. 17 | size - the size (in bytes) of this file 18 | uploaded - unix timestamp when the file was uploaded 19 | expiry - unix timestamp when the file expires. None for non-expiring files. 20 | path - the path of this file on disk, relative to the base data directory. 21 | filename - the suggested filename provided by the user. None if there is no suggestion 22 | (this will always be the case for files uploaded by legacy Session clients, and 23 | sometimes by newer Session clients, e.g. when uploading from a paste). 24 | """ 25 | 26 | def __init__(self, row=None, *, id=None): 27 | """ 28 | Constructs a file from a pre-retrieved row *or* a file id. Raises NoSuchFile if the id does 29 | not exist in the database. 30 | """ 31 | if sum(x is not None for x in (id, row)) != 1: 32 | raise ValueError("File() error: exactly one of id/row is required") 33 | if id is not None: 34 | row = query("SELECT * FROM files WHERE id = :f", f=id).first() 35 | if not row: 36 | raise NoSuchFile(id) 37 | 38 | ( 39 | self.id, 40 | self._fetch_room_id, 41 | self._fetch_uploader_id, 42 | self.post_id, 43 | self.size, 44 | self.uploaded, 45 | self.expiry, 46 | self.filename, 47 | self.path, 48 | ) = ( 49 | row[c] 50 | for c in ( 51 | 'id', 52 | 'room', 53 | 'uploader', 54 | 'message', 55 | 'size', 56 | 'uploaded', 57 | 'expiry', 58 | 'filename', 59 | 'path', 60 | ) 61 | ) 62 | self._room = None 63 | self._uploader = None 64 | 65 | @property 66 | def room(self): 67 | """ 68 | Accesses the Room in which this file is posted; this is fetched from the database the first 69 | time this is accessed. In theory this can return None if the Room is in the process of 70 | being deleted but the Room's uploaded files haven't been deleted yet. 71 | """ 72 | if self._fetch_room_id is not None: 73 | from .room import Room 74 | 75 | try: 76 | self._room = Room(id=self._fetch_room_id) 77 | except NoSuchRoom: 78 | pass 79 | self._fetch_room_id = None 80 | return self._room 81 | 82 | @property 83 | def room_id(self): 84 | """ 85 | Accesses the id of the room to which this file was uploaded. Equivalent to .room.id, except 86 | that we don't fetch/cache the Room row. 87 | """ 88 | return self._fetch_room_id if self._room is None else self._room.id 89 | 90 | @property 91 | def uploader(self): 92 | """ 93 | Accesses the User who uploaded this file. Retrieves from the database the first time this 94 | is accessed. 95 | """ 96 | 97 | if self._fetch_uploader_id is not None: 98 | from .user import User 99 | 100 | try: 101 | self._uploader = User(id=self._fetch_uploader_id) 102 | except NoSuchUser: 103 | pass 104 | self._fetch_uploader_id = None 105 | return self._uploader 106 | 107 | @property 108 | def uploader_id(self): 109 | """ 110 | Accesses the id of the user who uploaded this file. Equivalent to .uploader.id, except 111 | that we don't fetch/cache the User row. 
112 | """ 113 | return self._fetch_uploader_id if self._uploader is None else self._uploader.id 114 | 115 | def read(self): 116 | """Reads the file from disk, as bytes.""" 117 | with open(self.path, 'rb') as f: 118 | return f.read() 119 | 120 | def read_base64(self): 121 | """Reads the file from disk and encodes as base64.""" 122 | return utils.encode_base64(self.read()) 123 | 124 | def set_expiry(self, duration=None, forever=False): 125 | """ 126 | Updates the file expiry to `duration` seconds from now, or to unlimited if `forever` is 127 | True. If duration is None (and not using forever) then the default expiry (relative to the 128 | current time) will be used. 129 | """ 130 | if forever: 131 | expiry = None 132 | elif duration is not None: 133 | expiry = time.time() + duration 134 | elif config.UPLOAD_DEFAULT_EXPIRY: 135 | expiry = time.time() + config.UPLOAD_DEFAULT_EXPIRY 136 | else: 137 | expiry = None 138 | query("UPDATE files SET expiry = :when WHERE id = :f", when=expiry, f=self.id) 139 | self.expiry = expiry 140 | 141 | @staticmethod 142 | def reset_expiries(file_ids: List[int]): 143 | query( 144 | "UPDATE files SET expiry = uploaded + :exp WHERE id IN :ids", 145 | exp=config.UPLOAD_DEFAULT_EXPIRY, 146 | ids=file_ids, 147 | bind_expanding=['ids'], 148 | ) 149 | -------------------------------------------------------------------------------- /sogs/model/message.py: -------------------------------------------------------------------------------- 1 | from .. import config 2 | from ..db import insert_and_get_row, query 3 | 4 | from .user import User 5 | 6 | import time 7 | 8 | 9 | class Message: 10 | """Class representing a DM between users 11 | 12 | Properties: 13 | sender: sender user of the message 14 | recip: recipient user of the message 15 | data: opaque message data 16 | signature: signature of data 17 | """ 18 | 19 | def __init__(self, row=None, *, sender=None, recip=None, data=None): 20 | """ 21 | Constructs a Message from a pre-retrieved row *or* sender, recipient, and data. 22 | """ 23 | if row is None: 24 | if None in (sender, recip, data): 25 | raise ValueError("Message() error: no row or data provided") 26 | if not all(isinstance(arg, User) for arg in (sender, recip)): 27 | raise ValueError("Message() error: sender or recipient was not a User model") 28 | 29 | row = insert_and_get_row( 30 | """ 31 | INSERT INTO inbox (sender, recipient, body, expiry) 32 | VALUES (:sender, :recipient, :data, :expiry) 33 | """, 34 | "inbox", 35 | "id", 36 | sender=sender.id, 37 | recipient=recip.id, 38 | data=data, 39 | expiry=time.time() + config.DM_EXPIRY, 40 | ) 41 | # sanity check 42 | assert row is not None 43 | self._row = row 44 | 45 | @staticmethod 46 | def delete_all(*, recip=None, sender=None): 47 | """Delete all messages sent to a user or from a user. 48 | Returns the number of rows affected. 
49 | """ 50 | if sum(bool(x) for x in (sender, recip)) != 1: 51 | raise ValueError("delete_all(): exactly one of sender or recipient is required") 52 | 53 | result = query( 54 | f"DELETE FROM inbox WHERE {'recipient' if recip else 'sender'} = :id", 55 | id=recip.id if recip else sender.id, 56 | ) 57 | return result.rowcount 58 | 59 | @staticmethod 60 | def to(user, since=None, limit=None): 61 | """get all messages for a user, returns a generator""" 62 | rows = query( 63 | f""" 64 | SELECT * FROM inbox WHERE recipient = :recip 65 | {'AND id > :since_id' if since else ''} 66 | ORDER BY id 67 | {'LIMIT :limit' if limit else ''} 68 | """, 69 | recip=user.id, 70 | since_id=since, 71 | limit=limit, 72 | ) 73 | for row in rows: 74 | yield Message(row=row) 75 | 76 | @staticmethod 77 | def sent(user, since=None, limit=None): 78 | """get all messages we sent, returns a generator""" 79 | rows = query( 80 | f""" 81 | SELECT * FROM inbox WHERE sender = :sender 82 | {'AND id > :since_id' if since else ''} 83 | ORDER BY id 84 | {'LIMIT :limit' if limit else ''} 85 | """, 86 | sender=user.id, 87 | since_id=since, 88 | limit=limit, 89 | ) 90 | for row in rows: 91 | yield Message(row=row) 92 | 93 | @property 94 | def id(self): 95 | return self._row["id"] 96 | 97 | @property 98 | def posted_at(self): 99 | return self._row["posted_at"] 100 | 101 | @property 102 | def expires_at(self): 103 | return self._row["expiry"] 104 | 105 | @property 106 | def data(self): 107 | return self._row['body'] 108 | 109 | @property 110 | def sender(self): 111 | if not hasattr(self, "_sender"): 112 | self._sender = User(id=self._row['sender'], autovivify=False) 113 | return self._sender 114 | 115 | @property 116 | def recipient(self): 117 | if not hasattr(self, "_recip"): 118 | self._recip = User(id=self._row['recipient'], autovivify=False) 119 | return self._recip 120 | -------------------------------------------------------------------------------- /sogs/model/post.py: -------------------------------------------------------------------------------- 1 | from .. import utils 2 | from .. import session_pb2 as protobuf 3 | 4 | 5 | class Post: 6 | """Class representing a post made in an open group""" 7 | 8 | _proto = None 9 | 10 | def __init__(self, raw=None, *, user=None, text=None): 11 | if isinstance(raw, bytes) or isinstance(raw, memoryview): 12 | msg = protobuf.Content() 13 | msg.ParseFromString(utils.remove_session_message_padding(raw)) 14 | self._proto = msg.dataMessage 15 | if self._proto is None: 16 | # TODO: implement other kinds of construction methods for Posts 17 | raise ValueError('must provide raw message bytes') 18 | 19 | @property 20 | def text(self): 21 | """ accessor for the post body """ 22 | return self._proto.body 23 | 24 | @property 25 | def username(self): 26 | """ accessor for the username of the post's author """ 27 | if self.profile is None: 28 | return 29 | return self.profile.displayName 30 | 31 | @property 32 | def profile(self): 33 | """ accessor for the user profile data containing things like username etc """ 34 | return self._proto.profile 35 | -------------------------------------------------------------------------------- /sogs/mule.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | import oxenmq 3 | from oxenc import bt_deserialize 4 | import time 5 | from datetime import timedelta 6 | import functools 7 | 8 | from .web import app 9 | from . import cleanup 10 | from . import config 11 | from . 
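import omq as o

mule.py below registers the worker.* commands; the sending side is send_mule() in sogs/omq.py, which prefixes the command with "worker." and bt-serializes each argument into its own message part. A sketch of the posting side (the `after_post` hook itself is hypothetical):

```python
# Hypothetical worker-side hook; message_posted() in sogs/mule.py receives
# the id as m.data()[0] == b"i42e" (bt-encoded), which bt_deserialize()
# turns back into the integer 42.
from sogs.omq import send_mule


def after_post(msg_id: int):
    send_mule("message_posted", msg_id)  # dispatched as "worker.message_posted"
```

from .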
import omq as o 12 | 13 | # This is the uwsgi "mule" that handles things not related to serving HTTP requests: 14 | # - it holds the oxenmq instance (with its own interface into sogs) 15 | # - it handles cleanup jobs (e.g. periodic deletions) 16 | 17 | 18 | def run(): 19 | try: 20 | app.logger.info("OxenMQ mule started.") 21 | 22 | while True: 23 | time.sleep(1) 24 | 25 | except Exception: 26 | app.logger.error("mule died via exception:\n{}".format(traceback.format_exc())) 27 | 28 | 29 | def allow_conn(addr, pk, sn): 30 | # TODO: user recognition auth 31 | return oxenmq.AuthLevel.basic 32 | 33 | 34 | def admin_conn(addr, pk, sn): 35 | return oxenmq.AuthLevel.admin 36 | 37 | 38 | def inproc_fail(connid, reason): 39 | raise RuntimeError(f"Couldn't connect mule to itself: {reason}") 40 | 41 | 42 | def setup_omq(): 43 | omq = o.omq 44 | 45 | app.logger.debug("Mule setting up omq") 46 | if isinstance(config.OMQ_LISTEN, list): 47 | listen = config.OMQ_LISTEN 48 | elif config.OMQ_LISTEN is None: 49 | listen = [] 50 | else: 51 | listen = [config.OMQ_LISTEN] 52 | for addr in listen: 53 | omq.listen(addr, curve=True, allow_connection=allow_conn) 54 | app.logger.info(f"OxenMQ listening on {addr}") 55 | 56 | # Internal socket for workers to talk to us: 57 | omq.listen(config.OMQ_INTERNAL, curve=False, allow_connection=admin_conn) 58 | 59 | # Periodic database cleanup timer: 60 | omq.add_timer(cleanup.cleanup, timedelta(seconds=cleanup.INTERVAL)) 61 | 62 | # Commands other workers can send to us, e.g. for notifications of activity for us to know about 63 | worker = omq.add_category("worker", access_level=oxenmq.AuthLevel.admin) 64 | worker.add_command("message_posted", message_posted) 65 | worker.add_command("messages_deleted", messages_deleted) 66 | worker.add_command("message_edited", message_edited) 67 | 68 | app.logger.debug("Mule starting omq") 69 | omq.start() 70 | 71 | # Connect mule to itself so that if something the mule does wants to send something to the mule 72 | # it will work. (And so be careful not to recurse!) 73 | app.logger.debug("Mule connecting to self") 74 | o.mule_conn = omq.connect_inproc(on_success=None, on_failure=inproc_fail) 75 | 76 | 77 | def log_exceptions(f): 78 | @functools.wraps(f) 79 | def wrapper(*args, **kwargs): 80 | try: 81 | return f(*args, **kwargs) 82 | except Exception as e: 83 | app.logger.error(f"{f.__name__} raised exception: {e}") 84 | raise 85 | 86 | return wrapper 87 | 88 | 89 | @log_exceptions 90 | def message_posted(m: oxenmq.Message): 91 | id = bt_deserialize(m.data()[0]) 92 | app.logger.debug(f"FIXME: mule -- message posted stub, id={id}") 93 | 94 | 95 | @log_exceptions 96 | def messages_deleted(m: oxenmq.Message): 97 | ids = bt_deserialize(m.data()[0]) 98 | app.logger.debug(f"FIXME: mule -- message delete stub, deleted messages: {ids}") 99 | 100 | 101 | @log_exceptions 102 | def message_edited(m: oxenmq.Message): 103 | app.logger.debug("FIXME: mule -- message edited stub") 104 | -------------------------------------------------------------------------------- /sogs/omq.py: -------------------------------------------------------------------------------- 1 | # Common oxenmq object; this is used by workers and the oxenmq mule. We create, but do not start, 2 | # this pre-forking. 3 | 4 | import oxenmq 5 | from oxenc import bt_serialize 6 | 7 | from . 
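import crypto, config

Since every argument passed to send_mule() (below) goes through bt_serialize, it can help to see what that bencode-style framing looks like on the wire; a quick check with oxenc:

```python
# bt-serialization as implemented by oxenc (bencode-style encoding).
from oxenc import bt_serialize, bt_deserialize

assert bt_serialize(42) == b"i42e"
assert bt_serialize([1, 2, 3]) == b"li1ei2ei3ee"
assert bt_serialize({"id": 42}) == b"d2:idi42ee"
assert bt_deserialize(b"li1ei2ei3ee") == [1, 2, 3]
```

from .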
import crypto, config 8 | from .postfork import postfork 9 | 10 | omq = None 11 | mule_conn = None 12 | test_suite = False 13 | 14 | 15 | def make_omq(): 16 | omq = oxenmq.OxenMQ(privkey=crypto._privkey.encode(), pubkey=crypto.server_pubkey.encode()) 17 | 18 | # We have multiple workers talking to the mule, so we *must* use ephemeral ids to not replace 19 | # each others' connections. 20 | omq.ephemeral_routing_id = True 21 | 22 | return omq 23 | 24 | 25 | # Postfork for workers: we start oxenmq and connect to the mule process 26 | @postfork 27 | def start_oxenmq(): 28 | try: 29 | import uwsgi 30 | except ModuleNotFoundError: 31 | return 32 | 33 | global omq, mule_conn 34 | 35 | omq = make_omq() 36 | 37 | if uwsgi.mule_id() != 0: 38 | from . import mule 39 | 40 | mule.setup_omq() 41 | return 42 | 43 | from .web import app # Imported here to avoid circular import 44 | 45 | app.logger.debug(f"Starting oxenmq connection to mule in worker {uwsgi.worker_id()}") 46 | 47 | omq.start() 48 | app.logger.debug("Started, connecting to mule") 49 | mule_conn = omq.connect_remote(oxenmq.Address(config.OMQ_INTERNAL)) 50 | 51 | app.logger.debug(f"worker {uwsgi.worker_id()} connected to mule OMQ") 52 | 53 | 54 | def send_mule(command, *args, prefix="worker."): 55 | """ 56 | Sends a command to the mule from a worker (or possibly from the mule itself). The command will 57 | be prefixed with "worker." (unless overridden). 58 | 59 | Any args will be bt-serialized and sent as message parts. 60 | """ 61 | if prefix: 62 | command = prefix + command 63 | 64 | if test_suite and omq is None: 65 | pass # TODO: for mule call testing we may want to do something else here? 66 | else: 67 | omq.send(mule_conn, command, *(bt_serialize(data) for data in args)) 68 | -------------------------------------------------------------------------------- /sogs/postfork.py: -------------------------------------------------------------------------------- 1 | try: 2 | import uwsgi # noqa: F401 3 | except ModuleNotFoundError: 4 | 5 | class postfork: 6 | """Simple non-uwsgi stub that just calls the postfork function""" 7 | 8 | def __init__(self, f): 9 | f() 10 | 11 | def __call__(self, f): 12 | pass 13 | 14 | 15 | else: 16 | import uwsgidecorators 17 | 18 | postfork = uwsgidecorators.postfork 19 | -------------------------------------------------------------------------------- /sogs/routes/__init__.py: -------------------------------------------------------------------------------- 1 | from ..web import app 2 | 3 | from .legacy import legacy as legacy_endpoints 4 | from .general import general as general_endpoints 5 | from .onion_request import onion_request as onion_request_endpoints 6 | from .rooms import rooms as rooms_endpoints 7 | from .messages import messages as messages_endpoints 8 | from .users import users as users_endpoints 9 | from .dm import dm as dm_endpoints 10 | from .views import views as views_endpoints 11 | 12 | from . 
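import exc # noqa: F401

Importing routes/exc.py here is what registers the model-exception-to-HTTP-status mapping, so route code never has to translate errors by hand; a sketch of the effect (the `lookup` helper is hypothetical):

```python
# Hypothetical route-level code relying on the errorhandlers in routes/exc.py.
from sogs.model.room import Room


def lookup(token):
    # If the token doesn't exist this raises sogs.model.exc.NoSuchRoom, which
    # the @app.errorhandler(exc.NotFound) hook converts into a 404 response.
    return Room(token=token)
```

from .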
import exc # noqa: F401 13 | 14 | app.register_blueprint(dm_endpoints) 15 | app.register_blueprint(rooms_endpoints) 16 | app.register_blueprint(messages_endpoints) 17 | app.register_blueprint(users_endpoints) 18 | app.register_blueprint(general_endpoints) 19 | app.register_blueprint(onion_request_endpoints) 20 | app.register_blueprint(legacy_endpoints) 21 | app.register_blueprint(views_endpoints) 22 | -------------------------------------------------------------------------------- /sogs/routes/converters.py: -------------------------------------------------------------------------------- 1 | from ..web import app 2 | from ..model.room import Room 3 | from ..model.exc import NoSuchRoom 4 | 5 | from .. import config 6 | 7 | from werkzeug.routing import BaseConverter, ValidationError 8 | 9 | 10 | class RoomTokenConverter(BaseConverter): 11 | """ 12 | A room token name consisting of `a`-`z`, `A`-`Z`, `0`-`9`, `_`, and `-` characters. 13 | Max length 64. 14 | """ 15 | 16 | regex = r"[\w-]{1,64}" 17 | 18 | def to_python(self, value): 19 | try: 20 | return Room(token=value) 21 | except NoSuchRoom: 22 | raise ValidationError() 23 | 24 | def to_value(self, value): 25 | return value.token 26 | 27 | 28 | class AnySessionIDConverter(BaseConverter): 29 | """ 30 | A 66-hex-character Session ID (`05...`) or blinded Session ID (`15...`). 31 | """ 32 | 33 | regex = r"[01]5[0-9a-fA-F]{64}" 34 | 35 | def to_python(self, value): 36 | return value 37 | 38 | 39 | class BlindSessionIDConverter(BaseConverter): 40 | """ 41 | A 66-hex-character blinded Session ID (`15...`). Non-blinded Session IDs are not permitted. 42 | """ 43 | 44 | regex = r"15[0-9a-fA-F]{64}" 45 | 46 | def to_python(self, value): 47 | return value 48 | 49 | 50 | class UnblindedSessionIDConverter(BaseConverter): 51 | """ 52 | A 66-hex character unblinded Session ID (`05...`). *Blinded* Session IDs are not permitted. 53 | """ 54 | 55 | regex = r"05[0-9a-fA-F]{64}" 56 | 57 | def to_python(self, value): 58 | return value 59 | 60 | 61 | app.url_map.converters['Room'] = RoomTokenConverter 62 | app.url_map.converters['BlindSessionID'] = BlindSessionIDConverter 63 | app.url_map.converters['UnblindedSessionID'] = UnblindedSessionIDConverter 64 | app.url_map.converters['SessionID'] = ( 65 | BlindSessionIDConverter if config.REQUIRE_BLIND_KEYS else AnySessionIDConverter 66 | ) 67 | app.url_map.converters['AnySessionID'] = AnySessionIDConverter 68 | -------------------------------------------------------------------------------- /sogs/routes/dm.py: -------------------------------------------------------------------------------- 1 | from .. import db, http, utils 2 | from ..model.exc import NoSuchUser 3 | from ..model.user import User 4 | from ..model.message import Message 5 | from ..web import app 6 | from . 
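import auth

The `_box()` helper defined just below returns 304 (Not Modified) for an empty `since` poll rather than an empty list; a hypothetical client loop built around that behaviour (base URL, auth session, and handler are all stand-ins):

```python
# Hypothetical polling loop against /inbox/since/<id>; 304 means "nothing new".
import time

import requests


def poll_inbox(base_url, session: requests.Session, handle, last_id=0):
    while True:
        r = session.get(f"{base_url}/inbox/since/{last_id}")
        if r.status_code == 200:
            for msg in r.json():
                last_id = max(last_id, msg["id"])
                handle(msg)
        elif r.status_code != 304:
            r.raise_for_status()
        time.sleep(5)
```

from .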
import auth 7 | 8 | from flask import abort, jsonify, g, Blueprint, request, Response 9 | 10 | dm = Blueprint('dm', __name__) 11 | 12 | 13 | def _serialize_message(msg, include_message=True): 14 | m = { 15 | "id": msg.id, 16 | "posted_at": msg.posted_at, 17 | "expires_at": msg.expires_at, 18 | "sender": msg.sender.session_id, 19 | "recipient": msg.recipient.session_id, 20 | } 21 | if include_message: 22 | m["message"] = utils.encode_base64(msg.data) 23 | return m 24 | 25 | 26 | def _box(out: bool, *, since=None): 27 | """handle inbox/outbox endpoints common logic""" 28 | limit = utils.get_int_param('limit', 100, min=1, max=256, truncate=True) 29 | get = Message.sent if out else Message.to 30 | msgs = [_serialize_message(msg) for msg in get(user=g.user, limit=limit, since=since)] 31 | if msgs or since is None: 32 | return jsonify(msgs) 33 | return Response('', status=http.NOT_MODIFIED) 34 | 35 | 36 | @dm.get("/outbox") 37 | @auth.blind_user_required 38 | def get_outbox(): 39 | """ 40 | Retrieves all of the user's sent messages (up to `limit`). 41 | """ 42 | return _box(True) 43 | 44 | 45 | @dm.get("/outbox/since/<int:msgid>") 46 | @auth.blind_user_required 47 | def poll_outbox(msgid): 48 | """ 49 | Polls for any DMs sent since the given id. 50 | """ 51 | return _box(True, since=msgid) 52 | 53 | 54 | @dm.get("/inbox") 55 | @auth.blind_user_required 56 | def get_inbox(): 57 | """ 58 | Retrieves all of the user's received messages (up to `limit`). 59 | """ 60 | return _box(False) 61 | 62 | 63 | @dm.get("/inbox/since/<int:msgid>") 64 | @auth.blind_user_required 65 | def poll_inbox(msgid): 66 | """ 67 | Polls for any DMs received since the given id. 68 | """ 69 | return _box(False, since=msgid) 70 | 71 | 72 | @dm.post("/inbox/<SessionID:sid>") 73 | @auth.user_required 74 | def send_inbox(sid): 75 | """ 76 | Delivers a direct message to a user via their blinded Session ID. 77 | 78 | The body of this request is a JSON object containing a `message` key with a value of the 79 | encrypted-then-base64-encoded message to deliver. 80 | 81 | Message encryption is described in the [`GET` /inbox](#GET-inbox) endpoint. 82 | 83 | # Return value 84 | 85 | On successful deposit of the message a 201 (Created) status code is returned. The body will be 86 | a JSON object containing the message details as would be returned by retrieving the message, 87 | except that it omits the encrypted message body. 88 | 89 | # Error status codes 90 | 91 | 400 Bad Request — if no message is provided. 92 | 93 | 404 Not Found — if the given Session ID does not exist on this server, either because they have 94 | never accessed the server, or because they have been permanently banned. 95 | """ 96 | try: 97 | recip_user = User(session_id=sid, autovivify=False) 98 | except NoSuchUser: 99 | abort(http.NOT_FOUND) 100 | 101 | if recip_user.banned: 102 | abort(http.NOT_FOUND) 103 | 104 | req = request.json 105 | message = req.get('message') 106 | if message is None: 107 | app.logger.warning("No message provided") 108 | abort(http.BAD_REQUEST) 109 | 110 | with db.transaction(): 111 | msg = Message(data=utils.decode_base64(message), recip=recip_user, sender=g.user) 112 | return jsonify(_serialize_message(msg, include_message=False)), http.CREATED 113 | 114 | 115 | @dm.delete("/inbox") 116 | @auth.blind_user_required 117 | def delete_inbox_items(): 118 | """ 119 | Deletes all of the user's received messages. 120 | 121 | # Return value 122 | 123 | Returns a JSON object with one key `"deleted"` set to the number of deleted messages. 
124 | """ 125 | ret = dict() 126 | with db.transaction(): 127 | ret['deleted'] = Message.delete_all(recip=g.user) 128 | 129 | return jsonify(ret), http.OK 130 | -------------------------------------------------------------------------------- /sogs/routes/exc.py: -------------------------------------------------------------------------------- 1 | from ..web import app 2 | from .. import http 3 | from ..model import exc 4 | 5 | 6 | # Map uncaught model exceptions into flask http exceptions 7 | @app.errorhandler(exc.NotFound) 8 | def abort_bad_room(e): 9 | return str(e), http.NOT_FOUND 10 | 11 | 12 | @app.errorhandler(exc.BadPermission) 13 | def abort_perm_denied(e): 14 | return str(e), http.FORBIDDEN 15 | 16 | 17 | @app.errorhandler(exc.PostRejected) 18 | def abort_post_rejected(e): 19 | return str(e), http.TOO_MANY_REQUESTS 20 | 21 | 22 | @app.errorhandler(exc.InvalidData) 23 | def abort_invalid_data(e): 24 | return str(e), http.BAD_REQUEST 25 | -------------------------------------------------------------------------------- /sogs/routes/general.py: -------------------------------------------------------------------------------- 1 | from ..web import app 2 | from ..model import capabilities 3 | from .. import http 4 | from .. import utils 5 | from .subrequest import make_subrequest 6 | 7 | from flask import request, abort, jsonify, Blueprint 8 | 9 | # General purpose routes for things like capability retrieval and batching 10 | 11 | general = Blueprint('general', __name__) 12 | 13 | 14 | @general.get("/capabilities") 15 | def get_caps(): 16 | """ 17 | Return the list of server features/capabilities. Optionally takes a required= parameter 18 | containing a comma-separated list of capabilities; if any are not satisfied we return a 412 19 | (Precondition Failed) response with missing requested capabilities in the `missing` key. 20 | """ 21 | 22 | res = {'capabilities': sorted(capabilities)} 23 | needed = request.args.get('required') 24 | res_code = http.OK 25 | if needed is not None: 26 | missing = [cap for cap in needed.split(',') if cap not in capabilities] 27 | 28 | if missing: 29 | res['missing'] = missing 30 | res_code = http.PRECONDITION_FAILED 31 | 32 | return jsonify(res), res_code 33 | 34 | 35 | batch_args = """ 36 | """ 37 | 38 | 39 | def parse_batch_request(req): 40 | """ 41 | Checks a batch request dict for the required fields. 42 | 43 | See batch() body for arg details. 44 | 45 | Returns (method, path, headers, json, body). `headers` will be a dict (empty if no headers were 46 | provided); `json`/`body` will be None for GET/DELETE requests; `json` will simply be the `json` 47 | dict within the request for json bodies, and `body` will be the *bytes* data (i.e. decoded from 48 | base64, when using `b64`) for 'b64' or 'bytes' requests. 
49 | """ 50 | if not isinstance(req, dict): 51 | app.logger.warning("Invalid batch request: batch request is not a dict") 52 | abort(http.BAD_REQUEST) 53 | if 'method' not in req: 54 | app.logger.warning("Invalid batch request: batch request has no method") 55 | abort(http.BAD_REQUEST) 56 | if 'path' not in req: 57 | app.logger.warning("Invalid batch request: batch request has no path") 58 | abort(http.BAD_REQUEST) 59 | 60 | method, path, headers, json, body = req['method'], req['path'], {}, None, None 61 | 62 | if 'headers' in req: 63 | if not isinstance(req['headers'], dict): 64 | app.logger.warning("Bad batch request: 'headers' must be a dict") 65 | abort(http.BAD_REQUEST) 66 | if any(not isinstance(k, str) or not isinstance(v, str) for k, v in req['headers'].items()): 67 | app.logger.warning("Bad batch request: 'headers' must contain only str/str pairs") 68 | abort(http.BAD_REQUEST) 69 | headers = req['headers'] 70 | 71 | has_body = method in ('POST', 'PUT') 72 | if not has_body and method not in ('GET', 'DELETE'): 73 | app.logger.warning(f"Bad batch request: invalid request method {method}") 74 | abort(http.BAD_REQUEST) 75 | 76 | if not path.startswith('/'): 77 | app.logger.warning(f"Bad batch request: path must start with /, got: [{path}]") 78 | abort(http.BAD_REQUEST) 79 | 80 | n_bodies = sum(k in req for k in ('b64', 'json', 'bytes')) 81 | if has_body: 82 | if not n_bodies: 83 | app.logger.warning(f"Bad batch request: {method} requires one of json/b64/bytes") 84 | abort(http.BAD_REQUEST) 85 | elif n_bodies > 1: 86 | app.logger.warning( 87 | f"Bad batch request: {method} cannot have more than one of json/bytes/b64" 88 | ) 89 | abort(http.BAD_REQUEST) 90 | 91 | if 'b64' in req: 92 | try: 93 | body = utils.decode_base64(req['b64']) 94 | except Exception: 95 | app.logger.warning("Bad batch request: b64 value is not valid base64") 96 | elif 'bytes' in req: 97 | body = req['bytes'] 98 | if not isinstance(body, bytes): 99 | body = body.encode() 100 | else: 101 | json = req['json'] 102 | 103 | elif n_bodies: 104 | app.logger.warning(f"Bad batch request: {req['method']} cannot have a json/b64/bytes body") 105 | abort(http.BAD_REQUEST) 106 | 107 | return method, path, headers, json, body 108 | 109 | 110 | @general.post("/batch") 111 | def batch(_sequential=False): 112 | """ 113 | Submits multiple requests wrapped up in a single request, runs them all, then returns the result 114 | of each one. Requests are performed independently, that is, if one fails the others will still 115 | be attempted. There is no guarantee on the order in which requests will be carried out. (For 116 | sequential, related requests invoke via /sequence instead). 117 | 118 | # Body 119 | 120 | Each individual batch subrequest is a list of dicts containing keys: 121 | 122 | - `method` is required and must be one of GET/DELETE/POST/PUT 123 | - `path` is required and must begin with a / 124 | - for POST/PUT requests there must be exactly one of: 125 | - a json value under the `json` key 126 | - a base64-encoded body under the `b64` key 127 | - a raw bytes value under the `bytes` key (not recommended for json) 128 | - `headers` may be provided, and must be a dict of k/v string pairs if provided. 129 | 130 | If non-conforming data is encountered then the request is terminated with a Bad Request error 131 | code. 132 | 133 | # Return value 134 | 135 | Returns a list of responses in the same order as the provided requests; each response consists 136 | of a dict containing: 137 | - code -- the numeric http response code (e.g. 
200 for success) 138 | - content-type -- the content type of the response 139 | - body -- the body of the response; will be plain json if `content-type` is `application/json`, 140 | otherwise it will be base64 encoded data. 141 | """ 142 | 143 | subreqs = request.json 144 | if not isinstance(subreqs, list): 145 | abort(http.BAD_REQUEST) 146 | 147 | # Expand this into a list first (rather than during iteration below) so that we abort everything 148 | # if any subrequest is invalid. 149 | subreqs = [parse_batch_request(r) for r in subreqs] 150 | 151 | response = [] 152 | for method, path, headers, json, body in subreqs: 153 | try: 154 | subres, headers = make_subrequest(method, path, headers=headers, body=body, json=json) 155 | if subres.content_type == "application/json": 156 | body = subres.get_json() 157 | else: 158 | body = subres.get_data() 159 | 160 | response.append({"code": subres.status_code, "headers": headers, "body": body}) 161 | except Exception as e: 162 | app.logger.warning(f"Batch subrequest failed: {e}") 163 | response.append( 164 | {"code": http.INTERNAL_SERVER_ERROR, 'content-type': 'text/plain', 'body': ''} 165 | ) 166 | 167 | if _sequential and not 200 <= response[-1]['code'] < 300: 168 | break 169 | 170 | return utils.jsonify_with_base64(response) 171 | 172 | 173 | @general.post("/sequence") 174 | def sequence(): 175 | """ 176 | This is like batch, except that it guarantees to submit requests sequentially in the order 177 | provided and stops processing requests if the previous request returned a non-2xx response. 178 | 179 | Like batch, responses are returned in the same order as requests, but unlike batch there may be 180 | fewer elements in the response list (if requests were stopped because of a non-2xx response). 181 | In such a case, the final, non-2xx response is still included as the final response value. 182 | 183 | See [`/batch`](#post-batch) for arguments and response. 184 | """ 185 | 186 | return batch(_sequential=True) 187 | -------------------------------------------------------------------------------- /sogs/routes/subrequest.py: -------------------------------------------------------------------------------- 1 | from ..web import app 2 | 3 | from flask import request, g 4 | from io import BytesIO 5 | import traceback 6 | from typing import Optional, Union 7 | import urllib.parse 8 | 9 | 10 | def make_subrequest( 11 | method: str, 12 | path: str, 13 | *, 14 | headers={}, 15 | content_type: Optional[str] = None, 16 | body: Optional[Union[bytes, memoryview]] = None, 17 | json: Optional[Union[dict, list]] = None, 18 | user_reauth: bool = False, 19 | ): 20 | """ 21 | Makes a subrequest from the given parameters, returns the response object and a dict of 22 | lower-case response header keys to header values. 23 | 24 | Parameters: 25 | method - the HTTP method, e.g. GET or POST 26 | path - the request path (optionally including a query string) 27 | headers - dict of HTTP headers for the request 28 | content_type - the content-type of the request (for POST/PUT methods) 29 | body - the bytes content of the body of a POST/PUT method. If specified then content_type will 30 | default to 'application/octet-stream'. 31 | json - a json value to dump as the body of the request. If specified then content_type will 32 | default to 'application/json'. 
33 | user_reauth - if True then we allow user re-authentication on the subrequest based on its 34 | X-SOGS-* headers; if False (the default) then the user auth on the outer request is preserved 35 | (even if it was None) and inner request auth headers will be ignored. 36 | """ 37 | 38 | http_headers = {'HTTP_{}'.format(h.upper().replace('-', '_')): v for h, v in headers.items()} 39 | 40 | if content_type is None: 41 | if 'HTTP_CONTENT_TYPE' in http_headers: 42 | content_type = http_headers['HTTP_CONTENT_TYPE'] 43 | elif body is not None: 44 | content_type = 'application/octet-stream' 45 | elif json is not None: 46 | content_type = 'application/json' 47 | else: 48 | content_type = '' 49 | 50 | for x in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): 51 | if x in http_headers: 52 | del http_headers[x] 53 | 54 | if body is None: 55 | if json is not None: 56 | from json import dumps 57 | 58 | body = dumps(json, separators=(',', ':')).encode() 59 | else: 60 | body = b'' 61 | 62 | body_input = BytesIO(body) 63 | content_length = len(body) 64 | 65 | if '?' in path: 66 | path, query_string = path.split('?', 1) 67 | else: 68 | query_string = '' 69 | 70 | if '%' in path: 71 | path = urllib.parse.unquote(path, errors='strict') 72 | 73 | # Werkzeug has some screwy internals: it requires PATH_INFO to be a bastardized string 74 | # masquerading as bytes: it encodes the string as latin1, then decodes *those* bytes to utf-8. 75 | # So we have to muck around here to get our unicode as utf-8 bytes then shove those into a 76 | # latin1 string. WTF. 77 | monkey_path = path 78 | if any(ord(c) > 127 for c in path): 79 | monkey_path = path.encode('utf-8').decode('latin1') 80 | 81 | # Set up the wsgi environ variables for the subrequest (see PEP 0333) 82 | subreq_env = { 83 | **request.environ, 84 | "REQUEST_METHOD": method, 85 | "PATH_INFO": monkey_path, 86 | "QUERY_STRING": query_string, 87 | "CONTENT_TYPE": content_type, 88 | "CONTENT_LENGTH": content_length, 89 | **http_headers, 90 | 'wsgi.input': body_input, 91 | 'flask._preserve_context': False, 92 | } 93 | 94 | try: 95 | app.logger.debug(f"Initiating sub-request for {method} {path}") 96 | g.user_reauth = user_reauth 97 | with app.request_context(subreq_env): 98 | try: 99 | response = app.full_dispatch_request() 100 | except Exception as e: 101 | response = app.make_response(app.handle_exception(e)) 102 | if response.status_code >= 400: 103 | app.logger.warning( 104 | f"Sub-request for {method} {path} returned status {response.status_code}" 105 | ) 106 | return ( 107 | response, 108 | { 109 | k.lower(): v 110 | for k, v in response.get_wsgi_headers(subreq_env) 111 | if k.lower() != 'content-length' 112 | }, 113 | ) 114 | 115 | except Exception: 116 | app.logger.warning(f"Sub-request for {method} {path} failed: {traceback.format_exc()}") 117 | raise 118 | -------------------------------------------------------------------------------- /sogs/routes/views.py: -------------------------------------------------------------------------------- 1 | from flask import abort, render_template, Response, Blueprint 2 | 3 | from .. import config, crypto, http 4 | from ..model.room import get_accessible_rooms 5 | from . 
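import auth, converters # noqa: F401

make_subrequest() above is the machinery behind the /batch and /sequence endpoints: each batch entry is dispatched through the full Flask stack in-process. A condensed sketch of that driving code (simplified from the real handling in sogs/routes/general.py, which adds validation and error handling):

```python
# Condensed sketch of how one batch entry drives make_subrequest().
from sogs.routes.subrequest import make_subrequest


def run_one(subreq: dict) -> dict:
    resp, headers = make_subrequest(
        subreq["method"],
        subreq["path"],
        headers=subreq.get("headers", {}),
        json=subreq.get("json"),
    )
    if resp.content_type == "application/json":
        body = resp.get_json()
    else:
        body = resp.get_data()
    return {"code": resp.status_code, "headers": headers, "body": body}
```

from .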
import auth, converters # noqa: F401 6 | 7 | 8 | from io import BytesIO 9 | 10 | import qrcode 11 | 12 | import PIL.Image 13 | 14 | if hasattr(PIL.Image, 'Resampling'): 15 | NEAREST = PIL.Image.Resampling.NEAREST 16 | else: 17 | NEAREST = PIL.Image.NEAREST 18 | 19 | 20 | views = Blueprint('views', __name__) 21 | 22 | 23 | @views.get("/") 24 | def serve_index(): 25 | """ 26 | Publicly accessible URL that displays a list of public rooms to a web browser. This isn't a 27 | normal SOGS client endpoint, but rather a convenience web page for people who follow a SOGS 28 | pseudo-URL. 29 | """ 30 | rooms = get_accessible_rooms() 31 | if len(rooms) == 0: 32 | return render_template('setup.html') 33 | if not config.HTTP_SHOW_INDEX: 34 | abort(http.FORBIDDEN) 35 | return render_template( 36 | "index.html", url_base=config.URL_BASE, rooms=rooms, pubkey=crypto.server_pubkey_hex 37 | ) 38 | 39 | 40 | @views.get("/r/<Room:room>/") 41 | def view_room(room): 42 | """ 43 | Publicly accessible URL that displays a room (including recent messages) to a web browser. This 44 | isn't a normal SOGS client endpoint, but rather a convenience web page for people who follow a 45 | SOGS pseudo-URL, that displays the SOGS URL and QR code along with a list of recent messages. 46 | """ 47 | if not room.default_read: 48 | abort(http.FORBIDDEN) 49 | 50 | return render_template("view_room.html", room=room, show_recent=config.HTTP_SHOW_RECENT) 51 | 52 | 53 | @views.get("/r/<Room:room>/invite.png") 54 | def serve_invite_qr(room): 55 | """ 56 | URL that generates a SOGS open group URL in QR code format for consumption by a mobile device. 57 | This isn't a normal SOGS client endpoint, but rather part of a convenience web page for 58 | people who view the SOGS URL in a browser and want to scan the URL into another device (i.e. 59 | mobile Session). 
60 | """ 61 | if not room.default_read: 62 | abort(http.FORBIDDEN) 63 | 64 | img = qrcode.make(room.url) 65 | data = BytesIO() 66 | img = img.resize((512, 512), NEAREST) 67 | img.save(data, "PNG") 68 | return Response(data.getvalue(), mimetype="image/png") 69 | -------------------------------------------------------------------------------- /sogs/static/session.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | package signalservice; 3 | 4 | 5 | message Envelope { 6 | 7 | enum Type { 8 | SESSION_MESSAGE = 6; 9 | CLOSED_GROUP_MESSAGE = 7; 10 | } 11 | 12 | // @required 13 | required Type type = 1; 14 | optional string source = 2; 15 | // @required 16 | required uint64 timestamp = 5; 17 | optional bytes content = 8; 18 | } 19 | 20 | message TypingMessage { 21 | enum Action { 22 | STARTED = 0; 23 | STOPPED = 1; 24 | } 25 | // @required 26 | required uint64 timestamp = 1; 27 | // @required 28 | required Action action = 2; 29 | } 30 | 31 | 32 | message Unsend { 33 | // @required 34 | required uint64 timestamp = 1; 35 | // @required 36 | required string author = 2; 37 | } 38 | 39 | 40 | message Content { 41 | optional DataMessage dataMessage = 1; 42 | optional CallMessage callMessage = 3; 43 | optional ReceiptMessage receiptMessage = 5; 44 | optional TypingMessage typingMessage = 6; 45 | optional ConfigurationMessage configurationMessage = 7; 46 | optional DataExtractionNotification dataExtractionNotification = 8; 47 | optional Unsend unsendMessage = 9; 48 | } 49 | 50 | message KeyPair { 51 | // @required 52 | required bytes publicKey = 1; 53 | // @required 54 | required bytes privateKey = 2; 55 | } 56 | 57 | message DataExtractionNotification { 58 | 59 | enum Type { 60 | SCREENSHOT = 1; // no way to know this on Desktop 61 | MEDIA_SAVED = 2; // timestamp 62 | } 63 | 64 | // @required 65 | required Type type = 1; 66 | optional uint64 timestamp = 2; 67 | } 68 | 69 | message DataMessage { 70 | 71 | enum Flags { 72 | EXPIRATION_TIMER_UPDATE = 2; 73 | } 74 | 75 | message Quote { 76 | 77 | message QuotedAttachment { 78 | optional string contentType = 1; 79 | optional string fileName = 2; 80 | optional AttachmentPointer thumbnail = 3; 81 | } 82 | 83 | // @required 84 | required uint64 id = 1; 85 | // @required 86 | required string author = 2; 87 | optional string text = 3; 88 | repeated QuotedAttachment attachments = 4; 89 | } 90 | 91 | message Preview { 92 | // @required 93 | required string url = 1; 94 | optional string title = 2; 95 | optional AttachmentPointer image = 3; 96 | } 97 | 98 | message LokiProfile { 99 | optional string displayName = 1; 100 | optional string profilePicture = 2; 101 | } 102 | 103 | message OpenGroupInvitation { 104 | // @required 105 | required string url = 1; 106 | // @required 107 | required string name = 3; 108 | } 109 | 110 | message ClosedGroupControlMessage { 111 | 112 | enum Type { 113 | NEW = 1; // publicKey, name, encryptionKeyPair, members, admins, expireTimer 114 | ENCRYPTION_KEY_PAIR = 3; // publicKey, wrappers 115 | NAME_CHANGE = 4; // name 116 | MEMBERS_ADDED = 5; // members 117 | MEMBERS_REMOVED = 6; // members 118 | MEMBER_LEFT = 7; 119 | ENCRYPTION_KEY_PAIR_REQUEST = 8; 120 | } 121 | 122 | 123 | 124 | message KeyPairWrapper { 125 | // @required 126 | required bytes publicKey = 1; // The public key of the user the key pair is meant for 127 | // @required 128 | required bytes encryptedKeyPair = 2; // The encrypted key pair 129 | } 130 | 131 | // @required 132 | required Type type = 1; 133 | 
optional bytes publicKey = 2; 134 | optional string name = 3; 135 | optional KeyPair encryptionKeyPair = 4; 136 | repeated bytes members = 5; 137 | repeated bytes admins = 6; 138 | repeated KeyPairWrapper wrappers = 7; 139 | optional uint32 expireTimer = 8; 140 | } 141 | 142 | 143 | optional string body = 1; 144 | repeated AttachmentPointer attachments = 2; 145 | optional GroupContext group = 3; 146 | optional uint32 flags = 4; 147 | optional uint32 expireTimer = 5; 148 | optional bytes profileKey = 6; 149 | optional uint64 timestamp = 7; 150 | optional Quote quote = 8; 151 | repeated Preview preview = 10; 152 | optional LokiProfile profile = 101; 153 | optional OpenGroupInvitation openGroupInvitation = 102; 154 | optional ClosedGroupControlMessage closedGroupControlMessage = 104; 155 | optional string syncTarget = 105; 156 | } 157 | 158 | message CallMessage { 159 | 160 | enum Type { 161 | OFFER = 1; 162 | ANSWER = 2; 163 | PROVISIONAL_ANSWER = 3; 164 | ICE_CANDIDATES = 4; 165 | END_CALL = 5; 166 | } 167 | 168 | // @required 169 | required Type type = 1; 170 | repeated string sdps = 2; 171 | repeated uint32 sdpMLineIndexes = 3; 172 | repeated string sdpMids = 4; 173 | 174 | } 175 | 176 | message ConfigurationMessage { 177 | 178 | message ClosedGroup { 179 | optional bytes publicKey = 1; 180 | optional string name = 2; 181 | optional KeyPair encryptionKeyPair = 3; 182 | repeated bytes members = 4; 183 | repeated bytes admins = 5; 184 | } 185 | 186 | message Contact { 187 | // @required 188 | required bytes publicKey = 1; 189 | // @required 190 | required string name = 2; 191 | optional string profilePicture = 3; 192 | optional bytes profileKey = 4; 193 | } 194 | 195 | repeated ClosedGroup closedGroups = 1; 196 | repeated string openGroups = 2; 197 | optional string displayName = 3; 198 | optional string profilePicture = 4; 199 | optional bytes profileKey = 5; 200 | repeated Contact contacts = 6; 201 | } 202 | 203 | message ReceiptMessage { 204 | 205 | enum Type { 206 | READ = 1; 207 | } 208 | 209 | // @required 210 | required Type type = 1; 211 | repeated uint64 timestamp = 2; 212 | } 213 | 214 | message AttachmentPointer { 215 | 216 | enum Flags { 217 | VOICE_MESSAGE = 1; 218 | } 219 | 220 | // @required 221 | required fixed64 id = 1; 222 | optional string contentType = 2; 223 | optional bytes key = 3; 224 | optional uint32 size = 4; 225 | optional bytes thumbnail = 5; 226 | optional bytes digest = 6; 227 | optional string fileName = 7; 228 | optional uint32 flags = 8; 229 | optional uint32 width = 9; 230 | optional uint32 height = 10; 231 | optional string caption = 11; 232 | optional string url = 101; 233 | } 234 | 235 | message GroupContext { 236 | 237 | enum Type { 238 | UNKNOWN = 0; 239 | UPDATE = 1; 240 | DELIVER = 2; 241 | QUIT = 3; 242 | REQUEST_INFO = 4; 243 | } 244 | 245 | // @required 246 | optional bytes id = 1; 247 | // @required 248 | optional Type type = 2; 249 | optional string name = 3; 250 | repeated string members = 4; 251 | optional AttachmentPointer avatar = 5; 252 | repeated string admins = 6; 253 | } 254 | 255 | -------------------------------------------------------------------------------- /sogs/static/view_room.js: -------------------------------------------------------------------------------- 1 | 2 | const makebuffer = (raw) => { 3 | let b = Uint8Array.from(window.atob(raw), (v) => v.charCodeAt(0)); 4 | // This data is padded with a 0x80 delimiter followed by any number of 0x00 bytes, but these are 5 | // *not* part of the protocol buffer encoding, so we need 
to strip it off. 6 | let realLength = b.length; 7 | while (realLength > 0 && b[realLength-1] == 0) 8 | realLength--; 9 | if (realLength > 0 && b[realLength-1] == 0x80) 10 | realLength--; 11 | return b.subarray(0, realLength); 12 | }; 13 | const setup = async () => { 14 | const elem = document.getElementById("messages"); 15 | if(elem) 16 | { 17 | 18 | const req = await fetch("/static/session.proto"); 19 | const proto = await req.text(); 20 | const root = protobuf.parse(proto).root; 21 | 22 | const Message = root.lookupType("signalservice.Content"); 23 | const url = window.poll_room_url; 24 | const update = async () => { 25 | const req = await fetch(url); 26 | if(req.status != 200) 27 | { 28 | elem.replaceChildren(); 29 | elem.appendChild(document.createTextNode(`HTTP ${req.status}`)); 30 | return; 31 | } 32 | const msgs = await req.json(); 33 | 34 | 35 | 36 | elem.replaceChildren(); 37 | 38 | if(msgs.length === 0) 39 | { 40 | elem.appendChild(document.createTextNode(`the ${window.view_room} room is empty`)); 41 | } 42 | 43 | for(let msg of msgs.reverse()) 44 | { 45 | let e = document.createElement("li") 46 | try 47 | { 48 | const data = makebuffer(msg.data); 49 | const err = Message.verify(data); 50 | if(err) 51 | { 52 | throw Error(err); 53 | } 54 | 55 | const plain = Message.decode(data).dataMessage; 56 | e.appendChild(document.createTextNode(plain.profile.displayName +": "+plain.body)); 57 | elem.appendChild(e); 58 | 59 | } 60 | catch(ex) 61 | { 62 | console.log(ex); 63 | } 64 | } 65 | }; 66 | if(url) 67 | { 68 | await update(); 69 | setInterval(update, 5000); 70 | } 71 | else 72 | { 73 | elem.replaceChildren(); 74 | elem.appendChild(document.createTextNode("no poll url set")); 75 | } 76 | } 77 | }; 78 | 79 | setTimeout(setup, 0); 80 | -------------------------------------------------------------------------------- /sogs/templates/base.html: -------------------------------------------------------------------------------- 1 | <html> 2 | <head> 3 | <title>sogs</title> 4 | </head> 5 | <body> 6 | {% block body %} 7 | {% endblock %} 8 | </body> 9 | </html> 10 | -------------------------------------------------------------------------------- /sogs/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block body %} 3 | <h1>room directory</h1> 4 | <ul> 5 | {% for room in rooms %} 6 | <li><a href="{{url_base}}/{{room.token}}?public_key={{pubkey}}">{{room.name}}</a></li> 7 | {% endfor %} 8 | </ul> 9 | 10 | {% endblock %} 11 | -------------------------------------------------------------------------------- /sogs/templates/setup.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block body %} 3 | <h1>this sogs requires additional setup</h1>
4 | {% endblock %} 5 | -------------------------------------------------------------------------------- /sogs/templates/view_room.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 | <h1>{{room.name}}</h1> 5 | <img src="invite.png"> 6 | <p><a href="{{room.url}}">{{room.url}}</a></p> 7 | 8 | {% if show_recent %} 9 | <ul id="messages"> 10 | <li>loading...</li> 11 | </ul> 12 | <script type="text/javascript"> 13 | window.view_room = "{{room.token}}"; 14 | window.poll_room_url = "/room/{{room.token}}/messages/recent"; 15 | </script> 16 | <script src="/static/protobuf.min.js"></script> <script src="/static/view_room.js"></script> 17 | 18 | {%endif%} 19 | 20 | {% endblock %} 21 | -------------------------------------------------------------------------------- /sogs/utils.py: -------------------------------------------------------------------------------- 1 | from . import crypto 2 | from . import config 3 | from . import http 4 | 5 | import base64 6 | from flask import request, abort, Response 7 | import json 8 | from typing import Union, Tuple 9 | 10 | 11 | def encode_base64(data: bytes): 12 | return base64.b64encode(data).decode() 13 | 14 | 15 | def decode_base64(b64: str): 16 | """Decodes a base64 value with or without padding.""" 17 | # Accept unpadded base64 by appending padding; b64decode won't accept it otherwise 18 | if 2 <= len(b64) % 4 <= 3 and not b64.endswith('='): 19 | b64 += '=' * (4 - len(b64) % 4) 20 | return base64.b64decode(b64, validate=True) 21 | 22 | 23 | def decode_hex_or_b64(data: bytes, size: int): 24 | """ 25 | Decodes hex or base64-encoded input of a binary value of size `size`. Returns None if data is 26 | None; otherwise the bytes value, if parsing is successful. Throws on invalid data. 27 | 28 | (Size is required because many hex strings are valid base64 and vice versa.) 29 | """ 30 | if data is None: 31 | return None 32 | 33 | if len(data) == size * 2: 34 | return bytes.fromhex(data) 35 | 36 | b64_size = (size + 2) // 3 * 4 # bytes*4/3, rounded up to the next multiple of 4. 37 | b64_unpadded = (size * 4 + 2) // 3 38 | 39 | # Allow unpadded data; python's base64 has no ability to load an unpadded value, though, so pad 40 | # it ourselves: 41 | if b64_unpadded <= len(data) <= b64_size: 42 | decoded = decode_base64(data) 43 | if len(decoded) == size: # Might not equal our target size because of padding 44 | return decoded 45 | 46 | raise ValueError("Invalid value: could not decode as hex or base64") 47 | 48 | 49 | def _json_b64_impl(val): 50 | if isinstance(val, bytes) or isinstance(val, memoryview): 51 | return encode_base64(val) 52 | if isinstance(val, list): 53 | return [_json_b64_impl(v) for v in val] 54 | if isinstance(val, dict): 55 | return {_json_b64_impl(k): _json_b64_impl(v) for k, v in val.items()} 56 | return val 57 | 58 | 59 | def json_with_base64(val): 60 | """ 61 | Returns val encoded in json, but with any `bytes` or `memoryview` values encoded as base64 62 | strings. Note that this base64-conversion only supports following lists and dicts. 63 | """ 64 | return json.dumps(_json_b64_impl(val)) 65 | 66 | 67 | def jsonify_with_base64(val): 68 | """ 69 | Returns a flask response set up for json (like flask.jsonify(...)), but uses json_with_base64 70 | for the encoding. 71 | """ 72 | return Response(json_with_base64(val), mimetype="application/json") 73 | 74 | 75 | def bencode_consume_string(body: memoryview) -> Tuple[memoryview, memoryview]: 76 | """ 77 | Parses a bencoded byte string from the beginning of `body`. Returns a pair of memoryviews on 78 | success: the first is the string byte data; the second is the remaining data (i.e. after the 79 | consumed string). 80 | Raises ValueError on parse failure.
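For example (a minimal sketch; the input bytes are illustrative, not from the repository):

    >>> s, rest = bencode_consume_string(memoryview(b'5:hello3:abc'))
    >>> bytes(s), bytes(rest)
    (b'hello', b'3:abc')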
81 | """ 82 | pos = 0 83 | while pos < len(body) and 0x30 <= body[pos] <= 0x39: # 1+ digits 84 | pos += 1 85 | if pos == 0 or pos >= len(body) or body[pos] != 0x3A: # 0x3a == ':' 86 | raise ValueError("Invalid string bencoding: did not find `N:` length prefix") 87 | 88 | strlen = int(body[0:pos]) # parse the digits as a base-10 integer 89 | pos += 1 # skip the colon 90 | if pos + strlen > len(body): 91 | raise ValueError("Invalid string bencoding: length exceeds buffer") 92 | return body[pos : pos + strlen], body[pos + strlen :] 93 | 94 | 95 | def server_url(room): 96 | # TODO: Once Session supports it, prefix this with /r/ so that SOGS pseudo-URLs for Session 97 | # coincide with the web viewer URL. 98 | return '{}/{}?public_key={}'.format(config.URL_BASE, room or '', crypto.server_pubkey_hex) 99 | 100 | 101 | SIGNATURE_SIZE = 64 102 | SESSION_ID_SIZE = 33 103 | # Size returned by make_legacy_token (assuming it is given a standard 66-hex (33 byte) session id): 104 | LEGACY_TOKEN_SIZE = SIGNATURE_SIZE + SESSION_ID_SIZE 105 | 106 | 107 | def make_legacy_token(session_id): 108 | session_id = bytes.fromhex(session_id) 109 | return crypto.server_sign(session_id) 110 | 111 | 112 | def legacy_convert_time(float_time): 113 | """take a float unix timestamp and convert it into something legacy Session likes""" 114 | return int(float_time * 1000) 115 | 116 | 117 | def get_int_param(name, default=None, *, required=False, min=None, max=None, truncate=False): 118 | """ 119 | Returns a provided named parameter (typically a query string parameter) as an integer from the 120 | current request. On error we abort the request with a Bad Request error status code. 121 | 122 | Parameters: 123 | - required -- if True then not specifying the argument is an error. 124 | - default -- if the parameter is not given then return this. Ignored if `required` is true. 125 | - min -- the minimum acceptable value for the parameter; None means no minimum. 126 | - max -- the maximum acceptable value for the parameter; None means no maximum. 127 | - truncate -- if True then we truncate a >max or <min value to the max/min value rather than 128 | aborting the request with a Bad Request error. 129 | """ 130 | val = request.args.get(name) 131 | if val is None: 132 | if required: 133 | abort(http.BAD_REQUEST) 134 | return default 135 | 136 | try: 137 | val = int(val) 138 | except Exception: 139 | abort(http.BAD_REQUEST) 140 | 141 | if min is not None and val < min: 142 | if truncate: 143 | val = min 144 | else: 145 | abort(http.BAD_REQUEST) 146 | elif max is not None and val > max: 147 | if truncate: 148 | val = max 149 | else: 150 | abort(http.BAD_REQUEST) 151 | return val 152 | 153 | 154 | def remove_session_message_padding(data: bytes): 155 | """Removes the custom padding that Session may have added. Returns the unpadded data.""" 156 | 157 | # Except sometimes it isn't padded, so if we find something other than 0x00 or 0x80 *or* we 158 | # strip off all the 0x00's and then find something that isn't 0x80, then we're supposed to use 159 | # the whole thing (without the 0's stripped off). Session code has a comment "This is dumb" 160 | # describing all of this. I concur. 161 | if data and data[-1] in (0x00, 0x80): 162 | stripped_data = data.rstrip(b'\x00') 163 | if stripped_data and stripped_data[-1] == 0x80: 164 | data = stripped_data[:-1] 165 | return data 166 | 167 | 168 | def add_session_message_padding(data: Union[bytes, memoryview], length): 169 | """Adds the custom padding that Session delivered the message with (and over which the signature 170 | is written).
Returns the padded value.""" 171 | 172 | if length > len(data): 173 | if isinstance(data, memoryview): 174 | data = bytes(data) 175 | data += b'\x80' + b'\x00' * (length - len(data) - 1) 176 | return data 177 | -------------------------------------------------------------------------------- /sogs/web.py: -------------------------------------------------------------------------------- 1 | import flask 2 | from werkzeug.local import LocalProxy 3 | from . import config 4 | import coloredlogs 5 | 6 | app = flask.Flask(__name__, template_folder=config.TEMPLATE_PATH, static_folder=config.STATIC_PATH) 7 | coloredlogs.install(milliseconds=True, isatty=True, logger=app.logger, level=config.LOG_LEVEL) 8 | 9 | # Monkey-patch app.get/post/etc. for Flask <2 compatibility; this has to be before the imports, 10 | # below, because they depend on this existing. 11 | if not hasattr(flask.Flask, 'post'): 12 | 13 | def _add_route_shortcut(on, name): 14 | def meth(self, rule: str, **options): 15 | return self.route(rule, methods=[name.upper()], **options) 16 | 17 | setattr(on, name, meth) 18 | 19 | for method in ('get', 'post', 'put', 'delete', 'patch'): 20 | _add_route_shortcut(flask.Flask, method) 21 | _add_route_shortcut(flask.Blueprint, method) 22 | 23 | 24 | def get_db_conn(): 25 | if 'conn' not in flask.g: 26 | from . import db 27 | 28 | flask.g.conn = db.get_conn() 29 | 30 | return flask.g.conn 31 | 32 | 33 | @app.teardown_appcontext 34 | def teardown_db_conn(exception): 35 | conn = flask.g.pop('conn', None) 36 | 37 | if conn is not None: 38 | conn.close() 39 | 40 | 41 | # An application-context, lazily evaluated database connection. (Note that in some contexts, such 42 | # as __main__.py, we may have replaced this with a non-lazy, actual current connection). 43 | appdb = LocalProxy(get_db_conn) 44 | 45 | 46 | from . import routes 47 | from . import cleanup 48 | -------------------------------------------------------------------------------- /tests/auth.py: -------------------------------------------------------------------------------- 1 | from nacl.signing import SigningKey 2 | from nacl.public import PublicKey 3 | from typing import Optional 4 | import time 5 | from sogs.hashing import blake2b, sha512 6 | import nacl.bindings as sodium 7 | from nacl.utils import random 8 | import urllib.parse 9 | 10 | import sogs.utils 11 | import sogs.crypto 12 | 13 | 14 | def x_sogs_nonce(): 15 | return random(16) 16 | 17 | 18 | def x_sogs_raw( 19 | s: SigningKey, 20 | B: PublicKey, 21 | method: str, 22 | full_path: str, 23 | body: Optional[bytes] = None, 24 | *, 25 | b64_nonce: bool = True, 26 | blinded: bool = False, 27 | timestamp_off: int = 0, 28 | nonce: bytes = None, 29 | ): 30 | """ 31 | Calculates X-SOGS-* headers. 32 | 33 | Returns 4 elements: the headers dict, the nonce bytes, timestamp int, and signature bytes. 34 | 35 | Use x_sogs(...) instead if you don't need the nonce/timestamp/signature values. 
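A minimal unblinded sketch (the generated key is illustrative; `sogs.crypto.server_pubkey` is
the same value that `x_sogs_for` below passes for `B`):

    s = SigningKey.generate()
    h, n, ts, sig = x_sogs_raw(s, sogs.crypto.server_pubkey, 'GET', '/capabilities')
    # h holds the X-SOGS-Pubkey/-Nonce/-Timestamp/-Signature headers for the request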
36 | """ 37 | n = nonce if nonce else x_sogs_nonce() 38 | ts = int(time.time()) + timestamp_off 39 | 40 | if blinded: 41 | a = s.to_curve25519_private_key().encode() 42 | k = sodium.crypto_core_ed25519_scalar_reduce( 43 | blake2b(sogs.crypto.server_pubkey_bytes, digest_size=64) 44 | ) 45 | ka = sodium.crypto_core_ed25519_scalar_mul(k, a) 46 | kA = sodium.crypto_scalarmult_ed25519_base_noclamp(ka) 47 | pubkey = '15' + kA.hex() 48 | else: 49 | pubkey = '00' + s.verify_key.encode().hex() 50 | 51 | if '%' in full_path: 52 | full_path = urllib.parse.unquote(full_path) 53 | 54 | to_sign = [B.encode(), n, str(ts).encode(), method.encode(), full_path.encode()] 55 | if body: 56 | to_sign.append(blake2b(body, digest_size=64)) 57 | 58 | if blinded: 59 | H_rh = sha512(s.encode())[32:] 60 | r = sodium.crypto_core_ed25519_scalar_reduce(sha512([H_rh, kA, *to_sign])) 61 | sig_R = sodium.crypto_scalarmult_ed25519_base_noclamp(r) 62 | HRAM = sodium.crypto_core_ed25519_scalar_reduce(sha512([sig_R, kA, *to_sign])) 63 | sig_s = sodium.crypto_core_ed25519_scalar_add( 64 | r, sodium.crypto_core_ed25519_scalar_mul(HRAM, ka) 65 | ) 66 | sig = sig_R + sig_s 67 | 68 | else: 69 | sig = s.sign(b''.join(to_sign)).signature 70 | 71 | h = { 72 | 'X-SOGS-Pubkey': pubkey, 73 | 'X-SOGS-Nonce': sogs.utils.encode_base64(n) if b64_nonce else n.hex(), 74 | 'X-SOGS-Timestamp': str(ts), 75 | 'X-SOGS-Signature': sogs.utils.encode_base64(sig), 76 | } 77 | 78 | return h, n, ts, sig 79 | 80 | 81 | def x_sogs(*args, **kwargs): 82 | return x_sogs_raw(*args, **kwargs)[0] 83 | 84 | 85 | def x_sogs_for(user, *args, **kwargs): 86 | B = sogs.crypto.server_pubkey 87 | return x_sogs(user.ed_key, B, *args, blinded=user.is_blinded, **kwargs) 88 | -------------------------------------------------------------------------------- /tests/request.py: -------------------------------------------------------------------------------- 1 | from json import dumps 2 | from auth import x_sogs_for 3 | from werkzeug.datastructures import Headers 4 | 5 | 6 | def sogs_get(client, url, user): 7 | """ 8 | GETs a test `client` request to `url` with X-SOGS-* signature headers signing the request for 9 | `user`. 10 | """ 11 | return client.get(url, headers=x_sogs_for(user, "GET", url)) 12 | 13 | 14 | def sogs_delete(client, url, user): 15 | """ 16 | DELETEs a test `client` request to `url` with X-SOGS-* signature headers signing the request for 17 | `user`. 18 | """ 19 | return client.delete(url, headers=x_sogs_for(user, "DELETE", url)) 20 | 21 | 22 | def sogs_post_raw(client, url, data, user, *, ctype='application/octet-stream', extra_headers={}): 23 | """ 24 | POSTs a test `client` request to `url` with the given `data` as bytes body and X-SOGS-* 25 | signature headers signing the request for `user`. 26 | """ 27 | headers = Headers(x_sogs_for(user, "POST", url, data)) 28 | for k, v in extra_headers.items(): 29 | if isinstance(v, str): 30 | headers.add(k, v) 31 | else: 32 | headers.add(k, v[0], **v[1]) 33 | 34 | return client.post(url, data=data, content_type=ctype, headers=headers) 35 | 36 | 37 | def sogs_post(client, url, json, user): 38 | """ 39 | POSTs a test `client` request to `url` with the given `json` as body and X-SOGS-* signature 40 | headers signing the request for `user`.
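For example (a sketch; `blind_user` and `blind_user2` stand in for fixtures like those used in
test_dm.py below, and the message value is an illustrative placeholder):

    post = {'message': '...'}  # base64-encoded encrypted payload, as built by test_dm.make_post
    r = sogs_post(client, f'/inbox/{blind_user2.session_id}', post, blind_user)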
41 | """ 42 | return sogs_post_raw(client, url, dumps(json).encode(), user, ctype='application/json') 43 | 44 | 45 | def sogs_put(client, url, json, user): 46 | """ 47 | PUTs a test `client` request to `url` with the given `json` as body and X-SOGS-* signature 48 | headers signing the request for `user`. 49 | """ 50 | data = dumps(json).encode() 51 | 52 | return client.put( 53 | url, data=data, content_type='application/json', headers=x_sogs_for(user, "PUT", url, data) 54 | ) 55 | -------------------------------------------------------------------------------- /tests/test_dm.py: -------------------------------------------------------------------------------- 1 | from request import sogs_get, sogs_post, sogs_delete 2 | from sogs import config 3 | from sogs.hashing import blake2b 4 | from sogs.utils import encode_base64 5 | from sogs.model.user import SystemUser 6 | import nacl.bindings as sodium 7 | from nacl.utils import random 8 | from util import from_now 9 | from itertools import product 10 | 11 | 12 | def test_dm_default_empty(client, blind_user): 13 | r = sogs_get(client, '/inbox', blind_user) 14 | assert r.status_code == 200 15 | assert r.json == [] 16 | 17 | 18 | def test_dm_banned_user(client, banned_user): 19 | r = sogs_get(client, '/inbox', banned_user) 20 | assert r.status_code == 403 21 | 22 | 23 | def make_post(message, sender, to): 24 | assert sender.is_blinded 25 | assert to.is_blinded 26 | a = sender.ed_key.to_curve25519_private_key().encode() 27 | kA = bytes.fromhex(sender.session_id[2:]) 28 | kB = bytes.fromhex(to.session_id[2:]) 29 | key = blake2b(sodium.crypto_scalarmult_ed25519_noclamp(a, kB) + kA + kB, digest_size=32) 30 | 31 | # MESSAGE || UNBLINDED_ED_PUBKEY 32 | plaintext = message + sender.ed_key.verify_key.encode() 33 | nonce = random(24) 34 | ciphertext = sodium.crypto_aead_xchacha20poly1305_ietf_encrypt( 35 | plaintext, aad=None, nonce=nonce, key=key 36 | ) 37 | data = b'\x00' + ciphertext + nonce 38 | return {'message': encode_base64(data)} 39 | 40 | 41 | def test_dm_send_from_banned_user(client, blind_user, blind_user2): 42 | blind_user2.ban(banned_by=SystemUser()) 43 | r = sogs_post( 44 | client, 45 | f'/inbox/{blind_user.session_id}', 46 | make_post(b'beep', sender=blind_user2, to=blind_user), 47 | blind_user2, 48 | ) 49 | assert r.status_code == 403 50 | 51 | 52 | def test_dm_send_to_banned_user(client, blind_user, blind_user2): 53 | blind_user2.ban(banned_by=SystemUser()) 54 | r = sogs_post( 55 | client, 56 | f'/inbox/{blind_user2.session_id}', 57 | make_post(b'beep', sender=blind_user, to=blind_user2), 58 | blind_user, 59 | ) 60 | assert r.status_code == 404 61 | 62 | 63 | def test_dm_send(client, blind_user, blind_user2): 64 | post = make_post(b'bep', sender=blind_user, to=blind_user2) 65 | msg_expected = { 66 | 'id': 1, 67 | 'message': post['message'], 68 | 'sender': blind_user.session_id, 69 | 'recipient': blind_user2.session_id, 70 | } 71 | 72 | r = sogs_post(client, f'/inbox/{blind_user2.session_id}', post, blind_user) 73 | assert r.status_code == 201 74 | data = r.json 75 | assert data.pop('posted_at') == from_now.seconds(0) 76 | assert data.pop('expires_at') == from_now.seconds(config.DM_EXPIRY) 77 | assert data == {k: v for k, v in msg_expected.items() if k != 'message'} 78 | 79 | r = sogs_get(client, '/inbox', blind_user2) 80 | assert r.status_code == 200 81 | assert len(r.json) == 1 82 | data = r.json[0] 83 | assert data.pop('posted_at') == from_now.seconds(0) 84 | assert data.pop('expires_at') == from_now.seconds(config.DM_EXPIRY) 85 | assert 
data == msg_expected 86 | 87 | r = sogs_get(client, '/outbox', blind_user) 88 | assert len(r.json) == 1 89 | data = r.json[0] 90 | assert data.pop('posted_at') == from_now.seconds(0) 91 | assert data.pop('expires_at') == from_now.seconds(config.DM_EXPIRY) 92 | assert data == msg_expected 93 | 94 | 95 | def test_dm_delete(client, blind_user, blind_user2): 96 | num_posts = 10 97 | for sender, recip in product((blind_user, blind_user2), repeat=2): 98 | # make DMs 99 | for n in range(num_posts): 100 | post = make_post(f"bep-{n}".encode('ascii'), sender=sender, to=recip) 101 | r = sogs_post(client, f'/inbox/{recip.session_id}', post, sender) 102 | assert r.status_code == 201 103 | 104 | # get DMs 105 | r = sogs_get(client, "/inbox", recip) 106 | assert r.status_code == 200 107 | posts = r.json 108 | assert isinstance(posts, list) 109 | assert len(posts) == num_posts 110 | 111 | # delete DMs 112 | r = sogs_delete(client, "/inbox", recip) 113 | assert r.status_code == 200 114 | assert r.json == {'deleted': num_posts} 115 | 116 | # make sure it is empty 117 | r = sogs_get(client, "/inbox", recip) 118 | assert r.status_code == 200 119 | posts = r.json 120 | assert posts == [] 121 | 122 | # delete again when nothing is there 123 | r = sogs_delete(client, "/inbox", recip) 124 | assert r.status_code == 200 125 | assert r.json == {'deleted': 0} 126 | 127 | # make sure it is still empty (probably redundant but good to have) 128 | r = sogs_get(client, "/inbox", recip) 129 | assert r.status_code == 200 130 | posts = r.json 131 | assert posts == [] 132 | -------------------------------------------------------------------------------- /tests/test_hashing.py: -------------------------------------------------------------------------------- 1 | from sogs.hashing import blake2b 2 | 3 | 4 | def test_blake2b(): 5 | # Test inputs/outputs from libsodium: 6 | salt = b'5b6b41ed9b343fe0' 7 | person = b'5126fb2a37400d2a' 8 | key = bytes(range(64)) 9 | data = bytes(range(64)) 10 | 11 | expected = [ 12 | "ba", 13 | "6139", 14 | "3a1666", 15 | "5797e9d0", 16 | "834a26efe6", 17 | "d7e9e862bbce", 18 | "40d8b84c374750", 19 | "276789189244cf04", 20 | "16f73ffe0673cc9992", 21 | "b3835bfaf6eb71d94078", 22 | "8c624e844d34f4a59f34cc", 23 | "e0a394962413ad09975df3cf", 24 | "47f043c3aacb501f97e0458ae3", 25 | "b4a11f2fb72a7e6f96fdacf98d49", 26 | "f434079e9adeb244047cb6855f9854", 27 | "5fbe885c4b2d4e0d78dc5905622a277a", 28 | "e262ba3e2ab76efdf83513108e3b987d1b", 29 | "add93dde78d32e77bc039c34a49043f19d26", 30 | "093842ac10e2eb1237ddc9ca9e7990cf397772", 31 | "09e7f6a0e2ea4888f1dbf6562effd1561c65029c", 32 | "bd33a9ec914f5b81864a49184338e4062d6c6b2b2e", 33 | "8dc46295235d94f5881d429a5ad47f9db9e35cf8c6b3", 34 | "ba5df554dca7ac1cba4889fa88adf3070fbf4ab5d187b5", 35 | "1ff84715e71c66214d271d421395fb6166db97b1d47ed697", 36 | "75a0d227c70549f5b0c933b7b21f151355bd47e04b6085c91f", 37 | "a32a5c9439a0fa771dcbe7f338b5dcef62a754edc4952614d6f0", 38 | "53a87de519cdcc7f64730d58bce6baaf7b44c5c428a4611a208ad4", 39 | "5e5ad8f0c4f083f9b7a5154d9c0dfd0f3d2fce94cf54fc215450314a", 40 | "9c76b9e63c77e6564b1e5111c2fb140046e1e5a4f900a7cfc2bac3fcfa", 41 | "bb919251ca310eb9b994e5d7883bc9fa2144b59b8d5d940677b7130ac777", 42 | "faa492a66f08ef0c7adb868fcb7b523aedd35b8ff1414bd1d554794f144474", 43 | "9b273ebe335540b87be899abe169389ed61ed262c3a0a16e4998bbf752f0bee3", 44 | "1e0070b92429c151b33bdd1bb4430a0e650a3dfc94d404054e93c8568330ecc505", 45 | "e3b64149f1b76231686d592d1d4af984ce2826ba03c2224a92f95f9526130ce4eb40", 46 | 
"5f8e378120b73db9eefa65ddcdcdcb4acd8046c31a5e47f298caa400937d5623f1394b", 47 | "74c757a4165a1782c933e587353a9fd8f6d7bf26b7f51b52c542747030bfb3d560c2e5c2", 48 | "2d5ee85cc238b923806dd98db18919d1924f2340ec88917d4ce1799cbfd5f2cb9df99db2e1", 49 | "c93ff727e6f9822efec0a77eed0025c0eff19127bf8746b7c71c2a098f57cef02febb86a1e6c", 50 | "adfb6d7ba13779a5dd1bbf268e400f4156f0f5c9d5b670ff539e1d9c1a63373416f3001f338407", 51 | "3a6900e58a448887d77c5911e4bdde620e64f25b2d71723fa60f7cb3efa7c320b6153bdbc3287949", 52 | "413eb0fd379b32dd88e82242a87cc58ce3e64c72352387a4c70f92ee5c8d23fa7ecd86f6df170a32d2", 53 | "92d0d3cacc3e25628caf6f2c4cd50d25d154ac45098f531d690230b859f37cfe089eb169f76bba72a3ff", 54 | "92f6ccc11a9a3bee520b17e0cddc4550c0e9cf47ddd9a6161284259ffb161c1d0675b505cb1066872768e8", 55 | "a3cd675804e6be7f120138a9eaadcd56bb7763d1c046e87fe0d358c8276b0d24621f46c60b46e397933b75b4", 56 | "304a1af53cbdd6486b8419d1ebd5e9528c540d8dc46a10be49067f46a0617229577015d776783f702b2954df43", # noqa: E501 57 | "d8a6358970446453ac0c82c758644ab68989b5b4f06f9768807ce0c5f2a0dbac1e8450f4e3a02deecf7b54b6a45d", # noqa: E501 58 | "1264b8dee9ac4aa8de69a43ada95cc95f20230f33836d4a1db8c2466ab38361686e5ac282025ccc2e0f6a1cd98a4dd", # noqa: E501 59 | "7eed787abaa7f4e8b8aa3090f0676201cfbaaf350899661cdd5216ac0b5cd874443f5c0688ffd7ca1ccbfe1ca7e1a3f5", # noqa: E501 60 | "8907f0218585167962a8e8213559a643dd03c2bf1a7a5ad3e3bc5f88c0ff1532ee8cd29880e7e0e68da22a5798aef27cc5", # noqa: E501 61 | "12dea17b0733e5060751b1115e10c3d4b2f4583bcd009d9f1f42ec23d4a6a0df1185d3abbdbe86de08569e70583d6de1c1fe", # noqa: E501 62 | "8ff75e91f1de547dc3a25472db2f51f5910a290c449603da54207b5e39bd735d240ec913b52df90709b5d29357971d6c341452", # noqa: E501 63 | "4a3b16b12400f38e74778efc3a4caa52ec6fdf6b0180a5bfac9189e52e162c10e8911a54ab33e2b389ee1949e58edaa119e2b2b9", # noqa: E501 64 | "c9943e7186fdc9bbfa1d7087fa7086babe6fcf95a6196d1772187854071304e2f1fff39e6e6f48f76addb16d5c00249e0523aac91f", # noqa: E501 65 | "0297f16fdd34add9cc87b4adf816525b590ba08ac733c43f8d225d194df4f9c83b4dce617be51e25b5f6c80dff249f27c707de20e422", # noqa: E501 66 | "576bb891eab9930998e2e73b5d0498e3c5f040f8dec9397a8c7a622c17de01fee7cc936e3bd4de1f7fd8b31dea9e70c65462bbb5dc7b50", # noqa: E501 67 | "9416a57ae7c8c51c6e008f940fe06d8ebc02c350c19a2f71583a6d260b085670d73a95248fef0f4cae5292ba7db1189a7cd9c51122ba7913", # noqa: E501 68 | "ea644b9051cca5eee8868a553e3f0f4e14739e1555474151156e10578256b288a233870dd43a380765400ea446df7f452c1e03a9e5b6731256", # noqa: E501 69 | "f99cc1603de221abc1ecb1a7eb4bbf06e99561d1cc5541d8d601bae2b1dd3cbe448ac276667f26de5e269183a09f7deaf35d33174b3cc8ad4aa2", # noqa: E501 70 | "ee2be1ec57fdac23f89402a534177eca0f4b982a4ed2c2e900b6a79e1f47a2d023eff2e647baf4f4c0da3a28d08a44bc780516974074e2523e6651", # noqa: E501 71 | "9cda001868949a2bad96c5b3950a8315e6e5214d0b54dcd596280565d351806ef22cf3053f63623da72fcad9afa3896641658632334c9ec4f644c984", # noqa: E501 72 | "c6d6722a916651a8671383d8260873347d9c248696b4cb3dac4dea9ba57ed971127cb18e44211d7e14177ace248b3c6e0785356ee261ebdc6ef0faf143", # noqa: E501 73 | "5dd258a3e7505bc6b9776b0df25676a1c19e2c8258c7b5f2e361423523d96299eb6827bc7c27e7bca2d2b59d717c2ebcb05e6dcaa32289d96fae9a4077ef", # noqa: E501 74 | "19c14de35fe19c92cc0e624280e4136355d4cfa9a0a98b090c4b06f5665021920725852ff1f566b0c8c37157b25fb9f947a2e70b40577a17860a0732c170ac", # noqa: E501 75 | "5fcdcc02be7714a0dbc77df498bf999ea9225d564adca1c121c9af03af92cac8177b9b4a86bcc47c79aa32aac58a3fef967b2132e9352d4613fe890beed2571b", # noqa: E501 76 | ] 77 | 78 | for i in range(64): 79 | assert ( 80 | 
blake2b(data[:i], digest_size=i + 1, key=key[: i + 1], salt=salt, person=person).hex() 81 | == expected[i] 82 | ) 83 | 84 | assert ( 85 | blake2b(data, digest_size=64, salt=salt, person=person).hex() 86 | == "1afc8ec818bef0a479d2b4cac81d40a52cafa27f6d80c42fc23cbaf4141882ab59ab1101922fcb6e707ef2f61efd07cce5d09094e6bee420b1b96998c7cee96d" # noqa: E501 87 | ) 88 | 89 | assert ( 90 | blake2b(data, digest_size=64, key=key, person=person).hex() 91 | == "5789f474edd5206ededaccfc35e7dd3ed730748125b5395abf802b2601126b19b109a1db67556945bc79bb25e1ab59610599d155070e0e04354f11a6a5d6f3ac" # noqa: E501 92 | ) 93 | 94 | assert ( 95 | blake2b(data, digest_size=64, key=key, salt=salt).hex() 96 | == "e78efc663a5547c089f2b3b08973c974c4bfd365eac18b80c68bdb3b1ba4554b54d6b8465a68a3b9aa0bc020621f16efd5b8dd8c7c01ed9ee3ec5544aae465ff" # noqa: E501 97 | ) 98 | 99 | exp = "fb4e2ad6b7fe6afd2ba06d5c1d79379c5bf10e336a35c89a1aaf408a805171716e0635a5b1d18190131e15b6888510bcb3e3752b050f892a09dbbde60b051495" # noqa: E501 100 | assert blake2b(data, digest_size=64, key=key, salt=salt, person=person).hex() == exp 101 | assert ( 102 | blake2b((data[0:7], data[7:]), digest_size=64, key=key, salt=salt, person=person).hex() 103 | == exp 104 | ) 105 | assert ( 106 | blake2b([data[0:7], data[7:]], digest_size=64, key=key, salt=salt, person=person).hex() 107 | == exp 108 | ) 109 | assert ( 110 | blake2b( 111 | [data[i : i + 1] for i in range(len(data))], 112 | digest_size=64, 113 | key=key, 114 | salt=salt, 115 | person=person, 116 | ).hex() 117 | == exp 118 | ) 119 | assert ( 120 | blake2b( 121 | [data[8 * i : 8 * i + 8] for i in range(8)], 122 | digest_size=64, 123 | key=key, 124 | salt=salt, 125 | person=person, 126 | ).hex() 127 | == exp 128 | ) 129 | assert ( 130 | blake2b( 131 | (data[8 * i : 8 * i + 8] for i in range(8)), 132 | digest_size=64, 133 | key=key, 134 | salt=salt, 135 | person=person, 136 | ).hex() 137 | == exp 138 | ) 139 | -------------------------------------------------------------------------------- /tests/test_legacy.py: -------------------------------------------------------------------------------- 1 | def test_empty(db): 2 | pass 3 | -------------------------------------------------------------------------------- /tests/test_routes_general.py: -------------------------------------------------------------------------------- 1 | from sogs.model import capabilities as core_caps 2 | from sogs import utils 3 | from sogs.web import app 4 | 5 | 6 | def test_capabilities(client): 7 | r = client.get("/capabilities") 8 | assert r.status_code == 200 9 | assert r.json == {"capabilities": sorted(core_caps)} 10 | 11 | r = client.get("/capabilities?required=sogs") 12 | assert r.status_code == 200 13 | assert r.json == {"capabilities": sorted(core_caps)} 14 | 15 | r = client.get("/capabilities?required=magic") 16 | assert r.status_code == 412 17 | assert r.json == {"capabilities": sorted(core_caps), "missing": ["magic"]} 18 | 19 | r = client.get("/capabilities?required=magic,sogs") 20 | assert r.status_code == 412 21 | assert r.json == {"capabilities": sorted(core_caps), "missing": ["magic"]} 22 | 23 | r = client.get("/capabilities?required=sogs,sogs") 24 | assert r.status_code == 200 25 | assert r.json == {"capabilities": sorted(core_caps)} 26 | 27 | 28 | def expected_result(code, body, ct="application/json"): 29 | return {"code": code, "headers": {"content-type": ct}, "body": body} 30 | 31 | 32 | def batch_data(): 33 | reqs = [ 34 | {"method": "GET", "path": "/capabilities"}, 35 | {"method": "GET", "path": 
"/capabilities?required=magic"}, 36 | {"method": "GET", "path": "/test_batch_2"}, 37 | {"method": "GET", "path": "/test_batch_1", "headers": {"x-header-123": "zzz"}}, 38 | {"method": "GET", "path": "/test_batch_1"}, 39 | {"method": "GET", "path": "/test_batch_3"}, 40 | {"method": "POST", "path": "/test_batch_4", "b64": 'aGVsbG8gd29ybGQ='}, # "hello world" 41 | {"method": "POST", "path": "/test_batch_4", "bytes": 'asdf\x00‽zzz'}, 42 | {"method": "POST", "path": "/test_batch_4", "json": [1, 2, 3]}, 43 | {"method": "GET", "path": "/test_batch_1?arg=456"}, 44 | ] 45 | expected = [ 46 | expected_result(200, {"capabilities": sorted(core_caps)}), 47 | expected_result(412, {"capabilities": sorted(core_caps), "missing": ["magic"]}), 48 | expected_result(403, {"z": 3}), 49 | expected_result(200, {"x": "zzz", "y": "N/A"}), 50 | expected_result(200, {"x": "def", "y": "N/A"}), 51 | expected_result(200, "YWJjZGVm", ct="application/octet-stream"), 52 | expected_result( 53 | 200, utils.encode_base64(b'echo: hello world'), ct="text/plain; charset=utf-8" 54 | ), 55 | expected_result( 56 | 200, utils.encode_base64('echo: asdf\x00‽zzz'.encode()), ct="text/plain; charset=utf-8" 57 | ), 58 | expected_result(200, {"echo": [1, 2, 3]}, ct="application/json"), 59 | expected_result(200, {"x": "def", "y": "456"}), 60 | ] 61 | return reqs, expected 62 | 63 | 64 | def batch_data2(): 65 | reqs = [ 66 | {"method": "GET", "path": "/capabilities"}, 67 | {"method": "GET", "path": "/capabilities?required=sogs"}, 68 | {"method": "GET", "path": "/test_batch_1", "headers": {"x-header-123": "zzz"}}, 69 | {"method": "GET", "path": "/test_batch_1"}, 70 | {"method": "GET", "path": "/test_batch_3"}, 71 | {"method": "POST", "path": "/test_batch_4", "b64": 'aGVsbG8gd29ybGQ='}, # "hello world" 72 | {"method": "GET", "path": "/test_batch_2"}, 73 | {"method": "POST", "path": "/test_batch_4", "bytes": 'asdf\x00‽zzz'}, 74 | {"method": "POST", "path": "/test_batch_4", "json": [1, 2, 3]}, 75 | {"method": "GET", "path": "/test_batch_1?arg=456"}, 76 | ] 77 | expected = [ 78 | expected_result(200, {"capabilities": sorted(core_caps)}), 79 | expected_result(200, {"capabilities": sorted(core_caps)}), 80 | expected_result(200, {"x": "zzz", "y": "N/A"}), 81 | expected_result(200, {"x": "def", "y": "N/A"}), 82 | expected_result(200, "YWJjZGVm", ct="application/octet-stream"), 83 | expected_result( 84 | 200, utils.encode_base64(b'echo: hello world'), ct="text/plain; charset=utf-8" 85 | ), 86 | expected_result(403, {"z": 3}), 87 | expected_result( 88 | 200, utils.encode_base64('echo: asdf\x00‽zzz'.encode()), ct="text/plain; charset=utf-8" 89 | ), 90 | expected_result(200, {"echo": [1, 2, 3]}, ct="application/json"), 91 | expected_result(200, {"x": "def", "y": "456"}), 92 | ] 93 | 94 | return reqs, expected 95 | 96 | 97 | def batch_data3(): 98 | reqs, expected = batch_data2() 99 | reqs = list(reqs[i] for i in range(len(reqs)) if 200 <= expected[i]['code'] < 300) 100 | expected = list(expected[i] for i in range(len(expected)) if 200 <= expected[i]['code'] < 300) 101 | return reqs, expected 102 | 103 | 104 | @app.get("/test_batch_1") 105 | def batch_test_endpoint1(): 106 | from flask import request, jsonify 107 | 108 | x = request.headers.get('X-Header-123', 'def') 109 | y = request.args.get('arg', 'N/A') 110 | return jsonify({"x": x, "y": y}) 111 | 112 | 113 | @app.get("/test_batch_2") 114 | def batch_test_endpoint2(): 115 | from flask import jsonify 116 | 117 | return jsonify({"z": 3}), 403 118 | 119 | 120 | @app.get("/test_batch_3") 121 | def 
batch_test_endpoint3(): 122 | from flask import Response 123 | 124 | return Response(b'abcdef', mimetype='application/octet-stream') 125 | 126 | 127 | @app.post("/test_batch_4") 128 | def batch_test_endpoint4(): 129 | from flask import request, jsonify, Response 130 | 131 | if request.is_json: 132 | return jsonify({"echo": request.json}) 133 | return Response(f"echo: {request.data.decode()}".encode(), mimetype='text/plain') 134 | 135 | 136 | def test_batch(client): 137 | 138 | d1, b1_exp = batch_data() 139 | b1 = client.post("/batch", json=d1) 140 | assert b1.json == b1_exp 141 | 142 | d2, b2_exp = batch_data2() 143 | b2 = client.post("/batch", json=d2) 144 | assert b2.json == b2_exp 145 | 146 | d3, b3_exp = batch_data3() 147 | b3 = client.post("/batch", json=d3) 148 | assert b3.json == b3_exp 149 | 150 | 151 | def until_bad_code(batch_exp): 152 | seq_exp = [] 153 | for e in batch_exp: 154 | seq_exp.append(e) 155 | if not 200 <= e['code'] < 300: 156 | break 157 | return seq_exp 158 | 159 | 160 | def test_sequence(client): 161 | d1, b1_exp = batch_data() 162 | s1 = client.post("/sequence", json=d1) 163 | assert s1.json == until_bad_code(b1_exp) 164 | 165 | d2, b2_exp = batch_data2() 166 | s2 = client.post("/sequence", json=d2) 167 | assert s2.json == until_bad_code(b2_exp) 168 | 169 | d3, b3_exp = batch_data3() 170 | s3 = client.post("/sequence", json=d3) 171 | assert s3.json == until_bad_code(b3_exp) 172 | -------------------------------------------------------------------------------- /tests/user.py: -------------------------------------------------------------------------------- 1 | import sogs.model.user 2 | from nacl.signing import SigningKey 3 | import nacl.bindings as sodium 4 | import sogs.crypto 5 | 6 | 7 | class User(sogs.model.user.User): 8 | def __init__(self, blinded=False): 9 | self.ed_key = SigningKey.generate() 10 | 11 | self.a = self.ed_key.to_curve25519_private_key().encode() 12 | self.ka = sodium.crypto_core_ed25519_scalar_mul(sogs.crypto.blinding_factor, self.a) 13 | self.kA = sodium.crypto_scalarmult_ed25519_base_noclamp(self.ka) 14 | self.blinded_id = '15' + self.kA.hex() 15 | if blinded: 16 | session_id = self.blinded_id 17 | else: 18 | session_id = '05' + self.ed_key.to_curve25519_private_key().public_key.encode().hex() 19 | 20 | super().__init__(session_id=session_id, touch=True) 21 | -------------------------------------------------------------------------------- /tests/util.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | import time 3 | from contextlib import contextmanager 4 | import sogs.config 5 | 6 | 7 | def pad64(data: Union[bytes, str]): 8 | """Returns the bytes (or str.encode()) padded to length 64 by appending null bytes""" 9 | if isinstance(data, str): 10 | data = data.encode() 11 | assert len(data) <= 64 12 | if len(data) < 64: 13 | return data + b'\0' * (64 - len(data)) 14 | return data 15 | 16 | 17 | class FuzzyTime: 18 | epsilon = 0.25 19 | 20 | def delta(self, other): 21 | return abs(float(other) - (self._t + time.time())) 22 | 23 | def __init__(self, t): 24 | self._t = float(t) 25 | 26 | def __eq__(self, other): 27 | return self.delta(other) <= self.epsilon 28 | 29 | def __mul__(self, other): 30 | return FuzzyTime(self._t * float(other)) 31 | 32 | def __repr__(self): 33 | return f"<FuzzyTime: ~{self._t}s from now (±{self.epsilon})>" 34 | 35 | 36 | class from_now: 37 | @staticmethod 38 | def seconds(n, epsilon=None): 39 | f = FuzzyTime(n) 40 | if epsilon is not None: 41 | f.epsilon = epsilon 42 | return f 43 | 44 | @staticmethod 45
| def now(epsilon=None): 46 | return from_now.seconds(0, epsilon) 47 | 48 | @staticmethod 49 | def minutes(n, epsilon=None): 50 | return from_now.seconds(60 * n, epsilon) # scale before constructing so a custom epsilon isn't lost 51 | 52 | @staticmethod 53 | def hours(n, epsilon=None): 54 | return from_now.minutes(60 * n, epsilon) 55 | 56 | @staticmethod 57 | def days(n, epsilon=None): 58 | return from_now.hours(24 * n, epsilon) 59 | 60 | 61 | @contextmanager 62 | def config_override(**kwargs): 63 | """ 64 | Context manager that locally overrides one or more sogs.config.XXX values for all given XXX keys 65 | in kwargs. The original config values are restored when leaving the context. 66 | 67 | e.g. 68 | 69 | with config_override(UPLOAD_FILE_MAX_SIZE=1024): 70 | ... 71 | """ 72 | 73 | restore = {} 74 | for k, v in kwargs.items(): 75 | restore[k] = getattr(sogs.config, k) 76 | setattr(sogs.config, k, v) 77 | 78 | try: 79 | yield None 80 | finally: 81 | for k, v in restore.items(): 82 | setattr(sogs.config, k, v) 83 | -------------------------------------------------------------------------------- /update-protobuf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if ! [ -e "sogs/__init__.py" ]; then 6 | echo "Error: must run this from the session-pysogs root directory" >&2 7 | exit 1 8 | fi 9 | 10 | protos=(session.proto) 11 | 12 | tmp=$(mktemp -d protobuf.XXXXXXXX) 13 | cd $tmp 14 | mkdir sogs 15 | for proto in "${protos[@]}"; do 16 | ln -s "../../sogs/static/$proto" "sogs/$proto" 17 | done 18 | 19 | protoc --python_out . sogs/*.proto 20 | 21 | for proto in "${protos[@]}"; do 22 | pb2_py="${proto/-/_}" 23 | pb2_py="sogs/${pb2_py/.proto/}_pb2.py" 24 | if cmp -s $pb2_py ../$pb2_py; then 25 | rm -f $pb2_py 26 | echo "$pb2_py unchanged" 27 | else 28 | mv -f $pb2_py ../sogs/ 29 | echo "$pb2_py updated" 30 | fi 31 | rm sogs/$proto 32 | done 33 | 34 | rmdir sogs 35 | cd .. 36 | rmdir $tmp 37 | --------------------------------------------------------------------------------
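A quick round-trip sketch of the Session message padding helpers from sogs/utils.py above (a
minimal illustration, not a file from the repository; it assumes an importable sogs package):

    from sogs.utils import add_session_message_padding, remove_session_message_padding

    padded = add_session_message_padding(b'hello', 160)
    assert len(padded) == 160 and padded[5] == 0x80  # 0x80 delimiter, then 0x00 fill
    assert remove_session_message_padding(padded) == b'hello'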