├── .github └── workflows │ └── codeql-analysis.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── __init__.py ├── actions ├── change_to_json.py ├── channels.py ├── following.py ├── preview.py ├── react.py ├── search.py ├── timeline.py └── user_ops.py ├── authentication ├── auth.py └── check_token.py ├── client └── client_views.py ├── config_example.py ├── dates.py ├── feeds ├── clean.py ├── hfeed.py ├── json_feed.py ├── read_later.py └── xml_feed.py ├── main.py ├── poll_feeds.py ├── requirements.txt ├── requirements_dev.txt ├── screenshots ├── desktop.png ├── feed.png ├── mobile.png └── screenshot.png ├── seed.py ├── server ├── main.py ├── server_views.py └── websub.py ├── static ├── .DS_Store ├── css │ └── styles.css ├── favicon.ico ├── icons │ ├── .DS_Store │ ├── 100.png │ ├── 1024.png │ ├── 114.png │ ├── 120.png │ ├── 128.png │ ├── 144.png │ ├── 152.png │ ├── 16.png │ ├── 167.png │ ├── 172.png │ ├── 180.png │ ├── 196.png │ ├── 20.png │ ├── 216.png │ ├── 256.png │ ├── 29.png │ ├── 32.png │ ├── 40.png │ ├── 48.png │ ├── 50.png │ ├── 512.png │ ├── 55.png │ ├── 57.png │ ├── 58.png │ ├── 60.png │ ├── 64.png │ ├── 72.png │ ├── 76.png │ ├── 80.png │ ├── 87.png │ ├── 88.png │ ├── bell.svg │ ├── create.svg │ ├── follow.svg │ ├── home.svg │ ├── search.svg │ └── settings.svg ├── images │ ├── feed.png │ ├── gradient.png │ └── wood.avif ├── js │ ├── editor.js │ └── reader.js ├── robots.txt └── styles.css ├── templates ├── 404.html ├── auth.html ├── base.html ├── client │ ├── create.html │ ├── discover.html │ ├── feed_item.html │ ├── preview.html │ ├── read_article.html │ ├── reader.html │ ├── search.html │ └── settings.html ├── index.html ├── server │ ├── dashboard.html │ └── following.html ├── setup.html └── show_error.html ├── tox.ini └── wsgi.py /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to 
commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '41 10 * * 1' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.db 6 | # C extensions 7 | *.so 8 | config.py 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | *.json 131 | import.py 132 | logs 133 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM python:3.9.0 4 | 5 | WORKDIR /app 6 | 7 | COPY requirements.txt requirements.txt 8 | 9 | RUN pip3 install -r requirements.txt 10 | COPY . . 11 | 12 | CMD ["python3", "-m", "flask", "run", "--host=0.0.0.0"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright 2022 capjamesg 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 6 | software and associated documentation files (the "Software"), to deal in the Software 7 | without restriction, including without limitation the rights to use, copy, modify, 8 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 9 | permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 12 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 13 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 14 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 15 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 16 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Cinnamon Social Reader 2 | 3 | This repository contains the code that powers my personal Microsub social reader called Cinnamon. 4 | 5 | [Microsub](https://indieweb.org/Microsub) is an IndieWeb specification currently in development that separates the tasks of processing feeds and presenting feeds so that servers and feed readers can be developed independently but work together. 6 | 7 | The Microsub server currently supports: 8 | 9 | - Creating channels 10 | - Updating channel names 11 | - Changing the order of channels in a list 12 | - Deleting channels 13 | - Subscribing to a feed 14 | - Unsubscribing from a feed 15 | - Marking entries in a feed view as read 16 | - Creating a post via Micropub 17 | - Reacting to posts via Micropub 18 | - Among other features 19 | 20 | This project is in active development. The entire Microsub specification is not yet incorporated into this project. 21 | 22 | ## Screenshots 23 | 24 | ### Desktop 25 | 26 | ![Microsub desktop feed](screenshots/desktop.png) 27 | 28 | ### Mobile 29 | 30 | ![Microsub mobile feed](screenshots/mobile.png) 31 | 32 | ## Getting Started 33 | 34 | You can install and configure this Microsub server using Docker or manually. 35 | 36 | ### Docker Setup 37 | 38 | To set up this project with Docker, first install Docker on your local machine. 39 | 40 | Next, run the following command: 41 | 42 | docker build -t microsub . 43 | 44 | This will build the microsub image using the Dockerfile in the root directory of this project. 
45 | 46 | Next, copy the config_example.py file into the config.py file and change the values to match your server: 47 | 48 | cp config_example.py config.py 49 | 50 | The Dockerfile automates the project setup process. 51 | 52 | Next, run: 53 | 54 | docker run microsub 55 | 56 | This will run the microsub server on port 5000. 57 | 58 | ### Manual Setup 59 | 60 | To use this Microsub server for yourself, please run the following command: 61 | 62 | pip3 install -r requirements.txt 63 | 64 | This command will install the dependencies you need to run the Microsub server. 65 | 66 | Next, copy the config_example.py file into the config.py file and change the values to match your server: 67 | 68 | cp config_example.py config.py 69 | 70 | Next, you need to set up the database for the server. You can do this using the following command: 71 | 72 | python3 seed.py 73 | 74 | Now that you have set up the database, you are ready to run the Microsub server. 75 | 76 | Execute this command to run the server: 77 | 78 | python3 wsgi.py 79 | 80 | ## File Definitions 81 | 82 | Here is the structure of this project: 83 | 84 | ── Dockerfile 85 | ├── LICENSE.md 86 | ├── README.md 87 | ├── actions # implementations of the actions defined in the Microsub specification 88 | │ ├── change_to_json.py 89 | │ ├── channels.py 90 | │ ├── following.py 91 | │ ├── preview.py 92 | │ ├── react.py 93 | │ ├── search.py 94 | │ ├── timeline.py 95 | │ └── user_ops.py 96 | ├── authentication # functions to handle authentication and authorization 97 | │ ├── auth.py 98 | │ └── check_token.py 99 | ├── client # views used to read and manage feeds 100 | │ └── client_views.py 101 | ├── config.py # configuration file required for the project to run 102 | ├── feeds # code to transform three different types of feed into a jf2 object, consumed by the server 103 | │ ├── hfeed.py 104 | │ ├── json_feed.py 105 | │ ├── read_later.py 106 | │ └── xml_feed.py 107 | ├── legacy # old code not currently in use 108 | │ └── 
dates.py 109 | ├── logs 110 | ├── main.py # the main microsub server that responds to queries at /microsub 111 | ├── poll_feeds.py 112 | ├── requirements.txt 113 | ├── requirements_dev.txt 114 | ├── seed.py 115 | ├── server # code that powers feed management and the back-end server 116 | │ ├── server_views.py 117 | │ └── websub.py 118 | ├── static # all static files used in the project 119 | │ ├── css 120 | │ │ └── styles.css 121 | │ ├── emojis.json 122 | │ ├── favicon.ico 123 | │ ├── icons 124 | │ ├── images 125 | │ │ └── wood.avif 126 | │ ├── js 127 | │ │ ├── editor.js # js to load the post editor form 128 | │ │ └── reader.js # js to enhance reading capabilities, including reactions 129 | │ ├── manifest.json 130 | │ └── robots.txt 131 | ├── templates # all the HTML templates for the project 132 | │ ├── 404.html 133 | │ ├── auth.html 134 | │ ├── base.html 135 | │ ├── client # HTML used by the client 136 | │ │ ├── discover.html 137 | │ │ ├── feed_item.html 138 | │ │ ├── preview.html 139 | │ │ ├── read_article.html 140 | │ │ ├── reader.html 141 | │ │ ├── search.html 142 | │ │ └── settings.html 143 | │ ├── index.html 144 | │ ├── server # HTML used by the server management client 145 | │ │ ├── dashboard.html 146 | │ │ └── following.html 147 | │ ├── setup.html 148 | │ └── show_error.html 149 | ├── tox.ini 150 | └── wsgi.py 151 | 152 | This tree was generated using the following command: 153 | 154 | tree -I '*.pyc|*.png|*.svg|*.log|__*' 155 | 156 | ## License 157 | 158 | This project is licensed under an [MIT 0 license](LICENSE). 159 | 160 | ## Acknowledgements 161 | 162 | The author of this project would like to thank the [Feather](https://github.com/feathericons/feather) open source icon set for creating an open repository of icons. This project uses a few icons from Feather in the mobile view. 163 | 164 | This project uses the "[Complete list of github markdown emoji markup](https://gist.github.com/rxaviers/7360908)" Gist for its emoji autocomplete dictionary. 
165 | 166 | ## Maintainers 167 | 168 | - [capjamesg](https://github.com/capjamesg) -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | from datetime import timedelta 3 | 4 | import requests 5 | from dateutil import parser 6 | from flask import Flask, render_template, request, send_from_directory, session 7 | 8 | from authentication.check_token import verify 9 | from config import SENTRY_DSN, SENTRY_SERVER_NAME 10 | 11 | # set up sentry for error handling 12 | if SENTRY_DSN != "": 13 | import sentry_sdk 14 | from sentry_sdk.integrations.flask import FlaskIntegration 15 | 16 | sentry_sdk.init( 17 | dsn=SENTRY_DSN, 18 | integrations=[FlaskIntegration()], 19 | traces_sample_rate=1.0, 20 | server_name=SENTRY_SERVER_NAME, 21 | ) 22 | 23 | 24 | def handle_error(request, session, error_code): 25 | auth_result = verify(request.headers, session) 26 | 27 | if auth_result: 28 | headers = {"Authorization": session["access_token"]} 29 | 30 | channel_req = requests.get( 31 | session.get("server_url") + "?action=channels", headers=headers 32 | ) 33 | 34 | all_channels = channel_req.json()["channels"] 35 | else: 36 | all_channels = [] 37 | 38 | template = "404.html" 39 | 40 | return ( 41 | render_template( 42 | template, title="Error", error=error_code, channels=all_channels 43 | ), 44 | 500, 45 | ) 46 | 47 | 48 | def create_app(): 49 | app = Flask(__name__) 50 | 51 | app.config["SECRET_KEY"] = os.urandom(32) 52 | app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///microsub.db" 53 | app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False 54 | 55 | # read config.py file 56 | app.config.from_pyfile(os.path.join(".", "config.py"), silent=False) 57 | 58 | # set maximum lifetime for session 59 | app.permanent_session_lifetime = timedelta(days=120) 60 | 61 | # blueprint for non-auth parts of app 62 | from server.main import main as main_blueprint 63 | 
64 | app.register_blueprint(main_blueprint) 65 | 66 | from client.client_views import client as client_blueprint 67 | 68 | app.register_blueprint(client_blueprint) 69 | 70 | from authentication.auth import auth as auth_blueprint 71 | 72 | app.register_blueprint(auth_blueprint) 73 | 74 | from server.websub import websub as websub_blueprint 75 | 76 | app.register_blueprint(websub_blueprint) 77 | 78 | from server.server_views import server_views as server_views_blueprint 79 | 80 | app.register_blueprint(server_views_blueprint) 81 | 82 | # filter used to parse dates 83 | # source: https://stackoverflow.com/questions/4830535/how-do-i-format-a-date-in-jinja2 84 | @app.template_filter("strftime") 85 | def _jinja2_filter_datetime(date, fmt=None): 86 | date = parser.parse(date) 87 | native = date.replace(tzinfo=None) 88 | format = "%b %d, %Y" 89 | return native.strftime(format) 90 | 91 | @app.errorhandler(404) 92 | def page_not_found(e): 93 | return handle_error(request, session, 400) 94 | 95 | @app.errorhandler(405) 96 | def method_not_allowed(e): 97 | return handle_error(request, session, 405) 98 | 99 | @app.errorhandler(500) 100 | def server_error(): 101 | handle_error(request, session, 500) 102 | 103 | @app.route("/robots.txt") 104 | def robots(): 105 | return send_from_directory(app.static_folder, "robots.txt") 106 | 107 | @app.route("/favicon.ico") 108 | def favicon(): 109 | return send_from_directory(app.static_folder, "favicon.ico") 110 | 111 | @app.route("/emojis.json") 112 | def emojis(): 113 | return send_from_directory("static", "emojis.json") 114 | 115 | @app.route("/manifest.json") 116 | def web_app_manifest(): 117 | return send_from_directory("static", "manifest.json") 118 | 119 | @app.route("/assets/") 120 | def assets(path): 121 | return send_from_directory("assets", path) 122 | 123 | # from werkzeug.middleware.profiler import ProfilerMiddleware 124 | # app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[5], profile_dir='./profile') 125 | 126 | 
return app 127 | 128 | 129 | create_app() 130 | -------------------------------------------------------------------------------- /actions/change_to_json.py: -------------------------------------------------------------------------------- 1 | def change_to_json(database_result): 2 | columns = [column[0] for column in database_result.description] 3 | 4 | result = [dict(zip(columns, row)) for row in database_result] 5 | 6 | return result 7 | -------------------------------------------------------------------------------- /actions/channels.py: -------------------------------------------------------------------------------- 1 | import random 2 | import sqlite3 3 | import string 4 | 5 | from flask import jsonify, request 6 | 7 | from .change_to_json import change_to_json 8 | 9 | 10 | def get_channels() -> dict: 11 | connection = sqlite3.connect("microsub.db") 12 | 13 | with connection: 14 | cursor = connection.cursor() 15 | 16 | cursor.execute("SELECT uid, channel FROM channels ORDER BY position ASC;") 17 | 18 | result = change_to_json(cursor) 19 | 20 | final_result = [] 21 | 22 | total_unread = 0 23 | 24 | for r in result: 25 | get_unread = cursor.execute( 26 | "SELECT COUNT(*) FROM timeline WHERE channel = ? 
AND read_status = 'unread';", 27 | (r["uid"],), 28 | ).fetchone() 29 | r["unread"] = get_unread[0] 30 | r["name"] = r["channel"] 31 | total_unread += r["unread"] 32 | final_result.append(r) 33 | del r["channel"] 34 | 35 | # add "all" as a special value 36 | # used to show every post stored in the server 37 | final_result.insert(0, {"uid": "all", "name": "All", "unread": total_unread}) 38 | 39 | return jsonify({"channels": final_result}), 200 40 | 41 | 42 | def create_channel(request: request) -> dict: 43 | connection = sqlite3.connect("microsub.db") 44 | 45 | with connection: 46 | cursor = connection.cursor() 47 | three_random_letters = "".join( 48 | random.choice(string.ascii_lowercase) for _ in range(3) 49 | ) 50 | # check if name taken 51 | cursor.execute( 52 | "SELECT * FROM channels WHERE channel = ?", (request.args.get("name"),) 53 | ) 54 | 55 | if cursor.fetchone(): 56 | return jsonify({"error": "This channel name has been taken."}), 400 57 | 58 | existing_channels = cursor.execute( 59 | "SELECT position FROM channels ORDER BY position DESC LIMIT 1" 60 | ).fetchone() 61 | 62 | if existing_channels and len(existing_channels) > 0: 63 | last_position = int(existing_channels[0]) 64 | else: 65 | last_position = 0 66 | 67 | cursor.execute( 68 | "INSERT INTO channels VALUES(?, ?, ?)", 69 | ( 70 | request.form.get("name"), 71 | request.form.get("name").lower() + three_random_letters, 72 | last_position + 1, 73 | ), 74 | ) 75 | 76 | all_channels = cursor.execute( 77 | "SELECT * FROM channels ORDER BY position ASC" 78 | ).fetchall() 79 | 80 | return jsonify(all_channels), 200 81 | 82 | 83 | def update_channel(request: request) -> dict: 84 | connection = sqlite3.connect("microsub.db") 85 | 86 | with connection: 87 | cursor = connection.cursor() 88 | cursor.execute( 89 | "UPDATE channels SET channel = ? 
WHERE uid = ?", 90 | (request.form.get("name"), request.form.get("channel")), 91 | ) 92 | 93 | get_updated_channel = cursor.execute( 94 | "SELECT * FROM channels WHERE uid = ?", (request.form.get("channel"),) 95 | ).fetchone() 96 | 97 | return get_updated_channel 98 | 99 | 100 | def delete_channel(request: request) -> dict: 101 | connection = sqlite3.connect("microsub.db") 102 | 103 | with connection: 104 | cursor = connection.cursor() 105 | 106 | get_channel = cursor.execute( 107 | "SELECT * FROM channels WHERE uid = ?", (request.form.get("channel"),) 108 | ).fetchone() 109 | 110 | if get_channel: 111 | cursor.execute( 112 | "DELETE FROM channels WHERE uid = ?", (request.form.get("channel"),) 113 | ) 114 | 115 | # get_channel[0] is the deleted channel name 116 | return jsonify({"channel": get_channel[0]}), 200 117 | else: 118 | return jsonify({"error": "channel not found"}), 400 119 | 120 | 121 | def reorder_channels(request: request) -> dict: 122 | connection = sqlite3.connect("microsub.db") 123 | 124 | if len(request.form.getlist("channels")) == 2: 125 | with connection: 126 | cursor = connection.cursor() 127 | position_for_first = cursor.execute( 128 | "SELECT position FROM channels WHERE uid = ?", 129 | (request.form.getlist("channels")[0],), 130 | ).fetchone() 131 | position_for_second = cursor.execute( 132 | "SELECT position FROM channels WHERE uid = ?", 133 | (request.form.getlist("channels")[1],), 134 | ).fetchone() 135 | cursor.execute( 136 | "UPDATE channels SET position = ? WHERE uid = ?", 137 | (position_for_second[0], request.form.getlist("channels")[0]), 138 | ) 139 | cursor.execute( 140 | "UPDATE channels SET position = ? 
WHERE uid = ?", 141 | (position_for_first[0], request.form.getlist("channels")[1]), 142 | ) 143 | 144 | return {"type": "reorder"} 145 | 146 | with connection: 147 | cursor = connection.cursor() 148 | cursor.execute("DELETE FROM channels") 149 | 150 | position = 1 151 | 152 | for channel in request.form.getlist("channels"): 153 | cursor.execute( 154 | "INSERT INTO channels VALUES(?, ?, ?)", 155 | ( 156 | channel["name"], 157 | channel["name"].lower(), 158 | position, 159 | ), 160 | ) 161 | 162 | position += 1 163 | 164 | return {"type": "reorder_channels"} 165 | -------------------------------------------------------------------------------- /actions/following.py: -------------------------------------------------------------------------------- 1 | import random 2 | import sqlite3 3 | import string 4 | 5 | import indieweb_utils 6 | import requests 7 | from bs4 import BeautifulSoup 8 | from flask import jsonify, request 9 | from urllib.parse import urlparse as parse_url 10 | 11 | from config import CLIENT_ID 12 | 13 | 14 | def get_follow(channel: str) -> dict: 15 | connection = sqlite3.connect("microsub.db") 16 | 17 | if not channel: 18 | return jsonify({}), 200 19 | 20 | with connection: 21 | cursor = connection.cursor() 22 | if channel == "all": 23 | results = cursor.execute( 24 | "SELECT * FROM following ORDER BY id DESC;" 25 | ).fetchall() 26 | else: 27 | results = cursor.execute( 28 | "SELECT * FROM following WHERE channel = ? 
ORDER by id DESC;", 29 | (channel,), 30 | ).fetchall() 31 | 32 | results = [ 33 | {"type": "feed", "url": r[1], "photo": r[3], "name": r[4]} for r in results 34 | ] 35 | 36 | final_result = {"items": results} 37 | 38 | return jsonify(final_result), 200 39 | 40 | 41 | def create_follow(request: request) -> dict: 42 | connection = sqlite3.connect("microsub.db") 43 | 44 | with connection: 45 | cursor = connection.cursor() 46 | print(request.form) 47 | 48 | url = request.form.get("url").strip() 49 | 50 | # check if following 51 | cursor.execute( 52 | "SELECT * FROM following WHERE channel = ? AND url = ?", 53 | (request.form.get("channel"), url), 54 | ) 55 | 56 | if cursor.fetchone(): 57 | return ( 58 | jsonify( 59 | { 60 | "error": f"You are already following this feed in the {request.form.get('channel')} channel." 61 | } 62 | ), 63 | 400, 64 | ) 65 | title = url 66 | favicon = "" 67 | 68 | home_page_request = requests.get(indieweb_utils.canonicalize_url(url, url)).text 69 | 70 | home_page = BeautifulSoup(home_page_request, "lxml") 71 | 72 | if home_page.find("title"): 73 | title = home_page.find("title").text 74 | 75 | # "" empty string is etag which will be populated in poll_feeds.py if available 76 | last_id = cursor.execute("SELECT MAX(id) FROM following").fetchone() 77 | 78 | if last_id and last_id[0] is not None: 79 | last_id = int(last_id[0]) + 1 80 | else: 81 | last_id = 1 82 | 83 | favicon = get_feed_icon(home_page, url) 84 | 85 | # set cadence to hourly by default 86 | cursor.execute( 87 | "INSERT INTO following VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)", 88 | ( 89 | request.form.get("channel"), 90 | url, 91 | "", 92 | favicon, 93 | title, 94 | last_id, 95 | 0, 96 | 0, 97 | "hourly", 98 | ), 99 | ) 100 | 101 | subscribe_to_websub_hub(request, home_page, url, cursor) 102 | 103 | return {"type": "feed", "url": url} 104 | 105 | 106 | def get_feed_icon(home_page: BeautifulSoup, url: str) -> str: 107 | favicon = home_page.find("link", rel="shortcut icon") 108 | 109 | 
url_domain = parse_url(url).netloc 110 | 111 | if favicon: 112 | favicon = indieweb_utils.canonicalize_url(favicon.get("href"), url_domain, url) 113 | else: 114 | favicon = "" 115 | 116 | if favicon == "": 117 | favicon = home_page.find("link", rel="icon") 118 | 119 | if favicon: 120 | favicon = indieweb_utils.canonicalize_url( 121 | favicon.get("href"), url_domain, url 122 | ) 123 | 124 | if favicon: 125 | try: 126 | r = requests.get(favicon) 127 | 128 | if r.status_code != 200: 129 | favicon = "" 130 | except requests.exceptions.RequestException: 131 | favicon = "" 132 | 133 | if not favicon or favicon == "": 134 | favicon = "/static/image/gradient.png" 135 | 136 | return favicon 137 | 138 | 139 | def subscribe_to_websub_hub( 140 | request: request, soup: BeautifulSoup, url: str, cursor: sqlite3.Cursor 141 | ) -> dict: 142 | # discover websub_hub 143 | 144 | # check link headers for websub hub 145 | 146 | link_header = request.headers.get("link") 147 | 148 | hub = None 149 | 150 | if link_header: 151 | # parse link header 152 | parsed_links = requests.utils.parse_header_links( 153 | link_header.rstrip(">").replace(">,<", ",<") 154 | ) 155 | 156 | for link in parsed_links: 157 | if "rel" in link and "hub" in link["rel"]: 158 | hub = link["url"] 159 | break 160 | 161 | if hub is None: 162 | hub_link_tags = soup.find_all("link", rel="hub") 163 | 164 | if len(hub_link_tags) > 0: 165 | hub = hub_link_tags[0].get("href") 166 | 167 | if hub is None: 168 | return 169 | 170 | random_string = "".join(random.choice(string.ascii_lowercase) for _ in range(10)) 171 | 172 | requests.post( 173 | hub, 174 | data={ 175 | "hub.mode": "subscribe", 176 | "hub.topic": url, 177 | "hub.callback": CLIENT_ID.strip("/") + "/websub_callback", 178 | }, 179 | ) 180 | 181 | cursor.execute( 182 | "INSERT INTO websub_subscriptions VALUES (?, ?, ?, ?);", 183 | (url, random_string, request.form.get("channel"), 1), 184 | ) 185 | 186 | 187 | def unfollow(request: request) -> dict: 188 | connection = 
sqlite3.connect("microsub.db") 189 | 190 | with connection: 191 | cursor = connection.cursor() 192 | cursor.execute( 193 | "DELETE FROM following WHERE url = ? AND channel = ?", 194 | ( 195 | request.form.get("url"), 196 | request.form.get("channel"), 197 | ), 198 | ) 199 | 200 | return {"type": "unfollow"} 201 | -------------------------------------------------------------------------------- /actions/preview.py: -------------------------------------------------------------------------------- 1 | import feedparser 2 | import indieweb_utils 3 | import mf2py 4 | import requests 5 | from bs4 import BeautifulSoup 6 | from flask import jsonify, request 7 | 8 | from feeds import hfeed, json_feed, xml_feed 9 | from urllib.parse import urlparse as parse_url 10 | 11 | 12 | def process_h_feed_preview( 13 | r: requests.Request, items_to_return: list, url: str 14 | ) -> list: 15 | print(r.headers) 16 | parsed = mf2py.parse(r.text) 17 | 18 | h_card = None 19 | 20 | for item in parsed["items"]: 21 | if "type" in item and item["type"] == "h-card": 22 | h_card = item 23 | 24 | for item in parsed["items"]: 25 | if "type" in item and item["type"][0] == "h-feed": 26 | for entry in item["children"]: 27 | if entry["type"][0] == "h-entry": 28 | result = hfeed.process_hfeed(entry, h_card, "", url, "") 29 | 30 | items_to_return.append(result) 31 | elif "type" in item and item["type"][0] == "h-entry": 32 | result = hfeed.process_hfeed(item, h_card, "", url, "") 33 | 34 | items_to_return.append(result) 35 | 36 | content_type = "h-feed" 37 | 38 | return items_to_return, content_type 39 | 40 | 41 | def get_preview_items(content_type: str, url: str, r: requests.Request) -> list: 42 | items_to_return = [] 43 | 44 | if "xml" in content_type or ".xml" in url: 45 | feed = feedparser.parse(url) 46 | 47 | print(url) 48 | 49 | for entry in feed.entries: 50 | result, _ = xml_feed.process_xml_feed(entry, feed, url) 51 | 52 | items_to_return.append(result) 53 | elif "json" in content_type or 
url.endswith(".json"): 54 | try: 55 | feed = requests.get(url, timeout=5).json() 56 | except requests.exceptions.RequestException: 57 | return jsonify({"error": "invalid url"}), 400 58 | 59 | for entry in feed.get("items", []): 60 | result, _ = json_feed.process_json_feed(entry, feed) 61 | 62 | items_to_return.append(result) 63 | else: 64 | items_to_return, content_type = process_h_feed_preview(r, items_to_return, url) 65 | 66 | return items_to_return, content_type 67 | 68 | 69 | def preview(request: request) -> dict: 70 | url = request.form.get("url") 71 | 72 | # get content type of url 73 | try: 74 | r = requests.head(url) 75 | except requests.exceptions.RequestException: 76 | return jsonify({"error": "invalid url"}), 400 77 | 78 | soup = BeautifulSoup(r.text, "lxml") 79 | 80 | if r.headers.get("content-type"): 81 | content_type = r.headers["content-type"] 82 | else: 83 | content_type = "" 84 | 85 | items_to_return, content_type = get_preview_items(content_type, url, r) 86 | 87 | feed = {"url": url, "feed_type": content_type} 88 | 89 | # get homepage favicon 90 | parsed_url = parse_url(url) 91 | url_protocol = parsed_url.scheme 92 | url_domain = parsed_url.netloc 93 | 94 | url_to_check = url_protocol + "://" + url_domain 95 | 96 | soup = BeautifulSoup(requests.get(url_to_check).text, "lxml") 97 | 98 | favicon = soup.find("link", rel="shortcut icon") 99 | 100 | if favicon: 101 | feed["icon"] = indieweb_utils.canonicalize_url( 102 | favicon.get("href"), url_domain, favicon.get("href") 103 | ) 104 | 105 | if soup.find("title"): 106 | feed["title"] = soup.find("title").text 107 | 108 | result = {"feed": feed, "items": items_to_return} 109 | 110 | return jsonify(result), 200 111 | -------------------------------------------------------------------------------- /actions/react.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sqlite3 3 | 4 | from flask import request 5 | 6 | 7 | def react(request: request) -> 
def react(request) -> dict:
    """Record a reaction (reply, like, etc.) against a timeline entry.

    Form parameters:
        uid: unique identifier of the timeline entry being reacted to.
        reaction: reaction type, used as a key in the entry's "reactions" map.
        url: URL of the post that contains the reaction.
        content: optional reply text; when present the reaction is a reply.

    Returns a (dict, status_code) tuple understood by Flask.
    """
    connection = sqlite3.connect("microsub.db")

    uid = request.form.get("uid")
    reaction_type = request.form.get("reaction")
    reaction_url = request.form.get("url")

    with connection:
        cursor = connection.cursor()

        timeline_item = cursor.execute(
            "SELECT * FROM timeline WHERE uid = ?", (uid,)
        ).fetchone()

        # column 1 holds the serialized jf2 document for the entry
        jf2 = json.loads(timeline_item[1])

        if not jf2.get("reactions"):
            jf2["reactions"] = {}

        if not jf2["reactions"].get("replies"):
            jf2["reactions"]["replies"] = []

        if request.form.get("content"):
            # append so earlier replies are preserved — the previous
            # implementation replaced the list, discarding existing replies
            jf2["reactions"]["replies"].append(
                {"content": request.form.get("content"), "url": reaction_url}
            )
        else:
            jf2["reactions"][reaction_type] = ""

        cursor.execute(
            "UPDATE timeline SET jf2 = ? WHERE uid = ?", (json.dumps(jf2), uid)
        )

    return {"type": "success"}, 200
def mark_as_read(request) -> dict:
    """Mark timeline entries as read or unread (Microsub timeline action).

    Form parameters:
        method: "mark_read" to mark read, anything else marks unread.
        channel: "all" for every channel except notifications, otherwise
            the channel whose entries are affected.
        entry[] / entry: specific entry uids to update.
        last_read_entry: uid of a cursor entry; everything in its channel
            dated at or before it is updated.

    Returns ``{"type": "mark_as_read"}``.
    """
    connection = sqlite3.connect("microsub.db")

    read_status = request.form.get("method")

    # "mark_read" flips entries to read; any other method marks unread
    read = "read" if read_status == "mark_read" else "unread"

    with connection:
        cursor = connection.cursor()

        if request.form.get("channel") == "all":
            # set all items in the timeline to read other than notifications;
            # the notifications channel is always the channel at position 1
            notification_channel = cursor.execute(
                "SELECT uid FROM channels WHERE position = 1;"
            ).fetchone()[0]

            cursor.execute(
                "UPDATE timeline SET read_status = ? WHERE channel != ?",
                (
                    read,
                    notification_channel,
                ),
            )

        if request.form.getlist("entry[]"):
            for entry in request.form.getlist("entry[]"):
                cursor.execute(
                    "UPDATE timeline SET read_status = ? WHERE uid = ?",
                    (
                        read,
                        entry,
                    ),
                )

        elif request.form.get("entry"):
            cursor.execute(
                "UPDATE timeline SET read_status = ? WHERE channel = ?",
                (
                    read,
                    request.form.get("channel"),
                ),
            )

        # last_read_entry cursor: update everything in the entry's channel
        # dated at or before it
        get_item = cursor.execute(
            "SELECT date, channel FROM timeline WHERE uid = ?;",
            (request.form.get("last_read_entry"),),
        ).fetchone()

        # guard: previously this subscripted the fetchone() result
        # unconditionally, raising TypeError whenever no last_read_entry
        # was supplied or it matched no row
        if get_item is not None:
            cursor.execute(
                "UPDATE timeline SET read_status = ? WHERE date <= ? AND channel = ?",
                (read, get_item[0], get_item[1]),
            )

    return {"type": "mark_as_read"}
def search_for_content(request) -> dict:
    """Search timeline entries whose jf2 document matches a query string.

    Form parameters:
        channel: channel uid to search, or "all" for every channel.
        query: substring matched against the stored jf2 JSON.

    Returns a JSON response of ``{"items": [[jf2, read_status, uid], ...]}``.
    """
    channel = request.form.get("channel")
    query = request.form.get("query")

    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()

        # Select full rows: the comprehension below reads columns
        # 1 (jf2), 3 (read status) and 5 (uid) — the previous bare
        # "SELECT jf2" produced one-column rows and raised IndexError
        # on every search.
        if channel == "all":
            result = cursor.execute(
                "SELECT * FROM timeline WHERE jf2 LIKE ? ORDER BY date DESC;",
                (f"%{query}%",),
            ).fetchall()
        else:
            result = cursor.execute(
                "SELECT * FROM timeline WHERE jf2 LIKE ? AND channel = ? ORDER BY date DESC;",
                (f"%{query}%", channel),
            ).fetchall()

    items = [[json.loads(item[1]), item[3], item[5]] for item in result]

    return jsonify({"items": items})


def search_for_feeds(request) -> dict:
    """Query the IndieWeb Search service for feeds matching a query.

    Returns ``{"items": [...]}`` with the remote results, or an empty
    list when the search service does not answer with HTTP 200.
    """
    query = request.form.get("query").strip()

    search_url = (
        f"https://indieweb-search.jamesg.blog/results?query=discover {query}&format=jf2"
    )

    r = requests.get(search_url)

    if r.status_code == 200:
        return jsonify({"items": r.json()})

    return jsonify({"items": []})
def get_timeline(request) -> dict:
    """Return one page of timeline entries (Microsub "timeline" action).

    Query args:
        channel: channel uid, or "all" for every channel.
        after / before: integer row-id cursors for paging.

    Returns a JSON response with "items" (jf2 dicts annotated with
    ``_is_read`` and ``_id``) and a "paging" object of before/after
    cursors, plus a 200 status.
    """
    channel = request.args.get("channel")
    after = request.args.get("after")
    before = request.args.get("before")

    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()

        # One channel predicate is enough; the previous version emitted
        # "channel = ? AND channel = ? AND" and bound the channel twice.
        if channel == "all":
            channel_arg = ""
            channel_tuple = ()
        else:
            channel_arg = "channel = ? AND"
            channel_tuple = (channel,)

        # LIMIT 21 fetches one row beyond the 20-item page so the paging
        # logic below can tell whether another page exists
        if not after and not before:
            item_list = cursor.execute(
                f"""SELECT * FROM timeline WHERE {channel_arg} hidden = 0 AND
                feed_id IN (SELECT id FROM following WHERE muted = 0 AND blocked = 0)
                ORDER BY date DESC, id DESC LIMIT 21;""",
                channel_tuple,
            ).fetchall()
        elif before and not after:
            item_list = cursor.execute(
                f"""SELECT * FROM timeline WHERE {channel_arg} hidden = 0 AND
                id < ? AND feed_id IN (SELECT id FROM following WHERE muted = 0 AND blocked = 0)
                ORDER BY date DESC, id DESC LIMIT 21;""",
                channel_tuple + (int(before),),
            ).fetchall()
        else:
            item_list = cursor.execute(
                f"""SELECT * FROM timeline WHERE {channel_arg} hidden = 0 AND
                id > ? AND feed_id IN (SELECT id FROM following WHERE muted = 0 AND blocked = 0)
                ORDER BY date DESC, id DESC LIMIT 21;""",
                channel_tuple + (int(after),),
            ).fetchall()

    # row layout: 1 = jf2 document, 3 = read status, 5 = uid
    items = [[json.loads(item[1]), item[3], item[5]] for item in item_list]

    for i in items:
        i[0]["_is_read"] = i[1] != "unread"
        i[0]["_id"] = i[2]

    items = [i[0] for i in items]

    # build paging cursors; column 8 is the integer row id
    if (
        len(item_list) > 20
        and not request.args.get("after")
        and not request.args.get("before")
    ):
        before = item_list[-1][8]
        after = ""
    elif 0 < len(item_list) <= 21:
        before = item_list[0][8]
        after = item_list[-1][8]
    else:
        before = ""
        after = ""

    return jsonify({"items": items, "paging": {"before": before, "after": after}}), 200


def get_post(request) -> dict:
    """Return a single timeline entry, looked up by its uid."""
    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()

        cursor.execute(
            "SELECT * FROM timeline WHERE uid = ?", (request.args.get("id"),)
        )

        return jsonify({"post": change_to_json(cursor)}), 200
def remove_entry(request) -> dict:
    """Hide one or more timeline entries.

    Accepts either a multi-valued "entry[]" form field or a single
    "entry" field. Hidden entries remain in the database but are
    excluded from timeline queries.
    """
    # unify the two former duplicate branches: fall back to the single
    # "entry" field when no "entry[]" list was submitted
    entries = request.form.getlist("entry[]") or [request.form.get("entry")]

    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()

        for entry in entries:
            cursor.execute("UPDATE timeline SET hidden = 1 WHERE uid = ?", (entry,))

    return {"type": "remove_entry"}


def get_muted(request) -> dict:
    """Return the raw following rows that are muted in the given channel."""
    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT * FROM following WHERE muted = 1 AND channel = ?",
            (request.args.get("channel"),),
        )

        return cursor.fetchall()


def mute(request) -> dict:
    """Mute a followed feed by URL.

    Returns 200 with the feed URL on success, or 400 when the URL is
    not in the following list.
    """
    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()

        cursor.execute(
            "UPDATE following SET muted = 1 WHERE url = ?", (request.form.get("url"),)
        )

        get_url = cursor.execute(
            "SELECT url FROM following WHERE url = ?", (request.form.get("url"),)
        ).fetchone()

        if get_url:
            return jsonify({"url": get_url[0], "type": "mute"}), 200

        return jsonify({"error": "You are not following this feed."}), 400


def block(request) -> dict:
    """Block a followed feed by URL.

    Returns 200 with the feed URL on success, or 400 when the URL is
    not in the following list.
    """
    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()

        cursor.execute(
            "UPDATE following SET blocked = 1 WHERE url = ?", (request.form.get("url"),)
        )

        get_url = cursor.execute(
            "SELECT url FROM following WHERE url = ?", (request.form.get("url"),)
        ).fetchone()

        if get_url:
            return jsonify({"url": get_url[0], "type": "block"}), 200

        return jsonify({"error": "You are not following this feed."}), 400
def unblock(request: request) -> dict:
    """Clear the blocked flag on a followed feed.

    Echoes the feed URL with a 200 status on success, or answers 400
    when the URL is not in the following list.
    """
    feed_url = request.form.get("url")

    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()

        cursor.execute(
            "UPDATE following SET blocked = 0 WHERE url = ?", (feed_url,)
        )

        match = cursor.execute(
            "SELECT url FROM following WHERE url = ?", (feed_url,)
        ).fetchone()

    if match is None:
        return jsonify({"error": "You are not following this feed."}), 400

    return jsonify({"url": match[0], "type": "unblock"}), 200


def unmute(request: request) -> dict:
    """Clear the muted flag on a followed feed.

    Echoes the feed URL with a 200 status on success, or answers 400
    when the URL is not in the following list.
    """
    feed_url = request.form.get("url")

    connection = sqlite3.connect("microsub.db")

    with connection:
        cursor = connection.cursor()

        cursor.execute(
            "UPDATE following SET muted = 0 WHERE url = ?", (feed_url,)
        )

        match = cursor.execute(
            "SELECT url FROM following WHERE url = ?", (feed_url,)
        ).fetchone()

    if match is None:
        return jsonify({"error": "You are not following this feed."}), 400

    return jsonify({"url": match[0], "type": "unmute"}), 200
session["code_verifier"], 28 | session.get("state"), 29 | ME, 30 | CALLBACK_URL, 31 | CLIENT_ID, 32 | required_scopes, 33 | ) 34 | 35 | if message is not None: 36 | flash(message) 37 | return redirect("/login") 38 | 39 | session.pop("code_verifier") 40 | 41 | session["me"] = response.get("me") 42 | session["access_token"] = response.get("access_token") 43 | session["scopes"] = response.get("scope", "") 44 | 45 | session.permanent = True 46 | 47 | # get media endpoint url 48 | try: 49 | req = requests.get( 50 | session.get("token_endpoint"), 51 | headers={"Authorization": "Bearer " + session.get("access_token")}, 52 | ) 53 | session["media_endpoint"] = req.json().get("media_endpoint") 54 | except requests.exceptions.RequestException: 55 | session["media_endpoint"] = None 56 | 57 | return redirect("/") 58 | 59 | 60 | @auth.route("/logout") 61 | def logout(): 62 | session.pop("me") 63 | session.pop("access_token") 64 | 65 | return redirect("/login") 66 | 67 | 68 | @auth.route("/login", methods=["GET"]) 69 | def login(): 70 | return render_template("auth.html", title="Cinnamon Login") 71 | 72 | 73 | @auth.route("/discover", methods=["POST"]) 74 | def discover_auth_endpoint(): 75 | domain = request.form.get("me") 76 | 77 | headers_to_find = [ 78 | "authorization_endpoint", 79 | "token_endpoint", 80 | "micropub", 81 | "microsub", 82 | ] 83 | 84 | headers = indieweb_utils.discover_endpoints(domain, headers_to_find) 85 | 86 | if not headers.get("authorization_endpoint"): 87 | flash( 88 | "A valid IndieAuth authorization endpoint could not be found on your website." 
def _generate_code_challenge(code_verifier: str) -> str:
    """Compute an S256 PKCE code challenge per RFC 7636 §4.2.

    The challenge is BASE64URL-ENCODE(SHA256(ASCII(code_verifier))) with
    the '=' padding stripped. The previous implementation applied
    standard base64 to the SHA-256 *hex digest* string, which a
    spec-compliant authorization endpoint will never match against the
    verifier presented at token exchange.
    """
    digest = hashlib.sha256(code_verifier.encode("utf-8")).digest()
    return base64.urlsafe_b64encode(digest).decode("utf-8").rstrip("=")


@auth.route("/discover", methods=["POST"])
def discover_auth_endpoint():
    """Discover a user's IndieAuth endpoints and start authorization.

    Looks up the authorization, token, micropub and microsub endpoints
    advertised by the submitted domain, stores them (plus a PKCE code
    verifier and a CSRF state value) in the session, then redirects the
    browser to the authorization endpoint.
    """
    domain = request.form.get("me")

    headers_to_find = [
        "authorization_endpoint",
        "token_endpoint",
        "micropub",
        "microsub",
    ]

    headers = indieweb_utils.discover_endpoints(domain, headers_to_find)

    if not headers.get("authorization_endpoint"):
        flash(
            "A valid IndieAuth authorization endpoint could not be found on your website."
        )
        return redirect("/login")

    if not headers.get("token_endpoint"):
        flash("A valid IndieAuth token endpoint could not be found on your website.")
        return redirect("/login")

    authorization_endpoint = headers.get("authorization_endpoint")
    token_endpoint = headers.get("token_endpoint")

    session["micropub_url"] = headers.get("micropub")
    session["server_url"] = headers.get("microsub")

    # PKCE code verifier: kept in the session so the callback can present
    # it to the token endpoint
    random_code = "".join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(30)
    )

    session["code_verifier"] = random_code
    session["authorization_endpoint"] = authorization_endpoint
    session["token_endpoint"] = token_endpoint

    code_challenge = _generate_code_challenge(random_code)

    # random state guards the callback against CSRF
    state = "".join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(10)
    )

    session["state"] = state

    return redirect(
        authorization_endpoint
        + "?client_id="
        + CLIENT_ID
        + "&redirect_uri="
        + CALLBACK_URL
        + "&scope=read follow mute block channels create&response_type=code&code_challenge="
        + code_challenge
        + "&code_challenge_method=S256&state="
        + state
    )
session.get("token_endpoint"), headers={"Authorization": "Bearer " + access_token} 17 | # ) 18 | 19 | # if request.status_code != 200 or ( 20 | # request.json().get("me") and request.json()["me"].strip("/") != ME.strip("/") 21 | # ): 22 | # return False 23 | 24 | return True 25 | -------------------------------------------------------------------------------- /config_example.py: -------------------------------------------------------------------------------- 1 | CLIENT_ID = "https://example.com" # url at which you will host your server 2 | CALLBACK_URL = CLIENT_ID + "/callback" 3 | ME = "https://example.com" # your domain name 4 | 5 | SECRET_KEY = "" # set this to a long, random string 6 | 7 | PROJECT_DIRECTORY = "/home/username/" # the root directory of the project 8 | 9 | SERVER_API_WEBHOOK = False # whether or not to use the server API webhook 10 | WEBHOOK_CHANNEL = ( 11 | "channel_name" # the channel to which new posts should be sent via a webhook 12 | ) 13 | WEBHOOK_TOKEN = "auth_token" # the auth token to be sent in an Authorization header with the webhook 14 | 15 | SENTRY_DSN = "sentry_url" # your sentry logging URL (if you want to log with Sentry) 16 | SENTRY_SERVER_NAME = ( 17 | "Microsub Client and Server" # the name of your server for use in Sentry 18 | ) 19 | 20 | TWITTER_BEARER_TOKEN = "" # used to generate reply contexts in the post editor 21 | -------------------------------------------------------------------------------- /dates.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | # get average of dates 4 | # not currently in use but may be used later 5 | 6 | 7 | def find_poll_cadence(dates): 8 | average_interval = [] 9 | 10 | for d in range(len(dates)): 11 | if d == 0: 12 | last_date = dates[d] 13 | else: 14 | last_date = dates[d - 1] 15 | 16 | current_date = dates[d] 17 | # convert to datetime 18 | current_date = datetime.datetime.strptime(current_date, "%Y%m%d") 19 | last_date = 
def clean_html_from_entry(text):
    """Reduce an HTML fragment to plain text for feed storage.

    The first pass flattens *text* to its text content with newline
    separators; that text is then re-parsed and the text of the second
    parse is returned.

    NOTE(review): ``find_all(reject=[...])`` does not filter by tag
    name — BeautifulSoup treats ``reject`` as an HTML *attribute*
    filter, so this loop matches nothing and the intended "only allow
    p/a/div/section/hr" behavior is a no-op. Confirm the intent before
    changing it: the input has already been flattened to plain text by
    the first pass, so a real tag filter here would have nothing to act
    on anyway.
    """
    content = BeautifulSoup(text, "lxml").get_text(
        separator="\n"
    )

    # only allow p tags, a tags, divs, sections, and hrs
    soup = BeautifulSoup(content, "lxml")

    for tag in soup.find_all(reject=["p", "a", "div", "section", "hr"]):
        tag.extract()

    return soup.get_text(separator="\n")
def process_hfeed_author(
    jf2: dict, url: str, child: dict, hcard: dict, feed_title: str, feed_icon: str
) -> dict:
    """Attach an author card (``jf2["author"]``, plus photo) to an entry.

    Author sources are tried in priority order: an h-card supplied by
    the caller, then an ``author`` property on the h-entry itself, then
    the feed title/icon as a last resort. Returns the (mutated) jf2.
    """
    domain_name = parse_url(url).netloc

    if hcard:
        # caller-supplied representative h-card: a list of mf2 items
        jf2["author"] = {
            "type": "card",
            "name": hcard[0]["properties"]["name"][0],
            "url": indieweb_utils.canonicalize_url(
                hcard[0]["properties"]["url"][0],
                domain_name,
                child["properties"]["url"][0],
            ),
        }

        if hcard[0]["properties"].get("photo"):
            jf2["photo"] = indieweb_utils.canonicalize_url(
                hcard[0]["properties"]["photo"][0],
                domain_name,
                child["properties"]["url"][0],
            )

    elif child["properties"].get("author") is not None and isinstance(
        child["properties"].get("author"), dict
    ):
        # NOTE(review): the h_card shapes below are inconsistent — the
        # first branch builds a *list* of items while the code after the
        # if/elif indexes h_card["properties"] as if it were a single
        # dict (presumably discover_author's return shape). Confirm the
        # return type of indieweb_utils.discover_author before relying
        # on the name-only branch.
        if type(child["properties"].get("author")[0]["properties"]) == str:
            h_card = [{"properties": {"name": child["properties"].get("author")[0]}}]
        elif child["properties"].get("author")[0]["properties"].get("url"):
            h_card = indieweb_utils.discover_author(
                child["properties"].get("author")[0]["properties"].get("url")[0]
            )
        else:
            h_card = []

        if h_card and len(h_card) > 0:
            jf2["author"] = {
                "type": "card",
                "name": h_card["properties"]["name"][0],
                "url": indieweb_utils.canonicalize_url(
                    h_card["properties"]["url"][0],
                    domain_name,
                    child["properties"]["url"][0],
                ),
            }

            if h_card["properties"].get("photo"):
                jf2["photo"] = indieweb_utils.canonicalize_url(
                    h_card["properties"]["photo"][0],
                    domain_name,
                    child["properties"]["url"][0],
                )
    elif feed_title is not None:
        # no h-card anywhere: fall back to the feed's own title and icon
        jf2["author"] = {
            "type": "card",
            "name": feed_title,
            "url": indieweb_utils.canonicalize_url(
                url, domain_name, child["properties"]["url"][0]
            ),
        }

        if feed_icon is not None:
            jf2["author"]["photo"] = feed_icon

    return jf2
def get_name_and_content(child: dict, jf2: dict, url: str) -> dict:
    """Fill in the title and content of a jf2 entry from an h-entry.

    The title falls back to "Post by <author>" and then to
    "Post by <domain>" when the entry has no explicit name.
    """
    if child["properties"].get("name"):
        jf2["title"] = child["properties"].get("name")[0]
    elif jf2.get("author") and jf2["author"]["name"]:
        jf2["title"] = f"Post by {jf2['author']['name']}"
    else:
        jf2["title"] = f"Post by {url.split('/')[2]}"

    if child["properties"].get("content"):
        # "html" carries the raw markup and "text" the mf2 plain-text
        # value — the previous version stored the *cleaned plain text*
        # under the "html" key, inverting the convention used by the
        # summary branch below
        jf2["content"] = {
            "html": child["properties"].get("content")[0]["html"],
            "text": child["properties"].get("content")[0]["value"],
        }
    elif child["properties"].get("summary"):
        jf2["content"] = {
            "text": clean_html_from_entry(child["properties"].get("summary")[0]),
            "html": child["properties"].get("summary")[0],
        }

    return jf2
parse(child["published"][0]) 153 | 154 | if parse_date: 155 | month_with_padded_zero = str(parse_date.month).zfill(2) 156 | day_with_padded_zero = str(parse_date.day).zfill(2) 157 | date = f"{parse_date.year}{month_with_padded_zero}{day_with_padded_zero}" 158 | else: 159 | month_with_padded_zero = str(datetime.datetime.now().month).zfill(2) 160 | day_with_padded_zero = str(datetime.datetime.now().day).zfill(2) 161 | date = f"{datetime.datetime.now().year}{month_with_padded_zero}{day_with_padded_zero}" 162 | else: 163 | date = datetime.datetime.now().strftime("%Y%m%d") 164 | 165 | ten_random_letters = "".join( 166 | random.choice(string.ascii_lowercase) for _ in range(10) 167 | ) 168 | 169 | jf2["published"] = date 170 | 171 | record = { 172 | "channel_uid": channel_uid, 173 | "result": json.dumps(jf2), 174 | "published": date, 175 | "unread": "unread", 176 | "url": jf2["url"], 177 | "uid": ten_random_letters, 178 | "hidden": 0, 179 | "feed_id": feed_id, 180 | "etag": "", 181 | "feed_url": url, 182 | } 183 | 184 | with open("feed_items.json", "a+") as file: 185 | file.write(json.dumps(record) + "\n") 186 | 187 | return jf2 188 | -------------------------------------------------------------------------------- /feeds/json_feed.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | import indieweb_utils 4 | from bs4 import BeautifulSoup 5 | from dateutil.parser import parse 6 | from urllib.parse import urlparse as parse_url 7 | from .clean import clean_html_from_entry 8 | 9 | 10 | def process_json_feed_author(item: dict, feed: dict, result: dict) -> dict: 11 | domain_name = parse_url(item.get("url")).netloc 12 | 13 | if feed.get("author") and not item.get("author"): 14 | result["author"] = {"type": "card", "name": feed.get("author").get("name")} 15 | if feed.get("home_page_url"): 16 | result["author"]["url"] = indieweb_utils.canonicalize_url( 17 | feed.get("home_page_url"), 18 | domain_name, 19 | 
feed.get("home_page_url"), 20 | ) 21 | else: 22 | result["author"]["url"] = indieweb_utils.canonicalize_url( 23 | feed.get("feed_url"), 24 | domain_name, 25 | feed.get("feed_url"), 26 | ) 27 | elif item.get("author") is not None and item["author"].get("url"): 28 | author_url_domain = parse_url(item["author"].get("url")).netloc 29 | 30 | result["author"] = { 31 | "type": "card", 32 | "name": item.get("author").get("name"), 33 | "url": indieweb_utils.canonicalize_url( 34 | item["author"].get("url"), 35 | author_url_domain, 36 | item["author"].get("url"), 37 | ), 38 | } 39 | 40 | if item["author"].get("avatar"): 41 | result["author"]["photo"] = item["author"].get("avatar") 42 | else: 43 | author_url_domain = parse_url(item["author"].get("url")).netloc 44 | 45 | result["author"] = { 46 | "type": "card", 47 | "name": feed.get("title"), 48 | "url": indieweb_utils.canonicalize_url( 49 | item["author"].get("url"), 50 | author_url_domain, 51 | item["author"].get("url"), 52 | ), 53 | } 54 | 55 | return result 56 | 57 | 58 | def process_attachments(item: dict, result: dict) -> dict: 59 | for i in item.get("attachments"): 60 | if "audio" in i.get("mime_type"): 61 | result["audio"] = [ 62 | {"content_type": i.get("mime_type"), "url": i.get("url")} 63 | ] 64 | break 65 | elif "video" in i.get("mime_type"): 66 | result["video"] = [ 67 | {"content_type": i.get("mime_type"), "url": i.get("url")} 68 | ] 69 | break 70 | 71 | return result 72 | 73 | 74 | def process_json_feed(item: dict, feed: dict) -> dict: 75 | parsed_url = parse_url(item.get("url")) 76 | result = { 77 | "type": "entry", 78 | "url": indieweb_utils.canonicalize_url( 79 | item.get("url"), parsed_url.netloc, item.get("url") 80 | ), 81 | } 82 | 83 | if item.get("image"): 84 | result["photo"] = item.get("image") 85 | 86 | result = process_json_feed_author(item, feed, result) 87 | 88 | # get audio or video attachment 89 | # only collect one because clients will only be expected to render one attachment 90 | if 
def process_json_feed(item: dict, feed: dict) -> tuple:
    """Convert a JSON Feed item into a jf2 entry.

    Returns a ``(jf2_dict, published_date)`` tuple; the date is a
    YYYYMMDD string that falls back to today when the item carries no
    parseable "published" value.
    """
    parsed_url = parse_url(item.get("url"))
    result = {
        "type": "entry",
        "url": indieweb_utils.canonicalize_url(
            item.get("url"), parsed_url.netloc, item.get("url")
        ),
    }

    if item.get("image"):
        result["photo"] = item.get("image")

    result = process_json_feed_author(item, feed, result)

    # get audio or video attachment
    # only collect one because clients will only be expected to render one attachment
    if item.get("attachments"):
        result = process_attachments(item, result)

    if item.get("published"):
        parse_date = parse(item["published"])

        if parse_date:
            month_with_padded_zero = str(parse_date.month).zfill(2)
            day_with_padded_zero = str(parse_date.day).zfill(2)
            date = f"{parse_date.year}{month_with_padded_zero}{day_with_padded_zero}"
        else:
            # unparseable date: fall back to today
            month_with_padded_zero = str(datetime.datetime.now().month).zfill(2)
            day_with_padded_zero = str(datetime.datetime.now().day).zfill(2)
            date = f"{datetime.datetime.now().year}{month_with_padded_zero}{day_with_padded_zero}"
    else:
        date = datetime.datetime.now().strftime("%Y%m%d")

    result["published"] = date

    if item.get("content_html"):
        result["content"] = {}
        result["content"]["text"] = clean_html_from_entry(item.get("content_html"))
        result["content"]["html"] = item.get("content_html")

    if item.get("title"):
        result["title"] = item.get("title")
    else:
        result[
            "title"
        ] = f"Post by {result['author'].get('name', item.get('url').split('/')[2])}"

    if item.get("url"):
        result["url"] = item.get("url")

    # NOTE(review): "post_type" is not a standard JSON Feed key, so this
    # branch likely never fires for spec-compliant feeds — confirm which
    # producers emit it before removing.
    if item.get("post_type"):
        result["post-type"] = indieweb_utils.get_post_type(item)

    return result, date
timeline;").fetchone() 19 | 20 | if last_id[0] is not None: 21 | last_id = last_id[0] + 1 22 | else: 23 | last_id = 0 24 | 25 | last_id += 1 26 | 27 | feed_id = cursor.execute( 28 | "SELECT id FROM following WHERE channel = 'read-later';" 29 | ).fetchone()[0] 30 | 31 | cursor.execute( 32 | """INSERT INTO timeline VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);""", 33 | ( 34 | "read-later", 35 | json.dumps(record["result"]), 36 | record["result"]["published"], 37 | 0, 38 | record["result"]["url"], 39 | record["result"]["url"], 40 | 0, 41 | feed_id, 42 | last_id, 43 | ), 44 | ) 45 | 46 | 47 | def get_read_later_photo(record: dict, soup: BeautifulSoup, url: str) -> dict: 48 | # we will remove header and nav tags so that we are more likely to find a "featured image" for the post 49 | # remove
tags 50 | parsed_url = parse_url(url) 51 | 52 | for header in soup.find_all("header"): 53 | header.decompose() 54 | 55 | # remove