├── kvdb ├── __init__.py ├── README.md ├── client.py ├── dbserver.py └── protocol.md ├── task_queue ├── __init__.py ├── demo │ ├── __init__.py │ ├── wsgi.py │ ├── asgi.py │ ├── urls.py │ └── settings.py ├── tasks │ ├── __init__.py │ ├── management │ │ ├── __init__.py │ │ └── commands │ │ │ ├── __init__.py │ │ │ └── custom_command.py │ ├── migrations │ │ └── __init__.py │ ├── models.py │ ├── tests.py │ ├── admin.py │ ├── views.py │ └── apps.py ├── requirements.txt ├── Dockerfile ├── tools │ └── run_development.sh ├── manage.py ├── docker-compose.yml └── README.md ├── pubsub ├── client.py ├── protocol.md ├── broker.py └── README.md ├── .gitignore ├── hipochain └── README.md └── README.md /kvdb/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /task_queue/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /task_queue/demo/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /task_queue/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /task_queue/tasks/management/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /task_queue/tasks/migrations/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /task_queue/tasks/management/commands/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /task_queue/requirements.txt: -------------------------------------------------------------------------------- 1 | Django==3.0.7 2 | psycopg2==2.8.3 3 | -------------------------------------------------------------------------------- /task_queue/tasks/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | 3 | # Create your models here. 4 | -------------------------------------------------------------------------------- /task_queue/tasks/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 4 | -------------------------------------------------------------------------------- /task_queue/tasks/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | 3 | # Register your models here. 4 | -------------------------------------------------------------------------------- /task_queue/tasks/views.py: -------------------------------------------------------------------------------- 1 | from django.shortcuts import render 2 | 3 | # Create your views here. 
4 | -------------------------------------------------------------------------------- /task_queue/tasks/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class TasksConfig(AppConfig): 5 | name = 'tasks' 6 | -------------------------------------------------------------------------------- /task_queue/tasks/management/commands/custom_command.py: -------------------------------------------------------------------------------- 1 | from django.core.management.base import BaseCommand, CommandError 2 | 3 | 4 | class Command(BaseCommand): 5 | help = "" 6 | 7 | def handle(self, *args, **options): 8 | pass 9 | -------------------------------------------------------------------------------- /task_queue/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8.1 as base 2 | 3 | RUN apt-get update && \ 4 | apt-get dist-upgrade -y && \ 5 | apt-get -y --no-install-recommends install \ 6 | gdal-bin \ 7 | && apt-get install -y --no-install-recommends postgresql-client \ 8 | && rm -rf /var/lib/apt/lists/* 9 | 10 | COPY . . 11 | RUN pip install -U pip && pip install -r requirements.txt 12 | 13 | WORKDIR /demo 14 | 15 | FROM base as application 16 | COPY . . 17 | -------------------------------------------------------------------------------- /task_queue/demo/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for demo project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo.settings') 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /task_queue/demo/asgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | ASGI config for task_queue project. 3 | 4 | It exposes the ASGI callable as a module-level variable named ``application``. 
5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.asgi import get_asgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo.settings') 15 | 16 | application = get_asgi_application() 17 | -------------------------------------------------------------------------------- /task_queue/tools/run_development.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script starts the development environment using Docker 4 | # Launch as: source tools/run_development.sh from the project's root 5 | 6 | SERVICE_NAME="demo" 7 | 8 | CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) docker-compose stop 9 | CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) docker-compose rm --force 10 | CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) docker-compose up -d --remove-orphans --build 11 | CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) docker-compose exec ${SERVICE_NAME} bash 12 | -------------------------------------------------------------------------------- /task_queue/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo.settings') 9 | try: 10 | from django.core.management import execute_from_command_line 11 | except ImportError as exc: 12 | raise ImportError( 13 | "Couldn't import Django. Are you sure it's installed and " 14 | "available on your PYTHONPATH environment variable? Did you " 15 | "forget to activate a virtual environment?" 16 | ) from exc 17 | execute_from_command_line(sys.argv) 18 | 19 | 20 | if __name__ == '__main__': 21 | main() 22 | -------------------------------------------------------------------------------- /task_queue/demo/urls.py: -------------------------------------------------------------------------------- 1 | """demo URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/3.0/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.urls import path 18 | 19 | urlpatterns = [ 20 | path('admin/', admin.site.urls), 21 | ] 22 | -------------------------------------------------------------------------------- /task_queue/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.4" 2 | 3 | services: 4 | 5 | postgres: 6 | image: mdillon/postgis:11 7 | environment: 8 | - POSTGRES_DB=demo 9 | - POSTGRES_USER=demo_dbu 10 | - POSTGRES_PASSWORD=7NVhgp2izNliDnl 11 | ports: 12 | - "4321:4321" 13 | volumes: 14 | - ".data/db:/var/lib/postgresql/data" 15 | healthcheck: 16 | test: ["CMD-SHELL", "pg_isready --quiet || exit 1"] 17 | interval: 1m 18 | timeout: 30s 19 | retries: 10 20 | start_period: 40s 21 | 22 | container_name: demo_postgres 23 | restart: unless-stopped 24 | 25 | demo: 26 | image: demo 27 | build: 28 | context: . 29 | target: application 30 | volumes: 31 | - ".:/demo" 32 | - ".data/root:/root" 33 | restart: unless-stopped 34 | tty: true 35 | depends_on: 36 | - postgres 37 | ports: 38 | - "80:8080" 39 | -------------------------------------------------------------------------------- /kvdb/README.md: -------------------------------------------------------------------------------- 1 | # Build a Key-Value Database Server from scratch 2 | 3 | Key-Value stores like Redis or MemCached are simple databases used for storing unstructured data associated with a unique key. 4 | They have a client server architecture with multiple clients accessing a single server. 5 | Usually the data is kept fully in memory but may be persisted to disk to allow recovery after a restart. 6 | They are often used for caching and thus support setting expiration time for keys and purging in LRU (least recently used) order. 7 | 8 | We will build a database server supporting the following commands: 9 | * GET key, value 10 | * SET key, value 11 | * DELETE key 12 | * INCR key 13 | * DECR key 14 | * EXPIRE key, seconds 15 | * TTL key 16 | * PING 17 | 18 | We will agree a protocol and create a client beforehand. 19 | The challenge is to create a database server that appropriately responds to the client. 20 | The client will be responsible for serialising the data to be stored into a suitable text based format (e.g. json or pickle). 
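For illustration, a minimal sketch of that client-side serialisation (assuming the `Client` class from `client.py`, run from the `kvdb` directory, against a server that already implements the commands listed above):

```python
import json

from client import Client  # the provided kvdb client

client = Client()
# Values travel as plain strings, so structured data is JSON-encoded by the
# caller before SET and decoded again after GET.
client.execute('SET', key='user:1', value=json.dumps({'name': 'Ada', 'visits': 3}))
profile = json.loads(client.execute('GET', key='user:1'))
client.execute('EXPIRE', key='user:1', ttl=60)  # drop the key after a minute
```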
21 | 22 | In addition to correctly implementing the above commands the server should: 23 | * support multiple concurrent clients 24 | * persist its datastore to disk every N seconds 25 | * ensure a maximum of N MB of memory is used 26 | -------------------------------------------------------------------------------- /pubsub/client.py: -------------------------------------------------------------------------------- 1 | import json 2 | import socket 3 | import time 4 | 5 | 6 | class Client(object): 7 | def __init__(self, host='localhost', port=4243): 8 | self.host = host 9 | self.port = port 10 | self.socket = None 11 | self.buff = '' 12 | 13 | def connect(self): 14 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 15 | self.socket.connect((self.host, self.port)) 16 | 17 | def _send(self, message): 18 | d = json.dumps(message) 19 | self.socket.sendall(bytes(d + '\n', 'utf-8')) 20 | 21 | def _receive(self): 22 | while True: 23 | if '\n' in self.buff: 24 | message = json.loads(self.buff.split('\n')[0]) 25 | self.buff = self.buff.split('\n', 1)[-1] 26 | return message 27 | self.buff += self.socket.recv(2048).decode('utf8') 28 | if not self.buff: 29 | return None 30 | 31 | def disconnect(self): 32 | self.socket.close() 33 | 34 | 35 | if __name__ == '__main__': 36 | client = Client() 37 | client.connect() 38 | client._send({'command': 'SUBSCRIBE', 'channel': 'foo'}) 39 | while True: 40 | r = client._receive() 41 | if r: 42 | print(r) 43 | time.sleep(0.1) 44 | -------------------------------------------------------------------------------- /pubsub/protocol.md: -------------------------------------------------------------------------------- 1 | 2 | # Protocol 3 | 4 | The protocol is JSON based. Each message is sent as a JSON object and each response is a JSON object. 5 | 6 | 7 | ## Commands 8 | 9 | Commands are sent by clients to the broker. 10 | 11 | ### SUBSCRIBE 12 | Subscribe a client to a channel 13 | 14 | ```json 15 | { 16 | "command": "SUBSCRIBE", 17 | "args": { 18 | "channel": "foo" 19 | } 20 | } 21 | ``` 22 | 23 | 24 | ### UNSUBSCRIBE 25 | Unsubscribe a client from a channel 26 | 27 | ```json 28 | { 29 | "command": "UNSUBSCRIBE", 30 | "args": { 31 | "channel": "foo" 32 | } 33 | } 34 | ``` 35 | 36 | 37 | ### PUBLISH 38 | Publish a message on a channel 39 | 40 | ```json 41 | { 42 | "command": "PUBLISH", 43 | "args": { 44 | "channel": "foo", 45 | "message": "hello world" 46 | } 47 | } 48 | ``` 49 | 50 | 51 | ## Messages 52 | 53 | Messages are sent by the broker to the client. 
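As a non-normative sketch, a broker that keeps a `subscriptions` dict mapping each channel name to the set of subscriber sockets could emit the message objects defined below like this:

```python
import json


def publish(subscriptions, channel, message):
    # Fan a PUBLISH out to every socket subscribed to the channel, using the
    # newline-delimited MESSAGE format described below.
    payload = json.dumps({'type': 'MESSAGE', 'channel': channel, 'message': message})
    for client_socket in subscriptions.get(channel, set()):
        client_socket.sendall(bytes(payload + '\n', 'utf8'))
```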
54 | 55 | ### Subscribe 56 | 57 | ```json 58 | { 59 | "type": "SUBSCRIBE", 60 | "channel": "foo", 61 | "count": 1 62 | } 63 | ``` 64 | 65 | 66 | ### Unsubscribe 67 | 68 | ```json 69 | { 70 | "type": "UNSUBSCRIBE", 71 | "channel": "foo", 72 | "count": 0 73 | } 74 | ``` 75 | 76 | ### Message 77 | 78 | ```json 79 | { 80 | "type": "MESSAGE", 81 | "channel": "foo", 82 | "message": "hello world" 83 | } 84 | ``` 85 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | .idea 92 | 93 | *.db 94 | 95 | # demo 96 | task_queue/db.sqlite3 97 | task_queue/.data 98 | -------------------------------------------------------------------------------- /kvdb/client.py: -------------------------------------------------------------------------------- 1 | import json 2 | import socket 3 | import time 4 | 5 | 6 | class Client(object): 7 | def __init__(self, host='localhost', port=4242): 8 | self.host = host 9 | self.port = port 10 | self.socket = None 11 | 12 | def connect(self): 13 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 14 | self.socket.connect((self.host, self.port)) 15 | 16 | def _send(self, message): 17 | d = json.dumps(message) 18 | self.socket.sendall(bytes(d + '\n', 'utf-8')) 19 | 20 | def _receive(self): 21 | buff = '' 22 | while True: 23 | buff += self.socket.recv(2048).decode('utf8') 24 | if not buff: 25 | return None 26 | if buff[-1] == '\n': 27 | break 28 | message = json.loads(buff) 29 | return message 30 | 31 | def send(self, message): 32 | try: 33 | self._send(message) 34 | except Exception: 35 | self.connect() 36 | self._send(message) 37 | result = self._receive() 38 | self.socket.close() 39 | return result 40 | 41 | def execute(self, command, **kwargs): 42 | message = { 43 | 'command': command, 44 | 'args': kwargs, 45 | } 46 | r = self.send(message) 47 | if r['status'] == 'OK': 48 | if r.get('result'): 49 | return r['result'] 50 | else: 51 | return None 52 | else: 53 | raise Exception(r['message']) 54 | 55 | 
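    # Illustrative convenience wrappers (a sketch, not part of the original client):
    # thin helpers over execute() that mirror the commands in protocol.md.

    def get(self, key):
        # Return the stored string value, or None if the key does not exist.
        return self.execute('GET', key=key)

    def set(self, key, value, ttl=None):
        # Store a string value, optionally expiring it after ttl seconds.
        if ttl is None:
            return self.execute('SET', key=key, value=value)
        return self.execute('SET', key=key, value=value, ttl=ttl)

    def ping(self):
        # Cheap connectivity check; the server answers with 'PONG'.
        return self.execute('PING')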
def disconnect(self): 56 | self.socket.close() 57 | 58 | 59 | if __name__ == '__main__': 60 | client = Client() 61 | while True: 62 | r = client.send({'command': 'PING'}) 63 | if r: 64 | print(r) 65 | time.sleep(1) 66 | -------------------------------------------------------------------------------- /task_queue/README.md: -------------------------------------------------------------------------------- 1 | # Postgresql based Task Queue 2 | 3 | For this challenge we will design and implement a task queue that uses Postgresql to store queued up tasks. 4 | Workers will query the database to fetch tasks to work on and write results/status back to the database. 5 | We will use Django for this challenge as it is familiar to all participants and will provide a useful level of abstraction 6 | while not limiting access to the required Postgresql features. 7 | 8 | 9 | ## Discussion Points 10 | 11 | * What is a Task Queue? 12 | * What do we use Task Queues for? 13 | * What features do we expect from a Task Queue? 14 | * What data structures/data models are required? 15 | 16 | 17 | ## Implementation 18 | 19 | * Create a Task model 20 | * Write a tasks module with some sample task functions 21 | - A function that prints something 22 | - A function that saves some object to the database 23 | - A function that sleeps for some seconds 24 | - A function that raises an exception with some probability 25 | * Write a Worker program that executes the appropriate function for each Task object created (tip: use a management command) 26 | * Track task execution status 27 | * Run multiple workers concurrently 28 | * Locking 29 | * Retries 30 | * Task TTL (task expiry) 31 | * Timeouts 32 | * Task scheduler 33 | 34 | 35 | 36 | ## Post Implementation Discussion 37 | 38 | * How well will this system scale? 39 | * What guarantees does this system provide? 40 | * What are the advantages of using Postgresql for a task Queue? 41 | * What are the disadvantages of using Postgresql for a task Queue? 42 | * When would it be useful & appropriate to use this type of system? 43 | * When would it not be appropriate to use this type of system? 
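To make the Implementation steps above more concrete, here is a rough sketch; the model fields, status values and the command name are placeholder choices, not a prescribed design:

```python
# tasks/models.py (sketch)
from django.db import models


class Task(models.Model):
    name = models.CharField(max_length=255)        # dotted path of the function to run
    payload = models.TextField(default='{}')       # JSON-encoded keyword arguments
    status = models.CharField(max_length=20, default='pending')  # pending/running/done/failed
    created_at = models.DateTimeField(auto_now_add=True)


# tasks/management/commands/worker.py (sketch of the fetch step)
from django.db import transaction

from tasks.models import Task


def claim_next_task():
    # SELECT ... FOR UPDATE SKIP LOCKED lets several workers poll the same
    # table without ever claiming the same row twice.
    with transaction.atomic():
        task = (Task.objects.select_for_update(skip_locked=True)
                .filter(status='pending')
                .order_by('created_at')
                .first())
        if task is not None:
            task.status = 'running'
            task.save(update_fields=['status'])
        return task
```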
44 | 45 | ## Setup guide 46 | 47 | You can run `source tools/run_development.sh` 48 | You can run `python manage.py runserver 0:8080` and it will be binded to http://localhost 49 | -------------------------------------------------------------------------------- /kvdb/dbserver.py: -------------------------------------------------------------------------------- 1 | import json 2 | import socket 3 | 4 | 5 | class Server(object): 6 | def __init__(self, host='localhost', port=4242): 7 | self.host = host 8 | self.port = port 9 | self.socket = None 10 | 11 | def run(self): 12 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 13 | self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 14 | # bind the socket to a public host, and a well-known port 15 | self.socket.bind((self.host, self.port)) 16 | # become a server socket 17 | self.socket.listen(5) 18 | 19 | while True: 20 | # accept connections from outside 21 | (clientsocket, address) = self.socket.accept() 22 | print('Client connected: %s' % (address,)) 23 | # now do something with the clientsocket 24 | self.handle_client(clientsocket) 25 | 26 | def handle_client(self, client): 27 | message = self._receive(client) 28 | result = self.handle_message(message) 29 | d = json.dumps(result) 30 | client.sendall(bytes(d + '\n', 'utf8')) 31 | 32 | def _receive(self, client): 33 | buff = '' 34 | while True: 35 | buff += client.recv(2048).decode('utf8') 36 | if not buff: 37 | # connection has been closed 38 | return None 39 | # messages are delimited by \n 40 | if buff[-1] == '\n': 41 | break 42 | buff = buff[:-1] 43 | print(buff) 44 | message = json.loads(buff) 45 | return message 46 | 47 | def handle_message(self, message): 48 | if message['command'] == 'PING': 49 | return {'result': 'PONG', 'status': 'OK'} 50 | return {'message': 'Unknown command', 'status': 'ERROR'} 51 | 52 | 53 | if __name__ == '__main__': 54 | server = Server() 55 | server.run() 56 | -------------------------------------------------------------------------------- /pubsub/broker.py: -------------------------------------------------------------------------------- 1 | import json 2 | import socket 3 | import select 4 | 5 | 6 | class Broker(object): 7 | def __init__(self, host='localhost', port=4243): 8 | self.host = host 9 | self.port = port 10 | self.socket = None 11 | self.client_sockets = [] 12 | 13 | def run(self): 14 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 15 | self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 16 | self.socket.setblocking(0) 17 | # bind the socket to a public host, and a well-known port 18 | self.socket.bind((self.host, self.port)) 19 | # become a server socket 20 | self.socket.listen(5) 21 | 22 | while True: 23 | ready_to_read, ready_to_write, in_error = \ 24 | select.select( 25 | [self.socket] + self.client_sockets, 26 | [], 27 | [], 28 | 60) 29 | 30 | for s in ready_to_read: 31 | if s == self.socket: 32 | # accept connections from outside 33 | (clientsocket, address) = self.socket.accept() 34 | print('Client connected: %s' % (address,)) 35 | clientsocket.setblocking(0) 36 | # now do something with the clientsocket 37 | self.client_sockets.append(clientsocket) 38 | else: 39 | self.handle_client(s) 40 | 41 | def handle_client(self, client): 42 | message = self._receive(client) 43 | 44 | def _receive(self, client): 45 | buff = '' 46 | while True: 47 | buff += client.recv(1).decode('utf8') 48 | if not buff: 49 | # connection has been closed 50 | return None 51 | # messages are delimited by \n 52 | 
if buff[-1] == '\n': 53 | break 54 | buff = buff[:-1] 55 | print(buff) 56 | message = json.loads(buff) 57 | return message 58 | 59 | def _send(self, client, message): 60 | d = json.dumps(message) 61 | client.sendall(bytes(d + '\n', 'utf8')) 62 | 63 | 64 | if __name__ == '__main__': 65 | broker = Broker() 66 | broker.run() 67 | -------------------------------------------------------------------------------- /hipochain/README.md: -------------------------------------------------------------------------------- 1 | # Hipochain 2 | ## Objectives 3 | * send transactions. 4 | * parse transactions. 5 | * implement ledgering. 6 | 7 | ## Assumptions 8 | * No consensus. 9 | * No peer to peer gossip. 10 | * No authentication. 11 | * There exists a Redis client which is used for broadcasting the transactions. 12 | * Redis client is accessable by everyone under the local network. 13 | * Transactions are stored in a Redis List which is accessed by the key `hiponet`. 14 | * New transactions are appended to the end of the Redis list. 15 | * Some of the submitted transactions might be invalid. 16 | * There is only one asset whose `id` is 0. 17 | * Every account, **except** HIPO, has 0 asset balance at the start. 18 | * HIPO creates the genesis transaction, which distributes the assets. 19 | * HIPO is trustable. No need to perform validations for their transactions. 20 | * Hipochain Node is responsible for verifying the validity of transactions. 21 | * Each computer that adheres to the below protocol is a Hipochain Node. 22 | 23 | ## Protocol 24 | * Transaction must be a JSON Array of objects. 25 | * Transaction must contain the fields below: 26 | * `amount` 27 | * `asset_id` 28 | * `first_valid` 29 | * `receiver` 30 | * `sender` 31 | * `type` 32 | * Transaction `sender`, `receiver` and `type` should be strings. 33 | * Transaction `amount`, `asset_id` and `first_valid` should be integers. 34 | * Each valid transaction group increments an internal variable `round` which starts from 0 before the first transactions. 35 | * `first_valid` must be smaller or equal to the current round at the time of transaction validation. 36 | * Reject (ignore) transactions that cause overspend (negative amounts). Remember, in HIPO we trust. 37 | * Transactions after `round` 2 must not have HIPO as `sender`. 38 | * If a group contains more than 1 transaction, reject ALL transactions in the group if ANY transaction fails for any reason. 39 | * Transaction `amount` must be non-negative. 40 | * Transaction `type` must be `transfer`. 41 | * `asset_id` must be 0. 42 | 43 | ## Steps 44 | 1. Connect to redis and print contents of `hiponet` list. 45 | 2. Continuously check list and print new messages `(while True:…)`. 46 | 3. Connect to redis and append to the `hiponet` list. Use `rpush`. 47 | 4. Filter messages and discard anything that is not a valid JSON array of objects. 48 | 5. Manually/mentally parse the valid looking transactions to see the flow of funds to accounts. 49 | 6. Send a transaction of the same format as the existing transactions. 50 | 7. Implement ledgering (accounting logic). Keep running balances of accounts. Reject (ignore) transactions that cause overspend (negative amounts) 51 | 8. Stop and compare balances after round 42. 
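A minimal ledgering sketch covering steps 1, 4 and 7 (it assumes the third-party `redis` package, a Redis server on localhost, and that HIPO's account name is the literal string `HIPO`; only a subset of the protocol checks is shown):

```python
import json

import redis  # third-party redis-py client

ASSET_ID = 0
client = redis.Redis(host='localhost', port=6379)

balances = {}      # account name -> running balance of asset 0
round_number = 0   # starts at 0 before the first valid transaction group


def valid(tx):
    # A subset of the protocol rules above; extend as needed.
    required = ('amount', 'asset_id', 'first_valid', 'receiver', 'sender', 'type')
    if not isinstance(tx, dict) or not all(field in tx for field in required):
        return False
    if not all(isinstance(tx[f], int) for f in ('amount', 'asset_id', 'first_valid')):
        return False
    if tx['type'] != 'transfer' or tx['asset_id'] != ASSET_ID or tx['amount'] < 0:
        return False
    if tx['first_valid'] > round_number:
        return False
    if tx['sender'] == 'HIPO':
        return round_number <= 2               # HIPO may not send after round 2
    return balances.get(tx['sender'], 0) >= tx['amount']   # no overspend


for raw in client.lrange('hiponet', 0, -1):
    try:
        group = json.loads(raw)
    except ValueError:
        continue                               # discard anything that is not valid JSON
    if not isinstance(group, list) or not group or not all(valid(tx) for tx in group):
        continue                               # reject the whole group if any transaction fails
    for tx in group:
        balances[tx['sender']] = balances.get(tx['sender'], 0) - tx['amount']
        balances[tx['receiver']] = balances.get(tx['receiver'], 0) + tx['amount']
    round_number += 1                          # each valid group advances the round

print(round_number, balances)
```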
52 | 53 | ## External Links 54 | * [Redis List Commands](https://redis.io/commands/?group=list) 55 | * [2022 Backend Day Slack Thread](https://hipo.slack.com/archives/C0G5PTL8Z/p1650618219636349) 56 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Backend Team Challenges 2 | 3 | The aim of these challenges is to learn more about the software systems we use everyday. One of the best ways to learn how something works is to implement it yourself. We will attempt to implement our own versions of systems like Redis, Memcached, Postgresql, Celery, Flask, etc. By thinking about and implementing our own version of these systems the internals will become transparent to us, allowing us to better understand the real versions. 4 | 5 | We do not of course plan to use anything we implement in these challenges in production. We are not pretending we can reimplement such systems in a single day. Our implementations will be optimised for readability and simplicity over performance. They will support a minimal feature set, dealing with (some of) the most common use cases rather than edge cases. 6 | 7 | These challenges are NOT competitions! We will each write our own code but we will solve the problem together. We will discuss the challenge together before we write any code and at intervals during the coding session. When we get stuck we will discuss as a group and help each other out. The aim of these challenges is for everyone to learn something, whether you have never used the the system we are implementing or you wrote the original! 8 | 9 | 10 | # General Rules 11 | * The implementation must be in Python 3 and use only the standard library. 12 | * The implementation should be optimised for readability first, then efficiency. 13 | * The implementation must support the agreed protocol and client. 14 | * Ask each other for help before asking google/stackoverflow (except for simple syntax things that you don't keep in your head because you know they are on the internet!) 15 | * Please do not copy/paste any code from StackOverflow etc. Research and discuss until you understand and then write your own code! 16 | 17 | 18 | 19 | # Challenge 1 - Build a Key-Value Database Server from scratch 20 | 21 | A Redis/Memcached 'clone'. 22 | 23 | [KVDB Challenge Details](kvdb/) 24 | 25 | 26 | # Challenge 2 - Build a Document Oriented Database Server from scratch 27 | 28 | A MongoDB/CouchDb/Dynamodb 'clone'. 29 | 30 | 31 | # Challenge 3 - Build a Relational Database Server from scratch 32 | 33 | A Postgresql/Mysql 'clone'. 34 | 35 | 36 | # Challenge 4 - Build a PubSub Server from scratch 37 | 38 | Like Redis PubSub / RabbitMQ PubSub 39 | 40 | [PubSub Challenge Details](pubsub/) 41 | 42 | 43 | # Challenge 5 - Build a Task Queue from scratch 44 | 45 | Like Celery/Kuyruk/Resque/Fifo 46 | [Task Queue Challenge Details](task_queue/) 47 | 48 | 49 | # Challenge 6 - Build a Micro Web Framework from scratch 50 | 51 | Like Flask/Pico/Bottle/WebPy 52 | 53 | 54 | # Challenge 7 - Hipochain 55 | 56 | Send & parse transactions and implement ledgering. 57 | 58 | [Hipochain Challenge Details](hipochain/) 59 | 60 | 61 | # Challenge N+1 - Make a suggestion! 
62 | 63 | -------------------------------------------------------------------------------- /pubsub/README.md: -------------------------------------------------------------------------------- 1 | # Build a Pub Sub System 2 | 3 | Pub Sub is a system architecture where clients can Subscribe to channels/topics where other clients can Publish messages. The subscribers have no knowledge of the publishers nor the publishers of the subscribers. A central broker is responsible for managing subscriptions, receiving messages from publishers and pushing messages to subscribers. Brokers can also be clients themselves, forming a hub topology. 4 | 5 | The broker must: 6 | * accept multiple concurrent client connections 7 | * hold client-channel subscriptions 8 | * handle subscription messages 9 | * handle publish messages 10 | * receive messages from publishers and send to appropriate subscribers 11 | 12 | When a client disconnects from the broker all of its subscriptions are lost. If it reconnects it must subscribe again. The client does not receive any messages that were sent on a channel before it subscribed. 13 | 14 | See [Protocol](protocol.md) for details of the command and message format. 15 | 16 | 17 | ## Steps: 18 | 1. Run `python3 broker.py` 19 | 1. Run `python3 client.py` 20 | 1. Verify that both work and the broker prints the message from the client. 21 | 1. Modify `Broker.handle_client` to process the message based on the `command` key of the message. 22 | 1. For now the broker can just write to the client socket with the appropriate message ( defined in the [Protocol](protocol.md)). Later we will implement the actual functionality for subscribe, unsubscribe, publish. 23 | 1. Restart both broker and client and verify that the client prints the acknowledgement message after sending the `subscribe` command. 24 | 1. Implement higher level functions for `subscribe`, `unsubscribe` and `publish` in ` client.py`. 25 | 1. `subscribe(channel)` 26 | 1. `unsubscribe(channel)` 27 | 1. `publish(channel, message)` 28 | 1. Modify the client main to use these higher level functions. 29 | 1. Implement a separate publisher script that creates a client object that publishes to one or more channels in a loop every second. This is useful for testing the client. 30 | 1. Go back to `broker.py` and modify it to keep track of client channel subscriptions. Remember that socket objects are hashable so they can be used as keys in a `dict` and as elements of a `set`. 31 | 1. Modify `Broker.handle_client` to correctly handle messages with the `PUBLISH` command. The broker should iterate through all client sockets that are subscribed to the given channel and send a `MESSAGE` message to them. 32 | 1. Run the broker, client and publisher and verify that the client receives all the messages it should, but none for other channels. 33 | 1. Run multiple clients/publishers and see if things still work. 34 | 35 | ## Bonus Steps: 36 | 1. Implement a 2 way chat script that listens on channels and publishes user input to some channels. Ideally it should use threads so the listening is not interrupted to send commands. 37 | 1. Optimise the socket handling code to be more efficient. 38 | 1. Measure how many simultaneous clients your broker can support. What are the limits? Why? 39 | -------------------------------------------------------------------------------- /task_queue/demo/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for demo project. 
3 | 4 | Generated by 'django-admin startproject' using Django 3.0.3. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/3.0/ref/settings/ 11 | """ 12 | 13 | import os 14 | 15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 23 | SECRET_KEY = 'jt$k5x_%b1ajf%ep*(m*1*ty=ht^86=hq*io7r$hso+na-dwgv' 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 26 | DEBUG = True 27 | 28 | ALLOWED_HOSTS = [] 29 | 30 | 31 | # Application definition 32 | 33 | INSTALLED_APPS = [ 34 | 'django.contrib.admin', 35 | 'django.contrib.auth', 36 | 'django.contrib.contenttypes', 37 | 'django.contrib.sessions', 38 | 'django.contrib.messages', 39 | 'django.contrib.staticfiles', 40 | 41 | 'tasks', 42 | ] 43 | 44 | MIDDLEWARE = [ 45 | 'django.middleware.security.SecurityMiddleware', 46 | 'django.contrib.sessions.middleware.SessionMiddleware', 47 | 'django.middleware.common.CommonMiddleware', 48 | 'django.middleware.csrf.CsrfViewMiddleware', 49 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 50 | 'django.contrib.messages.middleware.MessageMiddleware', 51 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 52 | ] 53 | 54 | ROOT_URLCONF = 'demo.urls' 55 | 56 | TEMPLATES = [ 57 | { 58 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 59 | 'DIRS': [], 60 | 'APP_DIRS': True, 61 | 'OPTIONS': { 62 | 'context_processors': [ 63 | 'django.template.context_processors.debug', 64 | 'django.template.context_processors.request', 65 | 'django.contrib.auth.context_processors.auth', 66 | 'django.contrib.messages.context_processors.messages', 67 | ], 68 | }, 69 | }, 70 | ] 71 | 72 | WSGI_APPLICATION = 'demo.wsgi.application' 73 | 74 | 75 | # Database 76 | # https://docs.djangoproject.com/en/3.0/ref/settings/#databases 77 | 78 | DATABASES = { 79 | 'default': { 80 | 'ENGINE': 'django.contrib.gis.db.backends.postgis', 81 | 'NAME': 'demo', 82 | 'USER': 'demo_dbu', 83 | 'PASSWORD': '7NVhgp2izNliDnl', 84 | 'HOST': 'postgres', 85 | 'PORT': '5432', 86 | } 87 | } 88 | 89 | 90 | # Password validation 91 | # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators 92 | 93 | AUTH_PASSWORD_VALIDATORS = [ 94 | { 95 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 96 | }, 97 | { 98 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 99 | }, 100 | { 101 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 102 | }, 103 | { 104 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 105 | }, 106 | ] 107 | 108 | 109 | # Internationalization 110 | # https://docs.djangoproject.com/en/3.0/topics/i18n/ 111 | 112 | LANGUAGE_CODE = 'en-us' 113 | 114 | TIME_ZONE = 'UTC' 115 | 116 | USE_I18N = True 117 | 118 | USE_L10N = True 119 | 120 | USE_TZ = True 121 | 122 | 123 | # Static files (CSS, JavaScript, Images) 124 | # https://docs.djangoproject.com/en/3.0/howto/static-files/ 125 | 126 | STATIC_URL = '/static/' 127 | -------------------------------------------------------------------------------- 
/kvdb/protocol.md: -------------------------------------------------------------------------------- 1 | 2 | # Protocol 3 | 4 | The protocol is JSON based. Each command is sent as a JSON object and each response is a JSON object. 5 | Values are always set, stored, and returned as strings. 6 | 7 | ## SET 8 | Set the value of a key, overwriting any existing value. 9 | If the optional ttl argument is provided, the key will expire in ttl seconds. 10 | 11 | ### Request: 12 | ```json 13 | { 14 | "command": "SET", 15 | "args": { 16 | "key": "foo", 17 | "value": "some string value" 18 | } 19 | } 20 | ``` 21 | 22 | With a TTL of 60 seconds the request becomes: 23 | ```json 24 | { 25 | "command": "SET", 26 | "args": { "key": "foo", "value": "some string value", "ttl": 60 } 27 | } 28 | ``` 29 | 30 | ### Response: 31 | ```json 32 | { 33 | "status": "OK" 34 | } 35 | ``` 36 | 37 | ## GET 38 | Get the value of the key. If it does not exist, returns null. 39 | 40 | ### Request: 41 | ```json 42 | { 43 | "command": "GET", 44 | "args": { 45 | "key": "foo" 46 | } 47 | } 48 | ``` 49 | 50 | ### Response: 51 | ```json 52 | { 53 | "status": "OK", 54 | "result": "some string value" 55 | } 56 | ``` 57 | 58 | If the key does not exist: 59 | ```json 60 | { 61 | "status": "OK", 62 | "result": null 63 | } 64 | ``` 65 | 66 | ## DELETE 67 | Remove the key and associated value. Succeeds even if the key does not exist. 68 | 69 | ### Request: 70 | ```json 71 | { 72 | "command": "DELETE", 73 | "args": { 74 | "key": "foo" 75 | } 76 | } 77 | ``` 78 | 79 | ### Response: 80 | ```json 81 | { 82 | "status": "OK" 83 | } 84 | ``` 85 | 86 | ## INCR 87 | Add 1 to the current value, assuming it is an integer. If it is not parsable as an integer, return an error. 88 | If the key does not exist, set it to "0" before performing the operation. 89 | Returns the new value. 90 | 91 | ### Request: 92 | ```json 93 | { 94 | "command": "INCR", 95 | "args": { 96 | "key": "x" 97 | } 98 | } 99 | ``` 100 | 101 | ### Response: 102 | ```json 103 | { 104 | "status": "OK", 105 | "result": "2" 106 | } 107 | ``` 108 | 109 | ### Error: 110 | ```json 111 | { 112 | "status": "ERROR", 113 | "message": "Value is not an integer" 114 | } 115 | ``` 116 | 117 | ## DECR 118 | Subtract 1 from the current value, assuming it is an integer. If it is not parsable as an integer, return an error. 119 | If the key does not exist, set it to "0" before performing the operation. 120 | Returns the new value. 121 | ### Request: 122 | ```json 123 | { 124 | "command": "DECR", 125 | "args": { 126 | "key": "x" 127 | } 128 | } 129 | ``` 130 | 131 | ### Response: 132 | ```json 133 | { 134 | "status": "OK", 135 | "result": "0" 136 | } 137 | ``` 138 | 139 | ### Error: 140 | ```json 141 | { 142 | "status": "ERROR", 143 | "message": "Value is not an integer" 144 | } 145 | ``` 146 | 147 | 148 | ## EXPIRE 149 | Set a TTL (time to live) in seconds for the key. The key will no longer exist after the TTL has expired. 150 | 151 | ### Request: 152 | ```json 153 | { 154 | "command": "EXPIRE", 155 | "args": { 156 | "key": "foo", 157 | "ttl": 60 158 | } 159 | } 160 | ``` 161 | 162 | ### Response: 163 | ```json 164 | { 165 | "status": "OK" 166 | } 167 | ``` 168 | 169 | 170 | 171 | ## TTL 172 | Get the current TTL (time to live) for a key. 173 | Returns the time in seconds or null if the key does not exist or does not have a TTL associated with it.
174 | 175 | ### Request: 176 | ```json 177 | { 178 | "command": "TTL", 179 | "args": { 180 | "key": "foo" 181 | } 182 | } 183 | ``` 184 | 185 | ### Response: 186 | ```json 187 | { 188 | "status": "OK", 189 | "result": 59 190 | } 191 | ``` 192 | 193 | ## PING 194 | Returns PONG. To be used to test whether the connection/server is working. 195 | 196 | ### Request: 197 | ```json 198 | { 199 | "command": "PING" 200 | } 201 | ``` 202 | 203 | ### Response: 204 | ```json 205 | { 206 | "status": "OK", 207 | "result": "PONG" 208 | } 209 | ``` 210 | --------------------------------------------------------------------------------