├── robot-robbers
│ ├── requirements.txt
│ ├── sprites
│ │ ├── cashbag.png
│ │ ├── robot.png
│ │ ├── scrooge.png
│ │ └── dropspot.png
│ ├── utilities
│ │ ├── utilities.py
│ │ ├── environment.py
│ │ ├── logging
│ │ │ ├── formatters.py
│ │ │ ├── sinks.py
│ │ │ ├── handlers.py
│ │ │ └── config.py
│ │ ├── singleton.py
│ │ └── exceptions.py
│ ├── models
│ │ └── dtos.py
│ ├── static
│ │ ├── render.py
│ │ └── index.html
│ ├── router.py
│ ├── run.sh
│ ├── run_game.py
│ ├── api.py
│ ├── .gitignore
│ ├── sample_predict_request.json
│ ├── README.md
│ └── game
│   └── environment.py
├── images
│ ├── cashbag.png
│ ├── robot.png
│ ├── scrooge.png
│ ├── dropspot.png
│ ├── example2.jpg
│ ├── amazon_review.jpg
│ ├── robot_robbers.png
│ ├── removed_sample2.jpg
│ └── robot_robbers_diagram.png
├── pig-piglet-detection
│ ├── data
│ │ ├── 1.jpg
│ │ ├── 2.jpg
│ │ ├── 2.txt
│ │ └── 1.txt
│ ├── utilities
│ │ ├── utilities.py
│ │ ├── environment.py
│ │ ├── logging
│ │ │ ├── formatters.py
│ │ │ ├── sinks.py
│ │ │ ├── handlers.py
│ │ │ └── config.py
│ │ ├── singleton.py
│ │ └── exceptions.py
│ ├── static
│ │ ├── render.py
│ │ └── index.html
│ ├── run.sh
│ ├── models
│ │ └── dtos.py
│ ├── api.py
│ ├── router.py
│ ├── .gitignore
│ └── README.md
├── sentiment-analysis
│ ├── utilities
│ │ ├── utilities.py
│ │ ├── environment.py
│ │ ├── logging
│ │ │ ├── formatters.py
│ │ │ ├── sinks.py
│ │ │ ├── handlers.py
│ │ │ └── config.py
│ │ ├── singleton.py
│ │ └── exceptions.py
│ ├── models
│ │ └── dtos.py
│ ├── static
│ │ ├── render.py
│ │ └── index.html
│ ├── router.py
│ ├── run.sh
│ ├── api.py
│ ├── data
│ │ └── data.csv
│ ├── .gitignore
│ └── README.md
├── .gitignore
└── README.md
/robot-robbers/requirements.txt:
--------------------------------------------------------------------------------
1 | gym==0.21
2 | numpy
3 | matplotlib
4 | pygame
5 |
--------------------------------------------------------------------------------
/images/cashbag.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/cashbag.png
--------------------------------------------------------------------------------
/images/robot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/robot.png
--------------------------------------------------------------------------------
/images/scrooge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/scrooge.png
--------------------------------------------------------------------------------
/images/dropspot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/dropspot.png
--------------------------------------------------------------------------------
/images/example2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/example2.jpg
--------------------------------------------------------------------------------
/images/amazon_review.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/amazon_review.jpg
--------------------------------------------------------------------------------
/images/robot_robbers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/robot_robbers.png
--------------------------------------------------------------------------------
/images/removed_sample2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/removed_sample2.jpg
--------------------------------------------------------------------------------
/images/robot_robbers_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/images/robot_robbers_diagram.png
--------------------------------------------------------------------------------
/pig-piglet-detection/data/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/pig-piglet-detection/data/1.jpg
--------------------------------------------------------------------------------
/pig-piglet-detection/data/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/pig-piglet-detection/data/2.jpg
--------------------------------------------------------------------------------
/robot-robbers/sprites/cashbag.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/robot-robbers/sprites/cashbag.png
--------------------------------------------------------------------------------
/robot-robbers/sprites/robot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/robot-robbers/sprites/robot.png
--------------------------------------------------------------------------------
/robot-robbers/sprites/scrooge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/robot-robbers/sprites/scrooge.png
--------------------------------------------------------------------------------
/robot-robbers/sprites/dropspot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amboltio/DM-i-AI-2022/HEAD/robot-robbers/sprites/dropspot.png
--------------------------------------------------------------------------------
/pig-piglet-detection/data/2.txt:
--------------------------------------------------------------------------------
1 | 1 0.730501 0.381238 0.538999 0.626904
2 | 1 0.360350 0.436792 0.709987 0.674859
3 | 1 0.472703 0.602880 0.621051 0.630413
4 |
--------------------------------------------------------------------------------
/pig-piglet-detection/data/1.txt:
--------------------------------------------------------------------------------
1 | 1 0.779950 0.636889 0.229737 0.307613
2 | 1 0.659249 0.676071 0.133166 0.325150
3 | 1 0.530751 0.693609 0.083329 0.336842
4 | 0 0.499981 0.387180 0.881564 0.671353
5 |
--------------------------------------------------------------------------------
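
The `1.txt` and `2.txt` files above appear to be YOLO-style annotations for the matching images: one box per line as `class x_center y_center width height`, with all coordinates normalized to [0, 1] and class ids matching the pig/piglet convention used in `models/dtos.py`. A minimal parsing sketch under that assumption, converting each line into the min/max corner form used by `BoundingBoxClassification`:

```python
# Minimal sketch, assuming the YOLO "class x_center y_center width height"
# convention with coordinates normalized to [0, 1].
def parse_label_line(line: str) -> dict:
    class_id, cx, cy, w, h = line.split()
    cx, cy, w, h = float(cx), float(cy), float(w), float(h)
    return {
        'class_id': int(class_id),
        'min_x': cx - w / 2,
        'min_y': cy - h / 2,
        'max_x': cx + w / 2,
        'max_y': cy + h / 2,
    }

with open('data/1.txt', encoding='utf8') as fp:
    boxes = [parse_label_line(line) for line in fp if line.strip()]
print(boxes[0])
```
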
/robot-robbers/utilities/utilities.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import time
3 |
4 | start_time = time.time()
5 |
6 | def get_uptime():
7 | return '{}'.format(datetime.timedelta(seconds=time.time() - start_time))
8 |
--------------------------------------------------------------------------------
/pig-piglet-detection/utilities/utilities.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import time
3 |
4 | start_time = time.time()
5 |
6 | def get_uptime():
7 | return '{}'.format(datetime.timedelta(seconds=time.time() - start_time))
8 |
--------------------------------------------------------------------------------
/sentiment-analysis/utilities/utilities.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import time
3 |
4 | start_time = time.time()
5 |
6 | def get_uptime():
7 | return '{}'.format(datetime.timedelta(seconds=time.time() - start_time))
8 |
--------------------------------------------------------------------------------
/sentiment-analysis/models/dtos.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from pydantic import BaseModel
3 |
4 |
5 | class SentimentAnalysisRequestDto(BaseModel):
6 | reviews: List[str]
7 |
8 |
9 | class SentimentAnalysisResponseDto(BaseModel):
10 | scores: List[int]
11 |
--------------------------------------------------------------------------------
/robot-robbers/utilities/environment.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseSettings
2 | from argparse import ArgumentParser
3 | from utilities.singleton import singleton
4 |
5 | @singleton
6 | class Environment(BaseSettings):
7 | ENVIRONMENT: str = 'production'
8 |
9 | HOST_IP: str
10 | CONTAINER_PORT: int
11 |
--------------------------------------------------------------------------------
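
`Environment` combines pydantic's `BaseSettings` with the `@singleton` decorator, so `HOST_IP` and `CONTAINER_PORT` are read from environment variables the first time `Environment()` is constructed, and every later call returns the same instance. A small sketch of that behaviour (the values are placeholders for illustration, not the project's real configuration, which normally comes from the env file referenced in `run.sh`):

```python
import os

# Placeholder values for illustration only.
os.environ['HOST_IP'] = '0.0.0.0'
os.environ['CONTAINER_PORT'] = '8000'

# Assumes this is run from the project root so `utilities` is importable.
from utilities.environment import Environment

env = Environment()          # fields populated from the environment by BaseSettings
assert env is Environment()  # @singleton: later calls reuse the same instance
print(env.HOST_IP, env.CONTAINER_PORT)
```
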
/pig-piglet-detection/utilities/environment.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseSettings
2 | from argparse import ArgumentParser
3 | from utilities.singleton import singleton
4 |
5 | @singleton
6 | class Environment(BaseSettings):
7 | ENVIRONMENT: str = 'production'
8 |
9 | HOST_IP: str
10 | CONTAINER_PORT: int
11 |
--------------------------------------------------------------------------------
/sentiment-analysis/utilities/environment.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseSettings
2 | from argparse import ArgumentParser
3 | from utilities.singleton import singleton
4 |
5 | @singleton
6 | class Environment(BaseSettings):
7 | ENVIRONMENT: str = 'production'
8 |
9 | HOST_IP: str
10 | CONTAINER_PORT: int
11 |
--------------------------------------------------------------------------------
/robot-robbers/models/dtos.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from typing import List
3 |
4 |
5 | class RobotRobbersPredictRequestDto(BaseModel):
6 | state: List[List[List[int]]]
7 | reward: float
8 | is_terminal: bool
9 | total_reward: float
10 | game_ticks: int
11 |
12 |
13 | class RobotRobbersPredictResponseDto(BaseModel):
14 | moves: List[int]
15 |
--------------------------------------------------------------------------------
/robot-robbers/static/render.py:
--------------------------------------------------------------------------------
1 | def render(file: str, **kwargs) -> str:
2 | '''Renders a template file with placeholders of {{kwarg}} form with the provided value.'''
3 | try:
4 | with open(file, encoding="utf8") as fp:
5 | contents = fp.read()
6 |
7 | for key, value in kwargs.items():
8 | contents = contents.replace(f'{{{{{key}}}}}', str(value))
9 |
10 | return contents
11 | except Exception as e:
12 | print(e)
--------------------------------------------------------------------------------
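
`render` substitutes `{{key}}` placeholders in a template file with the keyword arguments it is given; `api.py` uses it to inject `host` and `port` into `static/index.html`. A tiny self-contained sketch of the substitution, using a throwaway template rather than the project's own page:

```python
# Assumes this is run from the project root so `static` is importable.
from static.render import render

# Hypothetical throwaway template, only to illustrate {{placeholder}} substitution.
with open('/tmp/example.html', 'w', encoding='utf8') as fp:
    fp.write('<a href="http://{{host}}:{{port}}/docs">API docs</a>')

print(render('/tmp/example.html', host='localhost', port=8000))
# -> <a href="http://localhost:8000/docs">API docs</a>
```
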
/sentiment-analysis/static/render.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | def render(file: str, **kwargs) -> str:
4 | '''Renders a template file with placeholders of {{kwarg}} form with the provided value.'''
5 | try:
6 | with open(file, encoding="utf8") as fp:
7 | contents = fp.read()
8 |
9 | for key, value in kwargs.items():
10 | contents = contents.replace(f'{{{{{key}}}}}', str(value))
11 |
12 | return contents
13 | except Exception as e:
14 | print(e)
15 |
--------------------------------------------------------------------------------
/pig-piglet-detection/static/render.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | def render(file: str, **kwargs) -> str:
4 | '''Renders a template file with placeholders of {{kwarg}} form with the provided value.'''
5 | try:
6 | with open(file, encoding="utf8") as fp:
7 | contents = fp.read()
8 |
9 | for key, value in kwargs.items():
10 | contents = contents.replace(f'{{{{{key}}}}}', str(value))
11 |
12 | return contents
13 | except Exception as e:
14 | print(e)
15 |
--------------------------------------------------------------------------------
/sentiment-analysis/router.py:
--------------------------------------------------------------------------------
1 | import random
2 | from fastapi import APIRouter
3 | from models.dtos import SentimentAnalysisRequestDto, SentimentAnalysisResponseDto
4 |
5 |
6 | router = APIRouter()
7 |
8 |
9 | @router.post('/predict', response_model=SentimentAnalysisResponseDto)
10 | def predict_endpoint(request: SentimentAnalysisRequestDto):
11 |
12 | predicted_scores = [random.randint(1,5) for _ in request.reviews]
13 |
14 | response = SentimentAnalysisResponseDto(
15 | scores=predicted_scores
16 | )
17 |
18 | return response
19 |
--------------------------------------------------------------------------------
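
The endpoint above takes a `SentimentAnalysisRequestDto` (a list of review strings) and returns a `SentimentAnalysisResponseDto` with one 1-5 score per review; the baseline simply draws random scores. A minimal client sketch, assuming the service runs on localhost:8000 and the router is mounted without a prefix so the route is `/predict`:

```python
import requests  # assumes the requests package is available

payload = {
    'reviews': [
        'Really happy with the quality!',
        'These will not charge and have not worked since they arrived!',
    ]
}

# Host, port and route are assumptions about a local deployment; adjust as needed.
response = requests.post('http://localhost:8000/predict', json=payload)
response.raise_for_status()
print(response.json())  # e.g. {'scores': [3, 1]} (random in the baseline)
```
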
/robot-robbers/utilities/logging/formatters.py:
--------------------------------------------------------------------------------
1 | from pprint import pformat
2 |
3 | from loguru._defaults import LOGURU_FORMAT
4 |
5 |
6 | def single_line_format(record: dict) -> str:
7 | """
8 | Custom format for loguru loggers.
9 | Uses pformat to pretty-print payload data (e.g. request/response bodies) during debugging.
10 | Also handles records from the standard logging module once loguru intercepts them.
11 | """
12 |
13 | format_string = LOGURU_FORMAT
14 | if record["extra"].get("payload") is not None:
15 | record["extra"]["payload"] = pformat(
16 | record["extra"]["payload"], indent=4, compact=True, width=88
17 | )
18 | format_string += "\n{extra[payload]}"
19 |
20 | format_string += "{exception}\n"
21 | return format_string
22 |
--------------------------------------------------------------------------------
/sentiment-analysis/utilities/logging/formatters.py:
--------------------------------------------------------------------------------
1 | from pprint import pformat
2 |
3 | from loguru._defaults import LOGURU_FORMAT
4 |
5 |
6 | def single_line_format(record: dict) -> str:
7 | """
8 | Custom format for loguru loggers.
9 | Uses pformat to pretty-print payload data (e.g. request/response bodies) during debugging.
10 | Also handles records from the standard logging module once loguru intercepts them.
11 | """
12 |
13 | format_string = LOGURU_FORMAT
14 | if record["extra"].get("payload") is not None:
15 | record["extra"]["payload"] = pformat(
16 | record["extra"]["payload"], indent=4, compact=True, width=88
17 | )
18 | format_string += "\n{extra[payload]}"
19 |
20 | format_string += "{exception}\n"
21 | return format_string
22 |
--------------------------------------------------------------------------------
/pig-piglet-detection/utilities/logging/formatters.py:
--------------------------------------------------------------------------------
1 | from pprint import pformat
2 |
3 | from loguru._defaults import LOGURU_FORMAT
4 |
5 |
6 | def single_line_format(record: dict) -> str:
7 | """
8 | Custom format for loguru loggers.
9 | Uses pformat to pretty-print payload data (e.g. request/response bodies) during debugging.
10 | Also handles records from the standard logging module once loguru intercepts them.
11 | """
12 |
13 | format_string = LOGURU_FORMAT
14 | if record["extra"].get("payload") is not None:
15 | record["extra"]["payload"] = pformat(
16 | record["extra"]["payload"], indent=4, compact=True, width=88
17 | )
18 | format_string += "\n{extra[payload]}"
19 |
20 | format_string += "{exception}\n"
21 | return format_string
22 |
--------------------------------------------------------------------------------
/robot-robbers/utilities/singleton.py:
--------------------------------------------------------------------------------
1 | def singleton(_class):
2 | """
3 | Wraps a class definition, ensuring that Class() constructor calls
4 | will instantiate an instance only once, to which all subsequent
5 | calls to Class() will refer.
6 |
7 | ```
8 | @singleton
9 | class MyClass():
10 | def __init__(self, ...):
11 | ...
12 |
13 | a = MyClass() # Instantiates MyClass
14 | b = MyClass() # Re-uses existing instantiation
15 |
16 | a == b # true
17 | ```
18 | """
19 | instances = {}
20 |
21 | def get(*args, **kwargs):
22 | if _class not in instances:
23 | instances[_class] = _class(*args, **kwargs)
24 |
25 | return instances[_class]
26 |
27 | return get
28 |
--------------------------------------------------------------------------------
/pig-piglet-detection/utilities/singleton.py:
--------------------------------------------------------------------------------
1 | def singleton(_class):
2 | """
3 | Wraps a class definition, ensuring that Class() constructor calls
4 | will instantiate an instance only once, to which all subsequent
5 | calls to Class() will refer.
6 |
7 | ```
8 | @singleton
9 | class MyClass():
10 | def __init__(self, ...):
11 | ...
12 |
13 | a = MyClass() # Instantiates MyClass
14 | b = MyClass() # Re-uses existing instantiation
15 |
16 | a == b # true
17 | ```
18 | """
19 | instances = {}
20 |
21 | def get(*args, **kwargs):
22 | if _class not in instances:
23 | instances[_class] = _class(*args, **kwargs)
24 |
25 | return instances[_class]
26 |
27 | return get
28 |
--------------------------------------------------------------------------------
/sentiment-analysis/utilities/singleton.py:
--------------------------------------------------------------------------------
1 | def singleton(_class):
2 | """
3 | Wraps a class definition, ensuring that Class() constructor calls
4 | will instantiate an instance only once, to which all subsequent
5 | calls to Class() will refer.
6 |
7 | ```
8 | @singleton
9 | class MyClass():
10 | def __init__(self, ...):
11 | ...
12 |
13 | a = MyClass() # Instantiates MyClass
14 | b = MyClass() # Re-uses existing instantiation
15 |
16 | a == b # true
17 | ```
18 | """
19 | instances = {}
20 |
21 | def get(*args, **kwargs):
22 | if _class not in instances:
23 | instances[_class] = _class(*args, **kwargs)
24 |
25 | return instances[_class]
26 |
27 | return get
28 |
--------------------------------------------------------------------------------
/robot-robbers/router.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from fastapi import APIRouter
3 | from models.dtos import RobotRobbersPredictResponseDto, RobotRobbersPredictRequestDto
4 |
5 |
6 | router = APIRouter()
7 |
8 |
9 | @router.post('/predict', response_model=RobotRobbersPredictResponseDto)
10 | def predict(request: RobotRobbersPredictRequestDto):
11 | # robots = [(x, y, w, h) for (x, y, w, h) in request.state[0] if x >= 0 and y >= 0]
12 | # scrooges = [(x, y, w, h) for (x, y, w, h) in request.state[1] if x >= 0 and y >= 0]
13 | # cashbags = [(x, y, w, h) for (x, y, w, h) in request.state[2] if x >= 0 and y >= 0]
14 | # dropspots = [(x, y, w, h) for (x, y, w, h) in request.state[3] if x >= 0 and y >= 0]
15 | # obstacles = request.state[4]
16 |
17 | # Your moves go here!
18 | n_robbers = 5
19 | moves = [np.random.randint(-1, 2) for _ in range(n_robbers * 2)]
20 |
21 | return RobotRobbersPredictResponseDto(
22 | moves=moves
23 | )
24 |
--------------------------------------------------------------------------------
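
The commented-out lines document the assumed layout of `request.state`: channel 0 holds robot `(x, y, w, h)` entries, 1 scrooges, 2 cashbags, 3 dropspots and 4 obstacles, with negative coordinates marking unused slots, and the response is a flat list of `(dx, dy)` pairs, one per robot. As an illustration only (not the intended solution), a greedy policy built on that layout could step each robot toward its nearest cashbag:

```python
# Illustrative greedy policy, assuming the state layout described in the comments above.
def greedy_moves(state) -> list:
    robots = [(x, y) for (x, y, w, h) in state[0] if x >= 0 and y >= 0]
    cashbags = [(x, y) for (x, y, w, h) in state[2] if x >= 0 and y >= 0]

    moves = []
    for rx, ry in robots:
        if not cashbags:
            moves += [0, 0]  # nothing to chase, stand still
            continue
        # Pick the cashbag with the smallest Manhattan distance and step toward it.
        tx, ty = min(cashbags, key=lambda c: abs(c[0] - rx) + abs(c[1] - ry))
        moves += [0 if tx == rx else (1 if tx > rx else -1),
                  0 if ty == ry else (1 if ty > ry else -1)]
    return moves
```

Inside `predict`, `moves = greedy_moves(request.state)` would then replace the random baseline moves.
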
/robot-robbers/utilities/exceptions.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | from fastapi import Request, FastAPI
3 | from starlette.responses import JSONResponse
4 |
5 |
6 | def value_error_exceptions(_: Request, exception: ValueError):
7 | return JSONResponse(
8 | status_code=500,
9 | content={
10 | 'errors': [str(exception), 'Something went wrong.']
11 | }
12 | )
13 |
14 |
15 | def handle_generic_exceptions(request: Request, exception: Exception):
16 | logger.error('Runtime error', request=request, exception=exception)
17 | return JSONResponse(
18 | status_code=500,
19 | content={
20 | 'errors': [str(exception), 'Something went wrong.']
21 | }
22 | )
23 |
24 |
25 | def configure_exception_handlers(app: FastAPI):
26 | app.add_exception_handler(
27 | ValueError, value_error_exceptions
28 | )
29 | app.add_exception_handler(
30 | Exception, handle_generic_exceptions
31 | )
32 |
--------------------------------------------------------------------------------
/pig-piglet-detection/utilities/exceptions.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | from fastapi import Request, FastAPI
3 | from starlette.responses import JSONResponse
4 |
5 |
6 | def value_error_exceptions(_: Request, exception: ValueError):
7 | return JSONResponse(
8 | status_code=500,
9 | content={
10 | 'errors': [str(exception), 'Something went wrong.']
11 | }
12 | )
13 |
14 |
15 | def handle_generic_exceptions(request: Request, exception: Exception):
16 | logger.error('Runtime error', request=request, exception=exception)
17 | return JSONResponse(
18 | status_code=500,
19 | content={
20 | 'errors': [str(exception), 'Something went wrong.']
21 | }
22 | )
23 |
24 |
25 | def configure_exception_handlers(app: FastAPI):
26 | app.add_exception_handler(
27 | ValueError, value_error_exceptions
28 | )
29 | app.add_exception_handler(
30 | Exception, handle_generic_exceptions
31 | )
32 |
--------------------------------------------------------------------------------
/sentiment-analysis/utilities/exceptions.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | from fastapi import Request, FastAPI
3 | from starlette.responses import JSONResponse
4 |
5 |
6 | def value_error_exceptions(_: Request, exception: ValueError):
7 | return JSONResponse(
8 | status_code=500,
9 | content={
10 | 'errors': [str(exception), 'Something went wrong.']
11 | }
12 | )
13 |
14 |
15 | def handle_generic_exceptions(request: Request, exception: Exception):
16 | logger.error('Runtime error', request=request, exception=exception)
17 | return JSONResponse(
18 | status_code=500,
19 | content={
20 | 'errors': [str(exception), 'Something went wrong.']
21 | }
22 | )
23 |
24 |
25 | def configure_exception_handlers(app: FastAPI):
26 | app.add_exception_handler(
27 | ValueError, value_error_exceptions
28 | )
29 | app.add_exception_handler(
30 | Exception, handle_generic_exceptions
31 | )
32 |
--------------------------------------------------------------------------------
/robot-robbers/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | BIND=$HOST_IP:$CONTAINER_PORT # Address to listen on.
4 | N_WORKERS=$N_WORKERS # The number of workers to run in parallel (provided by default in .prod.env)
5 | WORKER_CLASS=uvicorn.workers.UvicornWorker # The type of workers to use.
6 | TIMEOUT=300 # Workers silent for more than this many seconds are killed and restarted.
7 | GRACEFUL_TIMEOUT=120 # Timeout for graceful workers restart.
8 | MAX_REQUESTS=10000 # The maximum number of requests a worker will process before restarting (useful for preventing memory leaks)
9 | MAX_REQUESTS_JITTER=4 # The jitter causes the restart per worker to be randomized by randint(0, max_requests_jitter). This is intended to stagger worker restarts to avoid all workers restarting at the same time.
10 | LOG_FILE=gunicorn.log # Access/error logs from gunicorn
11 |
12 | exec gunicorn 'api:app' \
13 | --bind=$BIND \
14 | --workers=$N_WORKERS \
15 | --worker-class=$WORKER_CLASS \
16 | --timeout=$TIMEOUT \
17 | --max-requests=$MAX_REQUESTS \
18 | --max-requests-jitter=$MAX_REQUESTS_JITTER \
19 | --graceful-timeout=$GRACEFUL_TIMEOUT
--------------------------------------------------------------------------------
/pig-piglet-detection/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | BIND=$HOST_IP:$CONTAINER_PORT # Address to listen on.
4 | N_WORKERS=$N_WORKERS # The number of workers to run in parallel (provided by default in .prod.env)
5 | WORKER_CLASS=uvicorn.workers.UvicornWorker # The type of workers to use.
6 | TIMEOUT=300 # Workers silent for more than this many seconds are killed and restarted.
7 | GRACEFUL_TIMEOUT=120 # Timeout for graceful workers restart.
8 | MAX_REQUESTS=10000 # The maximum number of requests a worker will process before restarting (useful for preventing memory leaks)
9 | MAX_REQUESTS_JITTER=4 # The jitter causes the restart per worker to be randomized by randint(0, max_requests_jitter). This is intended to stagger worker restarts to avoid all workers restarting at the same time.
10 | LOG_FILE=gunicorn.log # Access/error logs from gunicorn
11 |
12 |
13 | exec gunicorn 'api:app' \
14 | --bind=$BIND \
15 | --workers=$N_WORKERS \
16 | --worker-class=$WORKER_CLASS \
17 | --timeout=$TIMEOUT \
18 | --max-requests=$MAX_REQUESTS \
19 | --max-requests-jitter=$MAX_REQUESTS_JITTER \
20 | --graceful-timeout=$GRACEFUL_TIMEOUT
21 |
--------------------------------------------------------------------------------
/sentiment-analysis/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | BIND=$HOST_IP:$CONTAINER_PORT # Address to listen on.
4 | N_WORKERS=$N_WORKERS # The number of workers to run in parallel (provided by default in .prod.env)
5 | WORKER_CLASS=uvicorn.workers.UvicornWorker # The type of workers to use.
6 | TIMEOUT=300 # Workers silent for more than this many seconds are killed and restarted.
7 | GRACEFUL_TIMEOUT=120 # Timeout for graceful workers restart.
8 | MAX_REQUESTS=10000 # The maximum number of requests a worker will process before restarting (useful for preventing memory leaks)
9 | MAX_REQUESTS_JITTER=4 # The jitter causes the restart per worker to be randomized by randint(0, max_requests_jitter). This is intended to stagger worker restarts to avoid all workers restarting at the same time.
10 | LOG_FILE=gunicorn.log # Access/error logs from gunicorn
11 |
12 |
13 | exec gunicorn 'api:app' \
14 | --bind=$BIND \
15 | --workers=$N_WORKERS \
16 | --worker-class=$WORKER_CLASS \
17 | --timeout=$TIMEOUT \
18 | --max-requests=$MAX_REQUESTS \
19 | --max-requests-jitter=$MAX_REQUESTS_JITTER \
20 | --graceful-timeout=$GRACEFUL_TIMEOUT
21 |
--------------------------------------------------------------------------------
/pig-piglet-detection/models/dtos.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from pydantic import BaseModel, validator
3 |
4 |
5 | class BoundingBoxClassification(BaseModel):
6 | class_id: int
7 | min_x: float
8 | min_y: float
9 | max_x: float
10 | max_y: float
11 | confidence: float
12 |
13 | @validator('min_x', 'min_y', 'max_x', 'max_y', 'confidence')
14 | def must_be_between_zero_one(cls, v):
15 | if v < 0.0 or v > 1.0:
16 | raise ValueError(
17 | f'Value must be between 0 and 1 ({v} given).')
18 | return v
19 |
20 | @validator('class_id')
21 | def must_be_binary(cls, v):
22 | if v not in [0, 1]:
23 | raise ValueError(
24 | f'Value must be either 0 or 1 ({v} given).')
25 | return v
26 |
27 | def __str__(self):
28 | class_str = 'pig ' if self.class_id == 0 else 'piglet'
29 | class_confidence = f'{self.confidence*100:.5f}%'
30 | x = f'{self.min_x:.2f} - {self.max_x:.2f}'
31 | y = f'{self.min_y:.2f} - {self.max_y:.2f}'
32 | return f'Class: {class_str} ({class_confidence}), X: {x}, Y: {y}.'
33 |
34 |
35 | class PredictRequestDto(BaseModel):
36 | img: str
37 |
38 |
39 | class PredictResponseDto(BaseModel):
40 | boxes: List[BoundingBoxClassification]
41 |
--------------------------------------------------------------------------------
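
The validators reject coordinates and confidences outside [0, 1] and class ids other than 0 (pig) or 1 (piglet), so malformed boxes fail before they reach a response. A short sketch of that behaviour:

```python
from pydantic import ValidationError
# Assumes this is run from the project root so `models` is importable.
from models.dtos import BoundingBoxClassification

box = BoundingBoxClassification(
    class_id=1, min_x=0.1, min_y=0.2, max_x=0.5, max_y=0.6, confidence=0.9
)
print(box)  # Class: piglet (90.00000%), X: 0.10 - 0.50, Y: 0.20 - 0.60.

try:
    BoundingBoxClassification(
        class_id=2, min_x=0.1, min_y=0.2, max_x=1.5, max_y=0.6, confidence=0.9
    )
except ValidationError as error:
    print(error)  # class_id and max_x both fail their validators
```
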
/robot-robbers/utilities/logging/sinks.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 | from typing import Any
4 |
5 | from loguru import logger
6 | from utilities.logging.formatters import single_line_format
7 |
8 |
9 | def add_terminal_sink(logger: logger):
10 | """
11 | Adds a log sink with the terminal as the destination.
12 | """
13 | logger.add(sys.stdout, level=logging.DEBUG, format=single_line_format)
14 |
15 |
16 | def add_file_sink(logger: logger, filename="emily.log", rotation="5 MB"):
17 | """
18 | Adds a log sink with a file as the destination.
19 | By default, the log file is rotated with a max file size of 5 MB.
20 | """
21 | logger.add(filename, level=logging.DEBUG,
22 | format=single_line_format, rotation=rotation)
23 |
24 |
25 | def add_custom_sink(logger: logger, sink: Any):
26 | """
27 | Adds a log sink with an arbitrary function handler as the destination.
28 | The sink handler is provided with a raw log record.
29 | See https://loguru.readthedocs.io/en/stable/api/logger.html#the-record-dict
30 | for details on the contents of a raw log record.
31 | """
32 | # In Loguru, a log message is simply a string with a special
33 | # property (message.record) that contains all contextual information
34 | # for custom processing of a log record.
35 | logger.add(lambda message: sink(message.record), level=logging.DEBUG)
36 |
--------------------------------------------------------------------------------
/pig-piglet-detection/utilities/logging/sinks.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 | from typing import Any
4 |
5 | from loguru import logger
6 | from utilities.logging.formatters import single_line_format
7 |
8 |
9 | def add_terminal_sink(logger: logger):
10 | """
11 | Adds a log sink with the terminal as the destination.
12 | """
13 | logger.add(sys.stdout, level=logging.DEBUG, format=single_line_format)
14 |
15 |
16 | def add_file_sink(logger: logger, filename="emily.log", rotation="5 MB"):
17 | """
18 | Adds a log sink with a file as the destination.
19 | By default, the log file is rotated with a max file size of 5 MB.
20 | """
21 | logger.add(filename, level=logging.DEBUG,
22 | format=single_line_format, rotation=rotation)
23 |
24 |
25 | def add_custom_sink(logger: logger, sink: Any):
26 | """
27 | Adds a log sink with an arbitrary function handler as the destination.
28 | The sink handler is provided with a raw log record.
29 | See https://loguru.readthedocs.io/en/stable/api/logger.html#the-record-dict
30 | for details on the contents of a raw log record.
31 | """
32 | # In Loguru, a log message is simply a string with a special
33 | # property (message.record) that contains all contextual information
34 | # for custom processing of a log record.
35 | logger.add(lambda message: sink(message.record), level=logging.DEBUG)
36 |
--------------------------------------------------------------------------------
/sentiment-analysis/utilities/logging/sinks.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 | from typing import Any
4 |
5 | from loguru import logger
6 | from utilities.logging.formatters import single_line_format
7 |
8 |
9 | def add_terminal_sink(logger: logger):
10 | """
11 | Adds a log sink with the terminal as the destination.
12 | """
13 | logger.add(sys.stdout, level=logging.DEBUG, format=single_line_format)
14 |
15 |
16 | def add_file_sink(logger: logger, filename="emily.log", rotation="5 MB"):
17 | """
18 | Adds a log sink with a file as the destination.
19 | By default, the log file is rotated with a max file size of 5 MB.
20 | """
21 | logger.add(filename, level=logging.DEBUG,
22 | format=single_line_format, rotation=rotation)
23 |
24 |
25 | def add_custom_sink(logger: logger, sink: Any):
26 | """
27 | Adds a log sink with an arbitrary function handler as the destination.
28 | The sink handler is provided with a raw log record.
29 | See https://loguru.readthedocs.io/en/stable/api/logger.html#the-record-dict
30 | for details on the contents of a raw log record.
31 | """
32 | # In Loguru, a log message is simply a string with a special
33 | # property (message.record) that contains all contextual information
34 | # for custom processing of a log record.
35 | logger.add(lambda message: sink(message.record), level=logging.DEBUG)
36 |
--------------------------------------------------------------------------------
/robot-robbers/run_game.py:
--------------------------------------------------------------------------------
1 | import pygame
2 | from game.environment import RobotRobbersEnv
3 |
4 | env = RobotRobbersEnv()
5 | env.reset(42)
6 | env.render()
7 | sample = env.observation_space.sample()
8 |
9 |
10 | current_key = None
11 |
12 |
13 | def get_move_from_keyboard():
14 | global current_key
15 |
16 | for event in pygame.event.get():
17 | if event.type == pygame.KEYDOWN:
18 | current_key = event.key
19 | elif event.type == pygame.KEYUP:
20 | current_key = None
21 |
22 | if current_key == pygame.K_LEFT:
23 | return (-1, 0)
24 | elif current_key == pygame.K_RIGHT:
25 | return (1, 0)
26 | elif current_key == pygame.K_UP:
27 | return (0, -1)
28 | elif current_key == pygame.K_DOWN:
29 | return (0, 1)
30 | else:
31 | return (0, 0)
32 |
33 |
34 | while True:
35 | move = get_move_from_keyboard()
36 |
37 | state, reward, is_done, info = env.step([
38 | *move, # Robot 1 moves according to arrow keys
39 | 0, 0, # All other robots stand still
40 | 0, 0,
41 | 0, 0,
42 | 0, 0
43 | ])
44 |
45 | # If you want to render the game as it runs, it's recommended to
46 | # run this script locally:
47 | #
48 | # ```bash
49 | # python3 -m venv .venv
50 | # source .venv/bin/activate
51 | # pip install -r requirements.txt
52 | # python run_game.py
53 | # ```
54 | #
55 | env.render()
56 |
--------------------------------------------------------------------------------
/robot-robbers/utilities/logging/handlers.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from fastapi import Request
4 | from loguru import logger
5 | from starlette.responses import Response
6 |
7 |
8 | class LoggingIntercepter(logging.Handler):
9 | """
10 | Default handler from the examples in the loguru documentation.
11 | See https://loguru.readthedocs.io/en/stable/overview.html#entirely-compatible-with-standard-logging
12 | """
13 |
14 | def emit(self, record: logging.LogRecord):
15 | # Get corresponding Loguru level if it exists
16 | try:
17 | level = logger.level(record.levelname).name
18 | except ValueError:
19 | level = record.levelno
20 |
21 | # Find caller from where originated the logged message
22 | frame, depth = logging.currentframe(), 2
23 | while frame.f_code.co_filename == logging.__file__:
24 | frame = frame.f_back
25 | depth += 1
26 |
27 | logger.opt(depth=depth, exception=record.exc_info).log(
28 | level, record.getMessage()
29 | )
30 |
31 |
32 | async def http_request_logging_middleware(request: Request, call_next) -> Response:
33 | """
34 | Intercepts all HTTP requests and responses to log rudimentary information.
35 | The request and response objects are assigned to the record.extra dict.
36 | """
37 | logger.info(f'HTTP {request.method} for {request.url}', request=request)
38 | response: Response = await call_next(request)
39 | logger.info(f'HTTP {response.status_code} for {request.url}',
40 | request=request, response=response)
41 | return response
42 |
--------------------------------------------------------------------------------
/pig-piglet-detection/utilities/logging/handlers.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from fastapi import Request
4 | from loguru import logger
5 | from starlette.responses import Response
6 |
7 |
8 | class LoggingIntercepter(logging.Handler):
9 | """
10 | Default handler from the examples in the loguru documentation.
11 | See https://loguru.readthedocs.io/en/stable/overview.html#entirely-compatible-with-standard-logging
12 | """
13 |
14 | def emit(self, record: logging.LogRecord):
15 | # Get corresponding Loguru level if it exists
16 | try:
17 | level = logger.level(record.levelname).name
18 | except ValueError:
19 | level = record.levelno
20 |
21 | # Find caller from where originated the logged message
22 | frame, depth = logging.currentframe(), 2
23 | while frame.f_code.co_filename == logging.__file__:
24 | frame = frame.f_back
25 | depth += 1
26 |
27 | logger.opt(depth=depth, exception=record.exc_info).log(
28 | level, record.getMessage()
29 | )
30 |
31 |
32 | async def http_request_logging_middleware(request: Request, call_next) -> Response:
33 | """
34 | Intercepts all HTTP requests and responses to log rudimentary information.
35 | The request and response objects are assigned to the record.extra dict.
36 | """
37 | logger.info(f'HTTP {request.method} for {request.url}', request=request)
38 | response: Response = await call_next(request)
39 | logger.info(f'HTTP {response.status_code} for {request.url}',
40 | request=request, response=response)
41 | return response
42 |
--------------------------------------------------------------------------------
/sentiment-analysis/utilities/logging/handlers.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from fastapi import Request
4 | from loguru import logger
5 | from starlette.responses import Response
6 |
7 |
8 | class LoggingIntercepter(logging.Handler):
9 | """
10 | Default handler from the examples in the loguru documentation.
11 | See https://loguru.readthedocs.io/en/stable/overview.html#entirely-compatible-with-standard-logging
12 | """
13 |
14 | def emit(self, record: logging.LogRecord):
15 | # Get corresponding Loguru level if it exists
16 | try:
17 | level = logger.level(record.levelname).name
18 | except ValueError:
19 | level = record.levelno
20 |
21 | # Find caller from where originated the logged message
22 | frame, depth = logging.currentframe(), 2
23 | while frame.f_code.co_filename == logging.__file__:
24 | frame = frame.f_back
25 | depth += 1
26 |
27 | logger.opt(depth=depth, exception=record.exc_info).log(
28 | level, record.getMessage()
29 | )
30 |
31 |
32 | async def http_request_logging_middleware(request: Request, call_next) -> Response:
33 | """
34 | Intercepts all HTTP requests and responses to log rudimentary information.
35 | The request and response objects are assigned to the record.extra dict.
36 | """
37 | logger.info(f'HTTP {request.method} for {request.url}', request=request)
38 | response: Response = await call_next(request)
39 | logger.info(f'HTTP {response.status_code} for {request.url}',
40 | request=request, response=response)
41 | return response
42 |
--------------------------------------------------------------------------------
/robot-robbers/api.py:
--------------------------------------------------------------------------------
1 | import uvicorn
2 | from fastapi import FastAPI
3 | from fastapi.middleware.cors import CORSMiddleware
4 | from starlette.responses import HTMLResponse
5 |
6 | from static.render import render
7 | from utilities.environment import Environment
8 | from utilities.logging.config import (initialize_logging,
9 | initialize_logging_middleware)
10 | from utilities.utilities import get_uptime
11 | from utilities.exceptions import configure_exception_handlers
12 |
13 | import router
14 |
15 |
16 | # --- Welcome to your Emily API! --- #
17 | # See the README for guides on how to test it.
18 |
19 | # Your API endpoints under http://yourdomain/api/...
20 | # are accessible from any origin by default.
21 | # Make sure to restrict access below to origins you
22 | # trust before deploying your API to production.
23 |
24 |
25 | app = FastAPI()
26 |
27 | initialize_logging()
28 | initialize_logging_middleware(app)
29 | configure_exception_handlers(app)
30 |
31 | app.add_middleware(
32 | CORSMiddleware,
33 | allow_origins=["*"],
34 | allow_credentials=True,
35 | allow_methods=["*"],
36 | allow_headers=["*"],
37 | )
38 |
39 | app.include_router(router.router, tags=['Robot Robbers'])
40 |
41 |
42 | @app.get('/api')
43 | def hello():
44 | return {
45 | "service": "robot-robbers-usecase",
46 | "uptime": get_uptime()
47 | }
48 |
49 |
50 | @app.get('/')
51 | def index():
52 | return HTMLResponse(
53 | render(
54 | 'static/index.html',
55 | host=Environment().HOST_IP,
56 | port=Environment().CONTAINER_PORT
57 | )
58 | )
59 |
60 |
61 | if __name__ == '__main__':
62 |
63 | uvicorn.run(
64 | 'api:app',
65 | host=Environment().HOST_IP,
66 | port=Environment().CONTAINER_PORT
67 | )
68 |
--------------------------------------------------------------------------------
/pig-piglet-detection/api.py:
--------------------------------------------------------------------------------
1 | import uvicorn
2 | from fastapi import FastAPI
3 | from fastapi.middleware.cors import CORSMiddleware
4 | from starlette.responses import HTMLResponse
5 |
6 | from static.render import render
7 | from utilities.environment import Environment
8 | from utilities.logging.config import (initialize_logging,
9 | initialize_logging_middleware)
10 | from utilities.utilities import get_uptime
11 | from utilities.exceptions import configure_exception_handlers
12 |
13 | import router
14 |
15 |
16 | # --- Welcome to your Emily API! --- #
17 | # See the README for guides on how to test it.
18 |
19 | # Your API endpoints under http://yourdomain/api/...
20 | # are accessible from any origin by default.
21 | # Make sure to restrict access below to origins you
22 | # trust before deploying your API to production.
23 |
24 |
25 | app = FastAPI()
26 |
27 | initialize_logging()
28 | initialize_logging_middleware(app)
29 | configure_exception_handlers(app)
30 |
31 | app.add_middleware(
32 | CORSMiddleware,
33 | allow_origins=["*"],
34 | allow_credentials=True,
35 | allow_methods=["*"],
36 | allow_headers=["*"],
37 | )
38 |
39 | app.include_router(router.router, tags=['Pig Detection'])
40 |
41 |
42 | @app.get('/api')
43 | def hello():
44 | return {
45 | "service": "piglet-detection-usecase",
46 | "uptime": get_uptime()
47 | }
48 |
49 |
50 | @app.get('/')
51 | def index():
52 | return HTMLResponse(
53 | render(
54 | 'static/index.html',
55 | host=Environment().HOST_IP,
56 | port=Environment().CONTAINER_PORT
57 | )
58 | )
59 |
60 |
61 | if __name__ == '__main__':
62 |
63 | uvicorn.run(
64 | 'api:app',
65 | host=Environment().HOST_IP,
66 | port=Environment().CONTAINER_PORT
67 | )
68 |
--------------------------------------------------------------------------------
/sentiment-analysis/api.py:
--------------------------------------------------------------------------------
1 | import uvicorn
2 | from fastapi import FastAPI
3 | from fastapi.middleware.cors import CORSMiddleware
4 | from starlette.responses import HTMLResponse
5 |
6 | from static.render import render
7 | from utilities.environment import Environment
8 | from utilities.logging.config import (initialize_logging,
9 | initialize_logging_middleware)
10 | from utilities.utilities import get_uptime
11 | from utilities.exceptions import configure_exception_handlers
12 |
13 | import router
14 |
15 | # --- Welcome to your Emily API! --- #
16 | # See the README for guides on how to test it.
17 |
18 | # Your API endpoints under http://yourdomain/api/...
19 | # are accessible from any origin by default.
20 | # Make sure to restrict access below to origins you
21 | # trust before deploying your API to production.
22 |
23 |
24 | app = FastAPI()
25 |
26 | initialize_logging()
27 | initialize_logging_middleware(app)
28 | configure_exception_handlers(app)
29 |
30 | app.add_middleware(
31 | CORSMiddleware,
32 | allow_origins=["*"],
33 | allow_credentials=True,
34 | allow_methods=["*"],
35 | allow_headers=["*"],
36 | )
37 |
38 | app.include_router(router.router, tags=['Sentiment analysis'])
39 |
40 |
41 | @app.get('/api')
42 | def hello():
43 | return {
44 | "service": "sentiment-analysis-usecase",
45 | "uptime": get_uptime()
46 | }
47 |
48 |
49 | @app.get('/')
50 | def index():
51 | return HTMLResponse(
52 | render(
53 | 'static/index.html',
54 | host=Environment().HOST_IP,
55 | port=Environment().CONTAINER_PORT
56 | )
57 | )
58 |
59 |
60 | if __name__ == '__main__':
61 |
62 | uvicorn.run(
63 | 'api:app',
64 | host=Environment().HOST_IP,
65 | port=Environment().CONTAINER_PORT
66 | )
67 |
--------------------------------------------------------------------------------
/pig-piglet-detection/router.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import random
3 | import base64
4 | import numpy as np
5 | from typing import List
6 | from loguru import logger
7 | from fastapi import APIRouter
8 | from models.dtos import PredictRequestDto, PredictResponseDto, BoundingBoxClassification
9 |
10 |
11 | router = APIRouter()
12 |
13 |
14 | @router.post('/predict', response_model=PredictResponseDto)
15 | def predict_endpoint(request: PredictRequestDto):
16 | img: np.ndarray = decode_request(request)
17 |
18 | dummy_bounding_boxes = predict(img)
19 | response = PredictResponseDto(
20 | boxes=dummy_bounding_boxes
21 | )
22 |
23 | return response
24 |
25 |
26 | def decode_request(request: PredictRequestDto) -> np.ndarray:
27 | encoded_img: str = request.img
28 | np_img = np.frombuffer(base64.b64decode(encoded_img), np.uint8)
29 | return cv2.imdecode(np_img, cv2.IMREAD_ANYCOLOR)
30 |
31 |
32 | def predict(img: np.ndarray) -> List[BoundingBoxClassification]:
33 | logger.info(f'Received image: {img.shape}')
34 | bounding_boxes: List[BoundingBoxClassification] = []
35 | for _ in range(random.randint(0, 9)):
36 | bounding_box: BoundingBoxClassification = get_dummy_box()
37 | bounding_boxes.append(bounding_box)
38 | logger.info(bounding_box)
39 | return bounding_boxes
40 |
41 |
42 | def get_dummy_box() -> BoundingBoxClassification:
43 | random_class = random.randint(0, 1) # 0 = PIG, 1 = PIGLET
44 | random_min_x = random.uniform(0, .9)
45 | random_min_y = random.uniform(0, .9)
46 | random_max_x = random.uniform(random_min_x + .05, 1)
47 | random_max_y = random.uniform(random_min_y + .05, 1)
48 | return BoundingBoxClassification(
49 | class_id=random_class,
50 | min_x=random_min_x,
51 | min_y=random_min_y,
52 | max_x=random_max_x,
53 | max_y=random_max_y,
54 | confidence=random.uniform(0, 1)
55 | )
56 |
--------------------------------------------------------------------------------
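
`decode_request` expects `img` to be a base64-encoded image, which is decoded with OpenCV before the (dummy) predictor runs. A minimal client sketch for posting one of the sample images, with host, port and route assumed for a local deployment:

```python
import base64
import requests  # assumes the requests package is available

with open('data/1.jpg', 'rb') as fp:
    encoded_img = base64.b64encode(fp.read()).decode('utf8')

# Host, port and route are assumptions about a local deployment; adjust as needed.
response = requests.post('http://localhost:8000/predict', json={'img': encoded_img})
response.raise_for_status()
for box in response.json()['boxes']:
    print(box)
```
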
/robot-robbers/utilities/logging/config.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from fastapi.applications import FastAPI
4 | from loguru import logger
5 | from utilities.logging.handlers import (LoggingIntercepter,
6 | http_request_logging_middleware)
7 | from utilities.logging.sinks import (add_custom_sink, add_file_sink,
8 | add_terminal_sink)
9 |
10 |
11 | def _clear_default_logging_handlers(prefix=''):
12 | """
13 | Clears the handlers for all existing loggers.
14 | Provide a logger prefix to limit the set of loggers
15 | to clear handlers for.
16 | """
17 | loggers = (
18 | logging.getLogger(name)
19 | for name in logging.root.manager.loggerDict
20 | if name.startswith(prefix)
21 | )
22 | for log in loggers:
23 | log.handlers = []
24 |
25 |
26 | def _clear_default_loguru_handlers():
27 | logger.configure(handlers=[])
28 |
29 |
30 | def initialize_logging():
31 | """
32 | Initializes logging handlers and sinks. New sinks and handlers
33 | should be registered in this function.
34 | """
35 |
36 | # Uvicorn is set up with default loggers.
37 | # We override them here in order to control how, when, and where
38 | # uvicorn (and all other) logs are handled.
39 | _clear_default_logging_handlers(prefix='uvicorn.')
40 | _clear_default_loguru_handlers()
41 |
42 | # Intercept all uvicorn logs so we can process them as we see fit
43 | logging.getLogger("uvicorn").handlers = [LoggingIntercepter()]
44 |
45 | # All logs emitted by 1) the intercepter and 2) all loguru.logger.* method calls
46 | # will be sent to a loguru sink. Sinks are simply destinations for logging data.
47 | # By default, we add two sinks; one for sending logs to the console and to a file.
48 | # To send logs to a database, e.g. an Elasticsearch instance, simply add a custom
49 | # sink to send the data there.
50 | # See https://loguru.readthedocs.io/en/stable/api/logger.html for details.
51 | add_file_sink(logger)
52 | add_terminal_sink(logger)
53 |
54 | # Arbitrary sinks to process raw log records (for sending to log databases for example)
55 | # can be configured as such:
56 |
57 | # add_custom_sink(logger, lambda record: print(
58 | # f'Received raw log record: {record}'
59 | # ))
60 |
61 |
62 | def initialize_logging_middleware(app: FastAPI):
63 | app.middleware("http")(http_request_logging_middleware)
64 |
--------------------------------------------------------------------------------
/pig-piglet-detection/utilities/logging/config.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from fastapi.applications import FastAPI
4 | from loguru import logger
5 | from utilities.logging.handlers import (LoggingIntercepter,
6 | http_request_logging_middleware)
7 | from utilities.logging.sinks import (add_custom_sink, add_file_sink,
8 | add_terminal_sink)
9 |
10 |
11 | def _clear_default_logging_handlers(prefix=''):
12 | """
13 | Clears the handlers for all existing loggers.
14 | Provide a logger prefix to limit the set of loggers
15 | to clear handlers for.
16 | """
17 | loggers = (
18 | logging.getLogger(name)
19 | for name in logging.root.manager.loggerDict
20 | if name.startswith(prefix)
21 | )
22 | for log in loggers:
23 | log.handlers = []
24 |
25 |
26 | def _clear_default_loguru_handlers():
27 | logger.configure(handlers=[])
28 |
29 |
30 | def initialize_logging():
31 | """
32 | Initializes logging handlers and sinks. New sinks and handlers
33 | should be registered in this function.
34 | """
35 |
36 | # Uvicorn is set up with default loggers.
37 | # We override them here in order to control how, when, and where
38 | # uvicorn (and all other) logs are handled.
39 | _clear_default_logging_handlers(prefix='uvicorn.')
40 | _clear_default_loguru_handlers()
41 |
42 | # Intercept all uvicorn logs so we can process them as we see fit
43 | logging.getLogger("uvicorn").handlers = [LoggingIntercepter()]
44 |
45 | # All logs emitted by 1) the intercepter and 2) all loguru.logger.* method calls
46 | # will be sent to a loguru sink. Sinks are simply destinations for logging data.
47 | # By default, we add two sinks; one for sending logs to the console and to a file.
48 | # To send logs to a database, e.g. an Elasticsearch instance, simply add a custom
49 | # sink to send the data there.
50 | # See https://loguru.readthedocs.io/en/stable/api/logger.html for details.
51 | add_file_sink(logger)
52 | add_terminal_sink(logger)
53 |
54 | # Arbitrary sinks to process raw log records (for sending to log databases for example)
55 | # can be configured as such:
56 |
57 | # add_custom_sink(logger, lambda record: print(
58 | # f'Received raw log record: {record}'
59 | # ))
60 |
61 |
62 | def initialize_logging_middleware(app: FastAPI):
63 | app.middleware("http")(http_request_logging_middleware)
64 |
--------------------------------------------------------------------------------
/sentiment-analysis/utilities/logging/config.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from fastapi.applications import FastAPI
4 | from loguru import logger
5 | from utilities.logging.handlers import (LoggingIntercepter,
6 | http_request_logging_middleware)
7 | from utilities.logging.sinks import (add_custom_sink, add_file_sink,
8 | add_terminal_sink)
9 |
10 |
11 | def _clear_default_logging_handlers(prefix=''):
12 | """
13 | Clears the handlers for all existing loggers.
14 | Provide a logger prefix to limit the set of loggers
15 | to clear handlers for.
16 | """
17 | loggers = (
18 | logging.getLogger(name)
19 | for name in logging.root.manager.loggerDict
20 | if name.startswith(prefix)
21 | )
22 | for log in loggers:
23 | log.handlers = []
24 |
25 |
26 | def _clear_default_loguru_handlers():
27 | logger.configure(handlers=[])
28 |
29 |
30 | def initialize_logging():
31 | """
32 | Initializes logging handlers and sinks. New sinks and handlers
33 | should be registered in this function.
34 | """
35 |
36 | # Uvicorn is set up with default loggers.
37 | # We override them here in order to control how, when, and where
38 | # uvicorn (and all other) logs are handled.
39 | _clear_default_logging_handlers(prefix='uvicorn.')
40 | _clear_default_loguru_handlers()
41 |
42 | # Intercept all uvicorn logs so we can process them as we see fit
43 | logging.getLogger("uvicorn").handlers = [LoggingIntercepter()]
44 |
45 | # All logs emitted by 1) the intercepter and 2) all loguru.logger.* method calls
46 | # will be sent to a loguru sink. Sinks are simply destinations for logging data.
47 | # By default, we add two sinks; one for sending logs to the console and to a file.
48 | # To send logs to a database, e.g. an Elasticsearch instance, simply add a custom
49 | # sink to send the data there.
50 | # See https://loguru.readthedocs.io/en/stable/api/logger.html for details.
51 | add_file_sink(logger)
52 | add_terminal_sink(logger)
53 |
54 | # Arbitrary sinks to process raw log records (for sending to log databases for example)
55 | # can be configured as such:
56 |
57 | # add_custom_sink(logger, lambda record: print(
58 | # f'Received raw log record: {record}'
59 | # ))
60 |
61 |
62 | def initialize_logging_middleware(app: FastAPI):
63 | app.middleware("http")(http_request_logging_middleware)
64 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | # .env # Needed to run the APIs
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
134 | # pytype static type analyzer
135 | .pytype/
136 |
137 | # Cython debug symbols
138 | cython_debug/
139 |
140 | # macOS
141 | .DS_Store
142 |
143 | *.pickle
144 | *.log
145 |
146 | */.base
147 | */.mounts
148 | */.devcontainer
149 | */.vscode
150 | docker-compose.yml
--------------------------------------------------------------------------------
/sentiment-analysis/data/data.csv:
--------------------------------------------------------------------------------
1 | 5 Really happy with the quality! Works for paper background.. Wasn’t sure at the beginning as the stand looked very unstable and kind of “slim legged” .. but as I’ve assembled it I’m very happy and satisfied with the quality it’s solid and strong! Really worth buying ! :)
2 | 1 I purchased two of these via Amazon and suspect they are frauds.I have 1 3 year old ps4 controller and these 2 new ones. The old controller battery lasts 4x longer vs the new ones from amazon. And the USB charging port on the controller is snug on the original vs extremely loose falls off on the two new ones.These ones being sold via amazon are junk, but unfort cs not doing much to assist me at this times with workable solutions (I'm intl).You have all been warned!
3 | 3 It was okay. Poor packaging. I did make use of a couple things, mainly the floatation piece and screws. The quality of the pieces was just “okay”. In hindsight I may have just bought the pieces separately from the main brand.
4 | 1 These will not charge and have not worked since they arrived! Very disappointed.
5 | 4 Bought this because my ancient DVD player finally gave up. Wanted to upgrade to a blue ray player but the gf has a very large DVD collection and they are still much cheaper. Have watched at least 25 dvds so far and love the up-scaling. Noticed a larger difference in the newer dvds than the older but i expected that. Remote is well organized and the small size of the player is nice. Sleek and un assuming.
6 | 5 Too easy with the app. I’m an old IT guy.. you kids have no idea how good you have it. This was just ridiculous how easy it is to install. Bypassed the routing function on AT&T modem and set this up to mask the old SSID in like 10 minutes tops.
7 | 2 Touchscreen was not as responsive with it in, so I took it off after a day. Wish I worked better
8 | 2 I received this item and installed it about 20 days later, and at least one of the bearings are bad. Noisy rotation, especially at speed, with a grinding noise. Overall, this manufacturer is normally quite good, but this one's fan bearing quality leaves something to be desired.
9 | 4 I bought this about 3 months ago to primarily have a 2nd monitor for my laptop when I travel. I like that it is lightweight and actually has a wider screen than my laptop but still fits in my laptop backpack. TheRe are two downsides: one is the screen brightness is not as bright as I’d like (and i have it as bright as it will go and the second downside is the power cord that plugs into an outlet is not very long. I either have to use an extension cord or be right next to a wall plug. Suggestion to the manufacturer is to make the power cord a little longer. Other than that it works well and is great for travel!
10 | 3 These chargers seem to be fairly hit and miss. I ordered one pair about three years ago and they survived two full years of wear and tear, however I bought a replacement set in January of this year and both of them have already broken (it’s now April) so it seems like you never really know what you’re going to get with these. The length is perfect, although I am extremely disappointed with this last set. I am the kind of person that is not hard on my Chargers at all, I’ve been able to make them last 3+ years before. I don’t know what it is with this last batch, but there is no reason they should’ve given out on me after only three months of use. If I hadn’t already passed my 30 day refund capability, I would be returning these as they are NOT up to par.
11 |
--------------------------------------------------------------------------------
/robot-robbers/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Emily-specific files
3 | .emily
4 | .idea
5 | .vscode
6 | .devcontainer
7 | .jupyter
8 |
9 | # Generic Python files (Source: https://github.com/github/gitignore/blob/main/Python.gitignore @ 2022-03-23):
10 |
11 | # Byte-compiled / optimized / DLL files
12 | __pycache__/
13 | *.py[cod]
14 | *$py.class
15 |
16 | # C extensions
17 | *.so
18 |
19 | # Distribution / packaging
20 | .Python
21 | build/
22 | develop-eggs/
23 | dist/
24 | downloads/
25 | eggs/
26 | .eggs/
27 | lib/
28 | lib64/
29 | parts/
30 | sdist/
31 | var/
32 | wheels/
33 | share/python-wheels/
34 | *.egg-info/
35 | .installed.cfg
36 | *.egg
37 | MANIFEST
38 |
39 | # PyInstaller
40 | # Usually these files are written by a python script from a template
41 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
42 | *.manifest
43 | *.spec
44 |
45 | # Installer logs
46 | pip-log.txt
47 | pip-delete-this-directory.txt
48 |
49 | # Unit test / coverage reports
50 | htmlcov/
51 | .tox/
52 | .nox/
53 | .coverage
54 | .coverage.*
55 | .cache
56 | nosetests.xml
57 | coverage.xml
58 | *.cover
59 | *.py,cover
60 | .hypothesis/
61 | .pytest_cache/
62 | cover/
63 |
64 | # Translations
65 | *.mo
66 | *.pot
67 |
68 | # Django stuff:
69 | *.log
70 | local_settings.py
71 | db.sqlite3
72 | db.sqlite3-journal
73 |
74 | # Flask stuff:
75 | instance/
76 | .webassets-cache
77 |
78 | # Scrapy stuff:
79 | .scrapy
80 |
81 | # Sphinx documentation
82 | docs/_build/
83 |
84 | # PyBuilder
85 | .pybuilder/
86 | target/
87 |
88 | # Jupyter Notebook
89 | .ipynb_checkpoints
90 |
91 | # IPython
92 | profile_default/
93 | ipython_config.py
94 |
95 | # pyenv
96 | # For a library or package, you might want to ignore these files since the code is
97 | # intended to run in multiple environments; otherwise, check them in:
98 | # .python-version
99 |
100 | # pipenv
101 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
102 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
103 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
104 | # install all needed dependencies.
105 | #Pipfile.lock
106 |
107 | # poetry
108 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
109 | # This is especially recommended for binary packages to ensure reproducibility, and is more
110 | # commonly ignored for libraries.
111 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
112 | #poetry.lock
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
--------------------------------------------------------------------------------
/pig-piglet-detection/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Emily-specific files
3 | .emily
4 | .idea
5 | .vscode
6 | .devcontainer
7 | .jupyter
8 |
9 | # Generic Python files (Source: https://github.com/github/gitignore/blob/main/Python.gitignore @ 2022-03-23):
10 |
11 | # Byte-compiled / optimized / DLL files
12 | __pycache__/
13 | *.py[cod]
14 | *$py.class
15 |
16 | # C extensions
17 | *.so
18 |
19 | # Distribution / packaging
20 | .Python
21 | build/
22 | develop-eggs/
23 | dist/
24 | downloads/
25 | eggs/
26 | .eggs/
27 | lib/
28 | lib64/
29 | parts/
30 | sdist/
31 | var/
32 | wheels/
33 | share/python-wheels/
34 | *.egg-info/
35 | .installed.cfg
36 | *.egg
37 | MANIFEST
38 |
39 | # PyInstaller
40 | # Usually these files are written by a python script from a template
41 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
42 | *.manifest
43 | *.spec
44 |
45 | # Installer logs
46 | pip-log.txt
47 | pip-delete-this-directory.txt
48 |
49 | # Unit test / coverage reports
50 | htmlcov/
51 | .tox/
52 | .nox/
53 | .coverage
54 | .coverage.*
55 | .cache
56 | nosetests.xml
57 | coverage.xml
58 | *.cover
59 | *.py,cover
60 | .hypothesis/
61 | .pytest_cache/
62 | cover/
63 |
64 | # Translations
65 | *.mo
66 | *.pot
67 |
68 | # Django stuff:
69 | *.log
70 | local_settings.py
71 | db.sqlite3
72 | db.sqlite3-journal
73 |
74 | # Flask stuff:
75 | instance/
76 | .webassets-cache
77 |
78 | # Scrapy stuff:
79 | .scrapy
80 |
81 | # Sphinx documentation
82 | docs/_build/
83 |
84 | # PyBuilder
85 | .pybuilder/
86 | target/
87 |
88 | # Jupyter Notebook
89 | .ipynb_checkpoints
90 |
91 | # IPython
92 | profile_default/
93 | ipython_config.py
94 |
95 | # pyenv
96 | # For a library or package, you might want to ignore these files since the code is
97 | # intended to run in multiple environments; otherwise, check them in:
98 | # .python-version
99 |
100 | # pipenv
101 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
102 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
103 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
104 | # install all needed dependencies.
105 | #Pipfile.lock
106 |
107 | # poetry
108 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
109 | # This is especially recommended for binary packages to ensure reproducibility, and is more
110 | # commonly ignored for libraries.
111 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
112 | #poetry.lock
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
--------------------------------------------------------------------------------
/sentiment-analysis/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Emily-specific files
3 | .emily
4 | .idea
5 | .vscode
6 | .devcontainer
7 | .jupyter
8 |
9 | # Generic Python files (Source: https://github.com/github/gitignore/blob/main/Python.gitignore @ 2022-03-23):
10 |
11 | # Byte-compiled / optimized / DLL files
12 | __pycache__/
13 | *.py[cod]
14 | *$py.class
15 |
16 | # C extensions
17 | *.so
18 |
19 | # Distribution / packaging
20 | .Python
21 | build/
22 | develop-eggs/
23 | dist/
24 | downloads/
25 | eggs/
26 | .eggs/
27 | lib/
28 | lib64/
29 | parts/
30 | sdist/
31 | var/
32 | wheels/
33 | share/python-wheels/
34 | *.egg-info/
35 | .installed.cfg
36 | *.egg
37 | MANIFEST
38 |
39 | # PyInstaller
40 | # Usually these files are written by a python script from a template
41 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
42 | *.manifest
43 | *.spec
44 |
45 | # Installer logs
46 | pip-log.txt
47 | pip-delete-this-directory.txt
48 |
49 | # Unit test / coverage reports
50 | htmlcov/
51 | .tox/
52 | .nox/
53 | .coverage
54 | .coverage.*
55 | .cache
56 | nosetests.xml
57 | coverage.xml
58 | *.cover
59 | *.py,cover
60 | .hypothesis/
61 | .pytest_cache/
62 | cover/
63 |
64 | # Translations
65 | *.mo
66 | *.pot
67 |
68 | # Django stuff:
69 | *.log
70 | local_settings.py
71 | db.sqlite3
72 | db.sqlite3-journal
73 |
74 | # Flask stuff:
75 | instance/
76 | .webassets-cache
77 |
78 | # Scrapy stuff:
79 | .scrapy
80 |
81 | # Sphinx documentation
82 | docs/_build/
83 |
84 | # PyBuilder
85 | .pybuilder/
86 | target/
87 |
88 | # Jupyter Notebook
89 | .ipynb_checkpoints
90 |
91 | # IPython
92 | profile_default/
93 | ipython_config.py
94 |
95 | # pyenv
96 | # For a library or package, you might want to ignore these files since the code is
97 | # intended to run in multiple environments; otherwise, check them in:
98 | # .python-version
99 |
100 | # pipenv
101 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
102 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
103 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
104 | # install all needed dependencies.
105 | #Pipfile.lock
106 |
107 | # poetry
108 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
109 | # This is especially recommended for binary packages to ensure reproducibility, and is more
110 | # commonly ignored for libraries.
111 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
112 | #poetry.lock
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
--------------------------------------------------------------------------------
/sentiment-analysis/README.md:
--------------------------------------------------------------------------------
1 | # Sentiment Analysis of Amazon Reviews
2 | In this use case, you will be receiving 1000 unique reviews of various electronic products found on Amazon. Your task is to predict the number of stars given for each review. See the image below for an illustration of the concept.
3 |
4 |
5 |
6 |
7 |
8 | The stars given are in the interval [1-5]. You'll receive 1000 reviews as a list of reviews. You should return the ratings as a list of floats corresponding to a rating for each review. You can find some example data in the `data` folder.
9 |
10 | ## Evaluation
11 | During the week of competition, you will be able to validate your solution against a validation set. The best score your model achieves on the validation set will be displayed on the scoreboard.
12 |
13 | Your model will be evaluated on how close your predictions are to the actual ratings. To be exact, the error for each review is the absolute distance between your prediction and the actual rating, and the average error over all reviews is used as your score, i.e. the evaluation metric is the mean absolute error.
14 | The score will be normalized in the interval 0.0 to 1.0 and flipped such that a low error grants the most points.
15 | The validation request times out after 30 seconds, so you need to make sure that your solution can handle 1000 reviews in under 30 seconds.
16 |
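As a reference for local testing, here is a minimal sketch of the error described above. The exact normalization used by the evaluation service is not published here, so the flipped score below is only illustrative (it assumes the worst possible error on a 1-5 scale is 4.0):

```python
import numpy as np

def mean_absolute_error(predictions, actual_ratings):
    """Average absolute distance between predicted and actual star ratings."""
    predictions = np.asarray(predictions, dtype=float)
    actual_ratings = np.asarray(actual_ratings, dtype=float)
    return float(np.abs(predictions - actual_ratings).mean())

def illustrative_score(mae, worst_error=4.0):
    """Flip the error so that a lower MAE gives a higher score (illustrative only)."""
    return 1.0 - min(mae, worst_error) / worst_error
```
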
17 | Notice that you can only submit once! We encourage you to validate your code and API before you submit your final model. You can find the documentation of your API where you can try out your model and verify the prediction.
18 | The documentation is by default found at `0.0.0.0:4242/docs`, where you can find the prediction endpoint for the use case.
19 |
20 |
21 | After evaluation, your final score will be provided. This score can be seen on the scoreboard shortly after.
22 |
23 | ## Getting started using Emily
24 | Once the repository is cloned, navigate to the folder using a terminal and type:
25 | ```
26 | emily open sentiment-analysis
27 | ```
28 | You will be prompted to select an application. For this use case, it might be beneficial to use a Natural Language Processing image, where you can select your preferred deep learning framework. Then select an editor of your choice to open the Emily template for the use case. A Docker container with a Python environment will be opened. Some content needs to be downloaded the first time a project is opened, which might take a bit of time. You can mount a folder with data to your project using the ```emily mount``` command.
29 |
30 | To take full advantage of Emily and the template, your code for prediction should go in api.py:
31 | ```python
32 | @app.post('/api/predict', response_model=SentimentAnalysisResponseDto)
33 | def predict(request: SentimentAnalysisRequestDto) -> SentimentAnalysisResponseDto:
34 |
35 | ratings = [random.randint(1, 5) for review in request.reviews]
36 |
37 | return SentimentAnalysisResponseDto(scores=ratings)
38 | ```
39 |
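Once the API is running, you can exercise the endpoint yourself before validating. A minimal sketch using `requests`; the host, port and payload shape follow the template above, and the two reviews are placeholders for the 1000 sent by the evaluation service:

```python
import requests

payload = {"reviews": ["Really happy with the quality!", "These will not charge."]}

response = requests.post("http://0.0.0.0:4242/api/predict", json=payload, timeout=30)
response.raise_for_status()

# Mirrors SentimentAnalysisResponseDto: one score per review.
print(response.json()["scores"])
```
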
40 | You can add new packages to the Python environment by adding their names to requirements.txt and restarting the project, or by running pip install in a terminal within the container; in the latter case the package is only installed temporarily, i.e. it will be gone if the project is restarted. Click here to visit the Emily documentation.
41 |
42 | ## Submission
43 | When you are ready for submission, click here for instructions on how to deploy with Emily. Then, head over to the Submission Form and submit your model by providing the host address for your API and the API key we have provided to you. Make sure that you have tested your connection to the API before you submit!
44 |
--------------------------------------------------------------------------------
/pig-piglet-detection/README.md:
--------------------------------------------------------------------------------
1 | # Pig & Piglet Detection
2 | In this use case, your task is to detect pigs and piglets and separate them from each other.
3 |
4 | You will be receiving images of animals, where you need to predict the bounding box locations of the pigs and piglets. The predictions should contain the top left and bottom right normalized coordinates of the predicted bounding box, as well as an object class and a confidence score. **Pigs have object class 0 and piglets have object class 1.** The confidence score is a value in the range [0-1] referring to the certainty of the detection. See the image below for an illustration of the concept. The validation set contains 59 samples and the test set contains 180 samples.
5 |
6 |
7 |
8 |
9 |
10 | The images have a maximum width of 800 px. A single image can contain up to 33 objects. **You have 10 seconds to return your predictions for each image.**
11 | Samples with piglets will only contain what we deem a *visually clear piglet*. Thus, if it has been difficult to assess whether an image contains a pig or a piglet, the image has been removed. Such an example is given below.
12 |
13 |
14 |
15 |
16 | Two annotated sample images are found in the ```data``` folder. The annotations are based on the ```YOLO v1.1``` format.
17 |
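For local experiments, here is a minimal sketch of reading those annotation files. It assumes the standard YOLO txt layout (`class x_center y_center width height`, with all coordinates normalized to the image size), so verify it against the two provided samples before relying on it:

```python
def load_yolo_annotations(txt_path: str):
    """Read one YOLO-style annotation file into (class_id, xmin, ymin, xmax, ymax) tuples."""
    boxes = []
    with open(txt_path) as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue
            class_id = int(parts[0])  # 0 = pig, 1 = piglet
            x_c, y_c, w, h = map(float, parts[1:5])
            # Convert centre/size to normalized top-left and bottom-right corners.
            boxes.append((class_id, x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2))
    return boxes
```
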
18 | ## Evaluation
19 | During the week of the competition, you will be able to validate your solution against a validation set. The best score your model achieves on the validation set will be displayed on the scoreboard.
20 |
21 | Your model will be evaluated using the COCO mean Average Precision (COCO mAP). The score ranges from [0-1], with 1 being the highest score.
22 | The validation request times out after 10 seconds, so you need to make sure that your solution can handle an image in under 10 seconds.
23 |
24 | Notice that you can only submit once! We encourage you to validate your code and API before you submit your final model. You can find the documentation of your API where you can try out your model and verify the prediction.
25 | The documentation is by default found at `0.0.0.0:4242/docs`, where you can find the prediction endpoint for the use case.
26 |
27 | After evaluation, your final score will be provided. This score can be seen on the scoreboard shortly after.
28 |
29 | ### Evaluation Metric
30 | We use the mean-average-precision package to evaluate your model. Specifically, we use:
31 | ```python
32 | from mean_average_precision import MetricBuilder
33 | metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=2)
34 | # add predictions
35 | print(f"COCO mAP: {metric_fn.value(iou_thresholds=np.arange(0.5, 1.0, 0.05), recall_thresholds=np.arange(0., 1.01, 0.01), mpolicy='soft')['mAP']}")
36 | ```
37 |
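A sketch of how predictions and ground truth can be fed to this metric; the row layouts below follow the mean-average-precision package's documented conventions (`[xmin, ymin, xmax, ymax, class_id, confidence]` for predictions and `[xmin, ymin, xmax, ymax, class_id, difficult, crowd]` for ground truth), and the coordinates are placeholders:

```python
import numpy as np
from mean_average_precision import MetricBuilder

metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=2)

# One predicted box: [xmin, ymin, xmax, ymax, class_id, confidence] (placeholder values).
preds = np.array([[0.10, 0.20, 0.40, 0.60, 0, 0.95]])
# One ground-truth box: [xmin, ymin, xmax, ymax, class_id, difficult, crowd].
gt = np.array([[0.12, 0.22, 0.41, 0.58, 0, 0, 0]])

metric_fn.add(preds, gt)
result = metric_fn.value(
    iou_thresholds=np.arange(0.5, 1.0, 0.05),
    recall_thresholds=np.arange(0., 1.01, 0.01),
    mpolicy='soft'
)
print(f"COCO mAP: {result['mAP']}")
```
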
38 | ## Getting started using Emily
39 | Once the repository is cloned, navigate to the folder using a terminal and type:
40 | ```
41 | emily open pig-piglet-detection
42 | ```
43 | You will be prompted to select an application. Please note that you need to select a Computer Vision image if you want to use OpenCV. Afterwards, you can select your preferred deep learning framework. Then select an editor of your choice to open the Emily template for the use case. A Docker container with a Python environment will be opened. Some content needs to be downloaded the first time a project is opened, which might take a bit of time. You can mount a folder with data to your project using the ```emily mount``` command.
44 |
45 | A dummy prediction endpoint has been created in ```router.py```. The prediction uses the DTOs from ```models/dtos.py``` to ensure that the request and response have the correct format. To take full advantage of Emily and the template, your code for prediction should go here:
46 | ```python
47 | @router.post('/predict', response_model=PredictResponseDto)
48 | def predict_endpoint(request: PredictRequestDto):
49 | img: np.ndarray = decode_request(request)
50 |
51 | dummy_bounding_boxes = predict(img)
52 | response = PredictResponseDto(
53 | boxes=dummy_bounding_boxes
54 | )
55 |
56 | return response
57 | ```
58 | You can add new packages to the Python environment by adding their names to requirements.txt and restarting the project, or by running pip install in a terminal within the container; in the latter case the package is only installed temporarily, i.e. it will be gone if the project is restarted. Click here to visit the Emily documentation.
59 |
60 | ## Submission
61 | When you are ready for submission, click here for instructions on how to deploy with Emily. Then, head over to the Submission Form and submit your model by providing the host address for your API and the API key we have provided to you. Make sure that you have tested your connection to the API before you submit!
62 |
--------------------------------------------------------------------------------
/sentiment-analysis/static/index.html:
--------------------------------------------------------------------------------
(HTML markup, styles and scripts stripped during extraction; the recoverable visible text of the template landing page follows)
Document
Your Emily API is running!
To begin, try some of the following:
↪️ Run curl http://{{host}}:{{port}}/api to test your API endpoint
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
3 | In this repository, you will find all the information needed to participate in the event. Please read all of the information before proceeding to the use cases, and make sure to read the full description of every use case. You will be granted points for every use case that you provide a submission for, and a total score will be calculated based on the individual submissions.
4 |
5 |
6 |
Use cases
7 | Below you can find the three use cases for the DM i AI 2022 event.
8 | Within each use case, you will find a description together with a template that can be used to set up an API endpoint.
9 | The API endpoint will be used for submission and is required. Emily can help with setting up the API, but you should feel free to set it up on your own. The requirements for the API endpoints are specified in the respective use cases.
13 |
14 | Clone this GitHub repository to download Emily templates for all three use cases.
15 | ```
16 | git clone https://github.com/amboltio/DM-i-AI-2022.git
17 | ```
18 | Inside the DM-i-AI-2022 folder, you will find the three use cases. To open a use case with Emily, type `emily open ` followed by the use case name, e.g. `emily open robot-robbers` to open the last use case.
19 |
20 |
Emily CLI
21 | The Emily CLI is built and maintained by Ambolt to help developers and teams implement and run production-ready, machine-learning-powered microservices quickly and easily. Click here for getting started with Emily. If you sign up for Emily using your student email, you can get free access to the full Emily-CLI with Emily Academy.
22 | Emily can assist you with developing the required API endpoints for the use cases. Together with every use case a predefined and documented template is provided to ensure correct API endpoints and DTOs for the specific use case. You can find the documentation of the entire framework here.
23 | The use case templates have been built on top of the FastAPI framework, which is used to specify the endpoints in every use case.
24 |
25 |
Discord Channel
26 | Come hang out and talk to other competitors of the event on our Discord channel. Discuss the use cases with each other or get in touch with any of the Ambolt staff, to solve issues or questions that may arise during the competition. Join here!
27 |
28 |
Getting started without Emily
29 |
30 | You are not required to use Emily for competing in this event; however, we strongly recommend using Emily if you are not an expert in developing APIs and microservices. If you choose not to use Emily, you should check the individual template and find the requirements for the different API endpoints. These have to be exactly the same for the evaluation service to work. Inside ```/models/dtos.py``` you can find information on the request and response DTOs, describing the input and output requirements for your API.
31 |
32 |
Submission
33 | When you are ready for submission, head over to the Submission Form and submit your solution for a use case by providing the host address for your API and the API key we have provided to you. Make sure that you have tested and validated your connection to the API before you submit!
34 | Click here for a guide on how to deploy your API with Emily (it is recommended that you go through the full getting started guide).
35 |
36 | **You can only submit once per use case.** We highly recommend that you validate your solution before submitting. You can do this on the submission form by using the `QUEUE VALIDATION ATTEMPT` button. You can validate as many times as you like, but you can only evaluate once per use case. When you queue validation, your score from the run will show up on the scoreboard, so you can see how you compare to the other teams.
37 |
38 | When you validate your solution on the submission form, it will be evaluated on a validation set. When you submit your solution and get the final score for that use case, your solution will be evaluated on a **test set which is different from the validation set**. This means that the score you obtained when validating your solution may be different from the score you get when evaluating. Therefore, we encourage you not to overfit to the validation set!
39 |
40 |
Ranked score and total score
41 | The scoreboard will display a score for each use case and a "total score".
42 | The individual score reflects the placement your best model has achieved relative to the other participants' models.
43 |
44 | The total score is simply an average of your individual scores.
45 |
46 | This format also means that you can lose points / be overtaken by other teams during the week if they submit a model that is better than yours.
47 |
48 |
Deadline for submission
49 | The deadline for submission is Monday the 10th of October at 14:00.
50 |
51 |
Final evaluation
52 |
53 | Upon completion of the contest, the top 3 highest-ranking teams will be asked to submit their training code and the trained models for validation no later than Tuesday the 11th of October at 14:00 (24 hours after the deadline). The final ranking is announced Friday the 14th of October.
54 |
55 |
How to get a server for deployment?
56 | When you submit, we expect you to host the server on which the REST API is deployed. You can sign up for Azure for Students, where you will get free credits that you can use to create a virtual machine. We expect you all to be able to do this, since the competition is only for students. Alternatively, you can deploy your submission locally (this requires a public IP).
57 | The following contains the necessary links for creating a virtual machine:
62 |
63 | Please make sure to get a server up and running early in the competition, and establish a connection to the evaluation service as quickly as possible, so that if you have any server-related issues, we can catch them early rather than close to the deadline!
64 |
65 |
66 |
What if I have already used my Azure student credits?
67 | If you have already used your credits, reach out to us either on Discord or at DMiAI@ambolt.io and we will help you out. However, we cannot provide you with GPU servers, so remember to design your solutions such that they can run inference within the time constraints specified for the individual use cases.
68 |
69 | **Please note, that we do not provide servers for training!** You are expected to train your models and solutions using your own hardware, Google Colab, etc.
70 |
71 |
72 |
Frequently Asked Questions
73 |
74 | **Q: Can I use a pretrained model I found on the internet?**
75 |
76 | **A:** Yes, you are allowed to use a pretrained model for your task. If you can find a pretrained model that fits your purpose, you will save a lot of time, just as you would if you were solving a problem for a company.
77 |
78 | **Q: Should we gather our own data?**
79 |
80 | **A:** Yes. You will not be supplied with data from us. If you need any data to train a model, you should gather the data yourself. We are not supplying data, as this might limit the creativity and the freedom of how you approach the use case.
81 |
82 | **Q: How do I use Emily to deploy my service?**
83 |
84 | **A:** Emily can help you with the deployment of your service. In most cases you can handle deployment by typing `emily deploy `, after which you will be asked several questions guiding you towards deployment on your server. In this guide you can read more about how to get started using Emily.
85 |
--------------------------------------------------------------------------------
/robot-robbers/README.md:
--------------------------------------------------------------------------------
1 | # Robot Robbers
2 | In this use case, you should develop a system for playing robot robbers.
3 | The Robot Robbers game is an interactive 2D game where you, the player, control 5 robots trying to steal money from angry scrooges.
4 |
5 |
6 |
7 |
8 | The objective of this use case is to reach the highest possible reward within 2 wall-clock minutes. Balance the trade-off between time and performance well!
9 |
10 | ## About the game
11 | Every game runs in a 128x128 grid environment. Every game is initialized with:
12 |
13 | 1. 5 robots (controlled by the player)
14 | 2. 7 scrooges (controlled by the game)
15 | 3. 5 cashbags
16 | 4. 3 dropspots
17 | 5. Between 2-5 obstacles, the height and width of which range between 1 and 20.
18 |
19 | All of these are randomly placed at every game start.
20 | You will receive the state at your prediction endpoint for every game tick.
21 |
22 | ### Controlling robots
23 |
24 |
25 |
26 | You have to control all 5 robots for every game tick. Every robot can only move 1 step at a time in a horizontal, vertical or diagonal direction. Movement instructions have to be provided as deltas `x` and `y`.
27 |
28 | For example:
29 | ```python
30 | moves = [
31 | 1, 1, # Move robot 0 one cell to the right, one cell down
32 | -1, -1, # Move robot 1 one cell to the left, one cell up
33 | 0, 0 # Make robot 2 stand still
34 | ]
35 | ```
36 |
37 | ### Cashbags & Dropspots
38 |
39 |
40 |
41 |
42 |
43 | When a robot robber intersects with a cashbag, the robot picks it up. When a robot carrying cashbags intersects with a dropspot, the cashbags are deposited and a reward is provided.
44 |
45 | The reward for depositing cashbags is the square of the number of cashbags carried, e.g.:
46 |
47 | 1. Carrying 1 cashbag -> reward of 1
48 | 2. Carrying 2 cashbags -> reward of 4
49 | 3. Carrying 3 cashbags -> reward of 9
50 |
51 | However, robots become burdened by carrying cashbags and they move slower the more they carry:
52 | 1. Robot speed (0 cashbags): 1 tick / move
53 | 2. Robot speed (1 cashbag): 2 ticks / move
54 | 3. Robot speed (2 cashbags): 3 ticks / move
55 |
56 | The number of cashbags on the screen always remains the same. Cashbags respawn when they are deposited or when they are taken away by scrooges.
57 |
58 |
59 | ### Scrooges
60 |
61 |
62 |
63 |
64 | The scrooges are the game antagonists. They will try their very best to keep the cashbags from being stolen.
65 |
66 | Initially, scrooges will move around randomly on the map.
67 |
68 | If a robot carrying cashbags intersects with a scrooge, the cashbags are taken away and the player receives a -3 reward penalty.
69 |
70 | If a robot is within a distance of 15 units of a scrooge, the scrooge will chase the robot until:
71 |
72 | * The scrooge reaches the robot, at which point the robot will not be chased again by any scrooge for 100 game ticks.
73 | * The robot comes out of range, at which point the scrooge will wander randomly again.
74 |
75 | Scrooges always move at the speed of 2 ticks / move.
76 |
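The environment's `_dist` helper measures this chase radius with plain Euclidean distance, so you can check on your own side whether a robot is currently at risk. A minimal sketch, where positions are the `(x, y)` values taken from the state:

```python
import math

SCROOGE_RADIUS = 15  # detection radius described above

def is_chased(robot_xy, scrooge_xy, radius=SCROOGE_RADIUS):
    """True if the robot is within the scrooge's detection radius."""
    dx = robot_xy[0] - scrooge_xy[0]
    dy = robot_xy[1] - scrooge_xy[1]
    return math.hypot(dx, dy) < radius
```
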
77 | ## Rules
78 | * Robots and scrooges cannot move outside of the grid.
79 | * Robots and scrooges can only move one unit in either direction in a single game tick.
80 | * Robots and scrooges cannot move through obstacles.
81 |
82 | ## Interaction
83 | You'll receive a `RobotRobbersPredictRequestDto` which contains the following:
84 | ```python
85 | class RobotRobbersPredictRequestDto(BaseModel):
86 | state: List[List[List[int]]]
87 | reward: float
88 | is_terminal: bool
89 | total_reward: float
90 | game_ticks: int
91 | ```
92 | Where **state** is composed of the following:
93 | Given an observation matrix $M \in \mathbb{Z}^{6 \times 10 \times 4}$, the contents are as follows:
94 |
95 | 1. $M_{0}$ is an array of 4-d vectors containing the $x, y, w, h$ of all **robot robbers** ($w, h$ is always $1$).
96 | 2. $M_{1}$ is an array of 4-d vectors containing the $x, y, w, h$ of all **scrooges** ($w, h$ is always $1$).
97 | 3. $M_{2}$ is an array of 4-d vectors containing the $x, y, w, h$ of all **cashbags** ($w, h$ is always $1$).
98 | 4. $M_{3}$ is an array of 4-d vectors containing the $x, y, w, h$ of all **dropspots** ($w, h$ is always $1$).
99 | 5. $M_{4}$ is an array of 4-d vectors containing the $x, y, w, h$ of all **obstacles**.
100 | 6. $M_{5}$ is an array of 4-d vectors where the first element of the vector is the number of cashbags carried by the robot robber with the same index. The rest of the vector elements are always $0$.
101 | - For example, given the robber $i$ at $M_{0,i}$, the number of cashbags carried by this robber is $M_{5, i, 0}$.
102 |
103 | Each row of the observation matrix contains 10 4-d vectors, but not all vectors represent active game elements.
104 | Inactive game elements (e.g., the last 5 vectors in $M_{0}$) are represented by their position being placed outside the game grid `(-1, -1)`.
105 | > For example, the vector of an inactive cashbag (cashbags are inactive while being carried by a robot) will always be `(-1, -1, 1, 1)`.
106 |
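To make this layout concrete, here is a minimal sketch of reading one robber's position and its carried cashbag count from a received state; the indexing follows the matrix description above:

```python
import numpy as np

def robber_info(state, i):
    """Return ((x, y), carried) for robot robber i, or None if it is inactive."""
    m = np.array(state)          # shape (6, 10, 4)
    x, y, w, h = m[0, i]         # M_0: robot robbers
    if x < 0 or y < 0:
        return None              # inactive element, placed at (-1, -1)
    carried = int(m[5, i, 0])    # M_5[i][0]: cashbags carried by robber i
    return (int(x), int(y)), carried
```
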
107 | * **reward** is the reward gained in the current game tick; any points you gain or lose will appear here.
108 | * **total_reward** is the total score for your current game.
109 | * **game_ticks** is the number of game ticks elapsed in the current game.
110 |
111 | From this information you should be able to predict your next move and return:
112 | ```python
113 | class RobotRobbersPredictResponseDto(BaseModel):
114 | moves: List[int]
115 | ```
116 | which is a 10-d vector of moves:
117 | moves = [ $Δ_{x,0}$, $Δ_{y,0}$, $Δ_{x,1}$, $Δ_{y,1}$, ... $Δ_{x,4}$, $Δ_{y,4}$ ]
118 | where $Δ_{x,n}$ and $Δ_{y,n}$ are the change in x and y direction for robot n.
119 |
120 |
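As an illustration of producing such a vector, here is a deliberately naive baseline that sends every robot one step towards its nearest cashbag. It ignores scrooges and obstacles, so treat it as a sketch rather than a strategy:

```python
import numpy as np

def greedy_moves(state):
    """Naive baseline: step every robot towards the nearest cashbag."""
    robots   = [(x, y) for (x, y, w, h) in state[0] if x >= 0 and y >= 0]
    cashbags = [(x, y) for (x, y, w, h) in state[2] if x >= 0 and y >= 0]

    moves = []
    for rx, ry in robots:
        if not cashbags:
            moves += [0, 0]
            continue
        # Pick the closest cashbag (squared Euclidean distance) and step towards it.
        cx, cy = min(cashbags, key=lambda c: (c[0] - rx) ** 2 + (c[1] - ry) ** 2)
        moves += [int(np.sign(cx - rx)), int(np.sign(cy - ry))]

    # The response must always contain 10 values (5 robots x 2 deltas).
    return moves + [0, 0] * (5 - len(robots))
```
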
121 | ## Evaluation
122 | During the week of the competition, you will be able to validate your solution against a validation seed. The best score your model achieves on the validation seed will be displayed on the scoreboard. You'll have exactly 2 minutes to play the game for each attempt.
123 |
124 | Once you have developed your solution, you should submit it for evaluation.
125 |
126 | Notice that you can only submit once! We encourage you to validate your code and API before you submit your final model. You can find the documentation of your API where you can try out your model and verify the prediction.
127 | The documentation is by default found at `0.0.0.0:4242/docs`, where you can find the prediction endpoint for the use case.
128 |
129 | After evaluation, your final score will be provided. This score can be seen on the scoreboard shortly after and cannot be changed.
130 |
131 | ## Getting started using Emily
132 | Once the repository is cloned, navigate to the folder using a terminal and type:
133 | ```
134 | emily open robot-robbers
135 | ```
136 | A Docker container with a Python environment will be opened. Some content needs to be downloaded the first time a project is opened, which might take a bit of time.
137 |
138 | A dummy response has been created in ```router.py```. To take full advantage of Emily and the template, your code for the moves should go here:
139 | ```python
140 |
141 | @router.post('/predict', response_model=RobotRobbersPredictResponseDto)
142 | def predict(request: RobotRobbersPredictRequestDto):
143 | # robots = [(x, y, w, h) for (x, y, w, h) in request.state[0] if x >= 0 and y >= 0]
144 | # scrooges = [(x, y, w, h) for (x, y, w, h) in request.state[1] if x >= 0 and y >= 0]
145 | # cashbags = [(x, y, w, h) for (x, y, w, h) in request.state[2] if x >= 0 and y >= 0]
146 | # dropspots = [(x, y, w, h) for (x, y, w, h) in request.state[3] if x >= 0 and y >= 0]
147 | # obstacles = request.state[4]
148 |
149 | # Your moves go here!
150 | n_robbers = 5
151 | moves = [np.random.randint(-1, 2) for _ in range(n_robbers * 2)]
152 |
153 | return RobotRobbersPredictResponseDto(
154 | moves=moves
155 | )
156 | ```
157 | You can add new packages to the Python environment by adding their names to requirements.txt and restarting the project, or by running pip install in a terminal within the container; in the latter case the package is only installed temporarily, i.e. it will be gone if the project is restarted. Click here to visit the Emily documentation.
158 |
159 | ## Run the game locally
160 | To better explore the game's behavior, you can run the game locally.
161 |
162 | ### Virtual Environment
163 | Start by setting up the environment, for example using either venv or conda:
164 | ```shell
165 | python -m venv .venv
166 | source .venv/bin/activate # bash
167 | .venv\Scripts\activate.bat # cmd
168 | .venv\Scripts\Activate.ps1 # PowerShell
169 | ```
170 |
171 | ```shell
172 | conda create -n robot_robbers
173 | conda activate robot_robbers
174 | ```
175 |
176 | ### Install dependencies
177 | ```shell
178 | pip install -r requirements.txt
179 | ```
180 |
181 | ### Experiment locally
182 | You can either play the game using the `run_game.py` script, or create your own script for local experiments.
183 |
184 | To create a custom script, start by initializing the environment:
185 | ```python
186 | from game.environment import RobotRobbersEnv
187 |
188 | env = RobotRobbersEnv()
189 | state = env.reset()
190 | ```
191 | You can then move your robots and receive the new state:
192 | ```python
193 | while True:
194 | moves = [randint(-1,1) for _ in range(env.n_robbers * 2)]
195 | state, reward, is_done, info = env.step(moves)
196 | ```
197 | You can also view and alter the environment itself, but be aware of the bias you are introducing.
198 | ```python
199 | env.max_n_scrooges = 1
200 | env.max_n_dropspots = 1
201 | env.max_n_cashbags = 1
203 | env.max_n_obstacles = 0
204 | env.n_robbers = 1
205 | state = env.reset()
206 | ```
207 |
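If you want to watch the game while experimenting, the pieces above can be combined into a small local loop. A minimal sketch using random moves and one environment step per rendered frame; `env.render()` opens a pygame window, so pygame from `requirements.txt` must be installed:

```python
from random import randint

from game.environment import RobotRobbersEnv

env = RobotRobbersEnv()
state = env.reset(seed=42)

for _ in range(1000):
    # One random delta pair per robot, values in {-1, 0, 1}.
    moves = [randint(-1, 1) for _ in range(env.n_robbers * 2)]
    state, reward, is_done, info = env.step(moves)
    env.render()

print("Total reward:", info["total_reward"])
```
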
208 | ## Submission
209 | When you are ready for submission, click here for instructions on how to deploy. Then, head over to the Submission Form and submit your model by providing the host address for your API and the API key we have provided to you. Make sure that you have tested your connection to the API before you submit!
210 |
--------------------------------------------------------------------------------
/robot-robbers/game/environment.py:
--------------------------------------------------------------------------------
1 | import gym
2 | import numpy as np
3 | from gym import spaces
4 |
5 | from os import path
6 |
7 | legal_moves = set((-1, 1, 0))
8 |
9 |
10 | class RobotRobbersEnv(gym.Env):
11 | def __init__(self) -> None:
12 | super(RobotRobbersEnv, self).__init__()
13 |
14 | # Game settings
15 | self.width = 128
16 | self.height = 128
17 |
18 | self.n_element_types = 5
19 | self.max_n_elements_per_type = 10
20 | self.observation_shape = (
21 | self.n_element_types + 1,
22 | self.max_n_elements_per_type,
23 | 4
24 | )
25 |
26 | self.n_robbers = 5
27 | self.max_n_scrooges = 7
28 | self.max_n_cashbags = 5
29 | self.max_n_dropspots = 3
30 | self.max_n_obstacles = 5
31 | self._max_obstacle_size = 20
32 | self._max_carry_capacity = 4
33 | self._reward_multiplier = 2
34 | self._scrooge_radius = 15
35 | self._scrooges_move_interval = 2
36 | self._robber_cooldown_ticks = 100
37 |
38 | # Observation/action spaces
39 | self.action_space = spaces.Box(
40 | low=-1, high=1, shape=(self.n_robbers * 2,), dtype=np.int8)
41 | self.observation_space = spaces.Box(
42 | low=-1,
43 | high=self.height,
44 | shape=self.observation_shape,
45 | dtype=np.int16
46 | )
47 |
48 | # Empty world state
49 | self._robber_positions = np.ones(
50 | (self.n_robbers, 2), dtype=np.int16) * -1
51 | self._scrooge_positions = np.ones(
52 | (self.max_n_scrooges, 2), dtype=np.int16) * -1
53 | self._cashbag_positions = np.ones(
54 | (self.max_n_cashbags, 2), dtype=np.int16) * -1
55 | self._dropspot_positions = np.ones(
56 | (self.max_n_dropspots, 2), dtype=np.int16) * -1
57 | self._obstacles = np.ones(
58 | (self.max_n_obstacles, 4), dtype=np.int16) * -1
59 | self._cashbag_carriers = np.zeros((self.n_robbers, ), dtype=np.int16)
60 |
61 | self._robber_cooldown = np.zeros((self.n_robbers, ), dtype=np.int16)
62 |
63 | self._reward = 0
64 | self._total_reward = 0
65 | self._n_cashbags = 0
66 | self._n_dropspots = 0
67 | self._n_scrooges = 0
68 | self._n_obstacles = 0
69 | self._game_ticks = 0
70 |
71 | # Rendering
72 | self.scaling = 8
73 | self.clock = None
74 | self.screen = None
75 | self.surface = None
76 | self.gfxdraw = None
77 | self.font = None
78 |
79 | self.robot_sprite = None
80 | self.scrooge_sprite = None
81 | self.cashbag_sprite = None
82 | self.dropspot_sprite = None
83 |
84 | # Seeding
85 | self.random = None
86 |
87 | def reset(self, seed=None) -> tuple:
88 |
89 | self.seed(seed)
90 | self.random = np.random.RandomState(seed)
91 |
92 | self._n_scrooges = self.max_n_scrooges
93 | self._n_obstacles = self.random.randint(2, self.max_n_obstacles)
94 |
95 | for i in range(self._n_obstacles):
96 | x, y = self._get_free_cell()
97 | w, h = self.random.randint(1, self._max_obstacle_size), self.random.randint(
98 | 1, self._max_obstacle_size)
99 | self._obstacles[i, :] = (x, y, w, h)
100 |
101 | for i in range(self._n_scrooges):
102 | self._scrooge_positions[i, :] = self._get_free_cell()
103 |
104 | for i in range(self.n_robbers):
105 | self._robber_positions[i, :] = self._get_free_cell()
106 |
107 | return self._get_observation()
108 |
109 | def step(self, actions) -> tuple:
110 | assert self.action_space.contains(
111 | actions), f'Move instructions must be pairs of {{-1, 0, 1}} but was {actions}'
112 | self._move_robbers(actions)
113 |
114 | # Check if any robbers are in the same location as cashbags
115 | for i in range(self.n_robbers):
116 | rx, ry = self._robber_positions[i]
117 | for j in range(self.max_n_cashbags):
118 | cx, cy = self._cashbag_positions[j]
119 |
120 | if rx == cx and ry == cy:
121 | self._n_cashbags -= 1
122 | self._cashbag_carriers[i] += 1
123 | self._cashbag_positions[j] = -1, -1
124 |
125 | # Check if any robbers are in the same location as dropspots
126 | for dx, dy in self._dropspot_positions:
127 | for i in range(self.n_robbers):
128 | rx, ry = self._robber_positions[i]
129 |
130 | if rx == dx and ry == dy:
131 | self._reward += self._cashbag_carriers[i] ** self._reward_multiplier
132 | self._cashbag_carriers[i] = 0
133 |
134 | # Check if any robbers are in the same location as scrooges
135 | for sx, sy in self._scrooge_positions:
136 | for i in range(self.n_robbers):
137 | rx, ry = self._robber_positions[i]
138 | has_cashbags = self._cashbag_carriers[i] > 0
139 |
140 | # Give a penalty and take away the cashbag
141 | if rx == sx and ry == sy and self._robber_cooldown[i] <= 0:
142 | self._robber_cooldown[i] = self._robber_cooldown_ticks
143 |
144 | if has_cashbags:
145 | self._reward -= 3
146 | self._cashbag_carriers[i] = 0
147 |
148 | # Move scrooges
149 | self._move_scrooges()
150 |
151 | n_cashbags_on_screen = int(self._n_cashbags + self._cashbag_carriers.sum())
152 |
153 | if n_cashbags_on_screen < self.max_n_cashbags:
154 | for ci in range(self.max_n_cashbags):
155 | cix, ciy = self._cashbag_positions[ci]
156 | if cix == -1:
157 | self._cashbag_positions[ci, :] = self._get_free_cell()
158 | self._n_cashbags += 1
159 |
160 | if self._n_dropspots < self.max_n_dropspots:
161 | self._dropspot_positions[self._n_dropspots, :] = self._get_free_cell()
162 | self._n_dropspots += 1
163 |
164 | episode_reward = self._reward
165 | self._total_reward += episode_reward
166 | self._reward = 0
167 | self._game_ticks += 1
168 |
169 | for i in range(self.n_robbers):
170 | self._robber_cooldown[i] -= 1
171 |
172 | return (
173 | self._get_observation(),
174 | episode_reward,
175 | False, # Never terminate
176 | {
177 | "total_reward": self._total_reward,
178 | "game_ticks": self._game_ticks
179 | }
180 | )
181 |
182 | def render(self, mode=None) -> None:
183 | try:
184 | import pygame
185 | from pygame import gfxdraw
186 | self.gfxdraw = gfxdraw
187 | self.pygame = pygame
188 | except ImportError:
189 | raise Exception("Please install pygame to use the render method")
190 |
191 | if self.screen is None:
192 | pygame.init()
193 | pygame.display.init()
194 | self.screen = pygame.display.set_mode((
195 | self.width * self.scaling,
196 | self.height * self.scaling
197 | ))
198 |
199 | self.robot_sprite = pygame.transform.scale(
200 | pygame.image.load(path.join('sprites', 'robot.png')),
201 | (self.scaling * 2, self.scaling * 2)
202 | )
203 |
204 | self.scrooge_sprite = pygame.transform.scale(
205 | pygame.image.load(path.join('sprites', 'scrooge.png')),
206 | (self.scaling * 2, self.scaling * 2)
207 | )
208 |
209 | self.cashbag_sprite = pygame.transform.scale(
210 | pygame.image.load(path.join('sprites', 'cashbag.png')),
211 | (self.scaling * 1.4, self.scaling * 1.4)
212 | )
213 |
214 | self.dropspot_sprite = pygame.transform.scale(
215 | pygame.image.load(path.join('sprites', 'dropspot.png')),
216 | (self.scaling * 2, self.scaling * 2)
217 | )
218 |
219 | if self.font is None:
220 | pygame.font.init()
221 | # Any PRs to change this will be rejected
222 | self.font = pygame.font.SysFont('Comic Sans MS', 30)
223 |
224 | if self.clock is None:
225 | self.clock = pygame.time.Clock()
226 |
227 | self.surface = pygame.Surface(
228 | (self.width * self.scaling, self.height * self.scaling))
229 | self.surface.fill((50, 50, 50))
230 |
231 | for i in range(self.max_n_obstacles):
232 | self._render_obstacle(i)
233 |
234 | for i in range(self.max_n_scrooges):
235 | self._render_scrooge(i)
236 |
237 | for i in range(self.n_robbers):
238 | self._render_robber(i)
239 |
240 | for i in range(self.max_n_cashbags):
241 | self._render_cashbag(i)
242 |
243 | for i in range(self.max_n_dropspots):
244 | self._render_dropspot(i)
245 |
246 | self.screen.blit(self.surface, (0, 0))
247 |
248 | # Draw the reward and game time in the top left of screen
249 | reward_txt = self.font.render(
250 | f'Reward: {self._total_reward}', False, (0, 0, 0))
251 | time_txt = self.font.render(
252 | f'Game ticks: {str(self._game_ticks)}', False, (0, 0, 0))
253 |
254 | self.screen.blit(reward_txt, (30, 30))
255 | self.screen.blit(time_txt, (30, 60))
256 |
257 | pygame.event.pump()
258 | self.clock.tick(60)
259 | pygame.display.flip()
260 |
261 | def _is_cell_free(self, cx, cy):
262 | # Out of bounds
263 | if cx < 0 or cx >= self.width or cy < 0 or cy >= self.height:
264 | return False
265 |
266 | # Inside obstacle
267 | for x, y, w, h in self._obstacles:
268 | if cx >= x and cx <= x + w and cy >= y and cy <= y + h:
269 | return False
270 |
271 | return True
272 |
273 | def _get_free_cell(self):
274 | x = self.random.randint(0, self.width)
275 | y = self.random.randint(0, self.height)
276 |
277 | while not self._is_cell_free(x, y):
278 | x = self.random.randint(0, self.width)
279 | y = self.random.randint(0, self.height)
280 |
281 | return x, y
282 |
283 | def _get_observation(self):
284 | observation = np.ones(self.observation_shape, dtype=np.int16) * -1
285 |
286 | for i, (x, y) in enumerate(self._robber_positions):
287 | observation[0, i, :] = [x, y, 1, 1]
288 |
289 | for i, (x, y) in enumerate(self._scrooge_positions):
290 | observation[1, i, :] = [x, y, 1, 1]
291 |
292 | for i, (x, y) in enumerate(self._cashbag_positions):
293 | observation[2, i, :] = [x, y, 1, 1]
294 |
295 | for i, (x, y) in enumerate(self._dropspot_positions):
296 | observation[3, i, :] = [x, y, 1, 1]
297 |
298 | for i, (x, y, w, h) in enumerate(self._obstacles):
299 | observation[4, i, :] = [x, y, w, h]
300 |
301 | for i, c in enumerate(self._cashbag_carriers):
302 | observation[5, i, :] = [c, 0, 0, 0]
303 |
304 | return observation
305 |
306 | def _render_obstacle(self, idx):
307 | x, y, w, h = self._obstacles[idx]
308 | gx = x * self.scaling
309 | gy = y * self.scaling
310 | gw = (w + 1) * self.scaling
311 | gh = (h + 1) * self.scaling
312 |
313 | self.gfxdraw.filled_polygon(
314 | self.surface,
315 | [(gx, gy), (gx + gw, gy), (gx + gw, gy + gh), (gx, gy + gh)],
316 | (0, 0, 0)
317 | )
318 |
319 | def _render_scrooge(self, idx):
320 | x, y = self._scrooge_positions[idx]
321 | gx = (x * self.scaling) - self.scaling * .5
322 | gy = (y * self.scaling) - self.scaling * .5
323 |
324 | self.surface.blit(self.scrooge_sprite, (gx, gy))
325 |
326 | def _render_robber(self, idx):
327 | x, y = self._robber_positions[idx]
328 | gx = (x * self.scaling) - self.scaling * .5
329 | gy = (y * self.scaling) - self.scaling * .5
330 |
331 | self.surface.blit(self.robot_sprite, (gx, gy))
332 |
333 | for i in range(self._cashbag_carriers[idx]):
334 | robber_cx, robber_cy = (
335 | gx + self.scaling // 2, gy + self.scaling // 2)
336 | w = i + 1
337 | self.gfxdraw.filled_polygon(
338 | self.surface,
339 | [(robber_cx - w, robber_cy - w), (robber_cx + w, robber_cy - w),
340 | (robber_cx + w, robber_cy + w), (robber_cx - w, robber_cy + w)],
341 | (255, 255, 0)
342 | )
343 |
344 | def _render_cashbag(self, idx):
345 | x, y = self._cashbag_positions[idx]
346 | gx = (x * self.scaling) - self.scaling * .2
347 | gy = (y * self.scaling) - self.scaling * .2
348 |
349 | self.surface.blit(self.cashbag_sprite, (gx, gy))
350 |
351 | def _render_dropspot(self, idx):
352 | x, y = self._dropspot_positions[idx]
353 | gx = (x * self.scaling) - self.scaling * .5
354 | gy = (y * self.scaling) - self.scaling * .5
355 |
356 | self.surface.blit(self.dropspot_sprite, (gx, gy))
357 |
358 | def _dist(self, x1, y1, x2, y2):
359 | return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
360 |
361 | def _move_robbers(self, actions):
362 | for i in range(self.n_robbers):
363 |
364 | wait_ticks = self._cashbag_carriers[i] + 1
365 | if not self._game_ticks % wait_ticks == 0:
366 | continue
367 |
368 | dx, dy = actions[i * 2], actions[i * 2 + 1]
369 | x, y = self._robber_positions[i]
370 | x += dx
371 | y += dy
372 |
373 | if not self._is_cell_free(x, y):
374 | continue
375 |
376 | self._robber_positions[i] = x, y
377 |
378 | def _move_scrooges(self):
379 | if not self._game_ticks % self._scrooges_move_interval == 0:
380 | return
381 |
382 | for i in range(self._n_scrooges):
383 |
384 | x, y = self._scrooge_positions[i]
385 |
386 | distances = [
387 | self._dist(x, y, *self._robber_positions[idx])
388 | if self._robber_cooldown[idx] <= 0 else np.inf
389 | for idx in range(self.n_robbers)
390 | ]
391 |
392 | if len(distances) == 0:
393 | # Move randomly
394 | dx, dy = self.random.randint(-1, 2), self.random.randint(-1, 2)
395 | x += dx
396 | y += dy
397 |
398 | self._scrooge_positions[i] = x, y
399 |
400 | continue
401 |
402 | closest_robber = np.argmin(distances)
403 | if distances[closest_robber] < self._scrooge_radius:
404 | # Move towards the closest robber if within radius
405 | rx, ry = self._robber_positions[closest_robber]
406 | dx, dy = rx - x, ry - y
407 | x += 1 if dx > 0 else -1 if dx < 0 else 0
408 | y += 1 if dy > 0 else -1 if dy < 0 else 0
409 |
410 | else:
411 | # Move randomly
412 | dx, dy = self.random.randint(-1, 2), self.random.randint(-1, 2)
413 | x += dx
414 | y += dy
415 |
416 | if not self._is_cell_free(x, y):
417 | continue
418 |
419 | self._scrooge_positions[i] = x, y
420 |
--------------------------------------------------------------------------------