├── .flake8 ├── .github └── workflows │ └── docker-publish.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── Pipfile ├── Pipfile.lock ├── README.md ├── brave.py ├── brave ├── __init__.py ├── abstract_collection.py ├── api │ ├── __init__.py │ ├── route_handler.py │ └── websockets_handler.py ├── config.py ├── config_file.py ├── connections │ ├── __init__.py │ ├── connection.py │ ├── connection_to_mixer.py │ └── connection_to_output.py ├── exceptions.py ├── helpers.py ├── inputoutputoverlay.py ├── inputs │ ├── __init__.py │ ├── decklink.py │ ├── html.py │ ├── image.py │ ├── input.py │ ├── streamlink.py │ ├── tcp_client.py │ ├── test_audio.py │ ├── test_video.py │ ├── uri.py │ └── youtubedl.py ├── mixers │ ├── __init__.py │ └── mixer.py ├── outputs │ ├── __init__.py │ ├── file.py │ ├── image.py │ ├── kvs.py │ ├── local.py │ ├── output.py │ ├── rtmp.py │ ├── tcp.py │ └── webrtc.py ├── overlays │ ├── __init__.py │ ├── clock.py │ ├── effect.py │ ├── overlay.py │ └── text.py ├── pipeline_messaging.py └── session.py ├── config ├── default.yaml ├── empty.yaml ├── example_empty.yaml ├── example_four_squares.yaml ├── example_test_sounds.yaml └── video_wall.yaml ├── docker-compose.yaml ├── docs ├── api.md ├── assets │ ├── arch.png │ ├── blocks_example.png │ ├── brave-screenshot.png │ ├── screenshot.png │ └── video_wall.png ├── building_webrender.md ├── config_file.md ├── faq.md ├── howto.md ├── inputs.md ├── install_centos7.md ├── install_kvs.md ├── install_macos.md ├── install_ubuntu.md ├── mixers.md ├── outputs.md ├── overlays.md └── plugins_used.md ├── gst-WebRenderSrc ├── .gitignore ├── CMakeLists.txt ├── cmake │ └── DownloadCEF.cmake └── src │ ├── CMakeLists.txt │ ├── cef │ ├── Browser.cpp │ ├── Browser.h │ ├── Client.cpp │ └── Client.h │ ├── cef_bridge.cpp │ ├── cef_bridge.h │ ├── cef_subprocess.cpp │ ├── gstwebrendersrc.c │ └── gstwebrendersrc.h ├── public ├── elements_table.html ├── index.html ├── js │ ├── components.js │ ├── index.js │ ├── inputs.js │ ├── mixers.js │ ├── outputs.js │ ├── overlays.js │ ├── preview.js │ ├── webrtc.js │ └── websocket.js └── style.css ├── pytest.ini ├── tests ├── assets │ ├── 2_second_audio.m4a │ ├── 2_second_audio.mp3 │ ├── 2_second_video.mp4 │ ├── 5_second_audio.m4a │ ├── 5_second_audio.mp3 │ ├── 5_second_video.mp4 │ └── image_640_360.png ├── test_add_and_remove_from_mix.py ├── test_api.py ├── test_config_file.py ├── test_config_file_output.py ├── test_effect_overlay.py ├── test_elements_api_endpoint.py ├── test_file_output.py ├── test_image_input.py ├── test_image_output.py ├── test_initial_state.py ├── test_inputs.py ├── test_local_output.py ├── test_mixer_positioning.py ├── test_mixer_to_mixer.py ├── test_mixers.py ├── test_multiple_braves.py ├── test_multiple_inputs_and_mixers.py ├── test_multiple_outputs.py ├── test_outputs.py ├── test_outputs_with_input_source.py ├── test_outputs_with_no_source.py ├── test_overlays.py ├── test_overlays_on_multiple_mixers.py ├── test_restart.py ├── test_rtmp.py ├── test_seek.py ├── test_tcp.py ├── test_text_overlay_element_connects_successfully.py ├── test_uri_input.py ├── unit │ ├── test_config.py │ └── test_connection_collection.py └── utils.py ├── tox.ini └── youtubedltest.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E402 3 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: 
Docker 2 | 3 | on: 4 | push: 5 | # Publish `master` as Docker `latest` image. 6 | branches: 7 | - master 8 | 9 | # Publish `v1.2.3` tags as releases. 10 | tags: 11 | - v* 12 | 13 | # Run tests for any PRs. 14 | pull_request: 15 | 16 | env: 17 | IMAGE_NAME: bitwavetv/brave 18 | 19 | jobs: 20 | # Run tests. 21 | # See also https://docs.docker.com/docker-hub/builds/automated-testing/ 22 | test: 23 | runs-on: ubuntu-latest 24 | 25 | steps: 26 | - uses: actions/checkout@v2 27 | 28 | - name: Run tests 29 | run: | 30 | if [ -f docker-compose.test.yml ]; then 31 | docker-compose --file docker-compose.test.yml build 32 | docker-compose --file docker-compose.test.yml run sut 33 | else 34 | docker build . --file Dockerfile 35 | fi 36 | 37 | # Push image to GitHub Packages. 38 | # See also https://docs.docker.com/docker-hub/builds/ 39 | push: 40 | # Ensure test job passes before pushing image. 41 | needs: test 42 | 43 | runs-on: ubuntu-latest 44 | if: github.event_name == 'push' 45 | 46 | steps: 47 | - uses: actions/checkout@v2 48 | 49 | - name: Build image 50 | run: docker build . --file Dockerfile --tag $IMAGE_NAME 51 | 52 | - name: Log into registry 53 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u ${{ github.actor }} --password-stdin 54 | 55 | - name: Push image 56 | run: | 57 | IMAGE_ID=docker.pkg.github.com/${{ github.repository }}/$IMAGE_NAME 58 | 59 | # Change all uppercase to lowercase 60 | IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') 61 | 62 | # Strip git ref prefix from version 63 | VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 64 | 65 | # Strip "v" prefix from tag name 66 | [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//') 67 | 68 | # Use Docker `latest` tag convention 69 | [ "$VERSION" == "master" ] && VERSION=latest 70 | 71 | echo IMAGE_ID=$IMAGE_ID 72 | echo VERSION=$VERSION 73 | 74 | docker tag $IMAGE_NAME $IMAGE_ID:$VERSION 75 | docker push $IMAGE_ID:$VERSION 76 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | #vscode 104 | .vscode/ 105 | 106 | #phpstorm 107 | .idea 108 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:19.10 2 | # FROM amd64/debian:stretch-slim 3 | 4 | ENV LC_ALL=C.UTF-8 5 | ENV LANG=C.UTF-8 6 | 7 | RUN apt-get update && \ 8 | apt-get install -y \ 9 | build-essential \ 10 | gcc \ 11 | git \ 12 | libtool \ 13 | automake \ 14 | make \ 15 | cmake \ 16 | gtk-doc-tools \ 17 | libffi6 libffi-dev \ 18 | gobject-introspection \ 19 | libgstreamer1.0-dev \ 20 | libgstreamer-plugins-base1.0-dev \ 21 | gstreamer1.0-libav \ 22 | gstreamer1.0-nice \ 23 | gstreamer1.0-plugins-base \ 24 | gstreamer1.0-plugins-good \ 25 | gstreamer1.0-plugins-bad \ 26 | gstreamer1.0-plugins-ugly \ 27 | gstreamer1.0-tools \ 28 | gir1.2-gst-plugins-bad-1.0 \ 29 | libcairo2-dev \ 30 | libgirepository1.0-dev \ 31 | pkg-config \ 32 | python3-dev \ 33 | python3-wheel \ 34 | python3-gst-1.0 \ 35 | python3-pip \ 36 | python3-gi \ 37 | python3-websockets \ 38 | python3-psutil \ 39 | python3-uvloop 40 | 41 | RUN pip3 install pipenv sanic 42 | 43 | COPY . /src 44 | WORKDIR /src 45 | 46 | RUN cd gst-WebRenderSrc && \ 47 | cmake -DCMAKE_BUILD_TYPE=Release . 
&& \ 48 | CC=clang CXX=clang++ make -stdlib=g++ && \ 49 | make install 50 | 51 | RUN git clone --depth 1 https://github.com/RidgeRun/gst-interpipe.git && \ 52 | cd gst-interpipe && \ 53 | ./autogen.sh --libdir /usr/lib/x86_64-linux-gnu/gstreamer-1.0/ && \ 54 | make && \ 55 | make install 56 | 57 | RUN pipenv install && \ 58 | mkdir -p /usr/local/share/brave/output_images/ 59 | 60 | #ARG BRAVE_REPO=bitwave-tv 61 | #RUN git clone --depth 1 https://github.com/${BRAVE_REPO}/brave.git && \ 62 | # cd brave && \ 63 | # pip3 install pipenv sanic && \ 64 | # pipenv install && \ 65 | # mkdir -p /usr/local/share/brave/output_images/ 66 | 67 | EXPOSE 5000 68 | 69 | CMD ["pipenv", "run", "/src/brave.py", "-c", "/config/config.yaml"] 70 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .phoney: usage 2 | 3 | proxy=http://10.118.66.5:80 4 | tag=bbc/brave:latest 5 | 6 | usage: 7 | @echo Usage 8 | @echo 9 | @echo make bor - Build on Reith 10 | @echo make run - Run Brave 11 | @echo make bash - Run with bash 12 | 13 | 14 | bor: 15 | docker build --build-arg http_proxy=${proxy} --build-arg https_proxy=${proxy} -t ${tag} . 16 | 17 | run: 18 | docker run --name brave --rm -t -i -p 5000:5000 ${tag} 19 | 20 | bash: 21 | docker run --name brave --rm -t -i -p 5000:5000 ${tag} /bin/bash 22 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | setproctitle = "*" 8 | pyyaml = "*" 9 | gbulb = "*" 10 | sanic = "*" 11 | websockets = "*" 12 | pytest = "*" 13 | pillow = "*" 14 | pygobject = "*" 15 | vext = "*" 16 | psutil = "*" 17 | gobject = "*" 18 | #streamlink = {git = "https://github.com/streamlink/streamlink.git"} 19 | streamlink = "*" 20 | youtube-dl = "*" 21 | cef = "*" 22 | 23 | [dev-packages] 24 | 25 | [requires] 26 | python_version = "3" 27 | -------------------------------------------------------------------------------- /brave.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | ''' 3 | Runs Brave as a daemon with RestAPI interface 4 | ''' 5 | 6 | # "r_frame_rate":"47/1" trying to track down the issue with the stuttering 7 | 8 | import sys 9 | import threading 10 | import signal 11 | import argparse 12 | import brave.session 13 | from gi.repository import Gst 14 | assert sys.version_info >= (3, 6) 15 | import brave.api 16 | import brave.config 17 | from brave.helpers import run_on_master_thread_when_idle 18 | import brave.exceptions 19 | 20 | 21 | import setproctitle 22 | setproctitle.setproctitle('brave') 23 | 24 | def setup_args(): 25 | parser = argparse.ArgumentParser(description='Basic Remote AV Editor') 26 | parser.add_argument('-c', nargs=1, metavar='', 27 | help='path to config file') 28 | return vars(parser.parse_args()) 29 | 30 | 31 | def setup_config(args): 32 | if ('c' in args and args['c'] is not None): 33 | brave.config.init(args['c'][0]) 34 | else: 35 | brave.config.init() 36 | 37 | 38 | def check_gstreamer_plugins(): 39 | needed = ['opus', 'vpx', 'nice', 'webrtc', 'dtls', 'x264', 'srtp', 40 | 'multifile', 'tcp', 'rtmp', 'rtpmanager', 'videotestsrc', 'audiotestsrc'] 41 | missing = list(filter(lambda p: Gst.Registry.get().find_plugin(p) is None, needed)) 42 | if len(missing): 43 | 
print('Missing gstreamer plugins:', missing) 44 | return False 45 | return True 46 | 47 | 48 | def start_brave(): 49 | session = brave.session.init() 50 | 51 | def start_rest_api_in_separate_thread(): 52 | try: 53 | brave.api.RestApi(session) 54 | except Exception as e: 55 | print('Cannot start Rest API:', e) 56 | run_on_master_thread_when_idle(session.end) 57 | 58 | threading.Thread(target=start_rest_api_in_separate_thread, name='api-thread', daemon=True).start() 59 | 60 | def keyboard_exit(signal, frame): 61 | print("Received keyboard interrupt to exit, so tidying up...") 62 | session.end() 63 | 64 | signal.signal(signal.SIGINT, keyboard_exit) 65 | session.start() 66 | 67 | 68 | if __name__ == '__main__': 69 | Gst.init(None) 70 | if not check_gstreamer_plugins(): 71 | sys.exit(1) 72 | args = setup_args() 73 | try: 74 | setup_config(args) 75 | start_brave() 76 | except brave.exceptions.InvalidConfiguration as e: 77 | print('Invalid configuration: %s' % e) 78 | sys.exit(1) 79 | except brave.exceptions.PipelineFailure as e: 80 | print('Failed to create GStreamer pipeline: %s' % e) 81 | sys.exit(1) 82 | -------------------------------------------------------------------------------- /brave/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/brave/__init__.py -------------------------------------------------------------------------------- /brave/abstract_collection.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Abstract superclass of InputCollection, OutputCollection, MixerCollection, OverlayCollection 3 | ''' 4 | import collections.abc 5 | from brave.helpers import get_pipeline_details 6 | 7 | 8 | class AbstractCollection(collections.abc.MutableMapping): 9 | def __init__(self, session): 10 | self.session = session 11 | self._items = {} 12 | self._next_id = 1 13 | 14 | def __getitem__(self, key): 15 | if key in self._items: 16 | return self._items[key] 17 | raise KeyError 18 | 19 | def __delitem__(self, key): 20 | if key in self._items: 21 | del self._items[key] 22 | 23 | def __setitem__(self, key, value): 24 | raise Exception('Do not add to a collection directly; use add()') 25 | 26 | def __iter__(self): 27 | return self._items.__iter__() 28 | 29 | def __len__(self): 30 | return len(self._items) 31 | 32 | def get_new_id(self): 33 | while self._next_id in self._items: 34 | self._next_id += 1 35 | return self._next_id 36 | 37 | def summarise(self, for_config_file=False): 38 | s = [] 39 | for id, obj in self.items(): 40 | s.append(obj.summarise()) 41 | return s 42 | 43 | def print_state_summary(self): 44 | for id, obj in self.items(): 45 | obj.print_state_summary() 46 | 47 | def get_pipeline_details(self, show_inside_bin_elements): 48 | details = {} 49 | for id, obj in self.items(): 50 | if hasattr(obj, 'pipeline'): 51 | details[id] = get_pipeline_details(obj.pipeline, show_inside_bin_elements) 52 | return details 53 | 54 | def get_entry_with_lowest_id(self): 55 | ''' 56 | Returns the item with the lowest ID, or None, if there are no items. 
57 | ''' 58 | ids = sorted(self._items.keys()) 59 | if len(ids) == 0: 60 | return None 61 | return self._items[ids[0]] 62 | -------------------------------------------------------------------------------- /brave/api/__init__.py: -------------------------------------------------------------------------------- 1 | import brave.helpers 2 | import asyncio 3 | import uvloop 4 | logger = brave.helpers.get_logger('api') 5 | from sanic import Sanic 6 | import sanic.response 7 | from sanic.exceptions import NotFound, InvalidUsage 8 | import brave.config as config 9 | import brave.api.websockets_handler 10 | import brave.api.route_handler 11 | import brave.exceptions 12 | 13 | 14 | class RestApi(object): 15 | ''' 16 | Class to provide Brave's Restful API 17 | ''' 18 | 19 | def __init__(self, session): 20 | app = Sanic() 21 | app.config.KEEP_ALIVE = False 22 | session.rest_api = self 23 | self.webockets_handler = brave.api.websockets_handler.WebsocketsHandler(session) 24 | route_handler = brave.api.route_handler 25 | 26 | app.static('/', './public/index.html', name='index.html') 27 | app.static('/elements_table', './public/elements_table.html', name='elements_table.html') 28 | app.static('/style.css', './public/style.css', name='style.css') 29 | app.static('/js/', './public/js/') 30 | app.static('/output_images/', '/usr/local/share/brave/output_images/') 31 | 32 | @app.exception(NotFound) 33 | async def not_found(request, exception): 34 | return sanic.response.json({'error': 'Not found'}, 404) 35 | 36 | @app.exception(InvalidUsage) 37 | async def invalid_usage(request, exception): 38 | return sanic.response.json({'error': 'Invalid request: %s' % exception}, 400) 39 | 40 | @app.middleware('request') 41 | async def give_session_to_each_route_handler(request): 42 | request['session'] = session 43 | 44 | @app.middleware('request') 45 | async def ensure_objects_always_provided_in_json(request): 46 | if request.method in ['POST', 'PUT'] and not isinstance(request.json, dict): 47 | return sanic.response.json({'error': 'Invalid JSON'}, 400) 48 | 49 | @app.exception(brave.exceptions.InvalidConfiguration) 50 | async def invalid_cf(request, exception): 51 | msg = 'Invalid configuration: ' + str(exception) 52 | logger.debug(msg) 53 | return sanic.response.json({'error': msg}, 400) 54 | 55 | @app.exception(brave.exceptions.PipelineFailure) 56 | async def pipeline_creation_failure(request, exception): 57 | return sanic.response.json({'error': str(exception)}, 500) 58 | 59 | app.add_route(route_handler.all, "/api/all") 60 | app.add_route(route_handler.inputs, "/api/inputs") 61 | app.add_route(route_handler.outputs, "/api/outputs") 62 | app.add_route(route_handler.overlays, "/api/overlays") 63 | app.add_route(route_handler.mixers, "/api/mixers") 64 | app.add_route(route_handler.elements, "/api/elements") 65 | 66 | app.add_route(route_handler.create_input, '/api/inputs', methods=['PUT']) 67 | app.add_route(route_handler.create_output, '/api/outputs', methods=['PUT']) 68 | app.add_route(route_handler.create_overlay, '/api/overlays', methods=['PUT']) 69 | app.add_route(route_handler.create_mixer, '/api/mixers', methods=['PUT']) 70 | 71 | app.add_route(route_handler.update_input, '/api/inputs/', methods=['POST']) 72 | app.add_route(route_handler.update_output, '/api/outputs/', methods=['POST']) 73 | app.add_route(route_handler.update_overlay, '/api/overlays/', methods=['POST']) 74 | app.add_route(route_handler.update_mixer, '/api/mixers/', methods=['POST']) 75 | 76 | app.add_route(route_handler.delete_input, 
'/api/inputs/', methods=['DELETE']) 77 | app.add_route(route_handler.delete_output, '/api/outputs/', methods=['DELETE']) 78 | app.add_route(route_handler.delete_overlay, '/api/overlays/', methods=['DELETE']) 79 | app.add_route(route_handler.delete_mixer, '/api/mixers/', methods=['DELETE']) 80 | 81 | app.add_route(route_handler.cut_to_source, '/api/mixers//cut_to_source', methods=['POST']) 82 | app.add_route(route_handler.overlay_source, '/api/mixers//overlay_source', methods=['POST']) 83 | app.add_route(route_handler.remove_source, '/api/mixers//remove_source', methods=['POST']) 84 | 85 | app.add_route(route_handler.get_body, '/api/outputs//body') 86 | 87 | app.add_route(route_handler.restart, '/api/restart', methods=['POST']) 88 | app.add_route(route_handler.config_yaml, '/api/config/current.yaml', methods=['GET']) 89 | 90 | @app.websocket('/socket') 91 | async def feed(request, ws): 92 | await self.webockets_handler.feed(request, ws) 93 | 94 | def start_server(): 95 | asyncio.set_event_loop(uvloop.new_event_loop()) 96 | loop = asyncio.get_event_loop() 97 | server = app.create_server(host=config.api_host(), port=config.api_port(), access_log=False, return_asyncio_server=True) 98 | asyncio.ensure_future(server) 99 | loop.create_task(self.webockets_handler.periodic_check()) 100 | loop.run_forever() 101 | 102 | start_server() 103 | -------------------------------------------------------------------------------- /brave/api/websockets_handler.py: -------------------------------------------------------------------------------- 1 | import websockets 2 | import asyncio 3 | import json 4 | import brave.helpers 5 | import psutil 6 | from brave.outputs.webrtc import WebRTCOutput 7 | logger = brave.helpers.get_logger('websockets') 8 | 9 | 10 | class WebsocketsHandler(): 11 | ''' 12 | Class to handle websockets to clients 13 | ''' 14 | 15 | def __init__(self, session): 16 | self.session = session 17 | self._websocket_clients = [] 18 | 19 | async def send_message_to_first_client(self, m): 20 | ws = self._websocket_clients[0] 21 | await ws.send(json.dumps({'msg': m})) 22 | 23 | async def feed(self, request, ws): 24 | self._websocket_clients.append(ws) 25 | 26 | logger.debug('New websocket client... 
I now have %d websocket clients' % len(self._websocket_clients)) 27 | 28 | async def heartbeat(): 29 | try: 30 | HEARTBEAT_PERIOD = 5 31 | while True: 32 | await ws.send(json.dumps({'msg_type': 'ping', 'cpu_percent': psutil.cpu_percent(interval=0)})) 33 | await asyncio.sleep(HEARTBEAT_PERIOD) 34 | except websockets.ConnectionClosed: 35 | if ws in self._websocket_clients: 36 | self._websocket_clients.remove(ws) 37 | if hasattr(ws, 'webrtc_output'): 38 | await ws.webrtc_output.remove_peer_request(ws) 39 | delattr(ws, 'webrtc_output') 40 | # await output.remove_peer_request() 41 | 42 | asyncio.ensure_future(heartbeat()) 43 | while True: 44 | data_json = await ws.recv() 45 | data = json.loads(data_json) 46 | if ('msg_type' in data and data['msg_type'] == 'pong'): 47 | pass 48 | elif ('msg_type' in data and data['msg_type'] == 'webrtc-init'): 49 | if 'output_id' not in data or data['output_id'] is None: 50 | await ws.send(json.dumps({'error': 'no output_id'})) 51 | return 52 | 53 | try: 54 | output_id = int(data['output_id']) 55 | except ValueError: 56 | await ws.send(json.dumps({'error': 'output_id not an integer'})) 57 | return 58 | 59 | if type(self.session.outputs[output_id]) != WebRTCOutput: 60 | await ws.send(json.dumps({'error': 'webrtc-init called on output that is not WebRTC'})) 61 | return 62 | 63 | try: 64 | ws.webrtc_output = self.session.outputs[output_id] 65 | except KeyError: 66 | await ws.send(json.dumps({'error': 'no such id'})) 67 | return 68 | await ws.webrtc_output.new_peer_request(ws) 69 | 70 | # Allow the client to report when it does not want webrtc anymore: 71 | elif ('msg_type' in data and data['msg_type'] == 'webrtc-close'): 72 | if hasattr(ws, 'webrtc_output'): 73 | await ws.webrtc_output.remove_peer_request(ws) 74 | delattr(ws, 'webrtc_output') 75 | 76 | elif 'sdp' in data: 77 | await ws.webrtc_output.sdp_message_from_peer(ws, data['sdp']) 78 | elif 'ice' in data: 79 | await ws.webrtc_output.ice_message_from_peer(ws, data['ice']) 80 | else: 81 | logger.warning('Unknown websocket message from client:' + data_json) 82 | 83 | async def periodic_check(self): 84 | while True: 85 | UPDATE_PERIOD = 0.1 86 | try: 87 | messages_to_send = await self.check_for_items_recently_updated() 88 | messages_to_send.extend(await self.check_for_items_recently_deleted()) 89 | await self.send_to_all_clients(messages_to_send) 90 | except Exception as e: 91 | logger.warning('Error on periodic websocket check:' + str(e)) 92 | await asyncio.sleep(UPDATE_PERIOD) 93 | 94 | async def check_for_items_recently_updated(self): 95 | items_recently_updated = set(self.session.items_recently_updated) 96 | self.session.items_recently_updated = [] 97 | messages_to_send = [] 98 | for o in items_recently_updated: 99 | messages_to_send.append({ 100 | 'msg_type': 'update', 101 | 'block_type': o.input_output_overlay_or_mixer(), 102 | 'data': o.summarise() 103 | }) 104 | return messages_to_send 105 | 106 | async def check_for_items_recently_deleted(self): 107 | messages_to_send = [] 108 | for item in self.session.items_recently_deleted: 109 | messages_to_send.append({ 110 | 'msg_type': 'delete', 111 | 'block_type': item['block_type'], 112 | 'id': item['id'] 113 | }) 114 | self.session.items_recently_deleted = [] 115 | return messages_to_send 116 | 117 | async def send_to_all_clients(self, messages_to_send): 118 | for msg in messages_to_send: 119 | for ws in self._websocket_clients: 120 | try: 121 | await ws.send(json.dumps(msg)) 122 | except websockets.ConnectionClosed: 123 | self._websocket_clients.remove(ws) 
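# A minimal client sketch for the JSON-over-websocket protocol handled above.
# It assumes Brave is reachable at ws://localhost:5000 and that output 1 is a
# 'webrtc' output (both are assumptions for illustration, not taken from this
# repository), and it uses the same 'websockets' library the project already
# depends on. The handler above sends periodic {'msg_type': 'ping'} heartbeats,
# starts WebRTC negotiation on {'msg_type': 'webrtc-init', 'output_id': <id>},
# stops it on {'msg_type': 'webrtc-close'}, and forwards any 'sdp'/'ice'
# payloads from the client to the chosen output.
#
#     import asyncio
#     import json
#     import websockets
#
#     async def demo_client():
#         async with websockets.connect('ws://localhost:5000/socket') as ws:
#             # Ask Brave to start WebRTC negotiation for output 1:
#             await ws.send(json.dumps({'msg_type': 'webrtc-init', 'output_id': 1}))
#             while True:
#                 msg = json.loads(await ws.recv())
#                 if msg.get('msg_type') == 'ping':
#                     # Answer the heartbeat, as the browser client does.
#                     await ws.send(json.dumps({'msg_type': 'pong'}))
#                 elif 'sdp' in msg or 'ice' in msg:
#                     # Any SDP/ICE sent by the server would be handed to a
#                     # WebRTC peer implementation here.
#                     pass
#
#     asyncio.get_event_loop().run_until_complete(demo_client())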
124 | -------------------------------------------------------------------------------- /brave/config.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import os 3 | import brave.exceptions 4 | DEFAULT_CONFIG_FILENAME = 'config/default.yaml' 5 | c = {} 6 | 7 | 8 | def init(filename=DEFAULT_CONFIG_FILENAME): 9 | global c 10 | try: 11 | with open(filename, 'r') as stream: 12 | c = yaml.load(stream, Loader=yaml.FullLoader) 13 | if c is None: 14 | c = {} 15 | except FileNotFoundError as e: 16 | print( 'Unable to open config file "%s": %s' % (filename, e) ) 17 | # exit(1) 18 | 19 | # attempt to load default configuration if specified configuration fails 20 | try: 21 | with open(DEFAULT_CONFIG_FILENAME, 'r') as stream: 22 | c = yaml.load(stream, Loader=yaml.FullLoader) 23 | if c is None: 24 | c = {} 25 | except FileNotFoundError as e: 26 | print( 'FATAL! Unable to load default configuration! File: "%s": %s' % (filename, e) ) 27 | exit(1) 28 | 29 | _validate() 30 | 31 | 32 | def raw(): 33 | return {**c} 34 | 35 | 36 | def api_host(): 37 | if 'HOST' in os.environ: 38 | return os.environ['HOST'] 39 | return c['api_host'] if 'api_host' in c else '0.0.0.0' 40 | 41 | 42 | def api_port(): 43 | if 'PORT' in os.environ: 44 | return int(os.environ['PORT']) 45 | return c['api_port'] if 'api_port' in c else 5000 46 | 47 | 48 | def enable_audio(): 49 | return 'enable_audio' not in c or c['enable_audio'] is True 50 | 51 | 52 | def enable_video(): 53 | return 'enable_video' not in c or c['enable_video'] is True 54 | 55 | 56 | def default_mixer_width(): 57 | return c['default_mixer_width'] if 'default_mixer_width' in c else 1280 58 | 59 | 60 | def default_mixer_height(): 61 | return c['default_mixer_height'] if 'default_mixer_height' in c else 720 62 | 63 | 64 | def inputs(): 65 | if 'inputs' in c and c['inputs'] is not None: 66 | return c['inputs'] 67 | else: 68 | return [] 69 | 70 | 71 | def outputs(): 72 | if 'outputs' in c and c['outputs'] is not None: 73 | return c['outputs'] 74 | else: 75 | return [] 76 | 77 | 78 | def overlays(): 79 | if 'overlays' in c and c['overlays'] is not None: 80 | return c['overlays'] 81 | else: 82 | return [] 83 | 84 | 85 | def mixers(): 86 | return c['mixers'] if ('mixers' in c and c['mixers'] is not None) else [] 87 | 88 | 89 | def default_audio_caps(): 90 | return 'audio/x-raw,channels=2,layout=interleaved,rate=44100,format=S16LE' 91 | 92 | 93 | def stun_server(): 94 | 'Should be in the format :' 95 | if 'STUN_SERVER' in os.environ: 96 | return os.environ['STUN_SERVER'] 97 | return c['stun_server'] if 'stun_server' in c else 'stun.l.google.com:19302' 98 | 99 | 100 | def turn_server(): 101 | 'Should be in the format :@:' 102 | if 'TURN_SERVER' in os.environ: 103 | return os.environ['TURN_SERVER'] 104 | return c['turn_server'] if 'turn_server' in c else None 105 | 106 | 107 | def _validate(): 108 | for type in ['inputs', 'outputs', 'overlays', 'mixers']: 109 | if type in c and c[type] is not None: 110 | if not isinstance(c[type], list): 111 | raise brave.exceptions.InvalidConfiguration( 112 | 'Config entry "%s" must be an array (list). 
It is currently: %s' % (type, c[type])) 113 | for entry in c[type]: 114 | if not isinstance(entry, dict): 115 | raise brave.exceptions.InvalidConfiguration( 116 | 'Config entry "%s" contains an entry that is not a dictionary: %s' % (type, c[type])) 117 | for key, value in entry.items(): 118 | if not isinstance(key, str): 119 | raise brave.exceptions.InvalidConfiguration( 120 | 'Config entry "%s" contains an entry with key "%s" that is not a string' % (type, key)) 121 | -------------------------------------------------------------------------------- /brave/config_file.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import brave.config 3 | import tempfile 4 | 5 | 6 | def as_yaml(session): 7 | ''' 8 | Get the current config, as a YAML string. 9 | This can then be used to start another Brave with the same configuration. 10 | ''' 11 | config = brave.config.raw() 12 | 13 | for block_type in ['inputs', 'outputs', 'overlays', 'mixers']: 14 | if block_type in config: 15 | del(config[block_type]) 16 | collection = getattr(session, block_type) 17 | if len(collection) > 0: 18 | config[block_type] = [] 19 | for name, block in collection.items(): 20 | config[block_type].append(block.summarise(for_config_file=True)) 21 | return yaml.dump(config) 22 | 23 | 24 | def as_yaml_file(session): 25 | ''' 26 | Returns the current config, as a temporary YAML file. 27 | ''' 28 | config_as_yaml = as_yaml(session) 29 | fp = tempfile.NamedTemporaryFile(delete=False) 30 | fp.write(config_as_yaml.encode()) 31 | fp.close() 32 | return fp.name 33 | -------------------------------------------------------------------------------- /brave/connections/__init__.py: -------------------------------------------------------------------------------- 1 | from brave.abstract_collection import AbstractCollection 2 | from brave.connections.connection_to_mixer import ConnectionToMixer 3 | from brave.connections.connection_to_output import ConnectionToOutput 4 | from brave.inputs.input import Input 5 | from brave.outputs.output import Output 6 | from brave.mixers.mixer import Mixer 7 | from brave.exceptions import InvalidConfiguration 8 | 9 | 10 | class ConnectionCollection(AbstractCollection): 11 | ''' 12 | A collection of all Connections. 13 | A Connection connects inputs, mixers, and outputs. 
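    For example (a sketch only: it assumes the running session exposes this
    collection as session.connections and already holds input 1 and mixer 1):

        session.connections.add(session.inputs[1], session.mixers[1])

    add() returns a ConnectionToMixer when the destination is a Mixer, and a
    ConnectionToOutput when the destination is an Output.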
14 | ''' 15 | 16 | def add(self, source, dest, **args): 17 | if isinstance(source, Output): 18 | raise ValueError('Cannot have a connection with output as source') 19 | if isinstance(dest, Input): 20 | raise ValueError('Cannot have a connection with input as source') 21 | if isinstance(dest, Output): 22 | if dest.source_connection() is not None: 23 | raise InvalidConfiguration('Output %d is already connected to a source' % dest.id) 24 | 25 | args['id'] = self.get_new_id() 26 | if isinstance(dest, Mixer): 27 | self._items[args['id']] = ConnectionToMixer(source=source, dest=dest, collection=self, **args) 28 | else: 29 | self._items[args['id']] = ConnectionToOutput(source=source, dest=dest, collection=self, **args) 30 | return self._items[args['id']] 31 | 32 | def get_first_for_source(self, source): 33 | return next((x for x in self._items.values() if x.source == source), None) 34 | 35 | def get_all_for_source(self, source): 36 | return list(filter(lambda x: x.source == source, self._items.values())) 37 | 38 | def get_first_for_dest(self, dest): 39 | return next((x for x in self._items.values() if x.dest == dest), None) 40 | 41 | def get_all_for_dest(self, dest): 42 | return list ( filter ( lambda x: x.dest == dest, self._items.values() ) ) 43 | 44 | def get_connection_between_source_and_dest(self, source, dest): 45 | return next((x for x in self._items.values() if (x.source == source and x.dest == dest)), None) 46 | 47 | def get_or_add_connection_between_source_and_dest(self, source, dest): 48 | c = self.get_connection_between_source_and_dest(source, dest) 49 | if c is None: 50 | return self.add(source, dest) 51 | else: 52 | return c 53 | -------------------------------------------------------------------------------- /brave/connections/connection_to_output.py: -------------------------------------------------------------------------------- 1 | from brave.connections.connection import Connection 2 | 3 | 4 | class ConnectionToOutput(Connection): 5 | ''' 6 | A connection from an input/mixer to an output. 7 | ''' 8 | def setup(self): 9 | if self.has_video(): 10 | self._create_inter_elements('video') 11 | 12 | if self.has_audio(): 13 | self._create_inter_elements('audio') 14 | 15 | self._link_source_to_dest() 16 | self._sync_element_states() 17 | 18 | if self.has_video(): 19 | self.video_is_linked = True 20 | if self.has_audio(): 21 | self.audio_is_linked = True 22 | 23 | # If source and destination have already started, we need to unblock straightaway: 24 | self.unblock_intersrc_if_ready() 25 | 26 | def _get_intersrc(self, audio_or_video): 27 | ''' 28 | Return the intervideosrc/interaudiosrc that the output pipeline has for accepting content. 29 | ''' 30 | assert(audio_or_video in ['audio', 'video']) 31 | element_name = 'inter%ssrc' % audio_or_video 32 | return getattr(self.dest, element_name) if hasattr(self.dest, element_name) else None 33 | 34 | def _create_intersrc(self, audio_or_video): 35 | ''' 36 | The intervideosrc/interaudiosrc will already be made by the output, so no need to make again. 37 | ''' 38 | return self._get_intersrc(audio_or_video) 39 | 40 | def _link_source_to_dest(self): 41 | ''' 42 | Link the source (input/mixer) to the dest (output). 
43 | ''' 44 | if self.has_video(): 45 | self._connect_tee_to_intersink('video') 46 | if self.has_audio(): 47 | self._connect_tee_to_intersink('audio') 48 | -------------------------------------------------------------------------------- /brave/exceptions.py: -------------------------------------------------------------------------------- 1 | class InvalidConfiguration(Exception): 2 | pass 3 | 4 | 5 | class PipelineFailure(Exception): 6 | pass 7 | -------------------------------------------------------------------------------- /brave/inputs/__init__.py: -------------------------------------------------------------------------------- 1 | from brave.inputs.uri import UriInput 2 | from brave.inputs.streamlink import StreamlinkInput 3 | from brave.inputs.youtubedl import YoutubeDLInput 4 | from brave.inputs.test_video import TestVideoInput 5 | from brave.inputs.test_audio import TestAudioInput 6 | from brave.inputs.image import ImageInput 7 | from brave.inputs.html import HTMLInput 8 | from brave.inputs.decklink import DecklinkInput 9 | from brave.inputs.tcp_client import TcpClientInput 10 | from brave.abstract_collection import AbstractCollection 11 | import brave.exceptions 12 | 13 | 14 | class InputCollection(AbstractCollection): 15 | def add(self, **args): 16 | if 'id' not in args: 17 | args['id'] = self.get_new_id() 18 | 19 | if 'type' not in args: 20 | raise brave.exceptions.InvalidConfiguration("Invalid input missing 'type'") 21 | elif args['type'] == 'uri': 22 | input = UriInput(**args, collection=self) 23 | elif args['type'] == 'streamlink': 24 | input = StreamlinkInput(**args, collection=self) 25 | elif args['type'] == 'youtubedl': 26 | input = YoutubeDLInput(**args, collection=self) 27 | elif args['type'] == 'test_video': 28 | input = TestVideoInput(**args, collection=self) 29 | elif args['type'] == 'test_audio': 30 | input = TestAudioInput(**args, collection=self) 31 | elif args['type'] == 'image': 32 | input = ImageInput(**args, collection=self) 33 | elif args['type'] == 'html': 34 | input = HTMLInput(**args, collection=self) 35 | elif args['type'] == 'decklink': 36 | input = DecklinkInput(**args, collection=self) 37 | elif args['type'] == 'tcp_client': 38 | input = TcpClientInput(**args, collection=self) 39 | else: 40 | raise brave.exceptions.InvalidConfiguration(f"Invalid input type '{str(args['type'])}'") 41 | 42 | self._items[args['id']] = input 43 | return input 44 | -------------------------------------------------------------------------------- /brave/inputs/decklink.py: -------------------------------------------------------------------------------- 1 | from brave.inputs.input import Input 2 | from gi.repository import Gst 3 | import brave.config as config 4 | 5 | 6 | class DecklinkInput(Input): 7 | ''' 8 | Handles input via a deckoink card/device. 
9 | This allows SDI/HDMI signals to be mixed locally with Brave. 10 | ''' 11 | def permitted_props(self): 12 | return { 13 | **super().permitted_props(), 14 | 'device': { 15 | 'type': 'int', 16 | 'default': 0, 17 | }, 18 | 'connection': { 19 | 'type': 'int', 20 | 'default': 1, 21 | }, 22 | 'mode': { 23 | 'type': 'int', 24 | 'default': 17, 25 | }, 26 | 'width': { 27 | 'type': 'int', 28 | 'default': 1280 29 | }, 30 | 'height': { 31 | 'type': 'int', 32 | 'default': 720 33 | } 34 | } 35 | 36 | def create_elements(self): 37 | # TODO: Audio is currently locked to the SDI/HDMI connection mode; may need a better way to auto-select the best one 38 | if not self.create_pipeline_from_string('decklinkvideosrc' 39 | ' device-number=' + str(self.device) + 40 | ' connection=' + str(self.connection) + 41 | ' mode=' + str(self.mode) + 42 | ' ! videoconvert ! ' 43 | + self.default_video_pipeline_string_end() + 44 | ' decklinkaudiosrc device-number=' + str(self.device) + ' connection=1 ! audioconvert' 45 | + self.default_audio_pipeline_string_end()): 46 | return False 47 | 48 | self.intervideosink = self.pipeline.get_by_name('intervideosink') 49 | self.final_video_tee = self.pipeline.get_by_name('final_video_tee') 50 | self.final_audio_tee = self.pipeline.get_by_name('final_audio_tee') 51 | self.handle_updated_props() 52 | 53 | def get_input_cap_props(self): 54 | ''' 55 | Parses the caps that arrive from the input, and returns them. 56 | This allows the height/width/framerate/audio_rate to be retrieved. 57 | ''' 58 | elements = {} 59 | if hasattr(self, 'intervideosink'): 60 | elements['video'] = self.intervideosink 61 | 62 | props = {} 63 | for (audioOrVideo, element) in elements.items(): 64 | if not element: 65 | return 66 | caps = element.get_static_pad('sink').get_current_caps() 67 | if not caps: 68 | return 69 | size = caps.get_size() 70 | if size == 0: 71 | return 72 | 73 | structure = caps.get_structure(0) 74 | props[audioOrVideo + '_caps_string'] = structure.to_string() 75 | if structure.has_field('framerate'): 76 | framerate = structure.get_fraction('framerate') 77 | props['framerate'] = framerate.value_numerator / framerate.value_denominator 78 | if structure.has_field('height'): 79 | props['height'] = structure.get_int('height').value 80 | if structure.has_field('width'): 81 | props['width'] = structure.get_int('width').value 82 | 83 | return props 84 | -------------------------------------------------------------------------------- /brave/inputs/html.py: -------------------------------------------------------------------------------- 1 | from brave.inputs.input import Input 2 | import brave.config as config 3 | 4 | 5 | class HTMLInput(Input): 6 | ''' 7 | Handles a web page (HTML) input. 8 | Renders the page to create a video stream. 9 | ''' 10 | 11 | def has_audio(self): 12 | return False 13 | 14 | def permitted_props(self): 15 | return { 16 | **super().permitted_props(), 17 | 'uri': { 18 | 'type': 'str', 19 | 'default': 'https://www.bbc.co.uk' 20 | }, 21 | 'width': { 22 | 'type': 'int', 23 | 'default': 1280 24 | }, 25 | 'height': { 26 | 'type': 'int', 27 | 'default': 720 28 | } 29 | } 30 | 31 | def create_elements(self): 32 | if not config.enable_video(): 33 | return 34 | 35 | self.create_pipeline_from_string(f'cef url="{self.uri}" ! videoconvert ! video/x-raw,format=ARGB ! queue ! 
{self.default_video_pipeline_string_end()}') 36 | 37 | self.intervideosink = self.pipeline.get_by_name('intervideosink') 38 | self.final_video_tee = self.pipeline.get_by_name('final_video_tee') 39 | self.video_output_queue = self.pipeline.get_by_name('video_output_queue') 40 | 41 | def get_input_cap_props(self): 42 | ''' 43 | Gets the width/height of the input. 44 | ''' 45 | 46 | element = self.intervideosink 47 | if not element: 48 | return 49 | caps = element.get_static_pad('sink').get_current_caps() 50 | if not caps: 51 | return 52 | size = caps.get_size() 53 | if size == 0: 54 | return 55 | 56 | structure = caps.get_structure(0) 57 | props = {'video_caps_string': structure.to_string()} 58 | if structure.has_field('height'): 59 | props['height'] = structure.get_int('height').value 60 | if structure.has_field('width'): 61 | props['width'] = structure.get_int('width').value 62 | 63 | return props 64 | -------------------------------------------------------------------------------- /brave/inputs/image.py: -------------------------------------------------------------------------------- 1 | from brave.inputs.input import Input 2 | import brave.config as config 3 | 4 | 5 | class ImageInput(Input): 6 | ''' 7 | Handles an image input. 8 | Freezes the image to create a video stream. 9 | ''' 10 | 11 | def has_audio(self): 12 | return False 13 | 14 | def permitted_props(self): 15 | return { 16 | **super().permitted_props(), 17 | 'uri': { 18 | 'type': 'str', 19 | }, 20 | 'width': { 21 | 'type': 'int' 22 | }, 23 | 'height': { 24 | 'type': 'int' 25 | } 26 | } 27 | 28 | def create_elements(self): 29 | if not config.enable_video(): 30 | return 31 | 32 | # To crop (not resize): videobox autocrop=true border-alpha=0 33 | 34 | pipeline_string = (f'uridecodebin name=uridecodebin uri="{self.uri}" ! ' 35 | 'imagefreeze ! videoconvert ! video/x-raw,pixel-aspect-ratio=1/1,framerate=30/1 ! ' 36 | f'{self.default_video_pipeline_string_end()}') 37 | 38 | self.create_pipeline_from_string(pipeline_string) 39 | self.final_video_tee = self.pipeline.get_by_name('final_video_tee') 40 | self.uridecodebin = self.pipeline.get_by_name('uridecodebin') 41 | 42 | def get_input_cap_props(self): 43 | ''' 44 | Gets the width/height of the input. 
45 | ''' 46 | 47 | element = self.uridecodebin 48 | if not element: 49 | return 50 | pad = element.get_static_pad('src_0') 51 | if not pad: 52 | return 53 | caps = pad.get_current_caps() 54 | if not caps: 55 | return 56 | size = caps.get_size() 57 | if size == 0: 58 | return 59 | 60 | structure = caps.get_structure(0) 61 | props = {'video_caps_string': structure.to_string()} 62 | if structure.has_field('height'): 63 | props['height'] = structure.get_int('height').value 64 | if structure.has_field('width'): 65 | props['width'] = structure.get_int('width').value 66 | 67 | return props 68 | -------------------------------------------------------------------------------- /brave/inputs/test_audio.py: -------------------------------------------------------------------------------- 1 | from brave.inputs.input import Input 2 | import brave.config as config 3 | 4 | 5 | class TestAudioInput(Input): 6 | def has_video(self): 7 | return False 8 | 9 | def permitted_props(self): 10 | return { 11 | **super().permitted_props(), 12 | 'freq': { 13 | 'type': 'int', 14 | 'default': 440 15 | }, 16 | 'wave': { 17 | 'type': 'int', 18 | 'default': 0 19 | }, 20 | 'volume': { 21 | 'type': 'float', 22 | 'default': 0.8 23 | } 24 | } 25 | 26 | def create_elements(self): 27 | pipeline_string = 'audiotestsrc is-live=true name=audiotestsrc volume=0.2 ! ' + \ 28 | config.default_audio_caps() + self.default_audio_pipeline_string_end() 29 | 30 | self.create_pipeline_from_string(pipeline_string) 31 | 32 | self.final_audio_tee = self.pipeline.get_by_name('final_audio_tee') 33 | self.audiotestsrc = self.pipeline.get_by_name('audiotestsrc') 34 | 35 | def handle_updated_props(self): 36 | super().handle_updated_props() 37 | if hasattr(self, 'wave'): 38 | self.audiotestsrc.set_property('wave', int(self.wave)) 39 | if hasattr(self, 'freq'): 40 | self.audiotestsrc.set_property('freq', self.freq) 41 | if hasattr(self, 'volume'): 42 | self.audiotestsrc.set_property('volume', self.volume) 43 | -------------------------------------------------------------------------------- /brave/inputs/test_video.py: -------------------------------------------------------------------------------- 1 | from brave.inputs.input import Input 2 | 3 | 4 | class TestVideoInput(Input): 5 | def has_audio(self): 6 | return False 7 | 8 | def permitted_props(self): 9 | return { 10 | **super().permitted_props(), 11 | 'pattern': { 12 | 'type': 'int', 13 | 'default': 0 14 | }, 15 | 'width': { 16 | 'type': 'int', 17 | 'default': 640 18 | }, 19 | 'height': { 20 | 'type': 'int', 21 | 'default': 360 22 | } 23 | } 24 | 25 | def create_elements(self): 26 | pipeline_string = ('videotestsrc is-live=true name=videotestsrc' 27 | ' ! videoconvert ! videoscale ! capsfilter name=capsfilter ! ' + 28 | self.default_video_pipeline_string_end()) 29 | # FOR TESTING TO VIEW LOCALLY, APPEND: + ' final_video_tee. ! queue ! 
glimagesink ') 30 | self.create_pipeline_from_string(pipeline_string) 31 | 32 | self.final_video_tee = self.pipeline.get_by_name('final_video_tee') 33 | self.video_output_queue = self.pipeline.get_by_name('video_output_queue') 34 | self.videotestsrc = self.pipeline.get_by_name('videotestsrc') 35 | self.capsfilter = self.pipeline.get_by_name('capsfilter') 36 | 37 | def handle_updated_props(self): 38 | super().handle_updated_props() 39 | if hasattr(self, 'pattern'): 40 | self.videotestsrc.set_property('pattern', self.pattern) 41 | -------------------------------------------------------------------------------- /brave/mixers/__init__.py: -------------------------------------------------------------------------------- 1 | from brave.abstract_collection import AbstractCollection 2 | from brave.mixers.mixer import Mixer 3 | 4 | 5 | class MixerCollection(AbstractCollection): 6 | def add(self, **args): 7 | if 'id' not in args: 8 | args['id'] = self.get_new_id() 9 | mixer = Mixer(**args, collection=self) 10 | self._items[args['id']] = mixer 11 | return mixer 12 | -------------------------------------------------------------------------------- /brave/outputs/__init__.py: -------------------------------------------------------------------------------- 1 | from brave.outputs.local import LocalOutput 2 | from brave.outputs.rtmp import RTMPOutput 3 | from brave.outputs.tcp import TCPOutput 4 | from brave.outputs.image import ImageOutput 5 | from brave.outputs.file import FileOutput 6 | from brave.outputs.webrtc import WebRTCOutput 7 | from brave.outputs.kvs import KvsOutput 8 | from brave.abstract_collection import AbstractCollection 9 | import brave.exceptions 10 | 11 | 12 | class OutputCollection(AbstractCollection): 13 | def add(self, **args): 14 | if 'id' not in args: 15 | args['id'] = self.get_new_id() 16 | 17 | if 'type' not in args: 18 | raise brave.exceptions.InvalidConfiguration("Invalid output, no 'type'") 19 | elif args['type'] == 'local': 20 | output = LocalOutput(**args, collection=self) 21 | elif args['type'] == 'rtmp': 22 | output = RTMPOutput(**args, collection=self) 23 | elif args['type'] == 'tcp': 24 | output = TCPOutput(**args, collection=self) 25 | elif args['type'] == 'image': 26 | output = ImageOutput(**args, collection=self) 27 | elif args['type'] == 'file': 28 | output = FileOutput(**args, collection=self) 29 | elif args['type'] == 'webrtc': 30 | output = WebRTCOutput(**args, collection=self) 31 | elif args['type'] == 'kvs': 32 | output = KvsOutput(**args, collection=self) 33 | else: 34 | raise brave.exceptions.InvalidConfiguration("Invalid output type '%s'" % args['type']) 35 | 36 | self._items[args['id']] = output 37 | return output 38 | -------------------------------------------------------------------------------- /brave/outputs/file.py: -------------------------------------------------------------------------------- 1 | from brave.outputs.output import Output 2 | import brave.config as config 3 | from gi.repository import Gst 4 | 5 | 6 | class FileOutput(Output): 7 | ''' 8 | For outputing to a file 9 | ''' 10 | 11 | def permitted_props(self): 12 | return { 13 | **super().permitted_props(), 14 | 'width': { 15 | 'type': 'int', 16 | 'default': config.default_mixer_width() 17 | }, 18 | 'height': { 19 | 'type': 'int', 20 | 'default': config.default_mixer_height() 21 | }, 22 | 'location': { 23 | 'type': 'str' 24 | } 25 | } 26 | 27 | def create_elements(self): 28 | pipeline_string = 'mp4mux name=mux ! 
filesink name=sink' 29 | 30 | if config.enable_video(): 31 | pipeline_string += ' ' + self._video_pipeline_start() + 'x264enc name=video_encoder ! queue ! mux.' 32 | 33 | if config.enable_audio(): 34 | audio_pipeline_string = ('interaudiosrc name=interaudiosrc ! ' 35 | 'audioconvert ! audioresample ! avenc_aac name=audio_encoder') 36 | 37 | # A larger queue size enables the video encoder to take longer 38 | audio_pipeline_string += f' ! queue max-size-bytes={10*(3 ** 20)} ! mux.' 39 | 40 | pipeline_string = pipeline_string + ' ' + audio_pipeline_string 41 | 42 | self.create_pipeline_from_string(pipeline_string) 43 | self.logger.debug('Writing to the file ' + self.location) 44 | sink = self.pipeline.get_by_name('sink') 45 | sink.set_property('location', self.location) 46 | 47 | if config.enable_video(): 48 | self.video_encoder = self.pipeline.get_by_name('video_encoder') 49 | 50 | if config.enable_audio(): 51 | self.audio_encoder = self.pipeline.get_by_name('audio_encoder') 52 | 53 | def set_pipeline_state(self, new_state): 54 | sent_eos = False 55 | # If this is ending the file creation (identified by moving to READY or NULL) 56 | # we must send an EOS so that the file is completed correctly. 57 | if (new_state == Gst.State.READY or new_state == Gst.State.NULL): 58 | 59 | for encoder_name in ['video_encoder', 'audio_encoder']: 60 | if hasattr(self, encoder_name): 61 | encoder = getattr(self, encoder_name) 62 | encoder_state = encoder.get_state(0).state 63 | if encoder_state in [Gst.State.PAUSED, Gst.State.PLAYING]: 64 | if encoder.send_event(Gst.Event.new_eos()): 65 | self.logger.debug('Successfully send EOS event to the ' + encoder_name) 66 | sent_eos = True 67 | else: 68 | self.logger.warning('Failed to send EOS event to the %s' % encoder_name) 69 | 70 | # If we've sent an EOS, allow that to propogate the pipeline. 71 | # (Separate code will then catch the EOS successful message and cause a state change.) 72 | # Otherwise, lets go ahead and set the state of the pipeline. 73 | if sent_eos: 74 | return 75 | 76 | return super().set_pipeline_state(new_state) 77 | 78 | def create_caps_string(self): 79 | # format=I420 ensures the mp4 is playable with QuickTime. 80 | return super().create_caps_string(format='I420') 81 | -------------------------------------------------------------------------------- /brave/outputs/image.py: -------------------------------------------------------------------------------- 1 | from brave.outputs.output import Output 2 | import brave.config as config 3 | import os 4 | import random 5 | 6 | 7 | class ImageOutput(Output): 8 | ''' 9 | For creating an image file of the output. 10 | ''' 11 | 12 | def permitted_props(self): 13 | return { 14 | **super().permitted_props(), 15 | 'width': { 16 | 'type': 'int', 17 | 'default': 640 18 | }, 19 | 'height': { 20 | 'type': 'int', 21 | 'default': 360 22 | }, 23 | 'update_frequency': { 24 | 'type': 'int', 25 | 'default': 1 26 | }, 27 | 'location': { 28 | 'type': 'str', 29 | # TODO reconsider this default: 30 | 'default': '/usr/local/share/brave/output_images/img_%d.jpg' % random.randint(10000, 20000) 31 | } 32 | } 33 | 34 | def has_audio(self): 35 | return False 36 | 37 | def create_caps_string(self): 38 | return super().create_caps_string(format='RGB') + ',framerate=1/' + str(self.update_frequency) 39 | 40 | def create_elements(self): 41 | if not config.enable_video(): 42 | return 43 | self.__delete_file_if_exists() 44 | pipeline_string = self._video_pipeline_start() + 'jpegenc ! 
multifilesink name=sink' 45 | self.create_pipeline_from_string(pipeline_string) 46 | sink = self.pipeline.get_by_name('sink') 47 | sink.set_property('location', self.location) 48 | 49 | def __delete_file_if_exists(self): 50 | try: 51 | os.remove(self.location) 52 | except FileNotFoundError: 53 | pass 54 | -------------------------------------------------------------------------------- /brave/outputs/kvs.py: -------------------------------------------------------------------------------- 1 | from brave.outputs.output import Output 2 | import brave.config as config 3 | import brave.exceptions 4 | import os 5 | 6 | 7 | class KvsOutput(Output): 8 | ''' 9 | For outputting to AWS's Kinesis Video Stream 10 | ''' 11 | 12 | def permitted_props(self): 13 | return { 14 | **super().permitted_props(), 15 | 'width': { 16 | 'type': 'int', 17 | 'default': 640, 18 | }, 19 | 'height': { 20 | 'type': 'int', 21 | 'default': 360 22 | }, 23 | 'stream_name': { 24 | 'type': 'str' 25 | } 26 | } 27 | 28 | def create_elements(self): 29 | if not config.enable_video(): 30 | return 31 | 32 | access_key = os.environ['AWS_ACCESS_KEY_ID'] 33 | secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] 34 | if not access_key: 35 | raise brave.exceptions.InvalidConfiguration('Missing AWS_ACCESS_KEY_ID environment variable') 36 | if not secret_key: 37 | raise brave.exceptions.InvalidConfiguration('Missing AWS_SECRET_ACCESS_KEY environment variable') 38 | 39 | pipeline_string = (self._video_pipeline_start() + 'x264enc bframes=0 key-int-max=45 bitrate=500 ! ' 40 | 'video/x-h264,stream-format=avc,alignment=au ! kvssink name=kvssink') 41 | 42 | self.create_pipeline_from_string(pipeline_string) 43 | 44 | kvssink = self.pipeline.get_by_name('kvssink') 45 | kvssink.set_property('access-key', access_key) 46 | kvssink.set_property('secret-key', secret_key) 47 | kvssink.set_property('stream-name', self.stream_name) 48 | 49 | def create_caps_string(self): 50 | return super().create_caps_string(format='I420') + ',framerate=30/1' 51 | -------------------------------------------------------------------------------- /brave/outputs/local.py: -------------------------------------------------------------------------------- 1 | from brave.outputs.output import Output 2 | import brave.config as config 3 | import brave.exceptions 4 | 5 | 6 | class LocalOutput(Output): 7 | ''' 8 | For previewing audio and video locally. 9 | ''' 10 | 11 | def permitted_props(self): 12 | return { 13 | **super().permitted_props(), 14 | 'width': { 15 | 'type': 'int', 16 | 'default': 640, 17 | }, 18 | 'height': { 19 | 'type': 'int', 20 | 'default': 360 21 | } 22 | } 23 | 24 | def check_item_can_be_created(self): 25 | ''' 26 | Prevent more than one local output from being created. It crashes (on MacOS at least). 27 | ''' 28 | other_local_outputs = dict((k, v) for k, v in self.session().outputs.items() if isinstance(v, LocalOutput)) 29 | if len(other_local_outputs): 30 | raise brave.exceptions.InvalidConfiguration('There cannot be more than one local output') 31 | 32 | def create_elements(self): 33 | pipeline_string = '' 34 | if config.enable_video(): 35 | pipeline_string += self._video_pipeline_start() + 'queue ! glimagesink' 36 | if config.enable_audio(): 37 | pipeline_string += ' interaudiosrc name=interaudiosrc ! queue ! 
autoaudiosink' 38 | 39 | self.create_pipeline_from_string(pipeline_string) 40 | 41 | def create_caps_string(self): 42 | # format=RGB removes the alpha channel which can crash glimagesink 43 | return super().create_caps_string(format='RGB') 44 | -------------------------------------------------------------------------------- /brave/outputs/rtmp.py: -------------------------------------------------------------------------------- 1 | from brave.outputs.output import Output 2 | import brave.config as config 3 | 4 | 5 | class RTMPOutput(Output): 6 | ''' 7 | For sending an output to a third-party RTMP server (such as Facebook Live). 8 | ''' 9 | 10 | def permitted_props(self): 11 | return { 12 | **super().permitted_props(), 13 | 'uri': { 14 | 'type': 'str' 15 | }, 16 | 'width': { 17 | 'type': 'int', 18 | 'default': config.default_mixer_width() 19 | }, 20 | 'height': { 21 | 'type': 'int', 22 | 'default': config.default_mixer_height() 23 | } 24 | } 25 | 26 | def create_elements(self): 27 | ''' 28 | Create the elements needed whether this is audio, video, or both 29 | ''' 30 | pipeline_string = 'flvmux name=mux streamable=true ! rtmpsink name=sink ' 31 | 32 | if config.enable_video(): 33 | # key-int-max=60 puts a keyframe every 2 seconds (60 as 2*framerate) 34 | pipeline_string += ' ' + self._video_pipeline_start() + \ 35 | 'x264enc name=video_encoder tune=zerolatency key-int-max=30 ! h264parse ! queue ! mux.' 36 | 37 | if config.enable_audio(): 38 | pipeline_string += ' ' + self._audio_pipeline_start() + \ 39 | 'avenc_aac name=audio_encoder ! aacparse ! audio/mpeg, mpegversion=4 ! queue ! mux.' 40 | 41 | self.create_pipeline_from_string(pipeline_string) 42 | self.pipeline.get_by_name('sink').set_property('location', self.uri + ' live=1') 43 | 44 | self.logger.info('RTMP output now configured to send to ' + self.uri) 45 | 46 | def create_caps_string(self): 47 | # framerate=30/1 because Facebook Live and YouTube live want this framerate. 
48 | # profile=baseline may be superfluous but some have recommended it for Facebook 49 | # need to look at this some more, to see what else can be tuned up 50 | #return super().create_caps_string(format='I420') + ',framerate=30/1,profile=baseline' 51 | return super().create_caps_string(format='I420') + ',framerate=30/1,profile=baseline' 52 | -------------------------------------------------------------------------------- /brave/outputs/tcp.py: -------------------------------------------------------------------------------- 1 | from brave.outputs.output import Output 2 | import socket 3 | import brave.config as config 4 | 5 | 6 | class TCPOutput(Output): 7 | ''' 8 | For outputting as a TCP server (which VLC can play) 9 | ''' 10 | 11 | def permitted_props(self): 12 | return { 13 | **super().permitted_props(), 14 | 'host': { 15 | 'type': 'str' 16 | }, 17 | 'port': { 18 | 'type': 'int' 19 | }, 20 | 'width': { 21 | 'type': 'int', 22 | 'default': config.default_mixer_width() 23 | }, 24 | 'height': { 25 | 'type': 'int', 26 | 'default': config.default_mixer_height() 27 | }, 28 | 'audio_bitrate': { 29 | 'type': 'int', 30 | 'default': 128000 31 | }, 32 | 'container': { 33 | 'type': 'str', 34 | 'default': 'mpeg', 35 | 'permitted_values': { 36 | 'mpeg': 'MPEG', 37 | 'ogg': 'OGG' 38 | } 39 | } 40 | } 41 | 42 | def create_elements(self): 43 | ''' 44 | Create the elements needed whether this is audio, video, or both 45 | ''' 46 | mux_type = 'oggmux' if self.container == 'ogg' else 'mpegtsmux' 47 | video_encoder_type = 'theoraenc' if self.container == 'ogg' else 'x264enc' 48 | audio_encoder_type = 'vorbisenc' if self.container == 'ogg' else 'avenc_ac3' 49 | 50 | pipeline_string = 'queue name=queue ! tcpserversink name=sink' 51 | 52 | # We only want a mux if there's video: 53 | has_mux = config.enable_video() 54 | if has_mux: 55 | pipeline_string = f'{mux_type} name=mux ! {pipeline_string}' 56 | 57 | if config.enable_video(): 58 | pipeline_string += ' ' + self._video_pipeline_start() + video_encoder_type + ' name=encoder ! queue ! mux.' 59 | 60 | if config.enable_audio(): 61 | audio_bitrate = self.audio_bitrate 62 | 63 | # Having default_audio_caps() in the pipeline stops them from changing and interrupting the encoder. 64 | audio_pipeline_string = ('interaudiosrc name=interaudiosrc ! ' + config.default_audio_caps() + 65 | ' ! audioconvert ! audioresample ! %s name=audio_encoder bitrate=%d') % \ 66 | (audio_encoder_type, audio_bitrate) 67 | if has_mux: 68 | audio_pipeline_string += ' ! queue ! mux.' 69 | else: 70 | audio_pipeline_string += ' ! queue.' 71 | 72 | pipeline_string = pipeline_string + ' ' + audio_pipeline_string 73 | 74 | self.create_pipeline_from_string(pipeline_string) 75 | 76 | if config.enable_video(): 77 | # pass 78 | if self.container == 'mpeg': 79 | # Testing has shown 60 (i.e.
once every 2s at 30 fps) works best 80 | self.pipeline.get_by_name('encoder').set_property('key-int-max', 60) 81 | 82 | # tune=zerolatency reduces the delay of TCP output 83 | # self.pipeline.get_by_name('encoder').set_property('tune', 'zerolatency') 84 | 85 | if not hasattr(self, 'host'): 86 | self.host = socket.gethostbyname(socket.gethostname()) 87 | if not hasattr(self, 'port'): 88 | self.port = self._get_next_available_port() 89 | 90 | sink = self.pipeline.get_by_name('sink') 91 | sink.set_property('port', int(self.port)) 92 | sink.set_property('host', self.host) 93 | sink.set_property('recover-policy', 'keyframe') 94 | sink.set_property('sync', False) 95 | 96 | self.logger.info('TCP output created at tcp://%s:%s' % (self.host, self.port)) 97 | 98 | def _get_next_available_port(self): 99 | ports_in_use = self.get_ports_in_use() 100 | PORT_RANGE_START = 7000 101 | port = PORT_RANGE_START 102 | while True: 103 | if port not in ports_in_use: 104 | return port 105 | port += 1 106 | 107 | def get_ports_in_use(self): 108 | ports_in_use = [] 109 | for name, output in self.session().outputs.items(): 110 | if hasattr(self, 'port'): 111 | ports_in_use.append(int(output.port)) 112 | return ports_in_use 113 | 114 | def create_caps_string(self): 115 | # x264enc cannot accept RGB format, so we move to one that it does (I420) 116 | return super().create_caps_string(format='I420') 117 | -------------------------------------------------------------------------------- /brave/overlays/clock.py: -------------------------------------------------------------------------------- 1 | from brave.overlays.text import TextOverlay 2 | 3 | 4 | class ClockOverlay(TextOverlay): 5 | ''' 6 | For doing a text overlay (text graphic). 7 | ''' 8 | 9 | def create_elements(self): 10 | self.element = self.source.add_element('clockoverlay', self, audio_or_video='video') 11 | self.set_element_values_from_props() 12 | -------------------------------------------------------------------------------- /brave/overlays/effect.py: -------------------------------------------------------------------------------- 1 | from brave.overlays.overlay import Overlay 2 | from gi.repository import Gst 3 | 4 | 5 | class EffectOverlay(Overlay): 6 | ''' 7 | For doing applying a video effect. 8 | ''' 9 | 10 | def permitted_props(self): 11 | return { 12 | **super().permitted_props(), 13 | 'effect_name': { 14 | 'type': 'str', 15 | 'default': 'edgetv', 16 | 'permitted_values': { 17 | 'agingtv': 'AgingTV effect', 18 | 'burn': 'Burn', 19 | 'chromium': 'Chromium', 20 | 'dicetv': 'DiceTV effect', 21 | 'dilate': 'Dilate', 22 | 'dodge': 'Dodge', 23 | 'edgetv': 'EdgeTV effect', 24 | 'exclusion': 'Exclusion', 25 | 'optv': 'OpTV effect', 26 | 'radioactv': 'RadioacTV effect', 27 | 'revtv': 'RevTV effect', 28 | 'rippletv': 'RippleTV effect', 29 | 'solarize': 'Solarize', 30 | 'streaktv': 'StreakTV effect', 31 | 'vertigotv': 'VertigoTV effect', 32 | 'warptv': 'WarpTV effect' 33 | # Note: quarktv and shagadelictv are removed as they were unreliable in testing 34 | } 35 | }, 36 | 'visible': { 37 | 'type': 'bool', 38 | 'default': False 39 | } 40 | } 41 | 42 | def create_elements(self): 43 | # The effects filters can mess with the alpha channel. 44 | # The best solution I've found is to allow it to move into RGBx, then force a detour via RGB 45 | # to remove the alpha channel, before moving back to our default RGBA. 46 | # This is done in a 'bin' so that the overlay can be manipulated as one thing. 47 | desc = ('videoconvert ! %s ! videoconvert ! 
capsfilter caps="video/x-raw,format=RGB" ! ' 48 | 'videoconvert ! capsfilter caps="video/x-raw,format=RGBA"') % self.effect_name 49 | self.element = Gst.parse_bin_from_description(desc, True) 50 | self.element.set_name('%s_bin' % self.uid) 51 | place_to_add_elements = getattr(self.source, 'final_video_tee').parent 52 | if not place_to_add_elements.add(self.element): 53 | self.logger.warning('Unable to add effect overlay bin to the source pipeline') 54 | -------------------------------------------------------------------------------- /brave/overlays/overlay.py: -------------------------------------------------------------------------------- 1 | from gi.repository import Gst 2 | from brave.inputoutputoverlay import InputOutputOverlay 3 | import brave.exceptions 4 | 5 | 6 | class Overlay(InputOutputOverlay): 7 | ''' 8 | An abstract superclass representing an overlay. 9 | ''' 10 | 11 | def __init__(self, **args): 12 | source_uid = None 13 | if 'source' in args: 14 | source_uid = args['source'] 15 | del args['source'] 16 | 17 | super().__init__(**args) 18 | self._set_source(source_uid) 19 | self.visible = self.visible 20 | 21 | def input_output_overlay_or_mixer(self): 22 | return 'overlay' 23 | 24 | def has_audio(self): 25 | return False # no such thing as audio on overlays 26 | 27 | def summarise(self, for_config_file=False): 28 | s = super().summarise(for_config_file) 29 | s['source'] = self.source.uid if self.source else None 30 | return s 31 | 32 | def update(self, updates): 33 | ''' 34 | Handle updates to this overlay. Overridden to handle update to the input/mixer source. 35 | ''' 36 | if 'source' in updates and self.source != updates['source']: 37 | self._set_source(updates['source']) 38 | self.report_update_to_user() 39 | del updates['source'] 40 | 41 | if 'visible' in updates: 42 | if not self.visible and updates['visible']: 43 | if not self.source: 44 | raise brave.exceptions.InvalidConfiguration( 45 | 'Cannot make overlay %d visible - source not set' % self.id) 46 | self.logger.debug('Becoming visible') 47 | self._make_visible() 48 | if self.visible and not updates['visible']: 49 | self.logger.debug('Becoming invisible') 50 | self._make_invisible() 51 | 52 | return super().update(updates) 53 | 54 | def _set_source(self, new_source_uid): 55 | ''' 56 | Called when a new source (input or mixer) is set by the user (either creation or update). 57 | ''' 58 | if not hasattr(self, 'source'): 59 | self.source = None 60 | 61 | # Special case - user specifying no source 62 | if new_source_uid is None: 63 | if self.source is None: 64 | return 65 | self._delete_elements() 66 | self.source = None 67 | return 68 | 69 | if hasattr(self, 'source') and self.source is not None and self.source.uid == new_source_uid: 70 | return 71 | 72 | # If overlay is visible, then it's attached. We must make it invisible first. 73 | visible = hasattr(self, 'visible') and self.visible 74 | self._delete_elements() 75 | 76 | self.source = self.session().uid_to_block(new_source_uid, error_if_not_exists=True) 77 | 78 | if self.source is not None: 79 | self.create_elements() 80 | if visible: 81 | self._make_visible() 82 | 83 | def handle_updated_props(self): 84 | ''' 85 | Called after the props have been set/updated, to update the elements 86 | ''' 87 | if self.source is None: 88 | return 89 | self.set_element_values_from_props() 90 | 91 | def set_element_values_from_props(self): 92 | pass 93 | 94 | def delete(self): 95 | ''' 96 | Delete this overlay. Works whether the overlay is visible or not. 
97 | ''' 98 | self._delete_elements() 99 | self.collection.pop(self.id) 100 | self.session().report_deleted_item(self) 101 | 102 | def _delete_elements(self): 103 | if self.source is not None: 104 | self._make_invisible() 105 | self.element.set_state(Gst.State.NULL) 106 | if not self.element.parent.remove(self.element): 107 | self.logger.warning('Whilst deleting, unable to remove elements') 108 | 109 | def _make_visible(self): 110 | self.element.sync_state_with_parent() 111 | self.visible = True 112 | self.collection.ensure_overlays_are_correctly_connected(self.source) 113 | 114 | def _make_invisible(self): 115 | self.visible = False 116 | self.collection.ensure_overlays_are_correctly_connected(self.source) 117 | 118 | def ensure_src_pad_not_blocked(self): 119 | ''' 120 | When unlinking the source (output) pad, it is blocked. 121 | This unblocks it again, so should be called when it is reconnected. 122 | ''' 123 | if hasattr(self, 'src_block_probe'): 124 | src_pad = self.element.get_static_pad('src') 125 | src_pad.remove_probe(self.src_block_probe) 126 | delattr(self, 'src_block_probe') 127 | 128 | def get_sort_value(self): 129 | return self.id 130 | -------------------------------------------------------------------------------- /brave/overlays/text.py: -------------------------------------------------------------------------------- 1 | from brave.overlays.overlay import Overlay 2 | 3 | 4 | class TextOverlay(Overlay): 5 | ''' 6 | For doing a text overlay (text graphic). 7 | ''' 8 | 9 | def permitted_props(self): 10 | return { 11 | **super().permitted_props(), 12 | 'text': { 13 | 'type': 'str', 14 | 'default': 'Default text', 15 | }, 16 | 'font_size': { 17 | 'type': 'int', 18 | 'default': 18, 19 | }, 20 | 'valignment': { 21 | 'type': 'str', 22 | 'default': 'bottom', 23 | 'permitted_values': { 24 | 'top': 'Top', 25 | 'center': 'Center', 26 | 'bottom': 'Bottom', 27 | 'baseline': 'Baseline', 28 | }, 29 | }, 30 | 'halignment': { 31 | 'type': 'str', 32 | 'default': 'left', 33 | 'permitted_values': { 34 | 'left': 'Left', 35 | 'center': 'Center', 36 | 'right': 'Right', 37 | }, 38 | }, 39 | 'outline': { 40 | 'type': 'bool', 41 | 'default': False, 42 | }, 43 | 'shadow': { 44 | 'type': 'bool', 45 | 'default': True, 46 | }, 47 | 'shaded_background': { 48 | 'type': 'bool', 49 | 'default': False, 50 | }, 51 | 'visible': { 52 | 'type': 'bool', 53 | 'default': False, 54 | }, 55 | } 56 | 57 | def create_elements(self): 58 | self.element = self.source.add_element('textoverlay', self, audio_or_video='video') 59 | self.set_element_values_from_props() 60 | 61 | def set_element_values_from_props(self): 62 | self.element.set_property('text', self.text) 63 | self.element.set_property('valignment', self.valignment) 64 | self.element.set_property('halignment', self.halignment) 65 | self.element.set_property('font-desc', 'Sans, %d' % self.font_size) 66 | self.element.set_property('draw-outline', self.outline) 67 | self.element.set_property('draw-shadow', self.shadow) 68 | self.element.set_property('shaded-background', self.shaded_background) 69 | -------------------------------------------------------------------------------- /config/default.yaml: -------------------------------------------------------------------------------- 1 | # ## 2 | ## Video, audio, or both? 
Default is both 3 | ## 4 | enable_video: true 5 | enable_audio: true 6 | 7 | ## 8 | ## Default width and height (If omitted, it will be 640,360) 9 | ## 10 | default_mixer_width: 1280 11 | default_mixer_height: 720 12 | 13 | ## 14 | ## DEFAULT INPUTS 15 | ## Here you can define inputs that should exist at startup. 16 | ## Types available: 'uri', 'image', 'test_video', 'test_audio' 17 | ## 18 | inputs: 19 | # - type: uri 20 | # uri: file:///path/to/file.mp4 21 | # - type: image 22 | # uri: https://upload.wikimedia.org/wikipedia/commons/e/eb/BBC.svg 23 | # - type: test_audio 24 | # freq: 200 25 | # - type: test_video 26 | # pattern: 18 27 | 28 | ## 29 | ## DEFAULT OUTPUTS 30 | ## Here you can define outputs that should exist at startup. 31 | ## Types available: 'rtmp', 'tcp', 'image', 'file', 'local' 32 | ## 33 | outputs: 34 | # - type: local 35 | # - type: file 36 | # location: /path/to/file.mp4 37 | # - type: image 38 | # - type: tcp 39 | # - type: rtmp 40 | # uri: rtmp://domain/path/name 41 | 42 | ## 43 | ## DEFAULT OVERLAYS 44 | ## Types available: 'text', 'clock', 'effect' 45 | ## 46 | overlays: 47 | # - type: text 48 | # text: 'I am some text' 49 | # visible: true 50 | # - type: clock 51 | # valignment: 'top' 52 | # visible: true 53 | # - type: effect 54 | # effect_name: 'warptv' 55 | # visible: true 56 | 57 | ## 58 | ## DEFAULT MIXERS 59 | ## No 'type' field required for mixers. 60 | ## 'props' can have 'width' and 'height' and 'pattern' (all integers) 61 | ## 62 | mixers: 63 | # Default to one mixer, with no specific props: 64 | - {} 65 | # Add inputs to the mixer on start-up like this: 66 | # - sources: 67 | # - uid: input1 68 | # zorder: 2 69 | # width: 160 70 | # height: 90 71 | # - uid: input2 72 | # zorder: 1 73 | # volume: 0 74 | -------------------------------------------------------------------------------- /config/empty.yaml: -------------------------------------------------------------------------------- 1 | ## The simplest example of a config file is... nothing at all. 2 | ## No inputs, mixers, outputs or overlays - they can then be added later via the API. 3 | -------------------------------------------------------------------------------- /config/example_empty.yaml: -------------------------------------------------------------------------------- 1 | ## The simplest example of a config file is... nothing at all. 2 | ## No inputs or outputs - they can then be added later via the API.
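## For example, once Brave is running, an input could be added with an API call along these lines
## (illustrative only; it assumes Brave is listening on its default port, 5000):
##   curl -X PUT -d '{"type": "test_video"}' http://localhost:5000/api/inputs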
3 | -------------------------------------------------------------------------------- /config/example_four_squares.yaml: -------------------------------------------------------------------------------- 1 | enable_audio: false 2 | 3 | inputs: 4 | - type: test_video 5 | pattern: 18 6 | width: 320 7 | height: 180 8 | - type: test_video 9 | pattern: 18 10 | width: 320 11 | height: 180 12 | - type: test_video 13 | pattern: 18 14 | width: 320 15 | height: 180 16 | - type: test_video 17 | pattern: 18 18 | width: 320 19 | height: 180 20 | 21 | outputs: 22 | - type: local 23 | 24 | mixers: 25 | - width: 640 26 | height: 360 27 | sources: 28 | - uid: input1 29 | width: 320 30 | height: 180 31 | - uid: input2 32 | xpos: 320 33 | width: 320 34 | height: 180 35 | - uid: input3 36 | ypos: 180 37 | width: 320 38 | height: 180 39 | - uid: input4 40 | xpos: 320 41 | ypos: 180 42 | width: 320 43 | height: 180 44 | -------------------------------------------------------------------------------- /config/example_test_sounds.yaml: -------------------------------------------------------------------------------- 1 | enable_video: false 2 | 3 | inputs: 4 | - type: test_audio 5 | props: 6 | freq: 200 7 | - type: test_audio 8 | props: 9 | freq: 400 10 | volume: 1.0 11 | - type: test_audio 12 | props: 13 | freq: 600 14 | volume: 0.2 15 | 16 | outputs: 17 | - type: local 18 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | # Microservices 4 | services: 5 | 6 | # Brave Server Configuration 7 | BRAVE: 8 | container_name: bitwave-brave 9 | image: bitwavetv/brave:latest 10 | 11 | ports: 12 | - "5000:5000" 13 | 14 | # Mount archives 15 | volumes: 16 | - ./config:/config 17 | #- /srv/brave/videos:/videos 18 | 19 | environment: 20 | - PORT=5000 21 | - GST_DEBUG=3 22 | - LOG_LEVEL=debug 23 | - FORCE_COLOR=1 24 | 25 | deploy: 26 | replicas: 1 27 | restart_policy: 28 | condition: any 29 | delay: 5s 30 | window: 10s 31 | -------------------------------------------------------------------------------- /docs/assets/arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/docs/assets/arch.png -------------------------------------------------------------------------------- /docs/assets/blocks_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/docs/assets/blocks_example.png -------------------------------------------------------------------------------- /docs/assets/brave-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/docs/assets/brave-screenshot.png -------------------------------------------------------------------------------- /docs/assets/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/docs/assets/screenshot.png -------------------------------------------------------------------------------- /docs/assets/video_wall.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/docs/assets/video_wall.png -------------------------------------------------------------------------------- /docs/building_webrender.md: -------------------------------------------------------------------------------- 1 | # WebRenderSrc (WIP) 2 | 3 | We also ship an experimental CEF-based renderer which can be given a URL and renders it to a video src. The intention is to use this for HTML graphics. 4 | 5 | ## Building 6 | The build tools require CMake, which can be installed with `brew install cmake`. To compile on macOS, please make sure you have Xcode and the Xcode command-line tools installed and available on the `$PATH` 7 | ``` 8 | cd gst-WebRenderSrc 9 | CC=clang CXX=clang++ cmake . 10 | make 11 | make install 12 | ``` 13 | 14 | ## Dependencies 15 | The make script will try to fetch and locate as many of the required libs as possible before building. This package uses a precompiled CEF build made available with thanks to Spotify. 16 | 17 | If you have an error on macosx64 along the lines of `ld: library not found for -lintl`, this may be due to the compiler requiring gettext. 18 | To install it on macOS, use brew. 19 | 20 | ``` 21 | brew install gettext 22 | brew link --force gettext 23 | ``` 24 | 25 | This package also requires libcef to function correctly. On Windows and Linux it is relatively simple to link to; on macOS, CEF was designed to function with app bundling. 26 | 27 | To get around this issue in a _hacky_ way, we just move the CEF frameworks to `/usr/local/Frameworks` and use `install_name_tool` to change where the plugin can find the references. 28 | 29 | TODO: Make this less hacky or figure out how the bundling method works with .so objects 🤔 30 | 31 | ``` 32 | cp "/Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" "/usr/local/Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" 33 | 34 | install_name_tool -change "@rpath/Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" "/usr/local/Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" ./src/Release/libwebrendersrc.so 35 | ``` 36 | -------------------------------------------------------------------------------- /docs/config_file.md: -------------------------------------------------------------------------------- 1 | # Brave config file 2 | [Brave](../README.md) can be configured by a config file. 3 | This includes being able to set inputs, mixers, outputs and overlays that are created when Brave starts. It is an alternative to configuring Brave via the [API](api.md). 4 | 5 | Brave does not reconsider the config file after it has started. To configure Brave after it has started (e.g. to add another input), use the API. 6 | 7 | ## Contents 8 | 9 | - [Selecting a config file](#selecting-a-config-file) 10 | - [Default config file](#default-config-file) 11 | - [Creating a config file](#creating-a-config-file) 12 | + [Inputs](#inputs) 13 | + [Mixers](#mixers) 14 | + [Outputs](#outputs) 15 | + [Overlays](#overlays) 16 | + [Disabling audio or video](#disabling-audio-or-video) 17 | + [Video width and height](#video-width-and-height) 18 | + [STUN and TURN servers](#stun-and-turn-servers) 19 | 20 | 21 | 22 | ## Selecting a config file 23 | Provide Brave with the config file at startup with the `-c` parameter, e.g.
24 | 25 | ``` 26 | ./brave.py -c config/empty.yaml 27 | ``` 28 | 29 | ## Default config file 30 | The default config file can be found at `config/default.yaml`. 31 | 32 | It creates one mixer, and no inputs or outputs. 33 | 34 | ## Creating a config file 35 | Config files are written in [YAML](http://yaml.org/), and are simple to create by hand. 36 | 37 | The following options can be included in the config file. 38 | 39 | ### Inputs 40 | Use the `inputs` entry to provide an array of inputs that should be created when Brave starts. 41 | 42 | Example: 43 | 44 | ``` 45 | inputs: 46 | - type: test_video 47 | - type: uri 48 | state: PAUSED 49 | uri: rtmp://184.72.239.149/vod/BigBuckBunny_115k.mov 50 | - type: image 51 | uri: file:///home/user/images/image.jpg 52 | ``` 53 | 54 | Each input must have a type (e.g. `uri`). Inputs also have a range of other properties. For the full list, see the [inputs](inputs.md) page. 55 | 56 | 57 | ### Mixers 58 | Use the `mixers` entry to provide an array of inputs that should be created when Brave starts. If omitted, one mixer will automatically be created. 59 | 60 | 61 | Example: 62 | 63 | ``` 64 | mixers: 65 | - width: 640 66 | height: 360 67 | pattern: 6 68 | source: 69 | input1: {} 70 | ``` 71 | 72 | Unlike inputs, outputs and overlays, mixers do not have a type. The [mixers](mixers.md) page shows the properties that a mixer can have. 73 | 74 | ### Outputs 75 | Use `outputs` to define an array of outputs that should be created when Brave starts. 76 | 77 | Example (creating four outputs of different types): 78 | 79 | ``` 80 | outputs: 81 | - type: local 82 | state: READY 83 | input_id: 0 84 | source: mixer1 85 | - type: image 86 | source: input1 87 | - type: tcp 88 | state: READY 89 | source: mixer1 90 | - type: rtmp 91 | uri: rtmp://domain/path/name 92 | ``` 93 | 94 | Each output must have a type (either 'rtmp', 'tcp', 'image', 'file', 'local', or 'webrtc'). Outputs also have a range of other properties. For the full list, see the [outputs](outputs.md) page. 95 | 96 | ### Overlays 97 | `overlays` is an array of overlays that should be created when Brave starts. 98 | 99 | Example: 100 | 101 | ``` 102 | overlays: 103 | - type: text 104 | text: 'I am some text' 105 | visible: true 106 | source: mixer1 107 | - type: effect 108 | effect_name: warptv 109 | ``` 110 | 111 | Each overlay must have a type (either 'text', 'clock', or 'effect'). 112 | Overlays also have a range of other properties. For the full list, see the [overlays](overlays.md) page. 113 | 114 | ### Disabling audio or video 115 | By default Brave handles video and audio. To disable audio, add the line: 116 | 117 | ``` 118 | enable_audio: false 119 | ``` 120 | 121 | To disable video, add the line: 122 | 123 | ``` 124 | enable_video: false 125 | ``` 126 | 127 | Note that audio and video cannot be enabled/disabled via the API. 128 | 129 | ### Video width and height 130 | The `default_mixer_height` and `default_mixer_width` values allow you to set the default width and height for a mixer. 131 | The default is a width of 640 and a height of 360. 132 | 133 | ### STUN and TURN servers 134 | Up to one STUN server and/or one TURN server can be provided. Use the `stun_server` and `turn_server` fields. 
135 | 136 | Example: 137 | 138 | ``` 139 | stun_server: stun.l.google.com:19302 140 | turn_server: my_name:my_password@my_turn_server_hostname 141 | ``` 142 | -------------------------------------------------------------------------------- /docs/faq.md: -------------------------------------------------------------------------------- 1 | # Frequently Asked Questions 2 | 3 | ## What is Brave? 4 | [Brave](../README.md) is a wrapper around parts of [GStreamer](http://gstreamer.freedesktop.org/) that allows live audio and video streams to be handled via a RESTful API. Content can be received, mixed, monitored, and sent to other destinations. It allows multiple 'blocks' - inputs, outputs, mixers and overlays - to be created and connected. Brave is designed to work remotely, such as on the cloud. 5 | 6 | ## What do the four states (NULL, READY, PAUSED, PLAYING) mean? 7 | Inputs, mixers and outputs are always in one of four states. These states are the same as [GStreamer's states](https://gstreamer.freedesktop.org/documentation/design/states.html). 8 | 9 | | Name | Meaning | 10 | | ---- | ------- | 11 | | `NULL` | Block is not initialised, or has had an error. | 12 | | `READY` | Block it not currently playing. | 13 | | `PAUSED` | Block has connected and the content is paused. | 14 | | `PLAYING` | Block is successfully playing content. | 15 | 16 | 17 | ## Can support be added for another input or output type/protocol? 18 | Because Brave is based on GStreamer, it can only support what GStreamer supports. It cannot act as an RTMP server, for example, because there is no GStreamer element that can do that. 19 | 20 | Where GStreamer plugins do exist, adding them as a new form of input or output should be relatively easy. Start with an existing one (in the `brave/inputs` and `brave/outputs` directories) and clone it. Then please raise as a pull request. Or you could [request it by raising an issue](https://github.com/bbc/brave/issues). 21 | 22 | ## How do I... ? 23 | See the [How-To](howto.md) page. 24 | 25 | ## I've found a bug 26 | Please [raise an issue in GitHub](https://github.com/bbc/brave/issues). 27 | 28 | ## Can I contribute? 29 | Yes, pull requests are welcome. 30 | Please ensure the tests are passing and the flake8 linting is returning success first. (Information on these can be found in the [README](../README.md)). 31 | -------------------------------------------------------------------------------- /docs/howto.md: -------------------------------------------------------------------------------- 1 | # Brave How-To Guide 2 | This is a how-to guide for some common use-cases of [Brave](../README.md). 3 | See also the [FAQ](faq.md), as well as documentation on the [config file](config_file.md) and [API](api.md). 4 | 5 | ## How to use Brave as an audio mixer 6 | Brave can be set to handle just audio, and no video. To do so, create a config file containing `enable_video: false`. Then start Brave providing it, for example: 7 | 8 | ``` 9 | echo 'enable_video: false' > /tmp/no_video.yaml 10 | ./brave.py -c /tmp/no_video.yaml 11 | ``` 12 | 13 | Here is a richer config file example, setting audio-only and two MP3 inputs: 14 | 15 | ``` 16 | enable_video: false 17 | 18 | inputs: 19 | - type: uri 20 | uri: "file:///path/to/music1.mp3" 21 | loop: true 22 | - type: uri 23 | uri: "file:///path/to/music2.mp3" 24 | loop: true 25 | 26 | mixers: 27 | - sources: 28 | - uid: input1 29 | 30 | outputs: 31 | - type: local 32 | source: mixer1 33 | ``` 34 | 35 | The mixer has the first input as its source. 
To switch to the other source, use either the web interface, or the API, e.g. 36 | 37 | ``` 38 | # Switch to input 2: 39 | curl -X POST -d '{"source": "input2"}' http://localhost:5000/api/mixers/1/cut_to_source 40 | ``` 41 | 42 | Mixing two inputs together can be done with `overlay_source` rather than `cut_to_source`. 43 | 44 | Seeking to a certain position in (non-live) audio can be done with the `position` property. Multiply the number of seconds position by 1000000000. For example, this will seek to 60 seconds: 45 | 46 | ``` 47 | # Move input 1 to 60 seconds: 48 | curl -X POST -d '{"position": 60000000000}' http://localhost:5000/inputs/1 49 | ``` 50 | 51 | Adding and removing inputs can also be done via the web interface or API. 52 | 53 | ## How to make a video wall 54 | ![Video wall](assets/video_wall.png "Video wall") 55 | 56 | A video wall (multiple videos side by side) can be created by resizing video and then declaring where it should appear on the mix. 57 | 58 | This can be done as at start-up as a config file, or via the API. 59 | 60 | An example config file, to make the image above, can be found in the `config` directory as [video_wall.yaml](../config/video_wall.yaml). 61 | 62 | ## How to connect a separate GStreamer pipeline to Brave 63 | The best method to output a video (either with or without audio) from GStreamer to Brave is using the TCP protocol. Use the [`tcpserversink`](https://developer.gnome.org/gst-plugins-libs/stable/gst-plugins-base-plugins-tcpserversink.html) element to act as TCP server; which Brave can then connect to. 64 | 65 | The GStreamer process can be running on the same server as Brave, or a different one that has good network connectivity. 66 | 67 | Here is an example (a moving ball image and an audio test sound): 68 | 69 | ``` 70 | gst-launch-1.0 \ 71 | videotestsrc pattern=ball ! video/x-raw,width=640,height=360 ! \ 72 | x264enc ! muxer. \ 73 | audiotestsrc ! avenc_ac3 ! muxer. \ 74 | mpegtsmux name=muxer ! queue ! \ 75 | tcpserversink host=0.0.0.0 port=13000 recover-policy=keyframe sync-method=latest-keyframe sync=false 76 | ``` 77 | 78 | To connect Brave to this, create an input of type `tcp_client`. This can be done in the start-up config file, or by REST API, or by the web client. For example, to create an input using the REST API, as a Curl command: 79 | 80 | ``` 81 | curl -X PUT -d '{"type": "tcp_client", "host": "0.0.0.0", "port":13001}' http://localhost:5000/api/inputs 82 | ``` 83 | 84 | Not that this input type assumes the content is delivered as an *MPEG* container. Support for an *Ogg* container is also possible by setting the parameter `container` to `ogg`. 85 | 86 | ## How to output Brave to a separate GStreamer pipeline 87 | Like above, a TCP connection works well for this, both on the same box and on remote boxes (with good network connections). 88 | 89 | First, create a `TCP` output in Brave. This creates a TCP Server from which other GStreamers can connect as clients. You can do this in the config file, or GUI, or as an API call on the command line like this: 90 | 91 | ``` 92 | curl -X PUT -d '{"type": "tcp", "source": "mixer1", "port": 13000}' http://localhost:5000/api/outputs 93 | ``` 94 | 95 | Then, create a GStreamer pipeline that listens to that port. For example, this one will play it locally (audio & video): 96 | 97 | ``` 98 | gst-launch-1.0 tcpclientsrc host=0.0.0.0 port=13000 ! tsdemux name=demux ! queue2 max-size-time=3000000000 ! decodebin ! autovideosink demux. ! queue2 max-size-time=3000000000 ! decodebin ! 
autoaudiosink 99 | ``` 100 | 101 | (The large `max-size-time` values help accomodate the key-frame interval.) 102 | -------------------------------------------------------------------------------- /docs/install_kvs.md: -------------------------------------------------------------------------------- 1 | # Installing the 'kvssink' plugin to enable outputting to Kinesis Video Streams 2 | This guide explains how to install [Amazon's Kinesis Video element](https://aws.amazon.com/kinesis/video-streams/) for GStreamer, called 'kvssink'. Doing so allows [Brave](../README.md) to output to a Kinesis video stream. 3 | 4 | These instructions are for Ubuntu 18.10. It should be possible to do something similar for MacOS and CentOS. _(Pull requests to add this to the documentation are very welcome!)_ 5 | 6 | It is assumed that Brave and its dependencies are already installed. [Click here for the guide to installing Brave on Ubuntu.](./install_ubuntu.md) 7 | 8 | _NOTE: The current implementation only sends video, not audio, to Kinesis Video._ 9 | 10 | ## STEP 1: Dependencies 11 | We need some additional dependencies in order to compile Kinesis Video: 12 | 13 | ``` 14 | sudo apt-get install libssl-dev libgirepository1.0-dev liblog4cplus-dev libglib2.0-dev libgstreamer-plugins-bad1.0-dev libssl1.1 yasm libltdl7 libfl2 bison cmake libcurl4-openssl-dev libgtest-dev 15 | ``` 16 | 17 | ## STEP 2: Build Kinesis Video 18 | These instructions don't use the `install-script` that Amazon provides because it compiles its own GStreamer, whereas we want to use the one already installed. 19 | 20 | ``` 21 | git clone https://github.com/awslabs/amazon-kinesis-video-streams-producer-sdk-cpp 22 | cd amazon-kinesis-video-streams-producer-sdk-cpp/kinesis-video-native-build 23 | 24 | PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig/" cmake CMakeLists.txt 25 | make 26 | ``` 27 | 28 | Test what's been made with: 29 | 30 | ``` 31 | gst-inspect-1.0 ./libgstkvssink.so 32 | ``` 33 | 34 | Check that there are no errors. 35 | 36 | Then, manually install the two files into GStreamer's standard location with: 37 | 38 | ``` 39 | sudo cp libgstkvssink.so libproducer.so /usr/lib/x86_64-linux-gnu/gstreamer-1.0/ 40 | ``` 41 | 42 | Now, we can test GStreamer's acceptance of the element with: 43 | 44 | ``` 45 | gst-inspect-1.0 kvssink 46 | ``` 47 | 48 | If that works without error, then we are done! 49 | 50 | ## STEP 3: Use Brave to output to Kinesis Video 51 | 52 | First, [create a Kinesis Video Stream](https://us-west-2.console.aws.amazon.com/kinesisvideo/streams). 53 | 54 | You can instruct Brave to output the stream either by adding it to Brave's config file, or via the API (or web GUI). You can add any number of streams, subject to the capacity of your server (and the size of your Amazon bill!) 55 | 56 | Here is an example config file to set up one stream: 57 | 58 | ``` 59 | outputs: 60 | - type: kvs 61 | stream_name: 'name-of-your-stream' 62 | ``` 63 | 64 | Ensure the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables are set. This is the same as [used by the AWS command-line tool](https://docs.aws.amazon.com/cli/latest/userguide/cli-environment.html). 65 | 66 | For example, if the above config file was written to `kvs.yaml`, then it could be invocated with: 67 | 68 | ``` 69 | AWS_ACCESS_KEY_ID="XXX" AWS_SECRET_ACCESS_KEY="YYY" ./brave.py -c kvs.yaml 70 | ``` 71 | 72 | Happy streaming! 
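For reference, the same output can also be created while Brave is running, using the API rather than the config file. A sketch, assuming Brave is listening on its default port 5000 and reusing the example stream name from above:

```
curl -X PUT -d '{"type": "kvs", "stream_name": "name-of-your-stream"}' http://localhost:5000/api/outputs
```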
73 | -------------------------------------------------------------------------------- /docs/install_macos.md: -------------------------------------------------------------------------------- 1 | # Installing on macOS 2 | 3 | This explains how to install Brave on macOS. 4 | 5 | First up, make sure you have Homebrew. 6 | 7 | ## Installing Dependencies 8 | 9 | Brave uses some of the newer features of Python. As such, we recommend Python 3.6 (or higher). 10 | 11 | ### Managing Dependencies 12 | 13 | Brave uses [Pipenv](https://packaging.python.org/tutorials/managing-dependencies/#managing-dependencies) to manage and isolate its dependencies. 14 | 15 | If it is not installed, please install it using: 16 | 17 | `pip install --user pipenv` or `pip3 install --user pipenv` 18 | 19 | If your Python was installed by brew, please use `brew install pipenv` 20 | 21 | `pipenv install` 22 | 23 | ### Errors while installing 24 | 25 | Brave uses python-gst, which requires the use of GI. This can be a little tricky to get working on macOS with a virtual environment. To get around this we can use vext. It requires the library `libffi`. 26 | 27 | Install the library: 28 | `brew install libffi` 29 | 30 | Add the location to the environment: 31 | `export PKG_CONFIG_PATH="/usr/local/opt/libffi/lib/pkgconfig"` 32 | 33 | Try running the install process again: 34 | `pipenv install` 35 | 36 | ## Installing GStreamer 37 | 38 | First up, dependencies: 39 | 40 | ``` 41 | brew install libnice openssl librsvg libvpx srtp 42 | ``` 43 | 44 | Now on to GStreamer and all of its bits. 45 | Note: versions earlier than 1.14.2 won't work (the .2 point release fixed WebRTC). 46 | 47 | ``` 48 | brew install gstreamer 49 | brew install gst-plugins-base 50 | brew install gst-plugins-good 51 | brew install gst-plugins-bad 52 | brew install gst-plugins-ugly 53 | brew install gst-libav gst-python 54 | ``` 55 | 56 | ## Changes to brew GStreamer packages 57 | Depending on your brew version, some of the options listed above may not be present. In order to add some of these missing dependencies back, we will need to edit the formulae used to build the GStreamer libs. When Brave tries to run, it will look for the missing dependencies and print them out. 58 | 59 | ### Plugins Bad 60 | ``` 61 | brew edit gst-plugins-bad 62 | ``` 63 | Add the following under `depends_on "orc"`: 64 | ``` 65 | depends_on "libnice" => :recommended 66 | depends_on "rtmpdump" => :recommended 67 | depends_on "srtp" => :recommended 68 | ``` 69 | 70 | Then rebuild/install the package from source: `brew reinstall --build-from-source gst-plugins-bad` 71 | 72 | ## All done! 73 | 74 | Try it out: 75 | 76 | `pipenv run python3 brave.py` 77 | -------------------------------------------------------------------------------- /docs/install_ubuntu.md: -------------------------------------------------------------------------------- 1 | # Installing on Ubuntu 2 | These are instructions on how to install [Brave](../README.md), and its dependencies, on Ubuntu. 3 | 4 | ## Ubuntu version 5 | You must use at least version 18.10 in order to access a recent version of GStreamer (1.14.3 or later) via Apt.
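To confirm what your machine provides, the following commands print the Ubuntu release and the GStreamer version (the second one only works once `gstreamer1.0-tools` from STEP 1 below has been installed):

```
lsb_release -a
gst-inspect-1.0 --version
```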
6 | 7 | ## STEP 1: Dependencies 8 | Use `apt-get` to install the bulk of the dependencies: 9 | 10 | ``` 11 | sudo apt-get update 12 | sudo apt-get install -y \ 13 | gcc \ 14 | gir1.2-gst-plugins-bad-1.0 \ 15 | git \ 16 | gobject-introspection \ 17 | gstreamer1.0-libav \ 18 | gstreamer1.0-nice \ 19 | gstreamer1.0-plugins-bad \ 20 | gstreamer1.0-plugins-base \ 21 | gstreamer1.0-plugins-good \ 22 | gstreamer1.0-plugins-ugly \ 23 | gstreamer1.0-tools \ 24 | libcairo2-dev \ 25 | libcairo2-dev \ 26 | libgirepository1.0-dev \ 27 | pkg-config \ 28 | python-gst-1.0 \ 29 | python3-dev \ 30 | python3-pip 31 | ``` 32 | 33 | ### STEP 2: Python dependencies 34 | Brave uses [Pipenv](https://packaging.python.org/tutorials/managing-dependencies/#managing-dependencies) to manage an isolate its dependencies. 35 | 36 | Install with: 37 | 38 | ``` 39 | pip3 install --user pipenv 40 | export PATH="${HOME}/.local/bin:$PATH" 41 | pipenv install 42 | ``` 43 | 44 | If you're having difficulties, try this reference: https://xkcd.com/1987/. Gotta love Python. 45 | 46 | 47 | ## STEP 3: Installing Brave 48 | Simply clone Brave from GitHub, and try running it: 49 | 50 | ``` 51 | git clone https://github.com/bbc/brave.git 52 | cd brave 53 | pipenv install 54 | pipenv run ./brave.py 55 | ``` 56 | 57 | That's it! 58 | 59 | ## Optional: Support fort AWS Kinesis Video 60 | 61 | If you wish to output to a Kinesis Video Stream, there are [separate instructions](./install_kvs.md) on installing the necessary GStreamer element. 62 | -------------------------------------------------------------------------------- /docs/mixers.md: -------------------------------------------------------------------------------- 1 | # Mixers 2 | Mixers allow video and audio to be switched and mixed together. 3 | 4 | A mixer can have any number of *sources*. Sources can be either [inputs](inputs.md) or the output from other mixers. 5 | 6 | [Outputs](outputs.md) can then take a mixer as a source, to deliver its mix elsewhere (e.g. as an RTMP stream, or writing to a file). 7 | 8 | Mixers can also have [overlays](overlays.md) applied to them. 9 | 10 | There can be any number of mixers. They can be created, updated, and deleted using the [API](api.md). They can also be created at start-up using a [config file](config_file.md). 11 | 12 | All together, this allows mixers to be interconnected in interesting ways, for example: 13 | 14 | ![Example of connected blocks](assets/blocks_example.png "Example of connected blocks") 15 | 16 | ## Properties 17 | There is only one type of mixer, and it has the following properties: 18 | 19 | | Name | Can be set initially? | Can be updated?? | Description | Default value (if not set) | 20 | | ---- | --------------------- | ---------------- | ----------- | -------------------------- | 21 | | `id` | No | No | ID of the mixer. Positive integer. Starts at 1 and increases by 1 for each new mixer. | n/a | 22 | | `uid` | No | No | Unqiue ID - a string in the format 'mixerX' where X is the ID | n/a | 23 | | `state` | Yes | Yes | Either `NULL`, `READY`, `PAUSED` or `PLAYING`. [_What are the four states?_](faq.md#what-are-the-four-states) | `PLAYING` | 24 | | `sources` | Yes | Yes (both directly and also via helper API methods `cut_to_source` and `overlay_source`) | An array of inputs and mixers that are the source of this mixer. See below for more. | None | 25 | | `pattern` | Yes | Yes | The pattern used for the background, as an integer. See the [test video](inputs.md#test-video) input type for the list of available patterns. 
| 0 (SMPTE 100% color bars) | 26 | | `width` and `height` | Yes | Yes | Override of the width and height | The values of `default_mixer_width` and `default_mixer_height` in the [config file](config_file.md). | 27 | 28 | ### `sources` property 29 | 30 | The `sources` properly is an array of dictionaries, for each source that the mixer is currently including. An empty array shows that the mixer has no source. The order of the array has no meaning. 31 | 32 | Properties of each source: 33 | 34 | | Name | Description | For audio or video sources? | Default | 35 | | ---- | ----------- | --------------------------- | ------- | 36 | | `uid` | the unique ID of the source (e.g. `input1` or `mixer2`) | Both | n/a (required) | 37 | | `in_mix` | `true` iff source is overlayed into mix | Both | `true` | 38 | | `zorder` | The z-order (aka z-index) of the video. Sources with a higher z-order will appear over the sources with a lower z-order. | Video | `1` | 39 | | `width` and `height` | The size of the video. It is useful to set these for 'picture in picture' or 'video wall' use-cases. | Video | If omitted, defaults to the full size of the mixer. | 40 | | `xpos` and `ypos` | The location of the video. | Video | Defaults to 0,0 (i.e. the top-left corner). | 41 | | `volume` | The volume that the input should be mixed at, between 0 (silent) and 1.0 (full volume). | Audio | `1.0` | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /docs/overlays.md: -------------------------------------------------------------------------------- 1 | # Overlays 2 | Overlays allow a graphic or effect to be placed on top of the video. They have no effect on audio. 3 | 4 | Overlays can be added to any [input](inputs.md) or [mixer](mixers.md). This is defined when creating or updating the overlay, by setting the `source` to e.g. `input1` or `mixer2`. Multiple overlays can be added to each input or mixer. 5 | 6 | Overlays can be created, updated, and deleted using the [API](api.md). They can also be created at start-up using a [config file](config_file.md). 7 | 8 | ## Common properties 9 | 10 | All types of overlays have the following properties: 11 | 12 | | Name | Can be set initially? | Can be updated?? | Description | Default value (if not set) | 13 | | ---- | --------------------- | ---------------- | ----------- | -------------------------- | 14 | | `id` | No | No | ID of the overlay. Positive integer. Starts at 1 and increases by 1 for each new overlay. | n/a | 15 | | `uid` | No | No | Unqiue ID - a string in the format 'overlayX' where X is the ID | n/a | 16 | | `type` | Yes | No | The name of the overlay type, e.g. `text`. | N/A - **REQUIRED** | 17 | | `visible` | Yes | Yes | Boolean. Whether the effect is visible on the video. | False | 18 | | `source` | Yes | Yes (but only in the NULL or READY states) | The `uid` of the input or mixer that the overlay is overlaying. e.g. `mixer1` | The mixer with the lowest ID (usually `mixer1`) | 19 | 20 | ## Overlay types 21 | Brave currently supports these overlay types: 22 | 23 | - [text](#text) 24 | - [clock](#clock) 25 | - [effect](#effect) 26 | 27 | ### text 28 | Shows text on the screen. 29 | 30 | ### Additional properties 31 | In addition to the common properties defined above, this overlay also has: 32 | 33 | | Name | Can be set initially? | Can be updated?? 
| Description | Default value (if not set) | 34 | | ---- | --------------------- | ---------------- | ----------- | -------------------------- | 35 | | `text` | Yes | Yes | The text to display. | Empty string | 36 | | `valignment` | Yes | Yes | The vertical alignment of the text. Can be `top`, `center` or `bottom`. | Empty string | 37 | 38 | 39 | ### clock 40 | The `clock` overlay shows the current time, and also any other text provided in the `text` property. It is a useful overlay to determine if there is any delay in the video. 41 | 42 | This overlay shares the same properties as the `text` overlay. 43 | 44 | 45 | ### effect 46 | The `effect` overlay allows a range of video transformation effects to be applied. 47 | 48 | This overlay has one additional property - `effect_name` - which can be set to one of the following values: 49 | 50 | * `agingtv`: AgingTV (adds age to video input using scratches and dust) 51 | * `burn`: Burn (adjusts the colors in the video) 52 | * `chromium`: Chromium (breaks the colors of the video) 53 | * `dicetv`: DiceTV (\'Dices\' the screen up into many small squares) 54 | * `dilate`: Dilate (copies the brightest pixel around) 55 | * `dodge`: Dodge (saturates the colors in the video) 56 | * `edgetv`: EdgeTV effect 57 | * `exclusion`: Exclusion (exclodes the colors in the video) 58 | * `optv`: OpTV (Optical art meets real-time video) 59 | * `radioactv`: RadioacTV (motion-enlightment) 60 | * `revtv`: RevTV (A video waveform monitor for each line of video) 61 | * `rippletv`: RippleTV (ripple mark effect on the video) 62 | * `solarize`: Solarize (tunable inverse in the video) 63 | * `streaktv`: StreakTV (makes after images of moving objects) 64 | * `vertigotv`: VertigoTV (blending effector with rotating and scaling) 65 | * `warptv`: WarpTV (goo\'ing of the video) 66 | 67 | If omitted, the default is `edgetv`. 68 | -------------------------------------------------------------------------------- /docs/plugins_used.md: -------------------------------------------------------------------------------- 1 | # GStreamer elements used by Brave 2 | 3 | | Element | Module | Where is it used? | Notes | 4 | | ----- | ------- | ------------ | ---- | 5 | | x264 (encoding) | Ugly | RTMP Output | 6 | | Lame (MP3 encoder) | Ugly | NOWHERE! | 7 | | LibMPEG2 | Ugly | TCP Output - consider removing support for? | 8 | | MPEG Audio Decoder (MAD) | Ugly | Indirectly in the URI input, by `playbin`, for MP3 decode | 9 | | Theora | Ugly | TCP output (as a video encoder, can be swapped for x264) | 10 | | RTMP | Bad | RTMP output, URI Input | This implentation uses AAC audio encoding and h264 video encoding. 
| 11 | | AAC | Bad | RTMP Output | No licenses or payments are required for a user to stream or distribute content in AAC format (Source: [https://en.wikipedia.org/wiki/Advanced_Audio_Coding](Wikipedia)) | 12 | | SRT | Bad | | 13 | | Nice | Bad | | 14 | | Ogg (audio encoder) | Base | TCP Output | Described as free and open at https://xiph.org/ | 15 | | Vorbis (audio encoder) | Base | TCP output (as an alternative to AC3) | Described as free and open at https://xiph.org/ | 16 | 17 | 18 | -------------------------------------------------------------------------------- /gst-WebRenderSrc/.gitignore: -------------------------------------------------------------------------------- 1 | src/subprocess 2 | aclocal.m4 3 | autom4te.cache 4 | autoregen.sh 5 | config.* 6 | configure 7 | libtool 8 | INSTALL 9 | Makefile.in 10 | Makefile 11 | depcomp 12 | install-sh 13 | ltmain.sh 14 | missing 15 | stamp-* 16 | my-plugin-*.tar.* 17 | compile 18 | .vscode 19 | third_party 20 | 21 | build/ 22 | CMakeFiles/ 23 | GPUCache/ 24 | package/ 25 | *~ 26 | 27 | aclocal.m4 28 | autom4te.cache 29 | build 30 | config.h 31 | config.h.in 32 | config.log 33 | config.status 34 | config.guess 35 | config.sub 36 | config.rpath 37 | configure 38 | gstreamer-libs-uninstalled*.pc 39 | gstreamer-libs*.pc 40 | gstreamer-play-uninstalled*.pc 41 | gstreamer-play*.pc 42 | libtool 43 | stamp-h 44 | stamp-h.in 45 | stamp-h1 46 | gst-element-check-*.m4 47 | ltmain.sh 48 | missing 49 | mkinstalldirs 50 | compile 51 | install-sh 52 | depcomp 53 | autoregen.sh 54 | ABOUT-NLS 55 | /INSTALL 56 | _stdint.h 57 | gst-plugins-bad-*.tar.* 58 | .vs 59 | 60 | .deps 61 | .idea 62 | .libs 63 | .dirstamp 64 | *.lo 65 | *.la 66 | *.so 67 | *.o 68 | *.pyc 69 | *~ 70 | /m4 71 | Makefile.in 72 | Makefile 73 | *.gir 74 | *.typelib 75 | *.gc?? 76 | 77 | tmp-orc.c 78 | gst*orc.h 79 | 80 | 81 | Build 82 | *.user 83 | *.suo 84 | *.ipch 85 | *.sdf 86 | *.opensdf 87 | *.DS_Store 88 | 89 | *.log 90 | *.trs 91 | 92 | *.gypcmd 93 | *.mk 94 | *.ncb 95 | *.opensdf 96 | *.props 97 | *.pyc 98 | *.rules 99 | *.sdf 100 | *.sln 101 | *.sublime-project 102 | *.sublime-workspace 103 | *.suo 104 | *.targets 105 | *.user 106 | *.vcproj 107 | *.vcxproj 108 | *.vcxproj.filters 109 | *.vpj 110 | *.vpw 111 | *.vpwhistu 112 | *.vtg 113 | *.xcodeproj 114 | *.xcworkspace 115 | *_proto.xml 116 | *_proto_cpp.xml 117 | *~ 118 | 119 | # CEF build directory 120 | /build 121 | /src/Release 122 | # CEF download directory 123 | /third_party/cef/ 124 | libcef_dll_wrapper/ 125 | 126 | cmake_install.cmake 127 | CMakeCache.txt 128 | 129 | install_manifest.txt -------------------------------------------------------------------------------- /gst-WebRenderSrc/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016 The Chromium Embedded Framework Authors. All rights 2 | # reserved. Use of this source code is governed by a BSD-style license that 3 | # can be found in the LICENSE file. 4 | 5 | # See the included README.md file for usage instructions. 6 | 7 | cmake_minimum_required(VERSION 2.8.12.1) 8 | 9 | # Only generate Debug and Release configuration types. 10 | set(CMAKE_CONFIGURATION_TYPES Release Debug) 11 | 12 | # Project name. 13 | project(gst-WebRenderSrc) 14 | 15 | # Use folders in the resulting project files. 16 | set_property(GLOBAL PROPERTY OS_FOLDERS ON) 17 | set(GST_INSTALL_BASE "${CMAKE_SOURCE_DIR}/third_party" CACHE INTERNAL "Path to the GStreamer install base") 18 | 19 | # Specify the CEF distribution version. 
20 | set(CEF_VERSION "3.3497.1836.gb472a8d") 21 | 22 | #Need a build of Gstreamer with WebRTC fixes 23 | set(GST_REQUIRED 1.14.3) 24 | set(GLIB_REQUIRED 2.58.1) 25 | 26 | # Determine the platform. 27 | if("${CMAKE_SYSTEM_NAME}" STREQUAL "Darwin") 28 | set(CEF_PLATFORM "macosx64") 29 | elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") 30 | if(CMAKE_SIZEOF_VOID_P MATCHES 8) 31 | set(CEF_PLATFORM "linux64") 32 | else() 33 | set(CEF_PLATFORM "linux32") 34 | endif() 35 | elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "Windows") 36 | if(CMAKE_SIZEOF_VOID_P MATCHES 8) 37 | set(CEF_PLATFORM "windows64") 38 | else() 39 | set(CEF_PLATFORM "windows32") 40 | endif() 41 | endif() 42 | 43 | set(PROJECT_ARCH "x86_64") 44 | set(CMAKE_BUILD_TYPE "Release") 45 | 46 | include(GNUInstallDirs) 47 | 48 | # Add this project's cmake/ directory to the module path. 49 | set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") 50 | 51 | # Download and extract the CEF binary distribution (executes DownloadCEF.cmake). 52 | include(DownloadCEF) 53 | include(FindPkgConfig) 54 | DownloadCEF("${CEF_PLATFORM}" "${CEF_VERSION}" "${CMAKE_SOURCE_DIR}/third_party/cef") 55 | 56 | # Add the CEF binary distribution's cmake/ directory to the module path. 57 | set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CEF_ROOT}/cmake") 58 | 59 | # Load the CEF configuration (executes FindCEF.cmake). 60 | find_package(CEF REQUIRED) 61 | 62 | # find GStreamer packages 63 | pkg_check_modules(GSTREAMER REQUIRED gstreamer-1.0>=${GST_REQUIRED}) 64 | pkg_check_modules(GSTREAMER_BASE REQUIRED gstreamer-base-1.0>=${GST_REQUIRED}) 65 | pkg_check_modules(GSTREAMER_VIDEO REQUIRED gstreamer-video-1.0>=${GST_REQUIRED}) 66 | 67 | #Find getText 68 | find_package(Gettext REQUIRED) 69 | 70 | set(CMAKE_INSTALL_GST_PLUGINS_DIR ${CMAKE_INSTALL_LIBDIR}/gstreamer-1.0) 71 | 72 | # Include the libcef_dll_wrapper target (executes libcef_dll/CMakeLists.txt). 73 | add_subdirectory(${CEF_LIBCEF_DLL_WRAPPER_PATH} libcef_dll_wrapper) 74 | 75 | include_directories(${CMAKE_CURRENT_BINARY_DIR}) 76 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}) 77 | 78 | message(STATUS "Compiling : ${PROJECT_NAME}-${PROJECT_VERSION}") 79 | 80 | add_subdirectory(src) 81 | -------------------------------------------------------------------------------- /gst-WebRenderSrc/cmake/DownloadCEF.cmake: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016 The Chromium Embedded Framework Authors. All rights 2 | # reserved. Use of this source code is governed by a BSD-style license that 3 | # can be found in the LICENSE file. 4 | 5 | # Download the CEF binary distribution for |platform| and |version| to 6 | # |download_dir|. The |CEF_ROOT| variable will be set in global scope pointing 7 | # to the extracted location. 8 | # Visit http://opensource.spotify.com/cefbuilds/index.html for the list of 9 | # supported platforms and versions. 10 | 11 | function(DownloadCEF platform version download_dir) 12 | # Specify the binary distribution type and download directory. 13 | set(CEF_DISTRIBUTION "cef_binary_${version}_${platform}") 14 | set(CEF_DOWNLOAD_DIR "${download_dir}") 15 | 16 | # The location where we expect the extracted binary distribution. 17 | set(CEF_ROOT "${CEF_DOWNLOAD_DIR}/${CEF_DISTRIBUTION}" CACHE INTERNAL "CEF_ROOT") 18 | 19 | # Download and/or extract the binary distribution if necessary. 
20 | if(NOT IS_DIRECTORY "${CEF_ROOT}") 21 | set(CEF_DOWNLOAD_FILENAME "${CEF_DISTRIBUTION}.tar.bz2") 22 | set(CEF_DOWNLOAD_PATH "${CEF_DOWNLOAD_DIR}/${CEF_DOWNLOAD_FILENAME}") 23 | if(NOT EXISTS "${CEF_DOWNLOAD_PATH}") 24 | set(CEF_DOWNLOAD_URL "http://opensource.spotify.com/cefbuilds/${CEF_DOWNLOAD_FILENAME}") 25 | 26 | # Download the SHA1 hash for the binary distribution. 27 | message(STATUS "Downloading ${CEF_DOWNLOAD_PATH}.sha1...") 28 | file(DOWNLOAD "${CEF_DOWNLOAD_URL}.sha1" "${CEF_DOWNLOAD_PATH}.sha1") 29 | file(READ "${CEF_DOWNLOAD_PATH}.sha1" CEF_SHA1) 30 | 31 | # Download the binary distribution and verify the hash. 32 | message(STATUS "Downloading ${CEF_DOWNLOAD_PATH}...") 33 | file( 34 | DOWNLOAD "${CEF_DOWNLOAD_URL}" "${CEF_DOWNLOAD_PATH}" 35 | EXPECTED_HASH SHA1=${CEF_SHA1} 36 | SHOW_PROGRESS 37 | ) 38 | endif() 39 | 40 | # Extract the binary distribution. 41 | message(STATUS "Extracting ${CEF_DOWNLOAD_PATH}...") 42 | execute_process( 43 | COMMAND ${CMAKE_COMMAND} -E tar xzf "${CEF_DOWNLOAD_DIR}/${CEF_DOWNLOAD_FILENAME}" 44 | WORKING_DIRECTORY ${CEF_DOWNLOAD_DIR} 45 | ) 46 | endif() 47 | endfunction() -------------------------------------------------------------------------------- /gst-WebRenderSrc/src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories(${GSTREAMER_INCLUDE_DIRS}) 2 | include_directories(${GSTREAMER_VIDEO_INCLUDE_DIRS}) 3 | 4 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}) 5 | 6 | include_directories(${CEF_INCLUDE_PATH}) 7 | include_directories(${CEF_PATH}) 8 | 9 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Wall ") 10 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -stdlib=libc++ -std=c++11") 11 | 12 | # Logical target used to link the libcef library. 13 | ADD_LOGICAL_TARGET("libcef_lib" "${CEF_LIB_DEBUG}" "${CEF_LIB_RELEASE}") 14 | # Determine the target output directory. 
15 | SET_CEF_TARGET_OUT_DIR() 16 | 17 | set(WEBRENDERSRC_SOURCES 18 | gstwebrendersrc.c 19 | gstwebrendersrc.h 20 | cef_bridge.cpp 21 | cef_bridge.h 22 | cef/Browser.cpp 23 | cef/Browser.h 24 | cef/Client.cpp 25 | cef/Client.h 26 | ) 27 | 28 | add_library(webrendersrc MODULE ${WEBRENDERSRC_SOURCES}) 29 | add_executable(cefsubprocess cef_subprocess.cpp) 30 | 31 | target_link_libraries(webrendersrc 32 | ${GSTREAMER_LIBRARIES} 33 | ${GSTREAMER_VIDEO_LIBRARIES} 34 | libcef_lib 35 | libcef_dll_wrapper 36 | ${CEF_STANDARD_LIBS} 37 | ${CEF_BINARY_DIR} 38 | ) 39 | 40 | target_link_libraries(cefsubprocess 41 | libcef_lib 42 | libcef_dll_wrapper 43 | ) 44 | 45 | if("${CMAKE_SYSTEM_NAME}" STREQUAL "Darwin") 46 | add_custom_command( 47 | TARGET webrendersrc 48 | POST_BUILD 49 | COMMAND ${CMAKE_COMMAND} -E copy_directory 50 | "${CEF_BINARY_DIR}/Chromium Embedded Framework.framework" 51 | "${CEF_TARGET_OUT_DIR}/Frameworks/Chromium Embedded Framework.framework" 52 | ) 53 | 54 | add_custom_command( 55 | TARGET webrendersrc 56 | POST_BUILD 57 | COMMAND install_name_tool -change 58 | "@rpath/Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" 59 | "/usr/local/Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" 60 | "$" 61 | ) 62 | 63 | add_custom_command( 64 | TARGET cefsubprocess 65 | POST_BUILD 66 | COMMAND install_name_tool -change 67 | "@rpath/Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" 68 | "/usr/local/Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" 69 | "$" 70 | ) 71 | 72 | endif() 73 | 74 | install( 75 | TARGETS webrendersrc 76 | RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} 77 | LIBRARY DESTINATION ${CMAKE_INSTALL_GST_PLUGINS_DIR} 78 | ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} 79 | ) 80 | 81 | install( 82 | TARGETS cefsubprocess 83 | RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} 84 | LIBRARY DESTINATION ${CMAKE_INSTALL_GST_PLUGINS_DIR} 85 | ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} 86 | ) 87 | -------------------------------------------------------------------------------- /gst-WebRenderSrc/src/cef/Browser.h: -------------------------------------------------------------------------------- 1 | #ifndef CEFBROWSER_H 2 | #define CEFBROWSER_H 3 | 4 | #include 5 | 6 | #include "include/cef_app.h" 7 | #include "include/cef_client.h" 8 | #include "Client.h" 9 | 10 | /* 11 | * Application Structure 12 | * Every CEF3 application has the same general structure. 13 | * Provide an entry-point function that initializes CEF and runs either sub-process executable logic or the CEF message loop. 14 | * Provide an implementation of CefApp to handle process-specific callbacks. 15 | * Provide an implementation of CefClient to handle browser-instance-specific callbacks. 16 | * Call CefBrowserHost::CreateBrowser() to create a browser instance and manage the browser life span using CefLifeSpanHandler. 17 | */ 18 | 19 | class Browser : 20 | public CefApp, 21 | public Client::Listener, 22 | public CefBrowserProcessHandler, 23 | public CefRenderProcessHandler 24 | { 25 | public: 26 | static Browser& getInstance() 27 | { 28 | static Browser instance; 29 | return instance; 30 | } 31 | 32 | private: 33 | // Dont forget to declare these two. You want to make sure they 34 | // are unaccessable otherwise you may accidently get copies of 35 | // your singelton appearing. 
36 | Browser(); 37 | Browser(Browser const&); // Don't Implement 38 | void operator=(Browser const&); // Don't implement 39 | 40 | public: 41 | int Init(void *webRenderSrc, void* push_frame); 42 | void Run(); 43 | int CreateFrame(std::string url, int width, int height); 44 | int End(); 45 | virtual ~Browser(); 46 | 47 | virtual void OnBeforeCommandLineProcessing(const CefString& process_type,CefRefPtr command_line) override { 48 | command_line->AppendSwitch("disable-gpu"); 49 | command_line->AppendSwitch("disable-gpu-compositing"); 50 | command_line->AppendSwitch("enable-begin-frame-scheduling"); 51 | command_line->AppendSwitch("enable-media-stream"); 52 | command_line->AppendSwitchWithValue("disable-gpu-vsync", "gpu"); 53 | } 54 | 55 | // Client::Listener functions 56 | //From client listener 57 | virtual bool GetViewRect(CefRect& rect) override; 58 | virtual void OnPaint(CefRenderHandler::PaintElementType type, const CefRenderHandler::RectList& rects, const void* buffer, int width, int height) override; 59 | 60 | IMPLEMENT_REFCOUNTING(Browser); 61 | 62 | private: 63 | bool inited; 64 | // Specify CEF global settings here. 65 | CefSettings settings; 66 | 67 | void *webRenderSrc; 68 | void (* push_frame)(void *webRenderSrc, const void *buffer, int width, int height); 69 | }; 70 | 71 | #endif /* CEFBROWSER_H */ 72 | -------------------------------------------------------------------------------- /gst-WebRenderSrc/src/cef/Client.cpp: -------------------------------------------------------------------------------- 1 | #include "Client.h" 2 | 3 | Client::Client(Listener* listener) 4 | { 5 | //Store 6 | this->listener = listener; 7 | } 8 | 9 | Client::~Client() 10 | { 11 | } 12 | 13 | bool Client::GetViewRect(CefRefPtr browser, CefRect& rect) 14 | { 15 | //Call listener 16 | return listener && listener->GetViewRect(rect); 17 | } 18 | 19 | void Client::OnPaint(CefRefPtr, CefRenderHandler::PaintElementType type, const RectList& rects, const void* buffer, int width, int height) 20 | { 21 | GST_INFO("Client::OnPaint %uX%u", width, height); 22 | 23 | if (type != PET_VIEW) 24 | return; 25 | 26 | //Call listener 27 | if (listener) listener->OnPaint(type, rects, buffer, width, height); 28 | } -------------------------------------------------------------------------------- /gst-WebRenderSrc/src/cef/Client.h: -------------------------------------------------------------------------------- 1 | #ifndef CLIENT_H 2 | #define CLIENT_H 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | class Client : 10 | public CefClient, 11 | public CefRenderHandler, 12 | public CefLifeSpanHandler 13 | { 14 | public: 15 | class Listener { 16 | public: 17 | virtual bool GetViewRect(CefRect& rect) = 0; 18 | virtual void OnPaint(CefRenderHandler::PaintElementType type, const CefRenderHandler::RectList& rects, const void* buffer, int width, int height) = 0; 19 | }; 20 | public: 21 | Client(Listener *listener); 22 | virtual ~Client(); 23 | 24 | //Override 25 | virtual CefRefPtr GetRenderHandler() override { 26 | // Return the handler for off-screen rendering events.
27 | return this; 28 | } 29 | virtual CefRefPtr GetLifeSpanHandler() override { 30 | // Return browser life span handler 31 | return this; 32 | } 33 | 34 | virtual bool GetViewRect(CefRefPtr browser, CefRect& rect) override; 35 | virtual void OnPaint(CefRefPtr browser, CefRenderHandler::PaintElementType type, const RectList& rects, const void* buffer, int width, int height) override; 36 | 37 | IMPLEMENT_REFCOUNTING(Client); 38 | private: 39 | Listener* listener; 40 | }; 41 | 42 | #endif /* CLIENT_H */ 43 | -------------------------------------------------------------------------------- /gst-WebRenderSrc/src/cef_bridge.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "cef_bridge.h" 4 | #include "cef/Browser.h" 5 | 6 | namespace 7 | { 8 | static gint message_loop_running = 0; 9 | } 10 | 11 | void new_browser_instance(gpointer args) 12 | { 13 | Browser& browser = Browser::getInstance(); 14 | struct cef_interface *cb = (struct cef_interface *)args; 15 | 16 | browser.Init(cb->gstWebRenderSrc, cb->push_frame); 17 | 18 | g_atomic_int_set(&message_loop_running, 1); 19 | 20 | browser.CreateFrame(cb->url, cb->width, cb->height); 21 | } 22 | 23 | void end_browser_instance() 24 | { 25 | g_atomic_int_set(&message_loop_running, 0); 26 | Browser::getInstance().End(); 27 | } 28 | 29 | static bool _inner_browser_run(void) 30 | { 31 | if (g_atomic_int_get(&message_loop_running)) { 32 | Browser::getInstance().Run(); 33 | return true; 34 | } else { 35 | return false; 36 | } 37 | } 38 | 39 | void run_browser_message_loop(gpointer args) 40 | { 41 | g_idle_add((GSourceFunc)_inner_browser_run, NULL); 42 | } 43 | -------------------------------------------------------------------------------- /gst-WebRenderSrc/src/cef_bridge.h: -------------------------------------------------------------------------------- 1 | #ifndef _CEF_BRIDGE_H_ 2 | #define _CEF_BRIDGE_H_ 3 | 4 | #include 5 | #include 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | GST_EXPORT GstDebugCategory *gst_web_render_src_debug; 12 | #define GST_CAT_DEFAULT gst_web_render_src_debug 13 | 14 | struct cef_interface 15 | { 16 | void *gstWebRenderSrc; 17 | void *push_frame; 18 | char *url; 19 | int width; 20 | int height; 21 | }; 22 | 23 | void new_browser_instance(gpointer args); 24 | void end_browser_instance(); 25 | void run_browser_message_loop(gpointer args); 26 | 27 | #ifdef __cplusplus 28 | } 29 | #endif 30 | 31 | #endif -------------------------------------------------------------------------------- /gst-WebRenderSrc/src/cef_subprocess.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | class BrowserApp : public CefApp 4 | { 5 | public: 6 | BrowserApp(); 7 | 8 | virtual void OnBeforeCommandLineProcessing( 9 | const CefString &process_type, 10 | CefRefPtr command_line) OVERRIDE; 11 | 12 | virtual void OnRegisterCustomSchemes( 13 | CefRawPtr registrar) OVERRIDE; 14 | 15 | IMPLEMENT_REFCOUNTING(BrowserApp); 16 | }; 17 | 18 | BrowserApp::BrowserApp() {} 19 | 20 | void BrowserApp::OnBeforeCommandLineProcessing( 21 | const CefString &process_type, 22 | CefRefPtr command_line) 23 | { 24 | command_line->AppendSwitch("disable-gpu"); 25 | command_line->AppendSwitch("disable-gpu-compositing"); 26 | command_line->AppendSwitch("enable-begin-frame-scheduling"); 27 | command_line->AppendSwitch("enable-system-flash"); 28 | command_line->AppendSwitch("log-severity=disable"); 29 | } 30 | 31 | void 
BrowserApp::OnRegisterCustomSchemes( 32 | CefRawPtr registrar) 33 | { 34 | } 35 | 36 | int main(int argc, char* argv[]) 37 | { 38 | CefMainArgs mainArgs(argc, argv); 39 | CefRefPtr app(new BrowserApp()); 40 | return CefExecuteProcess(mainArgs, app.get(), NULL); 41 | } -------------------------------------------------------------------------------- /gst-WebRenderSrc/src/gstwebrendersrc.h: -------------------------------------------------------------------------------- 1 | /* 2 | * GStreamer 3 | * Copyright (C) 2005 Thomas Vander Stichele 4 | * Copyright (C) 2005 Ronald S. Bultje 5 | * Copyright (C) 2018 <> 6 | * 7 | * Permission is hereby granted, free of charge, to any person obtaining a 8 | * copy of this software and associated documentation files (the "Software"), 9 | * to deal in the Software without restriction, including without limitation 10 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 | * and/or sell copies of the Software, and to permit persons to whom the 12 | * Software is furnished to do so, subject to the following conditions: 13 | * 14 | * The above copyright notice and this permission notice shall be included in 15 | * all copies or substantial portions of the Software. 16 | * 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | * DEALINGS IN THE SOFTWARE. 24 | * 25 | * Alternatively, the contents of this file may be used under the 26 | * GNU Lesser General Public License Version 2.1 (the "LGPL"), in 27 | * which case the following provisions apply instead of the ones 28 | * mentioned above: 29 | * 30 | * This library is free software; you can redistribute it and/or 31 | * modify it under the terms of the GNU Library General Public 32 | * License as published by the Free Software Foundation; either 33 | * version 2 of the License, or (at your option) any later version. 34 | * 35 | * This library is distributed in the hope that it will be useful, 36 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 37 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 38 | * Library General Public License for more details. 39 | * 40 | * You should have received a copy of the GNU Library General Public 41 | * License along with this library; if not, write to the 42 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 43 | * Boston, MA 02111-1307, USA. 
44 | */ 45 | 46 | #ifndef __GST_WEBRENDERSRC_H__ 47 | #define __GST_WEBRENDERSRC_H__ 48 | 49 | #include 50 | #include 51 | #include 52 | #include 53 | 54 | G_BEGIN_DECLS 55 | 56 | /* #defines don't like whitespacey bits */ 57 | #define GST_TYPE_WEBRENDERSRC \ 58 | (gst_web_render_src_get_type()) 59 | #define GST_WEBRENDERSRC(obj) \ 60 | (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_WEBRENDERSRC,GstWebRenderSrc)) 61 | #define GST_WEBRENDERSRC_CLASS(klass) \ 62 | (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_WEBRENDERSRC,GstWebRenderSrcClass)) 63 | #define GST_IS_WEBRENDERSRC(obj) \ 64 | (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_WEBRENDERSRC)) 65 | #define GST_IS_WEBRENDERSRC_CLASS(klass) \ 66 | (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_WEBRENDERSRC)) 67 | 68 | typedef struct _GstWebRenderSrc GstWebRenderSrc; 69 | typedef struct _GstWebRenderSrcClass GstWebRenderSrcClass; 70 | 71 | struct _GstWebRenderSrc 72 | { 73 | GstPushSrc src; 74 | 75 | const gchar *url; 76 | int width; 77 | int height; 78 | 79 | //Thread references 80 | GThread *active_thread; 81 | GAsyncQueue *frames; 82 | guint64 n_frames; 83 | }; 84 | 85 | struct _GstWebRenderSrcClass 86 | { 87 | GstPushSrcClass parent_class; 88 | }; 89 | 90 | GType gst_web_render_src_get_type (void); 91 | 92 | G_END_DECLS 93 | 94 | #endif /* __GST_WEBRENDERSRC_H__ */ 95 | -------------------------------------------------------------------------------- /public/js/mixers.js: -------------------------------------------------------------------------------- 1 | // 2 | // This web interface has been quickly thrown together. It's not production code. 3 | // 4 | 5 | mixersHandler = {}; 6 | mixersHandler.items = []; 7 | 8 | mixersHandler.findById = function(id) { 9 | return mixersHandler.items.find(function(x) { return x.id == id }) 10 | }; 11 | 12 | mixersHandler.showFormToEdit = function(mixer) { 13 | mixersHandler._showForm(mixer) 14 | }; 15 | 16 | mixersHandler.draw = function() { 17 | mixersHandler._drawCards() 18 | }; 19 | 20 | mixersHandler.setState = function(id, state) { 21 | submitCreateOrEdit('mixer', id, {state}) 22 | }; 23 | 24 | mixersHandler.remove = (mixer, source) => { 25 | mixersHandler._sendMixerCommand(mixer, source, 'remove_source') 26 | }; 27 | 28 | mixersHandler.cut = (mixer, source) => { 29 | mixersHandler._sendMixerCommand(mixer, source, 'cut_to_source') 30 | }; 31 | 32 | mixersHandler.overlay = (mixer, source) => { 33 | mixersHandler._sendMixerCommand(mixer, source, 'overlay_source') 34 | }; 35 | 36 | mixersHandler._drawCards = () => { 37 | $('#cards').append(mixersHandler.items.map(mixersHandler._asCard)) 38 | }; 39 | 40 | mixersHandler._asCard = (mixer) => { 41 | return components.card({ 42 | title: 'Mixer ' + mixer.id, 43 | options: mixersHandler._optionButtonsForMixer(mixer), 44 | body: mixersHandler._mixerCardBody(mixer), 45 | state: components.stateBox(mixer, mixersHandler.setState), 46 | mixOptions: components.getMixOptions(mixer) 47 | }) 48 | }; 49 | 50 | mixersHandler._optionButtonsForMixer = (mixer) => { 51 | const editButton = components.editButton().click(() => { mixersHandler.showFormToEdit(mixer); return false }); 52 | const deleteButton = components.deleteButton().click(() => { mixersHandler.delete(mixer); return false }); 53 | return [editButton, deleteButton] 54 | }; 55 | 56 | mixersHandler._mixerCardBody = (mixer) => { 57 | var details = []; 58 | if (mixer.hasOwnProperty('pattern')) details.push('
Background: ' + inputsHandler.patternTypes[mixer.pattern] + '
'); 59 | if (mixer.hasOwnProperty('width') && 60 | mixer.hasOwnProperty('height')) details.push('
Dimensions: ' + prettyDimensions(mixer) + '
'); 61 | return details 62 | }; 63 | 64 | mixersHandler._sendMixerCommand = function ( mixer, source, command ) { 65 | $.ajax({ 66 | type: 'POST', 67 | url: 'api/mixers/' + mixer.id + '/' + command, 68 | dataType: 'json', 69 | data: JSON.stringify({uid:source.uid}), 70 | success: function() { 71 | showMessage( `Success in ${command} for ${source.uid} to ${mixer.uid}`, 'success' ); 72 | updatePage() 73 | }, 74 | error: function() { 75 | showMessage( 'Sorry, an error occurred', 'danger' ); 76 | } 77 | }); 78 | }; 79 | 80 | mixersHandler._showForm = function(mixer) { 81 | mixersHandler.currentForm = $('
'); 82 | var label = mixer && mixer.hasOwnProperty('id') ? 'Edit mixer ' + mixer.id : 'Add mixer'; 83 | showModal(label, mixersHandler.currentForm, mixersHandler._handleFormSubmit); 84 | mixersHandler._populateForm(mixer) 85 | }; 86 | 87 | mixersHandler._populateForm = function(mixer) { 88 | var form = mixersHandler.currentForm; 89 | form.empty(); 90 | 91 | if (mixer.hasOwnProperty('id')) { 92 | form.append('') 93 | } 94 | 95 | form.append(getDimensionsSelect('dimensions', mixer.width, mixer.height)); 96 | 97 | form.append(formGroup({ 98 | id: 'mixer-pattern', 99 | label: 'Pattern', 100 | name: 'pattern', 101 | options: inputsHandler.patternTypes, 102 | initialOption: 'Select a pattern...', 103 | value: mixer.pattern 104 | })) 105 | }; 106 | 107 | mixersHandler._handleFormSubmit = function() { 108 | const form = mixersHandler.currentForm; 109 | const idField = form.find('input[name="id"]'); 110 | const id = idField.length ? idField.val() : null; 111 | const mixer = (id != null) ? mixersHandler.findById(id) : {}; 112 | const newProps = {}; 113 | 114 | const fields = [ 'pattern', 'dimensions' ]; 115 | fields.forEach(function(f) { 116 | const mixer = form.find('[name="' + f + '"]'); 117 | if (mixer && mixer.val() != null) { 118 | newProps[f] = mixer.val() 119 | } 120 | }); 121 | splitDimensionsIntoWidthAndHeight(newProps); 122 | submitCreateOrEdit('mixer', id, newProps); 123 | hideModal(); 124 | }; 125 | 126 | mixersHandler.create = () => { 127 | submitCreateOrEdit('mixer', null, {}) 128 | }; 129 | 130 | mixersHandler.delete = function ( mixer ) { 131 | $.ajax({ 132 | contentType: "application/json", 133 | type: 'DELETE', 134 | url: 'api/mixers/' + mixer.id, 135 | dataType: 'json', 136 | success: function() { 137 | showMessage( `Successfully deleted mixer ${mixer.id}`, 'success' ); 138 | updatePage() 139 | }, 140 | error: function() { 141 | showMessage( `Sorry, an error occurred while deleting mixer ${mixer.id}`, 'danger' ); 142 | } 143 | }); 144 | return false 145 | }; 146 | -------------------------------------------------------------------------------- /public/js/websocket.js: -------------------------------------------------------------------------------- 1 | // 2 | // This web interface has been quickly thrown together. It's not production code. 3 | // 4 | 5 | websocket = { 6 | setupErrorCount: 0, 7 | volume: {channels: 0, data: [] }, 8 | }; 9 | 10 | websocket.setup = function() { 11 | var hostAndPort = window.location.host; 12 | var protocol = window.location.protocol == 'http:' ? 
'ws:' : 'wss:'; 13 | var websocketUrl = protocol + '//' + hostAndPort + '/socket'; 14 | websocket.socket = new WebSocket(websocketUrl); 15 | websocket.socket.addEventListener('open', websocket._onSocketOpen); 16 | websocket.socket.addEventListener('error', websocket._onSocketError); 17 | websocket.socket.addEventListener('message', websocket._onMessageReceived); 18 | websocket.socket.addEventListener('close', websocket._onSocketClose); 19 | }; 20 | 21 | websocket._onSocketOpen = event => { 22 | websocket.setupErrorCount = 0 23 | }; 24 | 25 | websocket._onSocketError = event => { 26 | websocket.setupErrorCount++ 27 | }; 28 | 29 | websocket._onSocketClose = event => { 30 | console.log('Websocket closed, reconnecting...'); 31 | var NUM_RETRY_ATTEMPTS = 10; 32 | if (websocket.setupErrorCount < NUM_RETRY_ATTEMPTS) { 33 | console.error("Websocket error, now happened " + websocket.setupErrorCount + ' times'); 34 | showMessage('Server connection lost, retrying...'); 35 | window.setTimeout(websocket.setup, 1000 + (1000 * websocket.setupErrorCount)); 36 | } 37 | else { 38 | console.error("Websocket error, now happened " + websocket.setupErrorCount + ' times, not attempting again.'); 39 | showMessage('Unable to connect to server, please refresh the page') 40 | } 41 | }; 42 | 43 | websocket._onMessageReceived = event => { 44 | const dataParsed = JSON.parse(event.data); 45 | if (dataParsed.msg_type === 'ping') { 46 | if (dataParsed.cpu_percent) { 47 | websocket._setCpuPercent(dataParsed.cpu_percent) 48 | } 49 | 50 | } 51 | else if (dataParsed.msg_type === 'update') { 52 | websocket._handleUpdate(dataParsed) 53 | } 54 | else if (dataParsed.msg_type === 'delete') { 55 | websocket._handleDelete(dataParsed) 56 | } 57 | else if (dataParsed.msg_type === 'webrtc-initialising') { 58 | if (dataParsed.ice_servers) webrtc.setIceServers(dataParsed.ice_servers) 59 | } 60 | else if (dataParsed.msg_type === 'volume') { 61 | websocket.volume.channels = dataParsed.channels; 62 | websocket.volume.data = dataParsed.data; 63 | } 64 | else if (dataParsed.sdp != null) { 65 | webrtc.onIncomingSDP(dataParsed.sdp); 66 | } else if (dataParsed.ice != null) { 67 | webrtc.onIncomingICE(dataParsed.ice); 68 | } else { 69 | console.warn("Unexpected websocket message:", dataParsed); 70 | } 71 | }; 72 | 73 | websocket._getHandlerForBlockType = function(t) { 74 | switch(t) { 75 | case 'input': 76 | return inputsHandler; 77 | case 'output': 78 | return outputsHandler; 79 | case 'mixer': 80 | return mixersHandler; 81 | case 'overlay': 82 | return overlaysHandler 83 | } 84 | 85 | console.error('Unknown block type', t) 86 | }; 87 | 88 | websocket._handleUpdate = function(item) { 89 | var handler = websocket._getHandlerForBlockType(item.block_type); 90 | if (handler) { 91 | handler.items = handler.items.filter(x => x.id != item.data.id); 92 | handler.items.push(item.data); 93 | handler.items.sort((a,b) => a.id - b.id); 94 | drawAllItems(); 95 | } 96 | }; 97 | 98 | websocket._handleDelete = function(item) { 99 | const handler = websocket._getHandlerForBlockType(item.block_type); 100 | if (handler) { 101 | handler.items = handler.items.filter(x => x.id != item.id); 102 | drawAllItems(); 103 | } 104 | }; 105 | 106 | websocket._setCpuPercent = (num) => { 107 | const cpuString = num.toFixed(1); 108 | const cpuLabel = `CPU: ${cpuString.padStart(5, '0')}%`; 109 | $('#cpu-stats') 110 | .empty() 111 | .text( cpuLabel ); 112 | }; 113 | -------------------------------------------------------------------------------- /public/style.css:
-------------------------------------------------------------------------------- 1 | body { 2 | margin-bottom: 300px; 3 | font-weight: lighter; 4 | } 5 | 6 | #top-message { 7 | position: fixed; 8 | left: 0; 9 | bottom: 0; 10 | width: 100%; 11 | } 12 | 13 | #top-message-close { 14 | float:right; 15 | font-size: 0.8em; 16 | font-weight: bold; 17 | } 18 | 19 | .change-box { 20 | border: 2px solid #666; 21 | background-color: #333; 22 | padding: 16px; 23 | margin: 8px 0 8px 0; 24 | } 25 | 26 | .change-box h3 { 27 | padding: 0 0 12px 0; 28 | margin: 0; 29 | } 30 | 31 | .PLAYING { 32 | background: linear-gradient(#379337, #3b9e3b 40%, #3ea63e); 33 | color: #ddd; 34 | } 35 | .PAUSED { 36 | background: linear-gradient(#9e5f04, #ad6704 40%, #b76d04); 37 | color: #ddd; 38 | } 39 | .READY { 40 | background: linear-gradient(#2596b8, #28a1c5 40%, #29a8cd); 41 | color: #ddd; 42 | } 43 | .NULL { 44 | background: linear-gradient(#f17a77, #ee5f5b 60%, #ec4d49); 45 | color: #ddd; 46 | } 47 | 48 | #output-box { 49 | margin-top: 20px; 50 | } 51 | 52 | #elements-table em { 53 | color: red; 54 | } 55 | 56 | .icon-unselected { 57 | color: #999; 58 | } 59 | 60 | .icon-unselected a:hover { 61 | color: white; 62 | } 63 | 64 | .state-icons { 65 | padding: 8px; 66 | margin: 0; 67 | } 68 | 69 | .state-icons a { 70 | margin-right: 4px; 71 | } 72 | 73 | .option-icons { 74 | margin-left: 16px; 75 | float: right; 76 | } 77 | 78 | .option-icons a { 79 | font-size: 1.1em; 80 | margin-left: 8px; 81 | } 82 | 83 | .option-icons a:hover { 84 | text-decoration: none; 85 | color: #999; 86 | } 87 | 88 | #preview-bar { 89 | text-align: center; 90 | margin: 8px 0 8px 0; 91 | } 92 | 93 | #preview-bar-select { 94 | text-align: center; 95 | } 96 | 97 | #preview-bar #image-preview { 98 | background-color: #222; 99 | } 100 | 101 | #preview-bar #stream { 102 | background-color: #222; 103 | } 104 | 105 | #preview-bar canvas { 106 | background-color: #222; 107 | width: 80px; 108 | height: 180px; 109 | } 110 | 111 | @media (min-width: 576px) { 112 | #preview-bar #stream { 113 | height: 270px; 114 | } 115 | #preview-bar canvas { 116 | height: 270px; 117 | } 118 | } 119 | 120 | /* @media (min-width: 992px) { 121 | #preview-bar #stream { 122 | height: 360px; 123 | } 124 | #preview-bar canvas { 125 | height: 360px; 126 | } 127 | } */ 128 | 129 | .preview-select { 130 | border: 1px solid #ccc; 131 | /* padding: 0 4px 0 4px; */ 132 | /* border-radius: 1px; */ 133 | box-shadow: none; 134 | background: transparent; 135 | background-image: none; 136 | color: white; 137 | } 138 | 139 | .block-card-head { 140 | background-color: #111; 141 | padding: 8px; 142 | font-weight: bold; 143 | } 144 | 145 | .block-card-body { 146 | padding: 8px; 147 | } 148 | 149 | .block-card-toggle { 150 | padding: 8px; 151 | } 152 | 153 | .block-card { 154 | padding: 0; 155 | margin: 0; 156 | border: 2px solid #333; 157 | border-radius: 4px; 158 | height: 100%; 159 | background-color: #222; 160 | color: #ddd; 161 | } 162 | 163 | .block-card-outer { 164 | padding: 16px 16px 0 0; 165 | } 166 | 167 | .block-card-selected { 168 | border: 2px solid blue; 169 | } 170 | 171 | .mix-option { 172 | padding: 8px; 173 | vertical-align: middle; 174 | margin-top: 1px; 175 | } 176 | 177 | .mix-option-showing { 178 | background: linear-gradient(#379337, #3b9e3b 40%, #3ea63e); 179 | } 180 | 181 | .mix-option-hidden { 182 | background: linear-gradient(#2596b8, #28a1c5 40%, #29a8cd); 183 | } 184 | 185 | .mix-option-not-connected { 186 | background: grey; 187 | } 188 | 189 | .slider { 190 | 
margin-left: 16px; 191 | margin-right: 16px; 192 | } 193 | 194 | #mute-button { 195 | width: 2em; 196 | padding: 0 4px 0 4px; 197 | vertical-align: middle; 198 | } 199 | 200 | #mute-button:hover { 201 | text-decoration: none; 202 | color: #999; 203 | } 204 | 205 | #cpu-stats { 206 | color: #ddd; 207 | vertical-align: middle; 208 | padding: 4px 8px 4px 4px; 209 | } 210 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | # This stops test_audio and test_video from being considered as tests: 3 | norecursedirs = brave 4 | -------------------------------------------------------------------------------- /tests/assets/2_second_audio.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/tests/assets/2_second_audio.m4a -------------------------------------------------------------------------------- /tests/assets/2_second_audio.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/tests/assets/2_second_audio.mp3 -------------------------------------------------------------------------------- /tests/assets/2_second_video.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/tests/assets/2_second_video.mp4 -------------------------------------------------------------------------------- /tests/assets/5_second_audio.m4a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/tests/assets/5_second_audio.m4a -------------------------------------------------------------------------------- /tests/assets/5_second_audio.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/tests/assets/5_second_audio.mp3 -------------------------------------------------------------------------------- /tests/assets/5_second_video.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/tests/assets/5_second_video.mp4 -------------------------------------------------------------------------------- /tests/assets/image_640_360.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bitwave-tv/brave/506d5e9ed07123f4497e0576b197590cc60b35c2/tests/assets/image_640_360.png -------------------------------------------------------------------------------- /tests/test_add_and_remove_from_mix.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | def test_adding_and_removing_sources_to_a_mix(run_brave, create_config_file): 6 | set_up_two_sources(run_brave, create_config_file) 7 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': True}, {'uid': 'input2', 'zorder': 3, 'in_mix': True}]) 8 | remove_source('input2', 1) 9 | assert_api_returns_right_mixer_sources([{'uid': 
'input1', 'zorder': 2, 'in_mix': True}, {'uid': 'input2', 'zorder': 3, 'in_mix': False}]) 10 | remove_source('input2', 1) # Prove it's safe to do repeatedly 11 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': True}, {'uid': 'input2', 'zorder': 3, 'in_mix': False}]) 12 | remove_source('input1', 1) 13 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': False}, {'uid': 'input2', 'zorder': 3, 'in_mix': False}]) 14 | overlay_source('input2', 1) 15 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': False}, {'uid': 'input2', 'zorder': 3, 'in_mix': True}]) 16 | overlay_source('input2', 1) # Prove it's safe to do repeatedly 17 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': False}, {'uid': 'input2', 'zorder': 3, 'in_mix': True}]) 18 | overlay_source('input1', 1) 19 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': True}, {'uid': 'input2', 'zorder': 3, 'in_mix': True}]) 20 | 21 | 22 | def test_removing_input_whilst_in_a_mix(run_brave, create_config_file): 23 | set_up_two_sources(run_brave, create_config_file) 24 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': True}, {'uid': 'input2', 'zorder': 3, 'in_mix': True}]) 25 | assert_number_of_sinks_on_mixer(3) # 3 because there's always a dummy one with test video src 26 | delete_input(2) 27 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': True}]) 28 | assert_number_of_sinks_on_mixer(2) 29 | 30 | 31 | def test_switching(run_brave, create_config_file): 32 | set_up_two_sources(run_brave, create_config_file) 33 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': True}, {'uid': 'input2', 'zorder': 3, 'in_mix': True}]) 34 | cut_to_source('input2', 1) 35 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': False}, {'uid': 'input2', 'zorder': 3, 'in_mix': True}]) 36 | cut_to_source('input1', 1) 37 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': True}, {'uid': 'input2', 'zorder': 3, 'in_mix': False}]) 38 | cut_to_source('input1', 1) 39 | assert_api_returns_right_mixer_sources([{'uid': 'input1', 'zorder': 2, 'in_mix': True}, {'uid': 'input2', 'zorder': 3, 'in_mix': False}]) 40 | 41 | def set_up_two_sources(run_brave, create_config_file): 42 | output_video_location = create_output_video_location() 43 | 44 | config = { 45 | 'inputs': [ 46 | {'type': 'test_video', 'pattern': 4}, # pattern 4 is red 47 | {'type': 'test_video', 'pattern': 5}, # pattern 5 is green 48 | ], 49 | 'mixers': [ 50 | { 51 | 'sources': [ 52 | {'uid': 'input1', 'zorder': 2}, 53 | {'uid': 'input2', 'zorder': 3}, 54 | ] 55 | } 56 | ], 57 | 'outputs': [ 58 | # {'block_type': 'local'} # good for debugging 59 | ] 60 | } 61 | config_file = create_config_file(config) 62 | run_brave(config_file.name) 63 | time.sleep(2) 64 | check_brave_is_running() 65 | 66 | 67 | def assert_api_returns_right_mixer_sources(inputs): 68 | response = api_get('/api/all') 69 | assert response.status_code == 200 70 | if len(inputs) == 0: 71 | assert ('sources' not in response.json()['mixers'][0]) or \ 72 | (len(response.json()['mixers'][0]['sources']) == 0) 73 | else: 74 | assert response.json()['mixers'][0]['sources'] == inputs 75 | 76 | 77 | def assert_number_of_sinks_on_mixer(num): 78 | response = api_get('/api/elements') 79 | assert response.status_code == 200 80 | json_response = response.json() 81 | 
elements = json_response['mixers']['1']['elements'] 82 | video_mixer = next((x for x in elements if x['name'] == 'video_mixer'), None) 83 | pad_names = video_mixer['pads'].keys() 84 | sink_pad_names = list(filter(lambda x: x.startswith('sink'), pad_names)) 85 | assert len(sink_pad_names) == num, 'Expected %d sinks on mixer but got %s' % (num, video_mixer['pads'].keys()) 86 | -------------------------------------------------------------------------------- /tests/test_api.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | ''' 6 | Some core API testing 7 | ''' 8 | 9 | def test_404(run_brave): 10 | run_brave() 11 | response = api_get('/api/overlays') 12 | assert response.status_code == 200 13 | response = api_get('/api/bad-api-path') 14 | assert response.status_code == 404 15 | assert response.json()['error'] == 'Not found' 16 | 17 | 18 | def test_bad_body(run_brave): 19 | run_brave() 20 | response = api_post('/api/overlays', 'This is not a JSON object') 21 | assert response.status_code == 400 22 | assert response.json()['error'] == 'Invalid JSON' 23 | 24 | 25 | def test_missing_type(run_brave): 26 | run_brave() 27 | response = api_put('/api/inputs', {}) 28 | assert response.status_code == 400 29 | print('text=', response.text) 30 | assert "input missing 'type'" in response.json()['error'] 31 | -------------------------------------------------------------------------------- /tests/test_config_file.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | import yaml 3 | from utils import * 4 | 5 | 6 | def test_brave_with_no_config_file(run_brave): 7 | run_brave() 8 | check_brave_is_running() 9 | 10 | 11 | def test_brave_with_missing_config_file(run_brave): 12 | run_brave('not-a-real-config-file') 13 | check_return_value(1) 14 | 15 | 16 | def test_brave_with_invalid_input_type(run_brave, create_config_file): 17 | config = {'inputs': [{'type': 'not-a-valid-type'}]} 18 | config_file = create_config_file(config) 19 | run_brave(config_file.name) 20 | check_return_value(1) 21 | 22 | def test_brave_with_full_config_file(run_brave, create_config_file): 23 | output_image_location = create_output_image_location() 24 | output_video_location = create_output_video_location() 25 | 26 | file_asset = test_directory() + '/assets/5_second_video.mp4' 27 | 28 | config = { 29 | 'inputs': [ 30 | {'type': 'test_video'}, 31 | {'type': 'test_audio', 'freq': 200 } , 32 | {'type': 'test_audio', 'freq': 600 } , 33 | {'type': 'uri', 'uri': 'file://' + file_asset } 34 | ], 35 | 'outputs': [ 36 | {'type': 'local', 'source': 'input4'}, 37 | {'type': 'tcp'}, 38 | {'type': 'file', 'source': 'input1', 'location': output_video_location}, 39 | {'type': 'image', 'source': 'input2', 'location': output_image_location} 40 | ] 41 | } 42 | config_file = create_config_file(config) 43 | run_brave(config_file.name) 44 | time.sleep(3) 45 | check_brave_is_running() 46 | response = api_get('/api/all') 47 | assert response.status_code == 200 48 | assert_everything_in_playing_state(response.json()) 49 | assert response.json()['inputs'][0]['type'] == 'test_video' 50 | assert response.json()['inputs'][1]['type'] == 'test_audio' 51 | assert response.json()['inputs'][2]['type'] == 'test_audio' 52 | assert response.json()['inputs'][1]['freq'] == 200 53 | assert response.json()['inputs'][2]['freq'] == 600 54 | assert response.json()['outputs'][0]['type'] == 
'local' 55 | assert response.json()['outputs'][1]['type'] == 'tcp' 56 | assert response.json()['outputs'][2]['type'] == 'file' 57 | assert response.json()['outputs'][3]['type'] == 'image' 58 | assert response.json()['outputs'][2]['location'] == output_video_location 59 | assert response.json()['outputs'][2]['source'] == 'input1' 60 | assert response.json()['outputs'][3]['source'] == 'input2' 61 | 62 | 63 | def test_non_string_keys(run_brave, create_config_file): 64 | config = { 65 | 'inputs': [ 66 | { 1: 'oh look 1 is not a string'} 67 | ] 68 | } 69 | config_file = create_config_file(config) 70 | run_brave(config_file.name) 71 | check_return_value(1) 72 | 73 | 74 | def test_config_file_with_ids(run_brave, create_config_file): 75 | config = { 76 | 'inputs': [ 77 | {'type': 'test_video'}, 78 | {'type': 'test_video', 'id': 10} 79 | ], 80 | 'outputs': [ 81 | {'type': 'image', 'id': 1}, 82 | {'type': 'image'}, 83 | {'type': 'image'} 84 | ], 85 | 'mixers': [ 86 | {}, 87 | {'id': 2} 88 | ], 89 | 'overlays': [ 90 | {'type': 'clock', 'id': 7} 91 | ], 92 | } 93 | config_file = create_config_file(config) 94 | run_brave(config_file.name) 95 | check_brave_is_running() 96 | response = api_get('/api/all') 97 | assert response.status_code == 200 98 | assert len(response.json()['inputs']) == 2 99 | assert len(response.json()['outputs']) == 3 100 | assert len(response.json()['mixers']) == 2 101 | assert len(response.json()['overlays']) == 1 102 | assert response.json()['inputs'][0]['id'] == 1 103 | assert response.json()['inputs'][1]['id'] == 10 104 | assert response.json()['outputs'][0]['id'] == 1 105 | assert response.json()['outputs'][1]['id'] == 2 106 | assert response.json()['outputs'][2]['id'] == 3 107 | assert response.json()['mixers'][0]['id'] == 1 108 | assert response.json()['mixers'][1]['id'] == 2 109 | assert response.json()['overlays'][0]['id'] == 7 110 | -------------------------------------------------------------------------------- /tests/test_config_file_output.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | import yaml 3 | from utils import * 4 | 5 | ''' 6 | Tests for /api/config/current.yaml 7 | ''' 8 | 9 | def test_empty_config_file(run_brave, create_config_file): 10 | config = {} 11 | subtest_start_brave_and_check_config_file(run_brave, create_config_file, config) 12 | 13 | def test_simple_config_file(run_brave, create_config_file): 14 | config = { 15 | 'enable_video': False, 16 | 'inputs': [{'type': 'test_video'}], 17 | 'mixers': [{}], 18 | 'outputs': [{'type': 'local', 'source': 'input1'}], 19 | } 20 | subtest_start_brave_and_check_config_file(run_brave, create_config_file, config) 21 | 22 | def test_complex_config_file(run_brave, create_config_file): 23 | config = { 24 | 'default_mixer_width': 123, 25 | 'stun_server': 'some_stun_server', 26 | 'inputs': [ 27 | {'type': 'test_video'}, 28 | {'type': 'test_audio', 'freq': 200 } , 29 | {'type': 'test_audio', 'freq': 600, 'id': 6 } 30 | ], 31 | 'mixers': [ 32 | {'sources': [{'uid': 'input1'}, {'uid': 'input2'}]}, 33 | {'state': 'READY', 'id': 2} , 34 | ], 35 | 'overlays': [ 36 | {'type': 'clock', 'valignment': 'top', 'source': 'input1', 'id': 9} 37 | ], 38 | 'outputs': [ 39 | {'type': 'local', 'source': 'input6', 'id': 17}, 40 | {'type': 'tcp', 'source': 'input2'}, 41 | {'type': 'file', 'source': 'input1', 'location': '/tmp/x', 'state': 'NULL'}, 42 | {'type': 'image', 'source': 'mixer2'} 43 | ] 44 | } 45 | 46 | subtest_start_brave_and_check_config_file(run_brave, 
create_config_file, config) 47 | 48 | def subtest_start_brave_and_check_config_file(run_brave, create_config_file, config): 49 | config_file = create_config_file(config) 50 | run_brave(config_file.name) 51 | time.sleep(0.5) 52 | check_brave_is_running() 53 | subtest_validate_config_response(config) 54 | 55 | def subtest_validate_config_response(orig): 56 | config_response = api_get('/api/config/current.yaml') 57 | assert config_response.status_code == 200 58 | parsed = yaml.load(config_response.text) 59 | 60 | check_first_dict_exists_in_second(orig, parsed) 61 | 62 | def check_first_dict_exists_in_second(a, b): 63 | for key, value in a.items(): 64 | assert key in b 65 | if isinstance(value, list): 66 | check_first_array_exists_in_second(value, b[key]) 67 | else: 68 | # print('Comparing %s with %s' % (value, b[key])) 69 | assert value == b[key] 70 | 71 | def check_first_array_exists_in_second(a, b): 72 | assert len(a) == len(b) 73 | for i in range(len(a)): 74 | check_first_dict_exists_in_second(a[i], b[i]) 75 | -------------------------------------------------------------------------------- /tests/test_effect_overlay.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | 6 | def test_effect_overlay_visible_after_creation(run_brave): 7 | run_brave() 8 | time.sleep(0.5) 9 | check_brave_is_running() 10 | 11 | add_overlay({'type': 'effect', 'source': 'mixer1', 'effect_name': 'edgetv'}) 12 | time.sleep(0.1) 13 | assert_overlays([{'id': 1, 'visible': False, 'effect_name': 'edgetv'}]) 14 | 15 | update_overlay(1, {'visible': True}, status_code=200) 16 | time.sleep(0.1) 17 | assert_overlays([{'id': 1, 'visible': True,'effect_name': 'edgetv'}]) 18 | 19 | add_overlay({'type': 'effect', 'source': 'mixer1', 'effect_name': 'solarize'}) 20 | time.sleep(0.1) 21 | assert_overlays([{'id': 1, 'visible': True,'effect_name': 'edgetv'}, 22 | {'id': 2, 'visible': False, 'effect_name': 'solarize'}]) 23 | 24 | update_overlay(2, {'visible': True}, status_code=200) 25 | time.sleep(0.1) 26 | assert_overlays([{'id': 1, 'visible': True,'effect_name': 'edgetv'}, 27 | {'id': 2, 'visible': True,'effect_name': 'solarize'}]) 28 | 29 | delete_overlay(1) 30 | time.sleep(0.1) 31 | assert_overlays([{'id': 2, 'visible': True,'effect_name': 'solarize'}]) 32 | 33 | delete_overlay(2) 34 | time.sleep(0.1) 35 | assert_overlays([]) 36 | 37 | # @pytest.mark.skip(reason="known bug that effects made visible at start should not be permitted") 38 | def test_effect_overlay_visible_at_creation(run_brave): 39 | '''Test that visible:true on creation also does not work if mixer is playing/paused''' 40 | run_brave() 41 | time.sleep(0.5) 42 | check_brave_is_running() 43 | 44 | # This time, visible from the start with visible=True 45 | add_overlay({'type': 'effect', 'source': 'mixer1', 'effect_name': 'warptv', 'visible': True}, status_code=200) 46 | time.sleep(0.1) 47 | assert_overlays([{'visible': True, 'effect_name': 'warptv'}]) 48 | 49 | 50 | def test_set_up_effect_overlay_in_config_file(run_brave, create_config_file): 51 | '''Test that an effect in a config file works fine''' 52 | output_video_location = create_output_video_location() 53 | 54 | config = { 55 | 'mixers': [{}], 56 | 'overlays': [ 57 | {'type': 'effect', 'source': 'mixer1', 'effect_name': 'burn', 'visible': True}, 58 | {'type': 'effect', 'source': 'mixer1', 'effect_name': 'vertigotv', 'visible': False} 59 | ] 60 | } 61 | config_file = create_config_file(config) 
62 | run_brave(config_file.name) 63 | time.sleep(0.5) 64 | check_brave_is_running() 65 | assert_overlays([{'id': 1, 'effect_name': 'burn', 'visible': True}, 66 | {'id': 2, 'effect_name': 'vertigotv', 'visible': False}]) 67 | -------------------------------------------------------------------------------- /tests/test_elements_api_endpoint.py: -------------------------------------------------------------------------------- 1 | import time, pytest 2 | from utils import * 3 | 4 | 5 | def test_elements_api_endpoint(run_brave): 6 | ''' 7 | Check that /api/elements returns a list of element details, 8 | including when "?show_inside_bin_elements=yes" 9 | ''' 10 | run_brave() 11 | time.sleep(0.5) 12 | check_brave_is_running() 13 | add_input({'type': 'image', 'uri': 'file://' + test_directory() + '/assets/image_640_360.png'}) 14 | cut_to_source('input1', 1) 15 | time.sleep(0.5) 16 | 17 | subtest_elements_endpoint() 18 | subtest_elements_endpoint_with_bin_elements() 19 | 20 | def subtest_elements_endpoint(): 21 | elements_response = api_get('/api/elements') 22 | assert elements_response.status_code == 200 23 | elements_object = elements_response.json() 24 | assert len(elements_object['inputs'].items()) == 1 25 | assert len(elements_object['mixers'].items()) == 1 26 | assert len(elements_object['outputs'].items()) == 0 27 | assert len(elements_object['inputs']['1']['elements']) == 10 28 | 29 | def subtest_elements_endpoint_with_bin_elements(): 30 | elements_response = api_get('/api/elements?show_inside_bin_elements=yes') 31 | assert elements_response.status_code == 200 32 | elements_object = elements_response.json() 33 | assert len(elements_object['inputs'].items()) == 1 34 | assert len(elements_object['mixers'].items()) == 1 35 | assert len(elements_object['outputs'].items()) == 0 36 | assert len(elements_object['inputs']['1']['elements']) == 15 37 | -------------------------------------------------------------------------------- /tests/test_file_output.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | def test_can_create_video_file_output(run_brave, create_config_file): 6 | output_video_location = create_output_video_location() 7 | 8 | config = { 9 | 'inputs': [ 10 | {'type': 'test_video', 'pattern': 4}, # pattern 4 is red 11 | ], 12 | 'outputs': [ 13 | {'type': 'file', 'location': output_video_location } 14 | # ,{'type': 'local'} # good for debugging 15 | ] 16 | } 17 | config_file = create_config_file(config) 18 | run_brave(config_file.name) 19 | time.sleep(4) 20 | check_brave_is_running() 21 | response = api_get('/api/all') 22 | assert response.status_code == 200 23 | assert_everything_in_playing_state(response.json()) 24 | assert os.path.exists(output_video_location) 25 | 26 | 27 | def test_valid_output_file(): 28 | assert_valid_output_file(get_output_video_location()) 29 | 30 | 31 | def stop_output(num): 32 | path = '/api/outputs/%d' % num 33 | response = api_post(path, {'state': 'READY'}) 34 | assert response.status_code == 200, 'Status code for %s was %d' % (path, response.status_code) 35 | 36 | def assert_valid_output_file(output_video_location): 37 | ''' 38 | Given a file, validates it is a video (mp4) file 39 | ''' 40 | import gi 41 | gi.require_version('Gst', '1.0') 42 | from gi.repository import Gst, GLib 43 | 44 | Gst.init(None) 45 | mainloop = GLib.MainLoop() 46 | 47 | # We create a pipeline so that we can read the file and check it: 48 | pipeline = 
Gst.ElementFactory.make("playbin") 49 | pipeline.set_property('uri','file://'+output_video_location) 50 | playsink = pipeline.get_by_name('playsink') 51 | playsink.set_property('video-sink', Gst.ElementFactory.make('fakesink')) 52 | pipeline.set_state(Gst.State.PAUSED) 53 | 54 | def after_a_second(): 55 | assert pipeline.get_state(0).state == Gst.State.PAUSED 56 | element = pipeline.get_by_name('inputselector1') 57 | caps = element.get_static_pad('src').get_current_caps() 58 | assert caps.to_string() == 'audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)48000, channels=(int)2, channel-mask=(bitmask)0x0000000000000003' 59 | 60 | element = pipeline.get_by_name('inputselector0') 61 | caps = element.get_static_pad('src').get_current_caps() 62 | assert caps.to_string() == 'video/x-raw, format=(string)NV12, width=(int)640, height=(int)360, interlace-mode=(string)progressive, multiview-mode=(string)mono, multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono, pixel-aspect-ratio=(fraction)1/1, chroma-site=(string)jpeg, colorimetry=(string)bt601, framerate=(fraction)30/1' 63 | 64 | pipeline.set_state(Gst.State.NULL) 65 | mainloop.quit() 66 | 67 | GLib.timeout_add(1000, after_a_second) 68 | mainloop.run() 69 | -------------------------------------------------------------------------------- /tests/test_image_input.py: -------------------------------------------------------------------------------- 1 | import time, pytest 2 | from utils import * 3 | 4 | 5 | def test_image_input_from_command_line(run_brave, create_config_file): 6 | image_uri = 'file://' + test_directory() + '/assets/image_640_360.png' 7 | config = { 8 | 'enable_audio': False, # useful for debugging TODO remove 9 | 'inputs': [ 10 | # {'type': 'test_video', 'pattern': 4}, # pattern 4 is red 11 | {'type': 'image', 'uri': image_uri}, # pattern 4 is red 12 | ], 13 | 'outputs': [ 14 | {'type': 'local'} # good for debugging 15 | ] 16 | } 17 | # print('Config:', config) 18 | config_file = create_config_file(config) 19 | run_brave(config_file.name) 20 | time.sleep(0.5) 21 | check_brave_is_running() 22 | time.sleep(2) 23 | response = api_get('/api/all') 24 | assert response.status_code == 200 25 | assert_everything_in_playing_state(response.json()) 26 | -------------------------------------------------------------------------------- /tests/test_image_output.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | 5 | def test_image_output(run_brave, create_config_file): 6 | output_image_location = create_output_image_location() 7 | 8 | config = { 9 | 'mixers': [{'sources': [{'uid': 'input1', 'zorder': 2}]}], 10 | 'inputs': [ 11 | {'type': 'test_video', 'pattern': 4}, # pattern 4 is red 12 | ], 13 | 'outputs': [ 14 | {'type': 'local'}, # good for debugging 15 | {'type': 'image', 'location': output_image_location } 16 | ] 17 | } 18 | config_file = create_config_file(config) 19 | run_brave(config_file.name) 20 | time.sleep(4) 21 | check_brave_is_running() 22 | response = api_get('/api/all') 23 | assert response.status_code == 200 24 | assert_everything_in_playing_state(response.json()) 25 | 26 | 27 | assert_image_file_color(output_image_location, (255,0,0)) 28 | -------------------------------------------------------------------------------- /tests/test_initial_state.py: 
-------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | 5 | def test_state_property_on_startup(run_brave, create_config_file): 6 | ''' 7 | Test that if 'state' is set as a property, it is honored. 8 | It can be set for inputs, outputs and mixers. 9 | ''' 10 | output_image_location0 = create_output_image_location() 11 | output_image_location1 = create_output_image_location() 12 | 13 | config = { 14 | 'inputs': [ 15 | {'type': 'test_video', 'pattern': 4, 'state': 'PLAYING'}, 16 | {'type': 'test_video', 'pattern': 5, 'state': 'PAUSED'}, 17 | {'type': 'test_video', 'pattern': 6, 'state': 'READY'}, 18 | {'type': 'test_video', 'pattern': 7, 'state': 'NULL'}, 19 | ], 20 | 'mixers': [ 21 | {'state': 'PLAYING'}, 22 | {'state': 'PAUSED'}, 23 | {'state': 'READY'}, 24 | {'state': 'NULL'}, 25 | ], 26 | 'outputs': [ 27 | {'type': 'image', 'location': output_image_location0, 'state': 'PLAYING'}, 28 | {'type': 'image', 'location': output_image_location0, 'state': 'PAUSED'}, 29 | {'type': 'image', 'location': output_image_location0, 'state': 'READY'}, 30 | {'type': 'image', 'location': output_image_location0, 'state': 'NULL'}, 31 | ] 32 | } 33 | config_file = create_config_file(config) 34 | run_brave(config_file.name) 35 | time.sleep(3) 36 | check_brave_is_running() 37 | response = api_get('/api/all') 38 | assert response.status_code == 200 39 | details = response.json() 40 | assert details['inputs'][0]['state'] == 'PLAYING' 41 | assert details['inputs'][1]['state'] == 'PAUSED' 42 | assert details['inputs'][2]['state'] == 'READY' 43 | assert details['inputs'][3]['state'] == 'NULL' 44 | assert details['mixers'][0]['state'] == 'PLAYING' 45 | assert details['mixers'][1]['state'] == 'PAUSED' 46 | assert details['mixers'][2]['state'] == 'READY' 47 | assert details['mixers'][3]['state'] == 'NULL' 48 | assert details['outputs'][0]['state'] == 'PLAYING' 49 | assert details['outputs'][1]['state'] == 'PAUSED' 50 | assert details['outputs'][2]['state'] == 'READY' 51 | assert details['outputs'][3]['state'] == 'NULL' 52 | 53 | 54 | def test_state_property_via_api(run_brave): 55 | ''' 56 | Test that if 'state' is set as a property, it is honored. 57 | It can be set for inputs, outputs and mixers. 
58 | ''' 59 | run_brave() 60 | check_brave_is_running() 61 | response = api_get('/api/all') 62 | assert response.status_code == 200 63 | assert_everything_in_playing_state(response.json()) 64 | output_image_location0 = create_output_image_location() 65 | 66 | add_input({'type': 'test_audio', 'state': 'NULL'}) 67 | add_input({'type': 'test_audio', 'state': 'READY'}) 68 | add_input({'type': 'test_audio', 'state': 'PAUSED'}) 69 | add_input({'type': 'test_audio', 'state': 'PLAYING'}) 70 | 71 | add_mixer({'state': 'NULL'}) 72 | add_mixer({'state': 'READY'}) 73 | add_mixer({'state': 'PAUSED'}) 74 | add_mixer({'state': 'PLAYING'}) 75 | 76 | add_output({'type': 'image', 'location': output_image_location0, 'state': 'NULL'}) 77 | add_output({'type': 'image', 'location': output_image_location0, 'state': 'READY'}) 78 | add_output({'type': 'image', 'location': output_image_location0, 'state': 'PAUSED'}) 79 | add_output({'type': 'image', 'location': output_image_location0, 'state': 'PLAYING'}) 80 | 81 | time.sleep(1) 82 | 83 | response = api_get('/api/all') 84 | assert response.status_code == 200 85 | details = response.json() 86 | assert details['inputs'][0]['state'] == 'NULL' 87 | assert details['inputs'][1]['state'] == 'READY' 88 | assert details['inputs'][2]['state'] == 'PAUSED' 89 | assert details['inputs'][3]['state'] == 'PLAYING' 90 | 91 | # Mixer 0 is the default: 92 | assert details['mixers'][0]['state'] == 'PLAYING' 93 | 94 | assert details['mixers'][1]['state'] == 'NULL' 95 | assert details['mixers'][2]['state'] == 'READY' 96 | assert details['mixers'][3]['state'] == 'PAUSED' 97 | assert details['mixers'][4]['state'] == 'PLAYING' 98 | 99 | assert details['outputs'][0]['state'] == 'NULL' 100 | assert details['outputs'][1]['state'] == 'READY' 101 | assert details['outputs'][2]['state'] == 'PAUSED' 102 | assert details['outputs'][3]['state'] == 'PLAYING' 103 | -------------------------------------------------------------------------------- /tests/test_inputs.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | 5 | def test_inputs(run_brave): 6 | run_brave() 7 | check_brave_is_running() 8 | assert_inputs([]) 9 | 10 | # Create input, and can set the id 11 | add_input({'type': 'test_video', 'id': 99}) 12 | time.sleep(2) 13 | assert_inputs([{'type': 'test_video', 'id': 99, 'uid': 'input99'}]) 14 | 15 | # Different types of inputs work: 16 | add_input({'type': 'test_audio'}) 17 | time.sleep(1) 18 | assert_inputs([{'type': 'test_video', 'id': 99}, {'type': 'test_audio', 'id': 1}]) 19 | 20 | # Change state to PAUSED 21 | update_input(1, {'state': 'NULL'}) 22 | assert_inputs([{'type': 'test_video', 'id': 99}, {'type': 'test_audio', 'id': 1, 'state': 'NULL'}], check_playing_state=False) 23 | 24 | # Change state to READY 25 | update_input(1, {'state': 'READY'}) 26 | assert_inputs([{'type': 'test_video', 'id': 99}, {'type': 'test_audio', 'id': 1, 'state': 'READY'}], check_playing_state=False) 27 | 28 | # Change state to NULL 29 | update_input(1, {'state': 'PAUSED'}) 30 | assert_inputs([{'type': 'test_video', 'id': 99}, {'type': 'test_audio', 'id': 1, 'state': 'PAUSED'}], check_playing_state=False) 31 | 32 | # Change state to PLAYING 33 | update_input(1, {'state': 'PLAYING'}) 34 | time.sleep(1) 35 | assert_inputs([{'type': 'test_video', 'id': 99}, {'type': 'test_audio', 'id': 1}]) 36 | 37 | # Add a property to existing input 38 | update_input(99, {'pattern': 5}) 39 | assert_inputs([{'type': 'test_video', 'id': 
99, 'pattern': 5}, {'type': 'test_audio', 'id': 1}]) 40 | 41 | # Add a bad property to an existing input 42 | update_input(1, {'not_real': 100}, 400) 43 | assert_inputs([{'type': 'test_video', 'id': 99}, {'type': 'test_audio', 'id': 1}]) 44 | 45 | # Add a property to a missing input 46 | update_input(55, {'pattern': 6}, 400) 47 | 48 | # Changing the ID of an input does not work 49 | update_input(99, {'id': 10}, 400) 50 | 51 | # Removing an existing input works: 52 | delete_input(99) 53 | assert_inputs([{'type': 'test_audio', 'id': 1}]) 54 | 55 | # Removing a non-existent input causes a user error 56 | delete_input(55, expected_status_code=400) # Does not exist 57 | assert_inputs([{'type': 'test_audio', 'id': 1}]) 58 | -------------------------------------------------------------------------------- /tests/test_local_output.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | 5 | def test_can_only_have_one_local_output(run_brave): 6 | run_brave() 7 | check_brave_is_running() 8 | assert_outputs([]) 9 | 10 | add_output({'type': 'local'}) 11 | assert_outputs([{'type': 'local', 'id': 1}]) 12 | 13 | # 400 user error response for making two local outputs: 14 | add_output({'type': 'local'}, status_code=400) 15 | assert_outputs([{'type': 'local', 'id': 1}]) 16 | -------------------------------------------------------------------------------- /tests/test_mixer_to_mixer.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | GREEN=[0, 255, 0] 6 | BLUE=[0, 0, 255] 7 | 8 | 9 | def test_mixer_to_mixer(run_brave, create_config_file): 10 | '''Ensure a mixer can accept another mixer as a source''' 11 | subtest_start_brave_with_three_mixers(run_brave, create_config_file) 12 | subtest_add_mixer2_as_source_of_mixer1() 13 | 14 | # mixer1's background is red. 15 | # mixer2's background is green. 16 | # mixer2 has been added as a source of mixer1, so the output of mixer1 should be green.
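# (overlay_source() - a utils helper - overlays the named source onto the given mixer with the
#  supplied zorder; a higher zorder is composited on top, which is why raising mixer3's zorder
#  later in this test turns the mix blue.)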
17 | subtest_ensure_mixer1_color(GREEN) 18 | 19 | subtest_add_mixer3_as_source_of_mixer1() 20 | 21 | # mixer3 has a lower zorder than mixer2, so mixer1 will still be the color of mixer2 (green) 22 | subtest_ensure_mixer1_color(GREEN) 23 | 24 | # But if we increase the zorder of mixer3, it will appear on top of mixer2, making the mix blue 25 | subtest_update_mixer3_zorder() 26 | subtest_ensure_mixer1_color(BLUE) 27 | 28 | def subtest_add_mixer2_as_source_of_mixer1(): 29 | overlay_source('mixer2', 1, details={'zorder': 2}) 30 | time.sleep(1) 31 | assert_everything_in_playing_state() 32 | 33 | 34 | def subtest_add_mixer3_as_source_of_mixer1(): 35 | overlay_source('mixer3', 1, details={'zorder': 1}) 36 | time.sleep(1) 37 | assert_everything_in_playing_state() 38 | assert_mixers([ 39 | {'pattern': 4, 'sources': [ 40 | {'uid': 'mixer2', 'in_mix': True, 'zorder': 2}, 41 | {'uid': 'mixer3', 'in_mix': True, 'zorder': 1} 42 | ]}, 43 | {'pattern': 5, 'sources': []}, 44 | {'pattern': 6, 'sources': []} 45 | ]) 46 | 47 | def subtest_ensure_mixer1_color(color): 48 | time.sleep(3) 49 | assert_image_output_color(1, color) 50 | 51 | 52 | def subtest_update_mixer3_zorder(): 53 | overlay_source('mixer3', 1, details={'zorder': 3}) 54 | time.sleep(1) 55 | assert_everything_in_playing_state() 56 | assert_mixers([ 57 | {'pattern': 4, 'sources': [ 58 | {'uid': 'mixer2', 'in_mix': True, 'zorder': 2}, 59 | {'uid': 'mixer3', 'in_mix': True, 'zorder': 3} 60 | ]}, 61 | {'pattern': 5, 'sources': []}, 62 | {'pattern': 6, 'sources': []} 63 | ]) 64 | 65 | 66 | def subtest_start_brave_with_three_mixers(run_brave, create_config_file): 67 | # Pattern 4 is red, pattern 5 is green, pattern 6 is blue 68 | config = { 69 | 'mixers': [{'pattern': 4}, {'pattern': 5}, {'pattern': 6}], 70 | 'outputs': [{'type': 'image', 'source': 'mixer1'}] 71 | } 72 | config_file = create_config_file(config) 73 | run_brave(config_file.name) 74 | time.sleep(0.5) 75 | check_brave_is_running() 76 | -------------------------------------------------------------------------------- /tests/test_mixers.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | 6 | def test_mixer_from_config(run_brave, create_config_file): 7 | subtest_start_brave_with_mixers(run_brave, create_config_file) 8 | subtest_assert_two_mixers(mixer_1_props={'width': 160, 'height': 90, 'pattern': 6}) 9 | subtest_change_mixer_pattern() 10 | subtest_assert_two_mixers(mixer_1_props={'width': 160, 'height': 90, 'pattern': 7}) 11 | subtest_change_width_and_height() 12 | subtest_assert_two_mixers(mixer_1_props={'width': 200, 'height': 300, 'pattern': 7}) 13 | subtest_delete_mixers() 14 | subtest_delete_nonexistant_mixer() 15 | 16 | def subtest_start_brave_with_mixers(run_brave, create_config_file): 17 | MIXER1 = { 18 | 'width': 160, 19 | 'height': 90, 20 | 'pattern': 6 21 | } 22 | MIXER2 = { 23 | 'width': 640, 24 | 'height': 360 25 | } 26 | config = {'mixers': [MIXER1, MIXER2]} 27 | config_file = create_config_file(config) 28 | run_brave(config_file.name) 29 | time.sleep(1) 30 | check_brave_is_running() 31 | 32 | 33 | def subtest_assert_two_mixers(mixer_1_props): 34 | assert_mixers([{ 35 | 'id': 1, 36 | 'uid': 'mixer1', 37 | **mixer_1_props, 38 | }, { 39 | 'id': 2, 40 | 'uid': 'mixer2', 41 | 'width': 640, 'height': 360, 'pattern': 0, 42 | }]) 43 | 44 | def subtest_change_mixer_pattern(): 45 | update_mixer(1, {'pattern': 7}) 46 | 47 | 48 | def subtest_change_width_and_height(): 49 
| update_mixer(1, {'width': 200, 'height': 300}) 50 | 51 | 52 | def subtest_delete_mixers(): 53 | delete_mixer(1) 54 | delete_mixer(2) 55 | assert_mixers([]) 56 | 57 | 58 | def subtest_delete_nonexistant_mixer(): 59 | delete_mixer(10, 400) 60 | 61 | 62 | def test_mixer_from_api(run_brave): 63 | run_brave() 64 | 65 | # There is one mixer by default 66 | assert_mixers([{'id': 1, 'width': 640, 'height': 360}]) 67 | 68 | # Create a second mixer (an ID is assigned automatically) 69 | add_mixer({'width': 200, 'height': 200}) 70 | time.sleep(1) 71 | assert_mixers([{'id': 1, 'width': 640, 'height': 360}, 72 | {'id': 2, 'width': 200, 'height': 200}]) 73 | subtest_delete_mixers() 74 | -------------------------------------------------------------------------------- /tests/test_multiple_braves.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | PORT_FROM_CONFIG_FILE = 12345 5 | PORT_FROM_COMMAND_LINE = 12346 6 | 7 | def test_running_two_braves_on_different_ports(run_brave, create_config_file): 8 | launch_brave_setting_port_in_config_file(run_brave, create_config_file) 9 | launch_brave_setting_port_in_environment_variable(run_brave) 10 | 11 | for port in [PORT_FROM_CONFIG_FILE, PORT_FROM_COMMAND_LINE]: 12 | response = api_get('/api/all', port=port) 13 | assert response.status_code == 200 14 | 15 | time.sleep(1) 16 | 17 | def launch_brave_setting_port_in_config_file(run_brave, create_config_file): 18 | config = {'api_port': PORT_FROM_CONFIG_FILE} 19 | config_file = create_config_file(config) 20 | run_brave(config_file.name) 21 | check_brave_is_running() 22 | 23 | 24 | def launch_brave_setting_port_in_environment_variable(run_brave): 25 | run_brave(port=PORT_FROM_COMMAND_LINE) 26 | check_brave_is_running() 27 | -------------------------------------------------------------------------------- /tests/test_multiple_inputs_and_mixers.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | ''' 6 | This test ensures that if there are multiple mixers sharing multiple playing inputs, 7 | they can all share them without impacting each other. 8 | ''' 9 | 10 | MIXER1 = { 11 | 'width': 160, 12 | 'height': 90, 13 | 'pattern': 3 # Black 14 | } 15 | MIXER2 = { 16 | 'width': 640, 17 | 'height': 360 18 | } 19 | 20 | # INPUT1 is RED and INPUT2 is GREEN: 21 | INPUT1 = {'type': 'test_video', 'pattern': 4 } 22 | INPUT2 = {'type': 'test_video', 'pattern': 5 } 23 | OUTPUT1 = {'type': 'image', 'source': 'mixer1'} 24 | OUTPUT2 = {'type': 'image', 'source': 'mixer2'} 25 | 26 | 27 | def test_mixer_from_config(run_brave, create_config_file): 28 | subtest_start_brave_with_mixers(run_brave, create_config_file) 29 | 30 | # Both mixers will be green because green INPUT2 has a higher zorder than red INPUT1 31 | assert_image_output_color(1, [0, 255, 0]) 32 | assert_image_output_color(2, [0, 255, 0]) 33 | 34 | subtest_ensure_one_mixer_does_not_affect_another() 35 | subtest_addition_of_input() 36 | subtest_overlay_of_new_input() 37 | 38 | subtest_addition_of_mixer() 39 | subtest_addition_of_destination_to_new_mixer() 40 | subtest_overlay_of_input_onto_new_mixer() 41 | 42 | 43 | def subtest_ensure_one_mixer_does_not_affect_another(): 44 | # Set mixer1 to just use INPUT1.
It should go RED, leaving mixer 2 on GREEN: 45 | cut_to_source('input1', 1) 46 | time.sleep(3) 47 | assert_image_output_color(1, [255, 0, 0]) 48 | assert_image_output_color(2, [0, 255, 0]) 49 | 50 | 51 | def subtest_addition_of_input(): 52 | # Create a third input. This is BLUE 53 | new_input = add_input({'type': 'test_video', 'pattern': 6}) 54 | cut_to_source(new_input['uid'], 1) 55 | time.sleep(3) 56 | 57 | # We added this to mixer 1 only, so it will be blue whilst mixer 2 remains green: 58 | assert_image_output_color(1, [0, 0, 255]) 59 | assert_image_output_color(2, [0, 255, 0]) 60 | 61 | 62 | def subtest_overlay_of_new_input(): 63 | # We overlay the new input onto mixer 2, but to a lower zorder than the existing content: 64 | overlay_source('input3', 2, details={'zorder': 5}) 65 | time.sleep(3) 66 | 67 | # Mixer 2 will not have changed, because the zorder is too low 68 | assert_image_output_color(2, [0, 255, 0]) 69 | 70 | # Now, let's increase the zorder, to bring it to the front 71 | overlay_source('input3', 2, details={'zorder': 40}) 72 | time.sleep(3) 73 | assert_image_output_color(2, [0, 0, 255]) 74 | 75 | 76 | def subtest_start_brave_with_mixers(run_brave, create_config_file): 77 | config = { 78 | 'mixers': [ 79 | # TODO different zorders for different mixers: 80 | {**MIXER1, 'sources': [{'uid': 'input1', 'zorder': 10}, {'uid': 'input2', 'zorder': 20}]}, 81 | {**MIXER2, 'sources': [{'uid': 'input1', 'zorder': 10}, {'uid': 'input2', 'zorder': 20}]} 82 | ], 83 | 'inputs': [INPUT1, INPUT2], 84 | 'outputs': [OUTPUT1, OUTPUT2] 85 | } 86 | config_file = create_config_file(config) 87 | run_brave(config_file.name) 88 | check_brave_is_running() 89 | time.sleep(4) 90 | assert_mixers([ 91 | {'id': 1, **MIXER1}, 92 | {'id': 2, **MIXER2} 93 | ]) 94 | assert_inputs([INPUT1, INPUT2]) 95 | assert_outputs([OUTPUT1, OUTPUT2]) 96 | 97 | 98 | def subtest_addition_of_mixer(): 99 | response = add_mixer({}) 100 | assert response['id'] == 3 101 | time.sleep(3) 102 | response = api_get('/api/all') 103 | assert response.status_code == 200 104 | assert_everything_in_playing_state(response.json()) 105 | response_json = response.json() 106 | 107 | # By default, a new mixer gets no sources: 108 | assert response_json['mixers'][2]['sources'] == [] 109 | 110 | 111 | def subtest_addition_of_destination_to_new_mixer(): 112 | add_output({'type': 'image', 'source': 'mixer2'}) 113 | time.sleep(2) 114 | response = api_get('/api/all') 115 | assert response.status_code == 200 116 | assert_everything_in_playing_state(response.json()) 117 | 118 | 119 | def subtest_overlay_of_input_onto_new_mixer(): 120 | cut_to_source('input3', 3, details={'zorder': 30}) 121 | time.sleep(2) 122 | 123 | # Now all three outputs will show input 3, i.e.
blue 124 | assert_image_output_color(1, [0, 0, 255]) 125 | assert_image_output_color(2, [0, 0, 255]) 126 | assert_image_output_color(3, [0, 0, 255]) 127 | -------------------------------------------------------------------------------- /tests/test_multiple_outputs.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | 5 | def start_with_multiple_outputs(run_brave, create_config_file, output_image_location1, output_image_location2): 6 | config = { 7 | 'mixers': [ 8 | {'pattern': 4}, # 4 is red 9 | {'pattern': 5} # 5 is green 10 | ], 11 | 'outputs': [ 12 | {'type': 'image', 'source': 'mixer2', 'location': output_image_location1 }, 13 | {'type': 'image', 'location': output_image_location2 } 14 | # ,{'type': 'local'} 15 | ] 16 | } 17 | config_file = create_config_file(config) 18 | run_brave(config_file.name) 19 | time.sleep(2) 20 | check_brave_is_running() 21 | 22 | 23 | def test_multiple_outputs_at_startup(run_brave, create_config_file): 24 | output_image_location1 = create_output_image_location() 25 | output_image_location2 = create_output_image_location() 26 | start_with_multiple_outputs(run_brave, create_config_file, output_image_location1, output_image_location2) 27 | assert_outputs([ 28 | {'type': 'image', 'source': 'mixer2', 'location': output_image_location1 }, 29 | {'type': 'image', 'source': 'mixer1', 'location': output_image_location2 } 30 | ]) 31 | assert_mixers([ 32 | {'id': 1, 'pattern': 4}, 33 | {'id': 2, 'pattern': 5} 34 | ]) 35 | 36 | # If they've linked right, one will be red and the other will be green 37 | time.sleep(2) 38 | assert_image_file_color(output_image_location1, (0,255,0)) 39 | assert_image_file_color(output_image_location2, (255,0,0)) 40 | 41 | def test_output_at_startup_to_missing_mixer(run_brave, create_config_file): 42 | config = { 43 | 'outputs': [ 44 | {'type': 'image', 'source': 'mixer2'}, 45 | ] 46 | } 47 | config_file = create_config_file(config) 48 | run_brave(config_file.name) 49 | time.sleep(1) 50 | check_return_value(1) 51 | 52 | def test_multiple_outputs_at_runtime(run_brave): 53 | run_brave() 54 | time.sleep(1) 55 | 56 | # Mixer ID 1 exists: 57 | add_output({'type': 'image', 'source': 'mixer1'}) 58 | 59 | # Mixer ID 2 does not exist: 60 | response = add_output({'type': 'image', 'source': 'mixer2'}, 400) 61 | assert 'does not exist' in response['error'] 62 | time.sleep(0.5) 63 | 64 | assert_outputs([{'type': 'image', 'id': 1, 'source': 'mixer1'}]) 65 | add_mixer({}) 66 | 67 | # Now we have a second mixer, this will work: 68 | add_output({'type': 'image', 'source': 'mixer2'}) 69 | # Do it again to prove we can have multiple outputs on the same mixer 70 | add_output({'type': 'image', 'source': 'mixer2'}) 71 | time.sleep(1) 72 | 73 | assert_outputs([{'type': 'image', 'source': 'mixer1'}, 74 | {'type': 'image', 'source': 'mixer2'}, 75 | {'type': 'image', 'source': 'mixer2'}]) 76 | -------------------------------------------------------------------------------- /tests/test_outputs.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | 5 | def test_outputs(run_brave): 6 | run_brave() 7 | check_brave_is_running() 8 | assert_outputs([]) 9 | 10 | # Create output, including allowing the ID to be set 11 | add_output({'type': 'local', 'id': 99}) 12 | assert_outputs([{'type': 'local', 'id': 99, 'uid': 'output99'}]) 13 | 14 | # Different types of outputs work: 15 | 
add_output({'type': 'image'}) 16 | time.sleep(1.5) 17 | assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1}]) 18 | 19 | # Change state to NULL 20 | update_output(1, {'state': 'NULL'}) 21 | assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1, 'state': 'NULL'}], check_playing_state=False) 22 | 23 | # Change state to READY 24 | update_output(1, {'state': 'READY'}) 25 | assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1, 'state': 'READY'}], check_playing_state=False) 26 | 27 | # Change state to PAUSED 28 | update_output(1, {'state': 'PAUSED'}) 29 | assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1, 'state': 'PAUSED'}], check_playing_state=False) 30 | 31 | # Change state to PLAYING 32 | update_output(1, {'state': 'PLAYING'}) 33 | time.sleep(1) 34 | assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1}]) 35 | 36 | # TODO outputs need to support being updated 37 | # # Add a property to existing output 38 | # update_output(1, {'update_frequency': 5}) 39 | # assert_outputs([{'type': 'image', 'id': 1, 'update_frequency': 5}]) 40 | 41 | # Add a bad property to an existing output 42 | update_output(1, {'not_real': 100}, 400) 43 | assert_outputs([{'type': 'local', 'id': 99}, {'type': 'image', 'id': 1}]) 44 | 45 | # Add a property to a missing output 46 | update_output(999, {'update_frequency': 5}, 400) 47 | 48 | # Removing an existing output works: 49 | delete_output(99) 50 | assert_outputs([{'type': 'image', 'id': 1}]) 51 | 52 | # Removing a non-existent output causes a user error 53 | delete_output(999, expected_status_code=400) # Does not exist 54 | assert_outputs([{'type': 'image', 'id': 1}]) 55 | -------------------------------------------------------------------------------- /tests/test_outputs_with_input_source.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | 5 | def test_outputs_with_input_source(run_brave): 6 | run_brave('config/empty.yaml') 7 | check_brave_is_running() 8 | subtest_can_connect_input_to_output() 9 | subtest_can_connect_change_output_to_another_input() 10 | subtest_can_connect_change_output_from_input_to_mixer() 11 | subtest_deleting_mixer_removes_it_as_source() 12 | 13 | def subtest_can_connect_input_to_output(): 14 | add_input({'type': 'test_video', 'pattern': 4}) # pattern 4 is red 15 | add_output({'type': 'image', 'source': 'input1'}) 16 | time.sleep(1) 17 | assert_outputs([{'type': 'image', 'id': 1, 'source': 'input1'}]) 18 | assert_everything_in_playing_state() 19 | time.sleep(2) 20 | assert_image_output_color(1, [255, 0, 0]) 21 | 22 | def subtest_can_connect_change_output_to_another_input(): 23 | add_input({'type': 'test_video', 'pattern': 5}) # pattern 5 is green 24 | 25 | # Will fail whilst in PLAYING state: 26 | update_output(1, {'source': 'input2'}, 400) 27 | assert_outputs([{'type': 'image', 'id': 1, 'source': 'input1'}]) 28 | 29 | # But will succeed in the READY state 30 | update_output(1, {'state': 'READY'}) 31 | update_output(1, {'source': 'input2'}) 32 | update_output(1, {'state': 'PLAYING'}) 33 | time.sleep(1) 34 | assert_outputs([{'type': 'image', 'id': 1, 'source': 'input2'}]) 35 | assert_everything_in_playing_state() 36 | time.sleep(2) 37 | assert_image_output_color(1, [0, 255, 0]) 38 | 39 | def subtest_can_connect_change_output_from_input_to_mixer(): 40 | add_mixer({'pattern': 6}) # pattern 6 is blue 41 | # Will fail whilst in PLAYING state: 42 | update_output(1,
{'source': 'mixer1'}, 400) 43 | assert_outputs([{'type': 'image', 'id': 1, 'source': 'input2'}]) 44 | 45 | # But will succeed in the READY state 46 | update_output(1, {'state': 'READY'}) 47 | update_output(1, {'source': 'mixer1'}) 48 | update_output(1, {'state': 'PLAYING'}) 49 | time.sleep(1) 50 | assert_outputs([{'type': 'image', 'id': 1, 'source': 'mixer1'}]) 51 | assert_everything_in_playing_state() 52 | time.sleep(2) 53 | assert_image_output_color(1, [0, 0, 255]) 54 | 55 | def subtest_deleting_mixer_removes_it_as_source(): 56 | delete_mixer(1) 57 | time.sleep(1) 58 | assert_everything_in_playing_state() 59 | response = api_get('/api/outputs') 60 | assert response.status_code == 200 61 | assert len(response.json()) == 1 62 | output_details = response.json()[0] 63 | assert output_details['source'] is None 64 | -------------------------------------------------------------------------------- /tests/test_outputs_with_no_source.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | 5 | def test_outputs_with_no_source(run_brave, create_config_file): 6 | start_with_two_outputs(run_brave, create_config_file) 7 | assert_outputs([ 8 | {'type': 'image', 'id': 1, 'source': None}, 9 | {'type': 'image', 'id': 2, 'source': 'mixer1'} 10 | ]) 11 | 12 | update_output(1, {'state': 'ready'}) 13 | update_output(2, {'state': 'ready'}) 14 | 15 | update_output(1, {'source': 'mixer1'}) 16 | update_output(2, {'source': None}) 17 | 18 | update_output(1, {'state': 'playing'}) 19 | update_output(2, {'state': 'playing'}) 20 | time.sleep(2) 21 | 22 | assert_outputs([ 23 | {'type': 'image', 'id': 1, 'source': 'mixer1', 'state': 'PLAYING'}, 24 | {'type': 'image', 'id': 2, 'source': None, 'state': 'PAUSED'} # Goes PAUSED when no source 25 | ], check_playing_state=False) 26 | 27 | def start_with_two_outputs(run_brave, create_config_file): 28 | output_video_location = create_output_video_location() 29 | 30 | config = { 31 | 'mixers': [{}], 32 | 'outputs': [ 33 | {'type': 'image', 'source': None}, 34 | {'type': 'image'}, # Will default to mixer1 35 | ] 36 | } 37 | config_file = create_config_file(config) 38 | run_brave(config_file.name) 39 | time.sleep(2) 40 | check_brave_is_running() 41 | -------------------------------------------------------------------------------- /tests/test_overlays.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | def test_overlay_at_start(run_brave, create_config_file): 6 | set_up_overlay_at_start(run_brave, create_config_file) 7 | assert_overlays([{'id': 1, 'uid': 'overlay1'}]) 8 | 9 | add_overlay({'type': 'text', 'source': 'mixer1', 'text': 'Overlay #1', 'visible': True}) 10 | time.sleep(1) 11 | assert_overlays([{'id': 1, 'source': 'mixer1', 'visible': True}, 12 | {'id': 2, 'source': 'mixer1', 'visible': True}]) 13 | 14 | # Try adding one that's not visible: 15 | add_overlay({'type': 'text', 'source': 'mixer1', 'text': 'Overlay #2', 'visible': False}) 16 | time.sleep(1) 17 | assert_overlays([{'id': 1, 'visible': True}, 18 | {'id': 2, 'visible': True}, 19 | {'id': 3, 'visible': False}]) 20 | 21 | # Try changing visible flag 22 | update_overlay(1, {'visible': False}) 23 | update_overlay(3, {'visible': True}) 24 | time.sleep(1) 25 | assert_overlays([{'id': 1, 'visible': False}, 26 | {'id': 2, 'visible': True}, 27 | {'id': 3, 'visible': True}]) 28 | 29 | subtest_delete_overlay() 30 
| subtest_add_overlay_without_source() 31 | subtest_make_overlay_without_source_visible() 32 | 33 | 34 | def subtest_delete_overlay(): 35 | delete_overlay(3) 36 | time.sleep(1) 37 | assert_overlays([{'id': 1, 'visible': False}, 38 | {'id': 2, 'visible': True}]) 39 | 40 | 41 | def subtest_add_overlay_without_source(): 42 | add_overlay({'type': 'text', 'text': 'Overlay #3b', 'visible': False}) 43 | time.sleep(1) 44 | assert_overlays([{'id': 1, 'visible': False, 'source': 'mixer1'}, 45 | {'id': 2, 'visible': True, 'source': 'mixer1'}, 46 | {'id': 3, 'visible': False, 'source': None}]) 47 | 48 | 49 | def subtest_make_overlay_without_source_visible(): 50 | # Can't make it visible if there's no source - so this will return a 400 51 | update_overlay(3, {'visible': True}, status_code=400) 52 | time.sleep(1) 53 | assert_overlays([{'id': 1, 'visible': False, 'source': 'mixer1'}, 54 | {'id': 2, 'visible': True, 'source': 'mixer1'}, 55 | {'id': 3, 'visible': False, 'source': None}]) 56 | 57 | 58 | def set_up_overlay_at_start(run_brave, create_config_file): 59 | output_video_location = create_output_video_location() 60 | 61 | config = { 62 | 'mixers': [ 63 | {} 64 | ], 65 | 'overlays': [ 66 | {'type': 'text', 'source': 'mixer1', 'text': 'Overlay #1', 'visible': True} 67 | ], 68 | 'outputs': [ 69 | # {'type': 'local'} # good for debugging 70 | {'type': 'local'} 71 | ] 72 | } 73 | config_file = create_config_file(config) 74 | run_brave(config_file.name) 75 | time.sleep(2) 76 | check_brave_is_running() 77 | -------------------------------------------------------------------------------- /tests/test_overlays_on_multiple_mixers.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | def test_overlays_on_multiple_mixers(run_brave, create_config_file): 6 | init_three_overlays(run_brave, create_config_file) 7 | assert_mixers([{'id': 1, 'state': 'PLAYING'}, 8 | {'id': 2, 'state': 'PLAYING'}]) 9 | assert_overlays([{'id': 1, 'source': 'mixer2'}, 10 | {'id': 2, 'source': 'mixer2'}, 11 | {'id': 3, 'source': 'mixer1'}]) 12 | 13 | add_overlay({'type': 'text', 'source': 'mixer1', 'text': 'Overlay #3', 'visible': True}) 14 | add_overlay({'type': 'text', 'source': 'mixer2', 'text': 'Overlay #4', 'visible': True}) 15 | time.sleep(1) 16 | assert_overlays([{'id': 1, 'source': 'mixer2'}, 17 | {'id': 2, 'source': 'mixer2'}, 18 | {'id': 3, 'source': 'mixer1'}, 19 | {'id': 4, 'source': 'mixer1'}, 20 | {'id': 5, 'source': 'mixer2'}]) 21 | 22 | delete_overlay(2) 23 | delete_overlay(3) 24 | time.sleep(1) 25 | assert_overlays([{'id': 1, 'source': 'mixer2'}, 26 | {'id': 4, 'source': 'mixer1'}, 27 | {'id': 5, 'source': 'mixer2'}]) 28 | 29 | 30 | def test_overlay_on_unknown_mixer_via_api_returns_error(run_brave, create_config_file): 31 | init_three_overlays(run_brave, create_config_file) 32 | add_overlay({'type': 'text', 'source': 'mixer999', 'text': 'Overlay #4'}, status_code=400) 33 | 34 | 35 | def test_overlay_on_unknown_mixer_in_config_returns_error(run_brave, create_config_file): 36 | config = {'overlays': [ 37 | {'type': 'text', 'source': 'mixer999', 'text': 'No such mixer', 'visible': False}, 38 | ]} 39 | config_file = create_config_file(config) 40 | run_brave(config_file.name) 41 | check_return_value(1) 42 | 43 | 44 | def test_can_move_overlay_between_mixers(run_brave, create_config_file): 45 | init_three_overlays(run_brave, create_config_file) 46 | update_overlay(1, {'source': 'mixer1'}) # Changing an invisible
overlay 47 | update_overlay(3, {'source': 'mixer2'}) # Changing a visible overlay 48 | update_overlay(2, {'source': None}) # Removing a source 49 | assert_overlays([{'id': 1, 'source': 'mixer1', 'visible': False}, 50 | {'id': 2, 'source': None, 'visible': False}, 51 | {'id': 3, 'source': 'mixer2', 'visible': True}]) 52 | 53 | 54 | def test_handles_bad_source(run_brave, create_config_file): 55 | init_three_overlays(run_brave, create_config_file) 56 | update_overlay(1, {'source': 'mixer999'}, status_code=400) 57 | 58 | 59 | def test_overlay_copes_when_source_mixer_is_deleted(run_brave, create_config_file): 60 | init_three_overlays(run_brave, create_config_file) 61 | delete_mixer(1) 62 | assert_overlays([{'id': 1, 'source': 'mixer2'}, 63 | {'id': 2, 'source': 'mixer2'}, 64 | {'id': 3, 'source': None}]) 65 | 66 | 67 | def test_overlay_can_start_without_a_source(run_brave, create_config_file): 68 | output_video_location = create_output_video_location() 69 | 70 | config = { 71 | 'overlays': [{'type': 'text', 'source': None, 'text': 'foo', 'visible': False}], 72 | 'mixers': [{}] 73 | } 74 | config_file = create_config_file(config) 75 | run_brave(config_file.name) 76 | time.sleep(1) 77 | check_brave_is_running() 78 | assert_overlays([{'id': 1, 'source': None, 'text': 'foo'}]) 79 | 80 | # Now update a prop, to check not having a source doesn't cause a problem 81 | update_overlay(1, {'text': 'bar'}) 82 | assert_overlays([{'id': 1, 'source': None, 'text': 'bar'}]) 83 | 84 | # Now add a source 85 | update_overlay(1, {'source': 'mixer1'}) 86 | assert_overlays([{'id': 1, 'source': 'mixer1', 'text': 'bar'}]) 87 | 88 | assert_everything_in_playing_state() 89 | 90 | 91 | def init_three_overlays(run_brave, create_config_file): 92 | output_video_location = create_output_video_location() 93 | 94 | config = { 95 | 'overlays': [ 96 | {'type': 'text', 'source': 'mixer2', 'text': 'Overlay #1', 'visible': False}, 97 | {'type': 'text', 'source': 'mixer2', 'text': 'Overlay #2', 'visible': True}, 98 | {'type': 'text', 'source': 'mixer1', 'text': 'Overlay #3', 'visible': True} 99 | ], 100 | 'mixers': [ 101 | {}, 102 | {} 103 | ] 104 | } 105 | config_file = create_config_file(config) 106 | run_brave(config_file.name) 107 | time.sleep(1) 108 | check_brave_is_running() 109 | -------------------------------------------------------------------------------- /tests/test_restart.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | 4 | PORT_FROM_CONFIG_FILE = 12345 5 | PORT_FROM_COMMAND_LINE = 12346 6 | 7 | def test_restart_with_original_config(run_brave, create_config_file): 8 | ''' 9 | WHEN user calls /api/restart with {'config':'original'} 10 | THEN Brave restarts and retains original config but not any additions 11 | ''' 12 | config = {'inputs': [{'type': 'test_video'}]} 13 | config_file = create_config_file(config) 14 | run_brave(config_file.name) 15 | check_brave_is_running() 16 | add_input({'type': 'test_audio'}) 17 | time.sleep(0.2) 18 | assert_inputs([{'type': 'test_video', 'id': 1}, {'type': 'test_audio', 'id': 2}]) 19 | 20 | restart_brave({'config': 'original'}) 21 | time.sleep(0.5) 22 | assert_inputs([{'type': 'test_video', 'id': 1}]) 23 | 24 | 25 | def test_restart_with_current_config(run_brave, create_config_file): 26 | ''' 27 | WHEN user calls /api/restart with {'config':'current'} 28 | THEN Brave restarts and retains both original config and any additions 29 | ''' 30 | config = {'inputs': [{'type': 'test_video'}]} 31 | 
config_file = create_config_file(config) 32 | run_brave(config_file.name) 33 | check_brave_is_running() 34 | add_input({'type': 'test_audio'}) 35 | time.sleep(0.2) 36 | assert_inputs([{'type': 'test_video', 'id': 1}, {'type': 'test_audio', 'id': 2}]) 37 | 38 | restart_brave({'config': 'current'}) 39 | time.sleep(0.5) 40 | assert_inputs([{'type': 'test_video', 'id': 1}, {'type': 'test_audio', 'id': 2}]) 41 | -------------------------------------------------------------------------------- /tests/test_rtmp.py: -------------------------------------------------------------------------------- 1 | import time 2 | import os 3 | import utils 4 | import pytest 5 | from utils import run_brave, create_config_file 6 | 7 | BRAVE_1_PORT = 12345 8 | BRAVE_2_PORT = 12346 9 | 10 | 11 | def test_rtmp(run_brave, create_config_file): 12 | ''' 13 | We test both RTMP as an input and output by sending an RTMP stream from one Brave to another. 14 | An RTMP server is required in the middle, which needs to be provided separately. 15 | Set it as the RTMP_SERVER env variable, e.g. 16 | export RTMP_SERVER="myserver.com:10000" 17 | ''' 18 | if 'RTMP_SERVER' not in os.environ: 19 | pytest.skip('RTMP_SERVER environment variable not set') 20 | return 21 | 22 | rtmp_url = 'rtmp://' + os.environ['RTMP_SERVER'] + '/live/test1' 23 | 24 | subtest_start_brave_with_rtmp_output(run_brave, create_config_file, rtmp_url) 25 | subtest_start_brave_with_rtmp_input(run_brave, create_config_file, rtmp_url) 26 | 27 | attempts_remaining = 20 28 | everything_in_playing_state = False 29 | while attempts_remaining > 0: 30 | attempts_remaining -= 1 31 | time.sleep(1) 32 | everything_in_playing_state = is_brave_in_playing_state(BRAVE_1_PORT) and is_brave_in_playing_state(BRAVE_2_PORT) 33 | 34 | assert everything_in_playing_state, 'Cannot get everything into PLAYING state' 35 | 36 | # Because Brave 1 is showing all red, and Brave 2 is receiving Brave 1 via RTMP, 37 | # the output of Brave 2 should be all red 38 | print('NOW RED') 39 | time.sleep(20) 40 | utils.assert_image_output_color(1, [255, 0, 0], port=BRAVE_2_PORT) 41 | 42 | # Now make first Brave all blue 43 | utils.update_input(1, {'pattern': 6}, port=BRAVE_1_PORT) # Pattern 6 is blue 44 | 45 | # Second Brave will now be blue, if the RTMP connection is working 46 | print('NOW BLUE') 47 | time.sleep(20) # Takes an annoyingly long time to buffer....
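# (A possible alternative to the fixed 20-second sleeps above, sketched only as a comment and
#  not used by this test: retry the colour check until it passes - assuming that
#  assert_image_output_color() raises AssertionError on a mismatch - e.g.
#      for _ in range(20):
#          try:
#              utils.assert_image_output_color(1, [0, 0, 255], port=BRAVE_2_PORT)
#              break
#          except AssertionError:
#              time.sleep(1)
#  The fixed sleep keeps the test simpler, at the cost of always waiting the full time.)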
48 | utils.assert_image_output_color(1, [0, 0, 255], port=BRAVE_2_PORT) 49 | 50 | 51 | def subtest_start_brave_with_rtmp_output(run_brave, create_config_file, rtmp_url): 52 | config = { 53 | 'inputs': [ 54 | { 55 | 'type': 'test_video', 56 | 'pattern': 4, # RED 57 | } 58 | ], 59 | 'outputs': [ 60 | { 61 | 'type': 'rtmp', 62 | 'source': 'input1', 63 | 'uri': rtmp_url 64 | } 65 | ] 66 | } 67 | config_file = create_config_file(config) 68 | run_brave(config_file.name, BRAVE_1_PORT) 69 | utils.check_brave_is_running() 70 | # time.sleep(0.5) 71 | 72 | 73 | def subtest_start_brave_with_rtmp_input(run_brave, create_config_file, rtmp_url): 74 | config = { 75 | 'inputs': [ 76 | { 77 | 'type': 'uri', 78 | 'uri': rtmp_url 79 | } 80 | ], 81 | # 'mixers': [ 82 | # { 83 | # 'sources': [ 84 | # {'uid': 'input1'} 85 | # ] 86 | # } 87 | # ], 88 | 'outputs': [ 89 | { 90 | 'type': 'image', 91 | 'source': 'input1' 92 | # ADD A PREVIEW, for debugging: 93 | }, 94 | { 95 | 'type': 'local', 96 | 'source': 'input1' 97 | } 98 | ] 99 | } 100 | config_file = create_config_file(config) 101 | run_brave(config_file.name, BRAVE_2_PORT) 102 | utils.check_brave_is_running() 103 | 104 | 105 | def is_brave_in_playing_state(port): 106 | response = utils.api_get('/api/all', port=port) 107 | assert response.status_code == 200 108 | response_json = response.json() 109 | return response_json['inputs'][0]['state'] == 'PLAYING' and response_json['outputs'][0]['state'] == 'PLAYING' 110 | -------------------------------------------------------------------------------- /tests/test_seek.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | 6 | def test_seek(run_brave): 7 | '''Ensure that the user can seek to a position of an input.''' 8 | run_brave() 9 | create_input() 10 | 11 | # STEP 1: Check initial position is 0 12 | state, position, duration = get_input_details() 13 | assert state == 'PAUSED' 14 | assert position == 0 15 | assert_duation(duration) 16 | 17 | # STEP 2: Update to 4 seconds and validate 18 | update_input(1, {'position': 4000000000}) 19 | state, position, duration = get_input_details() 20 | assert state == 'PAUSED' 21 | assert position == 4000000000 22 | assert_duation(duration) 23 | 24 | # STEP 3: Play video and check it completes within 2 seconds. (It's a 5 second video so it should.) 
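# (Only about 1 second of playback should remain at this point, because STEP 2 seeked to 4s of the 5-second clip.)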
25 | update_input(1, {'state': 'PLAYING'}) 26 | time.sleep(2) 27 | state, position, duration = get_input_details() 28 | assert state == 'READY' 29 | assert position in [-1, 0, None] # Ideally this would be more consistent 30 | 31 | 32 | def create_input(): 33 | uri = 'file://' + test_directory() + '/assets/5_second_video.mp4' 34 | run_brave() 35 | add_input({'type': 'uri', 'state': 'PAUSED', 'uri': uri}) 36 | time.sleep(1) 37 | 38 | 39 | def get_input_details(): 40 | response = api_get('/api/inputs') 41 | assert response.status_code == 200 42 | response_json = response.json() 43 | assert len(response_json) == 1 44 | state = response_json[0]['state'] 45 | position = response_json[0]['position'] if 'position' in response_json[0] else None 46 | duration = response_json[0]['duration'] if 'duration' in response_json[0] else None 47 | return state, position, duration 48 | 49 | 50 | def assert_duation(duration): 51 | GRACE_DURATION_DIFFERENCE = 100000000 52 | assert (5000000000 + GRACE_DURATION_DIFFERENCE) > duration > (5000000000 - GRACE_DURATION_DIFFERENCE) 53 | -------------------------------------------------------------------------------- /tests/test_tcp.py: -------------------------------------------------------------------------------- 1 | ''' 2 | We test both TCP as an input and output by connecting two Braves together. 3 | ''' 4 | import time 5 | import utils 6 | from utils import run_brave, create_config_file 7 | 8 | BRAVE_1_PORT = 12345 9 | BRAVE_2_PORT = 12346 10 | TCP_HOST = '0.0.0.0' 11 | TCP_PORT = 13100 12 | 13 | 14 | def test_tcp_with_mpeg_container(run_brave, create_config_file): 15 | _test_tcp(run_brave, create_config_file, 'mpeg', {'enable_video': True, 'enable_audio': True}) 16 | 17 | 18 | def test_tcp_with_mpeg_container_video_only(run_brave, create_config_file): 19 | _test_tcp(run_brave, create_config_file, 'mpeg', {'enable_video': True, 'enable_audio': False}) 20 | 21 | 22 | def test_tcp_with_mpeg_container_audio_only(run_brave, create_config_file): 23 | _test_tcp(run_brave, create_config_file, 'mpeg', {'enable_video': False, 'enable_audio': True}) 24 | 25 | 26 | def test_tcp_with_ogg_container(run_brave, create_config_file): 27 | _test_tcp(run_brave, create_config_file, 'ogg', {'enable_video': True, 'enable_audio': True}) 28 | 29 | 30 | # Could test OGG with just video/audio but there's probably no need. 
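# In outline, the flow exercised by _test_tcp() below: the first Brave muxes a test source into
# the chosen container and serves it as a 'tcp' output on TCP_HOST:TCP_PORT; the second Brave
# reads it back with a 'tcp_client' input and (when video is enabled) writes an 'image' output,
# whose colour is then checked against the first Brave's test pattern.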
31 | 32 | def _test_tcp(run_brave, create_config_file, container, config): 33 | subtest_start_brave_with_tcp_output(run_brave, create_config_file, container, {**config}) 34 | subtest_start_brave_with_tcp_input(run_brave, create_config_file, container, {**config}) 35 | 36 | if config['enable_video']: 37 | time.sleep(3) 38 | subtest_ensure_first_brave_content_appears_in_second() 39 | 40 | 41 | def subtest_start_brave_with_tcp_output(run_brave, create_config_file, container, config): 42 | 43 | if config['enable_video']: 44 | config['inputs'] = [ 45 | { 46 | 'type': 'test_video', 47 | 'pattern': 4, # RED 48 | } 49 | ] 50 | else: 51 | config['inputs'] = [ 52 | { 53 | 'type': 'test_audio' 54 | } 55 | ] 56 | 57 | config['outputs'] = [ 58 | { 59 | 'type': 'tcp', 60 | 'container': container, 61 | 'source': 'input1', 62 | 'host': TCP_HOST, 63 | 'port': TCP_PORT 64 | } 65 | ] 66 | 67 | config_file = create_config_file(config) 68 | run_brave(config_file.name, BRAVE_1_PORT) 69 | time.sleep(2) 70 | utils.check_brave_is_running() 71 | utils.assert_everything_in_playing_state(port=BRAVE_1_PORT) 72 | 73 | 74 | def subtest_start_brave_with_tcp_input(run_brave, create_config_file, container, config): 75 | config['inputs'] = [ 76 | { 77 | 'type': 'tcp_client', 78 | 'container': container, 79 | 'host': TCP_HOST, 80 | 'port': TCP_PORT 81 | } 82 | ] 83 | 84 | if config['enable_video']: 85 | config['outputs'] = [ 86 | { 87 | 'type': 'image', 88 | 'source': 'input1' 89 | }, 90 | # PREVIEW for debugging: 91 | # { 92 | # 'type': 'local', 93 | # 'source': 'input1' 94 | # } 95 | ] 96 | 97 | config_file = create_config_file(config) 98 | run_brave(config_file.name, BRAVE_2_PORT) 99 | utils.check_brave_is_running() 100 | time.sleep(3) 101 | utils.assert_everything_in_playing_state(port=BRAVE_2_PORT) 102 | 103 | 104 | def subtest_ensure_first_brave_content_appears_in_second(): 105 | # First brave is all red. So second Brave should be red too. 106 | utils.assert_image_output_color(1, [255, 0, 0], port=BRAVE_2_PORT) 107 | 108 | # Now make first Brave all green 109 | utils.update_input(1, {'pattern': 5}, port=BRAVE_1_PORT) # Pattern 5 is green 110 | 111 | # Second brave will now be green, if TCP connection is working 112 | time.sleep(6) 113 | utils.assert_image_output_color(1, [0, 255, 0], port=BRAVE_2_PORT) 114 | -------------------------------------------------------------------------------- /tests/test_text_overlay_element_connects_successfully.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect 2 | from utils import * 3 | from PIL import Image 4 | 5 | def test_text_overlay_element_connects_successfully(run_brave): 6 | ''' 7 | Confirms that the 'text' overlay creates a 'textoverlay' element and that it connects successfully. 
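'Connected' is judged by whether the element's pads report a 'peer' in the /api/elements response - see check_pads_peer() at the bottom of this file.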
8 | ''' 9 | run_brave() 10 | 11 | elements_before_adding_overlay = get_mixer_elements(1) 12 | 13 | add_overlay({'type': 'text', 'source': 'mixer1', 'text': 'Overlay #1', 'visible': False}) 14 | assert_overlays([{'id': 1, 'source': 'mixer1', 'visible': False}]) 15 | 16 | check_mixer_elements_contain_one_textoverlay(elements_before_adding_overlay, is_connected=False) 17 | 18 | # Now make it visible and it will be connected 19 | update_overlay(1, {'visible': True}) 20 | check_mixer_elements_contain_one_textoverlay(elements_before_adding_overlay, is_connected=True) 21 | 22 | # Now make it invisible and it will not be connected 23 | update_overlay(1, {'visible': False}) 24 | check_mixer_elements_contain_one_textoverlay(elements_before_adding_overlay, is_connected=False) 25 | 26 | # Now make it visible, then move it to another mixer... the first mixer should no longer have the extra element 27 | update_overlay(1, {'visible': True}) 28 | add_mixer({}) 29 | update_overlay(1, {'source': 'mixer2'}) 30 | elements_after_moving_overlay = get_mixer_elements(1) 31 | assert len(elements_after_moving_overlay) == len(elements_before_adding_overlay) 32 | 33 | # But mixer2 will have the extra 'textoverlay', successfully connected 34 | check_mixer_elements_contain_one_textoverlay(elements_before_adding_overlay, is_connected=True, mixer_id=2) 35 | 36 | 37 | def check_pads_peer(element, expect_a_peer): 38 | assert ('peer' in element['pads']['video_sink']) is expect_a_peer 39 | assert ('peer' in element['pads']['src']) is expect_a_peer 40 | 41 | def get_mixer_elements(mixer_id): 42 | response = api_get('/api/elements') 43 | assert response.status_code == 200 44 | json_response = response.json() 45 | return json_response['mixers'][str(mixer_id)]['elements'] 46 | 47 | def check_mixer_elements_contain_one_textoverlay(elements_before_adding_overlay, is_connected=False, mixer_id=1): 48 | elements = get_mixer_elements(mixer_id) 49 | 50 | # 'textoverlay' should be the only additional element 51 | assert len(elements_before_adding_overlay) == len(elements) - 1 52 | textoverlay_elements = [x for x in elements if x['type'] == 'textoverlay'] 53 | assert len(textoverlay_elements) == 1 54 | 55 | # The textoverlay element should only be connected (i.e. have pad peers) if is_connected is True 56 | check_pads_peer(textoverlay_elements[0], is_connected) 57 | -------------------------------------------------------------------------------- /tests/unit/test_config.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | sys.path.append('tests') 3 | sys.path.append('.') 4 | from utils import create_config_file 5 | import brave.config 6 | 7 | ''' 8 | Unit test for brave/config.py 9 | ''' 10 | 11 | def test_stun_server_config(create_config_file): 12 | config = {'stun_server': 'a-stun-server'} 13 | config_file = create_config_file(config) 14 | brave.config.init(config_file.name) 15 | assert brave.config.stun_server() == 'a-stun-server' 16 | 17 | 18 | def test_stun_server_via_env_var(): 19 | os.environ['STUN_SERVER'] = 'a-stun-server-from-env-var' 20 | brave.config.init() 21 | assert brave.config.stun_server() == 'a-stun-server-from-env-var' 22 | 23 | 24 | def test_api_host_and_port_config(create_config_file): 25 | config = {'api_host': 'api-host', 'api_port': 12345} 26 | config_file = create_config_file(config) 27 | brave.config.init(config_file.name) 28 | assert brave.config.api_host() == 'api-host' 29 | assert brave.config.api_port() == 12345 30 | 31 | 32 | def test_api_host_and_port_via_env_var(): 33 | os.environ['HOST'] = 'host-via-env' 34 | os.environ['PORT']
= '23456' 35 | brave.config.init() 36 | assert brave.config.api_host() == 'host-via-env' 37 | assert brave.config.api_port() == 23456 38 | -------------------------------------------------------------------------------- /tests/unit/test_connection_collection.py: -------------------------------------------------------------------------------- 1 | import time, pytest, inspect, sys 2 | sys.path.append('.') 3 | import brave.session 4 | from brave.inputs.input import Input 5 | from brave.connections import ConnectionCollection 6 | from brave.exceptions import InvalidConfiguration 7 | 8 | def test_connection_collection(): 9 | session = brave.session.init() 10 | cc = session.connections 11 | input = session.inputs.add(type='test_video') 12 | output1 = session.outputs.add(type='local') 13 | output2 = session.outputs.add(type='image') 14 | mixer = session.mixers.add() 15 | 16 | connection1 = cc.add(input, mixer) 17 | assert connection1.source == input 18 | assert connection1.dest == mixer 19 | 20 | connection2 = cc.add(mixer, output1) 21 | assert connection2.source == mixer 22 | assert connection2.dest == output1 23 | 24 | connection3 = cc.add(input, output2) 25 | assert connection3.source == input 26 | assert connection3.dest == output2 27 | 28 | subtest_cannot_link_output_to_more_than_one_source(cc, input, output2) 29 | subtest_can_find_source_and_dest(cc, input, mixer, output1, connection1, connection2, connection3) 30 | subtest_cannot_link_the_wrong_way(cc, input, mixer, output1) 31 | subtest_can_access_connections_from_input(session, input, [connection1, connection3]) 32 | subtest_can_access_source_connections_from_mixer(session, mixer, [connection1]) 33 | subtest_can_access_dest_connections_from_mixer(session, mixer, [connection2]) 34 | subtest_can_access_connections_from_output(session, output1, connection2) 35 | subtest_can_access_connections_from_output(session, output2, connection3) 36 | 37 | 38 | def test_creating_connection_from_input_to_mixer(): 39 | session = brave.session.init() 40 | mixer = session.mixers.add() 41 | input = session.inputs.add(type='test_video') 42 | assert mixer.connection_for_source(input) == None 43 | connection1 = mixer.connection_for_source(input, create_if_not_made=True) 44 | assert connection1.source == input 45 | assert connection1.dest == mixer 46 | connection1_copy = mixer.connection_for_source(input, create_if_not_made=True) 47 | assert connection1_copy == connection1 48 | 49 | 50 | def test_creating_connection_from_mixer_to_output(): 51 | session = brave.session.init() 52 | mixer = session.mixers.add() 53 | output = session.outputs.add(type='local') 54 | connection2 = output.connection_for_source(mixer, create_if_not_made=True) 55 | assert connection2.source == mixer 56 | assert connection2.dest == output 57 | connection2_copy = output.connection_for_source(mixer, create_if_not_made=True) 58 | assert connection2_copy == connection2 59 | 60 | 61 | def subtest_cannot_link_output_to_more_than_one_source(cc, input, output2): 62 | with pytest.raises(InvalidConfiguration): 63 | cc.add(input, output2) 64 | 65 | def subtest_can_find_source_and_dest(cc, input, mixer, output1, connection1, connection2, connection3): 66 | assert cc.get_first_for_source(input) == connection1 67 | assert cc.get_first_for_source(mixer) == connection2 68 | assert cc.get_first_for_source(output1) == None 69 | 70 | assert cc.get_all_for_source(input) == [connection1, connection3] 71 | assert cc.get_all_for_source(mixer) == [connection2] 72 | assert cc.get_all_for_source(output1) == [] 73 
| 74 | assert cc.get_first_for_dest(input) == None 75 | assert cc.get_first_for_dest(mixer) == connection1 76 | assert cc.get_first_for_dest(output1) == connection2 77 | 78 | assert cc.get_all_for_dest(input) == [] 79 | assert cc.get_all_for_dest(mixer) == [connection1] 80 | assert cc.get_all_for_dest(output1) == [connection2] 81 | 82 | 83 | def subtest_cannot_link_the_wrong_way(cc, input, mixer, output): 84 | with pytest.raises(ValueError): 85 | cc.add(mixer, input) 86 | 87 | with pytest.raises(ValueError): 88 | cc.add(output, mixer) 89 | 90 | def subtest_can_access_connections_from_input(session, input, connections): 91 | assert input.dest_connections() == connections 92 | 93 | def subtest_can_access_source_connections_from_mixer(session, mixer, connections): 94 | assert mixer.source_connections() == connections 95 | 96 | def subtest_can_access_dest_connections_from_mixer(session, mixer, connections): 97 | assert mixer.dest_connections() == connections 98 | 99 | def subtest_can_access_connections_from_output(session, output, connection): 100 | assert output.source_connection() == connection 101 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | format = ${cyan}%(path)s${reset}:${yellow_bold}%(row)d${reset}:${green_bold}%(col)d${reset}: ${red_bold}%(code)s${reset} %(text)s 4 | -------------------------------------------------------------------------------- /youtubedltest.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | import youtube_dl 3 | streamurl = 'https://www.youtube.com/watch?v=vQ8xjg7mcgE' 4 | 5 | purl = 'notset' 6 | 7 | # should be able to just pass a -g and get the url 8 | 9 | # forceurl or --get-url 10 | # extracting and examples https://www.bogotobogo.com/VideoStreaming/YouTube/youtube-dl-embedding.php 11 | # sudo -H pip install --upgrade youtube-dl 12 | # /usr/local/lib/python2.7/dist-packages/youtube_dl/ 13 | 14 | # need to look at the info dict.. f['url'] 15 | 16 | class MyLogger(object): 17 | #purl = 'notset' 18 | def debug(self, msg): 19 | global purl 20 | if "https" in msg: 21 | #print(msg) 22 | purl = msg 23 | pass 24 | 25 | def warning(self, msg): 26 | # print(msg) 27 | pass 28 | 29 | def error(self, msg): 30 | print(msg) 31 | 32 | 33 | def my_hook(d): 34 | if d['status'] == 'finished': 35 | print('Done downloading, now converting ...') 36 | if d['status'] == 'downloading': 37 | print(d) 38 | 39 | 40 | ydl_opts_audio = { 41 | 'format': 'bestaudio/best', 42 | 'postprocessors': [{ 43 | 'key': 'FFmpegExtractAudio', 44 | 'preferredcodec': 'mp3', 45 | 'preferredquality': '192', 46 | }], 47 | 'logger': MyLogger(), 48 | 'progress_hooks': [my_hook], 49 | } 50 | 51 | ydl_opts = { 52 | 'simulate': True, 53 | 'noplaylist' : True, 54 | 'forceurl' : True, 55 | 'logger': MyLogger(), 56 | 'progress_hooks': [my_hook], 57 | } 58 | 59 | with youtube_dl.YoutubeDL(ydl_opts) as ydl: 60 | ydl.download([streamurl]) 61 | #meta = ydl.extract_info([streamurl],download=False) 62 | 63 | #print("url:",meta['title']) 64 | print("url:", purl) 65 | 66 | 67 | 68 | #print("url:", purl) 69 | --------------------------------------------------------------------------------