├── chacra ├── __init__.py ├── commands │ ├── __init__.py │ └── populate.py ├── tests │ ├── __init__.py │ ├── async │ │ ├── __init__.py │ │ ├── test_rpm.py │ │ ├── test_checks.py │ │ └── test_callbacks.py │ ├── models │ │ ├── __init__.py │ │ ├── test_repos.py │ │ └── test_binaries.py │ ├── controllers │ │ ├── __init__.py │ │ ├── repos │ │ │ ├── __init__.py │ │ │ ├── test_flavors.py │ │ │ ├── test_sha1s.py │ │ │ ├── test_refs.py │ │ │ ├── test_distros.py │ │ │ └── test_projects.py │ │ ├── test_health.py │ │ ├── test_refs.py │ │ ├── test_projects.py │ │ ├── test_search.py │ │ ├── test_sha1s.py │ │ ├── test_distros.py │ │ └── test_flavors.py │ ├── util.py │ ├── test_metrics.py │ ├── config.py │ └── conftest.py ├── templates │ ├── distributions │ └── repo.mako ├── compat.py ├── controllers │ ├── health.py │ ├── __init__.py │ ├── repos │ │ ├── sha1s.py │ │ ├── refs.py │ │ ├── distros.py │ │ ├── projects.py │ │ └── __init__.py │ ├── projects.py │ ├── root.py │ ├── binaries │ │ ├── refs.py │ │ ├── sha1s.py │ │ ├── distros.py │ │ ├── archs.py │ │ └── __init__.py │ ├── errors.py │ ├── search.py │ └── util.py ├── schemas │ └── __init__.py ├── app.py ├── auth.py ├── asynch │ ├── base.py │ ├── rpm.py │ ├── checks.py │ ├── debian.py │ └── __init__.py ├── constants.py ├── models │ ├── types.py │ ├── projects.py │ ├── __init__.py │ └── repos.py ├── hooks.py ├── wsgi.py └── metrics.py ├── alembic ├── versions │ ├── .gitignore │ ├── 11c965e8acc9_add_is_updating_for_repos.py │ ├── 432dc36d3105_default_sha1_to_head.py │ ├── 53483b1161b_add_is_queued_field_for_repos.py │ ├── 4021ff3a9dc5_adds_repo_extra.py │ ├── 52d176771ae6_biginteger_size.py │ ├── 375985186100_adds_sha1_to_repo_and_binary.py │ └── 4ee54bf1cca3_add_flavors_for_repos_and_binaries.py ├── README ├── script.py.mako └── env.py ├── deploy └── playbooks │ ├── roles │ ├── common │ │ ├── templates │ │ │ ├── systemd │ │ │ │ ├── 80-chacra.preset.j2 │ │ │ │ ├── chacra.sysconfig.j2 │ │ │ │ ├── chacra-celerybeat.service.j2 │ │ │ │ 
├── chacra.service.j2 │ │ │ │ └── chacra-celery.service.j2 │ │ │ ├── prod_api_creds.py.j2 │ │ │ ├── nginx_tmp_site.conf │ │ │ ├── prod_db.py.j2 │ │ │ ├── prod_callbacks.py.j2 │ │ │ ├── hosts.j2 │ │ │ ├── nginx.conf │ │ │ ├── nginx_site.conf │ │ │ └── alembic-prod.ini.j2 │ │ ├── defaults │ │ │ └── main.yml │ │ ├── vars │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── ssl.yml │ │ │ ├── systemd.yml │ │ │ ├── letsencrypt.yml │ │ │ ├── nginx.yml │ │ │ ├── rabbitmq.yml │ │ │ ├── postgresql.yml │ │ │ └── main.yml │ ├── statsd │ │ ├── templates │ │ │ ├── systemd │ │ │ │ ├── 80-statsd.preset.j2 │ │ │ │ └── statsd.service.j2 │ │ │ └── config │ │ │ │ └── localConfig.js.j2 │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── systemd.yml │ │ │ └── main.yml │ └── repos │ │ ├── defaults │ │ └── main.yml │ │ └── tasks │ │ └── main.yml │ ├── ansible.cfg │ ├── examples │ ├── hosts.sample │ ├── deploy_vagrant.yml │ ├── deploy_prod.yml │ └── deploy_test.yml │ ├── .gitignore │ ├── files │ └── ssl │ │ └── dev │ │ └── generate.sh │ └── redeploy-all.yml ├── requirements.txt ├── tox.ini ├── .gitignore ├── alembic.ini ├── setup.py └── config └── dev.py /chacra/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /alembic/versions/.gitignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /chacra/commands/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /chacra/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/chacra/tests/async/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /chacra/tests/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /chacra/tests/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /chacra/tests/controllers/repos/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /alembic/README: -------------------------------------------------------------------------------- 1 | Generic single-database configuration. -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/templates/systemd/80-chacra.preset.j2: -------------------------------------------------------------------------------- 1 | # chacra web service 2 | 3 | enable chacra.service 4 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/statsd/templates/systemd/80-statsd.preset.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # chacra web service 3 | 4 | enable statsd.service 5 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/templates/prod_api_creds.py.j2: -------------------------------------------------------------------------------- 1 | # Basic HTTP Auth credentials 2 | api_user = "{{ api_user }}" 3 | api_key = "{{ api_key }}" 4 | -------------------------------------------------------------------------------- 
import sys

# True when the interpreter is Python 3.x.
PY3 = sys.version_info[0] == 3


def _b_py3(s):
    """Encode a text string to UTF-8 bytes."""
    return s.encode("utf-8")


def _b_py2(s):
    """On Python 2 ``str`` is already bytes; return it untouched."""
    return s


# Public helper: convert text to the native byte representation.
b_ = _b_py3 if PY3 else _b_py2
from pecan import expose, abort

from chacra.asynch import checks


class HealthController(object):
    """Expose ``/health/``: an empty (204) response when the backend
    checks pass, a 500 when they do not."""

    @expose()
    def index(self):
        healthy = checks.is_healthy()
        if healthy:
            # nothing to render; pecan responds with an empty body
            return
        abort(500)
from notario.validators import types
from notario.decorators import optional

# notario validation schema for repo payloads; every key is optional.
# NOTE(review): notario schemas are order-sensitive tuples — keep the
# alphabetical key order when adding fields.
repo_schema = (
    (optional("distro"), types.string),
    (optional("distro_version"), types.string),
    (optional("needs_update"), types.boolean),
    (optional("ref"), types.string),
    # e.g. "deb" or "rpm" (the values handled by templates/repo.mako)
    (optional("type"), types.string),
)
import base64
import hmac

from pecan import request, abort, response, conf


def basic_auth():
    """
    Validate the HTTP Basic ``Authorization`` header against the
    configured ``api_user``/``api_key``.

    Returns True on success; otherwise sets the ``WWW-Authenticate``
    header and aborts the request with a 401.
    """
    try:
        auth = request.headers.get('Authorization')
        if not auth:
            raise ValueError('missing Authorization header')
        decoded = base64.b64decode(auth.split(' ')[1]).decode('utf-8')
        # split only on the first ':' so passwords containing a colon
        # (allowed by Basic auth) are preserved intact
        username, password = decoded.split(':', 1)

        # constant-time comparison avoids leaking credential contents via
        # response timing; unlike the previous ``assert`` checks it is not
        # stripped when running under ``python -O``
        if not (hmac.compare_digest(username, conf.api_user) and
                hmac.compare_digest(password, conf.api_key)):
            raise ValueError('invalid credentials')
    # narrow from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; any parsing/validation failure still
    # yields a 401 challenge
    except Exception:
        response.headers['WWW-Authenticate'] = 'Basic realm="Chacra :: Binary API"'
        abort(401)

    return True
callback_url = "{{ callback_url|default('') }}" 4 | callback_user = "{{ callback_user|default('') }}" 5 | callback_key = "{{ callback_key|default('') }}" 6 | callback_verify_ssl = {{ callback_verify_ssl|default(True) }} 7 | 8 | # Health Pings 9 | health_ping = {{ health_ping|default(False) }} 10 | # note this url will get the hostname appended 11 | health_ping_url = "{{ health_ping_url|default('') }}" 12 | # this value will be used when registering the node with shaman 13 | hostname = "{{ fqdn }}" 14 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/templates/systemd/chacra-celerybeat.service.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [Unit] 3 | Description=chacra celerybeat service 4 | After=network.target rabbitmq-server.service 5 | Requires=rabbitmq-server.service 6 | 7 | [Service] 8 | Type=simple 9 | EnvironmentFile=/etc/default/chacra 10 | User={{ ansible_ssh_user }} 11 | WorkingDirectory={{ app_home }}/src/{{ app_name }}/{{ app_name }} 12 | StandardOutput=journal 13 | StandardError=journal 14 | ExecStart={{ app_home }}/bin/celery -A asynch beat --loglevel=info 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | system_packages: 4 | - python3-dev 5 | - git 6 | - g++ 7 | - gcc 8 | - libpq-dev 9 | - postgresql 10 | - postgresql-common 11 | - postgresql-contrib 12 | - python3-psycopg2 13 | - nginx 14 | - vim 15 | # needed for the ansible apt_repository module 16 | - python-apt-dev 17 | # - libsemanage-python 18 | - python3 19 | - liblz4-tool 20 | - zstd 21 | - virtualenv 22 | 23 | ssl_requirements: 24 | - openssl 25 | - libssl-dev 26 | 27 | circus_system_packages: 28 | - libzmq-dev 29 | - libevent-dev 
import celery
from chacra import models


class SQLATask(celery.Task):
    """
    An abstract Celery Task that ensures that the connection to the
    database is closed on task completion

    .. note:: On logs, it may appear as there are errors in the transaction but
    this is not an error condition: SQLAlchemy rolls back the transaction if no
    change was done.
    """
    # abstract tasks are never registered/executed themselves; concrete
    # tasks inherit this cleanup behavior
    abstract = True

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        # runs after every task regardless of outcome: drop the SQLAlchemy
        # session so connections are not leaked across tasks
        models.clear()
from pecan import conf
import base64
from chacra import compat


def make_credentials(correct=True, username=None, secret=None):
    """
    Build an HTTP Basic ``Authorization`` header value for tests.

    With no explicit ``username``/``secret`` and ``correct`` left True,
    the configured ``api_user``/``api_key`` pair is used; an explicit
    pair overrides it; anything else produces a deliberately wrong pair.
    """
    use_configured = correct and not (username or secret)
    if use_configured:
        creds = "%s:%s" % (conf.api_user, conf.api_key)
    elif username and secret:
        creds = "%s:%s" % (username, secret)
    else:
        creds = 'you:wrong'
    token = base64.b64encode(creds.encode('utf-8')).decode('utf8')
    header = 'Basic %s' % token
    if compat.PY3:
        return header
    return str(header)
import json
from copy import deepcopy
from sqlalchemy import types as SATypes
from chacra.compat import PY3


class JSONType(SATypes.TypeDecorator):
    """SQLAlchemy column type storing JSON-serializable values as text."""

    impl = SATypes.UnicodeText

    def process_bind_param(self, value, engine):
        # serialize on the way into the database; Python 2 needs an
        # explicit unicode() to match the UnicodeText column
        if PY3:
            return json.dumps(value)
        else:
            return unicode(json.dumps(value))  # noqa
    def process_result_value(self, value, engine):
        # deserialize on the way out; NULL/empty columns become {}
        if value:
            return json.loads(value)
        else:
            return {}  # pragma: nocover
    def copy_value(self, value):
        # deep copy so callers mutating the value don't alias shared state
        return deepcopy(value)
from os import path
from pecan import request, redirect


def error(url, msg=None):
    """Internally redirect to *url*; when *msg* is given, stash it in the
    request context and append it as an ``error_message`` query string."""
    if msg:
        request.context['error_message'] = msg
        url = path.join(url, '?error_message=%s' % msg)
    redirect(url, internal=True)


def set_id_in_context(name, object_model, value):
    """Record both ``<object>_id`` and the object's name in the request
    context, tolerating a missing (None) model."""
    object_name = name.split('_id')[0]
    if object_model is None:
        # no model found: keep the id empty but remember the raw value
        request.context[name] = None
        request.context[object_name] = value
    else:
        request.context[name] = object_model.id
        request.context[object_name] = object_model.name
### 21 | op.add_column('repos', sa.Column('is_updating', sa.Boolean(), nullable=True)) 22 | ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('repos', 'is_updating') 28 | ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /alembic/versions/432dc36d3105_default_sha1_to_head.py: -------------------------------------------------------------------------------- 1 | """default sha1 to HEAD 2 | 3 | Revision ID: 432dc36d3105 4 | Revises: 375985186100 5 | Create Date: 2016-06-10 11:21:08.357158 6 | 7 | """ 8 | 9 | # revision identifiers, used by Alembic. 10 | revision = '432dc36d3105' 11 | down_revision = '375985186100' 12 | branch_labels = None 13 | depends_on = None 14 | 15 | from alembic import op 16 | from sqlalchemy.sql import table, column 17 | import sqlalchemy as sa 18 | 19 | 20 | def upgrade(): 21 | binaries = table("binaries", column("sha1", sa.String)) 22 | op.execute( 23 | binaries.update().values(dict(sha1="HEAD")) 24 | ) 25 | repos = table("repos", column("sha1", sa.String)) 26 | op.execute( 27 | repos.update().values(dict(sha1="HEAD")) 28 | ) 29 | 30 | 31 | def downgrade(): 32 | pass 33 | -------------------------------------------------------------------------------- /alembic/versions/53483b1161b_add_is_queued_field_for_repos.py: -------------------------------------------------------------------------------- 1 | """Add is_queued field for repos 2 | 3 | Revision ID: 53483b1161b 4 | Revises: 11c965e8acc9 5 | Create Date: 2016-06-07 15:06:14.618478 6 | 7 | """ 8 | 9 | # revision identifiers, used by Alembic. 10 | revision = '53483b1161b' 11 | down_revision = '11c965e8acc9' 12 | branch_labels = None 13 | depends_on = None 14 | 15 | from alembic import op 16 | import sqlalchemy as sa 17 | 18 | 19 | def upgrade(): 20 | ### commands auto generated by Alembic - please adjust! 
### 21 | op.add_column('repos', sa.Column('is_queued', sa.Boolean(), nullable=True)) 22 | ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('repos', 'is_queued') 28 | ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /alembic/versions/4021ff3a9dc5_adds_repo_extra.py: -------------------------------------------------------------------------------- 1 | """Adds Repo.extra 2 | 3 | Revision ID: 4021ff3a9dc5 4 | Revises: 4ee54bf1cca3 5 | Create Date: 2016-08-29 16:09:41.853490 6 | 7 | """ 8 | 9 | # revision identifiers, used by Alembic. 10 | revision = '4021ff3a9dc5' 11 | down_revision = '4ee54bf1cca3' 12 | branch_labels = None 13 | depends_on = None 14 | 15 | from alembic import op 16 | import sqlalchemy as sa 17 | 18 | from chacra import models 19 | 20 | 21 | def upgrade(): 22 | ### commands auto generated by Alembic - please adjust! ### 23 | op.add_column('repos', sa.Column('extra', models.types.JSONType(), nullable=True)) 24 | ### end Alembic commands ### 25 | 26 | 27 | def downgrade(): 28 | ### commands auto generated by Alembic - please adjust! ### 29 | op.drop_column('repos', 'extra') 30 | ### end Alembic commands ### 31 | -------------------------------------------------------------------------------- /alembic/versions/52d176771ae6_biginteger_size.py: -------------------------------------------------------------------------------- 1 | """BigInteger size 2 | 3 | Revision ID: 52d176771ae6 4 | Revises: 4021ff3a9dc5 5 | Create Date: 2019-01-22 16:47:06.695469 6 | 7 | """ 8 | 9 | # revision identifiers, used by Alembic. 
10 | revision = '52d176771ae6' 11 | down_revision = '4021ff3a9dc5' 12 | branch_labels = None 13 | depends_on = None 14 | from sqlalchemy import BigInteger, Integer 15 | 16 | from alembic import op 17 | import sqlalchemy as sa 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | op.alter_column('binaries', 'size', type_=BigInteger) 23 | # ### end Alembic commands ### 24 | 25 | 26 | def downgrade(): 27 | # ### commands auto generated by Alembic - please adjust! ### 28 | op.alter_column('binaries', 'size', type_=Integer) 29 | # ### end Alembic commands ### 30 | -------------------------------------------------------------------------------- /chacra/hooks.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from webob.exc import WSGIHTTPException 3 | from pecan.hooks import PecanHook 4 | 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class CustomErrorHook(PecanHook): 10 | """ 11 | Only needed for prod environments where it looks like multi-worker servers 12 | will swallow exceptions. This will ensure a traceback is logged correctly. 
13 | """ 14 | 15 | def on_error(self, state, exc): 16 | if isinstance(exc, WSGIHTTPException): 17 | if exc.code == 404: 18 | log.error("Not Found: %s" % state.request.url) 19 | return 20 | # explicit redirect codes that should not be handled at all by this 21 | # utility 22 | elif exc.code in [300, 301, 302, 303, 304, 305, 306, 307, 308]: 23 | return 24 | 25 | log.exception('unhandled error by Chacra') 26 | 27 | -------------------------------------------------------------------------------- /chacra/wsgi.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os 3 | from pecan.deploy import deploy 4 | 5 | 6 | def config_file(file_name=None): 7 | file_name = file_name or 'config.py' 8 | _file = os.path.abspath(__file__) 9 | dirname = lambda x: os.path.dirname(x) 10 | parent_dir = dirname(dirname(_file)) 11 | return os.path.join(parent_dir, file_name) 12 | 13 | 14 | def application(environ, start_response): 15 | wsgi_app = deploy(config_file('prod.py')) 16 | return wsgi_app(environ, start_response) 17 | 18 | if __name__ == '__main__': 19 | from wsgiref.simple_server import make_server 20 | # at some point, it would be nice to use pecan_mount 21 | #import pecan_mount 22 | #httpd = make_server('', 8181, pecan_mount.tree) 23 | httpd = make_server('', 8181, deploy(config_file('config.py'))) 24 | print("Serving HTTP on port 8181...") 25 | 26 | # Respond to requests until process is killed 27 | httpd.serve_forever() 28 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/templates/systemd/chacra-celery.service.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # logs for celery can be found in /var/log/celery/ 3 | # The logs can not be sent to the journal because the `celery multi` command will not log to 4 | # stderr or stdout. 
5 | [Unit] 6 | Description=chacra celery service 7 | After=network.target rabbitmq-server.service 8 | Requires=rabbitmq-server.service 9 | 10 | [Service] 11 | Type=forking 12 | EnvironmentFile=/etc/default/chacra 13 | User={{ ansible_ssh_user }} 14 | WorkingDirectory={{ app_home }}/src/{{ app_name }}/{{ app_name }} 15 | RuntimeDirectory=celery 16 | StandardOutput=journal 17 | StandardError=journal 18 | ExecStart={{ app_home }}/bin/celery multi start 5 -Q:1,2 poll_repos,celery -Q:3-5 build_repos -A asynch --logfile=/var/log/celery/%n%I.log 19 | ExecStop={{ app_home }}/bin/celery multi stopwait 5 -Q:1,2 poll_repos,celery -Q:3-5 build_repos --pidfile=%n.pid 20 | ExecReload={{ app_home }}/bin/celery multi restart 5 -Q:1,2 poll_repos,celery -Q:3-5 build_repos -A asynch --logfile=/var/log/celery/%n%I.log 21 | 22 | [Install] 23 | WantedBy=multi-user.target 24 | -------------------------------------------------------------------------------- /chacra/tests/async/test_rpm.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import subprocess 3 | from chacra.asynch import rpm 4 | 5 | 6 | class TestRPM(object): 7 | 8 | @pytest.mark.parametrize("distro", ['opensuse', 'openSUSE', 'sle', 'SLE']) 9 | def test__createrepo_opensuse(self, monkeypatch, distro): 10 | repo_dirs = ['basepath/noarch', 'basepath/x86_64'] 11 | basepath = 'basepath' 12 | def mock_check_call(*args, **kwargs): 13 | assert args == (['createrepo', '--no-database', basepath],) 14 | 15 | monkeypatch.setattr(subprocess, 'check_call', mock_check_call) 16 | rpm._createrepo(basepath, repo_dirs, distro) 17 | 18 | @pytest.mark.parametrize("distro", ['centos', 'Fedora', 'RHEL']) 19 | def test__createrepo_other(self, monkeypatch, distro): 20 | repo_dirs = ['basepath/noarch', 'basepath/x86_64'] 21 | basepath = 'basepath' 22 | def mock_check_call(*args, **kwargs): 23 | assert args == (['createrepo', '--no-database', repo_dirs.pop(0)],) 24 | 25 | 
monkeypatch.setattr(subprocess, 'check_call', mock_check_call) 26 | rpm._createrepo(basepath, repo_dirs, distro) 27 | -------------------------------------------------------------------------------- /chacra/tests/test_metrics.py: -------------------------------------------------------------------------------- 1 | from chacra import metrics 2 | 3 | 4 | class TestHostname(object): 5 | 6 | def test_gets_short_hostname(self, fake): 7 | socket = fake(gethostname=lambda: 'chacra.ceph.com') 8 | result = metrics.short_hostname(_socket=socket) 9 | assert result == 'chacra' 10 | 11 | 12 | class TestAppendSuffix(object): 13 | 14 | def test_gets_suffix_appended(self): 15 | result = metrics.append_suffix('chacra', 'expensive_function') 16 | assert result == 'chacra.expensive_function' 17 | 18 | def test_gets_suffix_appended_with_dotted_paths(self): 19 | result = metrics.append_suffix('chacra.async', 'add_rpms') 20 | assert result == 'chacra.async.add_rpms' 21 | 22 | 23 | class TestGetPrefix(object): 24 | 25 | def test_with_secret(self, fake): 26 | conf = fake(graphite_api_key='1234') 27 | result = metrics.get_prefix(conf=conf, host='local') 28 | assert result == '1234.local' 29 | 30 | def test_no_secret(self, fake): 31 | conf = fake() 32 | result = metrics.get_prefix(conf=conf, host='local') 33 | assert result == 'local' 34 | -------------------------------------------------------------------------------- /deploy/playbooks/examples/deploy_vagrant.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | user: vagrant 5 | roles: 6 | - common 7 | - repos 8 | - statsd 9 | vars: 10 | app_name: "chacra" 11 | use_ssl: true 12 | wsgi_file: wsgi.py 13 | wsgi_callable: application 14 | ansible_ssh_port: 22 15 | restart_app: true 16 | binary_root: "/opt/binaries" 17 | repos_root: "/opt/repos" 18 | branch: "main" 19 | development_server: true 20 | short_hostname: "3" 21 | fqdn: "3.node.a" 22 | api_user: "admin" 23 | api_key: 
"secret" 24 | 25 | # callbacks 26 | # using an IP here because vagrant won't know about 27 | # the hostnames of other vagrant machines 28 | callback_url: "https://192.168.111.116/api/repos/" 29 | callback_user: "admin" 30 | callback_key: "secret" 31 | callback_verify_ssl: False 32 | graphite_api_key: '1234-asdf-1234' 33 | 34 | health_ping: True 35 | health_ping_url: "https://192.168.111.116/api/nodes/" 36 | 37 | nginx_ssl_cert_path: "/etc/ssl/certs/{{ fqdn }}-bundled.crt" 38 | nginx_ssl_key_path: "/etc/ssl/private/{{ fqdn }}.key" 39 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/statsd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install Statsd Dependencies for Debian 4 | apt: "name={{ item }} update_cache=yes" 5 | become: true 6 | with_items: 7 | - git 8 | - nodejs 9 | when: ansible_pkg_mgr == 'apt' 10 | 11 | - name: Create node user 12 | become: true 13 | user: name=node state=present shell=/bin/false system=yes 14 | 15 | - name: Install statsd from GitHub 16 | git: "repo=https://github.com/etsy/statsd.git dest={{ app_home }}/src/statsd update=no version={{ statsd_version }}" 17 | 18 | - name: Get directory permissions 19 | stat: path={{ app_home }}/src/statsd 20 | register: permissions 21 | 22 | - name: Set file permissions 23 | become: true 24 | file: path={{ app_home }}/src/statsd owner=node group=node 25 | when: permissions.stat.pw_name != 'node' 26 | 27 | - include_tasks: systemd.yml 28 | 29 | - name: Configure statsd 30 | become: true 31 | template: src=config/localConfig.js.j2 dest={{ app_home }}/src/statsd/localConfig.js owner=node group=node mode=0444 32 | notify: restart statsd 33 | 34 | - name: Start statsd 35 | become: true 36 | service: name=statsd state=restarted enabled=yes 37 | -------------------------------------------------------------------------------- /deploy/playbooks/files/ssl/dev/generate.sh: 
#!/bin/bash
# Easily generate a 10 year SSL certificate and key for development. It
# creates a configuration file for wild card domains; if no argument is
# passed in it will fall back to "node.a" as the domain to use.
#
# Upon completion, these files should now exist::
#
#  * openssl.cnf
#  * ssl.key
#  * ssl.crt
#
# If those files exist they will be overwritten

set -e

# Quote "$1": the previous unquoted `[ ! -z $1 ]` test was subject to word
# splitting and broke with arguments containing whitespace.
if [ -n "$1" ]; then
    domain="$1"
else
    domain="node.a"
fi

template="[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
prompt = no
[req_distinguished_name]
CN = *.${domain}
[v3_req]
keyUsage = keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.${domain}"

echo "-> generating openssl.cnf configuration file"
echo "$template" > openssl.cnf

# Invoke openssl directly instead of `eval $command` so the arguments
# cannot be re-split or re-interpreted by the shell.
echo "-> running: openssl req -new -newkey rsa:2048 -sha256 -days 3650 -nodes -x509 -keyout ssl.key -out ssl.crt -config openssl.cnf"
openssl req -new -newkey rsa:2048 -sha256 -days 3650 -nodes -x509 \
    -keyout ssl.key -out ssl.crt -config openssl.cnf
echo "-> completed self signed certs"
5 | 6 | - hosts: production 7 | user: ubuntu 8 | roles: 9 | - common 10 | - repos 11 | - statsd 12 | vars: 13 | app_name: "chacra" 14 | use_ssl: true 15 | wsgi_file: wsgi.py 16 | wsgi_callable: application 17 | ansible_ssh_port: 2222 18 | binary_root: "/opt/binaries" 19 | repos_root: "/opt/repos" 20 | branch: "main" 21 | development_server: false 22 | use_letsencrypt: true 23 | fqdn: "chacra.ceph.com" 24 | 25 | # graphite reporting for statsd 26 | graphite_host: "shaman.ceph.com" 27 | # required 28 | # graphite_api_key: '1234-asdf-1234' 29 | 30 | # required on first deploy 31 | # api_user: "admin" 32 | # api_key: "secret" 33 | 34 | # leave blank, we want prod_callbacks.py created, by not used 35 | callback_user: "" 36 | callback_key: "" 37 | 38 | nginx_ssl_cert_path: "/etc/letsencrypt/live/{{ fqdn }}/fullchain.pem" 39 | nginx_ssl_key_path: "/etc/letsencrypt/live/{{ fqdn }}/privkey.pem" 40 | -------------------------------------------------------------------------------- /alembic/versions/375985186100_adds_sha1_to_repo_and_binary.py: -------------------------------------------------------------------------------- 1 | """adds sha1 to repo and binary 2 | 3 | Revision ID: 375985186100 4 | Revises: 53483b1161b 5 | Create Date: 2016-06-10 10:18:51.037610 6 | 7 | """ 8 | 9 | # revision identifiers, used by Alembic. 10 | revision = '375985186100' 11 | down_revision = '53483b1161b' 12 | branch_labels = None 13 | depends_on = None 14 | 15 | from alembic import op 16 | import sqlalchemy as sa 17 | 18 | 19 | def upgrade(): 20 | ### commands auto generated by Alembic - please adjust! 
class SHA1Controller(object):
    """
    Repo endpoint for a single sha1 under ``/repos/{project}/{ref}/{sha1}/``.

    GET returns a mapping of distro name -> unique distro versions for the
    repos matching this project/ref/sha1.
    """

    def __init__(self, sha1):
        self.sha1 = sha1
        self.project = Project.get(request.context['project_id'])
        self.ref = request.context['ref']
        request.context['sha1'] = sha1

    @expose('json', generic=True)
    def index(self):
        # 404 when this sha1 is unknown to the project's repos.
        if self.sha1 not in self.project.repo_sha1s:
            abort(404)
        # One entry per distro; a set de-duplicates distro versions.
        return {
            distro: list({
                repo.distro_version
                for repo in self.project.repos.filter_by(
                    distro=distro, ref=self.ref, sha1=self.sha1).all()
            })
            for distro in self.project.repo_distros
        }

    @index.when(method='POST', template='json')
    def index_post(self):
        error('/errors/not_allowed',
              'POST requests to this url are not allowed')

    @expose()
    def _lookup(self, name, *remainder):
        return DistroController(name), remainder
class ProjectController(object):
    """
    Binary endpoint for a single project under ``/binaries/{project}/``.

    A POST against an unknown project name implicitly creates it; any
    other method on an unknown project is a 404.
    """

    def __init__(self, project_name):
        self.project_name = project_name
        self.project = Project.query.filter_by(name=project_name).first()
        if not self.project:
            if request.method != 'POST':
                abort(404)
            else:
                # Only POST may create the project on the fly.
                self.project = models.get_or_create(Project, name=project_name)
        request.context['project_id'] = self.project.id

    @expose('json')
    def index(self):
        return self.project

    @expose()
    def _lookup(self, name, *remainder):
        return RefController(name), remainder


class ProjectsController(object):
    """Top-level listing of all projects and their binary refs."""

    @expose('json')
    def index(self):
        return {project.name: project.refs
                for project in Project.query.all()}

    @expose()
    def _lookup(self, project_name, *remainder):
        return ProjectController(project_name), remainder
class RootController(object):
    """Root of the API: describes the service and lists sub-controllers."""

    @expose('json')
    def index(self):
        documentation = "https://github.com/ceph/chacra#chacra"
        # Every project has binaries; only those joined to a Repo with an
        # actual on-disk path count as repo projects.
        binary_projects = [p.name for p in Project.query.all()]
        with_repos = Project.query.join(Repo).filter(Repo.path != None)
        repo_projects = [p.name for p in with_repos.all()]
        return dict(
            description=description,
            documentation=documentation,
            repos=repo_projects,
            binaries=binary_projects)

    binaries = ProjectsController()
    errors = ErrorsController()
    search = SearchController()
    repos = RepoProjectsController()
    health = HealthController()
class RefController(object):
    """
    Repo endpoint for a single ref under ``/repos/{project}/{ref}/``.

    GET returns a mapping of sha1 -> unique distros for the repos of this
    project/ref.
    """

    def __init__(self, ref_name):
        self.ref_name = ref_name
        self.project = Project.get(request.context['project_id'])
        request.context['ref'] = self.ref_name

    @expose('json', generic=True)
    def index(self):
        if self.ref_name not in self.project.repo_refs:
            abort(404)
        # Single query instead of the previous N+1 pattern (one query to
        # gather sha1s, then one per sha1 for its distros); a single pass
        # groups distros by sha1 and yields the exact same response.
        resp = {}
        for repo in self.project.repos.filter_by(ref=self.ref_name).all():
            resp.setdefault(repo.sha1, set()).add(repo.distro)
        return {sha1: list(distros) for sha1, distros in resp.items()}

    @index.when(method='POST', template='json')
    def index_post(self):
        error('/errors/not_allowed',
              'POST requests to this url are not allowed')

    @expose()
    def _lookup(self, name, *remainder):
        return SHA1Controller(name), remainder
10 | revision = '4ee54bf1cca3' 11 | down_revision = '432dc36d3105' 12 | branch_labels = None 13 | depends_on = None 14 | 15 | from alembic import op 16 | import sqlalchemy as sa 17 | 18 | 19 | def upgrade(): 20 | ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('binaries', sa.Column('flavor', sa.String(length=256), nullable=False, server_default='default')) 22 | op.create_index(op.f('ix_binaries_flavor'), 'binaries', ['flavor'], unique=False) 23 | op.add_column('repos', sa.Column('flavor', sa.String(length=256), nullable=False, server_default='default')) 24 | op.create_index(op.f('ix_repos_flavor'), 'repos', ['flavor'], unique=False) 25 | ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | ### commands auto generated by Alembic - please adjust! ### 30 | op.drop_index(op.f('ix_repos_flavor'), table_name='repos') 31 | op.drop_column('repos', 'flavor') 32 | op.drop_index(op.f('ix_binaries_flavor'), table_name='binaries') 33 | op.drop_column('binaries', 'flavor') 34 | ### end Alembic commands ### 35 | -------------------------------------------------------------------------------- /chacra/controllers/binaries/refs.py: -------------------------------------------------------------------------------- 1 | from pecan import expose, abort, request 2 | from chacra import models 3 | from chacra.controllers import error 4 | from chacra.controllers.binaries.sha1s import SHA1Controller 5 | 6 | 7 | class RefController(object): 8 | 9 | def __init__(self, ref_name): 10 | self.ref_name = ref_name 11 | self.project = models.Project.get(request.context['project_id']) 12 | request.context['ref'] = self.ref_name 13 | 14 | @expose('json', generic=True) 15 | def index(self): 16 | if self.ref_name not in self.project.refs: 17 | abort(404) 18 | resp = {} 19 | binaries = models.Binary.filter_by( 20 | project=self.project, 21 | ref=self.ref_name).all() 22 | 23 | sha1s = set([b.sha1 for b in binaries]) 24 | 25 | for sha1 in sha1s: 26 | resp[sha1] = 
class DistroController(object):
    """
    Repo endpoint for a single distro under
    ``/repos/{project}/{ref}/{sha1}/{distro}/``.

    GET returns the unique distro versions available for this
    project/ref/sha1/distro combination.
    """

    def __init__(self, distro_name):
        self.distro_name = distro_name
        self.project = Project.get(request.context['project_id'])
        self.ref = request.context['ref']
        self.sha1 = request.context['sha1']
        request.context['distro'] = distro_name

    @expose('json', generic=True)
    def index(self):
        # TODO: Improve this duplication here (and spread to other controllers)
        if self.distro_name not in self.project.repo_distros:
            abort(404)
        if self.ref not in self.project.repo_refs:
            abort(404)
        matching = self.project.repos.filter_by(
            distro=self.distro_name, ref=self.ref, sha1=self.sha1).all()
        # Set comprehension de-duplicates versions before listing them.
        return list({repo.distro_version for repo in matching})

    @index.when(method='POST', template='json')
    def index_post(self):
        error('/errors/not_allowed',
              'POST requests to this url are not allowed')

    @expose()
    def _lookup(self, name, *remainder):
        return RepoController(name), remainder
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: test 4 | user: ubuntu 5 | roles: 6 | - common 7 | - repos 8 | - statsd 9 | vars: 10 | app_name: "chacra" 11 | use_ssl: true 12 | wsgi_file: wsgi.py 13 | wsgi_callable: application 14 | ansible_ssh_port: 2222 15 | binary_root: "/opt/binaries" 16 | repos_root: "/opt/repos" 17 | branch: "main" 18 | development_server: false 19 | purge_repos: True 20 | fqdn: "{{ inventory_hostname }}" 21 | ## required. use something like chacra# so rabbitmq will start 22 | ## see http://tracker.ceph.com/issues/19316 23 | # short_hostname: "chacra#" 24 | # required for first deploy 25 | # api_user: "admin" 26 | # api_key: "secret" 27 | # graphite reporting for statsd 28 | graphite_host: "shaman.ceph.com" 29 | ## required 30 | # graphite_api_key: '1234-asdf-1234' 31 | # callbacks 32 | callback_url: "https://shaman.ceph.com/api/repos/" 33 | ## required for first deploy 34 | # callback_user: "admin" 35 | # callback_key: "secret" 36 | callback_verify_ssl: True 37 | health_ping: true 38 | health_ping_url: "https://shaman.ceph.com/api/nodes/" 39 | use_letsencrypt: True 40 | nginx_ssl_cert_path: "/etc/letsencrypt/live/{{ fqdn }}/fullchain.pem" 41 | nginx_ssl_key_path: "/etc/letsencrypt/live/{{ fqdn }}/privkey.pem" 42 | combine_deb_repos: False 43 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/templates/nginx.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | user nginx; 3 | worker_processes 20; 4 | worker_rlimit_nofile 8192; 5 | 6 | pid /var/run/nginx.pid; 7 | 8 | events { 9 | worker_connections 1024; 10 | # multi_accept on; 11 | } 12 | 13 | http { 14 | 15 | ## 16 | # Basic Settings 17 | ## 18 | 19 | #sendfile on; 20 | tcp_nopush on; 21 | tcp_nodelay on; 22 | keepalive_timeout 65; 23 | types_hash_max_size 2048; 24 | server_tokens off; 25 | 26 | # 
class SHA1Controller(object):
    """
    Binary endpoint for a single sha1 under
    ``/binaries/{project}/{ref}/{sha1}/``.

    GET returns a mapping of distro -> unique distro versions for the
    binaries matching this project/ref/sha1; 404 when nothing matches.
    """

    def __init__(self, sha1):
        self.sha1 = sha1
        self.project = models.Project.get(request.context['project_id'])
        request.context['sha1'] = sha1
        self.ref = request.context["ref"]

    @expose('json', generic=True)
    def index(self):
        if self.sha1 not in self.project.sha1s:
            abort(404)
        binaries = models.Binary.filter_by(
            project=self.project,
            sha1=self.sha1,
            ref=self.ref).all()

        # Group distro versions by distro in a single pass; sets
        # de-duplicate the versions.
        grouped = {}
        for binary in binaries:
            grouped.setdefault(binary.distro, set()).add(binary.distro_version)

        if not grouped:
            abort(404)

        return {distro: list(versions)
                for distro, versions in grouped.items()}

    @index.when(method='POST', template='json')
    def index_post(self):
        error('/errors/not_allowed', 'POST requests to this url are not allowed')

    @expose()
    def _lookup(self, name, *remainder):
        return DistroController(name), remainder
") 48 | alembic_cfg = Config(get_alembic_config()) 49 | command.stamp(alembic_cfg, "head") 50 | -------------------------------------------------------------------------------- /chacra/controllers/repos/projects.py: -------------------------------------------------------------------------------- 1 | from pecan import expose, abort, request 2 | from chacra.models import Project 3 | from chacra.controllers import error 4 | from chacra.controllers.repos.refs import RefController 5 | 6 | 7 | class ProjectController(object): 8 | 9 | def __init__(self, project_name): 10 | self.project_name = project_name 11 | self.project = Project.query.filter_by( 12 | name=project_name 13 | ).first() 14 | if not self.project: 15 | abort(404) 16 | request.context['project_id'] = self.project.id 17 | 18 | @expose('json') 19 | def index(self): 20 | if request.method == 'POST': 21 | error('/errors/not_allowed', 22 | 'POST requests to this url are not allowed') 23 | resp = {} 24 | for ref in self.project.repo_refs: 25 | resp[ref] = list(set( 26 | [r.sha1 for r in 27 | self.project.repos.filter_by(ref=ref).all()] 28 | )) 29 | return resp 30 | 31 | @expose() 32 | def _lookup(self, name, *remainder): 33 | return RefController(name), remainder 34 | 35 | 36 | class ProjectsController(object): 37 | 38 | @expose('json') 39 | def index(self): 40 | resp = {} 41 | for project in Project.query.all(): 42 | resp[project.name] = project.repo_refs 43 | return resp 44 | 45 | @expose() 46 | def _lookup(self, project_name, *remainder): 47 | return ProjectController(project_name), remainder 48 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/tasks/systemd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: ensure /var/log/celery dir exists 4 | become: true 5 | file: path=/var/log/celery state=directory owner={{ ansible_ssh_user }} group={{ ansible_ssh_user }} recurse=yes 6 | 7 | - name: 
ensure /etc/default/ dir exists 8 | become: true 9 | file: path=/etc/default state=directory 10 | 11 | - name: install the systemd configuration file for celery 12 | template: src=systemd/chacra.sysconfig.j2 dest=/etc/default/chacra 13 | become: true 14 | 15 | - name: install the systemd unit file for chacra 16 | template: src=systemd/chacra.service.j2 dest=/etc/systemd/system/chacra.service 17 | become: true 18 | notify: 19 | - reload systemd 20 | 21 | - name: install the systemd unit file for celery 22 | template: src=systemd/chacra-celery.service.j2 dest=/etc/systemd/system/chacra-celery.service 23 | become: true 24 | notify: 25 | - reload systemd 26 | 27 | - name: install the systemd unit file for celerybeat 28 | template: src=systemd/chacra-celerybeat.service.j2 dest=/etc/systemd/system/chacra-celerybeat.service 29 | become: true 30 | notify: 31 | - reload systemd 32 | 33 | - name: ensure chacra is enabled and running 34 | become: true 35 | service: name=chacra state=restarted enabled=yes 36 | 37 | - name: ensure chacra-celery is enabled and running 38 | become: true 39 | service: name=chacra-celery state=restarted enabled=yes 40 | 41 | - name: ensure chacra-celerybeat is enabled and running 42 | become: true 43 | service: name=chacra-celerybeat state=restarted enabled=yes 44 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/templates/nginx_site.conf: -------------------------------------------------------------------------------- 1 | server { 2 | server_name {{ fqdn }}; 3 | location '/.well-known/acme-challenge' { 4 | default_type "text/plain"; 5 | root {{ ssl_webroot_path }}; 6 | } 7 | location / { 8 | add_header Strict-Transport-Security max-age=31536000; 9 | return 301 https://$server_name$request_uri; 10 | } 11 | } 12 | 13 | server { 14 | listen 443 default_server ssl; 15 | server_name {{ fqdn }}; 16 | 17 | {% if ssl_cert_exists.stat.exists == true %} 18 | ssl_certificate {{ 
server {
    server_name {{ fqdn }};
    # Serve the letsencrypt ACME challenge over plain HTTP
    location '/.well-known/acme-challenge' {
        default_type "text/plain";
        root {{ ssl_webroot_path }};
    }
    # Everything else is redirected to HTTPS
    location / {
        add_header Strict-Transport-Security max-age=31536000;
        return 301 https://$server_name$request_uri;
    }
}

server {
    listen 443 default_server ssl;
    server_name {{ fqdn }};

    {% if ssl_cert_exists.stat.exists == true %}
    ssl_certificate {{ nginx_ssl_cert_path }};
    ssl_certificate_key {{ nginx_ssl_key_path }};
    {% endif %}
    # TLSv1 and TLSv1.1 are formally deprecated (RFC 8996); only offer
    # TLSv1.2 (previously this also allowed TLSv1 and TLSv1.1).
    ssl_protocols TLSv1.2;
    ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
    ssl_prefer_server_ciphers on;
    add_header Strict-Transport-Security "max-age=31536000";

    access_log /var/log/nginx/{{ app_name }}-access.log;
    error_log /var/log/nginx/{{ app_name }}-error.log;

    # Some binaries are gigantic
    client_max_body_size 4096m;

    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        proxy_pass http://127.0.0.1:8000;
        # Long timeout: repo creation / large uploads can be slow
        proxy_read_timeout 5000;
    }

    # Public, browsable repository files
    location /r/ {
        autoindex on;
        alias {{ repos_root }}/;
    }

    # Binaries are served via X-Accel-Redirect only (internal)
    location /b/ {
        internal;
        alias {{ binary_root }}/;
    }
}
from pecan import expose, response, request


class ErrorsController(object):
    """JSON error endpoints targeted by internal redirects.

    Each handler sets the matching HTTP status code and returns a JSON body
    of the form ``{"message": <text>}``. The default message can be
    overridden by passing an ``error_message`` keyword argument.
    """

    def _json_error(self, kw, status, default_message):
        # Shared helper: set the response status and build the payload,
        # honoring an ``error_message`` override when provided.
        response.status = status
        return dict(message=kw.get('error_message', default_message))

    @expose('json')
    def schema(self, **kw):
        # Triggered by pecan-notario schema validation failures; the
        # validation error object carries the details of what was invalid.
        response.status = 400
        return dict(message=str(request.validation_error))

    @expose('json')
    def invalid(self, **kw):
        return self._json_error(kw, 400, 'invalid request')

    @expose('json')
    def not_allowed(self, **kw):
        return self._json_error(kw, 405, 'method not allowed')

    @expose('json')
    def forbidden(self, **kw):
        return self._json_error(kw, 403, 'forbidden')

    @expose('json')
    def not_found(self, **kw):
        return self._json_error(kw, 404, 'resource was not found')

    @expose('json')
    def unavailable(self, **kw):
        return self._json_error(kw, 503, 'service unavailable')

    @expose('json')
    def error(self, **kw):
        # fix: default message typo 'occured' -> 'occurred'
        return self._json_error(kw, 500, 'an error has occurred')
When using multiple version 25 | # directories, initial revisions must be specified with --version-path 26 | # version_locations = %(here)s/bar %(here)s/bat alembic/versions 27 | 28 | # the output encoding used when revision files 29 | # are written from script.py.mako 30 | # output_encoding = utf-8 31 | 32 | sqlalchemy.url = postgresql://{{ app_name }}:{{ db_password.stdout }}@127.0.0.1/{{ app_name }} 33 | 34 | 35 | # Logging configuration 36 | [loggers] 37 | keys = root,sqlalchemy,alembic 38 | 39 | [handlers] 40 | keys = console 41 | 42 | [formatters] 43 | keys = generic 44 | 45 | [logger_root] 46 | level = WARN 47 | handlers = console 48 | qualname = 49 | 50 | [logger_sqlalchemy] 51 | level = WARN 52 | handlers = 53 | qualname = sqlalchemy.engine 54 | 55 | [logger_alembic] 56 | level = INFO 57 | handlers = 58 | qualname = alembic 59 | 60 | [handler_console] 61 | class = StreamHandler 62 | args = (sys.stderr,) 63 | level = NOTSET 64 | formatter = generic 65 | 66 | [formatter_generic] 67 | format = %(levelname)-5.5s [%(name)s] %(message)s 68 | datefmt = %H:%M:%S 69 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/tasks/letsencrypt.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # letsencrypt doesn't recommend using the Ubuntu-provided letsencrypt package 3 | # https://github.com/certbot/certbot/issues/3538 4 | # They do recommend using certbot from their PPA for Xenial 5 | # https://certbot.eff.org/#ubuntuxenial-nginx 6 | 7 | - name: install software-properties-common 8 | apt: 9 | name: software-properties-common 10 | state: latest 11 | update_cache: yes 12 | become: true 13 | 14 | - name: install certbot 15 | apt: 16 | name: certbot 17 | state: latest 18 | update_cache: yes 19 | become: true 20 | 21 | - name: ensure letsencrypt acme-challenge path 22 | file: 23 | path: "{{ ssl_webroot_path }}" 24 | state: "directory" 25 | mode: 0755 26 | become: true 
from pecan import expose
from chacra.models import Binary
from chacra.controllers import error


class SearchController(object):
    """Search endpoint for binaries.

    Supports exact-match filtering on most ``Binary`` columns plus a
    ``name-has`` substring filter (SQL ``LIKE``).
    """

    def __init__(self):
        # Maps the allowed query parameters to the Binary column (or the
        # column's bound ``.like`` for substring matching) used to filter.
        self.filters = {
            'distro': Binary.distro,
            'distro_version': Binary.distro_version,
            'arch': Binary.arch,
            'ref': Binary.ref,
            'built_by': Binary.built_by,
            'size': Binary.size,
            'name': Binary.name,
            'name-has': Binary.name.like,
        }

    @expose('json')
    def index(self, **kw):
        query = self.apply_filters(kw)
        if not query:
            # No filters were given (or an invalid one redirected to
            # /errors/), so there is nothing to return.
            return {}
        return query.all()

    def apply_filters(self, filters):
        """Chain a Binary query from the given query-param filters.

        Returns ``None`` when no filters are given; redirects to the
        not_allowed error handler on an unknown filter key.
        """
        # TODO: allow operators
        query = None
        for k, v in filters.items():
            if k not in self.filters:
                return error('/errors/not_allowed', 'invalid query params: %s' % k)
            # The key is guaranteed valid past the guard above, so apply
            # (and chain) the filter directly.  (The original re-checked
            # membership here, which was always true.)
            query = self.filter_binary(k, v, query)
        return query

    def filter_binary(self, key, value, query=None):
        """Apply one filter to ``query`` (or start a fresh Binary query)."""
        filter_obj = self.filters[key]
        # for *-has search only
        search_value = '%{value}%'.format(value=value)

        # query will exist if multiple filters are being applied, e.g. by name
        # and by distro but otherwise it will be None
        if query:
            if key.endswith('-has'):
                return query.filter(filter_obj(search_value))
            return query.filter(filter_obj == value)
        if key.endswith('-has'):
            return Binary.query.filter(filter_obj(search_value))
        return Binary.query.filter(filter_obj == value)
from chacra.models import Project, Binary


class TestRefController(object):
    """Tests for the /binaries/<project>/<ref>/ listing endpoint."""

    def test_get_index_single_ref(self, session):
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm', project, ref='main', sha1="head",
            distro='centos', distro_version='el6', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/')
        assert response.json == {'head': ['centos']}

    def test_get_index_no_ref(self, session):
        Project('ceph')
        session.commit()
        response = session.app.get('/binaries/ceph/next/', expect_errors=True)
        assert response.status_int == 404

    def test_get_index_ref_with_sha1(self, session):
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm', project, ref='main', sha1="head",
            distro='centos', distro_version='el6', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/')
        assert response.json['head'] == ['centos']

    def test_get_index_ref_with_sha1s(self, session):
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm', project, ref='main', sha1="sha1",
            distro='centos', distro_version='el6', arch='i386')
        Binary(
            'ceph-1.0.0.deb', project, ref='main', sha1="head",
            distro='ubuntu', distro_version='trusty', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/')
        assert set(response.json.keys()) == {'head', 'sha1'}

    def test_get_ref_with_distinct_sha1s(self, session):
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm', project, ref='main', sha1="sha1",
            distro='centos', distro_version='el6', arch='i386')
        # note how we are using a different ref; it must not appear when
        # listing 'main'
        Binary(
            'ceph-1.0.0.deb', project, ref='firefly', sha1="head",
            distro='ubuntu', distro_version='trusty', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/')
        assert list(response.json.keys()) == ['sha1']
nginx_ssl_cert_path }}" 36 | register: ssl_cert_exists 37 | become: true 38 | 39 | - name: write nginx.conf 40 | action: template src=../templates/nginx.conf dest=/etc/nginx/nginx.conf 41 | become: true 42 | 43 | - name: enable nginx 44 | become: true 45 | action: service name=nginx enabled=true 46 | 47 | - name: create nginx site config 48 | template: 49 | src: "../templates/nginx_site.conf" 50 | dest: "/etc/nginx/sites-available/{{ app_name }}.conf" 51 | become: true 52 | notify: 53 | - restart nginx 54 | 55 | - name: link nginx config 56 | file: 57 | src: "/etc/nginx/sites-available/{{ app_name }}.conf" 58 | dest: "/etc/nginx/sites-enabled/{{ app_name }}.conf" 59 | state: "link" 60 | become: true 61 | 62 | - include_tasks: letsencrypt.yml 63 | when: 64 | - development_server == false 65 | - use_letsencrypt 66 | - not ssl_cert.stat.exists 67 | 68 | - name: stop/disable apache2 69 | become: true 70 | service: 71 | name: apache2 72 | enabled: false 73 | state: stopped 74 | failed_when: false 75 | tags: apachestop 76 | 77 | - name: ensure nginx is restarted 78 | become: true 79 | action: service name=nginx state=restarted 80 | -------------------------------------------------------------------------------- /chacra/tests/async/test_checks.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from chacra.asynch import checks 3 | from chacra.asynch.checks import SystemCheckError 4 | 5 | 6 | class TestIsHealthy(object): 7 | 8 | def test_is_not_healthy(self): 9 | def bad_check(): 10 | raise RuntimeError() 11 | 12 | checks.system_checks = (bad_check,) 13 | assert checks.is_healthy() is False 14 | 15 | def test_is_healthy(self): 16 | checks.system_checks = (lambda: True,) 17 | assert checks.is_healthy() is True 18 | 19 | 20 | df_communicate = ( 21 | b'Filesystem 1K-blocks Used Available Use% Mounted on\n/dev/mapper/vagrant--vg-root 80909064 2205960 74570036 3% /\n', 22 | '') 23 | 24 | df_communicate_full = ( 25 | 
b'Filesystem 1K-blocks Used Available Use% Mounted on\n/dev/mapper/vagrant--vg-root 80909064 2205960 74570036 93% /\n', 26 | '') 27 | 28 | def fake_wait(timeout=0): 29 | return True 30 | 31 | class TestDiskHasSpace(object): 32 | 33 | 34 | def test_it_has_plenty(self, fake): 35 | popen = fake(returncode=0, wait=fake_wait, communicate=lambda: df_communicate) 36 | result = checks.disk_has_space(_popen=lambda *a, **kw: popen) 37 | assert result is None 38 | 39 | def test_it_has_an_error(self, fake): 40 | stderr = fake(read=lambda: b'df had an error') 41 | popen = fake(returncode=1, wait=fake_wait, stderr=stderr) 42 | with pytest.raises(SystemCheckError) as err: 43 | checks.disk_has_space(_popen=lambda *a, **kw: popen) 44 | assert 'df had an error' in err.value.message 45 | 46 | def test_it_is_full(self, fake): 47 | popen = fake(returncode=0, wait=fake_wait, communicate=lambda: df_communicate_full) 48 | with pytest.raises(SystemCheckError) as err: 49 | checks.disk_has_space(_popen=lambda *a, **kw: popen) 50 | assert 'almost full. 
Used: 93%' in err.value.message 51 | 52 | 53 | class TestErrorMessage(object): 54 | 55 | def test_message_is_captured(self): 56 | with pytest.raises(SystemCheckError) as err: 57 | raise SystemCheckError('an error message') 58 | assert 'an error message' == str(err.value) 59 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/tasks/rabbitmq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add the rabbitmq signing key 3 | become: true 4 | shell: 5 | cmd: "curl -1sLf 'https://keys.openpgp.org/vks/v1/by-fingerprint/0A9AF2115F4687BD29803A206B73A36E6026DFCA' | sudo gpg --dearmor | sudo tee /usr/share/keyrings/com.rabbitmq.team.gpg" 6 | 7 | - name: add the Erlang Launchpad PPA key 8 | become: true 9 | apt_key: 10 | url: "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xf77f1eda57ebb1cc" 11 | state: present 12 | 13 | - name: add the rabbitmq public key 14 | become: true 15 | apt_key: 16 | url: "https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey" 17 | state: present 18 | 19 | - name: add the Erlang Launchpad PPA 20 | become: true 21 | apt_repository: 22 | repo: "deb http://ppa.launchpad.net/rabbitmq/rabbitmq-erlang/ubuntu {{ ansible_lsb.codename }} main" 23 | state: present 24 | 25 | - name: add the rabbitmq official repo 26 | become: true 27 | apt_repository: 28 | repo: "deb https://packagecloud.io/rabbitmq/rabbitmq-server/ubuntu/ {{ ansible_lsb.codename }} main" 29 | state: present 30 | 31 | - name: update the apt cache 32 | become: true 33 | apt: 34 | update_cache: true 35 | 36 | - name: install pkg-config 37 | become: true 38 | apt: 39 | name: pkg-config 40 | 41 | - name: install erlang 42 | become: true 43 | apt: 44 | name: 45 | - erlang-base 46 | - erlang-asn1 47 | - erlang-crypto 48 | - erlang-eldap 49 | - erlang-ftp 50 | - erlang-inets 51 | - erlang-mnesia 52 | - erlang-os-mon 53 | - erlang-parsetools 54 | - erlang-public-key 55 | - 
erlang-runtime-tools 56 | - erlang-snmp 57 | - erlang-ssl 58 | - erlang-syntax-tools 59 | - erlang-tftp 60 | - erlang-tools 61 | - erlang-xmerl 62 | state: present 63 | 64 | - name: install rabbitmq-server 65 | become: true 66 | apt: 67 | name: rabbitmq-server 68 | state: present 69 | 70 | - name: ensure rabbitmq is running and enabled 71 | become: true 72 | service: 73 | name: rabbitmq-server 74 | state: started 75 | enabled: yes 76 | 77 | - name: enable web management for rabbit 78 | become: true 79 | command: rabbitmq-plugins enable rabbitmq_management 80 | -------------------------------------------------------------------------------- /deploy/playbooks/redeploy-all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook can be used to redeploy all chacra nodes without disrupting the CI. 3 | # It check Jenkins to make sure there are no running jobs either POSTing or planning to POST binaries to each chacra node before proceeding 4 | # 5 | # Prerequisites: 6 | # - Be using ceph-sepia-secrets ansible inventory 7 | # - Have an individual playbook file for each node named {{ inventory_hostname }}.yml 8 | # - Have no need to pass additional vars on the command-line 9 | # (e.g., `ansible-playbook 1.chacra.ceph.com.yml` should be sufficient) 10 | - hosts: chacra 11 | serial: 1 12 | become: true 13 | gather_facts: true 14 | vars: 15 | path_to_playbooks: "~/src/chacra/deploy/playbooks" 16 | tasks: 17 | 18 | - name: "Touch /tmp/fail_check on {{ inventory_hostname }}" 19 | file: 20 | path: /tmp/fail_check 21 | state: touch 22 | 23 | # Set current_node since we can't use ansible_host in the next task due to it being delegated to a different host (and not the chacra host we're checking) 24 | - set_fact: 25 | current_node: "{{ ansible_host }}" 26 | 27 | - name: "Check if {{ inventory_hostname }} has Jenkins jobs using it" 28 | shell: "for file in $(sudo lsof -a -u jenkins -c java | grep \"/var/lib/jenkins/jobs\" | 
from chacra.models import Project, Repo


class TestSHA1Controller(object):
    """Tests for /repos/<project>/<ref>/<sha1>/ distro listings."""

    def _build_repo(self, project, distro, distro_version, built=False):
        # helper: a firefly/head repo for `project`; "built" repos get
        # a path assigned, exactly like the inline originals did
        repo = Repo(
            project,
            "firefly",
            distro,
            distro_version,
            sha1="head",
        )
        if built:
            repo.path = "some_path"
        return repo

    def test_get_single_project(self, session):
        project = Project('foobar')
        self._build_repo(project, "ubuntu", "trusty", built=True)
        session.commit()
        response = session.app.get('/repos/foobar/firefly/head/')
        assert response.status_int == 200
        assert len(response.json) == 1
        assert response.json == {"ubuntu": ["trusty"]}

    def test_sha1_does_not_exist(self, session):
        project = Project('foobar')
        self._build_repo(project, "ubuntu", "trusty", built=True)
        session.commit()
        response = session.app.get('/repos/foobar/firefly/sha1/', expect_errors=True)
        assert response.status_int == 404

    def test_sha1_has_no_built_repos(self, session):
        project = Project('foobar')
        # no path set: the repo exists but has not been built
        self._build_repo(project, "ubuntu", "trusty")
        session.commit()
        response = session.app.get('/repos/foobar/firefly/head/')
        assert response.status_int == 200
        assert len(response.json) == 1

    def test_multiple_distros_with_built_repos(self, session):
        project = Project('foobar')
        self._build_repo(project, "ubuntu", "trusty", built=True)
        self._build_repo(project, "centos", "7", built=True)
        session.commit()
        response = session.app.get('/repos/foobar/firefly/head/')
        assert response.status_int == 200
        assert len(response.json) == 2
        assert response.json == {"ubuntu": ["trusty"], "centos": ['7']}
from chacra.models import Project, Binary


class TestProjectsController(object):
    """Tests for the /binaries/ projects listing."""

    def test_get_index_no_projects(self, session):
        response = session.app.get('/binaries/')
        assert response.status_int == 200
        assert response.json == {}

    def test_list_a_project(self, session):
        Project('foobar')
        session.commit()
        assert session.app.get('/binaries/').json == {'foobar': []}

    def test_single_project_should_have_one_item(self, session):
        Project('foobar')
        session.commit()
        response = session.app.get('/binaries/')
        assert response.status_int == 200
        assert len(response.json) == 1

    def test_list_a_few_projects(self, session):
        # create 20 projects: foo_0 .. foo_19
        for index in range(20):
            Project('foo_%s' % index)
        session.commit()

        response = session.app.get('/binaries/')
        assert response.status_int == 200
        assert len(response.json) == 20

    def test_create_project(self, session):
        session.app.post('/binaries/rhcs-ceph/')
        response = session.app.get('/binaries/rhcs-ceph/')
        assert response.status_int == 200
        assert response.json == {}


class TestProjectController(object):
    """Tests for a single project's /binaries/<project>/ view."""

    def test_get_index_single_project(self, session):
        Project('foobar')
        session.commit()
        response = session.app.get('/binaries/foobar/')
        assert response.status_int == 200

    def test_get_index_no_project(self, session):
        response = session.app.get('/binaries/foobar/', expect_errors=True)
        assert response.status_int == 404

    def test_get_index_single_project_data(self, session):
        Project('foobar')
        session.commit()
        assert session.app.get('/binaries/foobar/').json == {}

    def test_get_project_refs(self, session):
        project = Project('foobar')
        Binary(
            'ceph-1.0.0.rpm', project, ref='main', sha1="HEAD",
            distro='centos', distro_version='el6', arch='i386')
        Binary(
            'ceph-1.0.0.rpm', project, ref='firefly', sha1="HEAD",
            distro='centos', distro_version='el6', arch='i386')
        session.commit()
        response = session.app.get('/binaries/foobar/')
        assert response.json == {'firefly': ['HEAD'], 'main': ['HEAD']}
| from chacra.controllers import search 3 | 4 | 5 | class TestSearchController(object): 6 | 7 | def test_search_by_distro_gets_one_item(self, session): 8 | project = Project('ceph') 9 | Binary('ceph-1.0.0.rpm', project, ref='giant', distro='centos', distro_version='el6', arch='x86_64') 10 | session.commit() 11 | result = session.app.get('/search/?distro=centos') 12 | assert len(result.json) == 1 13 | 14 | def test_search_by_distro_gets_no_items(self, session): 15 | project = Project('ceph') 16 | Binary('ceph-1.0.0.rpm', project, ref='giant', distro='centos', distro_version='el6', arch='x86_64') 17 | session.commit() 18 | result = session.app.get('/search/?distro=solaris') 19 | assert len(result.json) == 0 20 | 21 | def test_search_by_distro_gets_full_metadata(self, session): 22 | project = Project('ceph') 23 | Binary('ceph-1.0.0.rpm', project, ref='giant', distro='centos', distro_version='el6', arch='x86_64') 24 | session.commit() 25 | result = session.app.get('/search/?distro=centos').json[0] 26 | assert result['name'] == 'ceph-1.0.0.rpm' 27 | assert result['distro'] == 'centos' 28 | assert result['distro_version'] == 'el6' 29 | assert result['arch'] == 'x86_64' 30 | assert result['ref'] == 'giant' 31 | 32 | def test_search_by_distro_gets_more_than_one_item(self, session): 33 | project = Project('ceph') 34 | Binary('ceph-1.0.0.rpm', project, ref='giant', distro='centos', distro_version='el6', arch='x86_64') 35 | Binary('ceph-1.0.0.rpm', project, ref='giant', distro='centos', distro_version='el7', arch='x86_64') 36 | session.commit() 37 | result = session.app.get('/search/?distro=centos') 38 | assert len(result.json) == 2 39 | 40 | 41 | class TestLikeSearch(object): 42 | 43 | def setup(self): 44 | self.controller = search.SearchController() 45 | 46 | def test_search_like_name(self, session): 47 | project = Project('ceph') 48 | Binary('ceph-1.0.0.rpm', project, ref='giant', distro='centos', distro_version='el6', arch='x86_64') 49 | Binary('radosgw-agent-1.0.0.rpm', 
class TestSHA1Controller(object):
    """Exercise GET /binaries/<project>/<ref>/<sha1>/ listings."""

    def test_get_index_single_sha1(self, session):
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm', project, sha1="head", ref='main',
            distro='centos', distro_version='el6', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/head/')
        assert response.json == {'centos': ['el6']}

    def test_get_index_no_sha1(self, session):
        # a project with no binaries at all should 404 for any sha1
        Project('ceph')
        session.commit()
        response = session.app.get('/binaries/ceph/next/sha1/', expect_errors=True)
        assert response.status_int == 404

    def test_get_index_sha1_with_distro(self, session):
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm', project, sha1="head", ref='main',
            distro='centos', distro_version='el6', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/head/')
        assert response.json['centos'] == ['el6']

    def test_get_index_sha1_with_distros(self, session):
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm', project, ref='main', sha1="head",
            distro='centos', distro_version='el6', arch='i386')
        Binary(
            'ceph-1.0.0.deb', project, ref='main', sha1="head",
            distro='ubuntu', distro_version='trusty', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/head/')
        assert set(response.json.keys()) == {'centos', 'ubuntu'}

    def test_get_sha1_with_distinct_distros(self, session):
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm', project, ref='main', sha1="head",
            distro='centos', distro_version='el6', arch='i386')
        # the deb binary lives on a different ref, so it must not appear
        Binary(
            'ceph-1.0.0.deb', project, ref='firefly', sha1="head",
            distro='ubuntu', distro_version='trusty', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/head/')
        assert list(response.json.keys()) == ['centos']

    def test_get_distro_with_distinct_distros_different_sha1(self, session):
        project = Project('ceph')
        # same ref, but a different sha1 — only the 'head' binary may match
        Binary(
            'ceph-1.0.0.rpm', project, ref='main', sha1="sha1",
            distro='centos', distro_version='el6', arch='i386')
        Binary(
            'ceph-1.0.0.deb', project, ref='main', sha1="head",
            distro='ubuntu', distro_version='trusty', arch='i386')
        session.commit()
        response = session.app.get('/binaries/ceph/main/head/')
        assert list(response.json.keys()) == ['ubuntu']
class TestRefController(object):
    """Exercise GET /repos/<project>/<ref>/ mapping sha1s to distros."""

    def _make_repo(self, project, distro, distro_version, sha1, path=None):
        # every repo in these tests lives on the 'firefly' ref; a repo with
        # a path set counts as "built"
        repo = Repo(project, "firefly", distro, distro_version, sha1=sha1)
        if path is not None:
            repo.path = path
        return repo

    def test_get_single_project(self, session):
        project = Project('foobar')
        self._make_repo(project, "ubuntu", "trusty", "head", path="some_path")
        session.commit()
        response = session.app.get('/repos/foobar/firefly/')
        assert response.status_int == 200
        assert len(response.json) == 1
        assert response.json == {"head": ["ubuntu"]}

    def test_ref_does_not_exist(self, session):
        project = Project('foobar')
        self._make_repo(project, "ubuntu", "trusty", "head", path="some_path")
        session.commit()
        response = session.app.get('/repos/foobar/hammer/', expect_errors=True)
        assert response.status_int == 404

    def test_ref_has_no_built_repos(self, session):
        project = Project('foobar')
        self._make_repo(project, "ubuntu", "trusty", "head")
        session.commit()
        response = session.app.get('/repos/foobar/firefly/')
        assert response.status_int == 200
        assert len(response.json) == 1

    def test_does_show_sha1_without_built_repos(self, session):
        project = Project('foobar')
        self._make_repo(project, "ubuntu", "trusty", "sha1", path="some_path")
        self._make_repo(project, "centos", "7", "head")
        session.commit()
        response = session.app.get('/repos/foobar/firefly/')
        assert len(response.json) == 2
        assert "sha1" in response.json

    def test_multiple_sha1_with_built_repos(self, session):
        project = Project('foobar')
        self._make_repo(project, "ubuntu", "trusty", "head", path="some_path")
        self._make_repo(project, "centos", "7", "sha1", path="some_path")
        session.commit()
        response = session.app.get('/repos/foobar/firefly/')
        assert response.status_int == 200
        assert len(response.json) == 2
        assert response.json == {"head": ["ubuntu"], "sha1": ['centos']}
class DistroVersionController(object):
    """
    Controller for /binaries/.../<distro>/<distro_version>/ URLs: maps each
    architecture to the binary names available for this distro version.
    """

    def __init__(self, distro_version):
        self.distro_version = distro_version
        self.project = models.Project.get(request.context['project_id'])
        self.distro_name = request.context['distro']
        self.ref = request.context['ref']
        self.sha1 = request.context['sha1']
        # expose the version to nested (arch) controllers
        request.context['distro_version'] = self.distro_version

    @expose('json', generic=True)
    def index(self):
        if self.distro_version not in self.project.distro_versions:
            abort(404)

        response = {}
        for arch in self.project.archs:
            names = {
                binary.name
                for binary in models.Binary.filter_by(
                    project=self.project,
                    distro_version=self.distro_version,
                    distro=self.distro_name,
                    ref=self.ref,
                    sha1=self.sha1,
                    arch=arch).all()
            }
            # only include architectures that actually have binaries
            if names:
                response[arch] = list(names)
        return response

    @index.when(method='POST', template='json')
    def index_post(self):
        error('/errors/not_allowed', 'POST requests to this url are not allowed')

    @expose()
    def _lookup(self, name, *remainder):
        # only HEAD/GET requests 404 on unknown versions; other methods
        # still get an ArchController
        if request.method in ['HEAD', 'GET'] and \
                self.distro_version not in self.project.distro_versions:
            abort(404)
        return ArchController(name), remainder


class DistroController(object):
    """
    Controller for /binaries/.../<distro>/ URLs: maps each distro version to
    the architectures that have binaries for it.
    """

    def __init__(self, distro_name):
        self.distro_name = distro_name
        self.project = models.Project.get(request.context['project_id'])
        self.ref = request.context['ref']
        self.sha1 = request.context['sha1']
        # expose the distro to nested (version) controllers
        request.context['distro'] = distro_name

    @expose('json', generic=True)
    def index(self):
        matches = models.Binary.filter_by(
            project=self.project,
            distro=self.distro_name,
            ref=self.ref,
            sha1=self.sha1).all()

        if not matches:
            abort(404)

        response = {
            version: list({b.arch for b in matches if b.distro_version == version})
            for version in {b.distro_version for b in matches}
        }
        if not response:
            abort(404)
        return response

    @index.when(method='POST', template='json')
    def index_post(self):
        error('/errors/not_allowed', 'POST requests to this url are not allowed')

    @expose()
    def _lookup(self, name, *remainder):
        return DistroVersionController(name), remainder
*remainder): 87 | return DistroVersionController(name), remainder 88 | -------------------------------------------------------------------------------- /chacra/tests/config.py: -------------------------------------------------------------------------------- 1 | from pecan.hooks import TransactionHook 2 | from chacra import models 3 | 4 | 5 | # Server Specific Configurations 6 | server = { 7 | 'port': '8080', 8 | 'host': '0.0.0.0' 9 | } 10 | 11 | # Pecan Application Configurations 12 | app = { 13 | 'root': 'chacra.controllers.root.RootController', 14 | 'modules': ['chacra'], 15 | 'static_root': '%(confdir)s/public', 16 | # 'default_renderer': 'json', 17 | 'guess_content_type_from_ext': False, 18 | 'template_path': '%(confdir)s/../templates', 19 | 'hooks': [ 20 | TransactionHook( 21 | models.start, 22 | models.start_read_only, 23 | models.commit, 24 | models.rollback, 25 | models.clear 26 | ), 27 | ], 28 | 'debug': False, 29 | #'errors': { 30 | # 404: '/error/404', 31 | # '__force_dict__': True 32 | #} 33 | } 34 | 35 | logging = { 36 | 'loggers': { 37 | 'root': {'level': 'INFO', 'handlers': ['console']}, 38 | 'chacra': {'level': 'DEBUG', 'handlers': ['console']}, 39 | 'pecan.commands.serve': {'level': 'DEBUG', 'handlers': ['console']}, 40 | 'py.warnings': {'handlers': ['console']}, 41 | '__force_dict__': True 42 | }, 43 | 'handlers': { 44 | 'console': { 45 | 'level': 'DEBUG', 46 | 'class': 'logging.StreamHandler', 47 | 'formatter': 'color' 48 | } 49 | }, 50 | 'formatters': { 51 | 'simple': { 52 | 'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]' 53 | '[%(threadName)s] %(message)s') 54 | }, 55 | 'color': { 56 | '()': 'pecan.log.ColorFormatter', 57 | 'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]' 58 | '[%(threadName)s] %(message)s'), 59 | '__force_dict__': True 60 | } 61 | } 62 | } 63 | 64 | sqlalchemy = { 65 | # You may use SQLite for testing 66 | 'url': 'sqlite:///dev.db', 67 | # When you set up PostreSQL, it will look more like: 68 | #'url': 
class Project(Base):
    """
    A named project (e.g. 'ceph') owning binaries and repositories.

    Most properties aggregate the distinct values of one attribute across
    this project's Binary or Repo rows.
    """

    __tablename__ = 'projects'
    id = Column(Integer, primary_key=True)
    name = Column(String(256), nullable=False, unique=True, index=True)

    def __init__(self, name):
        self.name = name

    def _distinct_binary_values(self, attribute):
        """Unique values of ``Binary.<attribute>`` for this project's binaries."""
        column = getattr(Binary, attribute)
        return list(set(
            getattr(b, attribute) for b in
            Binary.query.distinct(column).filter_by(project=self).all()
        ))

    def _distinct_repo_values(self, attribute):
        """Unique values of ``Repo.<attribute>`` for this project's repos."""
        return list(set(getattr(r, attribute) for r in self.repos.all()))

    @property
    def archs(self):
        return self._distinct_binary_values('arch')

    @property
    def distro_versions(self):
        return self._distinct_binary_values('distro_version')

    @property
    def distros(self):
        return self._distinct_binary_values('distro')

    @property
    def refs(self):
        return self._distinct_binary_values('ref')

    @property
    def sha1s(self):
        return self._distinct_binary_values('sha1')

    @property
    def flavors(self):
        return self._distinct_binary_values('flavor')

    @property
    def built_repos(self):
        # a repo counts as "built" once it has a filesystem path assigned;
        # SQLAlchemy requires `!= None` (not `is not None`) here
        return self.repos.filter(Repo.path != None)  # noqa: E711

    @property
    def repo_refs(self):
        return self._distinct_repo_values('ref')

    @property
    def repo_sha1s(self):
        return self._distinct_repo_values('sha1')

    @property
    def repo_distros(self):
        return self._distinct_repo_values('distro')

    @property
    def repo_distro_versions(self):
        return self._distinct_repo_values('distro_version')

    def __repr__(self):
        # BUG FIX: the format string had lost its '<Project %r>' template,
        # so `'' % self.name` raised TypeError whenever repr() was taken
        try:
            return '<Project %r>' % self.name
        except DetachedInstanceError:
            return '<Project detached>'

    def __json__(self):
        """Map each ref to the distinct sha1s that have binaries for it."""
        json_ = {}
        for ref in self.refs:
            json_[ref] = list(set(
                b.sha1 for b in
                Binary.query.distinct(Binary.sha1).filter_by(
                    ref=ref, project=self).all()
            ))
        return json_


def get_or_create(name, **kw):
    """
    Return the existing Project named *name*, or a new (uncommitted) one.

    NOTE(review): extra keyword arguments are accepted but ignored, and the
    new instance is not committed here — new instances reach the session via
    the auto-add listener in chacra.models; confirm callers commit.
    """
    project = Project.filter_by(name=name).first()
    if not project:
        project = Project(name=name)
    return project
| # Server Specific Configurations 6 | server = { 7 | 'port': '8080', 8 | 'host': '0.0.0.0' 9 | } 10 | 11 | # Pecan Application Configurations 12 | app = { 13 | 'root': 'chacra.controllers.root.RootController', 14 | 'modules': ['chacra'], 15 | 'default_renderer': 'json', 16 | 'hooks': [ 17 | TransactionHook( 18 | models.start, 19 | models.start_read_only, 20 | models.commit, 21 | models.rollback, 22 | models.clear 23 | ), 24 | RequestViewerHook(), 25 | ], 26 | 'debug': True, 27 | } 28 | 29 | logging = { 30 | 'loggers': { 31 | 'root': {'level': 'INFO', 'handlers': ['console']}, 32 | 'chacra': {'level': 'DEBUG', 'handlers': ['console']}, 33 | 'pecan.commands.serve': {'level': 'DEBUG', 'handlers': ['console']}, 34 | 'py.warnings': {'handlers': ['console']}, 35 | '__force_dict__': True 36 | }, 37 | 'handlers': { 38 | 'console': { 39 | 'level': 'DEBUG', 40 | 'class': 'logging.StreamHandler', 41 | 'formatter': 'color' 42 | } 43 | }, 44 | 'formatters': { 45 | 'simple': { 46 | 'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]' 47 | '[%(threadName)s] %(message)s') 48 | }, 49 | 'color': { 50 | '()': 'pecan.log.ColorFormatter', 51 | 'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]' 52 | '[%(threadName)s] %(message)s'), 53 | '__force_dict__': True 54 | } 55 | } 56 | } 57 | 58 | sqlalchemy = { 59 | 'url': 'sqlite:///dev.db', 60 | 'echo': True, 61 | 'echo_pool': True, 62 | 'pool_recycle': 3600, 63 | 'encoding': 'utf-8' 64 | } 65 | 66 | # location for storing uploaded binaries 67 | binary_root = '%(confdir)s/public' 68 | repos_root = '%(confdir)s/repos' 69 | distributions_root = '%(confdir)s/distributions' 70 | 71 | # When True it will set the headers so that Nginx can serve the download 72 | # instead of Pecan. 
73 | delegate_downloads = False 74 | 75 | # Basic HTTP Auth credentials 76 | api_user = 'admin' 77 | api_key = 'secret' 78 | 79 | # Celery options 80 | # How often (in seconds) the database should be queried for repos that need to 81 | # be rebuilt 82 | polling_cycle = 15 83 | 84 | # Once a "create repo" task is called, how many seconds (if any) to wait before actually 85 | # creating the repository 86 | quiet_time = 30 87 | 88 | 89 | # Use this to define how distributions files will be created per project 90 | distributions = { 91 | "defaults": { 92 | "DebIndices": "Packages Release . .gz .bz2", 93 | "DscIndices": "Sources Release .gz .bz2", 94 | "Contents": ".gz .bz2", 95 | "Origin": "RedHat", 96 | "Description": "", 97 | "Architectures": "amd64 armhf i386 source", 98 | "Suite": "stable", 99 | "Components": "main", 100 | }, 101 | "ceph": { 102 | "Description": "Ceph distributed file system", 103 | }, 104 | } 105 | -------------------------------------------------------------------------------- /chacra/controllers/util.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta, UTC 2 | import logging 3 | from pecan import conf 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | 8 | def repository_is_automatic(project_name, repo_config=None): 9 | repo_config = repo_config or getattr(conf, 'repos', {}) 10 | logger.debug('checking if repository is automatic for project: %s', project_name) 11 | # every repo is automatic by default unless explicitly configured otherwise 12 | if repo_config.get(project_name, {}).get('automatic', True): 13 | logger.info('project: %s is configured for automatic repositories', project_name) 14 | return True 15 | logger.info('project: %s has automatic repository feature disabled', project_name) 16 | return False 17 | 18 | 19 | def last_seen(timestamp): 20 | now = datetime.now(UTC) 21 | difference = now - timestamp.replace(tzinfo=UTC) 22 | formatted = 
ReadableSeconds(difference.seconds) 23 | return "%s ago" % formatted 24 | 25 | 26 | class ReadableSeconds(object): 27 | 28 | def __init__(self, seconds): 29 | self.original_seconds = seconds 30 | 31 | @property 32 | def relative(self): 33 | """ 34 | Generate a relative datetime object based on current seconds 35 | """ 36 | return datetime(1, 1, 1) + timedelta(seconds=self.original_seconds) 37 | 38 | def __str__(self): 39 | return "{years}{months}{days}{hours}{minutes}{seconds}".format( 40 | years=self.years, 41 | months=self.months, 42 | days=self.days, 43 | hours=self.hours, 44 | minutes=self.minutes, 45 | seconds=self.seconds, 46 | ).rstrip(' ,') 47 | 48 | @property 49 | def years(self): 50 | # Subtract 1 here because the earliest datetime() is 1/1/1 51 | years = self.relative.year - 1 52 | year_str = 'years' if years > 1 else 'year' 53 | if years: 54 | return "%d %s, " % (years, year_str) 55 | return "" 56 | 57 | @property 58 | def months(self): 59 | # Subtract 1 here because the earliest datetime() is 1/1/1 60 | months = self.relative.month - 1 61 | month_str = 'months' if months > 1 else 'month' 62 | if months: 63 | return "%d %s, " % (months, month_str) 64 | return "" 65 | 66 | @property 67 | def days(self): 68 | # Subtract 1 here because the earliest datetime() is 1/1/1 69 | days = self.relative.day - 1 70 | day_str = 'days' if days > 1 else 'day' 71 | if days: 72 | return "%d %s, " % (days, day_str) 73 | return "" 74 | 75 | @property 76 | def hours(self): 77 | hours = self.relative.hour 78 | hour_str = 'hours' if hours > 1 else 'hour' 79 | if hours: 80 | return "%d %s, " % (self.relative.hour, hour_str) 81 | return "" 82 | 83 | @property 84 | def minutes(self): 85 | minutes = self.relative.minute 86 | minutes_str = 'minutes' if minutes > 1 else 'minute' 87 | if minutes: 88 | return "%d %s, " % (self.relative.minute, minutes_str) 89 | return "" 90 | 91 | @property 92 | def seconds(self): 93 | seconds = self.relative.second 94 | seconds_str = 'seconds' if 
seconds > 1 else 'second' 95 | if seconds: 96 | return "%d %s, " % (self.relative.second, seconds_str) 97 | return "" 98 | 99 | -------------------------------------------------------------------------------- /deploy/playbooks/roles/common/tasks/postgresql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure database service is up 3 | service: 4 | name: postgresql 5 | state: started 6 | become: true 7 | 8 | - name: get postgres version number 9 | shell: 10 | cmd: dpkg -l | awk '{print $2;}' | grep -P 'postgresql-\d\d' | sed 's/.*-//' 11 | register: postgres_version 12 | 13 | - name: allow users to connect locally 14 | become: true 15 | lineinfile: 16 | dest: /etc/postgresql/{{ postgres_version.stdout }}/main/pg_hba.conf 17 | regexp: '^host\s+all\s+all\s+127.0.0.1/32' 18 | line: 'host all all 127.0.0.1/32 md5' 19 | backrefs: yes 20 | register: pg_hba_conf 21 | 22 | - service: 23 | name: postgresql 24 | state: restarted 25 | become: true 26 | when: pg_hba_conf.changed 27 | 28 | # FIXME: flag/option needed 29 | # this is a bit of chicken-or-egg problem. If the user for the app does 30 | # exist but we failed to set the password in both postgresql and the python 31 | # config file then we will fail to reset it next time around. This would 32 | # leave the app without the access to the database. 
33 | - name: generate pseudo-random password for the database connection 34 | shell: python3 -c "import os, base64; print(base64.b64encode(os.urandom(30))[:${length}].decode())" 35 | register: db_password 36 | changed_when: false 37 | 38 | - name: Make {{ app_name }} user 39 | postgresql_user: 40 | name: "{{ app_name }}" 41 | password: "{{ db_password.stdout }}" 42 | role_attr_flags: SUPERUSER 43 | login_user: postgres 44 | become_user: postgres 45 | become: true 46 | 47 | - name: Make {{ app_name }} database 48 | postgresql_db: 49 | name: "{{ app_name }}" 50 | owner: "{{ app_name }}" 51 | state: present 52 | login_user: postgres 53 | become_user: postgres 54 | become: true 55 | 56 | - name: ensure database service is up 57 | service: 58 | name: postgresql 59 | state: started 60 | become: true 61 | 62 | - name: create the prod_db config file with the db password 63 | template: 64 | src: ../templates/prod_db.py.j2 65 | dest: "{{ app_home }}/src/{{ app_name }}/prod_db.py" 66 | notify: 67 | - restart app 68 | 69 | - name: write a .pgpass file so we can back up the db 70 | lineinfile: 71 | path: "/home/{{ ansible_ssh_user }}/.pgpass" 72 | owner: "{{ ansible_ssh_user }}" 73 | group: "{{ ansible_ssh_user }}" 74 | mode: 0600 75 | create: yes 76 | state: present 77 | line: '127.0.0.1:5432:{{ app_name }}:{{ app_name }}:{{ db_password.stdout }}' 78 | 79 | # this needs to be here because it needs the new db password 80 | - name: create the prod alembic.ini file 81 | template: 82 | src: ../templates/alembic-prod.ini.j2 83 | dest: "{{ app_home }}/src/{{ app_name }}/alembic-prod.ini" 84 | 85 | - name: check if database for app needs populating 86 | # this should be configurable/optional in the playbook 87 | command: psql -t chacra -c "SELECT COUNT(*) FROM projects;" 88 | become_user: postgres 89 | become: true 90 | register: database_is_populated 91 | ignore_errors: true 92 | changed_when: "database_is_populated.rc != 0" 93 | 94 | - name: populate the database for {{ app_name }} 
95 | when: "database_is_populated.rc == 1" 96 | command: "{{ app_home }}/bin/pecan populate {{ app_home }}/src/{{ app_name }}/prod.py" 97 | environment: 98 | ALEMBIC_CONFIG: "{{ app_home }}/src/{{ app_name }}/alembic-prod.ini" 99 | -------------------------------------------------------------------------------- /chacra/metrics.py: -------------------------------------------------------------------------------- 1 | """ 2 | A wrapper around python-statsd to automatically configure outgoing metrics so 3 | that it can include, at the highest level:: 4 | 5 | secret.hostname.module 6 | 7 | Production Graphite instances might require a secret key which should be 8 | prefixed to outgoing metrics, if not configured, metrics would start off from 9 | the short hostname:: 10 | 11 | hostname.module 12 | 13 | Instantiating metrics work similarly to creating a logger instance:: 14 | 15 | from metrics import Counter 16 | 17 | counter = Counter(__name__) 18 | 19 | counter += 1 20 | 21 | It is allowed to alter the naming appending a suffix:: 22 | 23 | 24 | def my_oddly_named_function(): 25 | counter = Counter(__name__, suffix='expensive_function') 26 | while True: 27 | ... 28 | counter +=1 29 | 30 | This would append 'package.module' with `expensive_function`, but 31 | just the suffix (the last part of the scheme). For a production host, this 32 | counter could look like:: 33 | 34 | secret.chacra1.chacra.asynch.expensive_function 35 | 36 | 37 | Although not encouraged, it is possible to fully override with a custom name:: 38 | 39 | 40 | counter = Counter('custom.path') 41 | 42 | Which would cause to override the module path (secret key and hostname would 43 | prevail):: 44 | 45 | secret.chacra1.custom.path 46 | 47 | 48 | ..note:: All these assume a local statsd instance running, so there is no need 49 | to provide a connection object. If there is ever a need to customize the 50 | connection, it will need to be provided here. 
def short_hostname(_socket=None):
    """
    Returns the config option ``short_hostname`` if found.

    If ``short_hostname`` is not defined, it obtains the
    hostname of the socket and cuts off the domain part
    of its FQDN.

    :param _socket: optional socket-module stand-in (for testing)
    """
    short_hostname = getattr(pecan.conf, 'short_hostname', None)
    if not short_hostname:
        _socket = _socket or socket
        return _socket.gethostname().split('.', 1)[0]
    return short_hostname


def get_prefix(conf=None, host=None):
    """
    Build the metric prefix: ``<secret>.<host>`` when a ``graphite_api_key``
    is configured, otherwise just ``<host>``.
    """
    host = host or short_hostname()
    conf = conf or pecan.conf
    secret = getattr(conf, 'graphite_api_key', None)

    if secret:
        prefix = "%s.%s" % (secret, host)
    else:
        prefix = "%s" % host

    return prefix


def append_suffix(name, suffix):
    """
    Helper to append a suffix to the end of the name. Useful for
    private functions or signatures that require distinct separation from the
    module.
    """
    name_parts = name.split('.')
    name_parts.append(suffix)
    return '.'.join(name_parts)


def _full_metric_name(name, suffix=None):
    # single place for the prefix/suffix naming logic that Counter, Gauge
    # and Timer previously triplicated
    if suffix:
        name = append_suffix(name, suffix)
    return "%s.%s" % (get_prefix(), name)


def Counter(name, suffix=None):
    """Return a statsd Counter named ``<prefix>.<name>[.<suffix>]``."""
    return statsd.Counter(_full_metric_name(name, suffix))


def Gauge(name, suffix=None):
    """Return a statsd Gauge named ``<prefix>.<name>[.<suffix>]``."""
    return statsd.Gauge(_full_metric_name(name, suffix))


def Timer(name, suffix=None):
    """Return a statsd Timer named ``<prefix>.<name>[.<suffix>]``."""
    return statsd.Timer(_full_metric_name(name, suffix))
state=present 46 | with_items: "{{ system_packages }}" 47 | tags: 48 | - packages 49 | 50 | - name: Create a virtualenv with latest pip. 51 | pip: name=pip virtualenv={{ app_home }} extra_args='--upgrade' virtualenv_python=python3 52 | 53 | - name: "pip+git install {{ app_name }} into virtualenv." 54 | pip: name='git+https://github.com/ceph/chacra@{{ branch }}#egg=chacra' virtualenv={{ app_home }} editable=yes state=forcereinstall 55 | changed_when: False 56 | 57 | - name: create the prod config file 58 | action: template src=../templates/prod.py.j2 dest={{ app_home }}/src/{{ app_name }}/prod.py 59 | 60 | - name: create the prod api credentials file 61 | template: 62 | src: prod_api_creds.py.j2 63 | dest: "{{ app_home }}/src/{{ app_name }}/prod_api_creds.py" 64 | when: api_key is defined and api_user is defined 65 | 66 | - name: stop chacra-celery 67 | service: 68 | name: chacra-celery 69 | state: stopped 70 | become: true 71 | failed_when: false 72 | 73 | - name: stop chacra-celerybeat 74 | service: 75 | name: chacra-celerybeat 76 | state: stopped 77 | become: true 78 | failed_when: false 79 | 80 | - name: create the callbacks configuration file 81 | template: 82 | src: prod_callbacks.py.j2 83 | dest: "{{ app_home }}/src/{{ app_name }}/prod_callbacks.py" 84 | when: callback_user is defined and callback_key is defined 85 | 86 | - name: install python requirements in virtualenv 87 | pip: 88 | requirements: "{{ app_home }}/src/{{ app_name }}/requirements.txt" 89 | state: present 90 | virtualenv: "{{ app_home }}" 91 | notify: 92 | - restart app 93 | - restart chacra-celery 94 | - restart chacra-celerybeat 95 | 96 | - name: ensure file paths are set properly 97 | become: true 98 | file: 99 | path: "{{ item }}" 100 | state: directory 101 | owner: "{{ ansible_ssh_user }}" 102 | group: "{{ ansible_ssh_user }}" 103 | recurse: yes 104 | with_items: 105 | - "{{ binary_root }}" 106 | - "{{ repos_root }}" 107 | 108 | - include_tasks: postgresql.yml 109 | tags: 110 | - postgres 
111 | 112 | - include_tasks: systemd.yml 113 | tags: 114 | - systemd 115 | 116 | - include_tasks: nginx.yml 117 | tags: 118 | - nginx 119 | -------------------------------------------------------------------------------- /chacra/models/__init__.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from sqlalchemy import create_engine, MetaData, event 3 | from sqlalchemy.orm import scoped_session, sessionmaker, object_session, mapper 4 | from sqlalchemy.ext.declarative import declarative_base 5 | from pecan import conf 6 | 7 | 8 | class _EntityBase(object): 9 | """ 10 | A custom declarative base that provides some Elixir-inspired shortcuts. 11 | """ 12 | 13 | @classmethod 14 | def filter_by(cls, *args, **kwargs): 15 | return cls.query.filter_by(*args, **kwargs) 16 | 17 | @classmethod 18 | def get(cls, *args, **kwargs): 19 | return cls.query.get(*args, **kwargs) 20 | 21 | def flush(self, *args, **kwargs): 22 | object_session(self).flush([self], *args, **kwargs) 23 | 24 | def delete(self, *args, **kwargs): 25 | object_session(self).delete(self, *args, **kwargs) 26 | 27 | def as_dict(self): 28 | return dict((k, v) for k, v in self.__dict__.items() 29 | if not k.startswith('_')) 30 | 31 | def update_from_json(self, data): 32 | """ 33 | We received a JSON blob with updated metadata information 34 | that needs to update some fields 35 | """ 36 | for key in data.keys(): 37 | setattr(self, key, data[key]) 38 | 39 | 40 | Session = scoped_session(sessionmaker()) 41 | metadata = MetaData() 42 | Base = declarative_base(cls=_EntityBase) 43 | Base.query = Session.query_property() 44 | 45 | 46 | # Listeners: 47 | 48 | @event.listens_for(mapper, 'init') 49 | def auto_add(target, args, kwargs): 50 | Session.add(target) 51 | 52 | 53 | def update_timestamp(mapper, connection, target): 54 | """ 55 | Automate the 'modified' attribute when a model changes 56 | """ 57 | target.modified = datetime.datetime.now(datetime.UTC) 58 | 59 | 
def get_or_create(model, **kwargs):
    """
    Return the first row of ``model`` matching ``kwargs``; when no row
    matches, create one (the session 'init' listener adds it) and commit.
    """
    existing = model.filter_by(**kwargs).first()
    if existing:
        return existing
    created = model(**kwargs)
    commit()
    return created
8 | repo = Repo( 9 | p, 10 | "firefly", 11 | "ubuntu", 12 | "trusty", 13 | sha1="head", 14 | ) 15 | repo.path = "some_path" 16 | session.commit() 17 | result = session.app.get('/repos/foobar/firefly/head/ubuntu/') 18 | assert result.status_int == 200 19 | assert len(result.json) == 1 20 | assert result.json == ["trusty"] 21 | 22 | def test_does_not_show_duplicate_distro_versions(self, session): 23 | p = Project('foobar') 24 | repo = Repo( 25 | p, 26 | "firefly", 27 | "ubuntu", 28 | "trusty", 29 | sha1="head", 30 | ) 31 | repo.path = "some_path" 32 | repo2 = Repo( 33 | p, 34 | "firefly", 35 | "ubuntu", 36 | "trusty", 37 | sha1="head", 38 | ) 39 | repo2.path = "some_path" 40 | session.commit() 41 | result = session.app.get('/repos/foobar/firefly/head/ubuntu/') 42 | assert result.status_int == 200 43 | assert len(result.json) == 1 44 | assert result.json == ["trusty"] 45 | 46 | def test_shows_only_versions_for_ref(self, session): 47 | p = Project('foobar') 48 | repo = Repo( 49 | p, 50 | "firefly", 51 | "ubuntu", 52 | "trusty", 53 | sha1="head", 54 | ) 55 | repo.path = "some_path" 56 | repo2 = Repo( 57 | p, 58 | "hammer", 59 | "ubuntu", 60 | "precise", 61 | sha1="head", 62 | ) 63 | repo2.path = "some_path" 64 | session.commit() 65 | result = session.app.get('/repos/foobar/firefly/head/ubuntu/') 66 | assert result.status_int == 200 67 | assert len(result.json) == 1 68 | assert result.json == ["trusty"] 69 | 70 | def test_distro_does_not_exist(self, session): 71 | p = Project('foobar') 72 | repo = Repo( 73 | p, 74 | "firefly", 75 | "ubuntu", 76 | "trusty", 77 | sha1="head", 78 | ) 79 | repo.path = "some_path" 80 | session.commit() 81 | result = session.app.get('/repos/foobar/firefly/head/centos/', 82 | expect_errors=True) 83 | assert result.status_int == 404 84 | 85 | def test_shows_distro_that_has_no_built_repos(self, session): 86 | p = Project('foobar') 87 | Repo( 88 | p, 89 | "firefly", 90 | "ubuntu", 91 | "trusty", 92 | sha1="head", 93 | ) 94 | session.commit() 95 | 
class TestRepoModification(object):
    """Exercise the automatic ``modified`` timestamp behavior on Repo."""

    def setup_method(self):
        self.p = Project('ceph')

    def test_created_slaps_a_modified_attr(self, session):
        """A freshly constructed repo already carries a datetime in ``modified``."""
        fresh = Repo(
            self.p,
            ref='firefly',
            distro='centos',
            distro_version='7',
        )
        assert fresh.modified.timetuple()

    def test_update_triggers_a_change_in_modified(self, session):
        """Committing a change bumps ``modified`` via the before_update listener."""
        created = Repo(
            self.p,
            ref='firefly',
            distro='centos',
            distro_version='7',
        )
        created_at = created.modified.time()
        session.commit()
        fetched = Repo.get(1)
        fetched.distro = 'rhel'
        session.commit()

        assert created_at < fetched.modified.time()

    def test_created_no_binaries_is_not_generic(self, session):
        """Without binaries a repo cannot be considered generic."""
        fresh = Repo(
            self.p,
            ref='firefly',
            distro='centos',
            distro_version='7',
        )
        assert fresh.is_generic is False
class TestRepoArch(object):
    """Verify ``Repo.archs`` reflects the arches of the repo's binaries."""

    def setup_method(self):
        self.p = Project('ceph')

    def test_x86_64(self, session):
        Binary(
            'ceph-1.0.rpm',
            self.p,
            distro='centos',
            distro_version='7',
            arch='x86_64',
        )
        session.commit()
        assert Repo.get(1).archs == ['x86_64']

    def test_multiple_archs(self, session):
        for binary_arch in ('aarch64', 'x86_64'):
            Binary(
                'ceph-1.0.deb',
                self.p,
                distro='ubuntu',
                distro_version='trusty',
                arch=binary_arch,
            )
        session.commit()
        assert sorted(Repo.get(1).archs) == sorted(['aarch64', 'x86_64'])
@shared_task(base=base.SQLATask)
def create_rpm_repo(repo_id):
    """
    Go create or update repositories with specific IDs.

    Builds the on-disk directory layout, symlinks every binary that belongs
    to the repo (plus any configured 'extra' project binaries), and runs
    createrepo to generate the metadata.
    """
    directories = ['SRPMS', 'noarch', 'x86_64', 'aarch64']
    # get the root path for storing repos
    # TODO: Is it possible we can get an ID that doesn't exist anymore?
    repo = models.Repo.get(repo_id)
    post_building(repo)
    timer = Timer(__name__, suffix="create.rpm.%s" % repo.metric_name)
    counter = Counter(__name__, suffix="create.rpm.%s" % repo.metric_name)
    timer.start()
    logger.info("processing repository: %s", repo)
    if util.repository_is_disabled(repo.project.name):
        logger.info("will not process repository: %s", repo)
        repo.needs_update = False
        repo.is_queued = False
        # persist the cleared flags before bailing out; without this commit
        # the flag change is lost (create_deb_repo commits here as well)
        models.commit()
        return

    # Determine paths for this repository
    paths = util.repo_paths(repo)
    repo_dirs = [os.path.join(paths['absolute'], d) for d in directories]

    # Before doing work that might take very long to complete, set the repo
    # path in the object and mark needs_update as False
    repo.path = paths['absolute']
    repo.is_updating = True
    repo.is_queued = False
    repo.needs_update = False
    models.commit()

    # this is safe to do, behind the scenes it is just trying to create them if
    # they don't exist and it will include the 'absolute' path
    for d in repo_dirs:
        util.makedirs(d)

    # now that structure is done, we need to symlink the RPMs that belong
    # to this repo so that we can create the metadata.
    conf_extra_repos = util.get_extra_repos(repo.project.name, repo.ref)
    extra_binaries = []
    for project_name, project_refs in conf_extra_repos.items():
        for ref in project_refs:
            extra_binaries += util.get_extra_binaries(
                project_name,
                repo.distro,
                repo.distro_version,
                ref=ref if ref != 'all' else None
            )

    all_binaries = extra_binaries + [b for b in repo.binaries]
    timer.intermediate('collection')
    for binary in all_binaries:
        source = binary.path
        arch_directory = util.infer_arch_directory(binary.name)
        destination_dir = os.path.join(paths['absolute'], arch_directory)
        destination = os.path.join(destination_dir, binary.name)
        try:
            if not os.path.exists(destination):
                os.symlink(source, destination)
        except OSError:
            logger.exception('could not symlink')

    _createrepo(paths['absolute'], repo_dirs, repo.distro)

    logger.info("finished processing repository: %s", repo)
    repo.is_updating = False
    models.commit()
    timer.stop()
    counter += 1
    post_ready(repo)
def database_connection():
    """
    A very simple connection that should succeed if there is a good/correct
    database connection.

    Raises SystemCheckError when the query fails with an OperationalError.
    """
    try:
        models.Project.get(1)
    except OperationalError as exc:
        # Python 3 exceptions have no ``.message`` attribute — the original
        # ``exc.message`` raised AttributeError and masked the real error.
        raise SystemCheckError(
            "Could not connect or retrieve information from the database: %s" % exc)
def is_healthy():
    """
    Perform all the registered system checks and detect if anything fails so
    that the system can send a callback indicating an OK status
    """
    healthy = True
    for system_check in system_checks:
        try:
            system_check()
        except Exception:
            # the first failing check marks the whole node unhealthy
            logger.exception('system is unhealthy')
            healthy = False
            break
    return healthy
class TestCallbackInvalidConf(object):
    """
    ``recurring.callback()`` must return False when the callback
    credentials (user and/or key) are not fully configured.
    """

    def setup_method(self):
        conf.callback_url = 'http://localhost/callback'

    def teardown_method(self):
        # callback settings added in setup are "sticky", this ensures they are
        # reset for other tests that rely on pristine conf settings
        conftest.reload_config()

    def test_missing_user_and_key(self):
        # neither callback_user nor callback_key is configured
        assert recurring.callback("{}", 'ceph') is False

    def test_missing_user(self):
        # a key alone is not enough
        conf.callback_key = 'key'
        assert recurring.callback("{}", 'ceph') is False

    def test_missing_key(self):
        # a user alone is not enough
        conf.callback_user = 'admin'
        assert recurring.callback("{}", 'ceph') is False
settings 90 | conftest.reload_config() 91 | 92 | def test_invalid_json(self): 93 | # omg this is so invalid 94 | assert recurring.callback({'error': Exception}, 'ceph') is False 95 | 96 | def test_requests_correct_project_url(self, monkeypatch, recorder): 97 | r = recorder() 98 | monkeypatch.setattr(recurring.requests, 'post', r) 99 | recurring.callback("{}", 'ceph') 100 | result = r.recorder_calls[0]['args'][0] 101 | assert result == os.path.join(conf.callback_url, 'ceph', '') 102 | 103 | def test_requests_http_error(self, monkeypatch, fake): 104 | # not confident this is testing what really happens. Could not get the 105 | # 'retry' behavior from Celery. This just executes the code path to get 106 | # to the exception being re-raised by Celery, which might be enough 107 | def bad_post(*a, **kw): 108 | raise requests.HTTPError('I suck') 109 | monkeypatch.setattr(recurring.requests, 'post', bad_post) 110 | with pytest.raises(requests.HTTPError): 111 | recurring.callback("{}", 'ceph') 112 | -------------------------------------------------------------------------------- /chacra/tests/controllers/repos/test_projects.py: -------------------------------------------------------------------------------- 1 | from chacra.models import Project, Repo 2 | 3 | 4 | class TestProjectsController(object): 5 | 6 | def test_get_index_no_projects(self, session): 7 | result = session.app.get('/repos/') 8 | assert result.status_int == 200 9 | assert result.json == {} 10 | 11 | def test_project_no_built_repos(self, session): 12 | Project('foobar') 13 | session.commit() 14 | result = session.app.get('/repos/') 15 | assert result.status_int == 200 16 | assert result.json == {"foobar": []} 17 | 18 | def test_single_project_with_built_repos(self, session): 19 | p = Project('foobar') 20 | repo = Repo( 21 | p, 22 | "firefly", 23 | "ubuntu", 24 | "trusty", 25 | ) 26 | repo.path = "some_path" 27 | session.commit() 28 | result = session.app.get('/repos/') 29 | assert result.status_int == 200 30 
| assert len(result.json) == 1 31 | assert result.json == {"foobar": ["firefly"]} 32 | 33 | def test_does_show_refs_without_built_repos(self, session): 34 | p = Project('foobar') 35 | repo = Repo( 36 | p, 37 | "firefly", 38 | "ubuntu", 39 | "trusty", 40 | ) 41 | Repo( 42 | p, 43 | "hammer", 44 | "ubuntu", 45 | "trusty", 46 | ) 47 | repo.path = "some_path" 48 | session.commit() 49 | result = session.app.get('/repos/') 50 | assert result.status_int == 200 51 | assert len(result.json) == 1 52 | assert sorted(result.json["foobar"]) == sorted(["firefly", "hammer"]) 53 | 54 | def test_does_list_projects_without_built_repos(self, session): 55 | p = Project('foobar') 56 | Repo( 57 | p, 58 | "firefly", 59 | "ubuntu", 60 | "trusty", 61 | ) 62 | Project('baz') 63 | session.commit() 64 | result = session.app.get('/repos/') 65 | assert result.status_int == 200 66 | assert len(result.json) == 2 67 | 68 | 69 | class TestProjectController(object): 70 | 71 | def test_get_single_project(self, session): 72 | p = Project('foobar') 73 | repo = Repo( 74 | p, 75 | "firefly", 76 | "ubuntu", 77 | "trusty", 78 | sha1="HEAD", 79 | ) 80 | repo.path = "some_path" 81 | session.commit() 82 | result = session.app.get('/repos/foobar/') 83 | assert result.status_int == 200 84 | assert len(result.json) == 1 85 | assert result.json == {"firefly": ["HEAD"]} 86 | 87 | def test_project_does_not_exist(self, session): 88 | result = session.app.get('/repos/foo/', expect_errors=True) 89 | assert result.status_int == 404 90 | 91 | def test_project_has_no_built_repos(self, session): 92 | Project('foobar') 93 | result = session.app.get('/repos/foobar/') 94 | assert result.status_int == 200 95 | 96 | def test_does_show_refs_without_built_repos(self, session): 97 | p = Project('foobar') 98 | repo = Repo( 99 | p, 100 | "firefly", 101 | "ubuntu", 102 | "trusty", 103 | sha1="HEAD", 104 | ) 105 | Repo( 106 | p, 107 | "hammer", 108 | "ubuntu", 109 | "trusty", 110 | sha1="HEAD", 111 | ) 112 | repo.path = "some_path" 
113 | session.commit() 114 | result = session.app.get('/repos/foobar/') 115 | assert result.status_int == 200 116 | assert len(result.json) == 2 117 | 118 | def test_show_multiple_refs_with_built_repos(self, session): 119 | p = Project('foobar') 120 | repo = Repo( 121 | p, 122 | "firefly", 123 | "ubuntu", 124 | "trusty", 125 | sha1="HEAD", 126 | ) 127 | repo2 = Repo( 128 | p, 129 | "hammer", 130 | "ubuntu", 131 | "trusty", 132 | sha1="HEAD", 133 | ) 134 | repo.path = "some_path" 135 | repo2.path = "some_path" 136 | session.commit() 137 | result = session.app.get('/repos/foobar/') 138 | assert result.status_int == 200 139 | assert len(result.json) == 2 140 | assert result.json == {"firefly": ["HEAD"], "hammer": ["HEAD"]} 141 | -------------------------------------------------------------------------------- /chacra/models/repos.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | import socket 4 | from pecan import conf 5 | from sqlalchemy import Column, Integer, String, ForeignKey, Boolean, DateTime 6 | from sqlalchemy.orm import relationship, backref, deferred 7 | from sqlalchemy.event import listen, remove 8 | from sqlalchemy.orm.exc import DetachedInstanceError 9 | from chacra.models import Base, update_timestamp 10 | from chacra.models.types import JSONType 11 | 12 | 13 | class Repo(Base): 14 | 15 | __tablename__ = 'repos' 16 | id = Column(Integer, primary_key=True) 17 | path = Column(String(256)) 18 | ref = Column(String(256), index=True) 19 | sha1 = Column(String(256), index=True, default='head') 20 | distro = Column(String(256), nullable=False, index=True) 21 | distro_version = Column(String(256), nullable=False, index=True) 22 | flavor = Column(String(256), nullable=False, index=True, default='default') 23 | modified = Column(DateTime, index=True) 24 | signed = Column(Boolean(), default=False) 25 | needs_update = Column(Boolean(), default=True) 26 | is_updating = Column(Boolean(), 
default=False) 27 | is_queued = Column(Boolean(), default=False) 28 | type = Column(String(12)) 29 | size = Column(Integer, default=0) 30 | extra = deferred(Column(JSONType(), default={})) 31 | 32 | project_id = Column(Integer, ForeignKey('projects.id')) 33 | project = relationship('Project', backref=backref('repos', lazy='dynamic')) 34 | 35 | def __init__(self, project, ref, distro, distro_version, **kwargs): 36 | self.project = project 37 | self.ref = ref 38 | self.distro = distro 39 | self.distro_version = distro_version 40 | self.modified = datetime.datetime.now(datetime.UTC) 41 | self.sha1 = kwargs.get('sha1', 'head') 42 | self.flavor = kwargs.get('flavor', 'default') 43 | 44 | def __repr__(self): 45 | try: 46 | return "".format( 47 | name=self.project.name, 48 | ref=self.ref, 49 | sha1=self.sha1, 50 | distro=self.distro, 51 | version=self.distro_version, 52 | ) 53 | except DetachedInstanceError: 54 | return '' 55 | 56 | def __json__(self): 57 | return dict( 58 | path=self.path, 59 | project_name=self.project.name, 60 | ref=self.ref, 61 | sha1=self.sha1, 62 | distro=self.distro, 63 | distro_version=self.distro_version, 64 | modified=self.modified, 65 | signed=self.signed, 66 | needs_update=self.needs_update, 67 | is_updating=self.is_updating, 68 | is_queued=self.is_queued, 69 | type=self.type, 70 | size=self.size, 71 | flavor=self.flavor, 72 | archs=self.archs, 73 | extra=self.extra, 74 | ) 75 | 76 | @property 77 | def uri(self): 78 | return "{name}/{ref}/{sha1}/{distro}/{version}/flavors/{flavor}/".format( 79 | name=self.project.name, 80 | ref=self.ref, 81 | sha1=self.sha1, 82 | distro=self.distro, 83 | version=self.distro_version, 84 | flavor=self.flavor, 85 | ) 86 | 87 | @property 88 | def base_url(self): 89 | hostname = getattr(conf, 'hostname', socket.gethostname()) 90 | host_url = 'https://%s/' % hostname 91 | return os.path.join(host_url, 'r', self.uri, '') 92 | 93 | @property 94 | def is_generic(self): 95 | for binary in self.binaries: 96 | if 
binary.is_generic: 97 | return True 98 | return False 99 | 100 | @property 101 | def metric_name(self): 102 | return "repos.%s.%s.%s" % ( 103 | self.project.name, 104 | self.distro, 105 | self.distro_version, 106 | ) 107 | 108 | def infer_type(self): 109 | """ 110 | Sometimes a repo may not know what 'type' it is (a deb or rpm) 111 | so this goes looking at its binaries for an answer 112 | """ 113 | for binary in self.binaries: 114 | return binary._get_repo_type() 115 | 116 | @property 117 | def archs(self): 118 | return list(set(b.arch for b in self.binaries)) 119 | 120 | def add_timestamp_listeners(): 121 | # listen for timestamp modifications 122 | listen(Repo, 'before_insert', update_timestamp) 123 | listen(Repo, 'before_update', update_timestamp) 124 | 125 | def remove_timestamp_listeners(): 126 | remove(Repo, 'before_insert', update_timestamp) 127 | remove(Repo, 'before_update', update_timestamp) 128 | 129 | add_timestamp_listeners() 130 | -------------------------------------------------------------------------------- /chacra/tests/controllers/test_distros.py: -------------------------------------------------------------------------------- 1 | from chacra.models import Project, Binary 2 | 3 | 4 | class TestDistroVersionController(object): 5 | 6 | def test_distro_should_list_unique_versions(self, session): 7 | p = Project('ceph') 8 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='trusty', arch='i386') 9 | Binary('ceph-1.0.1.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='trusty', arch='i386') 10 | session.commit() 11 | result = session.app.get('/binaries/ceph/main/head/ubuntu/trusty/') 12 | assert set(result.json["i386"]) == set([u'ceph-1.0.0.deb', u'ceph-1.0.1.deb']) 13 | 14 | 15 | class TestDistroController(object): 16 | 17 | def test_list_a_distro_version(self, session): 18 | p = Project('ceph') 19 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='trusty', 
arch='i386') 20 | session.commit() 21 | result = session.app.get('/binaries/ceph/main/head/ubuntu/') 22 | assert result.json == {'trusty': ['i386']} 23 | 24 | def test_list_a_distinct_distro_version(self, session): 25 | p = Project('ceph') 26 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='trusty', arch='i386') 27 | Binary('ceph-1.0.0.deb', p, ref='firefly', sha1="head", distro='debian', distro_version='wheezy', arch='i386') 28 | session.commit() 29 | result = session.app.get( 30 | '/binaries/ceph/main/head/debian/', 31 | expect_errors=True) 32 | assert result.status_int == 404 33 | 34 | def test_list_unkown_ref_for_distro(self, session): 35 | p = Project('ceph') 36 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='trusty', arch='i386') 37 | session.commit() 38 | result = session.app.get('/binaries/ceph/head/BOGUS/ubuntu/', expect_errors=True) 39 | assert result.status_int == 404 40 | 41 | def test_distro_should_list_unique_versions(self, session): 42 | p = Project('ceph') 43 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='trusty', arch='i386') 44 | Binary('ceph-1.0.1.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='trusty', arch='i386') 45 | session.commit() 46 | result = session.app.get('/binaries/ceph/main/head/ubuntu/') 47 | assert result.json == {'trusty': ['i386']} 48 | 49 | def test_list_a_distro_version_not_found(self, session): 50 | p = Project('ceph') 51 | Binary('ceph-1.0.0.rpm', p, ref='main', sha1="head", distro='centos', distro_version='el6', arch='i386') 52 | session.commit() 53 | result = session.app.get('/binaries/ceph/main/head/ubuntu/', expect_errors=True) 54 | assert result.status_int == 404 55 | 56 | def test_list_a_distinct_distro(self, session): 57 | p = Project('ceph') 58 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='trusty', arch='i386') 59 | 
Binary('ceph-1.0.0.rpm', p, ref='main', sha1="head", distro='centos', distro_version='el6', arch='i386') 60 | session.commit() 61 | result = session.app.get('/binaries/ceph/main/head/ubuntu/') 62 | assert result.json == {'trusty': ['i386']} 63 | 64 | def test_single_distro_should_have_one_item(self, session): 65 | p = Project('ceph') 66 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='12.04', arch='i386') 67 | session.commit() 68 | result = session.app.get('/binaries/ceph/main/head/ubuntu/') 69 | assert result.status_int == 200 70 | assert len(result.json) == 1 71 | 72 | def test_refs_should_not_pollute_others(self, session): 73 | p = Project('ceph') 74 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='precise', arch='i386') 75 | Binary('ceph-1.0.0.deb', p, ref='jewel', sha1="head", distro='ubuntu', distro_version='xenial', arch='arm64') 76 | session.commit() 77 | result = session.app.head('/binaries/ceph/main/head/ubuntu/xenial/arm64/', expect_errors=True) 78 | assert result.status_int == 404 79 | 80 | def test_distros_should_not_pollute_others(self, session): 81 | p = Project('ceph') 82 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='precise', arch='i386') 83 | Binary('ceph-1.0.0.deb', p, ref='jewel', sha1="head", distro='debian', distro_version='xenial', arch='arm64') 84 | session.commit() 85 | result = session.app.head('/binaries/ceph/main/head/debian/', expect_errors=True) 86 | assert result.status_int == 404 87 | 88 | def test_single_distro_should_have_a_name(self, session): 89 | p = Project('ceph') 90 | Binary('ceph-1.0.0.deb', p, ref='main', sha1="head", distro='ubuntu', distro_version='12.04', arch='i386') 91 | session.commit() 92 | result = session.app.get('/binaries/ceph/main/head/ubuntu/') 93 | assert result.json['12.04'] == ['i386'] 94 | -------------------------------------------------------------------------------- 
from celery import shared_task
from chacra import models
from chacra.asynch import base, post_ready, post_building
from chacra import util
from chacra.metrics import Counter, Timer
import logging
import subprocess

logger = logging.getLogger(__name__)


@shared_task(base=base.SQLATask)
def create_deb_repo(repo_id):
    """
    Go create or update repositories with specific IDs.

    Collects the repo's own binaries plus any extra/generic/combined
    binaries from related projects, then feeds each one to ``reprepro``
    to build the Debian repository on disk.

    :param repo_id: primary key of the ``Repo`` row to (re)build
    """
    # get the root path for storing repos
    # TODO: Is it possible we can get an ID that doesn't exist anymore?
    repo = models.Repo.get(repo_id)
    timer = Timer(__name__, suffix="create.deb.%s" % repo.metric_name)
    counter = Counter(__name__, suffix="create.deb.%s" % repo.metric_name)
    timer.start()
    post_building(repo)
    logger.info("processing repository: %s", repo)
    if util.repository_is_disabled(repo.project.name):
        logger.info("will not process repository: %s", repo)
        repo.needs_update = False
        repo.is_queued = False
        models.commit()
        return

    # Determine paths for this repository
    paths = util.repo_paths(repo)

    # Before doing work that might take very long to complete, set the repo
    # path in the object, mark needs_update as False, and mark it as being
    # updated so we prevent piling up if other binaries are being posted
    repo.path = paths['absolute']
    repo.is_updating = True
    repo.is_queued = False
    repo.needs_update = False
    models.commit()

    # determine if other repositories might need to be queried to add extra
    # binaries (repos are tied to binaries which are all related with refs,
    # archs, distros, and distro versions.
    conf_extra_repos = util.get_extra_repos(repo.project.name, repo.ref)
    combined_versions = util.get_combined_repos(repo.project.name)
    extra_binaries = []

    # See if there are any generic/universal binaries so that they can be
    # automatically added from the current project
    for binary in util.get_extra_binaries(
            repo.project.name,
            repo.distro,
            None,
            distro_versions=['generic', 'universal', 'any'],
            ref=repo.ref,
            sha1=repo.sha1):
        extra_binaries.append(binary)

    for project_name, project_refs in conf_extra_repos.items():
        for ref in project_refs:
            logger.info('fetching binaries for project: %s, ref: %s', project_name, ref)
            found_binaries = util.get_extra_binaries(
                project_name,
                None,
                repo.distro_version,
                distro_versions=combined_versions,
                # 'all' is a wildcard meaning "do not filter by ref"
                ref=ref if ref != 'all' else None
            )
            extra_binaries += found_binaries

            # See if there are any generic/universal binaries so that they can be
            # automatically added from projects coming from extra repos
            for binary in util.get_extra_binaries(
                    project_name,
                    repo.distro,
                    None,
                    distro_versions=['generic', 'universal', 'any'],
                    ref=ref if ref != 'all' else None):
                extra_binaries.append(binary)

    # check for the option to 'combine' repositories with different
    # debian/ubuntu versions
    for distro_version in combined_versions:
        logger.info(
            'fetching distro_version %s for project: %s',
            distro_version,
            repo.project.name
        )
        # When combining distro_versions we cannot filter by distribution as
        # well, otherwise it will be an impossible query. E.g. "get wheezy,
        # precise and trusty but only for the Ubuntu distro"
        extra_binaries += util.get_extra_binaries(
            repo.project.name,
            None,
            distro_version,
            ref=repo.ref,
            sha1=repo.sha1
        )

    # try to create the absolute path to the repository if it doesn't exist
    util.makedirs(paths['absolute'])

    all_binaries = extra_binaries + [b for b in repo.binaries]
    timer.intermediate('collection')

    for binary in set(all_binaries):
        # XXX This is really not a good alternative but we are not going to be
        # using .changes for now although we can store it.
        if binary.extension == 'changes':
            continue
        try:
            commands = util.reprepro_commands(
                paths['absolute'],
                binary,
                distro_versions=combined_versions,
                fallback_version=repo.distro_version
            )
        except KeyError:  # probably a tar.gz or similar file that should not be added directly
            continue
        for command in commands:
            logger.info('running command: %s', ' '.join(command))
            result = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout, stderr = result.communicate()
            # communicate() returns bytes; decode unconditionally because the
            # logging loops below split on a str separator (previously the
            # decode only happened on failure, so every *successful* run
            # raised TypeError on bytes.split('\n'))
            stdout = stdout.decode()
            stderr = stderr.decode()
            # use != 0 so negative return codes (child killed by a signal)
            # are also treated as failures
            if result.returncode != 0:
                logger.error('failed to add binary %s', binary.name)
            for line in stdout.split('\n'):
                logger.info(line)
            for line in stderr.split('\n'):
                logger.warning(line)

    logger.info("finished processing repository: %s", repo)
    repo.is_updating = False
    models.commit()
    timer.stop()
    counter += 1
    post_ready(repo)
from celery import Celery
from celery.signals import worker_init
from chacra import models

from pecan.configuration import Config

try:
    from logging.config import dictConfig as load_logging_config
except ImportError:
    from logutils.dictconfig import dictConfig as load_logging_config  # noqa


logger = logging.getLogger(__name__)


def configure_celery_logging():
    """
    Apply the pecan ``logging`` configuration (if any) inside the celery
    worker process, optionally surfacing DeprecationWarnings in debug mode.
    """
    logging = pecan.conf.get('logging', {})
    debug = pecan.conf.get('debug', False)
    if logging:
        if debug:
            try:
                #
                # By default, Python 2.7+ silences DeprecationWarnings.
                # However, if conf.app.debug is True, we should probably ensure
                # that users see these types of warnings.
                #
                from logging import captureWarnings
                captureWarnings(True)
                warnings.simplefilter("default", DeprecationWarning)
            except ImportError:
                # No captureWarnings on Python 2.6, DeprecationWarnings are on
                pass

        if isinstance(logging, Config):
            # dictConfig needs a plain dict, not a pecan Config object
            logging = logging.to_dict()
        if 'version' not in logging:
            logging['version'] = 1
        load_logging_config(logging)


@worker_init.connect
def bootstrap_pecan(signal, sender, **kw):
    """
    Load the pecan config (from $PECAN_CONFIG or the in-tree default) when
    a celery worker starts, then configure logging and the DB models.
    """
    try:
        config_path = os.environ['PECAN_CONFIG']
    except KeyError:
        here = os.path.abspath(os.path.dirname(__file__))
        # XXX this will not hold true when installing as a binary
        config_path = os.path.abspath(os.path.join(here, '../config/config.py'))

    pecan.configuration.set_config(config_path, overwrite=True)
    configure_celery_logging()
    # Once configuration is set we need to initialize the models so that we can connect
    # to the DB with a configured mapper.
    models.init_model()


app = Celery(
    'chacra.asynch',
    broker='pyamqp://guest@localhost//',
    include=['chacra.asynch.rpm', 'chacra.asynch.debian', 'chacra.asynch.recurring']
)


# polling_cycle may not be set yet if pecan was never bootstrapped in this
# process (e.g. when beat starts outside a worker), so bootstrap on demand
try:
    seconds = pecan.conf.polling_cycle
except AttributeError:
    bootstrap_pecan(None, None)
    seconds = pecan.conf.polling_cycle

app.conf.update(
    CELERYBEAT_SCHEDULE={
        'poll-repos': {
            'task': 'chacra.asynch.recurring.poll_repos',
            'schedule': timedelta(seconds=seconds),
            'options': {'queue': 'poll_repos'}
        },
        'purge-repos': {
            'task': 'chacra.asynch.recurring.purge_repos',
            'schedule': timedelta(days=1),
        },
    },
)


# helpers
#
#
def post_status(status, repo_obj, _callback=None):
    """
    Nicer interface to send a status report on repo creation if configured.

    :param status: Any useful (single-word) string to describe the current
                   status of a repo. Like: 'queued', 'building', 'ready',
                   'requested'
    :param repo_obj: The ``Repo`` model object; its ``__json__()`` dict, plus
                     ``status``, ``chacra_url`` and ``url`` fields, is sent
                     to the configured callback
    :param _callback: Optional override for the callback callable (defaults
                      to ``recurring.callback.apply_async``); mainly useful
                      for testing
    """
    # no-op unless a callback_url has been configured
    if not getattr(pecan.conf, 'callback_url', False):
        return
    from chacra.asynch import recurring
    # this needs a better implementation
    hostname = getattr(pecan.conf, 'hostname', socket.gethostname())
    host_url = 'https://%s/' % hostname
    # NOTE(review): os.path.join on URLs is platform-dependent; works on
    # POSIX but would break on Windows — confirm this service is POSIX-only
    api_url = os.path.join(host_url, 'repos', '')
    repos_url = os.path.join(host_url, 'r', '')
    callback = _callback or recurring.callback.apply_async
    repo_obj_dict = repo_obj.__json__()
    repo_obj_dict['status'] = status
    repo_obj_dict['chacra_url'] = os.path.join(api_url, repo_obj.uri, '')
    repo_obj_dict['url'] = os.path.join(repos_url, repo_obj.uri, '')
    project_name = repo_obj_dict['project_name']

    # Some fields from the object may not be JSON serializable by `requests`
    # (like datetime objects) so we rely on Pecan to deal with those and encode
    # them for us
    data = pecan.jsonify.encode(repo_obj_dict)
    callback(
        args=(data, project_name),
    )


def post_requested(repo):
    """Report the repo as 'requested'."""
    post_status('requested', repo)


def post_queued(repo):
    """Report the repo as 'queued'."""
    post_status('queued', repo)


def post_building(repo):
    """Report the repo as 'building'."""
    post_status('building', repo)


def post_ready(repo):
    """Report the repo as 'ready'."""
    post_status('ready', repo)


def post_deleted(repo):
    """Report the repo as 'deleted'."""
    post_status('deleted', repo)


def post_if_healthy():
    """
    If system is healthy, make an asynchronous request to a configured remote
    system. Requires the following in the config file::

        health_ping = True
        health_ping_url = "https://check.example.com"

    """
    health_ping = getattr(pecan.conf, 'health_ping', False)
    health_ping_url = getattr(pecan.conf, 'health_ping_url', False)

    if not health_ping or not health_ping_url:
        logger.info("System is not configured to send health ping.")
        return

    from chacra.asynch import recurring, checks

    if not checks.is_healthy():
        logger.error("System is not healthy and will not send health ping.")
        return

    hostname = getattr(pecan.conf, 'hostname', socket.gethostname())
    url = os.path.join(health_ping_url, hostname, '')
    logger.info("Posting health ping to: %s", url)
    recurring.callback.apply_async(
        args=({}, None),
        kwargs=dict(url=url),
    )
    def test_created_equals_modified_first_time_around(self, session):
        """On first creation, created and modified carry the same timestamp."""
        binary = Binary(
            'ceph-1.0.rpm',
            self.p,
            distro='centos',
            distro_version='7',
            arch='x86_64',
        )
        session.commit()
        # multiple assertions so that we don't fail because of a msec
        # difference when the object gets modified. We don't care about msec,
        # we just want to make sure that the same date/time is kept when first
        # created
        assert binary.created.year == binary.modified.year
        assert binary.created.month == binary.modified.month
        assert binary.created.day == binary.modified.day
        assert binary.created.hour == binary.modified.hour
        assert binary.created.minute == binary.modified.minute

    def test_modified_gets_updated(self, session):
        """Changing a field and committing bumps the modified timestamp."""
        binary = Binary(
            'ceph-1.0.rpm',
            self.p,
            distro='centos',
            distro_version='7',
            arch='x86_64',
        )
        session.commit()
        initial_modified = binary.modified.time()
        binary.ref = 'main'
        session.commit()
        binary = Binary.get(1)

        assert initial_modified < binary.modified.time()

    def test_modified_is_older_than_created(self, session):
        # NOTE(review): the name reads backwards — the assertion checks that
        # created stays *older* than modified after an update
        binary = Binary(
            'ceph-1.0.rpm',
            self.p,
            distro='centos',
            distro_version='7',
            arch='x86_64',
        )
        session.commit()
        initial_created = binary.created.time()
        binary.ref = 'main'
        session.commit()
        binary = Binary.get(1)

        assert initial_created < binary.modified.time()

    def test_binary_uses_explicit_repo(self, session):
        """An explicitly passed Repo object is attached to the binary."""
        repo = Repo(self.p, 'firefly', 'centos', '7')
        binary = Binary(
            'ceph-1.0.rpm',
            self.p,
            distro='centos',
            distro_version='7',
            arch='x86_64',
            repo=repo,
        )
        session.commit()
        binary = Binary.get(1)
        assert binary.repo.ref == 'firefly'
        assert binary.repo.distro_version == '7'

    def test_binary_create_repo_object(self, session):
        """Without an explicit repo, a matching Repo is created implicitly."""
        binary = Binary(
            'ceph-1.0.rpm',
            self.p,
            ref='firefly',
            distro='centos',
            distro_version='7',
            arch='x86_64',
        )
        session.commit()
        binary = Binary.get(1)
        assert binary.repo.ref == 'firefly'
        assert binary.repo.distro_version == '7'

    def test_binary_reuse_repo_object(self, session):
        """An existing Repo with matching coordinates is reused, not duplicated."""
        Repo(self.p, 'hammer', 'centos', '7')
        session.commit()
        binary = Binary(
            'ceph-1.0.rpm',
            self.p,
            ref='hammer',
            distro='centos',
            distro_version='7',
            arch='x86_64',
        )
        session.commit()
        binary = Binary.get(1)
        assert binary.repo.ref == 'hammer'
        assert binary.repo.distro_version == '7'

    def test_binary_sets_repo_type(self, session):
        """An .rpm binary marks its repo as type 'rpm'."""
        repo = Repo(self.p, 'hammer', 'centos', '7')
        session.commit()
        Binary(
            'ceph-1.0.rpm',
            self.p,
            ref='hammer',
            distro='centos',
            distro_version='7',
            arch='x86_64',
        )
        session.commit()
        repo = Repo.get(1)
        assert repo.type == 'rpm'

    def test_binary_type_for_changes(self, session):
        """A .changes file marks its repo as type 'deb'."""
        repo = Repo(self.p, 'hammer', 'debian', 'wheezy')
        Binary(
            'ceph-1.0.changes',
            self.p,
            ref='hammer',
            distro='debian',
            distro_version='wheezy',
            arch='amd64',
        )
        assert repo.type == 'deb'

    def test_binary_type_for_dsc(self, session):
        """A .dsc source file marks its repo as type 'deb'."""
        repo = Repo(self.p, 'hammer', 'debian', 'wheezy')
        Binary(
            'ceph-1.0.dsc',
            self.p,
            ref='hammer',
            distro='debian',
            distro_version='wheezy',
            arch='amd64',
        )
        assert repo.type == 'deb'

    def test_binary_type_for_tar(self, session):
        """A tarball posted to a debian distro still yields a 'deb' repo."""
        repo = Repo(self.p, 'hammer', 'debian', 'wheezy')
        Binary(
            'ceph-1.0.tar.gz',
            self.p,
            ref='hammer',
            distro='debian',
            distro_version='wheezy',
            arch='amd64',
        )
        assert repo.type == 'deb'


class TestGenericBinaries(object):
    # Tests for the 'generic' distro_version, which flags a repo as generic

    def setup_method(self):
        self.p = Project('ceph')

    def test_binary_is_generic(self, session):
        """distro_version='generic' marks the implicit repo as generic."""
        binary = Binary(
            'ceph-1.0.deb',
            self.p,
            ref='hammer',
            distro='debian',
            distro_version='generic',
            arch='amd64',
        )
        assert binary.repo.is_generic is True
    def test_binary_is_not_generic(self, session):
        """A concrete distro_version does not mark the repo as generic."""
        binary = Binary(
            'ceph-1.0.deb',
            self.p,
            ref='hammer',
            distro='debian',
            distro_version='wheezy',
            arch='amd64',
        )
        assert binary.repo.is_generic is False


class TestFlavorsController(object):
    # Tests for the /flavors/ listing endpoint

    def test_list_single_flavor(self, session):
        """A single flavored binary is listed under its flavor key."""
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64',
            flavor='tcmalloc'
        )
        session.commit()
        result = session.app.get(
            '/binaries/ceph/giant/head/centos/el6/x86_64/flavors/',
        )
        assert result.json['tcmalloc'] == ['ceph-1.0.0.rpm']

    def test_list_unique_flavor(self, session):
        """The same binary name on another distro_version is not duplicated."""
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64',
            flavor='tcmalloc'
        )
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el7',
            arch='x86_64',
            flavor='tcmalloc'
        )
        session.commit()
        result = session.app.get(
            '/binaries/ceph/giant/head/centos/el6/x86_64/flavors/',
        )
        assert result.json['tcmalloc'] == ['ceph-1.0.0.rpm']

    def test_list_one_flavor(self, session):
        """Flavors from other distro_versions do not leak into the listing."""
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64',
            flavor='tcmalloc'
        )
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el7',
            arch='x86_64',
            flavor='default'
        )
        session.commit()
        result = session.app.get(
            '/binaries/ceph/giant/head/centos/el6/x86_64/flavors/',
        )
        # default flavor is for a different distro_version in this case
        # and should not show up
        assert list(result.json.keys()) == ['tcmalloc']


class TestFlavorController(object):
    # Tests for a specific /flavors/<name>/ endpoint

    def test_list_flavor_one_binary(self, session):
        """A binary posted with a flavor is served under that flavor url."""
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64',
            flavor='tcmalloc'
        )
        session.commit()
        result = session.app.get('/binaries/ceph/giant/head/centos/el6/x86_64/flavors/tcmalloc/')
        assert result.json['ceph-1.0.0.rpm']

    def test_list_flavor_one_binary_on_arch(self, session):
        """A flavored binary also shows up on the plain arch url."""
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64',
            flavor='tcmalloc'
        )
        session.commit()
        result = session.app.get('/binaries/ceph/giant/head/centos/el6/x86_64/')
        assert result.json['ceph-1.0.0.rpm']

    def test_list_default_flavor_one_binary_on_arch(self, session):
        # NOTE(review): identical setup to the previous test and still posts
        # flavor='tcmalloc' despite the "default flavor" name — verify intent
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64',
            flavor='tcmalloc'
        )
        session.commit()
        result = session.app.get('/binaries/ceph/giant/head/centos/el6/x86_64/')
        assert result.json['ceph-1.0.0.rpm']

    def test_list_default_flavor_one_binary(self, session):
        # note how 'flavor' is not specified
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64',
        )
        session.commit()
        result = session.app.get('/binaries/ceph/giant/head/centos/el6/x86_64/flavors/default/')
        assert result.json['ceph-1.0.0.rpm']

    def test_flavor_not_found_with_head(self, session):
        """HEAD on a flavor url for a missing distro_version returns 404."""
        project = Project('ceph')
        Binary(
            'ceph-1.0.0.rpm',
            project,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64'
        )
        session.commit()
        result = session.app.head(
            '/binaries/ceph/giant/head/centos/el7/x86_64/flavors/default/', expect_errors=True)
        assert result.status_int == 404

    def test_single_flavor_should_have_one_item(self, session):
        """A single binary yields a single-entry JSON body."""
        p = Project('ceph')
        Binary(
            'ceph-9.0.0-0.el6.x86_64.rpm',
            p,
            ref='giant',
            sha1="head",
            distro='centos',
            distro_version='el6',
            arch='x86_64'
        )
        session.commit()
        result = session.app.get('/binaries/ceph/giant/head/centos/el6/x86_64/flavors/default/')
        assert result.status_int == 200
        assert len(result.json) == 1

    def test_single_binary_file_creates_resource(self, session, tmpdir):
        """POSTing a file to a flavor url creates the binary (201)."""
        import pecan
        pecan.conf.binary_root = str(tmpdir)
        result = session.app.post(
            '/binaries/ceph/giant/head/ceph/el6/x86_64/flavors/frufru/',
            params={'force': 1},
            upload_files=[('file', 'ceph-9.0.0-0.el6.x86_64.rpm', b_('hello tharrrr'))]
        )
        assert result.status_int == 201
from chacra.models import Binary
from chacra import models, util
from chacra.controllers import error
from chacra.controllers.util import repository_is_automatic
from chacra.controllers.binaries import BinaryController
from chacra.controllers.binaries import flavors as _flavors
from chacra.auth import basic_auth
from pathlib import Path


logger = logging.getLogger(__name__)


class ArchController(object):
    """
    Pecan controller for the arch segment of a binary url
    (/binaries/<project>/<ref>/<sha1>/<distro>/<distro_version>/<arch>/).
    """

    def __init__(self, arch):
        self.arch = arch
        self.project = models.Project.get(request.context['project_id'])
        self.distro = request.context['distro']
        self.distro_version = request.context['distro_version']
        self.ref = request.context['ref']
        self.sha1 = request.context['sha1']
        request.context['arch'] = self.arch

    def _arch_binaries(self):
        # Single place for the arch-scoped binary query shared by the
        # HEAD and GET handlers
        return self.project.binaries.filter_by(
            distro=self.distro,
            distro_version=self.distro_version,
            ref=self.ref,
            sha1=self.sha1,
            arch=self.arch).all()

    @expose(generic=True, template='json')
    def index(self):
        # only HEAD/GET/POST are implemented below
        abort(405)

    @index.when(method='HEAD', template='json')
    def index_head(self):
        """404 when no binaries exist for this arch, empty body otherwise."""
        binaries = self._arch_binaries()

        if not binaries:
            abort(404)
        return dict()

    @index.when(method='GET', template='json')
    def index_get(self):
        """Return a mapping of binary name -> binary for this arch."""
        binaries = self._arch_binaries()

        if not binaries:
            abort(404)

        # build the response from the already-fetched list instead of
        # re-running the identical query a second time
        return {b.name: b for b in binaries}

    def get_binary(self, name):
        """Return the matching Binary for this url's coordinates, or None."""
        return Binary.filter_by(
            name=name, project=self.project, arch=self.arch,
            distro=self.distro, distro_version=self.distro_version,
            ref=self.ref, sha1=self.sha1
        ).first()

    @secure(basic_auth)
    @index.when(method='POST', template='json')
    def index_post(self):
        """
        Accept a binary upload in the 'file' POST param; create or (with
        'force') overwrite the Binary record and its file on disk.
        """
        contents = request.POST.get('file', False)
        if contents is False:
            error('/errors/invalid/', 'no file object found in "file" param in POST request')
        file_obj = contents.file
        filename = contents.filename
        self.binary = self.get_binary(filename)
        self.binary_name = filename
        if self.binary is not None:
            if os.path.exists(self.binary.path):
                # refuse silent overwrites of an existing on-disk file
                if request.POST.get('force', False) is False:
                    error('/errors/invalid', 'resource already exists and "force" key was not used')

        full_path = self.save_file(file_obj)

        if self.binary is None:
            path = full_path
            distro = request.context['distro']
            distro_version = request.context['distro_version']
            arch = request.context['arch']
            ref = request.context['ref']
            sha1 = request.context['sha1']

            self.binary = Binary(
                self.binary_name, self.project, arch=arch,
                distro=distro, distro_version=distro_version,
                ref=ref, sha1=sha1, path=path, size=os.path.getsize(path)
            )
        else:
            # NOTE(review): only path is refreshed on overwrite; size is not
            # updated — confirm whether stale sizes matter to consumers
            self.binary.path = full_path

        # check if this binary is interesting for other configured projects,
        # and if so, then mark those other repos so that they can be re-built
        self.mark_related_repos()
        return dict()

    def mark_related_repos(self):
        """
        Mark repos of related projects (per configuration) as needing an
        update, creating repos for them when none exist yet.
        """
        related_projects = util.get_related_projects(self.project.name)
        repos = []
        projects = []
        for project_name, refs in related_projects.items():
            p = models.projects.get_or_create(name=project_name)
            projects.append(p)
            repo_query = []
            if refs == ['all']:
                # we need all the repos available
                repo_query = models.Repo.filter_by(project=p).all()
            else:
                # NOTE(review): repo_query is reassigned per ref, so only the
                # last ref's repos survive to the append below — confirm this
                # is intended
                for ref in refs:
                    repo_query = models.Repo.filter_by(project=p, ref=ref).all()
            if repo_query:
                for r in repo_query:
                    repos.append(r)

        if not repos:
            # there are no repositories associated with this project, so go ahead
            # and create one so that it can be queried by the celery task later
            for project in projects:
                repo = models.Repo(
                    project,
                    self.ref,
                    self.distro,
                    self.distro_version,
                    sha1=self.sha1,
                )
                repo.needs_update = repository_is_automatic(project.name)
                repo.type = self.binary._get_repo_type()

        else:
            for repo in repos:
                repo.needs_update = repository_is_automatic(repo.project.name)
                if repo.type is None:
                    repo.type = self.binary._get_repo_type()

    def create_directory(self):
        """
        Build (and mkdir -p) the on-disk directory for this upload, mirroring
        the url path under pecan.conf.binary_root.
        """
        urlpath = Path(request.url)
        # remove binary_name if it exists
        if urlpath.name == self.binary_name:
            urlpath = urlpath.parent
        # replace '...binaries' with binary_root
        rootindex = urlpath.parts.index('binaries')
        path = Path(pecan.conf.binary_root, *(urlpath.parts[rootindex+1:]))
        if not os.path.isdir(path):
            os.makedirs(path)
        return path

    def save_file(self, file_obj):
        """
        Stream the uploaded file to disk; sets response.status to 200 for
        updates or 201 for new resources and returns the destination path.
        """
        dir_path = self.create_directory()
        if self.binary_name in os.listdir(dir_path):
            # resource exists so we will update it
            response.status = 200
        else:
            # we will create a resource
            response.status = 201

        destination = os.path.join(dir_path, self.binary_name)

        with open(destination, 'wb') as f:
            file_iterable = FileIter(file_obj)
            for chunk in file_iterable:
                f.write(chunk)

        # return the full path to the saved object:
        return destination

    @expose()
    def _lookup(self, name, *remainder):
        return BinaryController(name), remainder

    flavors = _flavors.FlavorsController()
import logging
import os
import shutil

from pecan import expose, abort, request, response
from pecan.secure import secure
from pecan_notario import validate

from chacra.models import Project
from chacra.controllers import error
from chacra.auth import basic_auth
from chacra import schemas, asynch
from chacra import util


logger = logging.getLogger(__name__)


class FlavorsController(object):
    """
    Controller for the /flavors/ segment of a repo url; lists the flavors
    available for the current project/distro/distro_version/ref/sha1.
    """

    def __init__(self):
        self.distro_version = request.context['distro_version']
        self.project = Project.get(request.context['project_id'])
        self.distro_name = request.context['distro']
        self.ref = request.context['ref']
        self.sha1 = request.context['sha1']
        self.repos = self.project.repos.filter_by(
            distro=self.distro_name,
            distro_version=self.distro_version,
            ref=self.ref,
            sha1=self.sha1,
        ).all()
        # unique flavor names across the matching repos
        self.flavors = list(set([r.flavor for r in self.repos]))

    @expose('json', generic=True)
    def index(self):
        """Return the list of available flavor names."""
        return self.flavors

    @index.when(method='POST', template='json')
    def index_post(self):
        error('/errors/not_allowed', 'POST requests to this url are not allowed')

    @expose()
    def _lookup(self, flavor, *remainder):
        # only descend into flavors that actually exist
        if flavor not in self.flavors:
            abort(404)
        return RepoController(self.distro_version, flavor), remainder


class RepoController(object):
    """
    Controller for a single repository resource, optionally scoped to a
    flavor (defaults to the 'default' flavor).
    """

    def __init__(self, distro_version, flavor=None):
        self.distro_version = distro_version
        self.project = Project.get(request.context['project_id'])
        self.distro_name = request.context['distro']
        self.ref = request.context['ref']
        self.sha1 = request.context['sha1']
        if not request.context.get('distro_version'):
            request.context['distro_version'] = self.distro_version
        self.flavor = flavor
        self.repo_obj = self.project.repos.filter_by(
            distro=self.distro_name,
            distro_version=self.distro_version,
            ref=self.ref,
            sha1=self.sha1,
            flavor=self.flavor or 'default',
        ).first()

    @expose('json', generic=True)
    def index(self):
        """Return the repo object, or 404 when it does not exist."""
        if self.repo_obj is None:
            # replaced a leftover debug print() with a proper logger call
            logger.info('no repo found, aborting')
            abort(404)
        return self.repo_obj

    @secure(basic_auth)
    @index.when(method='POST', template='json')
    @validate(schemas.repo_schema, handler='/errors/schema')
    def index_post(self):
        """Update the repo from the validated JSON request body."""
        data = request.json
        self.repo_obj.update_from_json(data)
        return self.repo_obj

    @secure(basic_auth)
    @expose('json')
    def update(self):
        """
        Trigger a repository (re)build: raw repos are symlinked immediately,
        everything else is flagged for the celery worker to pick up.
        """
        if request.method == 'HEAD':
            return {}
        if request.method != 'POST':
            error(
                '/errors/not_allowed',
                'only POST request are accepted for this url'
            )
        if self.repo_obj.type == 'raw':
            # raw repos need no asynch construction. Create
            # the paths, symlink the binaries, mark them ready.
            self.repo_obj.path = util.repo_paths(self.repo_obj)['absolute']
            util.makedirs(self.repo_obj.path)
            for binary in self.repo_obj.binaries:
                src = binary.path
                dest = os.path.join(
                    self.repo_obj.path,
                    os.path.join(binary.arch, binary.name)
                )
                try:
                    if not os.path.exists(dest):
                        os.symlink(src, dest)
                except OSError:
                    logger.exception(
                        f'could not symlink raw binary {src} -> {dest}')

            self.repo_obj.needs_update = False
            asynch.post_ready(self.repo_obj)
        else:
            # Just mark the repo so that celery picks it up
            self.repo_obj.needs_update = True
            self.repo_obj.is_updating = False
            self.repo_obj.is_queued = False
            asynch.post_requested(self.repo_obj)

        return self.repo_obj

    @secure(basic_auth)
    @expose('json')
    def recreate(self):
        """Wipe the repo's on-disk path and flag it for a full rebuild."""
        if request.method == 'HEAD':
            return {}
        if request.method != 'POST':
            error(
                '/errors/not_allowed',
                'only POST request are accepted for this url'
            )
        # completely remove the path to the repository
        logger.info('removing repository path: %s', self.repo_obj.path)
        try:
            shutil.rmtree(self.repo_obj.path)
        except OSError:
            # best-effort: a missing path should not block the rebuild
            logger.warning("could not remove repo path: %s", self.repo_obj.path)

        # mark the repo so that celery picks it up
        self.repo_obj.needs_update = True
        self.repo_obj.is_updating = False
        self.repo_obj.is_queued = False

        asynch.post_requested(self.repo_obj)
        return self.repo_obj

    @secure(basic_auth)
    @index.when(method='DELETE', template='json')
    @validate(schemas.repo_schema, handler='/errors/schema')
    def index_delete(self):
        """Delete the repo, its binaries (db + disk), and possibly the project."""
        repo_path = self.repo_obj.path
        logger.info('nuke repository path: %s', repo_path)
        try:
            shutil.rmtree(repo_path)
        except OSError:
            msg = "could not remove repo path: {}".format(repo_path)
            logger.exception(msg)
            error('/errors/error/', msg)
        for binary in self.repo_obj.binaries:
            # NOTE(review): binary.binary.path suggests these are association
            # objects wrapping the actual Binary — confirm against the model
            binary_path = binary.binary.path
            if binary_path:
                try:
                    os.remove(binary_path)
                except (IOError, OSError):
                    msg = "Could not remove the binary path: %s" % binary_path
                    logger.exception(msg)
            binary.delete()
        self.repo_obj.delete()
        # drop the project entirely once it has no repos left
        if self.project.repos.count() == 0:
            self.project.delete()
        response.status = 204
        return dict()

    @secure(basic_auth)
    @expose('json')
    def extra(self):
        """Attach arbitrary JSON metadata to the repo."""
        if request.method != 'POST':
            error(
                '/errors/not_allowed',
                'only POST request are accepted for this url'
            )
        self.repo_obj.extra = request.json
        return self.repo_obj

    @expose('mako:repo.mako', content_type="text/plain")
    def repo(self):
        """Render a plain-text repo file (apt/yum source) for this repo."""
        return dict(
            project_name=self.project.name,
            base_url=self.repo_obj.base_url,
            distro_name=self.distro_name.lower(),
            distro_version=self.repo_obj.distro_version,
            type=self.repo_obj.type,
        )

    @expose()
    def _lookup(self, name, *remainder):
        # the `is not None` prevents this from being a recursive url
        if name == 'flavors' and self.flavor is None:
            return FlavorsController(), remainder
        abort(404)
__init__(self, binary_name): 18 | self.binary_name = binary_name 19 | self.project = Project.get(request.context['project_id']) 20 | self.distro_version = request.context['distro_version'] 21 | self.distro = request.context['distro'] 22 | self.arch = request.context['arch'] 23 | self.ref = request.context['ref'] 24 | self.sha1 = request.context['sha1'] 25 | self.flavor = request.context.get('flavor', 'default') 26 | self.binary = Binary.query.filter_by( 27 | name=binary_name, 28 | ref=self.ref, 29 | sha1=self.sha1, 30 | distro=self.distro, 31 | distro_version=self.distro_version, 32 | flavor=self.flavor, 33 | arch=self.arch, 34 | project=self.project).first() 35 | 36 | @expose(content_type='application/octet-stream', generic=True) 37 | def index(self): 38 | """ 39 | Special method for internal redirect URI's so that webservers (like 40 | Nginx) can serve downloads to clients while the app just delegates. 41 | This method will require an Nginx configuration that points to 42 | resources and match `binary_root` URIs:: 43 | 44 | location /home/ubuntu/repos/ { 45 | internal; 46 | alias /files/; 47 | } 48 | 49 | `alias` can be anything, it would probably make sense to have a set of rules that allow 50 | distinct URIs, like:: 51 | 52 | location /home/ubuntu/repos/rpm-firefly/ { 53 | internal; 54 | alias /files/rpm-firefly/; 55 | } 56 | 57 | 58 | There are two ways to get binaries into this app: via existing files in 59 | certain paths POSTing JSON to the arch/ endpoint, or via actual upload 60 | of the binary. So if many locations need to be supported, they each 61 | need to have a corresponding section in Nginx to be configured. 62 | """ 63 | if not self.binary: 64 | abort(404) 65 | # we need to slap some headers so Nginx can serve this 66 | # TODO: maybe disable this for testing? 67 | # XXX Maybe we don't need to set Content-Disposition here? 
68 | response.headers['Content-Disposition'] = 'attachment; filename=%s' % str(self.binary.name) 69 | if conf.delegate_downloads is False: 70 | f = open(self.binary.path, 'rb') 71 | response.app_iter = FileIter(f) 72 | else: 73 | relative_path = self.binary.path.split(pecan.conf.binary_root)[-1].strip('/') 74 | # FIXME: this should be read from configuration, this is not configurable 75 | # at the moment and relies on the nginx config being properly set 76 | path = os.path.join('/b/', relative_path) 77 | logger.info('setting path header: %s', path) 78 | response.headers['X-Accel-Redirect'] = path 79 | 80 | @secure(basic_auth) 81 | @index.when(method='POST', template='json') 82 | def index_post(self): 83 | try: 84 | data = request.json 85 | name = data.get('name') 86 | except ValueError: 87 | error('/errors/invalid/', 'could not decode JSON body') 88 | 89 | # updates the binary only if explicitly told to do so 90 | if self.binary: 91 | if not data.get('force'): 92 | error('/errors/invalid/', 'file already exists and "force" flag was not used') 93 | else: 94 | # FIXME this looks like we need to implement PUT 95 | path = data.get('path') 96 | if path: 97 | try: 98 | data['size'] = os.path.getsize(path) 99 | except OSError: 100 | logger.exception('could not retrieve size from %s' % path) 101 | data['size'] = 0 102 | self.binary.update_from_json(data) 103 | return {} 104 | 105 | # we allow empty data to be pushed 106 | if not name: 107 | error('/errors/invalid/', "could not find required key: 'name'") 108 | name = data.pop('name') 109 | path = data.get('path') 110 | 111 | if path: 112 | size = os.path.getsize(path) 113 | else: 114 | size = 0 115 | Binary( 116 | name=name, project=self.project, arch=self.arch, 117 | distro=self.distro, distro_version=self.distro_version, 118 | ref=self.ref, size=size, sha1=self.sha1 119 | ) 120 | 121 | return {} 122 | 123 | @secure(basic_auth) 124 | @index.when(method='PUT', template='json') 125 | def index_put(self): 126 | contents = 
request.POST.get('file', False) 127 | if contents is False: 128 | error('/errors/invalid/', 'no file object found in "file" param in POST request') 129 | file_obj = contents.file 130 | # this looks odd, path is not changing, but we need to 'ping' the object by 131 | # re-saving the attribute so that the listener can update the checksum and modified 132 | # timestamps 133 | self.binary.path = self.save_file(file_obj) 134 | return dict() 135 | 136 | @secure(basic_auth) 137 | @index.when(method='DELETE', template='json') 138 | def index_delete(self): 139 | if not self.binary: 140 | abort(404) 141 | binary_path = self.binary.path 142 | repo = self.binary.repo 143 | project = self.binary.project 144 | self.binary.delete() 145 | try: 146 | if binary_path: 147 | os.remove(binary_path) 148 | except (IOError, OSError): 149 | msg = "Could not remove the binary path: %s" % binary_path 150 | logger.exception(msg) 151 | error('/errors/error/', msg) 152 | if repo.binaries.count() > 0: 153 | # there are still binaries related to this repo, mark it to rebuild 154 | repo.needs_update = True 155 | else: 156 | # there are no more binaries for this repo, delete the repo 157 | repo.delete() 158 | 159 | if project.binaries.count() == 0: 160 | project.delete() 161 | 162 | response.status = 204 163 | return dict() 164 | 165 | def create_directory(self): 166 | urlpath = Path(request.url) 167 | # remove binary_name if it exists 168 | if urlpath.name == self.binary_name: 169 | urlpath = urlpath.parent 170 | # replace '...binaries' with binary_root 171 | rootindex = urlpath.parts.index('binaries') 172 | path = Path(pecan.conf.binary_root, *(urlpath.parts[rootindex+1:])) 173 | if not os.path.isdir(path): 174 | os.makedirs(path) 175 | return path 176 | 177 | def save_file(self, file_obj): 178 | # TODO: we should just use self.binary.path for this 179 | dir_path = self.create_directory() 180 | if self.binary_name in os.listdir(dir_path): 181 | # resource exists so we will update it 182 | 
response.status = 200 183 | else: 184 | # TODO: enforce this. 185 | # we will create a resource, but this SHOULD NOT HAPPEN 186 | # because we are PUT not POST 187 | response.status = 201 188 | 189 | destination = os.path.join(dir_path, self.binary_name) 190 | 191 | with open(destination, 'wb') as f: 192 | file_iterable = FileIter(file_obj) 193 | for chunk in file_iterable: 194 | f.write(chunk) 195 | 196 | # return the full path to the saved object: 197 | return destination 198 | -------------------------------------------------------------------------------- /chacra/tests/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os 3 | from pecan.testing import load_test_app 4 | 5 | import subprocess 6 | 7 | from copy import deepcopy 8 | from pecan import conf 9 | from pecan import configuration 10 | from sqlalchemy import create_engine 11 | from sqlalchemy.pool import NullPool 12 | 13 | from chacra import models as _db 14 | from chacra.tests import util 15 | import pytest 16 | 17 | 18 | DBNAME = 'chacratest' 19 | BIND = 'postgresql+psycopg2://' 20 | # for sqlite, use something like this (DBNAME is the name of the file) 21 | # DBNAME = 'chacratest.db' 22 | # BIND = 'sqlite://' 23 | 24 | 25 | def config_file(): 26 | here = os.path.abspath(os.path.dirname(__file__)) 27 | return os.path.join(here, 'config.py') 28 | 29 | 30 | def reload_config(): 31 | from pecan import configuration 32 | config = configuration.conf_from_file(config_file()).to_dict() 33 | 34 | # Add the appropriate connection string to the app config. 
35 | config['sqlalchemy'] = { 36 | 'url': '%s/%s' % (BIND, DBNAME), 37 | 'encoding': 'utf-8', 38 | 'poolclass': NullPool 39 | } 40 | 41 | configuration.set_config( 42 | config, 43 | overwrite=True 44 | ) 45 | _db.init_model() 46 | 47 | 48 | @pytest.fixture 49 | def fake(): 50 | class Fake(object): 51 | def __init__(self, *a, **kw): 52 | for k, v, in kw.items(): 53 | setattr(self, k, v) 54 | return Fake 55 | 56 | @pytest.fixture 57 | def recorder(): 58 | class Recorder(object): 59 | def __init__(self, *a, **kw): 60 | self.recorder_init_call = [] 61 | self.recorder_calls = [] 62 | for k, v, in kw.items(): 63 | setattr(self, k, v) 64 | self.recorder_init_call.append( 65 | {'args': a, 'kwargs': kw} 66 | ) 67 | def __call__(self, *a, **kw): 68 | for k, v, in kw.items(): 69 | setattr(self, k, v) 70 | self.recorder_calls.append( 71 | {'args': a, 'kwargs': kw} 72 | ) 73 | 74 | return Recorder 75 | 76 | def pytest_collectstart(collector): 77 | import os 78 | os.environ['PECAN_CONFIG'] = config_file() 79 | 80 | 81 | @pytest.fixture(scope='session') 82 | def app(request): 83 | config = configuration.conf_from_file(config_file()).to_dict() 84 | 85 | # Add the appropriate connection string to the app config. 
86 | config['sqlalchemy'] = { 87 | 'url': '%s/%s' % (BIND, DBNAME), 88 | 'encoding': 'utf-8', 89 | 'poolclass': NullPool 90 | } 91 | 92 | # Set up a fake app 93 | app = TestApp(load_test_app(config)) 94 | return app 95 | 96 | 97 | @pytest.fixture(scope='session') 98 | def connection(app, request): 99 | """Session-wide test database.""" 100 | # Connect and create the temporary database 101 | print("=" * 80) 102 | print("CREATING TEMPORARY DATABASE FOR TESTS") 103 | print("=" * 80) 104 | if BIND.startswith('postgresql'): 105 | subprocess.call(['dropdb', DBNAME]) 106 | subprocess.call(['createdb', DBNAME]) 107 | 108 | # Bind and create the database tables 109 | _db.clear() 110 | engine_url = '%s/%s' % (BIND, DBNAME) 111 | 112 | db_engine = create_engine( 113 | engine_url, 114 | encoding='utf-8', 115 | poolclass=NullPool) 116 | 117 | # AKA models.start() 118 | _db.Session.bind = db_engine 119 | _db.metadata.bind = _db.Session.bind 120 | 121 | _db.Base.metadata.create_all(db_engine) 122 | _db.commit() 123 | _db.clear() 124 | 125 | def teardown(): 126 | _db.Base.metadata.drop_all(db_engine) 127 | 128 | request.addfinalizer(teardown) 129 | 130 | # Slap our test app on it 131 | _db.app = app 132 | return _db 133 | 134 | 135 | @pytest.fixture(scope='function') 136 | def session(connection, request): 137 | """Creates a new database session for a test.""" 138 | _config = configuration.conf_from_file(config_file()).to_dict() 139 | config = deepcopy(_config) 140 | 141 | # Add the appropriate connection string to the app config. 
142 | config['sqlalchemy'] = { 143 | 'url': '%s/%s' % (BIND, DBNAME), 144 | 'encoding': 'utf-8', 145 | 'poolclass': NullPool 146 | } 147 | 148 | connection.start() 149 | 150 | def teardown(): 151 | 152 | # Tear down and dispose the DB binding 153 | connection.clear() 154 | 155 | # start a transaction 156 | engine = conf.sqlalchemy.engine 157 | conn = engine.connect() 158 | trans = conn.begin() 159 | 160 | 161 | # gather all data first before dropping anything. 162 | # some DBs lock after things have been dropped in 163 | # a transaction. 164 | if BIND.startswith('postgresql'): 165 | conn.execute("TRUNCATE TABLE %s RESTART IDENTITY CASCADE" % ( 166 | ', '.join(engine.table_names()) 167 | )) 168 | elif BIND.startswith('sqlite'): 169 | for table in engine.table_names(): 170 | conn.execute("DELETE FROM %s" % table) 171 | 172 | trans.commit() 173 | conn.close() 174 | 175 | request.addfinalizer(teardown) 176 | return connection 177 | 178 | 179 | class TestApp(object): 180 | """ 181 | A controller test starts a database transaction and creates a fake 182 | WSGI app. 
183 | """ 184 | 185 | __headers__ = {} 186 | 187 | def __init__(self, app): 188 | self.app = app 189 | 190 | def _do_request(self, url, method='GET', **kwargs): 191 | methods = { 192 | 'GET': self.app.get, 193 | 'POST': self.app.post, 194 | 'POSTJ': self.app.post_json, 195 | 'PUT': self.app.put, 196 | 'HEAD': self.app.head, 197 | 'DELETE': self.app.delete 198 | } 199 | kwargs.setdefault('headers', {}).update(self.__headers__) 200 | return methods.get(method, self.app.get)(str(url), **kwargs) 201 | 202 | def post_json(self, url, **kwargs): 203 | """ 204 | @param (string) url - The URL to emulate a POST request to 205 | @returns (paste.fixture.TestResponse) 206 | """ 207 | # support automatic, correct authentication if not specified otherwise 208 | if not kwargs.get('headers'): 209 | kwargs['headers'] = {'Authorization': util.make_credentials()} 210 | return self._do_request(url, 'POSTJ', **kwargs) 211 | 212 | def post(self, url, **kwargs): 213 | """ 214 | @param (string) url - The URL to emulate a POST request to 215 | @returns (paste.fixture.TestResponse) 216 | """ 217 | # support automatic, correct authentication if not specified otherwise 218 | if not kwargs.get('headers'): 219 | kwargs['headers'] = {'Authorization': util.make_credentials()} 220 | return self._do_request(url, 'POST', **kwargs) 221 | 222 | def get(self, url, **kwargs): 223 | """ 224 | @param (string) url - The URL to emulate a GET request to 225 | @returns (paste.fixture.TestResponse) 226 | """ 227 | return self._do_request(url, 'GET', **kwargs) 228 | 229 | def put(self, url, **kwargs): 230 | """ 231 | @param (string) url - The URL to emulate a PUT request to 232 | @returns (paste.fixture.TestResponse) 233 | """ 234 | if not kwargs.get('headers'): 235 | kwargs['headers'] = {'Authorization': util.make_credentials()} 236 | return self._do_request(url, 'PUT', **kwargs) 237 | 238 | def delete(self, url, **kwargs): 239 | """ 240 | @param (string) url - The URL to emulate a DELETE request to 241 | 
@returns (paste.fixture.TestResponse) 242 | """ 243 | if not kwargs.get('headers'): 244 | kwargs['headers'] = {'Authorization': util.make_credentials()} 245 | return self._do_request(url, 'DELETE', **kwargs) 246 | 247 | def head(self, url, **kwargs): 248 | """ 249 | @param (string) url - The URL to emulate a HEAD request to 250 | @returns (paste.fixture.TestResponse) 251 | """ 252 | if not kwargs.get('headers'): 253 | kwargs['headers'] = {'Authorization': util.make_credentials()} 254 | return self._do_request(url, 'HEAD', **kwargs) 255 | --------------------------------------------------------------------------------