├── .gitignore ├── roles ├── log │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── files │ │ ├── elasticsearch.conf │ │ └── fxa_auth_memory.lua │ ├── templates │ │ ├── nginx.conf.j2 │ │ ├── heka.toml.j2 │ │ └── kibana_config.js.j2 │ └── tasks │ │ └── main.yml ├── authdb │ ├── meta │ │ └── main.yml │ ├── templates │ │ ├── heka.toml.j2 │ │ └── config.json.j2 │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── files │ │ ├── fxa_auth_db_server.lua │ │ └── fxa-auth-db-server.conf ├── customs │ ├── meta │ │ └── main.yml │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── config.json.j2 │ ├── handlers │ │ └── main.yml │ ├── files │ │ └── fxa-customs-server.conf │ └── tasks │ │ └── main.yml ├── email │ ├── meta │ │ └── main.yml │ ├── templates │ │ └── config.json.j2 │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── files │ │ └── fxa-auth-mailer.conf ├── heka_leaf │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── hekad.toml.j2 │ └── tasks │ │ └── main.yml ├── auth │ ├── meta │ │ └── main.yml │ ├── templates │ │ ├── upstream.conf.j2 │ │ ├── heka.toml.j2 │ │ ├── nginx.conf.j2 │ │ └── config.json.j2 │ ├── handlers │ │ └── main.yml │ ├── files │ │ ├── fxa-auth-server.conf │ │ └── fxa_auth_server.lua │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── oauth │ ├── meta │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── templates │ │ ├── upstream.conf.j2 │ │ └── config.json.j2 │ ├── files │ │ └── fxa-oauth-server.conf │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── rp │ ├── meta │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── templates │ │ ├── nginx.conf.j2 │ │ ├── upstream.conf.j2 │ │ └── config.json.j2 │ ├── files │ │ └── fxa-rp.conf │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── content │ ├── meta │ │ └── main.yml │ ├── templates │ │ ├── upstream.conf.j2 │ │ ├── 
nginx.conf.j2 │ │ ├── heka.toml.j2 │ │ └── config.json.j2 │ ├── handlers │ │ └── main.yml │ ├── files │ │ └── fxa-content-server.conf │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── profile │ ├── meta │ │ └── main.yml │ ├── templates │ │ ├── upstream.conf.j2 │ │ ├── nginx.conf.j2 │ │ └── config.json.j2 │ ├── handlers │ │ └── main.yml │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── fxa-profile-server.conf │ └── tasks │ │ └── main.yml ├── mysql │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── memcached │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── common │ ├── files │ │ ├── supervisor.conf │ │ ├── supervisor.systemd │ │ ├── hekad.toml │ │ ├── ntp.conf │ │ ├── hekad.conf │ │ └── supervisord.conf │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml └── web │ ├── handlers │ └── main.yml │ ├── tasks │ └── main.yml │ └── templates │ └── nginx.conf.j2 ├── aws ├── roles │ ├── ses │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── sasl_passwd.j2 │ │ │ └── main.cf.j2 │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── team │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ └── cron_update │ │ ├── defaults │ │ └── main.yml │ │ └── tasks │ │ └── main.yml ├── Makefile ├── environments │ ├── latest.yml │ ├── ux.yml │ ├── dcoates.yml │ ├── nightly.yml │ ├── marketplace.yml │ ├── stable.yml │ └── EXAMPLE.yml ├── local.yml ├── dev.yml ├── cloudformation │ ├── basic.json │ └── moz-single.json └── ansible.cfg ├── vagrant ├── Makefile ├── local.yml ├── Vagrantfile ├── insecure_private_key └── ansible.cfg └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | node_modules 3 | aws/my_vars.yml 4 | -------------------------------------------------------------------------------- /roles/log/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 
kibana_public_port: 9199 3 | -------------------------------------------------------------------------------- /roles/authdb/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | -------------------------------------------------------------------------------- /roles/customs/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | -------------------------------------------------------------------------------- /roles/email/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | -------------------------------------------------------------------------------- /roles/heka_leaf/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | heka_aggregator: '127.0.0.1:5565' 3 | -------------------------------------------------------------------------------- /roles/auth/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | - { role: web } 5 | -------------------------------------------------------------------------------- /roles/log/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | - { role: web } 5 | -------------------------------------------------------------------------------- /roles/oauth/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | - { role: web } 5 | -------------------------------------------------------------------------------- /roles/rp/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 
dependencies: 3 | - { role: common } 4 | - { role: web } 5 | -------------------------------------------------------------------------------- /aws/roles/ses/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | - { role: email } 5 | -------------------------------------------------------------------------------- /roles/content/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | - { role: web } 5 | -------------------------------------------------------------------------------- /roles/profile/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: common } 4 | - { role: web } 5 | -------------------------------------------------------------------------------- /roles/mysql/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart mysql 4 | sudo: true 5 | service: name=mysqld state=restarted 6 | -------------------------------------------------------------------------------- /aws/roles/ses/templates/sasl_passwd.j2: -------------------------------------------------------------------------------- 1 | email-smtp.{{ region }}.amazonaws.com:587 {{ smtp_user.stdout }}:{{ smtp_pass.stdout }} 2 | -------------------------------------------------------------------------------- /roles/auth/templates/upstream.conf.j2: -------------------------------------------------------------------------------- 1 | upstream upstream_auth_server { 2 | server 127.0.0.1:{{ auth_private_port }}; 3 | } 4 | -------------------------------------------------------------------------------- /roles/content/templates/upstream.conf.j2: -------------------------------------------------------------------------------- 1 | upstream upstream_content_server { 2 | server 
127.0.0.1:{{ content_private_port }}; 3 | } 4 | -------------------------------------------------------------------------------- /roles/profile/templates/upstream.conf.j2: -------------------------------------------------------------------------------- 1 | upstream upstream_profile_server { 2 | server 127.0.0.1:{{ profile_private_port }}; 3 | } 4 | -------------------------------------------------------------------------------- /aws/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: default 2 | 3 | default: latest 4 | 5 | %: 6 | ansible-playbook -i localhost, dev.yml --extra-vars "stack_name=$@" 7 | -------------------------------------------------------------------------------- /roles/memcached/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart memcached 4 | sudo: true 5 | service: name=memcached state=restarted 6 | -------------------------------------------------------------------------------- /aws/roles/ses/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: postmap sasl_passwd 4 | sudo: true 5 | command: /usr/sbin/postmap /etc/postfix/sasl_passwd 6 | -------------------------------------------------------------------------------- /aws/roles/team/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: update authorized_keys 4 | shell: cat /data/identity-pubkeys/*.pub > ~/.ssh/authorized_keys 5 | -------------------------------------------------------------------------------- /roles/heka_leaf/templates/hekad.toml.j2: -------------------------------------------------------------------------------- 1 | [AggregatorOutput] 2 | type = "TcpOutput" 3 | address="{{ heka_aggregator }}" 4 | message_matcher = 'Type !~ /^heka\./' 5 | 
-------------------------------------------------------------------------------- /aws/environments/latest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | region: us-west-2 3 | subdomain: latest.dev 4 | hosted_zone: lcip.org 5 | ssl_certificate_name: wildcard.dev.lcip.org 6 | rds_password: 33yJ(Lv)hr6&=N7t 7 | -------------------------------------------------------------------------------- /roles/heka_leaf/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: configure heka 4 | sudo: true 5 | template: src=hekad.toml.j2 dest=/etc/heka.d/leaf.toml 6 | notify: restart heka 7 | 8 | - meta: flush_handlers 9 | -------------------------------------------------------------------------------- /roles/common/files/supervisor.conf: -------------------------------------------------------------------------------- 1 | description "supervisor" 2 | 3 | start on runlevel [2345] 4 | stop on runlevel [!2345] 5 | 6 | respawn 7 | 8 | exec /usr/bin/supervisord --configuration /etc/supervisord.conf 9 | -------------------------------------------------------------------------------- /roles/common/files/supervisor.systemd: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=supervisor 3 | 4 | [Service] 5 | ExecStart=/usr/bin/supervisord --configuration /etc/supervisord.conf 6 | 7 | [Install] 8 | WantedBy=multi-user.target 9 | -------------------------------------------------------------------------------- /aws/environments/ux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | region: us-west-2 3 | subdomain: ux.dev 4 | hosted_zone: lcip.org 5 | ssl_certificate_name: wildcard.dev.lcip.org 6 | rds_password: Q)vy7]e9Q8G%9f{K 7 | 8 | content_git_version: ux-dev 9 | -------------------------------------------------------------------------------- /roles/customs/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | customs_private_port: 7000 3 | customs_memcached_addr_port: 127.0.0.1:11211 4 | customs_git_repo: https://github.com/mozilla/fxa-customs-server.git 5 | customs_git_version: master 6 | -------------------------------------------------------------------------------- /roles/web/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart nginx 4 | sudo: true 5 | service: name=nginx state=restarted 6 | 7 | - name: reload nginx config 8 | sudo: true 9 | command: nginx -s reload 10 | -------------------------------------------------------------------------------- /roles/mysql/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install mysql 4 | sudo: true 5 | yum: name=mysql-server state=present 6 | 7 | - name: start mysql 8 | sudo: true 9 | service: name=mysqld state=started enabled=true 10 | -------------------------------------------------------------------------------- /roles/common/files/hekad.toml: -------------------------------------------------------------------------------- 1 | [hekad] 2 | max_timer_inject = 100 3 | base_dir = "/data/hekad" 4 | 5 | [StatAccumInput] 6 | ticker_interval = 5 7 | emit_in_payload = true 8 | 9 | [StatsdInput] 10 | address = "127.0.0.1:8125" 11 | -------------------------------------------------------------------------------- /roles/memcached/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install memcached 4 | sudo: true 5 | yum: name=memcached state=present 6 | 7 | - name: start memcached 8 | sudo: true 9 | service: name=memcached state=started enabled=true 10 | -------------------------------------------------------------------------------- /aws/environments/dcoates.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | region: us-west-2 3 | subdomain: dcoates.dev 4 | hosted_zone: lcip.org 5 | ssl_certificate_name: wildcard.dev.lcip.org 6 | rds_password: 3s,hE2;QDw?6)t8e 7 | cron_time: 8 | minute: 5 9 | hour: 10 10 | -------------------------------------------------------------------------------- /aws/environments/nightly.yml: -------------------------------------------------------------------------------- 1 | --- 2 | region: us-west-2 3 | subdomain: nightly.dev 4 | hosted_zone: lcip.org 5 | ssl_certificate_name: wildcard.dev.lcip.org 6 | rds_password: QK4L6(8ZXoT$y{2& 7 | cron_time: 8 | minute: 5 9 | hour: 10 10 | -------------------------------------------------------------------------------- /roles/content/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | location / { 2 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 3 | proxy_set_header Host $http_host; 4 | proxy_redirect off; 5 | proxy_pass http://upstream_content_server; 6 | } 7 | -------------------------------------------------------------------------------- /aws/roles/cron_update/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | fxadev_git_repo: https://github.com/dannycoates/fxa-dev.git 3 | fxadev_git_version: master 4 | cron_time: 5 | weekday: '*' 6 | month: '*' 7 | day: '*' 8 | hour: '*' 9 | minute: '*/10' 10 | -------------------------------------------------------------------------------- /roles/log/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install kibana dependencies 4 | sudo: true 5 | sudo_user: app 6 | npm: path=/data/kibana 7 | 8 | - name: build kibana 9 | sudo: true 10 | sudo_user: app 11 | command: grunt build chdir=/data/kibana 12 | 
-------------------------------------------------------------------------------- /roles/rp/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install fxa-rp dependencies 4 | sudo: true 5 | sudo_user: app 6 | npm: path=/data/fxa-rp production=true 7 | 8 | - name: restart fxa-rp 9 | sudo: true 10 | supervisorctl: name=fxa-rp state=restarted 11 | -------------------------------------------------------------------------------- /roles/rp/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | location /123done/ { 2 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 3 | proxy_set_header Host $http_host; 4 | proxy_redirect off; 5 | rewrite ^/123done(.*)$ $1 break; 6 | proxy_pass http://upstream_rp; 7 | } 8 | -------------------------------------------------------------------------------- /roles/profile/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | location /profile/ { 2 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 3 | proxy_set_header Host $http_host; 4 | proxy_redirect off; 5 | rewrite ^/profile(.*)$ $1 break; 6 | proxy_pass http://upstream_profile_server; 7 | } 8 | -------------------------------------------------------------------------------- /roles/auth/templates/heka.toml.j2: -------------------------------------------------------------------------------- 1 | [FxaAuth] 2 | type = "LogstreamerInput" 3 | log_directory = "/var/log" 4 | file_match = 'fxa-auth\.log' 5 | decoder = "FxaAuthDecoder" 6 | 7 | [FxaAuthDecoder] 8 | type = "SandboxDecoder" 9 | script_type = "lua" 10 | filename = "lua_decoders/fxa_auth_server.lua" 11 | -------------------------------------------------------------------------------- /roles/common/files/ntp.conf: -------------------------------------------------------------------------------- 1 | 2 | driftfile /var/lib/ntp/drift 3 
| 4 | restrict 127.0.0.1 5 | restrict -6 ::1 6 | 7 | server 0.us.pool.ntp.org 8 | server 1.us.pool.ntp.org 9 | server 2.us.pool.ntp.org 10 | server 3.us.pool.ntp.org 11 | 12 | includefile /etc/ntp/crypto/pw 13 | 14 | keys /etc/ntp/keys 15 | -------------------------------------------------------------------------------- /roles/oauth/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install fxa-oauth-server dependencies 4 | sudo: true 5 | sudo_user: app 6 | npm: path=/data/fxa-oauth-server production=true 7 | 8 | - name: restart fxa-oauth-server 9 | sudo: true 10 | supervisorctl: name=fxa-oauth-server state=restarted 11 | -------------------------------------------------------------------------------- /roles/customs/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "logLevel": "info", 3 | "port": {{ customs_private_port }}, 4 | "memcached": "{{ customs_memcached_addr_port }}", 5 | "recordLifetimeSeconds": 900, 6 | "blockIntervalSeconds": 900, 7 | "maxEmails": 3, 8 | "maxBadLogins": 10 9 | } 10 | -------------------------------------------------------------------------------- /roles/authdb/templates/heka.toml.j2: -------------------------------------------------------------------------------- 1 | [FxaAuthDB] 2 | type = "LogstreamerInput" 3 | log_directory = "/var/log" 4 | file_match = 'fxa-auth-db\.log' 5 | decoder = "FxaAuthDBDecoder" 6 | 7 | [FxaAuthDBDecoder] 8 | type = "SandboxDecoder" 9 | script_type = "lua" 10 | filename = "lua_decoders/fxa_auth_db_server.lua" 11 | -------------------------------------------------------------------------------- /roles/customs/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install fxa-customs-server dependencies 4 | sudo: true 5 | sudo_user: app 6 | npm: path=/data/fxa-customs-server production=true 7 | 8 | - name: 
restart fxa-customs-server 9 | sudo: true 10 | supervisorctl: name=fxa-customs-server state=restarted 11 | -------------------------------------------------------------------------------- /roles/profile/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install fxa-profile-server dependencies 4 | sudo: true 5 | sudo_user: app 6 | npm: path=/data/fxa-profile-server production=true 7 | 8 | - name: restart fxa-profile-server 9 | sudo: true 10 | supervisorctl: name=fxa-profile-server state=restarted 11 | -------------------------------------------------------------------------------- /roles/content/templates/heka.toml.j2: -------------------------------------------------------------------------------- 1 | [FxaContent] 2 | type = "LogstreamerInput" 3 | log_directory = "/var/log" 4 | file_match = 'fxa-content\.err' 5 | decoder = "FxaContentDecoder" 6 | 7 | [FxaContentDecoder] 8 | type = "SandboxDecoder" 9 | script_type = "lua" 10 | filename = "lua_decoders/fxa_auth_server.lua" 11 | 12 | -------------------------------------------------------------------------------- /roles/common/files/hekad.conf: -------------------------------------------------------------------------------- 1 | [program:hekad] 2 | command=/usr/bin/hekad -config=/etc/heka.d 3 | autostart=true 4 | autorestart=unexpected 5 | startsecs=1 6 | startretries=3 7 | stopwaitsecs=3 8 | stdout_logfile=/var/log/hekad_out.log 9 | stderr_logfile=/var/log/hekad_err.log 10 | stderr_logfile_maxbytes=10MB 11 | stderr_logfile_backups=10 12 | -------------------------------------------------------------------------------- /roles/log/files/elasticsearch.conf: -------------------------------------------------------------------------------- 1 | [program:elasticsearch] 2 | command=/usr/share/elasticsearch/bin/elasticsearch 3 | autostart=true 4 | autorestart=unexpected 5 | startsecs=1 6 | startretries=3 7 | stopwaitsecs=3 8 |
stdout_logfile=/var/log/es_out.log 9 | stderr_logfile=/var/log/es_err.log 10 | stderr_logfile_maxbytes=10MB 11 | stderr_logfile_backups=10 12 | -------------------------------------------------------------------------------- /aws/environments/marketplace.yml: -------------------------------------------------------------------------------- 1 | --- 2 | region: us-west-2 3 | subdomain: marketplace.dev 4 | hosted_zone: lcip.org 5 | ssl_certificate_name: wildcard.dev.lcip.org 6 | rds_password: r44?%Wuj8y$BJ,B3 7 | cron_time: 8 | minute: 0 9 | hour: 0 10 | month: 1 11 | day: 1 12 | 13 | auth_public_url: https://api.accounts.firefox.com 14 | browserid_issuer: api.accounts.firefox.com 15 | -------------------------------------------------------------------------------- /roles/customs/files/fxa-customs-server.conf: -------------------------------------------------------------------------------- 1 | [program:fxa-customs-server] 2 | command=node /data/fxa-customs-server/bin/customs_server.js 3 | autostart=true 4 | autorestart=unexpected 5 | startsecs=1 6 | startretries=3 7 | stopwaitsecs=3 8 | stdout_logfile=NONE 9 | stderr_logfile=/var/log/fxa-customs.log 10 | stderr_logfile_maxbytes=10MB 11 | stderr_logfile_backups=10 12 | user=app 13 | -------------------------------------------------------------------------------- /roles/rp/templates/upstream.conf.j2: -------------------------------------------------------------------------------- 1 | upstream upstream_rp { 2 | server 127.0.0.1:{{ rp_private_port }}; 3 | } 4 | 5 | server { 6 | server_name {{ rp_domain_name }}; 7 | 8 | location / { 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | proxy_set_header Host $http_host; 11 | proxy_redirect off; 12 | proxy_pass http://upstream_rp; 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /roles/authdb/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 
authdb_private_port: 8000 3 | authdb_git_repo: https://github.com/mozilla/fxa-auth-db-server.git 4 | authdb_git_version: master 5 | authdb_patch_level: 3 6 | authdb_primary_host: 127.0.0.1 7 | authdb_primary_user: root 8 | authdb_primary_password: foobarbaz 9 | authdb_replica_host: 127.0.0.1 10 | authdb_replica_user: root 11 | authdb_replica_password: foobarbaz 12 | -------------------------------------------------------------------------------- /roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart ntp 4 | sudo: true 5 | service: name=ntpd state=restarted 6 | 7 | - name: restart supervisor 8 | sudo: true 9 | service: name=supervisor state=restarted 10 | 11 | - name: update supervisor 12 | sudo: true 13 | command: supervisorctl update 14 | 15 | - name: restart heka 16 | sudo: true 17 | supervisorctl: name=hekad state=restarted 18 | -------------------------------------------------------------------------------- /roles/auth/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install fxa-auth-server dependencies 4 | sudo: true 5 | sudo_user: app 6 | npm: path=/data/fxa-auth-server 7 | 8 | - name: gen dev key-pair 9 | sudo: true 10 | sudo_user: app 11 | command: node /data/fxa-auth-server/scripts/gen_keys.js 12 | 13 | - name: restart fxa-auth-server 14 | sudo: true 15 | supervisorctl: name=fxa-auth-server state=restarted 16 | 17 | -------------------------------------------------------------------------------- /roles/oauth/templates/upstream.conf.j2: -------------------------------------------------------------------------------- 1 | upstream upstream_oauth_server { 2 | server 127.0.0.1:{{ oauth_private_port }}; 3 | } 4 | 5 | server { 6 | server_name {{ oauth_domain_name }}; 7 | 8 | location / { 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | proxy_set_header Host $http_host; 11 | 
proxy_redirect off; 12 | proxy_pass http://upstream_oauth_server; 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /roles/rp/files/fxa-rp.conf: -------------------------------------------------------------------------------- 1 | [program:fxa-rp] 2 | command=node /data/fxa-rp/server.js 3 | autostart=true 4 | autorestart=unexpected 5 | startsecs=1 6 | startretries=3 7 | stopwaitsecs=3 8 | stdout_logfile=/var/log/fxa-rp.log 9 | stderr_logfile=/var/log/fxa-rp.err 10 | stderr_logfile_maxbytes=10MB 11 | stderr_logfile_backups=10 12 | environment=CONFIG_FILES="/data/fxa-rp/config.json,/data/fxa-rp/local.json" 13 | user=app 14 | -------------------------------------------------------------------------------- /roles/auth/files/fxa-auth-server.conf: -------------------------------------------------------------------------------- 1 | [program:fxa-auth-server] 2 | command=node /data/fxa-auth-server/bin/key_server.js 3 | directory=/data/fxa-auth-server 4 | autostart=true 5 | autorestart=unexpected 6 | startsecs=1 7 | startretries=3 8 | stopwaitsecs=3 9 | stdout_logfile=NONE 10 | stderr_logfile=/var/log/fxa-auth.log 11 | stderr_logfile_maxbytes=10MB 12 | stderr_logfile_backups=10 13 | user=app 14 | environment=NODE_ENV="stage" 15 | -------------------------------------------------------------------------------- /roles/authdb/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install fxa-auth-db-server dependencies 4 | sudo: true 5 | sudo_user: app 6 | npm: path=/data/fxa-auth-db-server production=true 7 | 8 | - name: run db patcher 9 | sudo: true 10 | sudo_user: app 11 | command: node bin/db_patcher.js chdir=/data/fxa-auth-db-server 12 | 13 | - name: restart fxa-auth-db-server 14 | sudo: true 15 | supervisorctl: name=fxa-auth-db-server state=restarted 16 | -------------------------------------------------------------------------------- 
/roles/email/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "port": {{ auth_mailer_port }}, 3 | "logLevel": "info", 4 | "locales": ["en_US", "de"], 5 | "mail": { 6 | "host": "{{ auth_mailer_smtp_host }}", 7 | "port": {{ auth_mailer_smtp_port }}, 8 | "secure": false, 9 | "sender": "{{ auth_mailer_sender }}", 10 | "verificationUrl": "{{ auth_mailer_verify_url }}", 11 | "passwordResetUrl": "{{ auth_mailer_recovery_url }}" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /roles/rp/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "client_id": "dcdb5ae7add825d2", 3 | "client_secret": "b93ef8a8f3e553a430d7e5b904c6132b2722633af9f03128029201d24a97f2a8", 4 | "redirect_uri": "{{ rp_public_url }}/api/oauth", 5 | "auth_uri": "{{ oauth_public_url }}/v1/authorization", 6 | "oauth_uri": "{{ oauth_public_url }}/v1", 7 | "profile_uri": "{{ profile_public_url }}/v1", 8 | "scopes": "profile", 9 | "port": {{ rp_private_port }} 10 | } 11 | -------------------------------------------------------------------------------- /roles/auth/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | location /auth/ { 2 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 3 | proxy_set_header Host $http_host; 4 | proxy_redirect off; 5 | proxy_pass http://upstream_auth_server; 6 | } 7 | 8 | location /.well-known/ { 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | proxy_set_header Host $http_host; 11 | proxy_redirect off; 12 | proxy_pass http://upstream_auth_server; 13 | } 14 | -------------------------------------------------------------------------------- /roles/content/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install fxa-content-server dependencies 4 
| sudo: true 5 | sudo_user: app 6 | npm: path=/data/fxa-content-server production=true 7 | 8 | - name: build fxa-content-server assets 9 | sudo: true 10 | sudo_user: app 11 | command: grunt build chdir=/data/fxa-content-server 12 | 13 | - name: restart fxa-content-server 14 | sudo: true 15 | supervisorctl: name=fxa-content-server state=restarted 16 | -------------------------------------------------------------------------------- /roles/profile/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | public_protocol: https 3 | private_protocol: http 4 | domain_name: "{{ subdomain }}.{{ hosted_zone }}" 5 | profile_private_port: 9011 6 | profile_git_repo: https://github.com/mozilla/fxa-profile-server.git 7 | profile_git_version: master 8 | profile_public_url: "{{ public_protocol }}://{{ domain_name }}/profile" 9 | oauth_domain_name: "oauth-{{ domain_name }}" 10 | oauth_public_url: "{{ public_protocol }}://{{ oauth_domain_name }}" 11 | -------------------------------------------------------------------------------- /roles/profile/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "logging": { 3 | "handlers": { 4 | "console": { 5 | "formatter": "pretty_with_time" 6 | } 7 | }, 8 | "loggers": { 9 | "fxa": { 10 | "level": "verbose" 11 | } 12 | } 13 | }, 14 | "publicUrl": "{{ profile_public_url }}", 15 | "oauth": { 16 | "url": "{{ oauth_public_url }}/v1" 17 | }, 18 | "server": { 19 | "host": "127.0.0.1", 20 | "port": 9011 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /vagrant/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: default vmware 2 | 3 | default: 4 | vagrant up --provider=virtualbox 5 | 6 | vmware: 7 | vagrant up --provider=vmware_fusion 8 | 9 | .PHONY: update-code update 10 | 11 | update-code: 12 | ansible-playbook -i 
.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory --private-key=./insecure_private_key -u vagrant --tags code local.yml 13 | 14 | update: 15 | vagrant provision 16 | 17 | .PHONY: destroy 18 | 19 | destroy: 20 | vagrant destroy -f 21 | -------------------------------------------------------------------------------- /aws/roles/team/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install human tools 4 | sudo: true 5 | yum: name={{ item }} state=present 6 | with_items: 7 | - emacs-nox 8 | - strace 9 | - lsof 10 | - tmux 11 | - htop 12 | 13 | - name: get team public keys 14 | git: repo=https://github.com/mozilla/identity-pubkeys.git 15 | dest=/data/identity-pubkeys 16 | version=05af279f784385bfa4dfda785d7609d1588809d3 17 | force=true 18 | notify: update authorized_keys 19 | -------------------------------------------------------------------------------- /roles/oauth/files/fxa-oauth-server.conf: -------------------------------------------------------------------------------- 1 | [program:fxa-oauth-server] 2 | command=node /data/fxa-oauth-server/bin/server.js 3 | autostart=true 4 | autorestart=unexpected 5 | startsecs=1 6 | startretries=3 7 | stopwaitsecs=3 8 | stdout_logfile=/var/log/fxa-oauth.log 9 | stderr_logfile=/var/log/fxa-oauth.err 10 | stderr_logfile_maxbytes=10MB 11 | stderr_logfile_backups=10 12 | environment=CONFIG_FILES="/data/fxa-oauth-server/config/awsbox.json,/data/fxa-oauth-server/config/local.json" 13 | user=app 14 | -------------------------------------------------------------------------------- /roles/profile/files/fxa-profile-server.conf: -------------------------------------------------------------------------------- 1 | [program:fxa-profile-server] 2 | command=node /data/fxa-profile-server/bin/server.js 3 | autostart=true 4 | autorestart=unexpected 5 | startsecs=1 6 | startretries=3 7 | stopwaitsecs=3 8 | stdout_logfile=/var/log/fxa-profile.log 9 | 
stderr_logfile=/var/log/fxa-profile.err 10 | stderr_logfile_maxbytes=10MB 11 | stderr_logfile_backups=10 12 | environment=CONFIG_FILES="/data/fxa-profile-server/config/awsbox.json,/data/fxa-profile-server/config/local.json" 13 | user=app 14 | -------------------------------------------------------------------------------- /roles/content/files/fxa-content-server.conf: -------------------------------------------------------------------------------- 1 | [program:fxa-content-server] 2 | command=node /data/fxa-content-server/server/bin/fxa-content-server.js 3 | autostart=true 4 | autorestart=unexpected 5 | startsecs=1 6 | startretries=3 7 | stopwaitsecs=3 8 | stdout_logfile=/var/log/fxa-content.log 9 | stderr_logfile=/var/log/fxa-content.err 10 | stderr_logfile_maxbytes=10MB 11 | stderr_logfile_backups=10 12 | environment=CONFIG_FILES="/data/fxa-content-server/server/config/awsbox.json,/data/fxa-content-server/server/config/local.json" 13 | user=app 14 | -------------------------------------------------------------------------------- /roles/rp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | public_protocol: https 3 | private_protocol: http 4 | domain_name: "{{ subdomain }}.{{ hosted_zone }}" 5 | rp_private_port: 4900 6 | rp_git_repo: https://github.com/mozilla/123done.git 7 | rp_git_version: oauth 8 | rp_domain_name: "123done-{{ domain_name }}" 9 | rp_public_url: "{{ public_protocol }}://{{ rp_domain_name }}" 10 | oauth_domain_name: "oauth-{{ domain_name }}" 11 | oauth_public_url: "{{ public_protocol }}://{{ oauth_domain_name }}" 12 | profile_public_url: "{{ public_protocol }}://{{ domain_name }}/profile" 13 | -------------------------------------------------------------------------------- /roles/content/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | public_protocol: https 3 | private_protocol: http 4 | domain_name: "{{ subdomain }}.{{ 
hosted_zone }}" 5 | auth_public_url: "{{ public_protocol }}://{{ domain_name }}/auth" 6 | oauth_domain_name: "oauth-{{ domain_name }}" 7 | oauth_public_url: "{{ public_protocol }}://{{ oauth_domain_name }}" 8 | content_public_url: "{{ public_protocol }}://{{ domain_name }}" 9 | content_public_port: 80 10 | content_private_port: 3030 11 | content_git_repo: https://github.com/mozilla/fxa-content-server.git 12 | content_git_version: master 13 | -------------------------------------------------------------------------------- /roles/email/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | public_protocol: https 3 | private_protocol: http 4 | content_public_url: "{{ public_protocol }}://{{ domain_name }}" 5 | auth_mailer_git_repo: https://github.com/dannycoates/fxa-auth-mailer.git 6 | auth_mailer_git_version: master 7 | auth_mailer_port: 1810 8 | auth_mailer_smtp_host: localhost 9 | auth_mailer_smtp_port: 25 10 | auth_mailer_sender: Firefox Accounts 11 | auth_mailer_verify_url: "{{ content_public_url }}/v1/verify_email" 12 | auth_mailer_recovery_url: "{{content_public_url }}/v1/complete_reset_password" 13 | -------------------------------------------------------------------------------- /roles/oauth/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | public_protocol: https 3 | private_protocol: http 4 | domain_name: "{{ subdomain }}.{{ hosted_zone }}" 5 | oauth_private_port: 9010 6 | oauth_db_host: 127.0.0.1 7 | oauth_db_password: 8 | oauth_domain_name: "oauth-{{ domain_name }}" 9 | oauth_git_repo: https://github.com/mozilla/fxa-oauth-server.git 10 | oauth_git_version: master 11 | oauth_public_url: "{{ public_protocol }}://{{ oauth_domain_name }}" 12 | browserid_issuer: "{{ domain_name }}" 13 | rp_domain_name: "123done-{{ domain_name }}" 14 | rp_public_url: "{{ public_protocol }}://{{ rp_domain_name }}" 15 | 
-------------------------------------------------------------------------------- /roles/email/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: set MTA to postfix 4 | sudo: true 5 | # the alternatives module isn't working 6 | # alternatives: name=mta path=/usr/sbin/sendmail.postfix 7 | command: alternatives --set mta /usr/sbin/sendmail.postfix 8 | 9 | - name: reload postfix 10 | sudo: true 11 | service: name=postfix state=reloaded 12 | 13 | - name: install fxa-auth-mailer dependencies 14 | sudo: true 15 | sudo_user: app 16 | npm: path=/data/fxa-auth-mailer production=true 17 | 18 | - name: restart fxa-auth-mailer 19 | sudo: true 20 | supervisorctl: name=fxa-auth-mailer state=restarted 21 | -------------------------------------------------------------------------------- /aws/local.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Configure instance 4 | hosts: localhost 5 | connection: local 6 | vars_files: 7 | - "environments/{{ stack_name }}.yml" 8 | vars: 9 | authdb_primary_host: "{{ rds_host }}" 10 | authdb_primary_password: "{{ rds_password }}" 11 | authdb_replica_host: "{{ rds_host }}" 12 | authdb_replica_password: "{{ rds_password }}" 13 | oauth_db_host: "{{ rds_host }}" 14 | oauth_db_password: "{{ rds_password }}" 15 | roles: 16 | - ses 17 | - memcached 18 | - customs 19 | - authdb 20 | - auth 21 | - content 22 | - log 23 | - cron_update 24 | - team 25 | -------------------------------------------------------------------------------- /roles/auth/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | public_protocol: https 3 | private_protocol: http 4 | domain_name: "{{ subdomain }}.{{ hosted_zone }}" 5 | content_public_url: "{{ public_protocol }}://{{ domain_name }}" 6 | authdb_server_url: "{{ private_protocol }}://127.0.0.1:8000" 7 | customs_server_url: "{{ 
private_protocol }}://127.0.0.1:7000" 8 | auth_mailer_url: "{{ private_protocol }}://127.0.0.1:1810" 9 | auth_private_port: 9000 10 | auth_git_repo: https://github.com/mozilla/fxa-auth-server.git 11 | auth_git_version: master 12 | auth_mail_host: 127.0.0.1 13 | auth_mail_port: 25 14 | auth_mail_sender: "Firefox Accounts " 15 | -------------------------------------------------------------------------------- /roles/web/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install nginx 4 | sudo: true 5 | yum: name=nginx state=present 6 | 7 | - file: path=/etc/nginx/conf.d/upstream state=directory 8 | sudo: true 9 | 10 | - file: path=/etc/nginx/conf.d/location state=directory 11 | sudo: true 12 | 13 | - name: delete nginx default.conf 14 | sudo: true 15 | file: path=/etc/nginx/conf.d/default.conf state=absent 16 | 17 | - name: copy base nginx.conf 18 | sudo: true 19 | template: src=nginx.conf.j2 dest=/etc/nginx/nginx.conf 20 | notify: reload nginx config 21 | 22 | - name: start nginx 23 | sudo: true 24 | service: name=nginx state=started enabled=true 25 | -------------------------------------------------------------------------------- /aws/environments/stable.yml: -------------------------------------------------------------------------------- 1 | --- 2 | region: us-west-2 3 | subdomain: stable.dev 4 | hosted_zone: lcip.org 5 | ssl_certificate_name: wildcard.dev.lcip.org 6 | rds_password: Q&}PzHU79J8Ex}3, 7 | cron_time: 8 | minute: 0 9 | hour: 0 10 | 11 | auth_git_version: train-16 12 | authdb_git_version: d20b2f4f32fd2b0aff23673cde4df2d35ae7ff1c 13 | content_git_version: train-16 14 | customs_git_version: d5f96e7308aae0a803ea140dfa3b0234f9389d86 15 | auth_mailer_git_version: 452434ae0f0c8099d75c2742cfb0e1a9f7aa35d6 16 | oauth_git_version: d41c8bab6a43f555a9ae23ac1349724ec8f71ae4 17 | profile_git_version: a383374a3733bca72353c2b71b7042e2306898d2 18 | rp_git_version: 
25ca96e19f6171b12b17370c377f5502a927ed97 19 | -------------------------------------------------------------------------------- /roles/customs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install fxa-customs-server 4 | tags: code 5 | sudo: true 6 | sudo_user: app 7 | git: repo={{ customs_git_repo }} 8 | dest=/data/fxa-customs-server 9 | version={{ customs_git_version }} 10 | force=true 11 | notify: 12 | - install fxa-customs-server dependencies 13 | - restart fxa-customs-server 14 | 15 | - name: supervise fxa-customs-server 16 | sudo: true 17 | copy: src=fxa-customs-server.conf dest=/etc/supervisor.d/fxa-customs-server.conf 18 | notify: update supervisor 19 | 20 | - name: configure fxa-customs-server 21 | sudo: true 22 | sudo_user: app 23 | template: src=config.json.j2 dest=/data/fxa-customs-server/fxa_customsrc 24 | notify: restart fxa-customs-server 25 | 26 | - meta: flush_handlers 27 | -------------------------------------------------------------------------------- /roles/rp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: configure nginx upstream 4 | sudo: true 5 | template: src=upstream.conf.j2 dest=/etc/nginx/conf.d/upstream/http_fxa-rp.conf 6 | notify: reload nginx config 7 | 8 | - name: install fxa-rp 9 | tags: code 10 | sudo: true 11 | sudo_user: app 12 | git: repo={{ rp_git_repo }} 13 | dest=/data/fxa-rp 14 | version={{ rp_git_version }} 15 | force=true 16 | notify: 17 | - install fxa-rp dependencies 18 | - restart fxa-rp 19 | 20 | - name: configure fxa-rp 21 | sudo: true 22 | sudo_user: app 23 | template: src=config.json.j2 dest=/data/fxa-rp/config.json 24 | notify: restart fxa-rp 25 | 26 | - name: supervise fxa-rp 27 | sudo: true 28 | copy: src=fxa-rp.conf dest=/etc/supervisor.d/fxa-rp.conf 29 | notify: update supervisor 30 | 31 | - meta: flush_handlers 32 | 
-------------------------------------------------------------------------------- /aws/environments/EXAMPLE.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # The following variables should be put into a new yml file in this directory. 4 | # Any variables used by ansible roles may also be set here. 5 | 6 | region: us-west-2 7 | 8 | # The {{ subdomain }}.{{ host_zone }} will be added to route53 9 | subdomain: dcoates.dev 10 | hosted_zone: lcip.org 11 | 12 | # the name of the ssl cert in IAM to use in the ELB 13 | # this must be compatible with your stack_name and hosted_zone 14 | # see http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingServerCerts.html 15 | ssl_certificate_name: wildcard.dev.lcip.org 16 | 17 | rds_password: 42{B[3RL(g9ZkE+e 18 | 19 | # how often to auto-update (defaults to every 10 minutes) 20 | # the example below will set it to only update on January 1, 00:00 UTC 21 | cron_time: 22 | minute: 0 23 | hour: 0 24 | day: 1 25 | month: 1 26 | -------------------------------------------------------------------------------- /roles/authdb/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "logLevel": "info", 3 | "port": {{ authdb_private_port }}, 4 | "patchKey": "schema-patch-level", 5 | "patchLevel": {{ authdb_patch_level }}, 6 | "master": { 7 | "user": "{{ authdb_primary_user }}", 8 | "password": "{{ authdb_primary_password }}", 9 | "database": "fxa", 10 | "host": "{{ authdb_primary_host }}", 11 | "port": 3306, 12 | "connectionLimit": 10, 13 | "waitForConnections": true, 14 | "queueLimit": 100 15 | }, 16 | "slave": { 17 | "user": "{{ authdb_replica_user }}", 18 | "password": "{{ authdb_replica_password }}", 19 | "database": "fxa", 20 | "host": "{{ authdb_replica_host }}", 21 | "port": 3306, 22 | "connectionLimit": 10, 23 | "waitForConnections": true, 24 | "queueLimit": 100 25 | } 26 | } 27 | 
-------------------------------------------------------------------------------- /roles/oauth/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: configure nginx 4 | sudo: true 5 | template: src=upstream.conf.j2 dest=/etc/nginx/conf.d/upstream/http_fxa_oauth.conf 6 | notify: reload nginx config 7 | 8 | - name: install fxa-oauth-server 9 | tags: code 10 | sudo: true 11 | sudo_user: app 12 | git: repo={{ oauth_git_repo }} 13 | dest=/data/fxa-oauth-server 14 | version={{ oauth_git_version }} 15 | force=true 16 | notify: 17 | - install fxa-oauth-server dependencies 18 | - restart fxa-oauth-server 19 | 20 | - name: configure fxa-oauth-server 21 | sudo: true 22 | sudo_user: app 23 | template: src=config.json.j2 dest=/data/fxa-oauth-server/config/local.json 24 | notify: restart fxa-oauth-server 25 | 26 | - name: supervise fxa-oauth-server 27 | sudo: true 28 | copy: src=fxa-oauth-server.conf dest=/etc/supervisor.d/fxa-oauth-server.conf 29 | notify: update supervisor 30 | 31 | - meta: flush_handlers 32 | -------------------------------------------------------------------------------- /roles/auth/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "publicUrl": "{{ auth_public_url }}", 3 | "domain": "{{ domain_name }}", 4 | "db": { 5 | "backend": "httpdb" 6 | }, 7 | "httpdb": { 8 | "url": "{{ authdb_server_url }}" 9 | }, 10 | "secretKeyFile": "/data/fxa-auth-server/config/secret-key.json", 11 | "publicKeyFile": "/data/fxa-auth-server/config/public-key.json", 12 | "customsUrl": "{{ customs_server_url }}", 13 | "contentServer": { 14 | "url": "{{ content_public_url }}" 15 | }, 16 | "templateServer": { 17 | "url": "{{ content_public_url }}" 18 | }, 19 | "smtp":{ 20 | "host": "{{ auth_mail_host }}", 21 | "port": {{ auth_mail_port }}, 22 | "secure": false, 23 | "sender":"{{ auth_mail_sender }}", 24 | 
"templatePath":"/data/fxa-auth-server/templates/email", 25 | "redirectDomain": "firefox.com" 26 | }, 27 | "listen": { 28 | "port": {{ auth_private_port }} 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /vagrant/local.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: default 4 | remote_user: vagrant 5 | vars: 6 | public_protocol: http 7 | domain_name: fxa.local 8 | roles: 9 | - email 10 | - mysql 11 | - memcached 12 | - authdb 13 | - content 14 | - customs 15 | - auth 16 | - oauth 17 | - profile 18 | - rp 19 | - log 20 | 21 | - hosts: localhost 22 | gather_facts: false 23 | connection: local 24 | tasks: 25 | - name: add entries to /etc/hosts 26 | sudo: true 27 | lineinfile: dest=/etc/hosts regexp="{{ item.regexp }}" line="{{ item.line }}" 28 | with_items: 29 | - { regexp: " fxa.local$", line: '{{ hostvars["default"]["ansible_all_ipv4_addresses"][hostvars["default"]["iface"]] }} fxa.local'} 30 | - { regexp: " 123done-fxa.local$", line: '{{ hostvars["default"]["ansible_all_ipv4_addresses"][hostvars["default"]["iface"]] }} 123done-fxa.local'} 31 | - { regexp: " oauth-fxa.local$", line: '{{ hostvars["default"]["ansible_all_ipv4_addresses"][hostvars["default"]["iface"]] }} oauth-fxa.local'} 32 | -------------------------------------------------------------------------------- /aws/roles/ses/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install postfix main.cf 4 | sudo: true 5 | template: 6 | src=main.cf.j2 7 | dest=/etc/postfix/main.cf 8 | owner=root group=root mode=0644 9 | notify: reload postfix 10 | 11 | - name: get s3 secrets 12 | sudo: true 13 | s3: bucket=net.mozaws.ops.hiera-secrets 14 | object=/app/fxa.dev.yaml 15 | dest=/data/fxa.dev.yml 16 | mode=get 17 | 18 | - shell: 'grep -e "fxa::smtp::user" /data/fxa.dev.yml | sed "s/^fxa::smtp::user: ''\([^'']\+\)''/\1/"' 19 | sudo: true 
20 | changed_when: false 21 | register: smtp_user 22 | 23 | 24 | - shell: 'grep -e "fxa::smtp::pass" /data/fxa.dev.yml | sed "s/^fxa::smtp::pass: ''\([^'']\+\)''/\1/"' 25 | sudo: true 26 | changed_when: false 27 | register: smtp_pass 28 | 29 | - name: install postfix sasl_passwd 30 | sudo: true 31 | template: 32 | src=sasl_passwd.j2 33 | dest=/etc/postfix/sasl_passwd 34 | owner=root group=root mode=0600 35 | notify: 36 | - postmap sasl_passwd 37 | - reload postfix 38 | 39 | - meta: flush_handlers 40 | -------------------------------------------------------------------------------- /roles/content/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "fxaccount_url": "{{ auth_public_url }}", 3 | "public_url": "{{ content_public_url }}", 4 | "oauth_url": "{{ oauth_public_url }}", 5 | "env": "production", 6 | "use_https": false, 7 | "static_max_age" : 0, 8 | "i18n": { 9 | "supportedLanguages": ["af", "an", "ar", "as", "ast", "be", "bg", "bn-BD", "bn-IN", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en-GB", "en-US", "en-ZA", "eo", "es", "es-AR", "es-CL", "es-MX", "et", "eu", "fa", "ff", "fi", "fr", "fy", "fy-NL", "ga", "ga-IE", "gd", "gl", "gu", "gu-IN", "he", "hi-IN", "hr", "ht", "hu", "hy-AM", "id", "is", "it", "it-CH", "ja", "kk", "km", "kn", "ko", "ku", "lij", "lt", "lv", "mai", "mk", "ml", "mr", "ms", "nb-NO", "ne-NP", "nl", "nn-NO", "or", "pa", "pa-IN", "pl", "pt", "pt-BR", "pt-PT", "rm", "ro", "ru", "si", "sk", "sl", "son", "sq", "sr", "sr-LATN", "sv", "sv-SE", "ta", "te", "th", "tr", "uk", "ur", "vi", "xh", "zh-CN", "zh-TW", "zu"] 10 | }, 11 | "route_log_format": "dev_fxa", 12 | "static_directory": "dist", 13 | "page_template_subdirectory": "dist", 14 | "metrics" : { 15 | "sample_rate" : 1 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /roles/authdb/tasks/main.yml: -------------------------------------------------------------------------------- 
1 | --- 2 | 3 | - name: create fxa-auth-db-server heka decoder 4 | sudo: true 5 | copy: src=fxa_auth_db_server.lua dest=/usr/share/heka/lua_decoders/fxa_auth_db_server.lua 6 | notify: restart heka 7 | 8 | - name: configure heka 9 | sudo: true 10 | template: src=heka.toml.j2 dest=/etc/heka.d/fxa-auth-db-server.toml 11 | notify: restart heka 12 | 13 | - name: install fxa-auth-db-server 14 | tags: code 15 | sudo: true 16 | sudo_user: app 17 | git: repo={{ authdb_git_repo }} 18 | dest=/data/fxa-auth-db-server 19 | version={{ authdb_git_version }} 20 | force=true 21 | notify: 22 | - install fxa-auth-db-server dependencies 23 | - run db patcher 24 | - restart fxa-auth-db-server 25 | 26 | - name: configure fxa-auth-db-server 27 | sudo: true 28 | sudo_user: app 29 | template: src=config.json.j2 dest=/data/fxa-auth-db-server/.fxa_dbrc 30 | notify: restart fxa-auth-db-server 31 | 32 | - name: supervise fxa-auth-db-server 33 | sudo: true 34 | copy: src=fxa-auth-db-server.conf dest=/etc/supervisor.d/fxa-auth-db-server.conf 35 | notify: update supervisor 36 | 37 | - meta: flush_handlers 38 | -------------------------------------------------------------------------------- /roles/profile/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: configure nginx upstream 4 | sudo: true 5 | template: src=upstream.conf.j2 dest=/etc/nginx/conf.d/upstream/http_fxa_profile.conf 6 | notify: reload nginx config 7 | 8 | - name: configure nginx location 9 | sudo: true 10 | template: src=nginx.conf.j2 dest=/etc/nginx/conf.d/location/http_fxa_profile.conf 11 | notify: reload nginx config 12 | 13 | - name: install fxa-profile-server 14 | tags: code 15 | sudo: true 16 | sudo_user: app 17 | git: repo={{ profile_git_repo }} 18 | dest=/data/fxa-profile-server 19 | version={{ profile_git_version }} 20 | force=true 21 | notify: 22 | - install fxa-profile-server dependencies 23 | - restart fxa-profile-server 24 | 25 | - name: configure 
fxa-profile-server 26 | sudo: true 27 | sudo_user: app 28 | template: src=config.json.j2 dest=/data/fxa-profile-server/config/local.json 29 | notify: restart fxa-profile-server 30 | 31 | - name: supervise fxa-profile-server 32 | sudo: true 33 | copy: src=fxa-profile-server.conf dest=/etc/supervisor.d/fxa-profile-server.conf 34 | notify: update supervisor 35 | 36 | - meta: flush_handlers 37 | -------------------------------------------------------------------------------- /roles/email/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: no sendmail 4 | sudo: true 5 | yum: name=sendmail state=absent 6 | 7 | - name: install postfix 8 | sudo: true 9 | yum: name=postfix state=present 10 | notify: set MTA to postfix 11 | 12 | - name: start postfix 13 | sudo: true 14 | service: name=postfix state=started enabled=true 15 | 16 | # fedora 20 doesn't include rsyslog by default 17 | - name: install rsyslog 18 | sudo: true 19 | yum: name=rsyslog state=present 20 | 21 | - name: start rsyslog 22 | sudo: true 23 | service: name=rsyslog state=started enabled=true 24 | 25 | - name: install fxa-auth-mailer 26 | tags: code 27 | sudo: true 28 | sudo_user: app 29 | git: repo={{ auth_mailer_git_repo }} 30 | dest=/data/fxa-auth-mailer 31 | version={{ auth_mailer_git_version }} 32 | force=true 33 | notify: 34 | - install fxa-auth-mailer dependencies 35 | - restart fxa-auth-mailer 36 | 37 | - name: configure fxa-auth-mailer 38 | sudo: true 39 | sudo_user: app 40 | template: src=config.json.j2 dest=/data/fxa-auth-mailer/fxa_auth_mailerrc 41 | notify: restart fxa-auth-mailer 42 | 43 | - name: supervise fxa-auth-mailer 44 | sudo: true 45 | copy: src=fxa-auth-mailer.conf dest=/etc/supervisor.d/fxa-auth-mailer.conf 46 | notify: update supervisor 47 | 48 | - meta: flush_handlers 49 | -------------------------------------------------------------------------------- /roles/log/templates/nginx.conf.j2: 
-------------------------------------------------------------------------------- 1 | location /heka { 2 | rewrite ^/heka(.*)$ $1 break; 3 | proxy_pass http://127.0.0.1:4352; 4 | proxy_read_timeout 90; 5 | } 6 | 7 | location /logs { 8 | alias /data/kibana/dist/; 9 | index index.html index.htm; 10 | } 11 | 12 | location ~ ^/logs/_aliases$ { 13 | rewrite ^/logs(.*)$ $1 break; 14 | proxy_pass http://127.0.0.1:9200; 15 | proxy_read_timeout 90; 16 | } 17 | location ~ ^/logs/.*/_aliases$ { 18 | rewrite ^/logs(.*)$ $1 break; 19 | proxy_pass http://127.0.0.1:9200; 20 | proxy_read_timeout 90; 21 | } 22 | location ~ ^/logs/_nodes$ { 23 | rewrite ^/logs(.*)$ $1 break; 24 | proxy_pass http://127.0.0.1:9200; 25 | proxy_read_timeout 90; 26 | } 27 | location ~ ^/logs/.*/_search$ { 28 | rewrite ^/logs(.*)$ $1 break; 29 | proxy_pass http://127.0.0.1:9200; 30 | proxy_read_timeout 90; 31 | } 32 | location ~ ^/logs/.*/_mapping { 33 | rewrite ^/logs(.*)$ $1 break; 34 | proxy_pass http://127.0.0.1:9200; 35 | proxy_read_timeout 90; 36 | } 37 | 38 | # Password protected end points 39 | location ~ ^/logs/kibana-int/dashboard/.*$ { 40 | rewrite ^/logs(.*)$ $1 break; 41 | proxy_pass http://127.0.0.1:9200; 42 | proxy_read_timeout 90; 43 | } 44 | location ~ ^/logs/kibana-int/temp.*$ { 45 | rewrite ^/logs(.*)$ $1 break; 46 | proxy_pass http://127.0.0.1:9200; 47 | proxy_read_timeout 90; 48 | } 49 | -------------------------------------------------------------------------------- /aws/roles/cron_update/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install cronie 4 | sudo: true 5 | yum: name=cronie state=present 6 | 7 | - name: start crond 8 | sudo: true 9 | service: name=crond state=started enabled=true 10 | 11 | - name: install ansible 12 | sudo: true 13 | pip: name=ansible version=1.6.1 state=present 14 | 15 | - name: install fxa-dev 16 | sudo: true 17 | git: repo={{ fxadev_git_repo }} 18 | dest=/data/fxa-dev 19 | 
version={{ fxadev_git_version }} 20 | force=true 21 | 22 | - name: disable requiretty in /etc/sudoers 23 | sudo: true 24 | lineinfile: dest=/etc/sudoers state=absent regexp='Defaults\s+requiretty' validate='visudo -cf %s' 25 | 26 | # TODO make the permissions non-ec2 specific 27 | - file: path=/var/log/ansible state=directory owner=ec2-user group=ec2-user 28 | sudo: true 29 | 30 | # TODO the job should be a shell script that can try to recover from errors 31 | - name: cron update 32 | cron: name="fxa update" 33 | weekday={{ cron_time.weekday | default('*') }} 34 | month={{ cron_time.month | default('*') }} 35 | day={{ cron_time.day | default('*') }} 36 | hour={{ cron_time.hour | default('*') }} 37 | minute={{ cron_time.minute | default('*/10') }} 38 | job="cd /data/fxa-dev/aws; ansible-playbook -i localhost, local.yml --extra-vars \"stack_name={{ stack_name }} rds_host={{ rds_host }}\" > /var/log/ansible/update.log" 39 | -------------------------------------------------------------------------------- /vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | VAGRANTFILE_API_VERSION = "2" 5 | 6 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 7 | config.vm.box = "dannycoates/fedora20" 8 | config.vm.synced_folder ".", "/vagrant", disabled: true 9 | config.vm.hostname = 'fxa.local' 10 | 11 | config.vm.provider "virtualbox" do |vb, override| 12 | vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 13 | vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 14 | override.vm.network "private_network", type: "dhcp" 15 | vb.memory = 2048 16 | vb.cpus = 2 17 | override.vm.provision "ansible" do |ansible| 18 | ansible.playbook = "local.yml" 19 | ansible.verbose = 'vv' 20 | ansible.limit = 'all' 21 | ansible.ask_sudo_pass = true 22 | ansible.extra_vars = { 23 | host_ip: "{{ ansible_enp0s8.ipv4.address }}", 24 | iface: 1 25 | } 26 | end 27 | end 28 | 29 | 
config.vm.provider "vmware_fusion" do |vw, override| 30 | vw.vmx["memsize"] = "2048" 31 | vw.vmx["numvcpus"] = "2" 32 | override.vm.provision "ansible" do |ansible| 33 | ansible.playbook = "local.yml" 34 | ansible.verbose = 'vv' 35 | ansible.limit = 'all' 36 | ansible.ask_sudo_pass = true 37 | ansible.extra_vars = { 38 | host_ip: "{{ ansible_default_ipv4.address }}", 39 | iface: 0 40 | } 41 | end 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /roles/log/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create heka filters 4 | sudo: true 5 | copy: src=fxa_auth_memory.lua dest=/usr/share/heka/lua_filters/fxa_auth_memory.lua 6 | notify: restart heka 7 | 8 | - name: install java 9 | sudo: true 10 | yum: name=java-1.7.0-openjdk state=present 11 | 12 | - name: install elasticsearch 13 | sudo: true 14 | yum: name=https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.1.1.noarch.rpm state=present 15 | 16 | - name: supervise elasticsearch 17 | sudo: true 18 | copy: src=elasticsearch.conf dest=/etc/supervisor.d/elasticsearch.conf 19 | notify: update supervisor 20 | 21 | - name: install grunt 22 | sudo: true 23 | npm: name=grunt-cli global=yes state=present 24 | 25 | - name: install kibana 26 | sudo: true 27 | sudo_user: app 28 | git: repo=https://github.com/elasticsearch/kibana.git 29 | dest=/data/kibana 30 | version=master 31 | force=true 32 | notify: 33 | - install kibana dependencies 34 | - build kibana 35 | 36 | - meta: flush_handlers 37 | 38 | - name: configure kibana 39 | sudo: true 40 | sudo_user: app 41 | template: src=kibana_config.js.j2 dest=/data/kibana/dist/config.js 42 | 43 | - name: configure nginx location 44 | sudo: true 45 | template: src=nginx.conf.j2 dest=/etc/nginx/conf.d/location/kibana.conf 46 | notify: reload nginx config 47 | 48 | - name: configure heka aggregator 49 | sudo: true 50 | template: 
src=heka.toml.j2 dest=/etc/heka.d/aggregator.toml 51 | notify: restart heka 52 | 53 | - meta: flush_handlers 54 | -------------------------------------------------------------------------------- /roles/auth/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install gmp-devel 4 | sudo: true 5 | yum: name=gmp-devel state=present 6 | 7 | - name: configure nginx upstream 8 | sudo: true 9 | template: src=upstream.conf.j2 dest=/etc/nginx/conf.d/upstream/http_fxa_auth.conf 10 | notify: reload nginx config 11 | 12 | - name: configure nginx location 13 | sudo: true 14 | template: src=nginx.conf.j2 dest=/etc/nginx/conf.d/location/http_fxa_auth.conf 15 | notify: reload nginx config 16 | 17 | - name: create fxa-auth-server heka decoder 18 | sudo: true 19 | copy: src=fxa_auth_server.lua dest=/usr/share/heka/lua_decoders/fxa_auth_server.lua 20 | notify: restart heka 21 | 22 | - name: configure heka 23 | sudo: true 24 | template: src=heka.toml.j2 dest=/etc/heka.d/fxa-auth-server.toml 25 | notify: restart heka 26 | 27 | - name: install fxa-auth-server 28 | tags: code 29 | sudo: true 30 | sudo_user: app 31 | git: repo={{ auth_git_repo }} 32 | dest=/data/fxa-auth-server 33 | version={{ auth_git_version }} 34 | force=true 35 | notify: 36 | - install fxa-auth-server dependencies 37 | - gen dev key-pair 38 | - restart fxa-auth-server 39 | 40 | - name: configure fxa-auth-server 41 | sudo: true 42 | sudo_user: app 43 | template: src=config.json.j2 dest=/data/fxa-auth-server/config/stage.json 44 | notify: restart fxa-auth-server 45 | 46 | - name: supervise fxa-auth-server 47 | sudo: true 48 | copy: src=fxa-auth-server.conf dest=/etc/supervisor.d/fxa-auth-server.conf 49 | notify: update supervisor 50 | 51 | - meta: flush_handlers 52 | -------------------------------------------------------------------------------- /roles/content/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install gmp-devel 4 | sudo: true 5 | yum: name=gmp-devel state=present 6 | 7 | - name: configure nginx upstream 8 | sudo: true 9 | template: src=upstream.conf.j2 dest=/etc/nginx/conf.d/upstream/http_fxa_content.conf 10 | notify: reload nginx config 11 | 12 | - name: configure nginx location 13 | sudo: true 14 | template: src=nginx.conf.j2 dest=/etc/nginx/conf.d/location/http_fxa_content.conf 15 | notify: reload nginx config 16 | 17 | - name: configure heka 18 | sudo: true 19 | template: src=heka.toml.j2 dest=/etc/heka.d/fxa-content-server.toml 20 | notify: restart heka 21 | 22 | - name: install nonsense 23 | sudo: true 24 | npm: name={{ item }} global=yes state=present 25 | with_items: 26 | - bower 27 | - grunt-cli 28 | - phantomjs 29 | 30 | - name: install fxa-content-server 31 | tags: code 32 | sudo: true 33 | sudo_user: app 34 | git: repo={{ content_git_repo }} 35 | dest=/data/fxa-content-server 36 | version={{ content_git_version }} 37 | force=true 38 | notify: 39 | - install fxa-content-server dependencies 40 | - build fxa-content-server assets 41 | - restart fxa-content-server 42 | 43 | - name: configure fxa-content-server 44 | sudo: true 45 | sudo_user: app 46 | template: src=config.json.j2 dest=/data/fxa-content-server/server/config/local.json 47 | notify: restart fxa-content-server 48 | 49 | - name: supervise fxa-content-server 50 | sudo: true 51 | copy: src=fxa-content-server.conf dest=/etc/supervisor.d/fxa-content-server.conf 52 | notify: update supervisor 53 | 54 | - meta: flush_handlers 55 | -------------------------------------------------------------------------------- /vagrant/insecure_private_key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzI 3 | 
w+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoP 4 | kcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2 5 | hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NO 6 | Td0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcW 7 | yLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQIBIwKCAQEA4iqWPJXtzZA68mKd 8 | ELs4jJsdyky+ewdZeNds5tjcnHU5zUYE25K+ffJED9qUWICcLZDc81TGWjHyAqD1 9 | Bw7XpgUwFgeUJwUlzQurAv+/ySnxiwuaGJfhFM1CaQHzfXphgVml+fZUvnJUTvzf 10 | TK2Lg6EdbUE9TarUlBf/xPfuEhMSlIE5keb/Zz3/LUlRg8yDqz5w+QWVJ4utnKnK 11 | iqwZN0mwpwU7YSyJhlT4YV1F3n4YjLswM5wJs2oqm0jssQu/BT0tyEXNDYBLEF4A 12 | sClaWuSJ2kjq7KhrrYXzagqhnSei9ODYFShJu8UWVec3Ihb5ZXlzO6vdNQ1J9Xsf 13 | 4m+2ywKBgQD6qFxx/Rv9CNN96l/4rb14HKirC2o/orApiHmHDsURs5rUKDx0f9iP 14 | cXN7S1uePXuJRK/5hsubaOCx3Owd2u9gD6Oq0CsMkE4CUSiJcYrMANtx54cGH7Rk 15 | EjFZxK8xAv1ldELEyxrFqkbE4BKd8QOt414qjvTGyAK+OLD3M2QdCQKBgQDtx8pN 16 | CAxR7yhHbIWT1AH66+XWN8bXq7l3RO/ukeaci98JfkbkxURZhtxV/HHuvUhnPLdX 17 | 3TwygPBYZFNo4pzVEhzWoTtnEtrFueKxyc3+LjZpuo+mBlQ6ORtfgkr9gBVphXZG 18 | YEzkCD3lVdl8L4cw9BVpKrJCs1c5taGjDgdInQKBgHm/fVvv96bJxc9x1tffXAcj 19 | 3OVdUN0UgXNCSaf/3A/phbeBQe9xS+3mpc4r6qvx+iy69mNBeNZ0xOitIjpjBo2+ 20 | dBEjSBwLk5q5tJqHmy/jKMJL4n9ROlx93XS+njxgibTvU6Fp9w+NOFD/HvxB3Tcz 21 | 6+jJF85D5BNAG3DBMKBjAoGBAOAxZvgsKN+JuENXsST7F89Tck2iTcQIT8g5rwWC 22 | P9Vt74yboe2kDT531w8+egz7nAmRBKNM751U/95P9t88EDacDI/Z2OwnuFQHCPDF 23 | llYOUI+SpLJ6/vURRbHSnnn8a/XG+nzedGH5JGqEJNQsz+xT2axM0/W/CRknmGaJ 24 | kda/AoGANWrLCz708y7VYgAtW2Uf1DPOIYMdvo6fxIB5i9ZfISgcJ/bbCUkFrhoH 25 | +vq/5CIWxCPp0f85R4qxxQ5ihxJ0YDQT9Jpx4TMss4PSavPaBH3RXow5Ohe+bYoQ 26 | NE5OgEXk2wVfZczCZpigBKbKZHNYcelXtTt/nP3rsCuGcM4h53s= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /roles/auth/files/fxa_auth_server.lua: -------------------------------------------------------------------------------- 1 | -- This Source Code Form is subject to the terms of the Mozilla Public 2 | -- License, v. 2.0. 
If a copy of the MPL was not distributed with this 3 | -- file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | require "cjson" 6 | require "lpeg" 7 | require "string" 8 | local clf = require "common_log_format" 9 | local dt = require "date_time" 10 | local util = require "util" 11 | 12 | local msg = { 13 | Timestamp = nil, 14 | Type = nil, 15 | Hostname = nil, 16 | Pid = nil, 17 | EnvVersion = nil, 18 | Fields = nil 19 | } 20 | 21 | function process_message() 22 | json = cjson.decode(read_message("Payload")) 23 | if not json then return -1 end 24 | 25 | local ts = lpeg.match(dt.rfc3339, json.time) 26 | if not ts then return -1 end 27 | 28 | msg.Timestamp = dt.time_to_ns(ts) 29 | json.time = nil 30 | 31 | if json.op then 32 | msg.Type = json.op 33 | json.op = nil 34 | else 35 | msg.Type = "unknown" 36 | end 37 | 38 | msg.Hostname = json.hostname 39 | json.hostname = nil 40 | 41 | msg.Pid = json.pid 42 | json.pid = nil 43 | 44 | msg.EnvVersion = json.v 45 | json.v = nil 46 | 47 | if json.lang then 48 | json.lang = string.match(json.lang:lower(), "^%a%a") 49 | end 50 | 51 | if json.agent then 52 | json.user_agent_browser, 53 | json.user_agent_version, 54 | json.user_agent_os = clf.normalize_user_agent(json.agent) 55 | json.agent = nil 56 | end 57 | 58 | if json.err then 59 | util.table_to_fields(json.err, json, "err") 60 | json.err = nil 61 | end 62 | 63 | msg.Fields = json 64 | if not pcall(inject_message, msg) then return -1 end 65 | 66 | return 0 67 | end 68 | -------------------------------------------------------------------------------- /roles/authdb/files/fxa_auth_db_server.lua: -------------------------------------------------------------------------------- 1 | -- This Source Code Form is subject to the terms of the Mozilla Public 2 | -- License, v. 2.0. If a copy of the MPL was not distributed with this 3 | -- file, You can obtain one at http://mozilla.org/MPL/2.0/. 
4 | 5 | require "cjson" 6 | require "lpeg" 7 | require "string" 8 | local clf = require "common_log_format" 9 | local dt = require "date_time" 10 | local util = require "util" 11 | 12 | local msg = { 13 | Timestamp = nil, 14 | Type = nil, 15 | Hostname = nil, 16 | Pid = nil, 17 | EnvVersion = nil, 18 | Fields = nil 19 | } 20 | 21 | function process_message() 22 | json = cjson.decode(read_message("Payload")) 23 | if not json then return -1 end 24 | 25 | local ts = lpeg.match(dt.rfc3339, json.time) 26 | if not ts then return -1 end 27 | 28 | msg.Timestamp = dt.time_to_ns(ts) 29 | json.time = nil 30 | 31 | if json.op then 32 | msg.Type = json.op 33 | json.op = nil 34 | else 35 | msg.Type = "unknown" 36 | end 37 | 38 | msg.Hostname = json.hostname 39 | json.hostname = nil 40 | 41 | msg.Pid = json.pid 42 | json.pid = nil 43 | 44 | msg.EnvVersion = json.v 45 | json.v = nil 46 | 47 | if json.lang then 48 | json.lang = string.match(json.lang:lower(), "^%a%a") 49 | end 50 | 51 | if json.agent then 52 | json.user_agent_browser, 53 | json.user_agent_version, 54 | json.user_agent_os = clf.normalize_user_agent(json.agent) 55 | json.agent = nil 56 | end 57 | 58 | if json.err then 59 | util.table_to_fields(json.err, json, "err") 60 | json.err = nil 61 | end 62 | 63 | msg.Fields = json 64 | if not pcall(inject_message, msg) then return -1 end 65 | 66 | return 0 67 | end 68 | -------------------------------------------------------------------------------- /roles/web/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | # For more information on configuration, see: 2 | # * Official English Documentation: http://nginx.org/en/docs/ 3 | # * Official Russian Documentation: http://nginx.org/ru/docs/ 4 | 5 | user nginx; 6 | worker_processes 1; 7 | 8 | error_log /var/log/nginx/error.log; 9 | #error_log /var/log/nginx/error.log notice; 10 | #error_log /var/log/nginx/error.log info; 11 | 12 | pid /var/run/nginx.pid; 13 | 14 | 15 | 
events { 16 | worker_connections 1024; 17 | } 18 | 19 | 20 | http { 21 | include /etc/nginx/mime.types; 22 | default_type application/octet-stream; 23 | 24 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 25 | '$status $body_bytes_sent "$http_referer" ' 26 | '"$http_user_agent" "$http_x_forwarded_for"'; 27 | 28 | access_log /var/log/nginx/access.log main; 29 | 30 | sendfile on; 31 | #tcp_nopush on; 32 | 33 | #keepalive_timeout 0; 34 | keepalive_timeout 65; 35 | 36 | #gzip on; 37 | 38 | index index.html index.htm; 39 | 40 | include /etc/nginx/conf.d/upstream/*.conf; 41 | 42 | server { 43 | listen 80; 44 | server_name {{ domain_name }}; 45 | 46 | #charset koi8-r; 47 | 48 | #access_log /var/log/nginx/host.access.log main; 49 | 50 | include /etc/nginx/conf.d/location/*.conf; 51 | 52 | # redirect server error pages to the static page /40x.html 53 | # 54 | error_page 404 /404.html; 55 | location = /40x.html { 56 | } 57 | 58 | # redirect server error pages to the static page /50x.html 59 | # 60 | error_page 500 502 503 504 /50x.html; 61 | location = /50x.html { 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /aws/dev.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | connection: local 5 | gather_facts: false 6 | vars_files: 7 | - "environments/{{ stack_name }}.yml" 8 | vars: 9 | key_name: "{{ stack_name }}-fxadev" 10 | tasks: 11 | - name: create key pair 12 | local_action: 13 | module: ec2_key 14 | name: "{{ key_name }}" 15 | region: "{{ region }}" 16 | key_material: "{{ item }}" 17 | with_file: ~/.ssh/id_rsa.pub 18 | 19 | - name: create basic stack (this will take a while) 20 | action: cloudformation 21 | stack_name="{{ stack_name }}-fxadev" 22 | state=present 23 | region="{{ region }}" 24 | template=cloudformation/moz-single.json 25 | args: 26 | template_parameters: 27 | KeyName: "{{ key_name }}" 28 | HostedZone: 
"{{ hosted_zone }}" 29 | Subdomain: "{{ subdomain }}" 30 | SSLCertificateName: "{{ ssl_certificate_name }}" 31 | RDSPassword: "{{ rds_password }}" 32 | register: stack 33 | 34 | - debug: var=stack 35 | 36 | - name: add new host 37 | add_host: hostname={{ stack['stack_outputs']['Instance'] }} groupname=whatevs 38 | 39 | - name: Configure instance 40 | hosts: whatevs 41 | remote_user: ec2-user 42 | gather_facts: true 43 | vars: 44 | rds_host: "{{ hostvars['localhost']['stack']['stack_outputs']['RDSEndpoint'] }}" 45 | authdb_primary_host: "{{ rds_host }}" 46 | authdb_primary_password: "{{ rds_password }}" 47 | authdb_replica_host: "{{ rds_host }}" 48 | authdb_replica_password: "{{ rds_password }}" 49 | oauth_db_host: "{{ rds_host }}" 50 | oauth_db_password: "{{ rds_password }}" 51 | vars_files: 52 | - "environments/{{ stack_name }}.yml" 53 | roles: 54 | - ses 55 | - memcached 56 | - customs 57 | - authdb 58 | - auth 59 | - content 60 | - oauth 61 | - profile 62 | - rp 63 | - log 64 | - cron_update 65 | - team 66 | -------------------------------------------------------------------------------- /roles/log/templates/heka.toml.j2: -------------------------------------------------------------------------------- 1 | [hekad] 2 | max_timer_inject = 100 3 | maxprocs = 1 4 | max_process_duration = 1000000 5 | 6 | [TcpInput] 7 | address = "0.0.0.0:5565" 8 | parser_type = "message.proto" 9 | decoder = "ProtobufDecoder" 10 | 11 | [DashboardOutput] 12 | ticker_interval = 5 13 | 14 | [FxaSandbox] 15 | type = "SandboxManagerFilter" 16 | message_matcher = "Type == 'heka.control.sandbox'" 17 | max_filters = 15 18 | 19 | [AuthElasticSearch] 20 | type = "ElasticSearchOutput" 21 | message_matcher = "Logger == 'FxaAuth' && Type != 'stat' && Type != 'server.nonceFunc'" 22 | index = "authlogs-%{2006-01-02}" 23 | type_name = "authlogline" 24 | server = "http://127.0.0.1:9200" 25 | cluster = "elasticsearch" 26 | esindexfromtimestamp = true 27 | flush_interval = 5000 28 | flush_count = 100 29 
| format = "clean" 30 | id = "%{UUID}" 31 | 32 | [AuthDBElasticSearch] 33 | type = "ElasticSearchOutput" 34 | message_matcher = "Logger == 'FxaAuthDB' && Type != 'stat'" 35 | index = "authlogs-%{2006-01-02}" 36 | type_name = "authlogline" 37 | server = "http://127.0.0.1:9200" 38 | cluster = "elasticsearch" 39 | esindexfromtimestamp = true 40 | flush_interval = 5000 41 | flush_count = 100 42 | format = "clean" 43 | id = "%{UUID}" 44 | 45 | [ContentElasticSearch] 46 | type = "ElasticSearchOutput" 47 | message_matcher = "Logger == 'FxaContent'" 48 | index = "authlogs-%{2006-01-02}" 49 | type_name = "authlogline" 50 | server = "http://127.0.0.1:9200" 51 | cluster = "elasticsearch" 52 | esindexfromtimestamp = true 53 | flush_interval = 5000 54 | flush_count = 100 55 | format = "clean" 56 | id = "%{UUID}" 57 | 58 | 59 | 60 | [FxaAuthMemory] 61 | type = "SandboxFilter" 62 | script_type = "lua" 63 | filename = "lua_filters/fxa_auth_memory.lua" 64 | ticker_interval = 60 65 | preserve_data = true 66 | message_matcher = "Logger == 'FxaAuth' && Type == 'stat' && Fields[stat] == 'mem'" 67 | 68 | [FxaAuthMemory.config] 69 | anomaly_config = 'mww("Fxa Auth Server", 1, 30, 10, 0.0001, increasing)' 70 | -------------------------------------------------------------------------------- /roles/email/files/fxa-auth-mailer.conf: -------------------------------------------------------------------------------- 1 | [program:fxa-auth-mailer] 2 | command=node /data/fxa-auth-mailer/bin/server.js 3 | autostart=true ; start at supervisord start (default: true) 4 | autorestart=unexpected ; whether/when to restart (default: unexpected) 5 | startsecs=1 ; number of secs prog must stay running (def. 
1) 6 | startretries=3 ; max # of serial start failures (default 3) 7 | stopwaitsecs=2 ; max num secs to wait b4 SIGKILL (default 10) 8 | stdout_logfile=NONE ; stdout log path, NONE for none; default AUTO 9 | stderr_logfile=/var/log/fxa-auth-mailer.log ; stderr log path, NONE for none; default AUTO 10 | stderr_logfile_maxbytes=10MB ; max # logfile bytes b4 rotation (default 50MB) 11 | stderr_logfile_backups=10 ; # of stderr logfile backups (default 10) 12 | user=app 13 | 14 | ;process_name=%(program_name)s ; process_name expr (default %(program_name)s) 15 | ;numprocs=1 ; number of processes copies to start (def 1) 16 | ;directory=/tmp ; directory to cwd to before exec (def no cwd) 17 | ;umask=022 ; umask for process (default None) 18 | ;priority=999 ; the relative start priority (default 999) 19 | ;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2) 20 | ;stopsignal=QUIT ; signal used to kill process (default TERM) 21 | ;stopasgroup=false ; send stop signal to the UNIX process group (default false) 22 | ;killasgroup=false ; SIGKILL the UNIX process group (def false) 23 | ;user=chrism ; setuid to this UNIX account to run the program 24 | ;redirect_stderr=true ; redirect proc stderr to stdout (default false) 25 | ;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 26 | ;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10) 27 | ;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) 28 | ;stdout_events_enabled=false ; emit events on stdout writes (default false) 29 | ;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) 30 | ;stderr_events_enabled=false ; emit events on stderr writes (default false) 31 | ;environment=A="1",B="2" ; process environment additions (def no adds) 32 | ;serverurl=AUTO ; override serverurl computation (childutils) 33 | -------------------------------------------------------------------------------- /roles/authdb/files/fxa-auth-db-server.conf: 
-------------------------------------------------------------------------------- 1 | [program:fxa-auth-db-server] 2 | command=node /data/fxa-auth-db-server/bin/db_server.js 3 | autostart=true ; start at supervisord start (default: true) 4 | autorestart=unexpected ; whether/when to restart (default: unexpected) 5 | startsecs=1 ; number of secs prog must stay running (def. 1) 6 | startretries=3 ; max # of serial start failures (default 3) 7 | stopwaitsecs=2 ; max num secs to wait b4 SIGKILL (default 10) 8 | stdout_logfile=NONE ; stdout log path, NONE for none; default AUTO 9 | stderr_logfile=/var/log/fxa-auth-db.log ; stderr log path, NONE for none; default AUTO 10 | stderr_logfile_maxbytes=10MB ; max # logfile bytes b4 rotation (default 50MB) 11 | stderr_logfile_backups=10 ; # of stderr logfile backups (default 10) 12 | user=app 13 | directory=/data/fxa-auth-db-server 14 | 15 | ;process_name=%(program_name)s ; process_name expr (default %(program_name)s) 16 | ;numprocs=1 ; number of processes copies to start (def 1) 17 | ;directory=/tmp ; directory to cwd to before exec (def no cwd) 18 | ;umask=022 ; umask for process (default None) 19 | ;priority=999 ; the relative start priority (default 999) 20 | ;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2) 21 | ;stopsignal=QUIT ; signal used to kill process (default TERM) 22 | ;stopasgroup=false ; send stop signal to the UNIX process group (default false) 23 | ;killasgroup=false ; SIGKILL the UNIX process group (def false) 24 | ;user=chrism ; setuid to this UNIX account to run the program 25 | ;redirect_stderr=true ; redirect proc stderr to stdout (default false) 26 | ;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 27 | ;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10) 28 | ;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) 29 | ;stdout_events_enabled=false ; emit events on stdout writes (default false) 30 | 
;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) 31 | ;stderr_events_enabled=false ; emit events on stderr writes (default false) 32 | ;environment=A="1",B="2" ; process environment additions (def no adds) 33 | ;serverurl=AUTO ; override serverurl computation (childutils) 34 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Development environment for Firefox Accounts 2 | 3 | ## Prerequisites 4 | 5 | For a local virtual machine environment: 6 | 7 | - [vagrant](http://www.vagrantup.com/downloads.html) >=1.5 8 | - [ansible](http://docs.ansible.com/intro_installation.html) >=1.5 9 | - [virtualbox](https://www.virtualbox.org/wiki/Downloads) or vmware ([fusion](https://www.vmware.com/products/fusion/) or [workstation](http://www.vmware.com/products/workstation)) 10 | 11 | For an AWS environment: 12 | 13 | - [ansible](http://docs.ansible.com/intro_installation.html) >=1.5 14 | - [boto](https://github.com/boto/boto#installation) 15 | 16 | ## Usage 17 | 18 | ### Local 19 | 20 | To run a local virtual machine environment change directory to `vagrant` 21 | 22 | ```sh 23 | cd vagrant 24 | ``` 25 | 26 | Running `make` or `make vmware` will: 27 | 28 | - build a base vagrant box image 29 | - create a cluster of virtual machines 30 | - provision each machine with a role 31 | - set an entry for `fxa.local` in `/etc/hosts` 32 | 33 | The fxa-content-server should now be accessible from a browser at [http://fxa.local](http://fxa.local) 34 | 35 | To pull code changes from github: 36 | 37 | ```sh 38 | make update-code 39 | ``` 40 | 41 | To update other provisioning changes: 42 | 43 | ```sh 44 | make update 45 | ``` 46 | 47 | ### AWS 48 | 49 | To run on AWS change directory to `aws` 50 | 51 | ```sh 52 | cd aws 53 | ``` 54 | 55 | 1. Set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables 56 | 3.
create a `environments/foo.yml` file ('foo' can be anything) 57 | a) see `environments/EXAMPLE.yml` for a base reference 58 | 4. run `make foo` 59 | 60 | To update the stack just run `make foo` again. 61 | 62 | You can ssh into the EC2 instance with `ssh ec2-user@{{ whatever you configured in foo.yml }}` 63 | 64 | ## Layout Notes 65 | 66 | - fxa sources are in `/data/fxa-*` 67 | - node processes are run by supervisord 68 | - config in `/etc/supervisor.d` 69 | - run `sudo supervisorctl status` for info 70 | - nginx is the web frontend 71 | - config in `/etc/nginx/conf.d` 72 | - node process logs are in `/var/log/fxa-*` 73 | 74 | ## Example urls 75 | 76 | - logs: https://latest.dev.lcip.org/logs/ 77 | - heka: https://latest.dev.lcip.org/heka/ 78 | - content server: https://latest.dev.lcip.org 79 | - auth server: https://latest.dev.lcip.org/auth/ 80 | - oauth server: https://oauth-latest.dev.lcip.org 81 | - demo oauth site: https://123done-latest.dev.lcip.org 82 | -------------------------------------------------------------------------------- /roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create app user 4 | sudo: true 5 | user: name=app state=present 6 | 7 | - name: update installed packages 8 | sudo: true 9 | yum: name=* state=latest 10 | 11 | - name: install base packages 12 | sudo: true 13 | yum: name={{ item }} state=present 14 | with_items: 15 | - gcc-c++ 16 | - git 17 | - ntp 18 | - python-pip 19 | 20 | - name: install nave 21 | sudo: true 22 | get_url: url=https://raw.githubusercontent.com/dannycoates/nave/master/nave.sh 23 | dest=/usr/bin/nave 24 | mode=755 25 | 26 | - name: install node 27 | sudo: true 28 | command: /usr/bin/nave usemain stable 29 | # TODO detect actual changes 30 | changed_when: false 31 | 32 | - name: install supervisord 33 | sudo: true 34 | pip: name=supervisor version=3.0 state=present 35 | 36 | - file: path=/etc/supervisor.d state=directory
sudo: true 38 | 39 | - file: path=/data state=directory owner=app group=app mode=0777 40 | sudo: true 41 | 42 | - name: upstart or systemd? 43 | sudo: true 44 | command: which initctl 45 | failed_when: false 46 | changed_when: false 47 | register: is_upstart 48 | 49 | - name: upstart supervisord 50 | sudo: true 51 | copy: src=supervisor.conf dest=/etc/init/supervisor.conf owner=root group=root 52 | when: is_upstart.rc == 0 53 | 54 | - name: systemd supervisord 55 | sudo: true 56 | copy: src=supervisor.systemd dest=/etc/systemd/system/supervisor.service owner=root group=root 57 | when: is_upstart.rc != 0 58 | 59 | - name: configure supervisord 60 | sudo: true 61 | copy: src=supervisord.conf dest=/etc/supervisord.conf owner=root group=root 62 | notify: restart supervisor 63 | 64 | - name: start supervisord 65 | sudo: true 66 | service: name=supervisor state=started 67 | 68 | - name: install heka 69 | sudo: true 70 | yum: name=https://dl.dropboxusercontent.com/u/3684283/heka-0_6_0-linux-amd64.rpm state=present 71 | 72 | - file: path=/etc/heka.d state=directory 73 | sudo: true 74 | 75 | - name: supervise heka 76 | sudo: true 77 | copy: src=hekad.conf dest=/etc/supervisor.d/hekad.conf 78 | notify: update supervisor 79 | 80 | - name: configure heka 81 | sudo: true 82 | copy: src=hekad.toml dest=/etc/heka.d/!hekad.toml 83 | notify: restart heka 84 | 85 | - name: configure ntp 86 | sudo: true 87 | copy: src=ntp.conf dest=/etc/ntp.conf 88 | notify: restart ntp 89 | 90 | - name: start ntp 91 | sudo: true 92 | service: name=ntpd state=started enabled=true 93 | 94 | - meta: flush_handlers 95 | -------------------------------------------------------------------------------- /roles/log/files/fxa_auth_memory.lua: -------------------------------------------------------------------------------- 1 | -- This Source Code Form is subject to the terms of the Mozilla Public 2 | -- License, v. 2.0. 
If a copy of the MPL was not distributed with this 3 | -- file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 | 5 | require "circular_buffer" 6 | require "string" 7 | local alert = require "alert" 8 | local annotation = require "annotation" 9 | local anomaly = require "anomaly" 10 | 11 | local static_title = "Fxa Auth Server" 12 | local rows = read_config("rows") or 1440 13 | local sec_per_row = read_config("sec_per_row") or 60 14 | local pid_expiration = (read_config("pid_expiration") or 600) * 1e9 15 | local HEAP_USED = 1 16 | local RSS = 2 17 | 18 | pids = {} 19 | last_update = 0 20 | 21 | function process_message () 22 | local ts = read_message("Timestamp") 23 | local host = read_message("Hostname") 24 | local pid = read_message("Pid") 25 | local hu = read_message("Fields[heapUsed]") 26 | local rss = read_message("Fields[rss]") 27 | 28 | local key = string.format("%s PID:%d", host, pid) 29 | local p = pids[key] 30 | if not p then 31 | p = circular_buffer.new(rows, 2, sec_per_row) 32 | p:set_header(HEAP_USED , "heapUsed" , "B", "max") 33 | p:set_header(RSS , "rss" , "B", "max") 34 | pids[key] = p 35 | end 36 | 37 | if last_update < ts then 38 | last_update = ts 39 | end 40 | 41 | p:set(ts, HEAP_USED , hu) 42 | p:set(ts, RSS , rss) 43 | return 0 44 | end 45 | 46 | function timer_event(ns) 47 | for k, v in pairs(pids) do 48 | if last_update - v:current_time() < pid_expiration then 49 | local title = string.format("%s:%s", static_title, k) 50 | if anomaly_config then 51 | if not alert.throttled(ns) then 52 | local msg, annos = anomaly.detect(ns, static_title, v, anomaly_config) 53 | if msg then 54 | annotation.concat(k, annos) 55 | alert.queue(ns, string.format("%s\n%s", k, msg)) 56 | end 57 | end 58 | output({annotations = annotation.prune(k, ns)}, v) 59 | inject_message("cbuf", title) 60 | else 61 | inject_message(v, title) 62 | end 63 | else 64 | annotation.remove(k) 65 | pids[k] = nil 66 | end 67 | end 68 | alert.send_queue(ns) 69 | end 70 | 
-------------------------------------------------------------------------------- /roles/log/templates/kibana_config.js.j2: -------------------------------------------------------------------------------- 1 | /** @scratch /configuration/config.js/1 2 | * 3 | * == Configuration 4 | * config.js is where you will find the core Kibana configuration. This file contains parameter that 5 | * must be set before kibana is run for the first time. 6 | */ 7 | define(['settings'], 8 | function (Settings) { 9 | "use strict"; 10 | 11 | /** @scratch /configuration/config.js/2 12 | * 13 | * === Parameters 14 | */ 15 | return new Settings({ 16 | 17 | /** @scratch /configuration/config.js/5 18 | * 19 | * ==== elasticsearch 20 | * 21 | * The URL to your elasticsearch server. You almost certainly don't 22 | * want +http://localhost:9200+ here. Even if Kibana and Elasticsearch are on 23 | * the same host. By default this will attempt to reach ES at the same host you have 24 | * kibana installed on. You probably want to set it to the FQDN of your 25 | * elasticsearch host 26 | * 27 | * Note: this can also be an object if you want to pass options to the http client. For example: 28 | * 29 | * +elasticsearch: {server: "http://localhost:9200", withCredentials: true}+ 30 | * 31 | */ 32 | elasticsearch: "https://"+window.location.hostname+"/logs", 33 | 34 | /** @scratch /configuration/config.js/5 35 | * 36 | * ==== default_route 37 | * 38 | * This is the default landing page when you don't specify a dashboard to load. You can specify 39 | * files, scripts or saved dashboards here. 
For example, if you had saved a dashboard called 40 | * `WebLogs' to elasticsearch you might use: 41 | * 42 | * default_route: '/dashboard/elasticsearch/WebLogs', 43 | */ 44 | default_route : '/dashboard/file/default.json', 45 | 46 | /** @scratch /configuration/config.js/5 47 | * 48 | * ==== kibana-int 49 | * 50 | * The default ES index to use for storing Kibana specific object 51 | * such as stored dashboards 52 | */ 53 | kibana_index: "kibana-int", 54 | 55 | /** @scratch /configuration/config.js/5 56 | * 57 | * ==== panel_name 58 | * 59 | * An array of panel modules available. Panels will only be loaded when they are defined in the 60 | * dashboard, but this list is used in the "add panel" interface. 61 | */ 62 | panel_names: [ 63 | 'histogram', 64 | 'map', 65 | 'goal', 66 | 'table', 67 | 'filtering', 68 | 'timepicker', 69 | 'text', 70 | 'hits', 71 | 'column', 72 | 'trends', 73 | 'bettermap', 74 | 'query', 75 | 'terms', 76 | 'stats', 77 | 'sparklines' 78 | ] 79 | }); 80 | }); 81 | -------------------------------------------------------------------------------- /roles/oauth/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "contentUrl": "{{ content_public_url }}/oauth/", 3 | "db": { 4 | "driver": "mysql" 5 | }, 6 | "mysql": { 7 | "password": "{{ oauth_db_password }}", 8 | "host": "{{ oauth_db_host }}" 9 | }, 10 | "browserid": { 11 | "issuer": "{{ browserid_issuer }}" 12 | }, 13 | "publicUrl": "{{ oauth_public_url }}", 14 | "clients": [ 15 | { 16 | "id": "dcdb5ae7add825d2", 17 | "secret": "b93ef8a8f3e553a430d7e5b904c6132b2722633af9f03128029201d24a97f2a8", 18 | "name": "123done", 19 | "imageUri": "{{ oauth_public_url }}/img/logo@2x.png", 20 | "redirectUri": "{{ rp_public_url }}/api/oauth", 21 | "whitelisted": true 22 | }, 23 | { 24 | "id": "dcdb5ae7add825d3", 25 | "secret": "b93ef8a8f3e553a430d7e5b904c6132b2722633af9f03128029201d24a97f2a9", 26 | "name": "Loop", 27 | "imageUri": "{{ oauth_public_url 
}}/img/logo@2x.png", 28 | "redirectUri": "http://localhost:5000/fxaOauth/redirect", 29 | "whitelisted": true 30 | }, 31 | { 32 | "id": "11c73e2d918ae5d9", 33 | "secret": "866751b57bb4f7bb9d090e837839c8fb09ba2ccc501038a6b3e4dd88b801d675", 34 | "name": "Firefox Marketplace DEV", 35 | "imageUri": "{{ oauth_public_url }}/img/logo@2x.png", 36 | "redirectUri": "https://marketplace-dev.allizom.org/fxa/authorize", 37 | "whitelisted": true 38 | }, 39 | { 40 | "id": "56fc6da8d185c8e3", 41 | "secret": "d1a8f0088e565d066c3d9f28587f5875a800e0a1618a4aaeabd00e162ac583a3", 42 | "name": "Firefox Marketplace Dev", 43 | "imageUri": "https://marketplace-dev.mozflare.net/media/img/mkt/logos/128.png", 44 | "redirectUri": "http://127.0.0.1/fxa-authorize", 45 | "whitelisted": true 46 | }, 47 | { 48 | "id": "56fc6da8d185c8e4", 49 | "secret": "d1a8f0088e565d066c3d9f28587f5875a800e0a1618a4aaeabd00e162ac583a4", 50 | "name": "Fireplace Marketplace Dev", 51 | "imageUri": "https://marketplace-dev.mozflare.net/media/img/mkt/logos/128.png", 52 | "redirectUri": "https://127.0.0.1:8080/fxa-authorize", 53 | "whitelisted": true 54 | }, 55 | { 56 | "id": "13a9e472ef33b1b8", 57 | "secret": "d17a43afb0d646dfe1dd6bfacfc5df3eb45f90e0adf86fedd68ffb22310f45f6", 58 | "name": "FMD Local", 59 | "imageUri": "https://marketplace-dev.mozflare.net/media/img/mkt/logos/128.png", 60 | "redirectUri": "http://localhost:8000/oauth", 61 | "whitelisted": true 62 | }, 63 | { 64 | "id": "0fddc2b28f47c2d7", 65 | "secret": "e078c99ff06b920a7436e2fda803bcb0bd44e3186ff5d13b9b0f31a7c2daf078", 66 | "name": "Find My Device Dev", 67 | "imageUri": "https://marketplace-dev.mozflare.net/media/img/mkt/logos/128.png", 68 | "redirectUri": "http://fmd.dev.mozaws.net/oauth", 69 | "whitelisted": true 70 | } 71 | ], 72 | "env": "stage", 73 | "logging": { 74 | "formatters": { 75 | "pretty": { 76 | "format": "[%(date)s] %(name)s.%(levelname)s: %(message)s", 77 | "colorize": false 78 | } 79 | }, 80 | "handlers": { 81 | "console": { 82 | 
"formatter": "pretty" 83 | } 84 | }, 85 | "loggers": { 86 | "fxa": { 87 | "level": "verbose" 88 | } 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /aws/cloudformation/basic.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "Single machine Fxa Dev environment", 5 | 6 | "Parameters" : { 7 | "KeyName": { 8 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the web server", 9 | "Type": "String", 10 | "MinLength": "1", 11 | "MaxLength": "255", 12 | "AllowedPattern" : "[\\x20-\\x7E]*", 13 | "ConstraintDescription" : "can contain only ASCII characters." 14 | }, 15 | "HostedZone" : { 16 | "Type" : "String", 17 | "Description" : "The DNS name of an existing Amazon Route 53 hosted zone" 18 | }, 19 | "Subdomain" : { 20 | "Type" : "String", 21 | "Description" : "subdomain" 22 | } 23 | }, 24 | 25 | "Mappings" : { 26 | "RegionMap" : { 27 | "us-east-1" : { "AMI" : "ami-fb8e9292" }, 28 | "us-west-1" : { "AMI" : "ami-7aba833f" }, 29 | "us-west-2" : { "AMI" : "ami-043a5034" }, 30 | "eu-west-1" : { "AMI" : "ami-2918e35e" }, 31 | "sa-east-1" : { "AMI" : "ami-215dff3c" }, 32 | "ap-southeast-1" : { "AMI" : "ami-b40d5ee6" }, 33 | "ap-southeast-2" : { "AMI" : "ami-3b4bd301" }, 34 | "ap-northeast-1" : { "AMI" : "ami-c9562fc8" } 35 | } 36 | }, 37 | 38 | "Resources" : { 39 | "Ec2Instance" : { 40 | "Type" : "AWS::EC2::Instance", 41 | "Properties" : { 42 | "ImageId" : { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "AMI" ]}, 43 | "KeyName" : { "Ref" : "KeyName" }, 44 | "InstanceType" : "m3.medium", 45 | "SecurityGroups" : [{ "Ref" : "FxaDevSecurityGroup" }] 46 | } 47 | }, 48 | 49 | "FxaDevSecurityGroup" : { 50 | "Type" : "AWS::EC2::SecurityGroup", 51 | "Properties" : { 52 | "GroupDescription" : "Fxa Dev", 53 | "SecurityGroupIngress" : [ 54 | { 55 | "IpProtocol" : "tcp", 56 | "FromPort" 
: "22", "ToPort" : "22", 57 | "CidrIp" : "0.0.0.0/0" 58 | }, 59 | { 60 | "IpProtocol" : "tcp", 61 | "FromPort" : "80", "ToPort" : "81", 62 | "CidrIp" : "0.0.0.0/0" 63 | }, 64 | { 65 | "IpProtocol" : "tcp", 66 | "FromPort" : "443", "ToPort" : "443", 67 | "CidrIp" : "0.0.0.0/0" 68 | }, 69 | { 70 | "IpProtocol" : "tcp", 71 | "FromPort" : "4352", "ToPort" : "4352", 72 | "CidrIp" : "0.0.0.0/0" 73 | }, 74 | { 75 | "IpProtocol" : "tcp", 76 | "FromPort" : "9199", "ToPort" : "9199", 77 | "CidrIp" : "0.0.0.0/0" 78 | } 79 | ] 80 | } 81 | }, 82 | 83 | "FxaDNSRecord" : { 84 | "Type" : "AWS::Route53::RecordSet", 85 | "Properties" : { 86 | "HostedZoneName" : { "Fn::Join" : [ "", [{"Ref" : "HostedZone"}, "." ]]}, 87 | "Name" : { "Fn::Join" : [ "", [{"Ref" : "Subdomain"}, ".", {"Ref" : "HostedZone"}, "."]]}, 88 | "Type" : "A", 89 | "TTL" : "30", 90 | "ResourceRecords" : [ { "Fn::GetAtt" : [ "Ec2Instance", "PublicIp" ] } ] 91 | } 92 | } 93 | }, 94 | 95 | "Outputs" : { 96 | "Instance" : { 97 | "Value" : { "Fn::GetAtt" : [ "Ec2Instance", "PublicDnsName" ] }, 98 | "Description" : "DNS Name of the newly created EC2 instance" 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /vagrant/ansible.cfg: -------------------------------------------------------------------------------- 1 | # config file for ansible -- http://ansible.com/ 2 | # ============================================== 3 | 4 | # nearly all parameters can be overridden in ansible-playbook 5 | # or with command line flags. ansible will read ANSIBLE_CONFIG, 6 | # ansible.cfg in the current working directory, .ansible.cfg in 7 | # the home directory or /etc/ansible/ansible.cfg, whichever it 8 | # finds first 9 | 10 | [defaults] 11 | 12 | # some basic default values... 
13 | 14 | hostfile = /etc/ansible/hosts 15 | library = /usr/share/ansible 16 | remote_tmp = $HOME/.ansible/tmp 17 | pattern = * 18 | forks = 5 19 | poll_interval = 15 20 | sudo_user = root 21 | #ask_sudo_pass = True 22 | #ask_pass = True 23 | transport = smart 24 | remote_port = 22 25 | module_lang = C 26 | 27 | # plays will gather facts by default, which contain information about 28 | # the remote system. 29 | # 30 | # smart - gather by default, but don't regather if already gathered 31 | # implicit - gather by default, turn off with gather_facts: False 32 | # explicit - do not gather by default, must say gather_facts: True 33 | gathering = implicit 34 | 35 | # additional paths to search for roles in, colon separated 36 | roles_path = ../roles 37 | 38 | # uncomment this to disable SSH key host checking 39 | #host_key_checking = False 40 | 41 | # change this for alternative sudo implementations 42 | sudo_exe = sudo 43 | 44 | # what flags to pass to sudo 45 | #sudo_flags = -H 46 | 47 | # SSH timeout 48 | timeout = 10 49 | 50 | # default user to use for playbooks if user is not specified 51 | # (/usr/bin/ansible will use current user as default) 52 | #remote_user = root 53 | 54 | # logging is off by default unless this path is defined 55 | # if so defined, consider logrotate 56 | #log_path = /var/log/ansible.log 57 | 58 | # default module name for /usr/bin/ansible 59 | #module_name = command 60 | 61 | # use this shell for commands executed under sudo 62 | # you may need to change this to bin/bash in rare instances 63 | # if sudo is constrained 64 | #executable = /bin/sh 65 | 66 | # if inventory variables overlap, does the higher precedence one win 67 | # or are hash values merged together? The default is 'replace' but 68 | # this can also be set to 'merge'. 
69 | #hash_behaviour = replace 70 | 71 | # list any Jinja2 extensions to enable here: 72 | #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n 73 | 74 | # if set, always use this private key file for authentication, same as 75 | # if passing --private-key to ansible or ansible-playbook 76 | private_key_file = ./insecure_private_key 77 | 78 | # format of string {{ ansible_managed }} available within Jinja2 79 | # templates; indicates to users editing template files that their changes will be replaced. 80 | # it replaces {file}, {host} and {uid} and strftime codes with proper values. 81 | ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} 82 | 83 | # by default, ansible-playbook will display "Skipping [host]" if it determines a task 84 | # should not be run on a host. Set this to "False" if you don't want to see these "Skipping" 85 | # messages. NOTE: the task header will still be shown regardless of whether or not the 86 | # task is skipped. 87 | #display_skipped_hosts = True 88 | 89 | # by default (as of 1.3), Ansible will raise errors when attempting to dereference 90 | # Jinja2 variables that are not set in templates or action lines. Uncomment this line 91 | # to revert the behavior to pre-1.3. 92 | #error_on_undefined_vars = False 93 | 94 | # by default (as of 1.6), Ansible may display warnings based on the configuration of the 95 | # system running ansible itself. This may include warnings about 3rd party packages or 96 | # other conditions that should be resolved if possible. 97 | # to disable these warnings, set the following value to False: 98 | #system_warnings = True 99 | 100 | # by default (as of 1.4), Ansible may display deprecation warnings for language 101 | # features that should no longer be used and will be removed in future versions. 
102 | # to disable these warnings, set the following value to False: 103 | #deprecation_warnings = True 104 | 105 | # set plugin path directories here, separate with colons 106 | action_plugins = /usr/share/ansible_plugins/action_plugins 107 | callback_plugins = /usr/share/ansible_plugins/callback_plugins 108 | connection_plugins = /usr/share/ansible_plugins/connection_plugins 109 | lookup_plugins = /usr/share/ansible_plugins/lookup_plugins 110 | vars_plugins = /usr/share/ansible_plugins/vars_plugins 111 | filter_plugins = /usr/share/ansible_plugins/filter_plugins 112 | 113 | # don't like cows? that's unfortunate. 114 | # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 115 | #nocows = 1 116 | 117 | # don't like colors either? 118 | # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 119 | #nocolor = 1 120 | 121 | # the CA certificate path used for validating SSL certs. This path 122 | # should exist on the controlling node, not the target nodes 123 | # common locations: 124 | # RHEL/CentOS: /etc/pki/tls/certs/ca-bundle.crt 125 | # Fedora : /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 126 | # Ubuntu : /usr/share/ca-certificates/cacert.org/cacert.org.crt 127 | #ca_file_path = 128 | 129 | # the http user-agent string to use when fetching urls. Some web server 130 | # operators block the default urllib user agent as it is frequently used 131 | # by malicious attacks/scripts, so we set it to something unique to 132 | # avoid issues. 133 | #http_user_agent = ansible-agent 134 | 135 | [paramiko_connection] 136 | 137 | # uncomment this line to cause the paramiko connection plugin to not record new host 138 | # keys encountered. Increases performance on new host additions. Setting works independently of the 139 | # host key checking setting above. 140 | #record_host_keys=False 141 | 142 | # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this 143 | # line to disable this behaviour. 
144 | #pty=False 145 | 146 | [ssh_connection] 147 | 148 | # ssh arguments to use 149 | # Leaving off ControlPersist will result in poor performance, so use 150 | # paramiko on older platforms rather than removing it 151 | #ssh_args = -o ControlMaster=auto -o ControlPersist=60s 152 | 153 | # The path to use for the ControlPath sockets. This defaults to 154 | # "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with 155 | # very long hostnames or very long path names (caused by long user names or 156 | # deeply nested home directories) this can exceed the character limit on 157 | # file socket names (108 characters for most platforms). In that case, you 158 | # may wish to shorten the string below. 159 | # 160 | # Example: 161 | # control_path = %(directory)s/%%h-%%r 162 | #control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r 163 | 164 | # Enabling pipelining reduces the number of SSH operations required to 165 | # execute a module on the remote server. This can result in a significant 166 | # performance improvement when enabled, however when using "sudo:" you must 167 | # first disable 'requiretty' in /etc/sudoers 168 | # 169 | # By default, this option is disabled to preserve compatibility with 170 | # sudoers configurations that have requiretty (the default on many distros). 171 | # 172 | pipelining = True 173 | 174 | # if True, make ansible use scp if the connection type is ssh 175 | # (default is sftp) 176 | #scp_if_ssh = True 177 | 178 | [accelerate] 179 | accelerate_port = 5099 180 | accelerate_timeout = 30 181 | accelerate_connect_timeout = 5.0 182 | 183 | # The daemon timeout is measured in minutes. This time is measured 184 | # from the last activity to the accelerate daemon. 185 | accelerate_daemon_timeout = 30 186 | 187 | # If set to yes, accelerate_multi_key will allow multiple 188 | # private keys to be uploaded to it, though each user must 189 | # have access to the system via SSH to add a new key. The default 190 | # is "no". 
191 | #accelerate_multi_key = yes 192 | -------------------------------------------------------------------------------- /aws/ansible.cfg: -------------------------------------------------------------------------------- 1 | # config file for ansible -- http://ansible.com/ 2 | # ============================================== 3 | 4 | # nearly all parameters can be overridden in ansible-playbook 5 | # or with command line flags. ansible will read ANSIBLE_CONFIG, 6 | # ansible.cfg in the current working directory, .ansible.cfg in 7 | # the home directory or /etc/ansible/ansible.cfg, whichever it 8 | # finds first 9 | 10 | [defaults] 11 | 12 | # some basic default values... 13 | 14 | hostfile = /etc/ansible/hosts 15 | library = /usr/share/ansible 16 | remote_tmp = $HOME/.ansible/tmp 17 | pattern = * 18 | forks = 5 19 | poll_interval = 15 20 | sudo_user = root 21 | #ask_sudo_pass = True 22 | #ask_pass = True 23 | transport = smart 24 | remote_port = 22 25 | module_lang = C 26 | 27 | # plays will gather facts by default, which contain information about 28 | # the remote system. 
29 | # 30 | # smart - gather by default, but don't regather if already gathered 31 | # implicit - gather by default, turn off with gather_facts: False 32 | # explicit - do not gather by default, must say gather_facts: True 33 | gathering = implicit 34 | 35 | # additional paths to search for roles in, colon separated 36 | roles_path = ../roles 37 | 38 | # uncomment this to disable SSH key host checking 39 | #host_key_checking = False 40 | 41 | # change this for alternative sudo implementations 42 | sudo_exe = sudo 43 | 44 | # what flags to pass to sudo 45 | #sudo_flags = -H 46 | 47 | # SSH timeout 48 | timeout = 10 49 | 50 | # default user to use for playbooks if user is not specified 51 | # (/usr/bin/ansible will use current user as default) 52 | #remote_user = root 53 | 54 | # logging is off by default unless this path is defined 55 | # if so defined, consider logrotate 56 | #log_path = /var/log/ansible.log 57 | 58 | # default module name for /usr/bin/ansible 59 | #module_name = command 60 | 61 | # use this shell for commands executed under sudo 62 | # you may need to change this to bin/bash in rare instances 63 | # if sudo is constrained 64 | #executable = /bin/sh 65 | 66 | # if inventory variables overlap, does the higher precedence one win 67 | # or are hash values merged together? The default is 'replace' but 68 | # this can also be set to 'merge'. 69 | #hash_behaviour = replace 70 | 71 | # list any Jinja2 extensions to enable here: 72 | #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n 73 | 74 | # if set, always use this private key file for authentication, same as 75 | # if passing --private-key to ansible or ansible-playbook 76 | #private_key_file = ./insecure_private_key 77 | 78 | # format of string {{ ansible_managed }} available within Jinja2 79 | # templates indicates to users editing templates files will be replaced. 80 | # replacing {file}, {host} and {uid} and strftime codes with proper values. 
81 | ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} 82 | 83 | # by default, ansible-playbook will display "Skipping [host]" if it determines a task 84 | # should not be run on a host. Set this to "False" if you don't want to see these "Skipping" 85 | # messages. NOTE: the task header will still be shown regardless of whether or not the 86 | # task is skipped. 87 | #display_skipped_hosts = True 88 | 89 | # by default (as of 1.3), Ansible will raise errors when attempting to dereference 90 | # Jinja2 variables that are not set in templates or action lines. Uncomment this line 91 | # to revert the behavior to pre-1.3. 92 | #error_on_undefined_vars = False 93 | 94 | # by default (as of 1.6), Ansible may display warnings based on the configuration of the 95 | # system running ansible itself. This may include warnings about 3rd party packages or 96 | # other conditions that should be resolved if possible. 97 | # to disable these warnings, set the following value to False: 98 | #system_warnings = True 99 | 100 | # by default (as of 1.4), Ansible may display deprecation warnings for language 101 | # features that should no longer be used and will be removed in future versions. 102 | # to disable these warnings, set the following value to False: 103 | #deprecation_warnings = True 104 | 105 | # set plugin path directories here, separate with colons 106 | action_plugins = /usr/share/ansible_plugins/action_plugins 107 | callback_plugins = /usr/share/ansible_plugins/callback_plugins 108 | connection_plugins = /usr/share/ansible_plugins/connection_plugins 109 | lookup_plugins = /usr/share/ansible_plugins/lookup_plugins 110 | vars_plugins = /usr/share/ansible_plugins/vars_plugins 111 | filter_plugins = /usr/share/ansible_plugins/filter_plugins 112 | 113 | # don't like cows? that's unfortunate. 114 | # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 115 | #nocows = 1 116 | 117 | # don't like colors either? 
118 | # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 119 | #nocolor = 1 120 | 121 | # the CA certificate path used for validating SSL certs. This path 122 | # should exist on the controlling node, not the target nodes 123 | # common locations: 124 | # RHEL/CentOS: /etc/pki/tls/certs/ca-bundle.crt 125 | # Fedora : /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 126 | # Ubuntu : /usr/share/ca-certificates/cacert.org/cacert.org.crt 127 | #ca_file_path = 128 | 129 | # the http user-agent string to use when fetching urls. Some web server 130 | # operators block the default urllib user agent as it is frequently used 131 | # by malicious attacks/scripts, so we set it to something unique to 132 | # avoid issues. 133 | #http_user_agent = ansible-agent 134 | 135 | [paramiko_connection] 136 | 137 | # uncomment this line to cause the paramiko connection plugin to not record new host 138 | # keys encountered. Increases performance on new host additions. Setting works independently of the 139 | # host key checking setting above. 140 | #record_host_keys=False 141 | 142 | # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this 143 | # line to disable this behaviour. 144 | #pty=False 145 | 146 | [ssh_connection] 147 | 148 | # ssh arguments to use 149 | # Leaving off ControlPersist will result in poor performance, so use 150 | # paramiko on older platforms rather than removing it 151 | ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no 152 | 153 | # The path to use for the ControlPath sockets. This defaults to 154 | # "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with 155 | # very long hostnames or very long path names (caused by long user names or 156 | # deeply nested home directories) this can exceed the character limit on 157 | # file socket names (108 characters for most platforms). In that case, you 158 | # may wish to shorten the string below. 
159 | # 160 | # Example: 161 | # control_path = %(directory)s/%%h-%%r 162 | #control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r 163 | 164 | # Enabling pipelining reduces the number of SSH operations required to 165 | # execute a module on the remote server. This can result in a significant 166 | # performance improvement when enabled, however when using "sudo:" you must 167 | # first disable 'requiretty' in /etc/sudoers 168 | # 169 | # By default, this option is disabled to preserve compatibility with 170 | # sudoers configurations that have requiretty (the default on many distros). 171 | # 172 | pipelining = True 173 | 174 | # if True, make ansible use scp if the connection type is ssh 175 | # (default is sftp) 176 | #scp_if_ssh = True 177 | 178 | [accelerate] 179 | accelerate_port = 5099 180 | accelerate_timeout = 30 181 | accelerate_connect_timeout = 5.0 182 | 183 | # The daemon timeout is measured in minutes. This time is measured 184 | # from the last activity to the accelerate daemon. 185 | accelerate_daemon_timeout = 30 186 | 187 | # If set to yes, accelerate_multi_key will allow multiple 188 | # private keys to be uploaded to it, though each user must 189 | # have access to the system via SSH to add a new key. The default 190 | # is "no". 191 | #accelerate_multi_key = yes 192 | -------------------------------------------------------------------------------- /roles/common/files/supervisord.conf: -------------------------------------------------------------------------------- 1 | ; supervisor config file. 2 | ; 3 | ; For more information on the config file, please see: 4 | ; http://supervisord.org/configuration.html 5 | ; 6 | ; Note: shell expansion ("~" or "$HOME") is not supported. Environment 7 | ; variables can be expanded using this syntax: "%(ENV_HOME)s". 
8 | 9 | [unix_http_server] 10 | file=/tmp/supervisor.sock ; (the path to the socket file) 11 | ;chmod=0700 ; socket file mode (default 0700) 12 | ;chown=nobody:nogroup ; socket file uid:gid owner 13 | ;username=user ; (default is no username (open server)) 14 | ;password=123 ; (default is no password (open server)) 15 | 16 | ;[inet_http_server] ; inet (TCP) server disabled by default 17 | ;port=127.0.0.1:9001 ; (ip_address:port specifier, *:port for all iface) 18 | ;username=user ; (default is no username (open server)) 19 | ;password=123 ; (default is no password (open server)) 20 | 21 | [supervisord] 22 | logfile=/var/log/supervisord.log ; (main log file;default $CWD/supervisord.log) 23 | logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB) 24 | logfile_backups=10 ; (num of main logfile rotation backups;default 10) 25 | loglevel=info ; (log level;default info; others: debug,warn,trace) 26 | pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid) 27 | nodaemon=true ; (start in foreground if true;default false) 28 | minfds=1024 ; (min. avail startup file descriptors;default 1024) 29 | minprocs=200 ; (min. avail process descriptors;default 200) 30 | ;umask=022 ; (process file creation umask;default 022) 31 | ;user=chrism ; (default is current user, required if root) 32 | ;identifier=supervisor ; (supervisord identifier, default is 'supervisor') 33 | ;directory=/tmp ; (default is not to cd during start) 34 | ;nocleanup=true ; (don't clean up tempfiles at start;default false) 35 | ;childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP) 36 | ;environment=KEY="value" ; (key value pairs to add to environment) 37 | ;strip_ansi=false ; (strip ansi escape codes in logs; def. 
false) 38 | 39 | ; the below section must remain in the config file for RPC 40 | ; (supervisorctl/web interface) to work, additional interfaces may be 41 | ; added by defining them in separate rpcinterface: sections 42 | [rpcinterface:supervisor] 43 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 44 | 45 | [supervisorctl] 46 | serverurl=unix:///tmp/supervisor.sock ; use a unix:// URL for a unix socket 47 | ;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket 48 | ;username=chris ; should be same as http_username if set 49 | ;password=123 ; should be same as http_password if set 50 | ;prompt=mysupervisor ; cmd line prompt (default "supervisor") 51 | ;history_file=~/.sc_history ; use readline history if available 52 | 53 | ; The below sample program section shows all possible program subsection values, 54 | ; create one or more 'real' program: sections to be able to control them under 55 | ; supervisor. 56 | 57 | ;[program:theprogramname] 58 | ;command=/bin/cat ; the program (relative uses PATH, can take args) 59 | ;process_name=%(program_name)s ; process_name expr (default %(program_name)s) 60 | ;numprocs=1 ; number of processes copies to start (def 1) 61 | ;directory=/tmp ; directory to cwd to before exec (def no cwd) 62 | ;umask=022 ; umask for process (default None) 63 | ;priority=999 ; the relative start priority (default 999) 64 | ;autostart=true ; start at supervisord start (default: true) 65 | ;autorestart=unexpected ; whether/when to restart (default: unexpected) 66 | ;startsecs=1 ; number of secs prog must stay running (def. 
1) 67 | ;startretries=3 ; max # of serial start failures (default 3) 68 | ;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2) 69 | ;stopsignal=QUIT ; signal used to kill process (default TERM) 70 | ;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10) 71 | ;stopasgroup=false ; send stop signal to the UNIX process group (default false) 72 | ;killasgroup=false ; SIGKILL the UNIX process group (def false) 73 | ;user=chrism ; setuid to this UNIX account to run the program 74 | ;redirect_stderr=true ; redirect proc stderr to stdout (default false) 75 | ;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO 76 | ;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 77 | ;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10) 78 | ;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) 79 | ;stdout_events_enabled=false ; emit events on stdout writes (default false) 80 | ;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO 81 | ;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 82 | ;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10) 83 | ;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) 84 | ;stderr_events_enabled=false ; emit events on stderr writes (default false) 85 | ;environment=A="1",B="2" ; process environment additions (def no adds) 86 | ;serverurl=AUTO ; override serverurl computation (childutils) 87 | 88 | ; The below sample eventlistener section shows all possible 89 | ; eventlistener subsection values, create one or more 'real' 90 | ; eventlistener: sections to be able to handle event notifications 91 | ; sent by supervisor. 
92 | 93 | ;[eventlistener:theeventlistenername] 94 | ;command=/bin/eventlistener ; the program (relative uses PATH, can take args) 95 | ;process_name=%(program_name)s ; process_name expr (default %(program_name)s) 96 | ;numprocs=1 ; number of processes copies to start (def 1) 97 | ;events=EVENT ; event notif. types to subscribe to (req'd) 98 | ;buffer_size=10 ; event buffer queue size (default 10) 99 | ;directory=/tmp ; directory to cwd to before exec (def no cwd) 100 | ;umask=022 ; umask for process (default None) 101 | ;priority=-1 ; the relative start priority (default -1) 102 | ;autostart=true ; start at supervisord start (default: true) 103 | ;autorestart=unexpected ; whether/when to restart (default: unexpected) 104 | ;startsecs=1 ; number of secs prog must stay running (def. 1) 105 | ;startretries=3 ; max # of serial start failures (default 3) 106 | ;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2) 107 | ;stopsignal=QUIT ; signal used to kill process (default TERM) 108 | ;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10) 109 | ;stopasgroup=false ; send stop signal to the UNIX process group (default false) 110 | ;killasgroup=false ; SIGKILL the UNIX process group (def false) 111 | ;user=chrism ; setuid to this UNIX account to run the program 112 | ;redirect_stderr=true ; redirect proc stderr to stdout (default false) 113 | ;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO 114 | ;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 115 | ;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10) 116 | ;stdout_events_enabled=false ; emit events on stdout writes (default false) 117 | ;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO 118 | ;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 119 | ;stderr_logfile_backups ; # of stderr logfile backups (default 10) 120 | ;stderr_events_enabled=false ; emit events on stderr writes 
(default false) 121 | ;environment=A="1",B="2" ; process environment additions 122 | ;serverurl=AUTO ; override serverurl computation (childutils) 123 | 124 | ; The below sample group section shows all possible group values, 125 | ; create one or more 'real' group: sections to create "heterogeneous" 126 | ; process groups. 127 | 128 | ;[group:thegroupname] 129 | ;programs=progname1,progname2 ; each refers to 'x' in [program:x] definitions 130 | ;priority=999 ; the relative start priority (default 999) 131 | 132 | ; The [include] section can just contain the "files" setting. This 133 | ; setting can list multiple files (separated by whitespace or 134 | ; newlines). It can also contain wildcards. The filenames are 135 | ; interpreted as relative to this file. Included files *cannot* 136 | ; include files themselves. 137 | 138 | [include] 139 | files = supervisor.d/*.conf 140 | -------------------------------------------------------------------------------- /aws/cloudformation/moz-single.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion" : "2010-09-09", 3 | 4 | "Description" : "Single machine Fxa Dev environment", 5 | 6 | "Parameters" : { 7 | "KeyName": { 8 | "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the web server", 9 | "Type": "String", 10 | "MinLength": "1", 11 | "MaxLength": "255", 12 | "AllowedPattern" : "[\\x20-\\x7E]*", 13 | "ConstraintDescription" : "can contain only ASCII characters." 
14 | }, 15 | "HostedZone" : { 16 | "Type": "String", 17 | "Description" : "The DNS name of an existing Amazon Route 53 hosted zone" 18 | }, 19 | "Subdomain" : { 20 | "Type": "String", 21 | "Description" : "subdomain" 22 | }, 23 | "SSLCertificateName": { 24 | "Type": "String", 25 | "Description": "SSL Cert name from IAM" 26 | }, 27 | "RDSPassword": { 28 | "Type": "String", 29 | "MinLength": "8", 30 | "Description": "RDS password" 31 | } 32 | }, 33 | 34 | "Mappings" : { 35 | "RegionMap" : { 36 | "us-east-1" : { "AMI" : "ami-fb8e9292" }, 37 | "us-west-1" : { "AMI" : "ami-7aba833f" }, 38 | "us-west-2" : { "AMI" : "ami-043a5034" }, 39 | "eu-west-1" : { "AMI" : "ami-2918e35e" }, 40 | "sa-east-1" : { "AMI" : "ami-215dff3c" }, 41 | "ap-southeast-1" : { "AMI" : "ami-b40d5ee6" }, 42 | "ap-southeast-2" : { "AMI" : "ami-3b4bd301" }, 43 | "ap-northeast-1" : { "AMI" : "ami-c9562fc8" } 44 | } 45 | }, 46 | 47 | "Resources" : { 48 | 49 | "FxaDevSecurityGroup" : { 50 | "Type" : "AWS::EC2::SecurityGroup", 51 | "Properties" : { 52 | "GroupDescription" : "Fxa Dev", 53 | "SecurityGroupIngress" : [ 54 | { 55 | "IpProtocol" : "tcp", 56 | "FromPort" : "22", "ToPort" : "22", 57 | "CidrIp" : "0.0.0.0/0" 58 | }, 59 | { 60 | "IpProtocol" : "tcp", 61 | "FromPort" : "80", "ToPort" : "80", 62 | "CidrIp" : "0.0.0.0/0" 63 | } 64 | ] 65 | } 66 | }, 67 | 68 | "FxaIAMRole": { 69 | "Type": "AWS::IAM::Role", 70 | "Properties": { 71 | "AssumeRolePolicyDocument":{ 72 | "Statement":[ 73 | { 74 | "Effect":"Allow", 75 | "Principal":{ "Service":[ "ec2.amazonaws.com" ] }, 76 | "Action":[ "sts:AssumeRole" ] 77 | } 78 | ] 79 | }, 80 | "Path": "/fxa/", 81 | "Policies": [ 82 | { 83 | "PolicyName":"PuppetConfigDeployIAMRequirements", 84 | "PolicyDocument":{ 85 | "Version":"2012-10-17", 86 | "Statement":[ 87 | { 88 | "Effect":"Allow", 89 | "Action":[ "S3:ListBucket", "S3:GetObject" ], 90 | "Resource": [ 91 | "arn:aws:s3:::net.mozaws.ops.hiera-secrets", 92 | "arn:aws:s3:::net.mozaws.prod.ops.rpmrepo-protected", 93 | 
"arn:aws:s3:::net.mozaws.ops.rpmrepo-puppet" 94 | ] 95 | }, 96 | { 97 | "Effect":"Allow", 98 | "Action":[ "S3:GetObject" ], 99 | "Resource":[ 100 | "arn:aws:s3:::net.mozaws.ops.rpmrepo-protected/*", 101 | "arn:aws:s3:::net.mozaws.ops.rpmrepo-puppet/*", 102 | "arn:aws:s3:::net.mozaws.ops.hiera-secrets/common.yaml", 103 | "arn:aws:s3:::net.mozaws.ops.hiera-secrets/env/dev.yaml", 104 | "arn:aws:s3:::net.mozaws.ops.hiera-secrets/type/fxa.auth_server.dev.yaml", 105 | "arn:aws:s3:::net.mozaws.ops.hiera-secrets/app/fxa.dev.yaml" 106 | ] 107 | }, 108 | { "Effect":"Allow", "Action": [ "ec2:DescribeTags" ], "Resource": "*" } 109 | ] 110 | } 111 | } 112 | ] 113 | } 114 | }, 115 | 116 | "FxaInstanceProfile": { 117 | "Type": "AWS::IAM::InstanceProfile", 118 | "Properties": { 119 | "Path": "/fxa/", 120 | "Roles": [{ "Ref": "FxaIAMRole" }] 121 | } 122 | }, 123 | 124 | "FxaEc2Instance" : { 125 | "Type" : "AWS::EC2::Instance", 126 | "DependsOn" : ["FxaInstanceProfile", "FxaDevSecurityGroup"], 127 | "Properties" : { 128 | "ImageId" : { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "AMI" ]}, 129 | "KeyName" : { "Ref" : "KeyName" }, 130 | "IamInstanceProfile": { "Ref": "FxaInstanceProfile" }, 131 | "InstanceType" : "m3.medium", 132 | "SecurityGroups" : [{ "Ref" : "FxaDevSecurityGroup" }], 133 | "UserData": { "Fn::Base64": { "Fn::Join": ["", 134 | ["#!/bin/bash -ex","\n", 135 | "echo Defaults:ec2-user \\!requiretty >> /etc/sudoers;", "\n"] 136 | ]}} 137 | } 138 | }, 139 | 140 | "FxaELB":{ 141 | "Type":"AWS::ElasticLoadBalancing::LoadBalancer", 142 | "DependsOn" : "FxaEc2Instance", 143 | "Properties":{ 144 | "AvailabilityZones" : { "Fn::GetAZs" : "" }, 145 | "ConnectionDrainingPolicy":{ "Enabled" : true, "Timeout" : 15 }, 146 | "Listeners":[ 147 | { 148 | "InstancePort":"80", 149 | "LoadBalancerPort":"443", 150 | "PolicyNames" : ["ELBSecurityPolicy-2014-01"], 151 | "Protocol":"HTTPS", 152 | "SSLCertificateId":{ 153 | "Fn::Join":[ "", 154 | [ 155 | "arn:aws:iam::", { 
"Ref":"AWS::AccountId" }, ":server-certificate/", {"Ref" : "SSLCertificateName"} 156 | ] 157 | ] 158 | } 159 | } 160 | ], 161 | "Instances": [{ "Ref" : "FxaEc2Instance" }] 162 | } 163 | }, 164 | 165 | "FxaRDSSecurityGroup":{ 166 | "Type":"AWS::RDS::DBSecurityGroup", 167 | "DependsOn" : "FxaDevSecurityGroup", 168 | "Properties":{ 169 | "GroupDescription":"SG to Allow access to FXA RDS", 170 | "DBSecurityGroupIngress":{ 171 | "EC2SecurityGroupName": { "Ref":"FxaDevSecurityGroup" } 172 | } 173 | } 174 | }, 175 | 176 | "FxaRDSInstance":{ 177 | "Type":"AWS::RDS::DBInstance", 178 | "DependsOn" : "FxaRDSSecurityGroup", 179 | "Properties":{ 180 | "AllocatedStorage": "10", 181 | "DBInstanceClass": "db.t1.micro", 182 | "DBSecurityGroups":[ { "Ref":"FxaRDSSecurityGroup" } ], 183 | "Engine":"MySQL", 184 | "EngineVersion":"5.6", 185 | "MasterUsername":"root", 186 | "MasterUserPassword":{ "Ref" : "RDSPassword" } 187 | } 188 | }, 189 | 190 | "FxaDNS": { 191 | "Type": "AWS::Route53::RecordSetGroup", 192 | "DependsOn" : [ "FxaEc2Instance", "FxaELB", "FxaRDSInstance" ], 193 | "Properties": { 194 | "HostedZoneName" : { "Fn::Join" : [ "", [{"Ref" : "HostedZone"}, "." 
]]}, 195 | "RecordSets": [ 196 | { 197 | "Name" : { "Fn::Join" : [ "", [{"Ref" : "Subdomain"}, ".", {"Ref" : "HostedZone"}, "."]]}, 198 | "Type" : "A", 199 | "AliasTarget" : { 200 | "HostedZoneId" : { "Fn::GetAtt":[ "FxaELB", "CanonicalHostedZoneNameID" ] }, 201 | "DNSName" : { "Fn::GetAtt":[ "FxaELB", "CanonicalHostedZoneName" ] } 202 | } 203 | }, 204 | { 205 | "Name" : { "Fn::Join" : [ "", ["oauth-", {"Ref" : "Subdomain"}, ".", {"Ref" : "HostedZone"}, "."]]}, 206 | "Type" : "A", 207 | "AliasTarget" : { 208 | "HostedZoneId" : { "Fn::GetAtt":[ "FxaELB", "CanonicalHostedZoneNameID" ] }, 209 | "DNSName" : { "Fn::GetAtt":[ "FxaELB", "CanonicalHostedZoneName" ] } 210 | } 211 | }, 212 | { 213 | "Name" : { "Fn::Join" : [ "", ["123done-", {"Ref" : "Subdomain"}, ".", {"Ref" : "HostedZone"}, "."]]}, 214 | "Type" : "A", 215 | "AliasTarget" : { 216 | "HostedZoneId" : { "Fn::GetAtt":[ "FxaELB", "CanonicalHostedZoneNameID" ] }, 217 | "DNSName" : { "Fn::GetAtt":[ "FxaELB", "CanonicalHostedZoneName" ] } 218 | } 219 | }, 220 | { 221 | "Name" : { "Fn::Join" : [ "", ["meta-", {"Ref" : "Subdomain"}, ".", {"Ref" : "HostedZone"}, "."]]}, 222 | "Type": "A", 223 | "TTL": "30", 224 | "ResourceRecords" : [ { "Fn::GetAtt" : [ "FxaEc2Instance", "PublicIp" ] } ] 225 | } 226 | ] 227 | } 228 | } 229 | }, 230 | 231 | "Outputs" : { 232 | "Instance" : { 233 | "Value" : { "Fn::GetAtt" : [ "FxaEc2Instance", "PublicDnsName" ] }, 234 | "Description" : "DNS Name of the newly created EC2 instance" 235 | }, 236 | "RDSEndpoint": { 237 | "Description": "RDS Endpoint", 238 | "Value": {"Fn::GetAtt":[ "FxaRDSInstance", "Endpoint.Address" ]} 239 | } 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /aws/roles/ses/templates/main.cf.j2: -------------------------------------------------------------------------------- 1 | # Global Postfix configuration file. This file lists only a subset 2 | # of all parameters. 
For the syntax, and for a complete parameter 3 | # list, see the postconf(5) manual page (command: "man 5 postconf"). 4 | # 5 | # For common configuration examples, see BASIC_CONFIGURATION_README 6 | # and STANDARD_CONFIGURATION_README. To find these documents, use 7 | # the command "postconf html_directory readme_directory", or go to 8 | # http://www.postfix.org/. 9 | # 10 | # For best results, change no more than 2-3 parameters at a time, 11 | # and test if Postfix still works after every change. 12 | 13 | # SOFT BOUNCE 14 | # 15 | # The soft_bounce parameter provides a limited safety net for 16 | # testing. When soft_bounce is enabled, mail will remain queued that 17 | # would otherwise bounce. This parameter disables locally-generated 18 | # bounces, and prevents the SMTP server from rejecting mail permanently 19 | # (by changing 5xx replies into 4xx replies). However, soft_bounce 20 | # is no cure for address rewriting mistakes or mail routing mistakes. 21 | # 22 | #soft_bounce = no 23 | 24 | # LOCAL PATHNAME INFORMATION 25 | # 26 | # The queue_directory specifies the location of the Postfix queue. 27 | # This is also the root directory of Postfix daemons that run chrooted. 28 | # See the files in examples/chroot-setup for setting up Postfix chroot 29 | # environments on different UNIX systems. 30 | # 31 | queue_directory = /var/spool/postfix 32 | 33 | # The command_directory parameter specifies the location of all 34 | # postXXX commands. 35 | # 36 | command_directory = /usr/sbin 37 | 38 | # The daemon_directory parameter specifies the location of all Postfix 39 | # daemon programs (i.e. programs listed in the master.cf file). This 40 | # directory must be owned by root. 41 | # 42 | daemon_directory = /usr/libexec/postfix 43 | 44 | # The data_directory parameter specifies the location of Postfix-writable 45 | # data files (caches, random numbers). This directory must be owned 46 | # by the mail_owner account (see below). 
47 | # 48 | data_directory = /var/lib/postfix 49 | 50 | # QUEUE AND PROCESS OWNERSHIP 51 | # 52 | # The mail_owner parameter specifies the owner of the Postfix queue 53 | # and of most Postfix daemon processes. Specify the name of a user 54 | # account THAT DOES NOT SHARE ITS USER OR GROUP ID WITH OTHER ACCOUNTS 55 | # AND THAT OWNS NO OTHER FILES OR PROCESSES ON THE SYSTEM. In 56 | # particular, don't specify nobody or daemon. PLEASE USE A DEDICATED 57 | # USER. 58 | # 59 | mail_owner = postfix 60 | 61 | # The default_privs parameter specifies the default rights used by 62 | # the local delivery agent for delivery to external file or command. 63 | # These rights are used in the absence of a recipient user context. 64 | # DO NOT SPECIFY A PRIVILEGED USER OR THE POSTFIX OWNER. 65 | # 66 | #default_privs = nobody 67 | 68 | # INTERNET HOST AND DOMAIN NAMES 69 | # 70 | # The myhostname parameter specifies the internet hostname of this 71 | # mail system. The default is to use the fully-qualified domain name 72 | # from gethostname(). $myhostname is used as a default value for many 73 | # other configuration parameters. 74 | # 75 | #myhostname = host.domain.tld 76 | #myhostname = virtual.domain.tld 77 | 78 | # The mydomain parameter specifies the local internet domain name. 79 | # The default is to use $myhostname minus the first component. 80 | # $mydomain is used as a default value for many other configuration 81 | # parameters. 82 | # 83 | #mydomain = domain.tld 84 | 85 | # SENDING MAIL 86 | # 87 | # The myorigin parameter specifies the domain that locally-posted 88 | # mail appears to come from. The default is to append $myhostname, 89 | # which is fine for small sites. If you run a domain with multiple 90 | # machines, you should (1) change this to $mydomain and (2) set up 91 | # a domain-wide alias database that aliases each user to 92 | # user@that.users.mailhost. 
93 | # 94 | # For the sake of consistency between sender and recipient addresses, 95 | # myorigin also specifies the default domain name that is appended 96 | # to recipient addresses that have no @domain part. 97 | # 98 | #myorigin = $myhostname 99 | #myorigin = $mydomain 100 | 101 | # RECEIVING MAIL 102 | 103 | # The inet_interfaces parameter specifies the network interface 104 | # addresses that this mail system receives mail on. By default, 105 | # the software claims all active interfaces on the machine. The 106 | # parameter also controls delivery of mail to user@[ip.address]. 107 | # 108 | # See also the proxy_interfaces parameter, for network addresses that 109 | # are forwarded to us via a proxy or network address translator. 110 | # 111 | # Note: you need to stop/start Postfix when this parameter changes. 112 | # 113 | #inet_interfaces = all 114 | #inet_interfaces = $myhostname 115 | #inet_interfaces = $myhostname, localhost 116 | inet_interfaces = localhost 117 | 118 | # Enable IPv4, and IPv6 if supported 119 | inet_protocols = all 120 | 121 | # The proxy_interfaces parameter specifies the network interface 122 | # addresses that this mail system receives mail on by way of a 123 | # proxy or network address translation unit. This setting extends 124 | # the address list specified with the inet_interfaces parameter. 125 | # 126 | # You must specify your proxy/NAT addresses when your system is a 127 | # backup MX host for other domains, otherwise mail delivery loops 128 | # will happen when the primary MX host is down. 129 | # 130 | #proxy_interfaces = 131 | #proxy_interfaces = 1.2.3.4 132 | 133 | # The mydestination parameter specifies the list of domains that this 134 | # machine considers itself the final destination for. 135 | # 136 | # These domains are routed to the delivery agent specified with the 137 | # local_transport parameter setting. 
By default, that is the UNIX 138 | # compatible delivery agent that looks up all recipients in /etc/passwd 139 | # and /etc/aliases or their equivalent. 140 | # 141 | # The default is $myhostname + localhost.$mydomain. On a mail domain 142 | # gateway, you should also include $mydomain. 143 | # 144 | # Do not specify the names of virtual domains - those domains are 145 | # specified elsewhere (see VIRTUAL_README). 146 | # 147 | # Do not specify the names of domains that this machine is backup MX 148 | # host for. Specify those names via the relay_domains settings for 149 | # the SMTP server, or use permit_mx_backup if you are lazy (see 150 | # STANDARD_CONFIGURATION_README). 151 | # 152 | # The local machine is always the final destination for mail addressed 153 | # to user@[the.net.work.address] of an interface that the mail system 154 | # receives mail on (see the inet_interfaces parameter). 155 | # 156 | # Specify a list of host or domain names, /file/name or type:table 157 | # patterns, separated by commas and/or whitespace. A /file/name 158 | # pattern is replaced by its contents; a type:table is matched when 159 | # a name matches a lookup key (the right-hand side is ignored). 160 | # Continue long lines by starting the next line with whitespace. 161 | # 162 | # See also below, section "REJECTING MAIL FOR UNKNOWN LOCAL USERS". 163 | # 164 | mydestination = $myhostname, localhost.$mydomain, localhost 165 | #mydestination = $myhostname, localhost.$mydomain, localhost, $mydomain 166 | #mydestination = $myhostname, localhost.$mydomain, localhost, $mydomain, 167 | # mail.$mydomain, www.$mydomain, ftp.$mydomain 168 | 169 | # REJECTING MAIL FOR UNKNOWN LOCAL USERS 170 | # 171 | # The local_recipient_maps parameter specifies optional lookup tables 172 | # with all names or addresses of users that are local with respect 173 | # to $mydestination, $inet_interfaces or $proxy_interfaces. 
174 | # 175 | # If this parameter is defined, then the SMTP server will reject 176 | # mail for unknown local users. This parameter is defined by default. 177 | # 178 | # To turn off local recipient checking in the SMTP server, specify 179 | # local_recipient_maps = (i.e. empty). 180 | # 181 | # The default setting assumes that you use the default Postfix local 182 | # delivery agent for local delivery. You need to update the 183 | # local_recipient_maps setting if: 184 | # 185 | # - You define $mydestination domain recipients in files other than 186 | # /etc/passwd, /etc/aliases, or the $virtual_alias_maps files. 187 | # For example, you define $mydestination domain recipients in 188 | # the $virtual_mailbox_maps files. 189 | # 190 | # - You redefine the local delivery agent in master.cf. 191 | # 192 | # - You redefine the "local_transport" setting in main.cf. 193 | # 194 | # - You use the "luser_relay", "mailbox_transport", or "fallback_transport" 195 | # feature of the Postfix local delivery agent (see local(8)). 196 | # 197 | # Details are described in the LOCAL_RECIPIENT_README file. 198 | # 199 | # Beware: if the Postfix SMTP server runs chrooted, you probably have 200 | # to access the passwd file via the proxymap service, in order to 201 | # overcome chroot restrictions. The alternative, having a copy of 202 | # the system passwd file in the chroot jail is just not practical. 203 | # 204 | # The right-hand side of the lookup tables is conveniently ignored. 205 | # In the left-hand side, specify a bare username, an @domain.tld 206 | # wild-card, or specify a user@domain.tld address. 
207 | # 208 | #local_recipient_maps = unix:passwd.byname $alias_maps 209 | #local_recipient_maps = proxy:unix:passwd.byname $alias_maps 210 | #local_recipient_maps = 211 | 212 | # The unknown_local_recipient_reject_code specifies the SMTP server 213 | # response code when a recipient domain matches $mydestination or 214 | # ${proxy,inet}_interfaces, while $local_recipient_maps is non-empty 215 | # and the recipient address or address local-part is not found. 216 | # 217 | # The default setting is 550 (reject mail) but it is safer to start 218 | # with 450 (try again later) until you are certain that your 219 | # local_recipient_maps settings are OK. 220 | # 221 | unknown_local_recipient_reject_code = 550 222 | 223 | # TRUST AND RELAY CONTROL 224 | 225 | # The mynetworks parameter specifies the list of "trusted" SMTP 226 | # clients that have more privileges than "strangers". 227 | # 228 | # In particular, "trusted" SMTP clients are allowed to relay mail 229 | # through Postfix. See the smtpd_recipient_restrictions parameter 230 | # in postconf(5). 231 | # 232 | # You can specify the list of "trusted" network addresses by hand 233 | # or you can let Postfix do it for you (which is the default). 234 | # 235 | # By default (mynetworks_style = subnet), Postfix "trusts" SMTP 236 | # clients in the same IP subnetworks as the local machine. 237 | # On Linux, this works correctly only with interfaces specified 238 | # with the "ifconfig" command. 239 | # 240 | # Specify "mynetworks_style = class" when Postfix should "trust" SMTP 241 | # clients in the same IP class A/B/C networks as the local machine. 242 | # Don't do this with a dialup site - it would cause Postfix to "trust" 243 | # your entire provider's network. Instead, specify an explicit 244 | # mynetworks list by hand, as described below. 245 | # 246 | # Specify "mynetworks_style = host" when Postfix should "trust" 247 | # only the local machine. 
248 | # 249 | #mynetworks_style = class 250 | #mynetworks_style = subnet 251 | #mynetworks_style = host 252 | 253 | # Alternatively, you can specify the mynetworks list by hand, in 254 | # which case Postfix ignores the mynetworks_style setting. 255 | # 256 | # Specify an explicit list of network/netmask patterns, where the 257 | # mask specifies the number of bits in the network part of a host 258 | # address. 259 | # 260 | # You can also specify the absolute pathname of a pattern file instead 261 | # of listing the patterns here. Specify type:table for table-based lookups 262 | # (the value on the table right-hand side is not used). 263 | # 264 | #mynetworks = 168.100.189.0/28, 127.0.0.0/8 265 | #mynetworks = $config_directory/mynetworks 266 | #mynetworks = hash:/etc/postfix/network_table 267 | 268 | # The relay_domains parameter restricts what destinations this system will 269 | # relay mail to. See the smtpd_recipient_restrictions description in 270 | # postconf(5) for detailed information. 271 | # 272 | # By default, Postfix relays mail 273 | # - from "trusted" clients (IP address matches $mynetworks) to any destination, 274 | # - from "untrusted" clients to destinations that match $relay_domains or 275 | # subdomains thereof, except addresses with sender-specified routing. 276 | # The default relay_domains value is $mydestination. 277 | # 278 | # In addition to the above, the Postfix SMTP server by default accepts mail 279 | # that Postfix is final destination for: 280 | # - destinations that match $inet_interfaces or $proxy_interfaces, 281 | # - destinations that match $mydestination 282 | # - destinations that match $virtual_alias_domains, 283 | # - destinations that match $virtual_mailbox_domains. 284 | # These destinations do not need to be listed in $relay_domains. 285 | # 286 | # Specify a list of hosts or domains, /file/name patterns or type:name 287 | # lookup tables, separated by commas and/or whitespace. 
Continue 288 | # long lines by starting the next line with whitespace. A file name 289 | # is replaced by its contents; a type:name table is matched when a 290 | # (parent) domain appears as lookup key. 291 | # 292 | # NOTE: Postfix will not automatically forward mail for domains that 293 | # list this system as their primary or backup MX host. See the 294 | # permit_mx_backup restriction description in postconf(5). 295 | # 296 | #relay_domains = $mydestination 297 | 298 | # INTERNET OR INTRANET 299 | 300 | # The relayhost parameter specifies the default host to send mail to 301 | # when no entry is matched in the optional transport(5) table. When 302 | # no relayhost is given, mail is routed directly to the destination. 303 | # 304 | # On an intranet, specify the organizational domain name. If your 305 | # internal DNS uses no MX records, specify the name of the intranet 306 | # gateway host instead. 307 | # 308 | # In the case of SMTP, specify a domain, host, host:port, [host]:port, 309 | # [address] or [address]:port; the form [host] turns off MX lookups. 310 | # 311 | # If you're connected via UUCP, see also the default_transport parameter. 312 | # 313 | #relayhost = $mydomain 314 | #relayhost = [gateway.my.domain] 315 | #relayhost = [mailserver.isp.tld] 316 | #relayhost = uucphost 317 | #relayhost = [an.ip.add.ress] 318 | 319 | # REJECTING UNKNOWN RELAY USERS 320 | # 321 | # The relay_recipient_maps parameter specifies optional lookup tables 322 | # with all addresses in the domains that match $relay_domains. 323 | # 324 | # If this parameter is defined, then the SMTP server will reject 325 | # mail for unknown relay users. This feature is off by default. 326 | # 327 | # The right-hand side of the lookup tables is conveniently ignored. 328 | # In the left-hand side, specify an @domain.tld wild-card, or specify 329 | # a user@domain.tld address. 
330 | # 331 | #relay_recipient_maps = hash:/etc/postfix/relay_recipients 332 | 333 | # INPUT RATE CONTROL 334 | # 335 | # The in_flow_delay configuration parameter implements mail input 336 | # flow control. This feature is turned on by default, although it 337 | # still needs further development (it's disabled on SCO UNIX due 338 | # to an SCO bug). 339 | # 340 | # A Postfix process will pause for $in_flow_delay seconds before 341 | # accepting a new message, when the message arrival rate exceeds the 342 | # message delivery rate. With the default 100 SMTP server process 343 | # limit, this limits the mail inflow to 100 messages a second more 344 | # than the number of messages delivered per second. 345 | # 346 | # Specify 0 to disable the feature. Valid delays are 0..10. 347 | # 348 | #in_flow_delay = 1s 349 | 350 | # ADDRESS REWRITING 351 | # 352 | # The ADDRESS_REWRITING_README document gives information about 353 | # address masquerading or other forms of address rewriting including 354 | # username->Firstname.Lastname mapping. 355 | 356 | # ADDRESS REDIRECTION (VIRTUAL DOMAIN) 357 | # 358 | # The VIRTUAL_README document gives information about the many forms 359 | # of domain hosting that Postfix supports. 360 | 361 | # "USER HAS MOVED" BOUNCE MESSAGES 362 | # 363 | # See the discussion in the ADDRESS_REWRITING_README document. 364 | 365 | # TRANSPORT MAP 366 | # 367 | # See the discussion in the ADDRESS_REWRITING_README document. 368 | 369 | # ALIAS DATABASE 370 | # 371 | # The alias_maps parameter specifies the list of alias databases used 372 | # by the local delivery agent. The default list is system dependent. 373 | # 374 | # On systems with NIS, the default is to search the local alias 375 | # database, then the NIS alias database. See aliases(5) for syntax 376 | # details. 
377 | # 378 | # If you change the alias database, run "postalias /etc/aliases" (or 379 | # wherever your system stores the mail alias file), or simply run 380 | # "newaliases" to build the necessary DBM or DB file. 381 | # 382 | # It will take a minute or so before changes become visible. Use 383 | # "postfix reload" to eliminate the delay. 384 | # 385 | #alias_maps = dbm:/etc/aliases 386 | alias_maps = hash:/etc/aliases 387 | #alias_maps = hash:/etc/aliases, nis:mail.aliases 388 | #alias_maps = netinfo:/aliases 389 | 390 | # The alias_database parameter specifies the alias database(s) that 391 | # are built with "newaliases" or "sendmail -bi". This is a separate 392 | # configuration parameter, because alias_maps (see above) may specify 393 | # tables that are not necessarily all under control by Postfix. 394 | # 395 | #alias_database = dbm:/etc/aliases 396 | #alias_database = dbm:/etc/mail/aliases 397 | alias_database = hash:/etc/aliases 398 | #alias_database = hash:/etc/aliases, hash:/opt/majordomo/aliases 399 | 400 | # ADDRESS EXTENSIONS (e.g., user+foo) 401 | # 402 | # The recipient_delimiter parameter specifies the separator between 403 | # user names and address extensions (user+foo). See canonical(5), 404 | # local(8), relocated(5) and virtual(5) for the effects this has on 405 | # aliases, canonical, virtual, relocated and .forward file lookups. 406 | # Basically, the software tries user+foo and .forward+foo before 407 | # trying user and .forward. 408 | # 409 | #recipient_delimiter = + 410 | 411 | # DELIVERY TO MAILBOX 412 | # 413 | # The home_mailbox parameter specifies the optional pathname of a 414 | # mailbox file relative to a user's home directory. The default 415 | # mailbox file is /var/spool/mail/user or /var/mail/user. Specify 416 | # "Maildir/" for qmail-style delivery (the / is required). 
417 | # 418 | #home_mailbox = Mailbox 419 | #home_mailbox = Maildir/ 420 | 421 | # The mail_spool_directory parameter specifies the directory where 422 | # UNIX-style mailboxes are kept. The default setting depends on the 423 | # system type. 424 | # 425 | #mail_spool_directory = /var/mail 426 | #mail_spool_directory = /var/spool/mail 427 | 428 | # The mailbox_command parameter specifies the optional external 429 | # command to use instead of mailbox delivery. The command is run as 430 | # the recipient with proper HOME, SHELL and LOGNAME environment settings. 431 | # Exception: delivery for root is done as $default_user. 432 | # 433 | # Other environment variables of interest: USER (recipient username), 434 | # EXTENSION (address extension), DOMAIN (domain part of address), 435 | # and LOCAL (the address localpart). 436 | # 437 | # Unlike other Postfix configuration parameters, the mailbox_command 438 | # parameter is not subjected to $parameter substitutions. This is to 439 | # make it easier to specify shell syntax (see example below). 440 | # 441 | # Avoid shell meta characters because they will force Postfix to run 442 | # an expensive shell process. Procmail alone is expensive enough. 443 | # 444 | # IF YOU USE THIS TO DELIVER MAIL SYSTEM-WIDE, YOU MUST SET UP AN 445 | # ALIAS THAT FORWARDS MAIL FOR ROOT TO A REAL USER. 446 | # 447 | #mailbox_command = /some/where/procmail 448 | #mailbox_command = /some/where/procmail -a "$EXTENSION" 449 | 450 | # The mailbox_transport specifies the optional transport in master.cf 451 | # to use after processing aliases and .forward files. This parameter 452 | # has precedence over the mailbox_command, fallback_transport and 453 | # luser_relay parameters. 454 | # 455 | # Specify a string of the form transport:nexthop, where transport is 456 | # the name of a mail delivery transport defined in master.cf. The 457 | # :nexthop part is optional. For more details see the sample transport 458 | # configuration file. 
459 | # 460 | # NOTE: if you use this feature for accounts not in the UNIX password 461 | # file, then you must update the "local_recipient_maps" setting in 462 | # the main.cf file, otherwise the SMTP server will reject mail for 463 | # non-UNIX accounts with "User unknown in local recipient table". 464 | # 465 | #mailbox_transport = lmtp:unix:/var/lib/imap/socket/lmtp 466 | 467 | # If using the cyrus-imapd IMAP server deliver local mail to the IMAP 468 | # server using LMTP (Local Mail Transport Protocol), this is preferred 469 | # over the older cyrus deliver program by setting the 470 | # mailbox_transport as below: 471 | # 472 | # mailbox_transport = lmtp:unix:/var/lib/imap/socket/lmtp 473 | # 474 | # The efficiency of LMTP delivery for cyrus-imapd can be enhanced via 475 | # these settings. 476 | # 477 | # local_destination_recipient_limit = 300 478 | # local_destination_concurrency_limit = 5 479 | # 480 | # Of course you should adjust these settings as appropriate for the 481 | # capacity of the hardware you are using. The recipient limit setting 482 | # can be used to take advantage of the single instance message store 483 | # capability of Cyrus. The concurrency limit can be used to control 484 | # how many simultaneous LMTP sessions will be permitted to the Cyrus 485 | # message store. 486 | # 487 | # To use the old cyrus deliver program you have to set: 488 | #mailbox_transport = cyrus 489 | 490 | # The fallback_transport specifies the optional transport in master.cf 491 | # to use for recipients that are not found in the UNIX passwd database. 492 | # This parameter has precedence over the luser_relay parameter. 493 | # 494 | # Specify a string of the form transport:nexthop, where transport is 495 | # the name of a mail delivery transport defined in master.cf. The 496 | # :nexthop part is optional. For more details see the sample transport 497 | # configuration file. 
498 | # 499 | # NOTE: if you use this feature for accounts not in the UNIX password 500 | # file, then you must update the "local_recipient_maps" setting in 501 | # the main.cf file, otherwise the SMTP server will reject mail for 502 | # non-UNIX accounts with "User unknown in local recipient table". 503 | # 504 | #fallback_transport = lmtp:unix:/var/lib/imap/socket/lmtp 505 | #fallback_transport = 506 | 507 | # The luser_relay parameter specifies an optional destination address 508 | # for unknown recipients. By default, mail for unknown@$mydestination, 509 | # unknown@[$inet_interfaces] or unknown@[$proxy_interfaces] is returned 510 | # as undeliverable. 511 | # 512 | # The following expansions are done on luser_relay: $user (recipient 513 | # username), $shell (recipient shell), $home (recipient home directory), 514 | # $recipient (full recipient address), $extension (recipient address 515 | # extension), $domain (recipient domain), $local (entire recipient 516 | # localpart), $recipient_delimiter. Specify ${name?value} or 517 | # ${name:value} to expand value only when $name does (does not) exist. 518 | # 519 | # luser_relay works only for the default Postfix local delivery agent. 520 | # 521 | # NOTE: if you use this feature for accounts not in the UNIX password 522 | # file, then you must specify "local_recipient_maps =" (i.e. empty) in 523 | # the main.cf file, otherwise the SMTP server will reject mail for 524 | # non-UNIX accounts with "User unknown in local recipient table". 525 | # 526 | #luser_relay = $user@other.host 527 | #luser_relay = $local@other.host 528 | #luser_relay = admin+$local 529 | 530 | # JUNK MAIL CONTROLS 531 | # 532 | # The controls listed here are only a very small subset. The file 533 | # SMTPD_ACCESS_README provides an overview. 534 | 535 | # The header_checks parameter specifies an optional table with patterns 536 | # that each logical message header is matched against, including 537 | # headers that span multiple physical lines. 
538 | # 539 | # By default, these patterns also apply to MIME headers and to the 540 | # headers of attached messages. With older Postfix versions, MIME and 541 | # attached message headers were treated as body text. 542 | # 543 | # For details, see "man header_checks". 544 | # 545 | #header_checks = regexp:/etc/postfix/header_checks 546 | 547 | # FAST ETRN SERVICE 548 | # 549 | # Postfix maintains per-destination logfiles with information about 550 | # deferred mail, so that mail can be flushed quickly with the SMTP 551 | # "ETRN domain.tld" command, or by executing "sendmail -qRdomain.tld". 552 | # See the ETRN_README document for a detailed description. 553 | # 554 | # The fast_flush_domains parameter controls what destinations are 555 | # eligible for this service. By default, they are all domains that 556 | # this server is willing to relay mail to. 557 | # 558 | #fast_flush_domains = $relay_domains 559 | 560 | # SHOW SOFTWARE VERSION OR NOT 561 | # 562 | # The smtpd_banner parameter specifies the text that follows the 220 563 | # code in the SMTP server's greeting banner. Some people like to see 564 | # the mail version advertised. By default, Postfix shows no version. 565 | # 566 | # You MUST specify $myhostname at the start of the text. That is an 567 | # RFC requirement. Postfix itself does not care. 568 | # 569 | #smtpd_banner = $myhostname ESMTP $mail_name 570 | #smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) 571 | 572 | # PARALLEL DELIVERY TO THE SAME DESTINATION 573 | # 574 | # How many parallel deliveries to the same user or domain? With local 575 | # delivery, it does not make sense to do massively parallel delivery 576 | # to the same user, because mailbox updates must happen sequentially, 577 | # and expensive pipelines in .forward files can cause disasters when 578 | # too many are run at the same time. With SMTP deliveries, 10 579 | # simultaneous connections to the same domain could be sufficient to 580 | # raise eyebrows. 
581 | # 582 | # Each message delivery transport has its XXX_destination_concurrency_limit 583 | # parameter. The default is $default_destination_concurrency_limit for 584 | # most delivery transports. For the local delivery agent the default is 2. 585 | 586 | #local_destination_concurrency_limit = 2 587 | #default_destination_concurrency_limit = 20 588 | 589 | # DEBUGGING CONTROL 590 | # 591 | # The debug_peer_level parameter specifies the increment in verbose 592 | # logging level when an SMTP client or server host name or address 593 | # matches a pattern in the debug_peer_list parameter. 594 | # 595 | debug_peer_level = 2 596 | 597 | # The debug_peer_list parameter specifies an optional list of domain 598 | # or network patterns, /file/name patterns or type:name tables. When 599 | # an SMTP client or server host name or address matches a pattern, 600 | # increase the verbose logging level by the amount specified in the 601 | # debug_peer_level parameter. 602 | # 603 | #debug_peer_list = 127.0.0.1 604 | #debug_peer_list = some.domain 605 | 606 | # The debugger_command specifies the external command that is executed 607 | # when a Postfix daemon program is run with the -D option. 608 | # 609 | # Use "command .. & sleep 5" so that the debugger can attach before 610 | # the process marches on. If you use an X-based debugger, be sure to 611 | # set up your XAUTHORITY environment variable before starting Postfix. 612 | # 613 | debugger_command = 614 | PATH=/bin:/usr/bin:/usr/local/bin:/usr/X11R6/bin 615 | ddd $daemon_directory/$process_name $process_id & sleep 5 616 | 617 | # If you can't use X, use this to capture the call stack when a 618 | # daemon crashes. The result is in a file in the configuration 619 | # directory, and is named after the process name and the process ID. 
620 | # 621 | # debugger_command = 622 | # PATH=/bin:/usr/bin:/usr/local/bin; export PATH; (echo cont; 623 | # echo where) | gdb $daemon_directory/$process_name $process_id 2>&1 624 | # >$config_directory/$process_name.$process_id.log & sleep 5 625 | # 626 | # Another possibility is to run gdb under a detached screen session. 627 | # To attach to the screen session, su root and run "screen -r 628 | # <id_string>" where <id_string> uniquely matches one of the detached 629 | # sessions (from "screen -list"). 630 | # 631 | # debugger_command = 632 | # PATH=/bin:/usr/bin:/sbin:/usr/sbin; export PATH; screen 633 | # -dmS $process_name gdb $daemon_directory/$process_name 634 | # $process_id & sleep 1 635 | 636 | # INSTALL-TIME CONFIGURATION INFORMATION 637 | # 638 | # The following parameters are used when installing a new Postfix version. 639 | # 640 | # sendmail_path: The full pathname of the Postfix sendmail command. 641 | # This is the Sendmail-compatible mail posting interface. 642 | # 643 | sendmail_path = /usr/sbin/sendmail.postfix 644 | 645 | # newaliases_path: The full pathname of the Postfix newaliases command. 646 | # This is the Sendmail-compatible command to build alias databases. 647 | # 648 | newaliases_path = /usr/bin/newaliases.postfix 649 | 650 | # mailq_path: The full pathname of the Postfix mailq command. This 651 | # is the Sendmail-compatible mail queue listing command. 652 | # 653 | mailq_path = /usr/bin/mailq.postfix 654 | 655 | # setgid_group: The group for mail submission and queue management 656 | # commands. This must be a group name with a numerical group ID that 657 | # is not shared with other accounts, not even with the Postfix account. 658 | # 659 | setgid_group = postdrop 660 | 661 | # html_directory: The location of the Postfix HTML documentation. 662 | # 663 | html_directory = no 664 | 665 | # manpage_directory: The location of the Postfix on-line manual pages. 
666 | # 667 | manpage_directory = /usr/share/man 668 | 669 | # sample_directory: The location of the Postfix sample configuration files. 670 | # This parameter is obsolete as of Postfix 2.1. 671 | # 672 | sample_directory = /usr/share/doc/postfix-2.6.6/samples 673 | 674 | # readme_directory: The location of the Postfix README files. 675 | # 676 | readme_directory = /usr/share/doc/postfix-2.6.6/README_FILES 677 | 678 | ############################################################################### 679 | # 680 | # Additional configuration to send mail via Amazon SES 681 | # 682 | # https://gist.github.com/gene1wood/6323301 683 | # 684 | relayhost = email-smtp.{{ region }}.amazonaws.com:587 685 | smtp_sasl_auth_enable = yes 686 | smtp_sasl_security_options = noanonymous 687 | smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd 688 | smtp_use_tls = yes 689 | smtp_tls_security_level = encrypt 690 | smtp_tls_note_starttls_offer = yes 691 | smtp_tls_CAfile = /etc/ssl/certs/ca-bundle.crt 692 | --------------------------------------------------------------------------------