├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── group_vars
│   ├── README.md
│   ├── ipfs.yml
│   └── ipfs_cluster.yml
├── host_vars
│   └── README.md
├── inventory.yml
├── ipfs-cluster.yml
├── ipfs.yml
└── roles
    ├── ipfs-cluster
    │   ├── handlers
    │   │   └── main.yaml
    │   ├── tasks
    │   │   └── main.yml
    │   └── templates
    │       ├── etc
    │       │   ├── init.d
    │       │   │   └── ipfs-cluster
    │       │   └── systemd
    │       │       └── system
    │       │           └── ipfs-cluster.service
    │       ├── identity.json
    │       ├── peerstore
    │       └── service.json
    └── ipfs
        ├── handlers
        │   └── main.yml
        ├── tasks
        │   └── main.yml
        └── templates
            ├── etc
            │   ├── init.d
            │   │   └── ipfs
            │   └── systemd
            │       └── system
            │           └── ipfs.service
            └── home
                └── ipfs
                    └── ipfs_default_config

/.gitignore:
--------------------------------------------------------------------------------
1 | *.retry
2 | host_vars/*
3 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2017 Hector Sanjuan
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | all: ipfs ipfs-cluster
2 | ipfs:
3 | 	ansible-playbook -i inventory.yml ipfs.yml
4 | ipfs-cluster:
5 | 	ansible-playbook -i inventory.yml ipfs-cluster.yml
6 | .PHONY: all ipfs ipfs-cluster
7 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Ansible roles for `go-ipfs` and `ipfs-cluster`
2 | 
3 | This repository contains Ansible roles to install and run
4 | [`go-ipfs`](https://github.com/ipfs/go-ipfs) and
5 | [`IPFS Cluster`](https://github.com/ipfs/ipfs-cluster).
6 | 
7 | They include a systemd service file for both.
8 | 
9 | ## Usage
10 | 
11 | If you are familiar with Ansible, you can just re-use the roles in the way
12 | that fits you best. Otherwise follow these steps:
13 | 
14 | 0. Make sure you have Ansible installed: `pip install ansible`.
15 | 1. Fill in `inventory.yml` and place the hostnames of your nodes under the `[ipfs]` group.
16 | 2. 
Edit the `group_vars/ipfs.yml` and `group_vars/ipfs_cluster.yml` files,
17 | setting the right configuration values, including generating an
18 | [IPFS Cluster secret](https://cluster.ipfs.io/documentation/guides/security/#the-cluster-secret)
19 | with `od -vN 32 -An -tx1 /dev/urandom | tr -d ' \n' ; echo`.
20 | 3. Add a file for each hostname (the filename is the hostname) to the `host_vars`
21 | folder as outlined in [`host_vars/README.md`](host_vars/README.md),
22 | containing the necessary host-specific variables (example in the
23 | `host_vars` README).
24 | 4. Run `make`.
25 | 
26 | `make` will run ansible for the `ipfs` and the `ipfs-cluster` roles, which
27 | apply to the `[ipfs]` and `[ipfs_cluster]` inventory groups. Upon successful completion,
28 | both `go-ipfs` and `ipfs-cluster` should be running on the nodes (they are
29 | installed under `/usr/local/bin` and run by a dedicated `ipfs` system user).
30 | 
31 | You can use `systemctl status ipfs` and `systemctl status ipfs-cluster` to
32 | check the status of the new services.
33 | 
34 | Note that the `ipfs` configuration has been generated using `profile=server`, so it
35 | will not automatically scan the local network.
36 | 
--------------------------------------------------------------------------------
/group_vars/README.md:
--------------------------------------------------------------------------------
1 | # Group vars
2 | 
3 | The files in `group_vars` can be used to set variables that control the
4 | common configuration of all ipfs and ipfs-cluster peers.
5 | 
6 | Edit the `ipfs.yml` and `ipfs_cluster.yml` files in this folder and set the
7 | appropriate values for the variables.
8 | 
9 | Note that the cluster `service.json` template can be fully customized by defining
10 | the appropriate variables; otherwise they take sensible defaults.
11 | 
--------------------------------------------------------------------------------
/group_vars/ipfs.yml:
--------------------------------------------------------------------------------
1 | dist_url: https://dist.ipfs.io
2 | ipfs_version: v0.4.19
3 | ipfs_arch: amd64
4 | ipfs_home: /home/ipfs
5 | ipfs_storage_max: 10G
6 | ipfs_bloom_filter_size: 1048576 # 1MB
7 | ipfs_api_listen: /ip4/127.0.0.1/tcp/5001
8 | ipfs_fd_max: 4092
9 | ipfs_use_badger: false
10 | ipfs_discovery: false
11 | ipfs_disk_sync: true
12 | ipfs_routing: dht
13 | ipfs_disable_bandwidth_metrics: false
14 | ipfs_reprovider_strategy: all
--------------------------------------------------------------------------------
/group_vars/ipfs_cluster.yml:
--------------------------------------------------------------------------------
1 | dist_url: https://dist.ipfs.io
2 | ipfs_cluster_secret: "use `od -vN 32 -An -tx1 /dev/urandom | tr -d ' \n' ; echo` to generate this"
3 | ipfs_cluster_arch: amd64
4 | ipfs_cluster_version: v0.10.0
5 | ipfs_cluster_fd_max: 10000
6 | 
--------------------------------------------------------------------------------
/host_vars/README.md:
--------------------------------------------------------------------------------
1 | # Setting up `host_vars` for each IPFS Cluster node
2 | 
3 | Add one file for each ipfs-cluster host. The filename should match a domain
4 | name from your inventory, e.g. `example.org`.
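For example, with two nodes in the inventory, the folder might look like this (the hostnames here are placeholders for your own):

```
host_vars/
├── node0.example.org
└── node1.example.org
```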
5 | 
6 | Each file should contain the following variables, updated for your cluster:
7 | 
8 | ```yaml
9 | ipfs_peer_id: ""
10 | ipfs_private_key: ""
11 | 
12 | ipfs_cluster_id: ""
13 | ipfs_cluster_private_key: ""
14 | 
15 | ipfs_cluster_peer_addr: "/dns4/<hostname>/tcp/9096/ipfs/<ipfs_cluster_peer_id>"
16 | ```
17 | 
18 | To generate the `ipfs_peer_id`/`ipfs_private_key` and
19 | `ipfs_cluster_id`/`ipfs_cluster_private_key` key-pairs, use [`ipfs-key`]. They
20 | must all be different (no ID or key can be shared between daemons).
21 | 
22 | To install [`ipfs-key`], with Go installed, run:
23 | 
24 | ```console
25 | $ go get github.com/whyrusleeping/ipfs-key
26 | ```
27 | 
28 | then generate a key-pair:
29 | 
30 | ```console
31 | $ ipfs-key | base64 -w 0
32 | 
33 | # or on macOS
34 | $ ipfs-key | base64
35 | 
36 | Generating a 2048 bit RSA key...
37 | Success!
38 | ID for generated key: Qmat3Bk4SixhZdU5j5pf2uXcpUuTSxKHQu7whbWrdFwn5g
39 | CAASqAkwggSkAgEAAoIBAQCUzxjdml2fORveg9PN98qqiENexLzoaSeNc6N7K8iVzneCU1aDZpM...
40 | 
41 | ```
42 | 
43 | Where:
44 | 
45 | - the value of `ID for generated key: ` is your `ipfs_peer_id` or `ipfs_cluster_id`
46 | - the subsequent line is your `ipfs_private_key` or `ipfs_cluster_private_key`, encoded as base64
47 | 
48 | Copy those values into your host config file.
49 | 
50 | For `ipfs_cluster_peer_addr` you need to specify a valid [multiaddr], taking the example below
51 | 
52 | ```
53 | "/dns4/<hostname>/tcp/9096/ipfs/<ipfs_cluster_peer_id>"
54 | ```
55 | 
56 | and replacing:
57 | 
58 | - `<hostname>`: with the host from your inventory that this file is for, e.g. `example.org`
59 | - `<ipfs_cluster_peer_id>`: with the peer ID of this cluster node, which you just created
60 | 
61 | You can also define `ipfs_cluster_peername` to name your cluster peer for
62 | convenience. Otherwise, the hostname will be used.
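Putting it all together, a complete host file for a hypothetical `node0.example.org` could look like the sketch below. The IPFS values reuse the sample `ipfs-key` output shown above purely for illustration, and the cluster values are placeholders from a second, separate `ipfs-key` run; generate your own keys for every daemon.

```yaml
# host_vars/node0.example.org  (illustrative values only)
ipfs_peer_id: "Qmat3Bk4SixhZdU5j5pf2uXcpUuTSxKHQu7whbWrdFwn5g"
ipfs_private_key: "CAASqAkwggSkAgEAAoIBAQCUzxjdml2fORveg9PN98qqiENexLzoaSeNc6N7K8iVzneCU1aDZpM..."

ipfs_cluster_id: "<ID printed by a second ipfs-key run>"
ipfs_cluster_private_key: "<base64-encoded key from that same run>"

# hostname and cluster peer id filled in as described above
ipfs_cluster_peer_addr: "/dns4/node0.example.org/tcp/9096/ipfs/<ipfs_cluster_id>"

# optional friendly name; defaults to the hostname when omitted
ipfs_cluster_peername: "node0"
```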
63 | 64 | [`ipfs-key`]: https://github.com/whyrusleeping/ipfs-key 65 | [multiaddr]: https://multiformats.io/multiaddr/ 66 | -------------------------------------------------------------------------------- /inventory.yml: -------------------------------------------------------------------------------- 1 | [ipfs] 2 | # Write here your ipfs nodes 3 | 4 | [ipfs_cluster:children] 5 | ipfs -------------------------------------------------------------------------------- /ipfs-cluster.yml: -------------------------------------------------------------------------------- 1 | - hosts: ipfs 2 | roles: 3 | - ipfs-cluster 4 | -------------------------------------------------------------------------------- /ipfs.yml: -------------------------------------------------------------------------------- 1 | - hosts: ipfs 2 | roles: 3 | - ipfs 4 | - hosts: ipfs_cluster 5 | roles: 6 | - ipfs-cluster 7 | -------------------------------------------------------------------------------- /roles/ipfs-cluster/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | - name: reload systemd 2 | become: yes 3 | systemd: daemon_reload=yes 4 | notify: restart IPFS Cluster 5 | when: 6 | - not (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA") 7 | - not (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<')) 8 | - not (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<')) 9 | 10 | 11 | - name: restart IPFS Cluster 12 | become: yes 13 | service: 14 | name: ipfs-cluster 15 | enabled: yes 16 | state: restarted 17 | -------------------------------------------------------------------------------- /roles/ipfs-cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: create download folder for ipfs-cluster 2 | become: yes 3 | file: 4 | state: directory 5 | owner: root 6 | group: root 7 | dest: /opt/ipfs-cluster/{{ipfs_cluster_version}} 8 | 9 | - name: download and unpack IPFS Cluster 10 | when: not ansible_check_mode 11 | block: 12 | - name: download IPFS Cluster 13 | become: yes 14 | get_url: 15 | url: "{{ dist_url }}/{{ item }}/{{ipfs_cluster_version}}/{{ item }}_{{ipfs_cluster_version}}_linux-{{ipfs_cluster_arch}}.tar.gz" 16 | dest: /opt/ipfs-cluster/{{ipfs_cluster_version}}/{{ item }}.tar.gz 17 | timeout: 30 18 | with_items: 19 | - ipfs-cluster-service 20 | - ipfs-cluster-ctl 21 | 22 | - name: unpack IPFS cluster 23 | become: yes 24 | unarchive: 25 | remote_src: yes 26 | src: /opt/ipfs-cluster/{{ipfs_cluster_version}}/{{ item }}.tar.gz 27 | dest: /opt/ipfs-cluster/{{ipfs_cluster_version}}/ 28 | creates: /opt/ipfs-cluster/{{ipfs_cluster_version}}/{{ item }} 29 | with_items: 30 | - ipfs-cluster-service 31 | - ipfs-cluster-ctl 32 | 33 | - name: link ipfs cluster executables 34 | become: yes 35 | file: 36 | state: link 37 | owner: root 38 | group: root 39 | dest: /usr/local/bin/{{ item }} 40 | src: /opt/ipfs-cluster/{{ipfs_cluster_version}}/{{ item }}/{{ item }} 41 | with_items: 42 | - ipfs-cluster-service 43 | - ipfs-cluster-ctl 44 | 45 | - name: install ipfs-cluster init service 46 | become: yes 47 | template: 48 | src: etc/systemd/system/ipfs-cluster.service 49 | dest: /etc/systemd/system/ipfs-cluster.service 50 | owner: root 51 | group: root 52 | mode: 0644 53 | notify: 54 | - reload systemd 55 | - restart IPFS Cluster 56 | when: 57 | - not (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA") 58 | 
- not (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<')) 59 | - not (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<')) 60 | 61 | - name: install ipfs-custer init service 62 | become: yes 63 | template: 64 | src: etc/init.d/ipfs-cluster 65 | dest: /etc/init.d/ipfs-cluster 66 | owner: root 67 | group: root 68 | mode: 0744 69 | notify: 70 | - restart IPFS Cluster 71 | when: 72 | - (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA") or 73 | (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<')) or 74 | (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<')) 75 | 76 | - name: make .ipfs-cluster directory 77 | become: yes 78 | file: 79 | state: directory 80 | mode: 0700 81 | owner: ipfs 82 | group: ipfs 83 | dest: "{{ ipfs_home }}/.ipfs-cluster" 84 | 85 | - name: copy configuration 86 | become: yes 87 | template: 88 | src: "{{ item }}" 89 | dest: "{{ ipfs_home }}/.ipfs-cluster/{{ item }}" 90 | mode: 0600 91 | owner: ipfs 92 | group: ipfs 93 | tags: 94 | - config 95 | with_items: 96 | - identity.json 97 | - service.json 98 | notify: restart IPFS Cluster 99 | 100 | - name: set version file (to notify restart on upgrades) 101 | become: yes 102 | copy: 103 | content: "{{ ipfs_cluster_version }}" 104 | dest: "{{ ipfs_home }}/cluster_deployed_version" 105 | mode: 0644 106 | owner: ipfs 107 | group: ipfs 108 | notify: restart IPFS Cluster 109 | 110 | - name: copy peerstore 111 | become: yes 112 | template: 113 | src: peerstore 114 | dest: "{{ ipfs_home }}/.ipfs-cluster/peerstore" 115 | mode: 0600 116 | owner: ipfs 117 | group: ipfs 118 | force: false # do not overwrite 119 | tags: 120 | - config 121 | notify: restart IPFS Cluster 122 | 123 | 124 | - name: reload systemd 125 | become: yes 126 | systemd: 127 | daemon_reload: yes 128 | name: ipfs-cluster 129 | when: 130 | - not (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA") 131 | - not (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<')) 132 | - not (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<')) 133 | 134 | 135 | - name: enable and start IPFS Cluster 136 | become: yes 137 | service: 138 | name: ipfs-cluster 139 | state: started 140 | enabled: yes 141 | -------------------------------------------------------------------------------- /roles/ipfs-cluster/templates/etc/init.d/ipfs-cluster: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | ### BEGIN INIT INFO 3 | # Provides: 4 | # Required-Start: $network $remote_fs $syslog 5 | # Required-Stop: $network $remote_fs $syslog 6 | # Default-Start: 2 3 4 5 7 | # Default-Stop: 0 1 6 8 | # Short-Description: Start ipfs 9 | # Description: Runs the ipfs daemon. 
10 | ### END INIT INFO 11 | 12 | dir="/usr/local/bin" 13 | cmd="ipfs-cluster-service daemon" 14 | user="ipfs" 15 | 16 | name=`basename $0` 17 | pid_file="/var/run/$name.pid" 18 | stdout_log="/var/log/$name.log" 19 | stderr_log="/var/log/$name.err" 20 | 21 | get_pid() { 22 | cat "$pid_file" 23 | } 24 | 25 | is_running() { 26 | [ -f "$pid_file" ] && ps `get_pid` > /dev/null 2>&1 27 | } 28 | 29 | case "$1" in 30 | start) 31 | if is_running; then 32 | echo "Already started" 33 | else 34 | echo "Starting $name" 35 | cd "$dir" 36 | if [ -z "$user" ]; then 37 | sudo $cmd >> "$stdout_log" 2>> "$stderr_log" & 38 | else 39 | sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" & 40 | fi 41 | echo $! > "$pid_file" 42 | if ! is_running; then 43 | echo "Unable to start, see $stdout_log and $stderr_log" 44 | exit 1 45 | fi 46 | fi 47 | ;; 48 | stop) 49 | if is_running; then 50 | echo -n "Stopping $name.." 51 | kill `get_pid` 52 | for i in {1..10} 53 | do 54 | if ! is_running; then 55 | break 56 | fi 57 | 58 | echo -n "." 59 | sleep 1 60 | done 61 | echo 62 | 63 | if is_running; then 64 | echo "Not stopped; may still be shutting down or shutdown may have failed" 65 | exit 1 66 | else 67 | echo "Stopped" 68 | if [ -f "$pid_file" ]; then 69 | rm "$pid_file" 70 | fi 71 | fi 72 | else 73 | echo "Not running" 74 | fi 75 | ;; 76 | restart) 77 | $0 stop 78 | if is_running; then 79 | echo "Unable to stop, will not attempt to start" 80 | exit 1 81 | fi 82 | $0 start 83 | ;; 84 | status) 85 | if is_running; then 86 | echo "Running" 87 | else 88 | echo "Stopped" 89 | exit 1 90 | fi 91 | ;; 92 | *) 93 | echo "Usage: $0 {start|stop|restart|status}" 94 | exit 1 95 | ;; 96 | esac 97 | -------------------------------------------------------------------------------- /roles/ipfs-cluster/templates/etc/systemd/system/ipfs-cluster.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=IPFS Cluster Service 3 | After=network.target 4 | 5 | [Service] 6 | LimitNOFILE={{ ipfs_cluster_fd_max }} 7 | Environment="IPFS_CLUSTER_FD_MAX={{ ipfs_cluster_fd_max}}" 8 | ExecStart=/usr/local/bin/ipfs-cluster-service daemon 9 | Restart=on-failure 10 | User=ipfs 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /roles/ipfs-cluster/templates/identity.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "{{ ipfs_cluster_id }}", 3 | "private_key": "{{ ipfs_cluster_private_key }}" 4 | } 5 | -------------------------------------------------------------------------------- /roles/ipfs-cluster/templates/peerstore: -------------------------------------------------------------------------------- 1 | {% for host in groups['ipfs_cluster'] %} 2 | {% if hostvars[host]['ipfs_cluster_peer_addr'] is defined %} 3 | {{ hostvars[host]['ipfs_cluster_peer_addr'] }} 4 | {% endif %} 5 | {% endfor %} -------------------------------------------------------------------------------- /roles/ipfs-cluster/templates/service.json: -------------------------------------------------------------------------------- 1 | { 2 | "cluster": { 3 | "peername": "{{ ipfs_cluster_peername | default('') }}", 4 | "secret": "{{ ipfs_cluster_secret | default('') }}", 5 | "leave_on_shutdown": {{ ipfs_cluster_leave_on_shutdown | default('false') }}, 6 | {% if ipfs_cluster_listen_multiaddress is defined %} 7 | "listen_multiaddress": "{{ ipfs_cluster_listen_multiaddress | 
default('/ip4/0.0.0.0/tcp/9096') }}", 8 | {% else %} 9 | "listen_multiaddress": [ 10 | {% for m in ipfs_cluster_listen_multiaddresses | default(['/ip4/0.0.0.0/tcp/9096']) %} 11 | "{{ m }}"{% if not loop.last %},{% endif %} 12 | 13 | {% endfor %} 14 | ], 15 | {% endif %} 16 | "connection_manager": { 17 | "high_water": {{ ipfs_cluster_connection_manager_high_water | default(400) }}, 18 | "low_water": {{ ipfs_cluster_connection_manager_low_water | default(100) }}, 19 | "grace_period": "{{ ipfs_cluster_connection_manager_grace_period | default('2m0s') }}" 20 | }, 21 | "dial_peer_timeout": "{{ ipfs_cluster_dial_peer_timeout | default('3s') }}", 22 | "state_sync_interval": "{{ ipfs_cluster_state_sync_interval | default('10m') }}", 23 | "pin_recover_interval": "{{ ipfs_cluster_pin_recover_internval | default('12m') }}", 24 | "ipfs_sync_interval": "{{ ipfs_cluster_ipfs_sync_interval | default('130s') }}", 25 | "replication_factor_min": {{ ipfs_cluster_replication_factor_min | default(-1) }}, 26 | "replication_factor_max": {{ ipfs_cluster_replication_factor_max | default(-1) }}, 27 | "monitor_ping_interval": "{{ ipfs_cluster_monitor_ping_interval | default('30s') }}", 28 | "peer_watch_interval": "{{ ipfs_cluster_peer_watch_interval | default('10s') }}", 29 | "mdns_interval": "{{ ipfs_cluster_mdns_interval | default('10s') }}", 30 | "disable_repinning": {{ ipfs_cluster_disable_repinning | default(true) | lower }}, 31 | "follower_mode": {{ ipfs_cluster_follower_mode | default(false) | lower }}, 32 | "peer_addresses": [ 33 | {% for host in groups['ipfs_cluster'] %} 34 | {% if hostvars[host]['ipfs_cluster_peer_addr'] is defined %} 35 | "{{ hostvars[host]['ipfs_cluster_peer_addr'] }}"{% if not loop.last %},{% endif %} 36 | 37 | {% endif %} 38 | {% endfor %} 39 | ] 40 | }, 41 | "consensus": { 42 | {% if (ipfs_cluster_consensus | default("raft")) == "raft" %} 43 | "raft": { 44 | "init_peerset": [ 45 | {% for p in ipfs_cluster_raft_init_peerset | default([]) %} 46 | "{{ p }}"{% if not loop.last %},{% endif %} 47 | 48 | {% endfor %} 49 | ], 50 | "wait_for_leader_timeout": "{{ ipfs_cluster_raft_wait_for_leader_timeout | default('2m') }}", 51 | "network_timeout": "{{ ipfs_cluster_raft_network_timeout | default('10s') }}", 52 | "commit_retries": {{ ipfs_cluster_raft_commit_retries | default(2) }}, 53 | "commit_retry_delay": "{{ ipfs_cluster_raft_commit_retry_delay | default('300ms') }}", 54 | "backups_rotate": {{ ipfs_cluster_raft_backups_rotate | default(10) }}, 55 | "heartbeat_timeout": "{{ ipfs_cluster_raft_heartbeat_timeout | default('1500ms') }}", 56 | "election_timeout": "{{ ipfs_cluster_raft_election_timeout | default('1500ms') }}", 57 | "commit_timeout": "{{ ipfs_cluster_raft_commit_timeout | default('100ms') }}", 58 | "max_append_entries": {{ ipfs_cluster_raft_max_append_entries | default(64) }}, 59 | "trailing_logs": {{ ipfs_cluster_raft_trailing_logs | default(10240) }}, 60 | "snapshot_interval": "{{ ipfs_cluster_raft_snapshot_interval | default('2m') }}", 61 | "snapshot_threshold": {{ ipfs_cluster_raft_snapshot_threshold | default(4096) }}, 62 | "leader_lease_timeout": "{{ ipfs_cluster_raft_leader_lease_timeout | default('800ms') }}" 63 | } 64 | {% else %} 65 | "crdt": { 66 | "cluster_name": "{{ ipfs_cluster_crdt_cluster_name | default('ipfs-cluster') }}", 67 | "trusted_peers": [ 68 | {% for p in ipfs_cluster_crdt_trusted_peers | default([]) %} 69 | "{{ p }}"{% if not loop.last %},{% endif %} 70 | 71 | {% endfor %} 72 | ], 73 | "rebroadcast_interval": "{{ ipfs_cluster_crdt_rebroadcast_interval | 
default('1m') }}", 74 | "peerset_metric": "{{ ipfs_cluster_crdt_peerset_metric | default('ping') }}", 75 | "batching": { 76 | "max_batch_size": {{ ipfs_cluster_crdt_max_batch_size | default(0) }}, 77 | "max_batch_age": "{{ ipfs_cluster_crdt_max_batch_age | default('0s') }}", 78 | "max_queue_size": {{ ipfs_cluster_crdt_max_queue_size | default(50000) }} 79 | } 80 | } 81 | {% endif %} 82 | }, 83 | "api": { 84 | "ipfsproxy": { 85 | "listen_multiaddress": "{{ ipfs_cluster_ipfsproxy_listen_multiaddress | default('/ip4/127.0.0.1/tcp/9095') }}", 86 | "node_multiaddress": "{{ ipfs_cluster_ipfsproxy_node_multiaddress | default('/ip4/127.0.0.1/tcp/5001') }}", 87 | "read_timeout": "{{ ipfs_cluster_ipfsproxy_read_timeout | default('10m0s') }}", 88 | "read_header_timeout": "{{ ipfs_cluster_ipfsproxy_read_header_timeout | default('5s') }}", 89 | "write_timeout": "{{ ipfs_cluster_ipfsproxy_write_timeout | default('10m0s') }}", 90 | "idle_timeout": "{{ ipfs_cluster_ipfsproxy_idle_timeout | default('1m0s') }}", 91 | "max_header_bytes" : {{ ipfs_cluster_ipfsproxy_max_header_bytes | default(4096) }}, 92 | "extract_headers_extra": [ 93 | {% for h in ipfs_cluster_ipfsproxy_extract_headers_extra | default([]) %} 94 | "{{ h }}"{% if not loop.last %},{% endif %} 95 | 96 | {% endfor %} 97 | ], 98 | "extract_headers_path": "/api/v0/version", 99 | "extract_headers_ttl": "5m0s" 100 | }, 101 | "restapi": { 102 | "http_listen_multiaddress": "{{ ipfs_cluster_restapi_http_listen_multiaddress | default('/ip4/127.0.0.1/tcp/9094') }}", 103 | "ssl_cert_file":"{{ ipfs_cluster_restapi_ssl_cert_file | default('') }}", 104 | "ssl_key_file":"{{ ipfs_cluster_restapi_ssl_key_file | default('') }}", 105 | "libp2p_listen_multiaddress": "{{ ipfs_cluster_restapi_libp2p_listen_multiaddress | default('') }}", 106 | "id": "{{ ipfs_cluster_restapi_id | default('') }}", 107 | "private_key": "{{ ipfs_cluster_restapi_private_key | default('') }}", 108 | "read_timeout": "{{ ipfs_cluster_restapi_read_timeout | default('0s') }}", 109 | "read_header_timeout": "{{ ipfs_cluster_restapi_read_header_timeout | default('1h') }}", 110 | "write_timeout": "{{ ipfs_cluster_restapi_write_timeout | default('0s') }}", 111 | "idle_timeout": "{{ ipfs_cluster_restapi_idle_timeout | default('10m') }}", 112 | "http_log_file": "{{ ipfs_cluster_restapi_http_log_file | default('') }}", 113 | {% if ipfs_cluster_restapi_users | default('') != "" %} 114 | "basic_auth_credentials": { 115 | {% for u in ipfs_cluster_restapi_users %} 116 | "{{ u.username }}": "{{ u.password }}"{% if not loop.last %},{% endif %} 117 | 118 | {% endfor %} 119 | }, 120 | {% else %} 121 | "basic_auth_credentials": null, 122 | {% endif %} 123 | "headers": { 124 | {% for h in ipfs_cluster_restapi_headers | default({}) %} 125 | "{{ h.Name }}": ["{{ h.Value }}"]{% if not loop.last %},{% endif %} 126 | 127 | {% endfor %} 128 | }, 129 | "cors_allowed_origins": [ 130 | {% for c in ipfs_cluster_restapi_cors_allowed_origins | default([])%} 131 | "{{ c }}"{% if not loop.last %},{% endif %} 132 | 133 | {% endfor %} 134 | ], 135 | "cors_allowed_methods": [ 136 | {% for c in ipfs_cluster_restapi_cors_allowed_methods | default([]) %} 137 | "{{ c }}"{% if not loop.last %},{% endif %} 138 | 139 | {% endfor %} 140 | ], 141 | "cors_allowed_headers": [ 142 | {% for c in ipfs_cluster_restapi_cors_allowed_headers | default([]) %} 143 | "{{ c }}"{% if not loop.last %},{% endif %} 144 | 145 | {% endfor %} 146 | ], 147 | "cors_exposed_headers": [ 148 | {% for c in ipfs_cluster_restapi_cors_exposed_headers | default([]) 
%} 149 | "{{ c }}"{% if not loop.last %},{% endif %} 150 | 151 | {% endfor %} 152 | ], 153 | "cors_allow_credentials": {{ ipfs_cluster_restapi_cors_allow_credentials | default(false) | lower }}, 154 | "cors_max_age": "{{ ipfs_cluster_restapi_cors_max_age | default('0s') }}" 155 | } 156 | }, 157 | "ipfs_connector": { 158 | "ipfshttp": { 159 | "node_multiaddress": "{{ ipfs_cluster_ipfshttp_node_multiaddress | default('/ip4/127.0.0.1/tcp/5001') }}", 160 | "connect_swarms_delay": "{{ ipfs_cluster_ipfshttp_connect_swarms_delay | default('30s') }}", 161 | "ipfs_request_timeout": "{{ ipfs_cluster_ipfshttp_ipfs_request_timeout | default('5m') }}", 162 | "repogc_timeout": "{{ ipfs_cluster_ipfshttp_repogc_timeout | default('24h') }}", 163 | "pin_timeout": "{{ ipfs_cluster_ipfshttp_pin_timeout | default('300h') }}", 164 | "unpin_timeout": "{{ ipfs_cluster_ipfshttp_unpin_timeout | default('3h') }}", 165 | "unpin_disable": {{ ipfs_cluster_ipfshttp_unpin_disable | default(false) | lower }} 166 | } 167 | }, 168 | "pin_tracker": { 169 | "stateless": { 170 | "max_pin_queue_size": {{ ipfs_cluster_stateless_max_pin_queue_size | default(1000000) }}, 171 | "concurrent_pins": {{ ipfs_cluster_stateless_concurrent_pins | default(10) }}, 172 | "priority_pin_max_age" : "{{ ipfs_cluster_stateless_priority_pin_max_age | default('24h') }}", 173 | "priority_pin_max_retries" : {{ ipfs_cluster_stateless_priority_pin_max_retries | default(5) }} 174 | } 175 | }, 176 | "monitor": { 177 | "pubsubmon": { 178 | "check_interval": "{{ ipfs_cluster_pubsubmon_check_interval | default('15s') }}", 179 | "failure_threshold": {{ ipfs_cluster_pubsubmon_failure_threshold | default(3) }} 180 | } 181 | }, 182 | "informer": { 183 | "disk": { 184 | "metric_ttl": "{{ ipfs_cluster_informer_disk_metric_ttl | default('5m') }}", 185 | "metric_type": "{{ ipfs_cluster_informer_disk_metric_type | default('freespace') }}" 186 | }, 187 | "tags": { 188 | "metric_ttl": "30s", 189 | "tags": {{ ipfs_cluster_informer_tags_tags | default({}) | to_json }} 190 | } 191 | }, 192 | "allocator": { 193 | "balanced": { 194 | "allocate_by": {{ ipfs_cluster_allocator_balanced_allocate_by | default(["freespace"]) | to_json }} 195 | } 196 | }, 197 | "observations": { 198 | "metrics": { 199 | "enable_stats": {{ ipfs_cluster_metrics_enable_stats | default(false) | lower }}, 200 | "prometheus_endpoint": "{{ ipfs_cluster_metrics_prometheus_endpoint | default('/ip4/0.0.0.0/tcp/8888') }}", 201 | "reporting_interval": "{{ ipfs_cluster_metrics_reporting_interval | default('2s') }}" 202 | }, 203 | "tracing": { 204 | "enable_tracing": {{ ipfs_cluster_tracing_enable_tracing | default(false) | lower }}, 205 | "jaeger_agent_endpoint": "{{ ipfs_cluster_tracing_jaeger_agent_endpoint | default('/ip4/0.0.0.0/udp/6831') }}", 206 | "sampling_prob": {{ ipfs_cluster_tracing_sampling_prob | default('0.3') }}, 207 | "service_name": "{{ ipfs_cluster_tracing_service_name | default('cluster-daemon') }}" 208 | } 209 | }, 210 | "datastore": { 211 | "badger": { 212 | "gc_discard_ratio": {{ ipfs_cluster_badger_gc_discard_ratio | default(0.2) }}, 213 | "gc_interval": "{{ ipfs_cluster_badger_gc_interval | default('15m0s') }}", 214 | "gc_sleep": "{{ ipfs_cluster_badger_gc | default('10s') }}", 215 | "badger_options": { 216 | "dir": "{{ ipfs_cluster_badger_options_dir | default('') }}", 217 | "value_dir": "{{ ipfs_cluster_badger_options_value_dir | default('') }}", 218 | "sync_writes": {{ ipfs_cluster_badger_options_sync_writes | default(true) | lower }}, 219 | "table_loading_mode": {{ 
ipfs_cluster_badger_options_table_loading_mode | default(2) }}, 220 | "value_log_loading_mode": {{ ipfs_cluster_badger_options_value_log_loading_mode | default(2) }}, 221 | "num_versions_to_keep": {{ ipfs_cluster_badger_options_num_versions_to_keep | default(1) }}, 222 | "max_table_size": {{ ipfs_cluster_badger_options_max_table_size | default(67108864) }}, 223 | "level_size_multiplier": {{ ipfs_cluster_badger_options_level_size_multiplier | default(10) }}, 224 | "max_levels": {{ ipfs_cluster_badger_options_max_levels | default(7) }}, 225 | "value_threshold": {{ ipfs_cluster_badger_options_value_threshold | default(32) }}, 226 | "num_memtables": {{ ipfs_cluster_badger_options_num_memtables | default(5) }}, 227 | "num_level_zero_tables": {{ ipfs_cluster_badger_options_num_level_zero_tables | default(5) }}, 228 | "num_level_zero_tables_stall": {{ ipfs_cluster_badger_options_num_level_zero_tables_stall | default(10) }}, 229 | "level_one_size": {{ ipfs_cluster_badger_options_level_one_size | default(268435456) }}, 230 | "value_log_file_size": {{ ipfs_cluster_badger_options_value_log_file_size | default(1073741823) }}, 231 | "value_log_max_entries": {{ ipfs_cluster_badger_options_value_log_max_entries | default(1000000) }}, 232 | "num_compactors": {{ ipfs_cluster_badger_options_num_compactors | default(2) }}, 233 | "compact_l_0_on_close": {{ ipfs_cluster_badger_options_compact_l_0_on_close | default(true) | lower }}, 234 | "read_only": {{ ipfs_cluster_badger_options_read_only | default(false) | lower }}, 235 | "truncate": {{ ipfs_cluster_badger_options_truncate | default(false) | lower }} 236 | } 237 | } 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /roles/ipfs/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: reload systemd 2 | become: yes 3 | systemd: 4 | daemon_reload: yes 5 | name: ipfs 6 | notify: restart IPFS 7 | when: 8 | - not (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA") 9 | - not (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<')) 10 | - not (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<')) 11 | 12 | - name: restart IPFS 13 | become: yes 14 | service: 15 | name: ipfs 16 | enabled: yes 17 | state: restarted 18 | -------------------------------------------------------------------------------- /roles/ipfs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: create download folder for go-ipfs 2 | become: yes 3 | file: 4 | state: directory 5 | owner: root 6 | group: root 7 | dest: /opt/go-ipfs/{{ipfs_version}} 8 | 9 | - name: download and unpack IPFS 10 | when: not ansible_check_mode 11 | block: 12 | - name: download IPFS 13 | become: yes 14 | get_url: 15 | url: "{{ dist_url }}/go-ipfs/{{ipfs_version}}/go-ipfs_{{ipfs_version}}_linux-{{ipfs_arch}}.tar.gz" 16 | dest: /opt/go-ipfs/{{ipfs_version}}/go-ipfs.tar.gz 17 | timeout: 30 18 | 19 | - name: unpack go-ipfs 20 | become: yes 21 | unarchive: 22 | remote_src: yes 23 | src: /opt/go-ipfs/{{ipfs_version}}/go-ipfs.tar.gz 24 | dest: /opt/go-ipfs/{{ipfs_version}} 25 | creates: /opt/go-ipfs/{{ipfs_version}}/go-ipfs 26 | notify: restart IPFS 27 | 28 | - name: link go-ipfs executable 29 | become: yes 30 | file: 31 | state: link 32 | owner: root 33 | group: root 34 | dest: /usr/local/bin/ipfs 35 | src: /opt/go-ipfs/{{ipfs_version}}/go-ipfs/ipfs 36 | 37 | - name: 
install ipfs systemd init service 38 | become: yes 39 | template: 40 | src: etc/systemd/system/ipfs.service 41 | dest: /etc/systemd/system/ipfs.service 42 | owner: root 43 | group: root 44 | mode: 0644 45 | notify: 46 | - reload systemd 47 | - restart IPFS 48 | when: 49 | - not (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA") 50 | - not (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<')) 51 | - not (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<')) 52 | tags: 53 | - init 54 | 55 | - name: install ipfs init service 56 | become: yes 57 | template: 58 | src: etc/init.d/ipfs 59 | dest: /etc/init.d/ipfs 60 | owner: root 61 | group: root 62 | mode: 0744 63 | notify: 64 | - restart IPFS 65 | when: 66 | - (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA") or 67 | (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<')) or 68 | (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<')) 69 | tags: 70 | - init 71 | 72 | 73 | - name: make ipfs group 74 | become: yes 75 | group: 76 | state: present 77 | name: ipfs 78 | 79 | - name: make ipfs user 80 | become: yes 81 | user: 82 | state: present 83 | name: ipfs 84 | group: ipfs 85 | shell: /bin/bash 86 | home: "{{ ipfs_home }}" 87 | comment: IPFS user 88 | system: yes 89 | 90 | - name: copy default config 91 | become: yes 92 | template: 93 | src: home/ipfs/ipfs_default_config 94 | dest: "{{ ipfs_home }}/ipfs_default_config" 95 | owner: ipfs 96 | group: ipfs 97 | mode: 0644 98 | tags: 99 | - config 100 | 101 | - name: init IPFS 102 | become: yes 103 | become_user: ipfs 104 | command: ipfs init --empty-repo -- {{ ipfs_home }}/ipfs_default_config 105 | args: 106 | creates: "{{ ipfs_home }}/.ipfs/config" 107 | notify: restart IPFS 108 | 109 | - name: download and unpack NOpfs 110 | when: not ansible_check_mode 111 | block: 112 | - name: create plugins folder 113 | become: yes 114 | file: 115 | state: directory 116 | owner: ipfs 117 | group: ipfs 118 | dest: "{{ ipfs_home }}/.ipfs/plugins" 119 | 120 | - name: download and unpack nopfs 121 | when: not ansible_check_mode 122 | become: yes 123 | unarchive: 124 | remote_src: yes 125 | src: "https://github.com/ipfs-shipyard/nopfs/releases/download/nopfs-kubo-plugin/{{nopfs_version}}/nopfs-kubo-plugin_{{nopfs_version}}_linux_{{ipfs_arch}}.tar.gz" 126 | dest: "{{ ipfs_home }}/.ipfs/plugins/" 127 | include: 128 | - nopfs-kubo-plugin/nopfs-kubo-plugin 129 | extra_opts: 130 | - "--strip-components=1" 131 | owner: ipfs 132 | group: ipfs 133 | notify: restart IPFS 134 | tags: 135 | - nopfs 136 | 137 | - name: set version file (to notify restart on upgrades) 138 | become: yes 139 | copy: 140 | content: "{{ ipfs_version }}" 141 | dest: "{{ ipfs_home }}/ipfs_deployed_version" 142 | mode: 0644 143 | owner: ipfs 144 | group: ipfs 145 | notify: restart IPFS 146 | 147 | - name: reload systemd 148 | become: yes 149 | systemd: 150 | daemon_reload: yes 151 | name: ipfs 152 | when: 153 | - not (ansible_distribution == "Amazon" and ansible_distribution_major_version == "NA") 154 | - not (ansible_distribution == "Ubuntu" and ansible_distribution_version is version('15.04', '<')) 155 | - not (ansible_distribution == "Debian" and ansible_distribution_version is version('8', '<')) 156 | 157 | - name: enable and start IPFS 158 | become: yes 159 | service: 160 | name: ipfs 161 | state: started 162 | enabled: yes 163 | 
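Several tasks in this role carry tags (`init`, `config`, `nopfs`), so after an initial full run you can re-apply just a subset of them. A couple of illustrative invocations (the `--limit` hostname is a placeholder):

```console
# Re-run only the configuration-tagged tasks of the ipfs and ipfs-cluster roles
$ ansible-playbook -i inventory.yml ipfs.yml --tags config

# Refresh just the ipfs init/systemd service file on a single host
$ ansible-playbook -i inventory.yml ipfs.yml --tags init --limit node0.example.org
```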
-------------------------------------------------------------------------------- /roles/ipfs/templates/etc/init.d/ipfs: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | ### BEGIN INIT INFO 3 | # Provides: 4 | # Required-Start: $network $remote_fs $syslog 5 | # Required-Stop: $network $remote_fs $syslog 6 | # Default-Start: 2 3 4 5 7 | # Default-Stop: 0 1 6 8 | # Short-Description: Start ipfs 9 | # Description: Runs the ipfs daemon. 10 | ### END INIT INFO 11 | 12 | export IPFS_FD_MAX={{ ipfs_fd_max }} 13 | 14 | dir="/usr/local/bin" 15 | cmd="ipfs daemon" 16 | user="ipfs" 17 | 18 | name=`basename $0` 19 | pid_file="/var/run/$name.pid" 20 | stdout_log="/var/log/$name.log" 21 | stderr_log="/var/log/$name.err" 22 | 23 | get_pid() { 24 | cat "$pid_file" 25 | } 26 | 27 | is_running() { 28 | [ -f "$pid_file" ] && ps `get_pid` > /dev/null 2>&1 29 | } 30 | 31 | case "$1" in 32 | start) 33 | if is_running; then 34 | echo "Already started" 35 | else 36 | echo "Starting $name" 37 | cd "$dir" 38 | if [ -z "$user" ]; then 39 | sudo $cmd >> "$stdout_log" 2>> "$stderr_log" & 40 | else 41 | sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" & 42 | fi 43 | echo $! > "$pid_file" 44 | if ! is_running; then 45 | echo "Unable to start, see $stdout_log and $stderr_log" 46 | exit 1 47 | fi 48 | fi 49 | ;; 50 | stop) 51 | if is_running; then 52 | echo -n "Stopping $name.." 53 | kill `get_pid` 54 | for i in {1..10} 55 | do 56 | if ! is_running; then 57 | break 58 | fi 59 | 60 | echo -n "." 61 | sleep 1 62 | done 63 | echo 64 | 65 | if is_running; then 66 | echo "Not stopped; may still be shutting down or shutdown may have failed" 67 | exit 1 68 | else 69 | echo "Stopped" 70 | if [ -f "$pid_file" ]; then 71 | rm "$pid_file" 72 | fi 73 | fi 74 | else 75 | echo "Not running" 76 | fi 77 | ;; 78 | restart) 79 | $0 stop 80 | if is_running; then 81 | echo "Unable to stop, will not attempt to start" 82 | exit 1 83 | fi 84 | $0 start 85 | ;; 86 | status) 87 | if is_running; then 88 | echo "Running" 89 | else 90 | echo "Stopped" 91 | exit 1 92 | fi 93 | ;; 94 | *) 95 | echo "Usage: $0 {start|stop|restart|status}" 96 | exit 1 97 | ;; 98 | esac 99 | 100 | -------------------------------------------------------------------------------- /roles/ipfs/templates/etc/systemd/system/ipfs.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=IPFS daemon 3 | After=network.target 4 | 5 | [Service] 6 | Type=notify 7 | User=ipfs 8 | Group=ipfs 9 | StateDirectory=ipfs 10 | TimeoutStartSec=10800 11 | LimitNOFILE={{ ipfs_fd_max }} 12 | MemorySwapMax=0 13 | Environment="IPFS_FD_MAX={{ ipfs_fd_max}}" 14 | ExecStart=/usr/local/bin/ipfs daemon --migrate {%- if ipfs_enable_gc | default(False) %} --enable-gc{% endif %} 15 | 16 | Restart=on-failure 17 | KillSignal=SIGINT 18 | 19 | [Install] 20 | WantedBy=multi-user.target 21 | -------------------------------------------------------------------------------- /roles/ipfs/templates/home/ipfs/ipfs_default_config: -------------------------------------------------------------------------------- 1 | { 2 | "Identity": { 3 | "PeerID": "{{ ipfs_peer_id }}", 4 | "PrivKey": "{{ ipfs_private_key }}" 5 | }, 6 | "Datastore": { 7 | "StorageMax": "{{ ipfs_storage_max }}", 8 | "StorageGCWatermark": {{ ipfs_gc_watermwark | default(90) }}, 9 | "BloomFilterSize": {{ ipfs_bloom_filter_size }}, 10 | "GCPeriod": "{{ ipfs_gc_period | default('1h') }}", 11 | "HashOnRead": false, 12 | {% if ipfs_use_badger | 
default(False) %} 13 | "Spec": { 14 | "child": { 15 | "path": "badgerds", 16 | "syncWrites": {{ ipfs_disk_sync | default(True) | to_json }}, 17 | "truncate": true, 18 | "type": "badgerds" 19 | }, 20 | "prefix": "badger.datastore", 21 | "type": "measure" 22 | } 23 | {% else %} 24 | "Spec": { 25 | "mounts": [ 26 | { 27 | "child": { 28 | "path": "blocks", 29 | "shardFunc": "/repo/flatfs/shard/v1/next-to-last/2", 30 | "sync": {{ ipfs_disk_sync | default(True) | to_json }}, 31 | "type": "flatfs" 32 | }, 33 | "mountpoint": "/blocks", 34 | "prefix": "flatfs.datastore", 35 | "type": "measure" 36 | }, 37 | { 38 | "child": { 39 | "compression": "none", 40 | "path": "datastore", 41 | "type": "levelds" 42 | }, 43 | "mountpoint": "/", 44 | "prefix": "leveldb.datastore", 45 | "type": "measure" 46 | } 47 | ], 48 | "type": "mount" 49 | } 50 | {% endif %} 51 | }, 52 | "Addresses": { 53 | "API": "{{ ipfs_api_listen | default('/ip4/127.0.0.1/tcp/5001') }}", 54 | "Announce": {{ ipfs_announce | default([]) | to_nice_json(indent=2) | indent(4) }}, 55 | "Gateway": "/ip4/127.0.0.1/tcp/8080", 56 | "NoAnnounce": [ 57 | {% if ipfs_no_announce is defined %} 58 | {% for p in ipfs_no_announce %} 59 | "{{ p }}"{% if not loop.last %},{% endif %} 60 | {% endfor %} 61 | {% else %} 62 | "/ip4/100.64.0.0/ipcidr/10", 63 | "/ip4/169.254.0.0/ipcidr/16", 64 | "/ip4/172.16.0.0/ipcidr/12", 65 | "/ip4/192.0.0.0/ipcidr/24", 66 | "/ip4/192.0.0.0/ipcidr/29", 67 | "/ip4/192.0.0.8/ipcidr/32", 68 | "/ip4/192.0.0.170/ipcidr/32", 69 | "/ip4/192.0.0.171/ipcidr/32", 70 | "/ip4/192.0.2.0/ipcidr/24", 71 | "/ip4/192.168.0.0/ipcidr/16", 72 | "/ip4/198.18.0.0/ipcidr/15", 73 | "/ip4/198.51.100.0/ipcidr/24", 74 | "/ip4/203.0.113.0/ipcidr/24", 75 | "/ip4/240.0.0.0/ipcidr/4", 76 | "/ip6/100::/ipcidr/64", 77 | "/ip6/2001:2::/ipcidr/48", 78 | "/ip6/2001:db8::/ipcidr/32", 79 | "/ip6/fc00::/ipcidr/7", 80 | "/ip6/fe80::/ipcidr/10" 81 | {% endif %} 82 | ], 83 | "Swarm": [ 84 | "/ip4/0.0.0.0/tcp/4001", 85 | "/ip4/0.0.0.0/tcp/4002/ws", 86 | "/ip4/0.0.0.0/udp/4001/quic", 87 | "/ip4/0.0.0.0/udp/4001/quic-v1", 88 | "/ip4/0.0.0.0/udp/4001/quic-v1/webtransport", 89 | "/ip6/::/tcp/4001", 90 | "/ip6/::/udp/4001/quic", 91 | "/ip6/::/udp/4001/quic-v1", 92 | "/ip6/::/udp/4001/quic-v1/webtransport", 93 | "/ip6/::/tcp/4002/ws" 94 | ] 95 | }, 96 | "Mounts": { 97 | "IPFS": "/ipfs", 98 | "IPNS": "/ipns", 99 | "FuseAllowOther": false 100 | }, 101 | "Discovery": { 102 | "MDNS": { 103 | "Enabled": {{ ipfs_discovery | default(False) | to_json }}, 104 | "Interval": 10 105 | } 106 | }, 107 | "Routing": { 108 | "AcceleratedDHTClient": {{ ipfs_accelerated_dht_client | default(False) | lower }}, 109 | "Routers": null, 110 | "Methods": null 111 | }, 112 | "Ipns": { 113 | "RepublishPeriod": "", 114 | "RecordLifetime": "", 115 | "ResolveCacheSize": 128 116 | }, 117 | "Bootstrap": [ 118 | "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", 119 | "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", 120 | "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", 121 | "/ip4/104.131.131.82/udp/4001/quic/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", 122 | "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", 123 | "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa" 124 | ], 125 | "Gateway": { 126 | "HTTPHeaders": { 127 | "Access-Control-Allow-Headers": [ 128 | "X-Requested-With", 129 | "Range", 130 | "User-Agent" 131 | ], 132 | 
"Access-Control-Allow-Methods": [ 133 | "GET" 134 | ], 135 | "Access-Control-Allow-Origin": [ 136 | "*" 137 | ] 138 | }, 139 | "RootRedirect": "{{ ipfs_gateway_root_redirect | default('') }}", 140 | "PathPrefixes": [], 141 | "APICommands": [], 142 | "NoFetch": false, 143 | "NoDNSLink": false, 144 | "DeserializedResponses": null, 145 | "PublicGateways": {{ ipfs_public_gateways | default(None) | to_nice_json(indent=2) | indent(6) }} 146 | }, 147 | "API": { 148 | "HTTPHeaders": {} 149 | }, 150 | "Swarm": { 151 | "AddrFilters": [ 152 | {% if ipfs_addr_filters is defined %} 153 | {% for p in ipfs_addr_filters %} 154 | "{{ p }}"{% if not loop.last %},{% endif %} 155 | {% endfor %} 156 | {% else %} 157 | "/ip4/10.0.0.0/ipcidr/8", 158 | "/ip4/100.64.0.0/ipcidr/10", 159 | "/ip4/169.254.0.0/ipcidr/16", 160 | "/ip4/172.16.0.0/ipcidr/12", 161 | "/ip4/192.0.0.0/ipcidr/24", 162 | "/ip4/192.0.0.0/ipcidr/29", 163 | "/ip4/192.0.0.8/ipcidr/32", 164 | "/ip4/192.0.0.170/ipcidr/32", 165 | "/ip4/192.0.0.171/ipcidr/32", 166 | "/ip4/192.0.2.0/ipcidr/24", 167 | "/ip4/192.168.0.0/ipcidr/16", 168 | "/ip4/198.18.0.0/ipcidr/15", 169 | "/ip4/198.51.100.0/ipcidr/24", 170 | "/ip4/203.0.113.0/ipcidr/24", 171 | "/ip4/240.0.0.0/ipcidr/4", 172 | "/ip6/100::/ipcidr/64", 173 | "/ip6/2001:2::/ipcidr/48", 174 | "/ip6/2001:db8::/ipcidr/32", 175 | "/ip6/fc00::/ipcidr/7", 176 | "/ip6/fe80::/ipcidr/10" 177 | {% endif %} 178 | ], 179 | "DisableBandwidthMetrics": {{ ipfs_disable_bandwidth_metrics | default(False) | lower }}, 180 | "DisableNatPortMap": true, 181 | "RelayClient": {}, 182 | "RelayService": {}, 183 | "Transports": { 184 | "Network": {}, 185 | "Security": {}, 186 | "Multiplexers": {} 187 | }, 188 | "ConnMgr": { 189 | "Type": "basic", 190 | "LowWater": {{ ipfs_connmgr_low_water }}, 191 | "HighWater": {{ ipfs_connmgr_high_water }}, 192 | "GracePeriod": "30s" 193 | }, 194 | "ResourceMgr": { 195 | "MaxMemory": "{{ ipfs_resourcemgr_max_memory | default(0) }}" 196 | } 197 | }, 198 | "AutoNAT": {}, 199 | "Pubsub": { 200 | "DisableSigning": false, 201 | "Router": "" 202 | }, 203 | "Peering": { 204 | "Peers": null 205 | }, 206 | "DNS": { 207 | "Resolvers": {} 208 | }, 209 | "Migration": { 210 | "DownloadSources": [], 211 | "Keep": "" 212 | }, 213 | "Provider": { 214 | "Strategy": "" 215 | }, 216 | "Reprovider": { 217 | "Interval": "12h", 218 | "Strategy": "{{ ipfs_reprovider_strategy | default('all') }}" 219 | }, 220 | "Experimental": { 221 | "FilestoreEnabled": false, 222 | "UrlstoreEnabled": false, 223 | "GraphsyncEnabled": false, 224 | "Libp2pStreamMounting": false, 225 | "P2pHttpProxy": false, 226 | "StrategicProviding": {{ ipfs_experimental_strategic_providing | default(False) | lower }}, 227 | "OptimisticProvide": false, 228 | "OptimisticProvideJobsPoolSize": 0 229 | }, 230 | "Plugins": { 231 | "Plugins": null 232 | }, 233 | "Pinning": { 234 | "RemoteServices": {} 235 | }, 236 | "Internal": {} 237 | } 238 | --------------------------------------------------------------------------------