├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   └── feature_request.yml
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── workflows
│       └── tests.yml
├── LICENSE
├── README.md
├── commands
│   ├── host
│   │   └── redis-backend
│   └── redis
│       ├── redis-cli
│       └── redis-flush
├── docker-compose.redis.yaml
├── install.yaml
├── redis
│   ├── advanced.conf
│   ├── append.conf
│   ├── general.conf
│   ├── io.conf
│   ├── memory.conf
│   ├── network.conf
│   ├── redis.conf
│   ├── scripts
│   │   ├── settings.ddev.redis.php
│   │   ├── setup-drupal-settings.sh
│   │   └── setup-redis-optimized-config.sh
│   ├── security.conf
│   └── snapshots.conf
└── tests
    ├── test.bats
    └── testdata
        └── .gitmanaged

/.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: 🐞 Bug report or Support Request 2 | description: Create a report to help us improve. 3 | labels: [bug] 4 | body: 5 | - type: checkboxes 6 | attributes: 7 | label: Preliminary checklist 8 | description: Please complete the following checks before submitting an issue. 9 | options: 10 | - label: I am using the latest stable version of DDEV 11 | required: true 12 | - label: I am using the latest stable version of this add-on 13 | required: true 14 | - type: textarea 15 | attributes: 16 | label: Expected Behavior 17 | description: What did you expect to happen? 18 | validations: 19 | required: true 20 | - type: textarea 21 | attributes: 22 | label: Actual Behavior 23 | description: What actually happened instead? 24 | validations: 25 | required: true 26 | - type: textarea 27 | attributes: 28 | label: Steps To Reproduce 29 | description: Specific steps to reproduce the behavior. 30 | placeholder: | 31 | 1. In this environment... 32 | 2. With this config... 33 | 3. Run `...` 34 | 4. See error... 35 | validations: 36 | required: false 37 | - type: textarea 38 | attributes: 39 | label: Anything else? 40 | description: | 41 | Links? References? Screenshots? Anything that will give us more context about your issue! 42 | 43 | 💡 Attach images or log files by clicking this area to highlight it and dragging files in. 44 | validations: 45 | required: false 46 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: 🚀 Feature request 2 | description: Suggest an idea for this project. 3 | labels: [enhancement] 4 | body: 5 | - type: checkboxes 6 | attributes: 7 | label: Is there an existing issue for this? 8 | description: Please search existing issues to see if one already exists for your request. 9 | options: 10 | - label: I have searched the existing issues 11 | required: true 12 | - type: textarea 13 | attributes: 14 | label: Is your feature request related to a problem? 15 | description: Clearly and concisely describe the problem. (Ex. I'm always frustrated when...) 16 | validations: 17 | required: true 18 | - type: textarea 19 | attributes: 20 | label: Describe your solution 21 | description: Clearly and concisely describe what you want to happen. 22 | validations: 23 | required: true 24 | - type: textarea 25 | attributes: 26 | label: Describe alternatives 27 | description: Clearly and concisely describe any alternative solutions or features you've considered. 28 | validations: 29 | required: false 30 | - type: textarea 31 | attributes: 32 | label: Additional context 33 | description: Add any other context or screenshots about the feature request.
34 | validations: 35 | required: false 36 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## The Issue 2 | 3 | - # 4 | 5 | 6 | 7 | ## How This PR Solves The Issue 8 | 9 | ## Manual Testing Instructions 10 | 11 | ```bash 12 | ddev add-on get https://github.com///tarball/ 13 | ddev restart 14 | ``` 15 | 16 | ## Automated Testing Overview 17 | 18 | 19 | 20 | ## Release/Deployment Notes 21 | 22 | 23 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: tests 2 | on: 3 | pull_request: 4 | push: 5 | branches: [ main ] 6 | 7 | schedule: 8 | - cron: '25 08 * * *' 9 | 10 | workflow_dispatch: 11 | inputs: 12 | debug_enabled: 13 | type: boolean 14 | description: Debug with tmate 15 | required: false 16 | default: false 17 | 18 | concurrency: 19 | group: ${{ github.workflow }}-${{ github.ref }} 20 | cancel-in-progress: true 21 | 22 | permissions: 23 | contents: read 24 | 25 | jobs: 26 | tests: 27 | strategy: 28 | matrix: 29 | ddev_version: [stable, HEAD] 30 | bats_tag: [ 31 | "default", 32 | "default-optimized", 33 | "drupal", 34 | "laravel-redis", 35 | "laravel-redis-alpine-optimized", 36 | "laravel-valkey", 37 | "laravel-valkey-alpine-optimized", 38 | "laravel-redis-6", 39 | "drupal-7", 40 | "drupal-no-settings" 41 | ] 42 | fail-fast: false 43 | 44 | runs-on: ubuntu-latest 45 | 46 | steps: 47 | - uses: ddev/github-action-add-on-test@v2 48 | with: 49 | ddev_version: ${{ matrix.ddev_version }} 50 | token: ${{ secrets.GITHUB_TOKEN }} 51 | debug_enabled: ${{ github.event.inputs.debug_enabled }} 52 | addon_repository: ${{ env.GITHUB_REPOSITORY }} 53 | addon_ref: ${{ env.GITHUB_REF }} 54 | test_command: bats tests --filter-tags ${{ matrix.bats_tag }} 55 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![add-on registry](https://img.shields.io/badge/DDEV-Add--on_Registry-blue)](https://addons.ddev.com) 2 | [![tests](https://github.com/ddev/ddev-redis/actions/workflows/tests.yml/badge.svg?branch=main)](https://github.com/ddev/ddev-redis/actions/workflows/tests.yml?query=branch%3Amain) 3 | [![last commit](https://img.shields.io/github/last-commit/ddev/ddev-redis)](https://github.com/ddev/ddev-redis/commits) 4 | [![release](https://img.shields.io/github/v/release/ddev/ddev-redis)](https://github.com/ddev/ddev-redis/releases/latest) 5 | 6 | ## DDEV Redis 7 | 8 | > [!NOTE] 9 | > This add-on has absorbed functionality from `ddev/ddev-redis-7`, see [Advanced Customization](#advanced-customization). 10 | 11 | ## Overview 12 | 13 | [Redis](https://redis.io/) is an in-memory key–value database, used as a distributed cache and message broker, with optional durability. 14 | 15 | This add-on integrates Redis into your [DDEV](https://ddev.com/) project with Redis persistence enabled by default. 16 | 17 | ## Installation 18 | 19 | ```bash 20 | ddev add-on get ddev/ddev-redis 21 | ddev restart 22 | ``` 23 | 24 | After installation, make sure to commit the `.ddev` directory to version control. 25 | 26 | ## Usage 27 | 28 | | Command | Description | 29 | | ------- | ----------- | 30 | | `ddev redis-backend` | Use a different key-value store for Redis | 31 | | `ddev redis-cli` | Run `redis-cli` inside the Redis container | 32 | | `ddev redis` | Alias for `ddev redis-cli` | 33 | | `ddev redis-flush` | Flush all cache inside the Redis container | 34 | | `ddev describe` | View service status and used ports for Redis | 35 | | `ddev logs -s redis` | Check Redis logs | 36 | 37 | Redis is available inside Docker containers with `redis:6379`. 38 | 39 | ## What makes the optimized config different? 40 | 41 | The default config only uses the [redis.conf](./redis/redis.conf) file. 42 | 43 | The optimized config uses all `*.conf` files in the [redis](./redis) directory except `redis.conf`. 44 | 45 | It uses *hardened* settings ready for production, like enabling Redis credentials. 46 | 47 | You can read each config file to see the exact differences. 48 | 49 | ## Redis Credentials 50 | 51 | By default, there is no authentication. 52 | 53 | If you have the optimized config enabled, the credentials are: 54 | 55 | | Field | Value | 56 | |----------|---------| 57 | | Username | `redis` | 58 | | Password | `redis` | 59 | 60 | For more information about ACLs, see the [Redis documentation](https://redis.io/docs/latest/operate/oss_and_stack/management/security/acl/). 61 | 62 | ### Swappable Redis backends 63 | 64 | Use the `ddev redis-backend` command to swap between Redis backends: 65 | 66 | | Command | Docker Image | 67 | |--------------------------------------|-----------------------------------------------| 68 | | `ddev redis-backend redis` | `redis:7` | 69 | | `ddev redis-backend redis-alpine` | `redis:7-alpine` | 70 | | `ddev redis-backend valkey` | `valkey/valkey:8` | 71 | | `ddev redis-backend valkey-alpine` | `valkey/valkey:8-alpine` | 72 | | `ddev redis-backend ` | `` (specify your custom Redis image) | 73 | 74 | > [!TIP] 75 | > Add `optimize` or `optimized` after the command to enable optimized Redis configuration. 
76 | > 77 | > Example: `ddev redis-backend redis optimize` 78 | 79 | ## Advanced Customization 80 | 81 | To apply an optimized configuration from `ddev/ddev-redis-7`: 82 | 83 | ```bash 84 | ddev dotenv set .ddev/.env.redis --redis-optimized=true 85 | ddev add-on get ddev/ddev-redis 86 | 87 | # (optional) if you have an existing Redis volume, delete it to avoid problems with Redis: 88 | ddev stop 89 | docker volume rm ddev-$(ddev status -j | docker run -i --rm ddev/ddev-utilities jq -r '.raw.name')_redis 90 | 91 | ddev restart 92 | ``` 93 | 94 | Make sure to commit the `.ddev/.env.redis` file to version control. 95 | 96 | To change the used Docker image: 97 | 98 | ```bash 99 | ddev dotenv set .ddev/.env.redis --redis-docker-image=redis:7 100 | ddev add-on get ddev/ddev-redis 101 | 102 | # (optional) if you have an existing Redis volume, delete it to avoid problems with Redis: 103 | ddev stop 104 | docker volume rm ddev-$(ddev status -j | docker run -i --rm ddev/ddev-utilities jq -r '.raw.name')_redis 105 | 106 | ddev restart 107 | ``` 108 | 109 | Make sure to commit the `.ddev/.env.redis` file to version control. 110 | 111 | All customization options (use with caution): 112 | 113 | | Variable | Flag | Default | 114 | | -------- | ---- | ------- | 115 | | `REDIS_DOCKER_IMAGE` | `--redis-docker-image` | `redis:7` | 116 | | `REDIS_OPTIMIZED` | `--redis-optimized` | `false` (`true`/`false`) | 117 | 118 | ## Credits 119 | 120 | **Contributed by [@hussainweb](https://github.com/hussainweb) based on the original [ddev-contrib recipe](https://github.com/ddev/ddev-contrib/tree/master/docker-compose-services/redis) by [@gormus](https://github.com/gormus)** 121 | 122 | **Optimized config from `ddev/ddev-redis-7` contributed by [@seebeen](https://github.com/seebeen)** 123 | 124 | **Maintained by the [DDEV team](https://ddev.com/support-ddev/)** 125 | -------------------------------------------------------------------------------- /commands/host/redis-backend: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #ddev-generated 3 | 4 | ## Description: Use a different key-value store for Redis 5 | ## Usage: redis-backend [optimize] 6 | ## Example: ddev redis-backend redis-alpine optimize 7 | 8 | REDIS_DOCKER_IMAGE=${1:-} 9 | REDIS_CONFIG=${2:-} 10 | NAME=$REDIS_DOCKER_IMAGE 11 | 12 | function show_help() { 13 | cat < [optimize] 15 | 16 | Choose from predefined aliases, or provide any Redis-compatible Docker image. 
17 | Note that not every Docker image can work right away, and you may need to override 18 | the "command:" in the docker-compose.redis_extra.yaml file 19 | 20 | Available aliases: 21 | redis redis:7 22 | redis-alpine redis:7-alpine 23 | valkey valkey/valkey:8 24 | valkey-alpine valkey/valkey:8-alpine 25 | 26 | Custom backend: 27 | You can specify any Docker image, e.g.: 28 | ddev redis-backend redis:6 29 | 30 | Optional: 31 | optimize Apply additional Redis configuration with resource limits 32 | optimized Same as optimize 33 | 34 | Examples: 35 | ddev redis-backend redis-alpine optimize 36 | ddev redis-backend valkey 37 | ddev redis-backend redis:7.2-alpine 38 | EOF 39 | exit 0 40 | } 41 | 42 | function optimize_config() { 43 | [[ "$REDIS_CONFIG" != "optimized" && "$REDIS_CONFIG" != "optimize" ]] && return 44 | ddev dotenv set .ddev/.env.redis --redis-optimized=true 45 | } 46 | 47 | function cleanup() { 48 | rm -f "$DDEV_APPROOT/.ddev/.env.redis" 49 | rm -rf "$DDEV_APPROOT/.ddev/redis/" 50 | rm -f "$DDEV_APPROOT/.ddev/docker-compose.redis.yaml" "$DDEV_APPROOT/.ddev/docker-compose.redis_extra.yaml" 51 | 52 | redis_volume="ddev-$(ddev status -j | docker run -i --rm ddev/ddev-utilities jq -r '.raw.name')_redis" 53 | if docker volume ls -q | grep -qw "$redis_volume"; then 54 | ddev stop 55 | docker volume rm "$redis_volume" 56 | fi 57 | } 58 | 59 | function check_docker_image() { 60 | echo "Pulling ${REDIS_DOCKER_IMAGE}..." 61 | if ! docker pull "$REDIS_DOCKER_IMAGE"; then 62 | echo >&2 "❌ Unable to pull ${REDIS_DOCKER_IMAGE}" 63 | exit 2 64 | fi 65 | } 66 | 67 | function use_docker_image() { 68 | [[ "$REDIS_DOCKER_IMAGE" != "redis:7" ]] && ddev dotenv set .ddev/.env.redis --redis-docker-image="$REDIS_DOCKER_IMAGE" 69 | REPO=$(ddev add-on list --installed -j 2>/dev/null | docker run -i --rm ddev/ddev-utilities jq -r '.raw[] | select(.Name=="redis") | .Repository // empty' 2>/dev/null) 70 | ddev add-on get "${REPO:-ddev/ddev-redis}" 71 | } 72 | 73 | case "$REDIS_DOCKER_IMAGE" in 74 | redis) 75 | NAME="Redis 7" 76 | REDIS_DOCKER_IMAGE="redis:7" 77 | ;; 78 | redis-alpine) 79 | NAME="Redis 7 Alpine" 80 | REDIS_DOCKER_IMAGE="redis:7-alpine" 81 | ;; 82 | valkey) 83 | NAME="Valkey 8" 84 | REDIS_DOCKER_IMAGE="valkey/valkey:8" 85 | ;; 86 | valkey-alpine) 87 | NAME="Valkey 8 Alpine" 88 | REDIS_DOCKER_IMAGE="valkey/valkey:8-alpine" 89 | ;; 90 | ""|--help|-h) 91 | show_help 92 | ;; 93 | *) 94 | NAME="$REDIS_DOCKER_IMAGE" 95 | # Allow unknown image, nothing to override 96 | ;; 97 | esac 98 | 99 | check_docker_image 100 | cleanup 101 | optimize_config 102 | use_docker_image 103 | 104 | echo 105 | echo "✅ Redis backend: $REDIS_DOCKER_IMAGE" 106 | if [[ "$REDIS_CONFIG" == "optimized" || "$REDIS_CONFIG" == "optimize" ]]; then 107 | echo "⚙️ Redis config: optimized" 108 | else 109 | echo "⚙️ Redis config: default" 110 | fi 111 | 112 | echo 113 | echo "📝 Commit the '.ddev' directory to version control" 114 | 115 | echo 116 | echo "🔄 Redis config available after 'ddev restart'" 117 | -------------------------------------------------------------------------------- /commands/redis/redis-cli: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | #ddev-generated 4 | ## Description: Run redis-cli inside the Redis container 5 | ## Usage: redis-cli [flags] [args] 6 | ## Example: "ddev redis-cli KEYS *" or "ddev redis-cli INFO" or "ddev redis-cli --version" 7 | ## Aliases: redis 8 | 9 | if [ -f /etc/redis/conf/security.conf ]; then 10 | redis-cli -p 6379 -h redis 
-a redis --no-auth-warning $@ 11 | else 12 | redis-cli -p 6379 -h redis $@ 13 | fi 14 | -------------------------------------------------------------------------------- /commands/redis/redis-flush: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | #ddev-generated 4 | ## Description: Flush all cache inside the Redis container 5 | ## Usage: redis-flush 6 | ## Example: "ddev redis-flush" 7 | 8 | if [ -f /etc/redis/conf/security.conf ]; then 9 | redis-cli -p 6379 -h redis -a redis --no-auth-warning FLUSHALL ASYNC 10 | else 11 | redis-cli -p 6379 -h redis FLUSHALL ASYNC 12 | fi 13 | -------------------------------------------------------------------------------- /docker-compose.redis.yaml: -------------------------------------------------------------------------------- 1 | #ddev-generated 2 | services: 3 | redis: 4 | container_name: ddev-${DDEV_SITENAME}-redis 5 | image: ${REDIS_DOCKER_IMAGE:-redis:7} 6 | # These labels ensure this service is discoverable by ddev. 7 | labels: 8 | com.ddev.site-name: ${DDEV_SITENAME} 9 | com.ddev.approot: ${DDEV_APPROOT} 10 | restart: "no" 11 | expose: 12 | - 6379 13 | volumes: 14 | - ".:/mnt/ddev_config" 15 | - "ddev-global-cache:/mnt/ddev-global-cache" 16 | - "./redis:/etc/redis/conf" 17 | - "redis:/data" 18 | command: /etc/redis/conf/redis.conf 19 | 20 | volumes: 21 | redis: 22 | -------------------------------------------------------------------------------- /install.yaml: -------------------------------------------------------------------------------- 1 | name: redis 2 | 3 | project_files: 4 | - docker-compose.redis.yaml 5 | - redis/scripts/settings.ddev.redis.php 6 | - redis/scripts/setup-drupal-settings.sh 7 | - redis/scripts/setup-redis-optimized-config.sh 8 | - redis/redis.conf 9 | - redis/advanced.conf 10 | - redis/append.conf 11 | - redis/general.conf 12 | - redis/io.conf 13 | - redis/memory.conf 14 | - redis/network.conf 15 | - redis/security.conf 16 | - redis/snapshots.conf 17 | - commands/host/redis-backend 18 | - commands/redis/redis-cli 19 | - commands/redis/redis-flush 20 | 21 | ddev_version_constraint: '>= v1.24.3' 22 | 23 | post_install_actions: 24 | - | 25 | #ddev-description:Install redis settings for Drupal 9+ if applicable 26 | redis/scripts/setup-drupal-settings.sh 27 | - | 28 | #ddev-description:Using optimized config if --redis-optimized=true 29 | redis/scripts/setup-redis-optimized-config.sh 30 | - | 31 | #ddev-description:Remove redis/scripts if there are no files 32 | rmdir redis/scripts 2>/dev/null || true 33 | - | 34 | #ddev-nodisplay 35 | #ddev-description:Remove old `redis` command from `ddev-redis-7` 36 | if grep "#ddev-generated" $DDEV_APPROOT/.ddev/commands/redis/redis > /dev/null 2>&1; then 37 | rm -f "$DDEV_APPROOT/.ddev/commands/redis/redis" 38 | fi 39 | 40 | removal_actions: 41 | - | 42 | #ddev-description:Remove redis settings for Drupal 9+ if applicable 43 | rm -f "${DDEV_APPROOT}/${DDEV_DOCROOT}/sites/default/settings.ddev.redis.php" 44 | -------------------------------------------------------------------------------- /redis/advanced.conf: -------------------------------------------------------------------------------- 1 | # #ddev-generated 2 | ############################### ADVANCED CONFIG ############################### 3 | 4 | # Hashes are encoded using a memory efficient data structure when they have a 5 | # small number of entries, and the biggest entry does not exceed a given 6 | # threshold. 
These thresholds can be configured using the following directives. 7 | hash-max-ziplist-entries 512 8 | hash-max-ziplist-value 64 9 | 10 | # Lists are also encoded in a special way to save a lot of space. 11 | # The number of entries allowed per internal list node can be specified 12 | # as a fixed maximum size or a maximum number of elements. 13 | # For a fixed maximum size, use -5 through -1, meaning: 14 | # -5: max size: 64 Kb <-- not recommended for normal workloads 15 | # -4: max size: 32 Kb <-- not recommended 16 | # -3: max size: 16 Kb <-- probably not recommended 17 | # -2: max size: 8 Kb <-- good 18 | # -1: max size: 4 Kb <-- good 19 | # Positive numbers mean store up to _exactly_ that number of elements 20 | # per list node. 21 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), 22 | # but if your use case is unique, adjust the settings as necessary. 23 | list-max-ziplist-size -2 24 | 25 | # Lists may also be compressed. 26 | # Compress depth is the number of quicklist ziplist nodes from *each* side of 27 | # the list to *exclude* from compression. The head and tail of the list 28 | # are always uncompressed for fast push/pop operations. Settings are: 29 | # 0: disable all list compression 30 | # 1: depth 1 means "don't start compressing until after 1 node into the list, 31 | # going from either the head or tail" 32 | # So: [head]->node->node->...->node->[tail] 33 | # [head], [tail] will always be uncompressed; inner nodes will compress. 34 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] 35 | # 2 here means: don't compress head or head->next or tail->prev or tail, 36 | # but compress all nodes between them. 37 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] 38 | # etc. 39 | list-compress-depth 0 40 | 41 | # Sets have a special encoding in just one case: when a set is composed 42 | # of just strings that happen to be integers in radix 10 in the range 43 | # of 64 bit signed integers. 44 | # The following configuration setting sets the limit in the size of the 45 | # set in order to use this special memory saving encoding. 46 | set-max-intset-entries 512 47 | 48 | # Similarly to hashes and lists, sorted sets are also specially encoded in 49 | # order to save a lot of space. This encoding is only used when the length and 50 | # elements of a sorted set are below the following limits: 51 | zset-max-ziplist-entries 128 52 | zset-max-ziplist-value 64 53 | 54 | # HyperLogLog sparse representation bytes limit. The limit includes the 55 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 56 | # this limit, it is converted into the dense representation. 57 | # 58 | # A value greater than 16000 is totally useless, since at that point the 59 | # dense representation is more memory efficient. 60 | # 61 | # The suggested value is ~ 3000 in order to have the benefits of 62 | # the space efficient encoding without slowing down too much PFADD, 63 | # which is O(N) with the sparse encoding. The value can be raised to 64 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 65 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 66 | hll-sparse-max-bytes 3000 67 | 68 | # Streams macro node max size / items. The stream data structure is a radix 69 | # tree of big nodes that encode multiple items inside. 
Using this configuration 70 | # it is possible to configure how big a single node can be in bytes, and the 71 | # maximum number of items it may contain before switching to a new node when 72 | # appending new stream entries. If any of the following settings are set to 73 | # zero, the limit is ignored, so for instance it is possible to set just a 74 | # max entires limit by setting max-bytes to 0 and max-entries to the desired 75 | # value. 76 | stream-node-max-bytes 4096 77 | stream-node-max-entries 100 78 | 79 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 80 | # order to help rehashing the main Redis hash table (the one mapping top-level 81 | # keys to values). The hash table implementation Redis uses (see dict.c) 82 | # performs a lazy rehashing: the more operation you run into a hash table 83 | # that is rehashing, the more rehashing "steps" are performed, so if the 84 | # server is idle the rehashing is never complete and some more memory is used 85 | # by the hash table. 86 | # 87 | # The default is to use this millisecond 10 times every second in order to 88 | # actively rehash the main dictionaries, freeing memory when possible. 89 | # 90 | # If unsure: 91 | # use "activerehashing no" if you have hard latency requirements and it is 92 | # not a good thing in your environment that Redis can reply from time to time 93 | # to queries with 2 milliseconds delay. 94 | # 95 | # use "activerehashing yes" if you don't have such hard requirements but 96 | # want to free memory asap when possible. 97 | activerehashing yes 98 | 99 | # The client output buffer limits can be used to force disconnection of clients 100 | # that are not reading data from the server fast enough for some reason (a 101 | # common reason is that a Pub/Sub client can't consume messages as fast as the 102 | # publisher can produce them). 103 | # 104 | # The limit can be set differently for the three different classes of clients: 105 | # 106 | # normal -> normal clients including MONITOR clients 107 | # replica -> replica clients 108 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 109 | # 110 | # The syntax of every client-output-buffer-limit directive is the following: 111 | # 112 | # client-output-buffer-limit 113 | # 114 | # A client is immediately disconnected once the hard limit is reached, or if 115 | # the soft limit is reached and remains reached for the specified number of 116 | # seconds (continuously). 117 | # So for instance if the hard limit is 32 megabytes and the soft limit is 118 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 119 | # if the size of the output buffers reach 32 megabytes, but will also get 120 | # disconnected if the client reaches 16 megabytes and continuously overcomes 121 | # the limit for 10 seconds. 122 | # 123 | # By default normal clients are not limited because they don't receive data 124 | # without asking (in a push way), but just after a request, so only 125 | # asynchronous clients may create a scenario where data is requested faster 126 | # than it can read. 127 | # 128 | # Instead there is a default limit for pubsub and replica clients, since 129 | # subscribers and replicas receive data in a push fashion. 130 | # 131 | # Both the hard or the soft limit can be disabled by setting them to zero. 
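# Illustrative addition (not part of the upstream file): these limits can be
# inspected or adjusted at runtime from a `redis-cli` session (`ddev redis-cli`),
# for example:
#
#   CONFIG GET client-output-buffer-limit
#   CONFIG SET client-output-buffer-limit "pubsub 64mb 16mb 90"
#
# With that setting a pubsub client is dropped immediately above 64mb of pending
# output, or after staying above 16mb for 90 consecutive seconds.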
132 | client-output-buffer-limit normal 0 0 0 133 | client-output-buffer-limit replica 256mb 64mb 60 134 | client-output-buffer-limit pubsub 32mb 8mb 60 135 | 136 | # Client query buffers accumulate new commands. They are limited to a fixed 137 | # amount by default in order to avoid that a protocol desynchronization (for 138 | # instance due to a bug in the client) will lead to unbound memory usage in 139 | # the query buffer. However you can configure it here if you have very special 140 | # needs, such us huge multi/exec requests or alike. 141 | # 142 | # client-query-buffer-limit 1gb 143 | 144 | # In the Redis protocol, bulk requests, that are, elements representing single 145 | # strings, are normally limited to 512 mb. However you can change this limit 146 | # here, but must be 1mb or greater 147 | # 148 | # proto-max-bulk-len 512mb 149 | 150 | # Redis calls an internal function to perform many background tasks, like 151 | # closing connections of clients in timeout, purging expired keys that are 152 | # never requested, and so forth. 153 | # 154 | # Not all tasks are performed with the same frequency, but Redis checks for 155 | # tasks to perform according to the specified "hz" value. 156 | # 157 | # By default "hz" is set to 10. Raising the value will use more CPU when 158 | # Redis is idle, but at the same time will make Redis more responsive when 159 | # there are many keys expiring at the same time, and timeouts may be 160 | # handled with more precision. 161 | # 162 | # The range is between 1 and 500, however a value over 100 is usually not 163 | # a good idea. Most users should use the default of 10 and raise this up to 164 | # 100 only in environments where very low latency is required. 165 | hz 10 166 | 167 | # Normally it is useful to have an HZ value which is proportional to the 168 | # number of clients connected. This is useful in order, for instance, to 169 | # avoid too many clients are processed for each background task invocation 170 | # in order to avoid latency spikes. 171 | # 172 | # Since the default HZ value by default is conservatively set to 10, Redis 173 | # offers, and enables by default, the ability to use an adaptive HZ value 174 | # which will temporarily raise when there are many connected clients. 175 | # 176 | # When dynamic HZ is enabled, the actual configured HZ will be used 177 | # as a baseline, but multiples of the configured HZ value will be actually 178 | # used as needed once more clients are connected. In this way an idle 179 | # instance will use very little CPU time while a busy instance will be 180 | # more responsive. 181 | dynamic-hz yes 182 | 183 | # When a child rewrites the AOF file, if the following option is enabled 184 | # the file will be fsync-ed every 32 MB of data generated. This is useful 185 | # in order to commit the file to the disk more incrementally and avoid 186 | # big latency spikes. 187 | aof-rewrite-incremental-fsync yes 188 | 189 | # When redis saves RDB file, if the following option is enabled 190 | # the file will be fsync-ed every 32 MB of data generated. This is useful 191 | # in order to commit the file to the disk more incrementally and avoid 192 | # big latency spikes. 193 | rdb-save-incremental-fsync yes 194 | 195 | # Redis LFU eviction (see maxmemory setting) can be tuned. 
However it is a good 196 | # idea to start with the default settings and only change them after investigating 197 | # how to improve the performances and how the keys LFU change over time, which 198 | # is possible to inspect via the OBJECT FREQ command. 199 | # 200 | # There are two tunable parameters in the Redis LFU implementation: the 201 | # counter logarithm factor and the counter decay time. It is important to 202 | # understand what the two parameters mean before changing them. 203 | # 204 | # The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis 205 | # uses a probabilistic increment with logarithmic behavior. Given the value 206 | # of the old counter, when a key is accessed, the counter is incremented in 207 | # this way: 208 | # 209 | # 1. A random number R between 0 and 1 is extracted. 210 | # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). 211 | # 3. The counter is incremented only if R < P. 212 | # 213 | # The default lfu-log-factor is 10. This is a table of how the frequency 214 | # counter changes with a different number of accesses with different 215 | # logarithmic factors: 216 | # 217 | # +--------+------------+------------+------------+------------+------------+ 218 | # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | 219 | # +--------+------------+------------+------------+------------+------------+ 220 | # | 0 | 104 | 255 | 255 | 255 | 255 | 221 | # +--------+------------+------------+------------+------------+------------+ 222 | # | 1 | 18 | 49 | 255 | 255 | 255 | 223 | # +--------+------------+------------+------------+------------+------------+ 224 | # | 10 | 10 | 18 | 142 | 255 | 255 | 225 | # +--------+------------+------------+------------+------------+------------+ 226 | # | 100 | 8 | 11 | 49 | 143 | 255 | 227 | # +--------+------------+------------+------------+------------+------------+ 228 | # 229 | # NOTE: The above table was obtained by running the following commands: 230 | # 231 | # redis-benchmark -n 1000000 incr foo 232 | # redis-cli object freq foo 233 | # 234 | # NOTE 2: The counter initial value is 5 in order to give new objects a chance 235 | # to accumulate hits. 236 | # 237 | # The counter decay time is the time, in minutes, that must elapse in order 238 | # for the key counter to be divided by two (or decremented if it has a value 239 | # less <= 10). 240 | # 241 | # The default value for the lfu-decay-time is 1. A special value of 0 means to 242 | # decay the counter every time it happens to be scanned. 243 | # 244 | # lfu-log-factor 10 245 | # lfu-decay-time 1 246 | 247 | ############################## DEBUG COMMAND ############################# 248 | 249 | enable-debug-command yes 250 | 251 | ########################### ACTIVE DEFRAGMENTATION ####################### 252 | # 253 | # What is active defragmentation? 254 | # ------------------------------- 255 | # 256 | # Active (online) defragmentation allows a Redis server to compact the 257 | # spaces left between small allocations and deallocations of data in memory, 258 | # thus allowing to reclaim back memory. 259 | # 260 | # Fragmentation is a natural process that happens with every allocator (but 261 | # less so with Jemalloc, fortunately) and certain workloads. Normally a server 262 | # restart is needed in order to lower the fragmentation, or at least to flush 263 | # away all the data and create it again. 
However thanks to this feature 264 | # implemented by Oran Agra for Redis 4.0 this process can happen at runtime 265 | # in a "hot" way, while the server is running. 266 | # 267 | # Basically when the fragmentation is over a certain level (see the 268 | # configuration options below) Redis will start to create new copies of the 269 | # values in contiguous memory regions by exploiting certain specific Jemalloc 270 | # features (in order to understand if an allocation is causing fragmentation 271 | # and to allocate it in a better place), and at the same time, will release the 272 | # old copies of the data. This process, repeated incrementally for all the keys 273 | # will cause the fragmentation to drop back to normal values. 274 | # 275 | # Important things to understand: 276 | # 277 | # 1. This feature is disabled by default, and only works if you compiled Redis 278 | # to use the copy of Jemalloc we ship with the source code of Redis. 279 | # This is the default with Linux builds. 280 | # 281 | # 2. You never need to enable this feature if you don't have fragmentation 282 | # issues. 283 | # 284 | # 3. Once you experience fragmentation, you can enable this feature when 285 | # needed with the command "CONFIG SET activedefrag yes". 286 | # 287 | # The configuration parameters are able to fine tune the behavior of the 288 | # defragmentation process. If you are not sure about what they mean it is 289 | # a good idea to leave the defaults untouched. 290 | 291 | # Enabled active defragmentation 292 | # activedefrag no 293 | 294 | # Minimum amount of fragmentation waste to start active defrag 295 | # active-defrag-ignore-bytes 100mb 296 | 297 | # Minimum percentage of fragmentation to start active defrag 298 | # active-defrag-threshold-lower 10 299 | 300 | # Maximum percentage of fragmentation at which we use maximum effort 301 | # active-defrag-threshold-upper 100 302 | 303 | # Minimal effort for defrag in CPU percentage, to be used when the lower 304 | # threshold is reached 305 | # active-defrag-cycle-min 1 306 | 307 | # Maximal effort for defrag in CPU percentage, to be used when the upper 308 | # threshold is reached 309 | # active-defrag-cycle-max 25 310 | 311 | # Maximum number of set/hash/zset/list fields that will be processed from 312 | # the main dictionary scan 313 | # active-defrag-max-scan-fields 1000 314 | 315 | # Jemalloc background thread for purging will be enabled by default 316 | jemalloc-bg-thread yes 317 | 318 | # It is possible to pin different threads and processes of Redis to specific 319 | # CPUs in your system, in order to maximize the performances of the server. 320 | # This is useful both in order to pin different Redis threads in different 321 | # CPUs, but also in order to make sure that multiple Redis instances running 322 | # in the same host will be pinned to different CPUs. 323 | # 324 | # Normally you can do this using the "taskset" command, however it is also 325 | # possible to this via Redis configuration directly, both in Linux and FreeBSD. 326 | # 327 | # You can pin the server/IO threads, bio threads, aof rewrite child process, and 328 | # the bgsave child process. 
The syntax to specify the cpu list is the same as 329 | # the taskset command: 330 | # 331 | # Set redis server/io threads to cpu affinity 0,2,4,6: 332 | # server_cpulist 0-7:2 333 | # 334 | # Set bio threads to cpu affinity 1,3: 335 | # bio_cpulist 1,3 336 | # 337 | # Set aof rewrite child process to cpu affinity 8,9,10,11: 338 | # aof_rewrite_cpulist 8-11 339 | # 340 | # Set bgsave child process to cpu affinity 1,10,11 341 | # bgsave_cpulist 1,10-11 342 | -------------------------------------------------------------------------------- /redis/append.conf: -------------------------------------------------------------------------------- 1 | # #ddev-generated 2 | ############################## APPEND ONLY MODE ############################### 3 | 4 | # By default Redis asynchronously dumps the dataset on disk. This mode is 5 | # good enough in many applications, but an issue with the Redis process or 6 | # a power outage may result into a few minutes of writes lost (depending on 7 | # the configured save points). 8 | # 9 | # The Append Only File is an alternative persistence mode that provides 10 | # much better durability. For instance using the default data fsync policy 11 | # (see later in the config file) Redis can lose just one second of writes in a 12 | # dramatic event like a server power outage, or a single write if something 13 | # wrong with the Redis process itself happens, but the operating system is 14 | # still running correctly. 15 | # 16 | # AOF and RDB persistence can be enabled at the same time without problems. 17 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 18 | # with the better durability guarantees. 19 | # 20 | # Please check http://redis.io/topics/persistence for more information. 21 | 22 | appendonly yes 23 | 24 | # The name of the append only file (default: "appendonly.aof") 25 | 26 | appendfilename "appendonly.aof" 27 | 28 | # For convenience, Redis stores all persistent append-only files in a dedicated 29 | # directory. The name of the directory is determined by the appenddirname 30 | # configuration parameter. 31 | 32 | appenddirname "append" 33 | 34 | # The fsync() call tells the Operating System to actually write data on disk 35 | # instead of waiting for more data in the output buffer. Some OS will really flush 36 | # data on disk, some other OS will just try to do it ASAP. 37 | # 38 | # Redis supports three different modes: 39 | # 40 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 41 | # always: fsync after every write to the append only log. Slow, Safest. 42 | # everysec: fsync only one time every second. Compromise. 43 | # 44 | # The default is "everysec", as that's usually the right compromise between 45 | # speed and data safety. It's up to you to understand if you can relax this to 46 | # "no" that will let the operating system flush the output buffer when 47 | # it wants, for better performances (but if you can live with the idea of 48 | # some data loss consider the default persistence mode that's snapshotting), 49 | # or on the contrary, use "always" that's very slow but a bit safer than 50 | # everysec. 51 | # 52 | # More details please check the following article: 53 | # http://antirez.com/post/redis-persistence-demystified.html 54 | # 55 | # If unsure, use "everysec". 
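# Illustrative addition (not part of the upstream file): the active fsync policy
# can be checked or changed at runtime from a `redis-cli` session (`ddev redis-cli`):
#
#   CONFIG GET appendfsync
#   CONFIG SET appendfsync everysec
#
# The add-on ships "no" below, favoring speed over durability, which is usually
# an acceptable trade-off for a local development cache.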
56 | 57 | appendfsync no 58 | 59 | # When the AOF fsync policy is set to always or everysec, and a background 60 | # saving process (a background save or AOF log background rewriting) is 61 | # performing a lot of I/O against the disk, in some Linux configurations 62 | # Redis may block too long on the fsync() call. Note that there is no fix for 63 | # this currently, as even performing fsync in a different thread will block 64 | # our synchronous write(2) call. 65 | # 66 | # In order to mitigate this problem it's possible to use the following option 67 | # that will prevent fsync() from being called in the main process while a 68 | # BGSAVE or BGREWRITEAOF is in progress. 69 | # 70 | # This means that while another child is saving, the durability of Redis is 71 | # the same as "appendfsync none". In practical terms, this means that it is 72 | # possible to lose up to 30 seconds of log in the worst scenario (with the 73 | # default Linux settings). 74 | # 75 | # If you have latency problems turn this to "yes". Otherwise leave it as 76 | # "no" that is the safest pick from the point of view of durability. 77 | 78 | no-appendfsync-on-rewrite no 79 | 80 | # Automatic rewrite of the append only file. 81 | # Redis is able to automatically rewrite the log file implicitly calling 82 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 83 | # 84 | # This is how it works: Redis remembers the size of the AOF file after the 85 | # latest rewrite (if no rewrite has happened since the restart, the size of 86 | # the AOF at startup is used). 87 | # 88 | # This base size is compared to the current size. If the current size is 89 | # bigger than the specified percentage, the rewrite is triggered. Also 90 | # you need to specify a minimal size for the AOF file to be rewritten, this 91 | # is useful to avoid rewriting the AOF file even if the percentage increase 92 | # is reached but it is still pretty small. 93 | # 94 | # Specify a percentage of zero in order to disable the automatic AOF 95 | # rewrite feature. 96 | 97 | auto-aof-rewrite-percentage 100 98 | auto-aof-rewrite-min-size 64mb 99 | 100 | # An AOF file may be found to be truncated at the end during the Redis 101 | # startup process, when the AOF data gets loaded back into memory. 102 | # This may happen when the system where Redis is running 103 | # crashes, especially when an ext4 filesystem is mounted without the 104 | # data=ordered option (however this can't happen when Redis itself 105 | # crashes or aborts but the operating system still works correctly). 106 | # 107 | # Redis can either exit with an error when this happens, or load as much 108 | # data as possible (the default now) and start if the AOF file is found 109 | # to be truncated at the end. The following option controls this behavior. 110 | # 111 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 112 | # the Redis server starts emitting a log to inform the user of the event. 113 | # Otherwise if the option is set to no, the server aborts with an error 114 | # and refuses to start. When the option is set to no, the user requires 115 | # to fix the AOF file using the "redis-check-aof" utility before to restart 116 | # the server. 117 | # 118 | # Note that if the AOF file will be found to be corrupted in the middle 119 | # the server will still exit with an error. This option only applies when 120 | # Redis will try to read more data from the AOF file but not enough bytes 121 | # will be found. 
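# Illustrative addition (not part of the upstream file): the repair utility
# mentioned above runs inside the Redis container; the service name and AOF
# file name below are examples only (check /data/append for the actual files):
#
#   ddev exec -s redis redis-check-aof --fix /data/append/appendonly.aof.1.incr.aof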
122 | aof-load-truncated yes 123 | 124 | # When rewriting the AOF file, Redis is able to use an RDB preamble in the 125 | # AOF file for faster rewrites and recoveries. When this option is turned 126 | # on the rewritten AOF file is composed of two different stanzas: 127 | # 128 | # [RDB file][AOF tail] 129 | # 130 | # When loading, Redis recognizes that the AOF file starts with the "REDIS" 131 | # string and loads the prefixed RDB file, then continues loading the AOF 132 | # tail. 133 | aof-use-rdb-preamble yes 134 | -------------------------------------------------------------------------------- /redis/general.conf: -------------------------------------------------------------------------------- 1 | # #ddev-generated 2 | ################################# GENERAL ##################################### 3 | 4 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 5 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 6 | daemonize no 7 | 8 | # If you run Redis from upstart or systemd, Redis can interact with your 9 | # supervision tree. Options: 10 | # supervised no - no supervision interaction 11 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode 12 | # requires "expect stop" in your upstart job config 13 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET 14 | # supervised auto - detect upstart or systemd method based on 15 | # UPSTART_JOB or NOTIFY_SOCKET environment variables 16 | # Note: these supervision methods only signal "process is ready." 17 | # They do not enable continuous pings back to your supervisor. 18 | supervised no 19 | 20 | # Specify the server verbosity level. 21 | # This can be one of: 22 | # debug (a lot of information, useful for development/testing) 23 | # verbose (many rarely useful info, but not a mess like the debug level) 24 | # notice (moderately verbose, what you want in production probably) 25 | # warning (only very important / critical messages are logged) 26 | loglevel notice 27 | 28 | # Specify the log file name. Also the empty string can be used to force 29 | # Redis to log on the standard output. Note that if you use standard 30 | # output for logging but daemonize, logs will be sent to /dev/null 31 | logfile "" 32 | 33 | # Set the number of databases. The default database is DB 0, you can select 34 | # a different one on a per-connection basis using SELECT where 35 | # dbid is a number between 0 and 'databases'-1 36 | databases 4 37 | 38 | # By default Redis shows an ASCII art logo only when started to log to the 39 | # standard output and if the standard output is a TTY. Basically this means 40 | # that normally a logo is displayed only in interactive sessions. 41 | # 42 | # However it is possible to force the pre-4.0 behavior and always show a 43 | # ASCII art logo in startup logs by setting the following option to yes. 44 | always-show-logo yes 45 | -------------------------------------------------------------------------------- /redis/io.conf: -------------------------------------------------------------------------------- 1 | # #ddev-generated 2 | ################################ THREADED I/O ################################# 3 | 4 | # Redis is mostly single threaded, however there are certain threaded 5 | # operations such as UNLINK, slow I/O accesses and other things that are 6 | # performed on side threads. 7 | # 8 | # Now it is also possible to handle Redis clients socket reads and writes 9 | # in different I/O threads. 
Since especially writing is so slow, normally 10 | # Redis users use pipelining in order to speed up the Redis performances per 11 | # core, and spawn multiple instances in order to scale more. Using I/O 12 | # threads it is possible to easily speedup two times Redis without resorting 13 | # to pipelining nor sharding of the instance. 14 | # 15 | # By default threading is disabled, we suggest enabling it only in machines 16 | # that have at least 4 or more cores, leaving at least one spare core. 17 | # Using more than 8 threads is unlikely to help much. We also recommend using 18 | # threaded I/O only if you actually have performance problems, with Redis 19 | # instances being able to use a quite big percentage of CPU time, otherwise 20 | # there is no point in using this feature. 21 | # 22 | # So for instance if you have a four cores boxes, try to use 2 or 3 I/O 23 | # threads, if you have a 8 cores, try to use 6 threads. In order to 24 | # enable I/O threads use the following configuration directive: 25 | # 26 | io-threads 1 27 | 28 | # 29 | # Setting io-threads to 1 will just use the main thread as usual. 30 | # When I/O threads are enabled, we only use threads for writes, that is 31 | # to thread the write(2) syscall and transfer the client buffers to the 32 | # socket. However it is also possible to enable threading of reads and 33 | # protocol parsing using the following configuration directive, by setting 34 | # it to yes: 35 | # 36 | io-threads-do-reads no 37 | 38 | # 39 | # Usually threading reads doesn't help much. 40 | # 41 | # NOTE 1: This configuration directive cannot be changed at runtime via 42 | # CONFIG SET. Aso this feature currently does not work when SSL is 43 | # enabled. 44 | # 45 | # NOTE 2: If you want to test the Redis speedup using redis-benchmark, make 46 | # sure you also run the benchmark itself in threaded mode, using the 47 | # --threads option to match the number of Redis threads, otherwise you'll not 48 | # be able to notice the improvements. 49 | 50 | ############################ KERNEL OOM CONTROL ############################## 51 | 52 | # On Linux, it is possible to hint the kernel OOM killer on what processes 53 | # should be killed first when out of memory. 54 | # 55 | # Enabling this feature makes Redis actively control the oom_score_adj value 56 | # for all its processes, depending on their role. The default scores will 57 | # attempt to have background child processes killed before all others, and 58 | # replicas killed before masters. 59 | 60 | oom-score-adj no 61 | 62 | # When oom-score-adj is used, this directive controls the specific values used 63 | # for master, replica and background child processes. Values range -1000 to 64 | # 1000 (higher means more likely to be killed). 65 | # 66 | # Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) 67 | # can freely increase their value, but not decrease it below its initial 68 | # settings. 69 | # 70 | # Values are used relative to the initial value of oom_score_adj when the server 71 | # starts. Because typically the initial value is 0, they will often match the 72 | # absolute values. 73 | 74 | oom-score-adj-values 0 200 800 75 | 76 | ################################## SLOW LOG ################################### 77 | 78 | # The Redis Slow Log is a system to log queries that exceeded a specified 79 | # execution time. 
The execution time does not include the I/O operations 80 | # like talking with the client, sending the reply and so forth, 81 | # but just the time needed to actually execute the command (this is the only 82 | # stage of command execution where the thread is blocked and can not serve 83 | # other requests in the meantime). 84 | # 85 | # You can configure the slow log with two parameters: one tells Redis 86 | # what is the execution time, in microseconds, to exceed in order for the 87 | # command to get logged, and the other parameter is the length of the 88 | # slow log. When a new command is logged the oldest one is removed from the 89 | # queue of logged commands. 90 | 91 | # The following time is expressed in microseconds, so 1000000 is equivalent 92 | # to one second. Note that a negative number disables the slow log, while 93 | # a value of zero forces the logging of every command. 94 | slowlog-log-slower-than 10000 95 | 96 | # There is no limit to this length. Just be aware that it will consume memory. 97 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 98 | slowlog-max-len 128 99 | -------------------------------------------------------------------------------- /redis/memory.conf: -------------------------------------------------------------------------------- 1 | # #ddev-generated 2 | ############################## MEMORY MANAGEMENT ################################ 3 | 4 | # Set a memory usage limit to the specified amount of bytes. 5 | # When the memory limit is reached Redis will try to remove keys 6 | # according to the eviction policy selected (see maxmemory-policy). 7 | # 8 | # If Redis can't remove keys according to the policy, or if the policy is 9 | # set to 'noeviction', Redis will start to reply with errors to commands 10 | # that would use more memory, like SET, LPUSH, and so on, and will continue 11 | # to reply to read-only commands like GET. 12 | # 13 | # This option is usually useful when using Redis as an LRU or LFU cache, or to 14 | # set a hard memory limit for an instance (using the 'noeviction' policy). 15 | # 16 | # WARNING: If you have replicas attached to an instance with maxmemory on, 17 | # the size of the output buffers needed to feed the replicas are subtracted 18 | # from the used memory count, so that network problems / resyncs will 19 | # not trigger a loop where keys are evicted, and in turn the output 20 | # buffer of replicas is full with DELs of keys evicted triggering the deletion 21 | # of more keys, and so forth until the database is completely emptied. 22 | # 23 | # In short... if you have replicas attached it is suggested that you set a lower 24 | # limit for maxmemory so that there is some free RAM on the system for replica 25 | # output buffers (but this is not needed if the policy is 'noeviction'). 26 | # 27 | maxmemory 512mb 28 | 29 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 30 | # is reached. You can select one from the following behaviors: 31 | # 32 | # volatile-lru -> Evict using approximated LRU, only keys with an expire set. 33 | # allkeys-lru -> Evict any key using approximated LRU. 34 | # volatile-lfu -> Evict using approximated LFU, only keys with an expire set. 35 | # allkeys-lfu -> Evict any key using approximated LFU. 36 | # volatile-random -> Remove a random key having an expire set. 37 | # allkeys-random -> Remove a random key, any key. 
38 | # volatile-ttl -> Remove the key with the nearest expire time (minor TTL) 39 | # noeviction -> Don't evict anything, just return an error on write operations. 40 | # 41 | # LRU means Least Recently Used 42 | # LFU means Least Frequently Used 43 | # 44 | # Both LRU, LFU and volatile-ttl are implemented using approximated 45 | # randomized algorithms. 46 | # 47 | # Note: with any of the above policies, Redis will return an error on write 48 | # operations, when there are no suitable keys for eviction. 49 | # 50 | # At the date of writing these commands are: set setnx setex append 51 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 52 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 53 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 54 | # getset mset msetnx exec sort 55 | # 56 | # The default is: 57 | # 58 | maxmemory-policy allkeys-lru 59 | 60 | # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated 61 | # algorithms (in order to save memory), so you can tune it for speed or 62 | # accuracy. By default Redis will check five keys and pick the one that was 63 | # used least recently, you can change the sample size using the following 64 | # configuration directive. 65 | # 66 | # The default of 5 produces good enough results. 10 Approximates very closely 67 | # true LRU but costs more CPU. 3 is faster but not very accurate. 68 | # 69 | maxmemory-samples 4 70 | 71 | # Redis reclaims expired keys in two ways: upon access when those keys are 72 | # found to be expired, and also in background, in what is called the 73 | # "active expire key". The key space is slowly and interactively scanned 74 | # looking for expired keys to reclaim, so that it is possible to free memory 75 | # of keys that are expired and will never be accessed again in a short time. 76 | # 77 | # The default effort of the expire cycle will try to avoid having more than 78 | # ten percent of expired keys still in memory, and will try to avoid consuming 79 | # more than 25% of total memory and to add latency to the system. However 80 | # it is possible to increase the expire "effort" that is normally set to 81 | # "1", to a greater value, up to the value "10". At its maximum value the 82 | # system will use more CPU, longer cycles (and technically may introduce 83 | # more latency), and will tolerate less already expired keys still present 84 | # in the system. It's a tradeoff between memory, CPU and latency. 85 | # 86 | active-expire-effort 2 87 | 88 | ############################# LAZY FREEING #################################### 89 | 90 | # Redis has two primitives to delete keys. One is called DEL and is a blocking 91 | # deletion of the object. It means that the server stops processing new commands 92 | # in order to reclaim all the memory associated with an object in a synchronous 93 | # way. If the key deleted is associated with a small object, the time needed 94 | # in order to execute the DEL command is very small and comparable to most other 95 | # O(1) or O(log_N) commands in Redis. However if the key is associated with an 96 | # aggregated value containing millions of elements, the server can block for 97 | # a long time (even seconds) in order to complete the operation. 98 | # 99 | # For the above reasons Redis also offers non blocking deletion primitives 100 | # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and 101 | # FLUSHDB commands, in order to reclaim memory in background. 
Those commands 102 | # are executed in constant time. Another thread will incrementally free the 103 | # object in the background as fast as possible. 104 | # 105 | # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. 106 | # It's up to the design of the application to understand when it is a good 107 | # idea to use one or the other. However the Redis server sometimes has to 108 | # delete keys or flush the whole database as a side effect of other operations. 109 | # Specifically Redis deletes objects independently of a user call in the 110 | # following scenarios: 111 | # 112 | # 1) On eviction, because of the maxmemory and maxmemory policy configurations, 113 | # in order to make room for new data, without going over the specified 114 | # memory limit. 115 | # 2) Because of expire: when a key with an associated time to live (see the 116 | # EXPIRE command) must be deleted from memory. 117 | # 3) Because of a side effect of a command that stores data on a key that may 118 | # already exist. For example the RENAME command may delete the old key 119 | # content when it is replaced with another one. Similarly SUNIONSTORE 120 | # or SORT with STORE option may delete existing keys. The SET command 121 | # itself removes any old content of the specified key in order to replace 122 | # it with the specified string. 123 | # 4) During replication, when a replica performs a full resynchronization with 124 | # its master, the content of the whole database is removed in order to 125 | # load the RDB file just transferred. 126 | # 127 | # In all the above cases the default is to delete objects in a blocking way, 128 | # like if DEL was called. However you can configure each case specifically 129 | # in order to instead release memory in a non-blocking way like if UNLINK 130 | # was called, using the following configuration directives. 131 | 132 | lazyfree-lazy-eviction no 133 | lazyfree-lazy-expire no 134 | lazyfree-lazy-server-del no 135 | replica-lazy-flush no 136 | 137 | # It is also possible, for the case when to replace the user code DEL calls 138 | # with UNLINK calls is not easy, to modify the default behavior of the DEL 139 | # command to act exactly like UNLINK, using the following configuration 140 | # directive: 141 | 142 | lazyfree-lazy-user-del no 143 | -------------------------------------------------------------------------------- /redis/network.conf: -------------------------------------------------------------------------------- 1 | # #ddev-generated 2 | ################################## NETWORK ##################################### 3 | 4 | # By default, if no "bind" configuration directive is specified, Redis listens 5 | # for connections from all available network interfaces on the host machine. 6 | # It is possible to listen to just one or multiple selected interfaces using 7 | # the "bind" configuration directive, followed by one or more IP addresses. 8 | # 9 | # Examples: 10 | # 11 | # bind 192.168.1.100 10.0.0.1 12 | # bind 127.0.0.1 ::1 13 | # 14 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the 15 | # internet, binding to all the interfaces is dangerous and will expose the 16 | # instance to everybody on the internet. So by default we uncomment the 17 | # following bind directive, that will force Redis to listen only on the 18 | # IPv4 loopback interface address (this means Redis will only be able to 19 | # accept client connections from the same host that it is running on). 
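#
# (Unlike the upstream loopback default described above, this add-on binds to
# all interfaces below, presumably so the DDEV web container can reach the
# service over the project's internal Docker network; the users defined in
# security.conf still require a password. To inspect the effective values at
# runtime, something like the following illustrative commands can be used:
#
#   ddev redis-cli CONFIG GET bind
#   ddev redis-cli CONFIG GET protected-mode
# )
#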
20 | # 21 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 22 | # JUST COMMENT OUT THE FOLLOWING LINE. 23 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 24 | bind 0.0.0.0 25 | 26 | # Protected mode is a layer of security protection, in order to avoid that 27 | # Redis instances left open on the internet are accessed and exploited. 28 | # 29 | # When protected mode is on and if: 30 | # 31 | # 1) The server is not binding explicitly to a set of addresses using the 32 | # "bind" directive. 33 | # 2) No password is configured. 34 | # 35 | # The server only accepts connections from clients connecting from the 36 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 37 | # sockets. 38 | # 39 | # By default protected mode is enabled. You should disable it only if 40 | # you are sure you want clients from other hosts to connect to Redis 41 | # even if no authentication is configured, nor a specific set of interfaces 42 | # are explicitly listed using the "bind" directive. 43 | protected-mode yes 44 | 45 | # Accept connections on the specified port, default is 6379 (IANA #815344). 46 | # If port 0 is specified Redis will not listen on a TCP socket. 47 | port 6379 48 | 49 | # TCP listen() backlog. 50 | # 51 | # In high requests-per-second environments you need a high backlog in order 52 | # to avoid slow clients connection issues. Note that the Linux kernel 53 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 54 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 55 | # in order to get the desired effect. 56 | tcp-backlog 4096 57 | 58 | # Unix socket. 59 | # 60 | # Specify the path for the Unix socket that will be used to listen for 61 | # incoming connections. There is no default, so Redis will not listen 62 | # on a unix socket when not specified. 63 | # 64 | # unixsocket /tmp/redis.sock 65 | # unixsocketperm 700 66 | 67 | # Close the connection after a client is idle for N seconds (0 to disable) 68 | timeout 0 69 | 70 | # TCP keepalive. 71 | # 72 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 73 | # of communication. This is useful for two reasons: 74 | # 75 | # 1) Detect dead peers. 76 | # 2) Force network equipment in the middle to consider the connection to be 77 | # alive. 78 | # 79 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 80 | # Note that to close the connection the double of the time is needed. 81 | # On other kernels the period depends on the kernel configuration. 82 | # 83 | # A reasonable value for this option is 300 seconds, which is the new 84 | # Redis default starting with Redis 3.2.1. 85 | tcp-keepalive 0 86 | -------------------------------------------------------------------------------- /redis/redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration. 
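# This file holds the baseline settings used when the add-on's optimized mode
# is off; setup-redis-optimized-config.sh removes the split *.conf files in
# this directory unless that mode is enabled, and rewrites this file when it
# is. Mirroring the sequence used in tests/test.bats, enabling optimized mode
# looks roughly like this (illustrative only, run inside an existing DDEV
# project):
#
#   ddev dotenv set .ddev/.env.redis --redis-optimized=true
#   ddev add-on get ddev/ddev-redis
#   ddev restart
#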
2 | # #ddev-generated 3 | # Example configuration files for reference: 4 | # http://download.redis.io/redis-stable/redis.conf 5 | # http://download.redis.io/redis-stable/sentinel.conf 6 | 7 | maxmemory 2048mb 8 | maxmemory-policy allkeys-lfu 9 | 10 | # to disable Redis persistence, remove ddev-generated from this file, 11 | # and uncomment the two lines below: 12 | #appendonly no 13 | #save "" 14 | -------------------------------------------------------------------------------- /redis/scripts/settings.ddev.redis.php: -------------------------------------------------------------------------------- 1 | addPsr4('Drupal\\redis\\', 'modules/contrib/redis/src'); 24 | 25 | // Use redis for container cache. 26 | // The container cache is used to load the container definition itself, and 27 | // thus any configuration stored in the container itself is not available 28 | // yet. These lines force the container cache to use Redis rather than the 29 | // default SQL cache. 30 | $settings['bootstrap_container_definition'] = [ 31 | 'parameters' => [], 32 | 'services' => [ 33 | 'redis.factory' => [ 34 | 'class' => 'Drupal\redis\ClientFactory', 35 | ], 36 | 'cache.backend.redis' => [ 37 | 'class' => 'Drupal\redis\Cache\CacheBackendFactory', 38 | 'arguments' => ['@redis.factory', '@cache_tags_provider.container', '@serialization.phpserialize'], 39 | ], 40 | 'cache.container' => [ 41 | 'class' => '\Drupal\redis\Cache\PhpRedis', 42 | 'factory' => ['@cache.backend.redis', 'get'], 43 | 'arguments' => ['container'], 44 | ], 45 | 'cache_tags_provider.container' => [ 46 | 'class' => 'Drupal\redis\Cache\RedisCacheTagsChecksum', 47 | 'arguments' => ['@redis.factory'], 48 | ], 49 | 'serialization.phpserialize' => [ 50 | 'class' => 'Drupal\Component\Serialization\PhpSerialize', 51 | ], 52 | ], 53 | ]; 54 | } 55 | -------------------------------------------------------------------------------- /redis/scripts/setup-drupal-settings.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #ddev-generated 3 | set -e 4 | 5 | if [[ $DDEV_PROJECT_TYPE != drupal* ]] || [[ $DDEV_PROJECT_TYPE =~ ^drupal(6|7)$ ]] ; 6 | then 7 | for file in redis/scripts/settings.ddev.redis.php redis/scripts/setup-drupal-settings.sh; do 8 | if grep -q "#ddev-generated" "${file}" 2>/dev/null; then 9 | echo "Removing ${file} as not applicable" 10 | rm -f "${file}" 11 | fi 12 | done 13 | exit 0 14 | fi 15 | 16 | if ( ddev debug configyaml 2>/dev/null | grep 'disable_settings_management:\s*true' >/dev/null 2>&1 ) ; then 17 | exit 0 18 | fi 19 | 20 | cp redis/scripts/settings.ddev.redis.php $DDEV_APPROOT/$DDEV_DOCROOT/sites/default/ 21 | 22 | SETTINGS_FILE_NAME="${DDEV_APPROOT}/${DDEV_DOCROOT}/sites/default/settings.php" 23 | echo "Settings file name: ${SETTINGS_FILE_NAME}" 24 | grep -qF 'settings.ddev.redis.php' $SETTINGS_FILE_NAME || echo " 25 | // Include settings required for Redis cache. 26 | if ((file_exists(__DIR__ . '/settings.ddev.redis.php') && getenv('IS_DDEV_PROJECT') == 'true')) { 27 | include __DIR__ . 
'/settings.ddev.redis.php'; 28 | }" >> $SETTINGS_FILE_NAME 29 | -------------------------------------------------------------------------------- /redis/scripts/setup-redis-optimized-config.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #ddev-generated 3 | set -e 4 | 5 | script_file="${DDEV_APPROOT}/.ddev/redis/scripts/setup-redis-optimized-config.sh" 6 | extra_docker_file="${DDEV_APPROOT}/.ddev/docker-compose.redis_extra.yaml" 7 | 8 | if [[ $(ddev dotenv get .ddev/.env.redis --redis-optimized 2>/dev/null) != "true" ]]; then 9 | for file in advanced append general io memory network security snapshots; do 10 | if grep -q '#ddev-generated' "${DDEV_APPROOT}/.ddev/redis/${file}.conf" 2>/dev/null; then 11 | rm -f "${DDEV_APPROOT}/.ddev/redis/${file}.conf" 12 | fi 13 | done 14 | 15 | for file in "${extra_docker_file}" "${script_file}"; do 16 | if grep -q "#ddev-generated" "${file}" 2>/dev/null; then 17 | echo "Removing ${file}" 18 | rm -f "${file}" 19 | fi 20 | done 21 | exit 0 22 | fi 23 | 24 | if grep -q '#ddev-generated' "${DDEV_APPROOT}/.ddev/redis/redis.conf"; then 25 | cat >"${DDEV_APPROOT}/.ddev/redis/redis.conf" <"${extra_docker_file}" </dev/null; then 75 | echo "Removing ${script_file}" 76 | rm -f "${script_file}" 77 | fi 78 | -------------------------------------------------------------------------------- /redis/security.conf: -------------------------------------------------------------------------------- 1 | # #ddev-generated 2 | ################################## SECURITY ################################### 3 | 4 | # Warning: since Redis is pretty fast, an outside user can try up to 5 | # 1 million passwords per second against a modern box. This means that you 6 | # should use very strong passwords, otherwise they will be very easy to break. 7 | # Note that because the password is really a shared secret between the client 8 | # and the server, and should not be memorized by any human, the password 9 | # can be easily a long string from /dev/urandom or whatever, so by using a 10 | # long and unguessable password no brute force attack will be possible. 11 | 12 | # user config ending with "on >redis" means "redis" password 13 | user default ~* &* +@all on >redis 14 | user redis ~* &* +@all on >redis 15 | -------------------------------------------------------------------------------- /redis/snapshots.conf: -------------------------------------------------------------------------------- 1 | # #ddev-generated 2 | ################################ SNAPSHOTTING ################################ 3 | # 4 | # Save the DB on disk: 5 | # 6 | # save 7 | # 8 | # Will save the DB if both the given number of seconds and the given 9 | # number of write operations against the DB occurred. 10 | # 11 | # In the example below the behavior will be to save: 12 | # after 300 sec (5 min) if at least 1 key changed 13 | # after 150 sec (2.5 min) if at least 10 keys changed 14 | # after 30 sec if at least 10000 keys changed 15 | # 16 | # Note: you can disable saving completely by commenting out all "save" lines. 17 | # 18 | # It is also possible to remove all the previously configured save 19 | # points by adding a save directive with a single empty string argument 20 | # like in the following example: 21 | # 22 | # save "" 23 | save 3600 1 300 100 60 10000 24 | 25 | # By default Redis will stop accepting writes if RDB snapshots are enabled 26 | # (at least one save point) and the latest background save failed. 
27 | # This will make the user aware (in a hard way) that data is not persisting 28 | # on disk properly, otherwise chances are that no one will notice and some 29 | # disaster will happen. 30 | # 31 | # If the background saving process will start working again Redis will 32 | # automatically allow writes again. 33 | # 34 | # However if you have setup your proper monitoring of the Redis server 35 | # and persistence, you may want to disable this feature so that Redis will 36 | # continue to work as usual even if there are problems with disk, 37 | # permissions, and so forth. 38 | stop-writes-on-bgsave-error yes 39 | 40 | # Compress string objects using LZF when dump .rdb databases? 41 | # By default compression is enabled as it's almost always a win. 42 | # If you want to save some CPU in the saving child set it to 'no' but 43 | # the dataset will likely be bigger if you have compressible values or keys. 44 | rdbcompression no 45 | 46 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 47 | # This makes the format more resistant to corruption but there is a performance 48 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 49 | # for maximum performances. 50 | # 51 | # RDB files created with checksum disabled have a checksum of zero that will 52 | # tell the loading code to skip the check. 53 | rdbchecksum no 54 | 55 | # The filename where to dump the DB 56 | dbfilename REPLACE_ME.rdb 57 | 58 | # The working directory. 59 | # 60 | # The DB will be written inside this directory, with the filename specified 61 | # above using the 'dbfilename' configuration directive. 62 | # 63 | # The Append Only File will also be created inside this directory. 64 | # 65 | # Note that you must specify a directory here, not a file name. 66 | dir /data 67 | -------------------------------------------------------------------------------- /tests/test.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | # Bats is a testing framework for Bash 4 | # Documentation https://bats-core.readthedocs.io/en/stable/ 5 | # Bats libraries documentation https://github.com/ztombol/bats-docs 6 | 7 | # For local tests, install bats-core, bats-assert, bats-file, bats-support 8 | # And run this in the add-on root directory: 9 | # bats ./tests/test.bats 10 | # To run specific test: 11 | # bats ./tests/test.bats --filter-tags 'laravel-redis' 12 | # For debugging: 13 | # bats ./tests/test.bats --show-output-of-passing-tests --verbose-run --print-output-on-failure 14 | 15 | setup() { 16 | set -eu -o pipefail 17 | 18 | # Override this variable for your add-on: 19 | export GITHUB_REPO=ddev/ddev-redis 20 | 21 | TEST_BREW_PREFIX="$(brew --prefix 2>/dev/null || true)" 22 | export BATS_LIB_PATH="${BATS_LIB_PATH}:${TEST_BREW_PREFIX}/lib:/usr/lib/bats" 23 | bats_load_library bats-assert 24 | bats_load_library bats-file 25 | bats_load_library bats-support 26 | 27 | export DIR="$(cd "$(dirname "${BATS_TEST_FILENAME}")/.." 
>/dev/null 2>&1 && pwd)" 28 | export PROJNAME="test-$(basename "${GITHUB_REPO}")" 29 | mkdir -p ~/tmp 30 | export TESTDIR=$(mktemp -d ~/tmp/${PROJNAME}.XXXXXX) 31 | export DDEV_NONINTERACTIVE=true 32 | export DDEV_NO_INSTRUMENTATION=true 33 | ddev delete -Oy "${PROJNAME}" >/dev/null 2>&1 || true 34 | cd "${TESTDIR}" 35 | run ddev config --project-name="${PROJNAME}" --project-tld=ddev.site 36 | assert_success 37 | 38 | export REDIS_MAJOR_VERSION=7 39 | export HAS_DRUPAL_SETTINGS=false 40 | export HAS_OPTIMIZED_CONFIG=false 41 | export RUN_BGSAVE=false 42 | export CHECK_REDIS_READ_WRITE=false 43 | } 44 | 45 | health_checks() { 46 | run ddev redis-cli INFO 47 | assert_success 48 | assert_output --partial "redis_version:$REDIS_MAJOR_VERSION." 49 | 50 | if [ "${HAS_DRUPAL_SETTINGS}" = "true" ]; then 51 | assert_file_exist web/sites/default/settings.ddev.redis.php 52 | 53 | run grep -F "settings.ddev.redis.php" web/sites/default/settings.php 54 | assert_success 55 | else 56 | assert_file_not_exist web/sites/default/settings.ddev.redis.php 57 | fi 58 | 59 | assert_file_exist .ddev/redis/redis.conf 60 | 61 | redis_optimized_files=( 62 | .ddev/docker-compose.redis_extra.yaml 63 | .ddev/redis/advanced.conf 64 | .ddev/redis/append.conf 65 | .ddev/redis/general.conf 66 | .ddev/redis/io.conf 67 | .ddev/redis/memory.conf 68 | .ddev/redis/network.conf 69 | .ddev/redis/security.conf 70 | .ddev/redis/snapshots.conf 71 | ) 72 | 73 | if [ "$HAS_OPTIMIZED_CONFIG" = "true" ]; then 74 | for file in "${redis_optimized_files[@]}"; do 75 | assert_file_exist "$file" 76 | done 77 | 78 | run grep -F "${PROJNAME}" .ddev/redis/snapshots.conf 79 | assert_output "dbfilename ${PROJNAME}.rdb" 80 | else 81 | for file in "${redis_optimized_files[@]}"; do 82 | assert_file_not_exist "$file" 83 | done 84 | fi 85 | 86 | run ddev redis-cli "KEYS \*" 87 | assert_success 88 | assert_output "" 89 | 90 | # populate 10000 keys 91 | echo '' > keys.txt 92 | run bash -c 'for i in {1..10000}; do echo "SET testkey-$i $i" >> keys.txt; done' 93 | assert_success 94 | run bash -c "cat keys.txt | ddev redis --pipe" 95 | assert_success 96 | assert_line --index 2 "errors: 0, replies: 10000" 97 | 98 | # check if Redis really works with read/write from the app 99 | if [ "${CHECK_REDIS_READ_WRITE}" = "true" ]; then 100 | run curl -sf https://${PROJNAME}.ddev.site/set/foo/bar 101 | assert_success 102 | assert_output "bar" 103 | 104 | run curl -sf https://${PROJNAME}.ddev.site/set/test/value 105 | assert_success 106 | assert_output "value" 107 | 108 | run curl -sf https://${PROJNAME}.ddev.site/get/foo 109 | assert_success 110 | assert_output "bar" 111 | 112 | run curl -sf https://${PROJNAME}.ddev.site/get/test 113 | assert_success 114 | assert_output "value" 115 | 116 | # double-check the value to make sure nothing has been deleted 117 | run curl -sf https://${PROJNAME}.ddev.site/get/foo 118 | assert_success 119 | assert_output "bar" 120 | 121 | run curl -sf https://${PROJNAME}.ddev.site/get/test 122 | assert_success 123 | assert_output "value" 124 | 125 | run ddev redis-flush 126 | assert_success 127 | assert_output "OK" 128 | 129 | # after flushing, nothing should be here 130 | run curl -sf https://${PROJNAME}.ddev.site/get/foo 131 | assert_success 132 | assert_output "" 133 | 134 | run curl -sf https://${PROJNAME}.ddev.site/get/test 135 | assert_success 136 | assert_output "" 137 | fi 138 | 139 | if [ "${RUN_BGSAVE}" != "true" ]; then 140 | return 141 | fi 142 | 143 | # Trigger a BGSAVE 144 | run ddev redis BGSAVE 145 | assert_success 146 | 
assert_output "Background saving started" 147 | 148 | sleep 10 149 | 150 | run ddev stop 151 | assert_success 152 | 153 | run ddev start -y 154 | assert_success 155 | 156 | run ddev redis DBSIZE 157 | assert_success 158 | assert_output "10000" 159 | 160 | run ddev redis-flush 161 | assert_success 162 | assert_output "OK" 163 | 164 | run ddev redis DBSIZE 165 | assert_success 166 | assert_output "0" 167 | } 168 | 169 | teardown() { 170 | set -eu -o pipefail 171 | ddev delete -Oy ${PROJNAME} >/dev/null 2>&1 172 | [ "${TESTDIR}" != "" ] && rm -rf ${TESTDIR} 173 | } 174 | 175 | laravel_redis_cache_setup() { 176 | export CHECK_REDIS_READ_WRITE=true 177 | 178 | run ddev composer create laravel/laravel 179 | assert_success 180 | 181 | run ddev dotenv set .env --cache-store=redis --redis-host=redis 182 | assert_success 183 | 184 | if [ "${HAS_OPTIMIZED_CONFIG}" = "true" ]; then 185 | run ddev dotenv set .env --redis-password=redis 186 | assert_success 187 | fi 188 | 189 | cat <<'EOF' >routes/web.php 190 | set($key, $value); 194 | echo $value; 195 | }); 196 | Route::get('/get/{key}', function ($key) { 197 | echo cache()->get($key); 198 | }); 199 | EOF 200 | assert_file_exist routes/web.php 201 | } 202 | 203 | # bats test_tags=default 204 | @test "install from directory" { 205 | set -eu -o pipefail 206 | 207 | export RUN_BGSAVE=true 208 | 209 | run ddev start -y 210 | assert_success 211 | 212 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 213 | run ddev add-on get "${DIR}" 214 | assert_success 215 | 216 | run ddev restart -y 217 | assert_success 218 | health_checks 219 | } 220 | 221 | # bats test_tags=default-optimized 222 | @test "install from directory with optimized config" { 223 | set -eu -o pipefail 224 | 225 | export HAS_OPTIMIZED_CONFIG=true 226 | export RUN_BGSAVE=true 227 | 228 | run ddev start -y 229 | assert_success 230 | 231 | run ddev dotenv set .ddev/.env.redis --redis-optimized=true 232 | assert_success 233 | assert_file_exist .ddev/.env.redis 234 | 235 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 236 | run ddev add-on get "${DIR}" 237 | assert_success 238 | 239 | run ddev restart -y 240 | assert_success 241 | health_checks 242 | } 243 | 244 | # bats test_tags=drupal 245 | @test "Drupal installation" { 246 | set -eu -o pipefail 247 | 248 | export HAS_DRUPAL_SETTINGS=true 249 | 250 | run ddev config --project-type=drupal --docroot=web 251 | assert_success 252 | run ddev start -y 253 | assert_success 254 | 255 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 256 | run ddev add-on get "${DIR}" 257 | assert_success 258 | 259 | run ddev restart -y 260 | assert_success 261 | health_checks 262 | } 263 | 264 | # bats test_tags=laravel-redis 265 | @test "Laravel installation: ddev redis-backend redis" { 266 | set -eu -o pipefail 267 | 268 | run ddev config --project-type=laravel --docroot=public 269 | assert_success 270 | 271 | laravel_redis_cache_setup 272 | 273 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 274 | run ddev add-on get "${DIR}" 275 | assert_success 276 | 277 | run ddev redis-backend redis 278 | assert_success 279 | 280 | run ddev restart -y 281 | assert_success 282 | health_checks 283 | } 284 | 285 | # bats test_tags=laravel-redis-alpine-optimized 286 | @test "Laravel installation: ddev redis-backend redis-alpine optimized" { 287 | set -eu -o pipefail 288 | 289 | export HAS_OPTIMIZED_CONFIG=true 290 | 291 | run ddev config --project-type=laravel --docroot=public 292 | 
assert_success 293 | 294 | laravel_redis_cache_setup 295 | 296 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 297 | run ddev add-on get "${DIR}" 298 | assert_success 299 | 300 | run ddev redis-backend redis-alpine optimized 301 | assert_success 302 | 303 | run ddev restart -y 304 | assert_success 305 | health_checks 306 | } 307 | 308 | # bats test_tags=laravel-valkey 309 | @test "Laravel installation: ddev redis-backend valkey" { 310 | set -eu -o pipefail 311 | 312 | run ddev config --project-type=laravel --docroot=public 313 | assert_success 314 | 315 | laravel_redis_cache_setup 316 | 317 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 318 | run ddev add-on get "${DIR}" 319 | assert_success 320 | 321 | run ddev redis-backend valkey 322 | assert_success 323 | 324 | run ddev restart -y 325 | assert_success 326 | health_checks 327 | } 328 | 329 | # bats test_tags=laravel-valkey-alpine-optimized 330 | @test "Laravel installation: ddev redis-backend valkey-alpine optimized" { 331 | set -eu -o pipefail 332 | 333 | export HAS_OPTIMIZED_CONFIG=true 334 | 335 | run ddev config --project-type=laravel --docroot=public 336 | assert_success 337 | 338 | laravel_redis_cache_setup 339 | 340 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 341 | run ddev add-on get "${DIR}" 342 | assert_success 343 | 344 | run ddev redis-backend valkey-alpine optimized 345 | assert_success 346 | 347 | run ddev restart -y 348 | assert_success 349 | health_checks 350 | } 351 | 352 | # bats test_tags=laravel-redis-6 353 | @test "Laravel installation: ddev redis-backend redis:6" { 354 | set -eu -o pipefail 355 | 356 | export REDIS_MAJOR_VERSION=6 357 | 358 | run ddev config --project-type=laravel --docroot=public 359 | assert_success 360 | 361 | laravel_redis_cache_setup 362 | 363 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 364 | run ddev add-on get "${DIR}" 365 | assert_success 366 | 367 | run ddev redis-backend redis:${REDIS_MAJOR_VERSION} 368 | assert_success 369 | 370 | run ddev restart -y 371 | assert_success 372 | health_checks 373 | } 374 | 375 | # bats test_tags=drupal-7 376 | @test "Drupal 7 installation" { 377 | set -eu -o pipefail 378 | 379 | # Drupal configuration should not be present in Drupal 7 380 | export HAS_DRUPAL_SETTINGS=false 381 | 382 | run ddev config --project-type=drupal7 --docroot=web 383 | assert_success 384 | run ddev start -y 385 | assert_success 386 | 387 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 388 | run ddev add-on get "${DIR}" 389 | assert_success 390 | 391 | run ddev restart -y 392 | assert_success 393 | health_checks 394 | } 395 | 396 | # bats test_tags=drupal-no-settings 397 | @test "Drupal installation without settings management" { 398 | set -eu -o pipefail 399 | 400 | export HAS_DRUPAL_SETTINGS=false 401 | 402 | run ddev config --disable-settings-management --project-type=drupal --docroot=web 403 | assert_success 404 | run ddev start -y 405 | assert_success 406 | 407 | echo "# ddev add-on get ${DIR} with project ${PROJNAME} in $(pwd)" >&3 408 | run ddev add-on get "${DIR}" 409 | assert_success 410 | 411 | run ddev restart -y 412 | assert_success 413 | health_checks 414 | } 415 | -------------------------------------------------------------------------------- /tests/testdata/.gitmanaged: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ddev/ddev-redis/a6bed08ef1cc1b600306fbc302f6673a67fc0b7a/tests/testdata/.gitmanaged --------------------------------------------------------------------------------
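For a quick manual check outside the bats suite, a minimal smoke test along the lines of the health checks in tests/test.bats can be run against an existing DDEV project. This is only a sketch: `ddev redis-cli` and `ddev redis-flush` are the wrappers shipped in commands/redis, and the key name used below is arbitrary.

```bash
ddev add-on get ddev/ddev-redis
ddev restart

# Confirm the service is up and answering
ddev redis-cli INFO server | grep redis_version

# Round-trip an arbitrary key, then clear the cache with the bundled flush command
ddev redis-cli SET smoketest hello
ddev redis-cli GET smoketest
ddev redis-flush
```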