├── .eslintrc.json ├── .gitignore ├── CI-CD ├── CircleCI.md ├── GitHub-Actions.md ├── GitLab-CI.md └── Jenkins.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cloud ├── AWS.md ├── Azure.md ├── GCP.md ├── Kubernetes-on-AWS.md └── Terraform-on-AWS.md ├── Containerization ├── CRI-O.md ├── Docker.md ├── Helm.md ├── Kubernetes.md ├── OpenShift.md └── Podman.md ├── Infrastructure-Management ├── Ansible.md ├── Chef.md ├── Puppet.md └── Terraform.md ├── LICENSE ├── Monitoring ├── CloudWatch.md ├── ELK-Stack.md ├── Grafana.md ├── Nagios.md └── Prometheus.md ├── Networking ├── Consul.md ├── Envoy.md ├── Istio.md └── Linkerd.md ├── README.md ├── Security ├── AquaSec.md ├── HashiCorp-Vault.md ├── SonarQube.md └── Trivy.md ├── Version-Control ├── Bitbucket.md ├── GitHub.md └── GitLab.md ├── next-env.d.ts ├── next.config.mjs ├── package-lock.json ├── package.json ├── postcss.config.js ├── public ├── android-chrome-192x192.png ├── android-chrome-512x512.png ├── apple-touch-icon.png ├── favicon-16x16.png ├── favicon-32x32.png ├── favicon.ico ├── grid-pattern.svg ├── icons │ └── README.md ├── logo.png ├── manifest.json ├── placeholder.svg ├── robots.txt ├── site.webmanifest ├── sitemap.xml └── static │ ├── cheatsheets-content.json │ └── cheatsheets-metadata.json ├── scripts ├── categories.js ├── categoryData.js └── process-markdown.mjs ├── src ├── app │ ├── [category] │ │ ├── CategoryPageClient.tsx │ │ ├── [slug] │ │ │ ├── CheatsheetPageClient.tsx │ │ │ ├── error.tsx │ │ │ ├── loading.tsx │ │ │ └── page.tsx │ │ ├── loading.tsx │ │ ├── not-found.tsx │ │ └── page.tsx │ ├── about │ │ └── page.tsx │ ├── api │ │ └── cheatsheets │ │ │ ├── [category] │ │ │ └── [slug] │ │ │ │ └── route.ts │ │ │ └── route.ts │ ├── categories │ │ └── page.tsx │ ├── contribute │ │ └── page.tsx │ ├── getting-started │ │ └── page.tsx │ ├── globals.css │ ├── layout.tsx │ ├── not-found.tsx │ ├── page.tsx │ └── search │ │ └── page.tsx ├── components │ ├── CategoryActions.tsx │ ├── CategoryCard.tsx │ ├── CategoryIcon.tsx │ ├── CheatsheetTemplate.tsx │ ├── CopyButton.tsx │ ├── ExploreButton.tsx │ ├── Footer.tsx │ ├── GlobalLoader.tsx │ ├── Header.tsx │ ├── HomeFilters.tsx │ ├── MarkdownContent.tsx │ ├── OptimizedImage.tsx │ ├── ScrollToTop.tsx │ ├── SearchBar.tsx │ ├── ThemeToggle.tsx │ ├── ToolIcon.tsx │ └── ToolLogo.tsx ├── config │ └── toolIcons.ts ├── context │ └── ThemeContext.tsx ├── data │ └── cheatsheets.ts ├── hooks │ └── useOptimizedAnimation.ts └── utils │ ├── categoryData.ts │ ├── localStorage.ts │ ├── markdown.ts │ ├── performance.ts │ └── toolIcons.ts ├── tailwind.config.js └── tsconfig.json /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "next/core-web-vitals", 4 | "next/typescript" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | .next 3 | .env 4 | .env.local -------------------------------------------------------------------------------- /CI-CD/CircleCI.md: -------------------------------------------------------------------------------- 1 | # CircleCI Cheatsheet 2 | 3 | ![](https://imgur.com/s6aXKl9.png) 4 | 5 | **1. Introduction:** 6 | 7 | - CircleCI is a continuous integration and delivery platform that automates the build, test, and deploy processes, allowing for quick and efficient development workflows. 8 | 9 | **2. 
Key Concepts:**
10 | 
11 | - **Job:** A collection of steps to be executed in a build.
12 | - **Step:** A single command or script within a job.
13 | - **Workflow:** Defines the order of jobs and their dependencies.
14 | - **Executor:** Specifies the environment in which the job runs (e.g., Docker, Linux VM, macOS).
15 | 
16 | **3. Basic `.circleci/config.yml` Example:**
17 | 
18 | - **YAML Syntax:**
19 | 
20 |   ```yaml
21 |   version: 2.1
22 | 
23 |   jobs:
24 |     build:
25 |       docker:
26 |         - image: circleci/node:14
27 |       steps:
28 |         - checkout
29 |         - run: npm install
30 |         - run: npm test
31 | 
32 |     deploy:
33 |       docker:
34 |         - image: circleci/node:14
35 |       steps:
36 |         - checkout
37 |         - run: npm run deploy
38 | 
39 |   workflows:
40 |     version: 2
41 |     build_and_deploy:
42 |       jobs:
43 |         - build
44 |         - deploy
45 |   ```
46 | 
47 | **4. Executors:**
48 | 
49 | - **Docker:** Run jobs in Docker containers.
50 | 
51 |   ```yaml
52 |   docker:
53 |     - image: circleci/node:14
54 |   ```
55 | 
56 | - **Machine:** Run jobs in a Linux VM.
57 | 
58 |   ```yaml
59 |   machine:
60 |     image: ubuntu-2004:202101-01
61 |   ```
62 | 
63 | - **macOS:** Run jobs on macOS for iOS builds.
64 | 
65 |   ```yaml
66 |   macos:
67 |     xcode: "12.4.0"
68 |   ```
69 | 
70 | **5. Reusable Configurations:**
71 | 
72 | - **Commands:** Reuse steps across multiple jobs.
73 | 
74 |   ```yaml
75 |   commands:
76 |     setup:
77 |       steps:
78 |         - checkout
79 |         - run: npm install
80 | 
81 |   jobs:
82 |     build:
83 |       docker:
84 |         - image: circleci/node:14
85 |       steps:
86 |         - setup
87 |         - run: npm test
88 |   ```
89 | 
90 | - **Executors:** Reuse the environment configuration.
91 | 
92 |   ```yaml
93 |   executors:
94 |     node-executor:
95 |       docker:
96 |         - image: circleci/node:14
97 | 
98 |   jobs:
99 |     build:
100 |       executor: node-executor
101 |       steps:
102 |         - checkout
103 |         - run: npm install
104 |   ```
105 | 
106 | **6. Caching and Artifacts:**
107 | 
108 | - **Caching:** Speed up builds by caching dependencies.
109 | 
110 |   ```yaml
111 |   - restore_cache:
112 |       keys:
113 |         - v1-dependencies-{{ checksum "package-lock.json" }}
114 |   - save_cache:
115 |       paths:
116 |         - node_modules
117 |       key: v1-dependencies-{{ checksum "package-lock.json" }}
118 |   ```
119 | 
120 | - **Artifacts:** Save build outputs and other data for later use.
121 | 
122 |   ```yaml
123 |   - store_artifacts:
124 |       path: ./build
125 |       destination: build_output
126 |   ```
127 | 
128 | **7. Workflows:**
129 | 
130 | - **Sequential Jobs:** Define jobs that run in sequence by adding `requires` (without it, jobs in a workflow start in parallel).
131 | 
132 |   ```yaml
133 |   workflows:
134 |     version: 2
135 |     build_and_deploy:
136 |       jobs:
137 |         - build
138 |         - deploy: { requires: [build] }
139 |   ```
140 | 
141 | - **Parallel Jobs:** Run jobs in parallel to speed up pipeline execution (jobs with no `requires` start at the same time).
142 | 
143 |   ```yaml
144 |   workflows:
145 |     version: 2
146 |     test-and-deploy:
147 |       jobs:
148 |         - test
149 |         - deploy
150 |   ```
151 | 
152 | **8. Environment Variables:**
153 | 
154 | - **Project-level Variables:** Set environment variables in the CircleCI project settings.
155 | - **Context Variables:** Use contexts to securely store and manage environment variables.
156 | - **Job-level Variables:**
157 | 
158 |   ```yaml
159 |   jobs:
160 |     build:
161 |       docker:
162 |         - image: circleci/node:14
163 |       environment:
164 |         NODE_ENV: production
165 |   ```
166 | 
167 | **9. Advanced CircleCI Features:**
168 | 
169 | - **Orbs:** Reusable packages of CircleCI configuration that make it easy to integrate with third-party tools.
170 | 171 | ```yaml 172 | orbs: 173 | aws-s3: circleci/aws-s3@4.2.0 174 | 175 | jobs: 176 | deploy: 177 | steps: 178 | - aws-s3/copy: 179 | from: "build/" 180 | to: "s3://my-bucket/" 181 | ``` 182 | 183 | - **Conditional Steps:** Run steps conditionally based on the success or failure of previous steps. 184 | 185 | ```yaml 186 | - run: 187 | name: Deploy only if tests pass 188 | command: ./deploy.sh 189 | when: on_success 190 | ``` 191 | 192 | **10. Best Practices:** 193 | 194 | - **Parallelism:** Use parallelism to reduce build times by running tests and other tasks simultaneously. 195 | - **Modular Configurations:** Break down your CircleCI configuration into reusable components with orbs, commands, and executors. 196 | - **Effective Caching:** Cache dependencies effectively to reduce build times, but remember to invalidate caches when necessary to avoid stale dependencies. 197 | -------------------------------------------------------------------------------- /CI-CD/GitHub-Actions.md: -------------------------------------------------------------------------------- 1 | # GitHub Actions Cheatsheet 2 | 3 | ![](https://imgur.com/GMwRo18.png) 4 | 5 | **1. Introduction:** 6 | 7 | - GitHub Actions is a powerful CI/CD and automation tool integrated directly into GitHub repositories, allowing you to build, test, and deploy your code. 8 | 9 | **2. Key Concepts:** 10 | 11 | - **Workflow:** An automated process defined in YAML that is triggered by events like `push`, `pull_request`, etc. 12 | - **Job:** A set of steps that runs on the same runner. 13 | - **Step:** An individual task, such as running a script or installing a dependency. 14 | - **Runner:** A server that runs the jobs in a workflow, can be GitHub-hosted or self-hosted. 15 | 16 | **3. Basic Workflow Example:** 17 | 18 | - **YAML Syntax:** 19 | 20 | ```yaml 21 | name: CI Workflow 22 | 23 | on: 24 | push: 25 | branches: 26 | - main 27 | pull_request: 28 | branches: 29 | - main 30 | 31 | jobs: 32 | build: 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v3 36 | - name: Set up Node.js 37 | uses: actions/setup-node@v3 38 | with: 39 | node-version: '14' 40 | - run: npm install 41 | - run: npm test 42 | ``` 43 | 44 | **4. Common Actions:** 45 | 46 | - **actions/checkout:** Checks out your repository under `$GITHUB_WORKSPACE`. 47 | - **actions/setup-node:** Sets up a Node.js environment. 48 | - **actions/upload-artifact:** Uploads build artifacts for later use. 49 | - **actions/cache:** Caches dependencies like `node_modules` or `Maven`. 50 | 51 | **5. Triggers:** 52 | 53 | - **on: push:** Trigger a workflow when a push occurs. 54 | - **on: pull_request:** Trigger a workflow when a pull request is opened. 55 | - **on: schedule:** Schedule a workflow to run at specific times using cron syntax. 56 | 57 | **6. Environment Variables:** 58 | 59 | - **Set environment variables:** 60 | 61 | ```yaml 62 | env: 63 | NODE_ENV: production 64 | DEBUG: true 65 | ``` 66 | 67 | - **Access secrets:** 68 | 69 | ```yaml 70 | env: 71 | MY_SECRET: ${{ secrets.MY_SECRET }} 72 | ``` 73 | 74 | **7. Matrix Builds:** 75 | 76 | - **Example:** 77 | 78 | ```yaml 79 | jobs: 80 | build: 81 | runs-on: ubuntu-latest 82 | strategy: 83 | matrix: 84 | node-version: [12, 14, 16] 85 | steps: 86 | - uses: actions/checkout@v3 87 | - name: Set up Node.js 88 | uses: actions/setup-node@v3 89 | with: 90 | node-version: ${{ matrix.node-version }} 91 | - run: npm install 92 | - run: npm test 93 | ``` 94 | 95 | **8. 
Artifacts and Caching:** 96 | 97 | - **Upload Artifacts:** 98 | 99 | ```yaml 100 | - name: Upload build artifacts 101 | uses: actions/upload-artifact@v3 102 | with: 103 | name: my-artifact 104 | path: ./build 105 | ``` 106 | 107 | - **Caching Dependencies:** 108 | 109 | ```yaml 110 | - name: Cache Node.js modules 111 | uses: actions/cache@v3 112 | with: 113 | path: node_modules 114 | key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} 115 | restore-keys: | 116 | ${{ runner.os }}-node- 117 | ``` 118 | 119 | **9. Reusable Workflows:** 120 | 121 | - **Define a reusable workflow:** 122 | 123 | ```yaml 124 | name: Reusable CI Workflow 125 | 126 | on: 127 | workflow_call: 128 | inputs: 129 | node-version: 130 | required: true 131 | type: string 132 | 133 | jobs: 134 | build: 135 | runs-on: ubuntu-latest 136 | steps: 137 | - uses: actions/checkout@v3 138 | - name: Set up Node.js 139 | uses: actions/setup-node@v3 140 | with: 141 | node-version: ${{ inputs.node-version }} 142 | - run: npm install 143 | - run: npm test 144 | ``` 145 | 146 | - **Call a reusable workflow:** 147 | 148 | ```yaml 149 | jobs: 150 | call-workflow: 151 | uses: ./.github/workflows/reusable-workflow.yml 152 | with: 153 | node-version: '14' 154 | ``` 155 | 156 | **10. Best Practices:** 157 | 158 | - **Modular Workflows:** Break down complex workflows into smaller, reusable pieces. 159 | - **Use Environments:** Leverage environments in GitHub Actions for deployments with manual approvals. 160 | - **Secret Management:** Always use GitHub Secrets for sensitive information and never hard-code them. 161 | -------------------------------------------------------------------------------- /CI-CD/GitLab-CI.md: -------------------------------------------------------------------------------- 1 | # GitLab CI Cheatsheet 2 | 3 | ![](https://imgur.com/dbufti0.png) 4 | 5 | **1. Introduction:** 6 | 7 | - GitLab CI/CD is a part of GitLab, a complete DevOps platform, allowing you to define CI/CD pipelines directly within your GitLab repository using the `.gitlab-ci.yml` file. 8 | 9 | **2. Key Concepts:** 10 | 11 | - **Pipeline:** A series of stages that run jobs sequentially or in parallel. 12 | - **Job:** An individual unit of work, such as running tests or deploying code. 13 | - **Stage:** A group of jobs that run in parallel. 14 | - **Runner:** The agent that executes jobs, can be GitLab-hosted or self-hosted. 15 | 16 | **3. Basic `.gitlab-ci.yml` Example:** 17 | 18 | - **YAML Syntax:** 19 | 20 | ```yaml 21 | stages: 22 | - build 23 | - test 24 | - deploy 25 | 26 | build-job: 27 | stage: build 28 | script: 29 | - echo "Building the project..." 30 | - make 31 | 32 | test-job: 33 | stage: test 34 | 35 | 36 | script: 37 | - echo "Running tests..." 38 | - make test 39 | 40 | deploy-job: 41 | stage: deploy 42 | script: 43 | - echo "Deploying the project..." 44 | - make deploy 45 | ``` 46 | 47 | **4. Runners:** 48 | 49 | - **Shared Runners:** Provided by GitLab and available to all projects. 50 | - **Specific Runners:** Custom runners registered to a specific project or group. 51 | - **Tags:** Use tags to specify which runner should execute a job. 52 | 53 | **5. Artifacts and Caching:** 54 | 55 | - **Artifacts:** Save job outputs and make them available to subsequent jobs. 56 | 57 | ```yaml 58 | artifacts: 59 | paths: 60 | - build/ 61 | expire_in: 1 week 62 | ``` 63 | 64 | - **Caching:** Speed up jobs by reusing previously downloaded dependencies. 65 | 66 | ```yaml 67 | cache: 68 | paths: 69 | - node_modules/ 70 | ``` 71 | 72 | **6. 
Environments and Deployments:** 73 | 74 | - **Environments:** Define environments to organize and manage deployments. 75 | 76 | ```yaml 77 | deploy-job: 78 | stage: deploy 79 | environment: 80 | name: production 81 | url: https://myapp.com 82 | script: 83 | - echo "Deploying to production..." 84 | - ./deploy.sh 85 | ``` 86 | 87 | - **Manual Deployments:** Require manual approval before a job runs. 88 | 89 | ```yaml 90 | deploy-job: 91 | stage: deploy 92 | script: 93 | - ./deploy.sh 94 | when: manual 95 | ``` 96 | 97 | **7. Advanced `.gitlab-ci.yml` Features:** 98 | 99 | - **YAML Anchors:** Reuse parts of your YAML configuration. 100 | 101 | ```yaml 102 | .default-job: &default-job 103 | script: 104 | - echo "Default job script" 105 | 106 | job1: 107 | <<: *default-job 108 | 109 | job2: 110 | <<: *default-job 111 | ``` 112 | 113 | - **Includes:** Include other YAML files to organize your configuration. 114 | 115 | ```yaml 116 | include: 117 | - local: '/templates/.gitlab-ci-template.yml' 118 | ``` 119 | 120 | **8. Security and Compliance:** 121 | 122 | - **Secret Variables:** Store sensitive data securely in GitLab CI/CD. 123 | 124 | ```yaml 125 | deploy-job: 126 | script: 127 | - deploy --token $CI_DEPLOY_TOKEN 128 | ``` 129 | 130 | - **Protected Branches:** Restrict certain jobs to run only on protected branches. 131 | 132 | **9. Troubleshooting:** 133 | 134 | - **Pipeline Logs:** Access detailed logs for each job to troubleshoot failures. 135 | - **Retrying Jobs:** Use the GitLab UI to manually retry failed jobs. 136 | 137 | **10. Best Practices:** 138 | 139 | - **Modular Pipelines:** Break down your pipeline into stages for better organization. 140 | - **Use CI/CD Templates:** Leverage GitLab’s built-in templates for common CI/CD tasks. 141 | - **Optimize Runner Usage:** Use caching, artifacts, and parallel jobs to reduce pipeline runtime. 142 | -------------------------------------------------------------------------------- /CI-CD/Jenkins.md: -------------------------------------------------------------------------------- 1 | # Jenkins Cheatsheet 2 | 3 | ![](https://imgur.com/jWGs9lH.png) 4 | 5 | **1. Introduction:** 6 | 7 | - Jenkins is an open-source automation server that helps automate parts of software development related to building, testing, and deploying, facilitating continuous integration and delivery. 8 | 9 | **2. Installation:** 10 | 11 | - **Docker Installation:** 12 | 13 | ```bash 14 | docker run -d -p 8080:8080 -p 50000:50000 jenkins/jenkins:lts 15 | ``` 16 | 17 | - **Direct Installation:** 18 | 19 | - **For Ubuntu/Debian:** 20 | 21 | ```bash 22 | wget -q -O - https://pkg.jenkins.io/debian/jenkins.io.key | sudo apt-key add - 23 | sudo sh -c 'echo deb http://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list' 24 | sudo apt update 25 | sudo apt install jenkins 26 | ``` 27 | 28 | - **For CentOS/RHEL:** 29 | 30 | ```bash 31 | sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo 32 | sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key 33 | sudo yum install jenkins 34 | ``` 35 | 36 | - **Access Jenkins:** 37 | - Visit `http://localhost:8080` in your web browser. 38 | 39 | **3. 
Jenkins Pipeline:** 40 | 41 | - **Declarative Pipeline:** 42 | 43 | ```groovy 44 | pipeline { 45 | agent any 46 | environment { 47 | MY_VAR = "value" 48 | } 49 | stages { 50 | stage('Checkout') { 51 | steps { 52 | checkout scm 53 | } 54 | } 55 | stage('Build') { 56 | steps { 57 | sh 'make' 58 | } 59 | } 60 | stage('Test') { 61 | steps { 62 | sh 'make test' 63 | } 64 | } 65 | stage('Deploy') { 66 | steps { 67 | sh 'make deploy' 68 | } 69 | } 70 | } 71 | post { 72 | success { 73 | echo 'Pipeline completed successfully!' 74 | } 75 | failure { 76 | echo 'Pipeline failed.' 77 | } 78 | } 79 | } 80 | ``` 81 | 82 | - **Scripted Pipeline:** 83 | 84 | ```groovy 85 | node { 86 | stage('Checkout') { 87 | checkout scm 88 | } 89 | stage('Build') { 90 | sh 'make' 91 | } 92 | stage('Test') { 93 | sh 'make test' 94 | } 95 | stage('Deploy') { 96 | sh 'make deploy' 97 | } 98 | } 99 | ``` 100 | 101 | **4. Common Jenkins Commands:** 102 | 103 | - **Restart Jenkins:** 104 | 105 | ```bash 106 | sudo systemctl restart jenkins 107 | ``` 108 | 109 | - **Manage Jenkins from CLI:** 110 | 111 | ```bash 112 | java -jar jenkins-cli.jar -s http://localhost:8080/ list-jobs 113 | ``` 114 | 115 | **5. Useful Jenkins Plugins:** 116 | 117 | - **Blue Ocean:** Modern UI for Jenkins pipelines. 118 | - **Git:** Integrate Git version control into Jenkins. 119 | - **Pipeline:** Enables Pipeline as Code. 120 | - **Credentials Binding:** Securely manage credentials. 121 | - **SonarQube Scanner:** Integrate code quality checks. 122 | - **Slack Notification:** Send pipeline status notifications to Slack. 123 | 124 | **6. Best Practices:** 125 | 126 | - **Pipeline as Code:** Always use Jenkins Pipelines defined in `Jenkinsfile` for consistent and version-controlled builds. 127 | - **Use Parameters:** Use parameters to make your pipelines flexible and reusable. 128 | 129 | ```groovy 130 | parameters { 131 | string(name: 'ENV', defaultValue: 'dev', description: 'Environment') 132 | } 133 | ``` 134 | 135 | - **Secure Jenkins:** Regularly update plugins, use RBAC, and secure the Jenkins instance with HTTPS. 136 | 137 | **7. Jenkins Configuration:** 138 | 139 | - **Manage Jenkins:** 140 | - Manage and configure global settings from the Jenkins dashboard under **Manage Jenkins**. 141 | - **Configure Tools:** Set up JDK, Maven, and other tools globally in **Global Tool Configuration**. 142 | - **Jenkinsfile Configuration:** 143 | - Define your pipeline stages, environment, and agents within a `Jenkinsfile` stored in your repository. 144 | 145 | **8. Advanced Jenkins:** 146 | 147 | - **Parallel Stages:** 148 | 149 | ```groovy 150 | pipeline { 151 | agent any 152 | stages { 153 | stage('Parallel') { 154 | parallel { 155 | stage('Unit Tests') { 156 | steps { 157 | sh 'make test' 158 | } 159 | } 160 | stage('Integration Tests') { 161 | steps { 162 | sh 'make integration-test' 163 | } 164 | } 165 | } 166 | } 167 | } 168 | } 169 | ``` 170 | 171 | - **Shared Libraries:** Centralize and reuse pipeline code across projects using Shared Libraries. 172 | 173 | ## **Troubleshooting** 174 | 175 | ### **Common Issues** 176 | 177 | 1. **Jenkins Won't Start** 178 | ```bash 179 | # Check logs 180 | sudo tail -f /var/log/jenkins/jenkins.log 181 | 182 | # Check permissions 183 | sudo chown -R jenkins:jenkins /var/lib/jenkins 184 | ``` 185 | 186 | 2. 
**Pipeline Failure** 187 | ```groovy 188 | // Add error handling 189 | pipeline { 190 | agent any 191 | stages { 192 | stage('Build') { 193 | steps { 194 | script { 195 | try { 196 | sh 'make build' 197 | } catch (exc) { 198 | echo 'Build failed!' 199 | throw exc 200 | } 201 | } 202 | } 203 | } 204 | } 205 | } 206 | ``` 207 | 208 | 3. **Plugin Issues** 209 | - Clear plugin cache: 210 | ```bash 211 | rm -rf $JENKINS_HOME/plugins/*.jpi 212 | rm -rf $JENKINS_HOME/plugins/*.hpi 213 | ``` 214 | - Restart Jenkins after plugin updates 215 | 216 | ## **Useful Plugins** 217 | 218 | 1. **Pipeline** 219 | - Pipeline Graph View 220 | - Pipeline Stage View 221 | - Blue Ocean 222 | 223 | 2. **Source Control** 224 | - Git 225 | - GitHub Integration 226 | - BitBucket Integration 227 | 228 | 3. **Build Tools** 229 | - Maven Integration 230 | - Gradle 231 | - NodeJS 232 | 233 | 4. **Testing** 234 | - JUnit 235 | - Cobertura 236 | - SonarQube Scanner 237 | 238 | 5. **Deployment** 239 | - Docker 240 | - Kubernetes 241 | - AWS 242 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # 📜 **Code of Conduct** 2 | 3 | > [!NOTE] 4 | > Thank you for being a part of our DevOps Cheatsheet! To ensure a welcoming and inclusive environment for everyone, we've established a Code of Conduct that all participants are expected to follow. 5 | 6 | ### **1. Be Respectful** 7 | 8 | - Treat everyone with respect, regardless of their background, experience level, or opinions. 9 | - Always be considerate and constructive in your feedback and interactions. 10 | - Harassment, bullying, and discriminatory language will not be tolerated. 11 | 12 | ### **2. Be Inclusive** 13 | 14 | - Strive to create a diverse and inclusive community where everyone feels welcome. 15 | - Be mindful of the language you use and the impact it may have on others. 16 | - Encourage and support contributions from people of all backgrounds. 17 | 18 | ### **3. Collaborate Openly** 19 | 20 | - Share knowledge, ideas, and feedback openly and constructively. 21 | - Help others whenever possible, and don’t hesitate to ask for help when you need it. 22 | - Recognize the contributions of others and give credit where it's due. 23 | 24 | ### **4. Follow Best Practices** 25 | 26 | - Write clear and concise documentation to help others understand your contributions. 27 | - Adhere to the project’s guidelines for coding, documentation, and communication. 28 | - Strive to improve the quality and usability of the project for everyone. 29 | 30 | ### **5. Report Issues Promptly** 31 | 32 | - If you witness or experience any violations of this Code of Conduct, report it immediately to the project maintainers. 33 | - All reports will be reviewed and investigated, and appropriate actions will be taken to address any issues. 34 | 35 | --- 36 | > [!IMPORTANT] 37 | > _By participating in this project, you agree to abide by this Code of Conduct. Let's work together to make this community a positive and productive space for everyone!_ 38 | > 39 | > _Please take a moment to read and understand the expected behavior when contributing to the project. 
Your cooperation is crucial in maintaining a welcoming and collaborative environment for all contributors._ 40 | > 41 | > _Thank you for your commitment to fostering a positive community!_ 42 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to DevOps Tools Cheatsheet Collection 2 | 3 | Thank you for considering contributing to the **DevOps Tools Cheatsheet Collection**! Your contributions help make this project a valuable resource for the DevOps community. 4 | 5 | ## 📜 Code of Conduct 6 | 7 | By participating in this project, you agree to abide by our [Code of Conduct](./CODE_OF_CONDUCT.md). Please read it to understand the expected behavior when contributing to the project. 8 | 9 | ## 🛠️ How to Contribute 10 | 11 | > [!TIP] 12 | > We welcome various types of contributions, including but not limited to: 13 | > 14 | > - **Adding New Cheatsheets**: Share your knowledge about a tool not yet covered. 15 | > - **Improving Existing Cheatsheets**: Update or enhance the content in existing files. 16 | > - **Fixing Issues**: Help resolve any bugs or errors found in the repository. 17 | > - **Providing Feedback**: Suggest new features, improvements, or corrections. 18 | 19 | ### 📝 Submitting a Pull Request 20 | 21 | 1. **Fork the Repository**: Start by forking this repository to your GitHub account. 22 | 2. **Create a New Branch**: Create a new branch in your fork for your contribution. For example: 23 | 24 | ```bash 25 | git checkout -b add-toolname-cheatsheet 26 | ``` 27 | 28 | 3. **Make Your Changes**: Edit or add files as necessary. Follow the format of existing cheatsheets for consistency. 29 | 4. **Commit Your Changes**: Write a clear and descriptive commit message. For example: 30 | 31 | ```bash 32 | git commit -m "Add cheatsheet for ToolName" 33 | ``` 34 | 35 | 5. **Push Your Changes**: Push the changes to your forked repository: 36 | 37 | ```bash 38 | git push origin add-toolname-cheatsheet 39 | ``` 40 | 41 | 6. **Submit a Pull Request**: Go to the original repository and submit a pull request. Provide a clear description of your changes and why they are beneficial. 42 | 43 | ### 🧐 Review Process 44 | 45 | > [!NOTE] 46 | > 47 | > - Your pull request will be reviewed by the maintainers of the project. 48 | > - Please be patient, as reviews can take some time depending on the complexity of the changes. 49 | > - You may be asked to make changes before your pull request is accepted. 50 | 51 | ## 📂 Directory Structure 52 | 53 | When adding a new cheatsheet, please ensure it is placed in the correct directory based on its category (e.g., `CI-CD`, `Containerization`, `Monitoring`, etc.). This helps maintain an organized structure for easy navigation. 54 | 55 | ### ✏️ Cheatsheet Format 56 | 57 | > [!TIP] 58 | > For consistency, please follow this basic format for new cheatsheets: 59 | > 60 | > - **Tool Name**: Title the file with the tool name (e.g., `Docker.md`). 61 | > - **Sections**: Include sections such as Basic Commands, Tips, Configuration, etc. 62 | > - **Examples**: Provide examples wherever possible. 63 | > - **Formatting**: Use Markdown for formatting (headings, bullet points, code blocks). 64 | 65 | ## 🤝 Community Guidelines 66 | 67 | > [!IMPORTANT] 68 | > 69 | > - **Be Respectful**: Keep interactions respectful and constructive. 70 | > - **Ask for Help**: If you're unsure about anything, feel free to ask in the discussion section. 
71 | > - **Stay on Topic**: Make sure your contributions align with the purpose of this repository. 72 | 73 | ## 📝 License 74 | 75 | By contributing to this repository, you agree that your contributions will be licensed under the MIT License. 76 | 77 | --- 78 | 79 | ### Thank you for your contribution! 🚀 80 | -------------------------------------------------------------------------------- /Containerization/Helm.md: -------------------------------------------------------------------------------- 1 | # Helm Cheatsheet 2 | 3 | ![text](https://imgur.com/nDW9BHK.png) 4 | 5 | **1. Introduction:** 6 | 7 | - **Helm** is a package manager for Kubernetes, helping you define, install, and upgrade even the most complex Kubernetes applications. It uses charts to package Kubernetes resources. 8 | 9 | **2. Key Concepts:** 10 | 11 | - **Chart:** A collection of files that describe a set of Kubernetes resources. 12 | - **Release:** An instance of a chart running in a Kubernetes cluster. 13 | - **Repository:** A place where charts can be collected and shared. 14 | 15 | **3. Installing Helm:** 16 | 17 | - **Helm Installation:** 18 | 19 | ```bash 20 | curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash 21 | ``` 22 | 23 | - **Add a Helm Repository:** 24 | 25 | ```bash 26 | helm repo add stable https://charts.helm.sh/stable 27 | helm repo update 28 | ``` 29 | 30 | **4. Helm Commands:** 31 | 32 | - **Install a Chart:** 33 | 34 | ```bash 35 | helm install my-release stable/nginx 36 | ``` 37 | 38 | - **List Releases:** 39 | 40 | ```bash 41 | helm list 42 | ``` 43 | 44 | - **Upgrade a Release:** 45 | 46 | ```bash 47 | helm upgrade my-release stable/nginx 48 | ``` 49 | 50 | - **Uninstall a Release:** 51 | 52 | ```bash 53 | helm uninstall my-release 54 | ``` 55 | 56 | - **Search for Charts:** 57 | 58 | ```bash 59 | helm search repo nginx 60 | ``` 61 | 62 | **5. Chart Structure:** 63 | 64 | - **Basic Chart Structure:** 65 | 66 | ``` 67 | my-chart/ 68 | ├── Chart.yaml 69 | ├── values.yaml 70 | ├── charts/ 71 | ├── templates/ 72 | │ ├── deployment.yaml 73 | │ ├── service.yaml 74 | │ └── _helpers.tpl 75 | ``` 76 | 77 | - **Chart.yaml:** 78 | 79 | ```yaml 80 | apiVersion: v2 81 | name: my-chart 82 | description: A Helm chart for Kubernetes 83 | version: 0.1.0 84 | ``` 85 | 86 | - **values.yaml:** 87 | 88 | ```yaml 89 | replicaCount: 3 90 | image: 91 | repository: nginx 92 | tag: stable 93 | ``` 94 | 95 | - **Template Example (deployment.yaml):** 96 | 97 | ```yaml 98 | apiVersion: apps/v1 99 | kind: Deployment 100 | metadata: 101 | name: {{ .Release.Name }}-nginx 102 | spec: 103 | replicas: {{ .Values.replicaCount }} 104 | selector: 105 | matchLabels: 106 | app: {{ .Release.Name }}-nginx 107 | template: 108 | metadata: 109 | labels: 110 | app: {{ .Release.Name }}-nginx 111 | spec: 112 | containers: 113 | - name: nginx 114 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 115 | ``` 116 | 117 | **6. Helm Lifecycle:** 118 | 119 | - **Creating a New Chart:** 120 | 121 | ```bash 122 | helm create my-chart 123 | ``` 124 | 125 | - **Templating:** 126 | - **List all template values:** 127 | 128 | ```bash 129 | helm template my-release my-chart 130 | ``` 131 | 132 | - **Lint a Chart:** 133 | 134 | ```bash 135 | helm lint my-chart 136 | ``` 137 | 138 | **7. 
Helm Repositories:**
139 | 
140 | - **Creating a Local Helm Repository:**
141 | 
142 |   ```bash
143 |   helm repo index ./charts --url http://example.com/charts
144 |   ```
145 | 
146 | - **Serving Charts:** `helm serve` was removed in Helm 3; host the indexed `charts/` directory with any static web server (for example, Python's built-in server shown below) or use ChartMuseum.
147 | 
148 |   ```bash
149 |   python3 -m http.server 8879 --directory ./charts
150 |   ```
151 | 
152 | **8. Helm Hooks:**
153 | 
154 | - **Example of a Pre-Install Hook:**
155 | 
156 |   ```yaml
157 |   apiVersion: batch/v1
158 |   kind: Job
159 |   metadata:
160 |     name: "{{ .Release.Name }}-preinstall"
161 |     annotations:
162 |       "helm.sh/hook": pre-install
163 |   spec:
164 |     template:
165 |       spec:
166 |         containers:
167 |           - name: preinstall
168 |             image: busybox
169 |             command: ['sh', '-c', 'echo Hello Helm']
170 |         restartPolicy: Never
171 |   ```
172 | 
173 | **9. Helm and CI/CD:**
174 | 
175 | - **Using Helm in Jenkins Pipeline:**
176 | 
177 |   ```groovy
178 |   pipeline {
179 |       agent any
180 |       stages {
181 |           stage('Deploy') {
182 |               steps {
183 |                   script {
184 |                       sh "helm upgrade --install my-release ./my-chart"
185 |                   }
186 |               }
187 |           }
188 |       }
189 |   }
190 |   ```
191 | 
192 | **10. Advanced Helm Concepts:**
193 | 
194 | - **Subcharts:** Use subcharts to package related Kubernetes resources together.
195 | - **ChartMuseum:** Helm repository server to store and manage Helm charts.
196 | - **Helmfile:** A declarative spec for deploying Helm charts.
197 | 
198 | **11. Helm Security:**
199 | 
200 | - **Chart Signing:**
201 |   - Sign and verify Helm charts to ensure integrity.
202 | 
203 |   ```bash
204 |   helm package --sign --key 'key-name' --keyring path/to/keyring.secret my-chart
205 |   helm verify my-chart-0.1.0.tgz
206 |   ```
207 | 
208 | - **RBAC:** Control access to Helm releases with Kubernetes RBAC.
209 | 
210 | **12. Troubleshooting Helm:**
211 | 
212 | - **Debugging a Chart Installation:**
213 | 
214 |   ```bash
215 |   helm install --debug --dry-run my-release ./my-chart
216 |   ```
217 | 
218 | - **Checking Helm Release History:**
219 | 
220 |   ```bash
221 |   helm history my-release
222 |   ```
223 | 
224 | - **Rollback a Release:**
225 | 
226 |   ```bash
227 |   helm rollback my-release 1
228 |   ```
229 | 
--------------------------------------------------------------------------------
/Containerization/Podman.md:
--------------------------------------------------------------------------------
1 | # Podman Cheatsheet
2 | 
3 | ![text](https://imgur.com/6x1bZIJ.png)
4 | 
5 | **1. Introduction:**
6 | 
7 | - **Podman** is an open-source container engine that performs much like Docker but without the daemon dependency. It supports the Open Container Initiative (OCI) standards for both containers and container images.
8 | 
9 | **2. Key Concepts:**
10 | 
11 | - **Pod:** A group of containers that run together and share resources, similar to a Kubernetes Pod.
12 | - **Rootless Containers:** Podman can run containers as a non-root user.
13 | - **Docker Compatibility:** Podman commands are similar to Docker, making it easy to switch between the two.
14 | 
15 | **3. Installation:**
16 | 
17 | - **On Fedora:**
18 | 
19 |   ```bash
20 |   sudo dnf install podman
21 |   ```
22 | 
23 | - **On Ubuntu:**
24 | 
25 |   ```bash
26 |   sudo apt-get -y install podman
27 |   ```
28 | 
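Before moving on to the day-to-day commands in the next section, it is worth a quick sanity check that the install above actually worked. A minimal check (nothing here is distro-specific) is:

```bash
# Print the installed Podman client version
podman --version

# Show host details: storage driver, cgroup manager, and whether the session is rootless
podman info
```

Both commands behave the same for root and rootless users, so they are a quick way to confirm which environment you are about to run containers in.
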
29 | **4. Basic Podman Commands:**
30 | 
31 | - **Run a Container:**
32 | 
33 |   ```bash
34 |   podman run -dt -p 8080:80 nginx
35 |   ```
36 | 
37 | - **List Running Containers:**
38 | 
39 |   ```bash
40 |   podman ps
41 |   ```
42 | 
43 | - **Stop a Container:**
44 | 
45 |   ```bash
46 |   podman stop container_id
47 |   ```
48 | 
49 | - **Remove a Container:**
50 | 
51 |   ```bash
52 |   podman rm container_id
53 |   ```
54 | 
55 | - **Build an Image:**
56 | 
57 |   ```bash
58 |   podman build -t my-image:latest .
59 |   ```
60 | 
61 | **5. Podman vs Docker:**
62 | 
63 | - **No Daemon:** Podman does not rely on a central daemon; each container is an isolated process.
64 | - **Rootless Mode:** Allows running containers without root privileges, enhancing security.
65 | - **Podman Pods:** Group containers under a single network namespace.
66 | 
67 | **6. Pods in Podman:**
68 | 
69 | - **Create a Pod:**
70 | 
71 |   ```bash
72 |   podman pod create --name mypod -p 8080:80
73 |   ```
74 | 
75 | - **Run a Container in a Pod:**
76 | 
77 |   ```bash
78 |   podman run -dt --pod mypod nginx
79 |   ```
80 | 
81 | - **Inspect a Pod:**
82 | 
83 |   ```bash
84 |   podman pod inspect mypod
85 |   ```
86 | 
87 | - **Stop a Pod:**
88 | 
89 |   ```bash
90 |   podman pod stop mypod
91 |   ```
92 | 
93 | **7. Networking:**
94 | 
95 | - **Podman Network Command:**
96 | 
97 |   ```bash
98 |   podman network create mynetwork
99 |   ```
100 | 
101 | - **Attaching a Container to a Network:**
102 | 
103 |   ```bash
104 |   podman run -dt --network mynetwork nginx
105 |   ```
106 | 
107 | **8. Storage Management:**
108 | 
109 | - **Mount a Volume:**
110 | 
111 |   ```bash
112 |   podman run -dt -v /host/data:/container/data nginx
113 |   ```
114 | 
115 | - **List Volumes:**
116 | 
117 |   ```bash
118 |   podman volume ls
119 |   ```
120 | 
121 | - **Create a Volume:**
122 | 
123 |   ```bash
124 |   podman volume create myvolume
125 |   ```
126 | 
127 | **9. Rootless Containers:**
128 | 
129 | - **Running Rootless:** Run Podman as a regular (non-root) user; rootless mode needs no separate flag.
130 | 
131 |   ```bash
132 |   podman run -dt -p 8080:80 nginx
133 |   ```
134 | 
135 | - **Inspect Rootless Mode:**
136 | 
137 |   ```bash
138 |   podman info --format '{{.Host.Security.Rootless}}'
139 |   ```
140 | 
141 | **10. Podman Compose:**
142 | 
143 | - **Install Podman Compose:**
144 | 
145 |   ```bash
146 |   pip3 install podman-compose
147 |   ```
148 | 
149 | - **Using Docker Compose with Podman:**
150 | 
151 |   ```bash
152 |   podman-compose up
153 |   ```
154 | 
155 | **11. Troubleshooting Podman:**
156 | 
157 | - **Check Podman Logs:**
158 | 
159 |   ```bash
160 |   podman logs container_id
161 |   ```
162 | 
163 | - **Check Network Configuration:**
164 | 
165 |   ```bash
166 |   podman network inspect mynetwork
167 |   ```
168 | 
169 | - **Debugging Podman Containers:**
170 | 
171 |   ```bash
172 |   podman exec -it container_id /bin/bash
173 |   ```
174 | 
175 | **12. Podman in CI/CD:**
176 | 
177 | - **Using Podman in GitLab CI:**
178 | 
179 |   ```yaml
180 |   image: quay.io/podman/stable
181 | 
182 |   build:
183 |     script:
184 |       - podman build -t myimage .
185 |       - podman push myimage registry.example.com/myimage:latest
186 |   ```
187 | 
188 | **13. Security Best Practices:**
189 | 
190 | - **Run Containers as Non-Root:**
191 |   - Use rootless mode or specify a non-root user in the container.
192 | 
193 |   ```bash
194 |   podman run -dt -u 1001 nginx
195 |   ```
196 | 
197 | - **Use SELinux:**
198 |   - Enable SELinux for added security on supported systems.
199 | 
200 |   ```bash
201 |   podman run -dt --security-opt label=type:container_runtime_t nginx
202 |   ```
203 | 
204 | **14. 
Migrating from Docker to Podman:** 205 | 206 | - **Docker Compatibility Mode:** 207 | 208 | ```bash 209 | alias docker=podman 210 | ``` 211 | 212 | - **Importing Docker Images:** 213 | 214 | ```bash 215 | podman pull docker-daemon:nginx:latest 216 | ``` 217 | 218 | **15. Podman on Kubernetes:** 219 | 220 | - **CRI-O Integration:** 221 | - Podman can be used with CRI-O as a runtime for Kubernetes, allowing seamless integration with Kubernetes clusters. 222 | -------------------------------------------------------------------------------- /Infrastructure-Management/Ansible.md: -------------------------------------------------------------------------------- 1 | # 📜 **Ansible Cheatsheet** 2 | 3 | ![ansible](https://imgur.com/XwECXoK.png) 4 | 5 | ## **🔹 Introduction to Ansible** 6 | 7 | ### ✅ What is Ansible? 8 | 9 | Ansible is an **open-source automation tool** used for: 10 | ✅ **Configuration Management** (e.g., installing & managing software on servers) 11 | ✅ **Application Deployment** (e.g., deploying a web app on multiple servers) 12 | ✅ **Orchestration** (e.g., managing multi-tier applications like load balancer + DB) 13 | ✅ **Provisioning** (e.g., setting up cloud infrastructure with AWS, Azure, GCP) 14 | 15 | ### ✅ Why Use Ansible? 16 | 17 | 🔹 **Agentless:** No need to install agents on target machines (uses SSH & WinRM) 18 | 🔹 **Idempotent:** Runs multiple times without unwanted changes 19 | 🔹 **Human-Readable:** Uses YAML playbooks 20 | 🔹 **Cross-Platform:** Works on **Linux, Windows, macOS, Cloud Servers** 21 | 22 | --- 23 | 24 | ## **🛠️ 1. Installing & Setting Up Ansible** 25 | 26 | ### ✅ Installing Ansible on Linux 27 | 28 | ```bash 29 | # Ubuntu/Debian 30 | sudo apt update 31 | sudo apt install -y ansible 32 | 33 | # CentOS/RHEL 34 | sudo yum install -y ansible 35 | ``` 36 | 37 | ### ✅ Checking Installation 38 | 39 | ```bash 40 | ansible --version 41 | ``` 42 | 43 | ### ✅ Setting Up an Inventory File 44 | 45 | An **inventory file** (`/etc/ansible/hosts`) tells Ansible where to connect. 46 | Example: 47 | 48 | ```ini 49 | [webservers] 50 | server1 ansible_host=192.168.1.10 ansible_user=ubuntu 51 | server2 ansible_host=192.168.1.11 ansible_user=ubuntu 52 | 53 | [dbservers] 54 | db1 ansible_host=192.168.1.20 ansible_user=root 55 | ``` 56 | 57 | ### ✅ Testing Connectivity with `ping` 58 | 59 | ```bash 60 | ansible all -m ping 61 | ``` 62 | 63 | 📌 If successful, you'll see: 64 | 65 | ```bash 66 | server1 | SUCCESS => {"changed": false, "ping": "pong"} 67 | server2 | SUCCESS => {"changed": false, "ping": "pong"} 68 | ``` 69 | 70 | --- 71 | 72 | ## **🚀 2. Running Ad-Hoc Commands (Quick Tasks Without a Playbook)** 73 | 74 | ✅ **Check disk usage** 75 | 76 | ```bash 77 | ansible all -m command -a "df -h" 78 | ``` 79 | 80 | ✅ **Check system uptime** 81 | 82 | ```bash 83 | ansible all -m command -a "uptime" 84 | ``` 85 | 86 | ✅ **Create a directory on remote hosts** 87 | 88 | ```bash 89 | ansible all -m file -a "path=/opt/newdir state=directory" 90 | ``` 91 | 92 | ✅ **Copy files to remote servers** 93 | 94 | ```bash 95 | ansible all -m copy -a "src=/tmp/file.txt dest=/home/ubuntu/file.txt" 96 | ``` 97 | 98 | ✅ **Install a package (e.g., nginx) on all web servers** 99 | 100 | ```bash 101 | ansible webservers -m apt -a "name=nginx state=present" --become 102 | ``` 103 | 104 | ✅ **Restart a service (e.g., nginx)** 105 | 106 | ```bash 107 | ansible webservers -m service -a "name=nginx state=restarted" --become 108 | ``` 109 | 110 | --- 111 | 112 | ## **📜 3. 
Writing Ansible Playbooks (Automation Scripts)** 113 | 114 | ✅ **What is a Playbook?** 115 | A **playbook** is a YAML file that contains tasks to **automate configuration**. 116 | 117 | ### **🔹 Basic Playbook Example** 118 | 119 | ```yaml 120 | - name: Install and Start Nginx 121 | hosts: webservers 122 | become: yes # Run as sudo 123 | tasks: 124 | - name: Install Nginx 125 | apt: 126 | name: nginx 127 | state: present 128 | 129 | - name: Start Nginx 130 | service: 131 | name: nginx 132 | state: started 133 | ``` 134 | 135 | ✅ **Run the Playbook** 136 | 137 | ```bash 138 | ansible-playbook playbook.yml 139 | ``` 140 | 141 | --- 142 | 143 | ## **🔹 4. Using Variables in Ansible** 144 | 145 | ✅ **Define Variables in a Playbook** 146 | 147 | ```yaml 148 | - name: Install a Package with a Variable 149 | hosts: webservers 150 | vars: 151 | package_name: nginx 152 | tasks: 153 | - name: Install Package 154 | apt: 155 | name: "{{ package_name }}" 156 | state: present 157 | ``` 158 | 159 | ✅ **Use Built-in Ansible Facts** 160 | 161 | ```bash 162 | ansible all -m setup 163 | ``` 164 | 165 | Example Fact Usage in Playbook: 166 | 167 | ```yaml 168 | - name: Display System Information 169 | hosts: all 170 | tasks: 171 | - debug: 172 | msg: "This server is running {{ ansible_distribution }} {{ ansible_distribution_version }}" 173 | ``` 174 | 175 | --- 176 | 177 | ## **🔹 5. Loops & Conditionals** 178 | 179 | ✅ **Loop Example (Install Multiple Packages)** 180 | 181 | ```yaml 182 | - name: Install Multiple Packages 183 | hosts: webservers 184 | become: yes 185 | tasks: 186 | - name: Install Packages 187 | apt: 188 | name: "{{ item }}" 189 | state: present 190 | loop: 191 | - nginx 192 | - curl 193 | - unzip 194 | ``` 195 | 196 | ✅ **Conditional Execution** 197 | 198 | ```yaml 199 | - name: Restart Nginx Only If Needed 200 | hosts: webservers 201 | become: yes 202 | tasks: 203 | - name: Check if Nginx is Running 204 | shell: pgrep nginx 205 | register: nginx_running 206 | ignore_errors: yes 207 | 208 | - name: Restart Nginx 209 | service: 210 | name: nginx 211 | state: restarted 212 | when: nginx_running.rc == 0 213 | ``` 214 | 215 | --- 216 | 217 | ## **📂 6. Ansible Roles (Best Practices for Large Projects)** 218 | 219 | ✅ **Generate an Ansible Role Structure** 220 | 221 | ```bash 222 | ansible-galaxy init my_role 223 | ``` 224 | 225 | 📌 This creates a structured directory like: 226 | 227 | ```plaintext 228 | my_role/ 229 | ├── tasks/ 230 | │ └── main.yml 231 | ├── handlers/ 232 | │ └── main.yml 233 | ├── templates/ 234 | ├── files/ 235 | ├── vars/ 236 | │ └── main.yml 237 | ├── defaults/ 238 | │ └── main.yml 239 | ├── meta/ 240 | │ └── main.yml 241 | ├── README.md 242 | ``` 243 | 244 | ✅ **Use Roles in a Playbook** 245 | 246 | ```yaml 247 | - name: Deploy Web Server 248 | hosts: webservers 249 | roles: 250 | - nginx_role 251 | ``` 252 | 253 | --- 254 | 255 | ## **🔐 7. 
Ansible Vault (Encrypting Secrets)** 256 | 257 | ✅ **Create an Encrypted File** 258 | 259 | ```bash 260 | ansible-vault create secrets.yml 261 | ``` 262 | 263 | ✅ **Edit an Encrypted File** 264 | 265 | ```bash 266 | ansible-vault edit secrets.yml 267 | ``` 268 | 269 | ✅ **Use Vault in Playbooks** 270 | 271 | ```yaml 272 | - name: Deploy with Encrypted Secrets 273 | hosts: webservers 274 | vars_files: 275 | - secrets.yml 276 | tasks: 277 | - debug: 278 | msg: "The secret password is {{ secret_password }}" 279 | ``` 280 | 281 | ✅ **Run Playbook with Vault Password Prompt** 282 | 283 | ```bash 284 | ansible-playbook playbook.yml --ask-vault-pass 285 | ``` 286 | 287 | --- 288 | 289 | ## **🎯 8. Useful Ansible Commands** 290 | 291 | ✅ **Check Playbook Syntax** 292 | 293 | ```bash 294 | ansible-playbook playbook.yml --syntax-check 295 | ``` 296 | 297 | ✅ **Dry Run (Test Without Executing Changes)** 298 | 299 | ```bash 300 | ansible-playbook playbook.yml --check 301 | ``` 302 | 303 | ✅ **List All Available Modules** 304 | 305 | ```bash 306 | ansible-doc -l 307 | ``` 308 | 309 | ✅ **Get Help for a Specific Module** 310 | 311 | ```bash 312 | ansible-doc apt 313 | ``` 314 | 315 | --- 316 | 317 | ## 🎯 **Conclusion** 318 | 319 | This **Ansible Cheatsheet** provides a **step-by-step guide** from **beginner to advanced**. 320 | 321 | 🚀 **Next Steps:** 322 | ✅ **Practice with real-world playbooks** 323 | ✅ **Use roles for better structuring** 324 | ✅ **Secure credentials with Ansible Vault** 325 | ✅ **Automate cloud infrastructure with Terraform + Ansible** 326 | 327 | 🔗 **Contribute to the Cheatsheet Collection:** [GitHub Repo](https://github.com/NotHarshhaa/devops-cheatsheet) 328 | -------------------------------------------------------------------------------- /Monitoring/ELK-Stack.md: -------------------------------------------------------------------------------- 1 | # ELK Stack Cheatsheet 2 | 3 | ![text](https://imgur.com/wLayBA4.png) 4 | 5 | **1. Introduction:** 6 | 7 | - The **ELK Stack** is a powerful suite of open-source tools: **Elasticsearch** for search and analytics, **Logstash** for data processing, and **Kibana** for visualization. It's often extended with **Beats** for data collection and **X-Pack** for additional features. 8 | 9 | **2. Elasticsearch:** 10 | 11 | - **Installing Elasticsearch:** 12 | 13 | ```bash 14 | wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.2-x86_64.rpm 15 | sudo rpm -ivh elasticsearch-7.10.2-x86_64.rpm 16 | sudo systemctl start elasticsearch 17 | sudo systemctl enable elasticsearch 18 | ``` 19 | 20 | - **Basic Configuration:** 21 | - Edit `/etc/elasticsearch/elasticsearch.yml`: 22 | 23 | ```yaml 24 | network.host: localhost 25 | http.port: 9200 26 | ``` 27 | 28 | - **Basic Queries:** 29 | 30 | ```bash 31 | curl -X GET "localhost:9200/_cat/indices?v" 32 | curl -X GET "localhost:9200/my-index/_search?q=user:john" 33 | ``` 34 | 35 | - **Indexing Documents:** 36 | 37 | ```bash 38 | curl -X POST "localhost:9200/my-index/_doc/1" -H 'Content-Type: application/json' -d' 39 | { 40 | "user": "john", 41 | "message": "Hello, Elasticsearch!" 42 | }' 43 | ``` 44 | 45 | - **Elasticsearch Cluster:** 46 | - Configure multi-node clusters by setting `cluster.name`, `node.name`, and `discovery.seed_hosts` in `elasticsearch.yml`. 47 | 48 | **3. 
Logstash:** 49 | 50 | - **Installing Logstash:** 51 | 52 | ```bash 53 | wget https://artifacts.elastic.co/downloads/logstash/logstash-7.10.2.rpm 54 | sudo rpm -ivh logstash-7.10.2.rpm 55 | sudo systemctl start logstash 56 | sudo systemctl enable logstash 57 | ``` 58 | 59 | - **Logstash Configuration:** 60 | 61 | ```yaml 62 | input { 63 | file { 64 | path => "/var/log/syslog" 65 | start_position => "beginning" 66 | } 67 | } 68 | filter { 69 | grok { 70 | match => { "message" => "%{SYSLOGLINE}" } 71 | } 72 | } 73 | output { 74 | elasticsearch { 75 | hosts => ["localhost:9200"] 76 | index => "syslog-%{+YYYY.MM.dd}" 77 | } 78 | } 79 | ``` 80 | 81 | - **Running Logstash:** 82 | 83 | ```bash 84 | sudo systemctl start logstash 85 | ``` 86 | 87 | - **Using Beats with Logstash:** 88 | - Use **Filebeat**, **Metricbeat**, or **Packetbeat** to ship data to Logstash for processing. 89 | 90 | **4. Kibana:** 91 | 92 | - **Installing Kibana:** 93 | 94 | ```bash 95 | wget https://artifacts.elastic.co/downloads/kibana/kibana-7.10.2-x86_64.rpm 96 | sudo rpm -ivh kibana-7.10.2-x86_64.rpm 97 | sudo systemctl start kibana 98 | sudo systemctl enable kibana 99 | ``` 100 | 101 | - **Basic Configuration:** 102 | - Edit `/etc/kibana/kibana.yml`: 103 | 104 | ```yaml 105 | server.port: 5601 106 | server.host: "localhost" 107 | elasticsearch.hosts: ["http://localhost:9200"] 108 | ``` 109 | 110 | - **Creating Visualizations:** 111 | 1. Navigate to **Visualize** in the Kibana interface. 112 | 2. Choose a visualization type (e.g., line chart, pie chart). 113 | 3. Select the data source and configure your queries. 114 | 4. Save and add the visualization to a dashboard. 115 | 116 | - **Kibana Dashboards:** 117 | - Use dashboards to combine multiple visualizations into a single view, useful for monitoring and analysis. 118 | 119 | **5. Beats:** 120 | 121 | - **Filebeat:** 122 | - **Installing Filebeat:** 123 | 124 | ```bash 125 | wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.10.2-x86_64.rpm 126 | sudo rpm -ivh filebeat-7.10.2-x86_64.rpm 127 | sudo systemctl start filebeat 128 | sudo systemctl enable filebeat 129 | ``` 130 | 131 | - **Configuring Filebeat:** 132 | 133 | ```yaml 134 | filebeat.inputs: 135 | - type: log 136 | paths: 137 | - /var/log/syslog 138 | 139 | output.elasticsearch: 140 | hosts: ["localhost:9200"] 141 | ``` 142 | 143 | - **Running Filebeat:** 144 | 145 | ```bash 146 | sudo systemctl start filebeat 147 | ``` 148 | 149 | - **Metricbeat:** 150 | - Collects metrics from the system and services like MySQL, Docker, etc. 151 | 152 | - **Packetbeat:** 153 | - Captures network traffic and analyzes protocols. 154 | 155 | **6. Security in ELK Stack:** 156 | 157 | - **Enabling HTTPS in Elasticsearch:** 158 | 159 | ```yaml 160 | xpack.security.enabled: true 161 | xpack.security.http.ssl.enabled: true 162 | xpack.security.http.ssl.keystore.path: /path/to/keystore.jks 163 | ``` 164 | 165 | - **User Authentication:** 166 | - Use **X-Pack** to manage users, roles, and permissions. 167 | 168 | **7. ELK Stack in Kubernetes:** 169 | 170 | - **Deploying ELK Stack:** 171 | - Use Helm charts to deploy the ELK stack in Kubernetes for easier management and scaling. 172 | 173 | **8. Troubleshooting ELK Stack:** 174 | 175 | - **Common Issues:** 176 | - **High Memory Usage:** Optimize the heap size in Elasticsearch. 177 | - **Logstash Performance:** Tune pipeline workers 178 | 179 | and batch size. 
180 | 181 | - **Debugging:** 182 | - Check logs for Elasticsearch (`/var/log/elasticsearch/`), Logstash (`/var/log/logstash/`), and Kibana (`/var/log/kibana/`). 183 | - Use `curl` to test Elasticsearch endpoints and ensure services are running. 184 | -------------------------------------------------------------------------------- /Monitoring/Grafana.md: -------------------------------------------------------------------------------- 1 | # Grafana Cheatsheet 2 | 3 | ![text](https://imgur.com/j07r4L6.png) 4 | 5 | **1. Introduction:** 6 | 7 | - **Grafana** is an open-source platform for monitoring and observability that allows you to query, visualize, and alert on metrics from multiple data sources like Prometheus, InfluxDB, Elasticsearch, and more. 8 | 9 | **2. Key Concepts:** 10 | 11 | - **Dashboard:** A collection of panels organized into a grid. 12 | - **Panel:** A visualization of data (graphs, charts, etc.) from a specific data source. 13 | - **Data Source:** The database or service that provides the metrics for Grafana to visualize. 14 | - **Alerting:** Set up conditions to trigger notifications when metrics meet specific criteria. 15 | 16 | **3. Installation:** 17 | 18 | - **Running Grafana:** 19 | 20 | ```bash 21 | sudo apt-get install -y adduser libfontconfig1 22 | wget https://dl.grafana.com/oss/release/grafana_7.5.7_amd64.deb 23 | sudo dpkg -i grafana_7.5.7_amd64.deb 24 | sudo systemctl start grafana-server 25 | sudo systemctl enable grafana-server 26 | ``` 27 | 28 | - **Docker:** 29 | 30 | ```bash 31 | docker run -d -p 3000:3000 --name=grafana grafana/grafana 32 | ``` 33 | 34 | **4. Configuring Data Sources:** 35 | 36 | - **Adding Prometheus as a Data Source:** 37 | 1. Navigate to **Configuration > Data Sources**. 38 | 2. Click on **Add data source** and select **Prometheus**. 39 | 3. Enter the URL of your Prometheus server (e.g., `http://localhost:9090`). 40 | 4. Click **Save & Test** to verify the connection. 41 | 42 | - **Adding Elasticsearch as a Data Source:** 43 | 1. Navigate to **Configuration > Data Sources**. 44 | 2. Click on **Add data source** and select **Elasticsearch**. 45 | 3. Enter the URL, index name, and time field. 46 | 4. Click **Save & Test** to verify the connection. 47 | 48 | **5. Building Dashboards:** 49 | 50 | - **Creating a New Dashboard:** 51 | 1. Click the **+** icon in the sidebar and select **Dashboard**. 52 | 2. Click **Add new panel**. 53 | 3. Choose your data source and write a query (e.g., `rate(http_requests_total[5m])` for Prometheus). 54 | 4. Select a visualization type (e.g., **Graph**, **Stat**, **Gauge**). 55 | 5. Save the panel and the dashboard. 56 | 57 | - **Using Variables:** 58 | - **Creating a Variable:** 59 | 1. Go to **Dashboard settings** > **Variables** > **New**. 60 | 2. Set the **Name**, **Type** (e.g., **Query**), and **Query**. 61 | 3. Use the variable in panel queries by referencing it as **`$variable_name`**. 62 | 63 | **6. Alerting:** 64 | 65 | - **Creating Alerts:** 66 | 67 | 1. Add a panel to your dashboard. 68 | 2. In the **Alert** tab, click **Create Alert**. 69 | 3. Set the **Conditions** for triggering the alert (e.g., when a metric crosses a threshold). 70 | 4. Define the **Evaluation Interval** and **No Data** options. 71 | 5. Configure **Notifications** to send alerts via email, Slack, or other channels. 72 | 73 | - **Managing Alerts:** 74 | - Alerts can be managed centrally through the **Alerting** section in the sidebar. 75 | 76 | **7. 
Grafana Plugins:** 77 | 78 | - **Installing Plugins:** 79 | 80 | ```bash 81 | grafana-cli plugins install grafana-piechart-panel 82 | sudo systemctl restart grafana-server 83 | ``` 84 | 85 | - **Popular Plugins:** 86 | - **Pie Chart Panel:** Display metrics in a pie chart. 87 | - **Worldmap Panel:** Visualize data on a world map. 88 | - **Alert List Panel:** Display active alerts from multiple sources. 89 | 90 | **8. Dashboard Templating:** 91 | 92 | - **Using Templated Dashboards:** 93 | - Leverage variables to create dynamic dashboards that can change based on user input. 94 | 95 | - **Dynamic Panels:** 96 | - Create repeating panels or rows based on variable values (e.g., show metrics per host). 97 | 98 | **9. Customizing Grafana:** 99 | 100 | - **Themes:** 101 | - Switch between light and dark themes via **Preferences** in the dashboard settings. 102 | 103 | - **Custom Branding:** 104 | - Modify Grafana's appearance by adding custom logos and colors. Requires editing configuration files and CSS. 105 | 106 | **10. Securing Grafana:** 107 | 108 | - **User Management:** 109 | - Add users and assign them roles such as Viewer, Editor, or Admin. 110 | 111 | - **LDAP/SSO Integration:** 112 | - Configure Grafana to use LDAP or Single Sign-On (SSO) for user authentication. 113 | 114 | - **Enabling HTTPS:** 115 | 116 | ```yaml 117 | [server] 118 | protocol = https 119 | cert_file = /path/to/cert.crt 120 | cert_key = /path/to/cert.key 121 | ``` 122 | 123 | **11. Advanced Queries and Visualizations:** 124 | 125 | - **Grafana with PromQL:** 126 | - Use advanced PromQL queries for more complex visualizations. 127 | 128 | - **Annotations:** 129 | - Add annotations to mark specific events on graphs, useful for correlating issues with changes or incidents. 130 | 131 | **12. Grafana Loki:** 132 | 133 | - **Introduction to Loki:** 134 | - Grafana Loki is a horizontally scalable, highly available log aggregation system inspired by Prometheus. 135 | 136 | - **Setting up Loki:** 137 | 138 | ```bash 139 | docker run -d --name=loki -p 3100:3100 grafana/loki:2.2.0 -config.file=/etc/loki/local-config.yaml 140 | ``` 141 | 142 | - **Querying Logs in Grafana:** 143 | - Use **Loki** as a data source to query and visualize logs alongside metrics. 144 | 145 | **13. Grafana in Kubernetes:** 146 | 147 | - **Deploying Grafana in Kubernetes:** 148 | 149 | ```yaml 150 | apiVersion: apps/v1 151 | kind: Deployment 152 | metadata: 153 | name: grafana 154 | spec: 155 | replicas: 1 156 | selector: 157 | matchLabels: 158 | app: grafana 159 | template: 160 | metadata: 161 | labels: 162 | app: grafana 163 | spec: 164 | containers: 165 | - name: grafana 166 | image: grafana/grafana:7.5.7 167 | ports: 168 | - containerPort: 3000 169 | ``` 170 | 171 | **14. Troubleshooting Grafana:** 172 | 173 | - **Common Issues:** 174 | - **No Data:** Check data source configuration and queries. 175 | - **Slow Dashboards:** Optimize queries and reduce the time range. 176 | - **Plugin Errors:** Ensure plugins are compatible with your Grafana version. 177 | 178 | - **Debugging:** 179 | - View logs at `/var/log/grafana/grafana.log` for error details. 180 | - Use **`curl`** to test data source connectivity (e.g., `curl http://localhost:9090` for Prometheus). 181 | -------------------------------------------------------------------------------- /Monitoring/Nagios.md: -------------------------------------------------------------------------------- 1 | # Nagios Cheatsheet 2 | 3 | ![text](https://imgur.com/O9DGMee.png) 4 | 5 | **1. 
Introduction:** 6 | 7 | - **Nagios** is a powerful open-source monitoring tool that provides comprehensive monitoring of systems, networks, and infrastructure. It is known for its robustness, flexibility, and extensive plugin system. 8 | 9 | **2. Installation:** 10 | 11 | - **Installing Nagios Core:** 12 | 13 | ```bash 14 | sudo apt-get update 15 | sudo apt-get install -y build-essential libgd2-xpm-dev openssl libssl-dev xinetd apache2-utils unzip 16 | wget https://assets.nagios.com/downloads/nagioscore/releases/nagios-4.4.6.tar.gz 17 | tar -xzf nagios-4.4.6.tar.gz 18 | cd nagios-4.4.6/ 19 | ./configure --with-httpd-conf=/etc/apache2/sites-enabled 20 | make all 21 | sudo make install 22 | sudo make install-commandmode 23 | sudo make install-init 24 | sudo make install-config 25 | sudo make install-webconf 26 | ``` 27 | 28 | - **Starting Nagios:** 29 | 30 | ```bash 31 | sudo systemctl start nagios 32 | sudo systemctl enable nagios 33 | ``` 34 | 35 | **3. Configuration:** 36 | 37 | - **Basic Configuration:** 38 | - Nagios configuration files are typically located in `/usr/local/nagios/etc/`. 39 | 40 | - **Defining a Host:** 41 | 42 | ```cfg 43 | define host { 44 | use linux-server 45 | host_name myserver 46 | alias My Linux Server 47 | address 192.168.1.1 48 | } 49 | ``` 50 | 51 | - **Defining a Service:** 52 | 53 | ```cfg 54 | define service { 55 | use generic-service 56 | host_name myserver 57 | service_description HTTP 58 | check_command check_http 59 | } 60 | ``` 61 | 62 | **4. Nagios Plugins:** 63 | 64 | - **Installing Plugins:** 65 | 66 | ```bash 67 | wget https://nagios-plugins.org/download/nagios-plugins-2.3.3.tar.gz 68 | tar -xzf nagios-plugins-2.3.3.tar.gz 69 | cd nagios-plugins-2.3.3/ 70 | ./configure 71 | make 72 | sudo make install 73 | ``` 74 | 75 | - **Common Plugins:** 76 | - **check_ping:** Monitors network connectivity. 77 | - **check_http:** Monitors HTTP servers. 78 | - **check_disk:** Monitors disk usage. 79 | 80 | **5. Notifications:** 81 | 82 | - **Setting Up Email Notifications:** 83 | - Configure email settings in `/usr/local/nagios/etc/objects/contacts.cfg`: 84 | 85 | ```cfg 86 | define contact { 87 | contact_name nagiosadmin 88 | use generic-contact 89 | alias Nagios Admin 90 | email nagios@yourdomain.com 91 | } 92 | ``` 93 | 94 | - **Notification Commands:** 95 | - Use commands like `notify-host-by-email` and `notify-service-by-email` to define how notifications are sent. 96 | 97 | **6. Web Interface:** 98 | 99 | - **Accessing Nagios Web Interface:** 100 | - Nagios web interface is usually accessible at `http:///nagios`. 101 | - Default credentials: `nagiosadmin` and the password set during installation. 102 | 103 | - **Customizing the Interface:** 104 | - Modify the theme and layout by editing files in `/usr/local/nagios/share`. 105 | 106 | **7. Monitoring Remote Hosts:** 107 | 108 | - **NRPE (Nagios Remote Plugin Executor):** 109 | - **Installing NRPE:** 110 | 111 | ```bash 112 | sudo apt-get install nagios-nrpe-server nagios-plugins 113 | sudo systemctl start nagios-nrpe-server 114 | ``` 115 | 116 | - **Configuring NRPE:** 117 | - Edit `/etc/nagios/nrpe.cfg` to define allowed hosts and monitored services. 118 | 119 | ```cfg 120 | allowed_hosts=127.0.0.1,192.168.1.100 121 | command[check_disk]=/usr/lib/nagios/plugins/check_disk -w 20% -c 10% -p /dev/sda1 122 | ``` 123 | 124 | - **Monitoring with NRPE:** 125 | - Add a service in Nagios to monitor a remote host using NRPE. 
126 | 127 | ```cfg 128 | define service { 129 | use generic-service 130 | host_name remotehost 131 | service_description Disk Usage 132 | check_command check_nrpe!check_disk 133 | } 134 | ``` 135 | 136 | **8. Nagios XI:** 137 | 138 | - **Introduction to Nagios XI:** 139 | - Nagios XI is the commercial version of Nagios Core, providing additional features like a more user-friendly interface, reporting, and advanced monitoring capabilities. 140 | 141 | - **Differences from Nagios Core:** 142 | - Built-in wizards, easier configuration, and more extensive support. 143 | 144 | **9. Advanced Nagios Concepts:** 145 | 146 | - **Passive Checks:** 147 | - Useful for monitoring systems where Nagios cannot initiate checks, but the system can send results to Nagios. 148 | 149 | - **Distributed Monitoring:** 150 | - Implement distributed monitoring by setting up multiple Nagios servers and configuring them to send data to a central Nagios server. 151 | 152 | **10. Securing Nagios:** 153 | 154 | - **Enabling HTTPS:** 155 | - Configure Apache to serve Nagios over HTTPS. 156 | 157 | ```bash 158 | sudo a2enmod ssl 159 | sudo service apache2 restart 160 | ``` 161 | 162 | - Update Nagios configuration in `/etc/apache2/sites-available/nagios.conf` to use SSL certificates. 163 | 164 | - **User Authentication:** 165 | - Use `.htpasswd` files to manage user access to the Nagios web interface. 166 | 167 | **11. Troubleshooting Nagios:** 168 | 169 | - **Common Issues:** 170 | - **Service Check Failing:** Ensure plugins are executable and paths are correct. 171 | - **Email Notifications Not Working:** Verify the mail server configuration and check the `maillog` for errors. 172 | 173 | - **Debugging:** 174 | - Use the Nagios log file at `/usr/local/nagios/var/nagios.log` to troubleshoot issues. 175 | - Run checks manually to verify plugin output. 176 | 177 | ```bash 178 | /usr/local/nagios/libexec/check_http -I 127.0.0.1 179 | ``` 180 | 181 | **12. Nagios and Docker:** 182 | 183 | - **Running Nagios in Docker:** 184 | 185 | ```bash 186 | docker run --name nagios -p 0.0.0.0:8080:80 jasonrivers/nagios 187 | ``` 188 | 189 | - **Customizing Dockerized Nagios:** 190 | - Mount volumes to add custom configurations and plugins. 191 | 192 | ```bash 193 | docker run --name nagios -v /path/to/nagios.cfg:/usr/local/nagios/etc/nagios.cfg jasonrivers/nagios 194 | ``` 195 | -------------------------------------------------------------------------------- /Monitoring/Prometheus.md: -------------------------------------------------------------------------------- 1 | # Prometheus Cheatsheet 2 | 3 | ![text](https://imgur.com/nthHFQk.png) 4 | 5 | **1. Introduction:** 6 | 7 | - **Prometheus** is an open-source systems monitoring and alerting toolkit, particularly well-suited for monitoring dynamic, cloud-native environments such as Kubernetes. It uses a pull-based model to scrape metrics from configured endpoints. 8 | 9 | **2. Key Concepts:** 10 | 11 | - **Metrics:** Data points collected over time, usually in the form of time series. 12 | - **PromQL:** Prometheus Query Language used to query the collected metrics. 13 | - **Exporters:** Components that expose metrics in a format that Prometheus can scrape. 14 | - **Alertmanager:** Manages alerts generated by Prometheus. 15 | 16 | **3. 
Installation:** 17 | 18 | - **Running Prometheus:** 19 | 20 | ```bash 21 | wget https://github.com/prometheus/prometheus/releases/download/v2.30.0/prometheus-2.30.0.linux-amd64.tar.gz 22 | tar xvfz prometheus-*.tar.gz 23 | cd prometheus-* 24 | ./prometheus --config.file=prometheus.yml 25 | ``` 26 | 27 | - **Docker:** 28 | 29 | ```bash 30 | docker run -p 9090:9090 prom/prometheus 31 | ``` 32 | 33 | **4. Prometheus Configuration:** 34 | 35 | - **Basic `prometheus.yml` Configuration:** 36 | 37 | ```yaml 38 | global: 39 | scrape_interval: 15s 40 | 41 | scrape_configs: 42 | - job_name: 'prometheus' 43 | static_configs: 44 | - targets: ['localhost:9090'] 45 | ``` 46 | 47 | - **Adding Targets:** 48 | 49 | ```yaml 50 | - job_name: 'node_exporter' 51 | static_configs: 52 | - targets: ['localhost:9100'] 53 | ``` 54 | 55 | **5. Prometheus Query Language (PromQL):** 56 | 57 | - **Basic Queries:** 58 | 59 | ```promql 60 | up 61 | rate(http_requests_total[5m]) 62 | ``` 63 | 64 | - **Aggregations:** 65 | 66 | ```promql 67 | sum(rate(http_requests_total[5m])) 68 | avg_over_time(http_requests_total[5m]) 69 | ``` 70 | 71 | - **Recording Rules:** 72 | 73 | ```yaml 74 | groups: 75 | - name: example 76 | rules: 77 | - record: job:http_inprogress_requests:sum 78 | expr: sum(http_inprogress_requests) by (job) 79 | ``` 80 | 81 | **6. Exporters:** 82 | 83 | - **Node Exporter:** Collects system-level metrics. 84 | 85 | ```bash 86 | wget https://github.com/prometheus/node_exporter/releases/download/v1.2.2/node_exporter-1.2.2.linux-amd64.tar.gz 87 | tar xvfz node_exporter-*.tar.gz 88 | ./node_exporter 89 | ``` 90 | 91 | - **Custom Exporter:** Writing a custom exporter using Python. 92 | 93 | ```python 94 | from prometheus_client import start_http_server, Gauge 95 | import random 96 | import time 97 | 98 | g = Gauge('random_number', 'A random number') 99 | 100 | def generate_random_number(): 101 | while True: 102 | g.set(random.random()) 103 | time.sleep(5) 104 | 105 | if __name__ == '__main__': 106 | start_http_server(8000) 107 | generate_random_number() 108 | ``` 109 | 110 | **7. Alerts and Alertmanager:** 111 | 112 | - **Alerting Rules:** 113 | 114 | ```yaml 115 | groups: 116 | - name: example 117 | rules: 118 | - alert: HighMemoryUsage 119 | expr: node_memory_Active_bytes / node_memory_MemTotal_bytes * 100 > 90 120 | for: 5m 121 | labels: 122 | severity: critical 123 | annotations: 124 | summary: "High memory usage detected on {{ $labels.instance }}" 125 | description: "Memory usage is above 90% for more than 5 minutes." 126 | ``` 127 | 128 | - **Alertmanager Configuration:** 129 | 130 | ```yaml 131 | global: 132 | resolve_timeout: 5m 133 | 134 | route: 135 | group_by: ['alertname'] 136 | receiver: 'email' 137 | 138 | receivers: 139 | - name: 'email' 140 | email_configs: 141 | - to: 'your-email@example.com' 142 | from: 'prometheus@example.com' 143 | smarthost: 'smtp.example.com:587' 144 | auth_username: 'username' 145 | auth_password: 'password' 146 | ``` 147 | 148 | **8. Prometheus Federation:** 149 | 150 | - **Setting Up Federation:** 151 | 152 | ```yaml 153 | scrape_configs: 154 | - job_name: 'federate' 155 | honor_labels: true 156 | metrics_path: '/federate' 157 | params: 158 | match[]: 159 | - '{job="prometheus"}' 160 | static_configs: 161 | - targets: 162 | - 'prometheus-server-1:9090' 163 | - 'prometheus-server-2:9090' 164 | ``` 165 | 166 | **9. 
Monitoring Kubernetes with Prometheus:** 167 | 168 | - **Deploying Prometheus on Kubernetes:** 169 | 170 | ```yaml 171 | apiVersion: monitoring.coreos.com/v1 172 | kind: Prometheus 173 | metadata: 174 | name: prometheus 175 | spec: 176 | replicas: 1 177 | serviceAccountName: prometheus 178 | serviceMonitorSelector: 179 | matchLabels: 180 | team: frontend 181 | resources: 182 | requests: 183 | memory: 400Mi 184 | storage: 185 | volumeClaimTemplate: 186 | spec: 187 | storageClassName: standard 188 | resources: 189 | requests: 190 | storage: 50Gi 191 | ``` 192 | 193 | - **ServiceMonitor Example:** 194 | 195 | ```yaml 196 | apiVersion: monitoring.coreos.com/v1 197 | kind: ServiceMonitor 198 | metadata: 199 | name: example-monitor 200 | spec: 201 | selector: 202 | matchLabels: 203 | app: example 204 | endpoints: 205 | - port: web 206 | ``` 207 | 208 | **10. Advanced Prometheus Concepts:** 209 | 210 | - **Thanos:** Extends Prometheus with long-term storage, global querying, and downsampling. 211 | - **Cortex:** Multi-tenant, horizontally scalable Prometheus as a service. 212 | 213 | **11. Prometheus Security:** 214 | 215 | - **Basic Authentication:** 216 | 217 | ```yaml 218 | basic_auth: 219 | username: admin 220 | password: admin 221 | ``` 222 | 223 | - **TLS/SSL Configuration:** 224 | 225 | ```yaml 226 | tls_config: 227 | ca_file: /etc/prometheus/certs/ca.crt 228 | cert_file: /etc/prometheus/certs/prometheus.crt 229 | key_file: /etc/prometheus/certs/prometheus.key 230 | ``` 231 | 232 | **12. Troubleshooting Prometheus:** 233 | 234 | - **Common Issues:** 235 | - **High Cardinality Metrics:** Too many unique time series can overwhelm Prometheus. 236 | - **Slow Queries:** Optimize queries by avoiding high cardinality and using efficient aggregations. 237 | 238 | - **Debugging:** 239 | - Use the **`promtool`** command-line tool to check configuration files. 240 | - **Prometheus UI** provides an interface to debug queries and examine time series data. 241 | -------------------------------------------------------------------------------- /Networking/Consul.md: -------------------------------------------------------------------------------- 1 | # Consul Cheatsheet 2 | 3 | ![text](https://imgur.com/RWncIhL.png) 4 | 5 | ## **Overview** 6 | 7 | Consul by HashiCorp is a service mesh and service discovery tool that provides distributed service networking, configuration management, and segmentation. It’s widely used for managing microservices in dynamic environments like Kubernetes. 8 | 9 | ### **Basic Concepts** 10 | 11 | - **Service Discovery:** Consul automatically detects services in your network, allowing them to register and discover each other without hardcoding IP addresses or DNS names. This is especially useful in dynamic environments where services are constantly scaling. 12 | 13 | - **Key/Value Store:** Consul includes a distributed key/value store that can be used for dynamic configuration management. This allows applications to retrieve configuration data at runtime without restarting. 14 | 15 | - **Health Checking:** Consul monitors the health of services through health checks. If a service fails its health check, Consul can automatically remove it from the service registry, preventing traffic from being routed to unhealthy instances. 16 | 17 | - **Agent:** Each node in a Consul cluster runs an agent that provides a local interface for service registration, health checking, and querying. Agents communicate with each other to ensure consistent service data across the cluster. 
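
In practice, these concepts come together in a small service definition plus a couple of agent commands. A minimal sketch — the service name, port, health endpoint, and KV path below are illustrative placeholders, not part of any real deployment:

```json
{
  "service": {
    "name": "web",
    "port": 8080,
    "check": {
      "http": "http://localhost:8080/health",
      "interval": "10s",
      "timeout": "1s"
    }
  }
}
```

```bash
# Register the service with the local agent, then store and read a config value
consul services register web.json
consul kv put config/web/greeting "hello"
consul kv get config/web/greeting

# Discover healthy instances through Consul DNS (the agent serves DNS on port 8600 by default)
dig @127.0.0.1 -p 8600 web.service.consul SRV
```

If the health check starts failing, the instance is removed from DNS and API query results until it recovers, so callers never need to track unhealthy endpoints themselves.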
18 | 19 | ### **Service Mesh Features** 20 | 21 | - **Connect:** Consul’s service mesh feature, Connect, provides secure service-to-service communication using mutual TLS (mTLS). It ensures that all traffic between services is encrypted and authenticated. 22 | 23 | - **Intention:** Intentions in Consul define policies that control which services are allowed to communicate with each other. This fine-grained access control enhances security by ensuring that only authorized services can connect. 24 | 25 | - **Sidecar Proxy:** Consul uses Envoy as a sidecar proxy to manage and secure service communication. The sidecar handles tasks such as load balancing, mTLS, and observability. 26 | 27 | ### **Traffic Management** 28 | 29 | - **Service Segmentation:** Consul’s intentions allow you to segment traffic by defining which services can communicate. For example, you can ensure that only the web service can talk to the payment service, preventing unauthorized access. 30 | 31 | - **Service Failover:** If a service instance becomes unhealthy or fails, Consul can automatically reroute traffic to healthy instances. This ensures high availability and resilience in your applications. 32 | 33 | - **Ingress Gateways:** Consul manages ingress gateways that control and secure traffic entering the service mesh. These gateways can enforce policies and provide TLS termination for incoming traffic. 34 | 35 | ### **Security** 36 | 37 | - **ACLs (Access Control Lists):** Consul’s ACL system provides fine-grained security controls. You can create policies that determine which users or services have access to specific resources, enhancing security in multi-tenant environments. 38 | 39 | - **mTLS:** Consul uses mutual TLS to secure communication between services. mTLS not only encrypts traffic but also ensures that both the client and server are authenticated before communication is allowed. 40 | 41 | - **Service Mesh Policies:** Consul allows you to define policies that control various aspects of service communication, such as rate limiting, traffic shaping, and access control. These policies help you manage and secure your service mesh. 42 | 43 | ### **Observability** 44 | 45 | - **Metrics:** Consul provides detailed metrics about service health, traffic patterns, and performance. These metrics can be exported to monitoring systems like Prometheus for further analysis. 46 | 47 | - **Logs:** Consul collects and stores logs related to service health, configuration changes, and traffic routing. These logs are useful for auditing and troubleshooting. 48 | 49 | - **Tracing:** Consul integrates with tracing systems like Jaeger and Zipkin to provide visibility into service communication. Tracing helps you understand how requests flow through your services and identify bottlenecks or failures. 50 | 51 | ### **Advanced Concepts** 52 | 53 | - **Mesh Gateways:** Mesh gateways allow Consul to manage traffic between services in different datacenters or regions. This extends the service mesh beyond a single cluster, enabling global service networking. 54 | 55 | - **Network Middleware Integration:** Consul can integrate with firewalls, load balancers, and other network devices to enforce policies outside the service mesh. This is useful for securing traffic at the network edge. 56 | 57 | - **Service Failover Across Datacenters:** In multi-datacenter deployments, Consul can automatically failover services to another datacenter if the primary one fails. This ensures continuity and resilience. 
58 | 59 | - **Consul-Terraform Sync:** Consul can automatically configure network infrastructure by syncing its service data with Terraform. This allows you to dynamically manage network devices based on the state of your services. 60 | 61 | ### **Example Use Case** 62 | 63 | Consider a microservices architecture where services need to be dynamically discovered, secured, and managed across multiple environments: 64 | 65 | 1. **Service Discovery:** Use Consul to automatically register services and make them discoverable to other services without manual intervention. 66 | 2. **Secure Communication:** Implement mTLS with Consul Connect to ensure all service-to-service communication is encrypted and authenticated. 67 | 3. **High Availability:** Configure service failover to reroute traffic to healthy instances if a service fails. 68 | 4. **Access Control:** Use ACLs to restrict access to sensitive services like payment processing, ensuring that only authorized services can communicate with them. 69 | 5. **Multi-Datacenter Resilience:** Deploy mesh gateways to manage traffic between services across different datacenters, ensuring global service availability. 70 | -------------------------------------------------------------------------------- /Networking/Envoy.md: -------------------------------------------------------------------------------- 1 | # Envoy Cheatsheet 2 | 3 | ![text](https://imgur.com/iw5sG1a.png) 4 | 5 | ## **Overview** 6 | 7 | Envoy is a high-performance, open-source edge and service proxy. Originally developed by Lyft, Envoy is now widely adopted for managing microservices communication, especially within service meshes. Envoy handles tasks such as load balancing, security, observability, and routing. 8 | 9 | ### **Basic Concepts** 10 | 11 | - **Proxy:** Envoy acts as a proxy, sitting between services and managing all incoming and outgoing traffic. It intercepts, processes, and forwards requests based on predefined configurations. 12 | 13 | - **Listener:** A listener is a configuration that defines how Envoy should accept incoming connections. It specifies the port and protocols (e.g., HTTP, TCP) Envoy listens to. 14 | 15 | - **Cluster:** In Envoy, a cluster represents a group of upstream services that Envoy proxies traffic to. A cluster typically consists of multiple instances of a service, allowing Envoy to distribute requests across them. 16 | 17 | - **Route:** Routes define how requests are processed and forwarded by Envoy. A route maps incoming requests to the appropriate cluster based on various criteria like URL paths or headers. 18 | 19 | ### **Traffic Management** 20 | 21 | - **Load Balancing:** Envoy provides several load balancing algorithms to distribute traffic across service instances. Common algorithms include round-robin, least-request, and ring-hash. Load balancing ensures that no single instance is overwhelmed with too much traffic. 22 | 23 | - **Retries:** Envoy can automatically retry failed requests based on configurable policies. For example, if an upstream service fails to respond, Envoy can retry the request on a different instance. 24 | 25 | - **Circuit Breakers:** Circuit breakers prevent a service from becoming overwhelmed by limiting the number of concurrent connections or requests. If a service exceeds the defined thresholds, Envoy will stop sending traffic to it until it recovers. 26 | 27 | - **Rate Limiting:** Envoy allows you to define rate limits on incoming requests, controlling how many requests are allowed over a given period. 
This is useful for preventing abuse or overloading of services. 28 | 29 | ### **Security** 30 | 31 | - **TLS Termination:** Envoy can handle TLS termination, decrypting inbound traffic, and encrypting outbound traffic. This simplifies the management of secure communications within your services. 32 | 33 | - **mTLS (Mutual TLS):** Envoy supports mutual TLS for securing service-to-service communication. This ensures that both parties in a communication exchange authenticate each other and that their communication is encrypted. 34 | 35 | - **RBAC (Role-Based Access Control):** Envoy implements RBAC to control access to services based on predefined roles and permissions. This adds an additional layer of security, ensuring that only authorized services or users can access specific resources. 36 | 37 | ### **Observability** 38 | 39 | - **Metrics:** Envoy provides detailed metrics about network traffic, including request counts, latency, error rates, and more. These metrics are essential for monitoring the health and performance of your services. 40 | 41 | - **Access Logs:** Envoy generates detailed access logs for each request it handles. These logs include information about the request’s origin, the response status, and any errors encountered. Access logs are valuable for auditing and debugging. 42 | 43 | - **Tracing:** Envoy integrates with distributed tracing systems like Jaeger and Zipkin. Tracing provides a detailed view of a request’s journey through various services, helping you identify bottlenecks and failures in your application. 44 | 45 | ### **Advanced Concepts** 46 | 47 | - **Filter Chains:** Envoy’s filter chains allow for complex request processing. Filters can modify, route, or reject requests based on various conditions. Common filters include authentication, rate limiting, and request transformation. 48 | 49 | - **Dynamic Configuration with xDS APIs:** Envoy supports dynamic configuration through a set of APIs known as xDS (e.g., ADS, CDS, LDS, RDS, EDS). These APIs allow Envoy to update its configuration in real-time without restarting. This capability is crucial for environments where services are constantly changing. 50 | 51 | - **Sidecar Proxy:** In a service mesh, Envoy is typically deployed as a sidecar proxy alongside each microservice. The sidecar intercepts all traffic to and from the service, providing security, observability, and traffic management features. 52 | 53 | ### **Example Use Case** 54 | 55 | Imagine you are running an e-commerce application with multiple microservices such as payment, inventory, and user services. Here’s how 56 | 57 | Envoy can help: 58 | 59 | 1. **Secure Communication:** Use Envoy’s TLS termination to encrypt all traffic between the microservices. 60 | 2. **Load Balancing:** Distribute incoming requests evenly across multiple instances of the payment service using Envoy’s round-robin load balancing. 61 | 3. **Rate Limiting:** Protect the user service from abuse by setting a rate limit on login attempts. 62 | 4. **Observability:** Monitor the health of all microservices using Envoy’s metrics and integrate with Prometheus for alerting. 63 | 5. **Resilience:** Use circuit breakers to prevent the inventory service from becoming overwhelmed during high traffic periods. 
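
A trimmed static configuration sketch for a setup like this shows how a listener, a route, and a cluster fit together with round-robin load balancing and a circuit breaker. The service names, addresses, ports, and thresholds are placeholders, and a production deployment would more likely push this configuration dynamically via the xDS APIs:

```yaml
static_resources:
  listeners:
    - name: ingress
      address:
        socket_address: { address: 0.0.0.0, port_value: 8080 }
      filter_chains:
        - filters:
            - name: envoy.filters.network.http_connection_manager
              typed_config:
                "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                stat_prefix: ingress_http
                route_config:
                  name: local_route
                  virtual_hosts:
                    - name: backend
                      domains: ["*"]
                      routes:
                        - match: { prefix: "/" }
                          route: { cluster: payment_service }
                http_filters:
                  - name: envoy.filters.http.router
                    typed_config:
                      "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
  clusters:
    - name: payment_service
      type: STRICT_DNS
      lb_policy: ROUND_ROBIN
      circuit_breakers:
        thresholds:
          - max_connections: 1000
            max_pending_requests: 100
      load_assignment:
        cluster_name: payment_service
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address: { address: payment.internal, port_value: 9000 }
```

Running it locally with `envoy -c envoy.yaml` (or the `envoyproxy/envoy` Docker image) is enough to exercise the routing and circuit-breaking behavior described above.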
64 | -------------------------------------------------------------------------------- /Networking/Istio.md: -------------------------------------------------------------------------------- 1 | # Istio Cheatsheet 2 | 3 | ![text](https://imgur.com/QLlMSCp.png) 4 | 5 | ## **Overview** 6 | 7 | Istio is an open-source service mesh that layers transparently onto existing distributed applications. It provides a way to control how microservices share data with one another. Key features of Istio include traffic management, security, and observability. 8 | 9 | ### **Basic Concepts** 10 | 11 | - **Service Mesh:** Istio creates a service mesh, which is an infrastructure layer that enables microservices to communicate with each other securely and efficiently. It also allows for traffic management and monitoring without requiring changes to the microservices themselves. 12 | 13 | - **Control Plane vs. Data Plane:** Istio's architecture is divided into two planes: 14 | - **Control Plane:** Manages and configures the proxies (Envoy) to route traffic, enforce policies, and collect telemetry. 15 | - **Data Plane:** Consists of Envoy proxies deployed as sidecars to the microservices, handling all network traffic between services. 16 | 17 | ### **Key Components** 18 | 19 | - **Envoy Proxy:** The core component of Istio’s data plane. Envoy is deployed as a sidecar to each service and intercepts all inbound and outbound traffic. 20 | 21 | - **Pilot:** Manages the configuration of the Envoy proxies, distributing routing rules and policies across the mesh. 22 | 23 | - **Mixer:** Enforces access control and usage policies, and collects telemetry data. It interacts with the Envoy proxies and provides insights into traffic patterns and security. 24 | 25 | - **Citadel:** Manages certificates and keys for mutual TLS (mTLS) and service identities within the mesh, ensuring secure communication between services. 26 | 27 | - **Galley:** Istio’s configuration validation component. It ensures that configurations are correct and distributes them to the appropriate components within the mesh. 28 | 29 | ### **Traffic Management** 30 | 31 | - **VirtualService:** A resource that defines how traffic is routed to a service. It allows you to configure complex routing rules like request matching, traffic splitting, and more. 32 | 33 | - **DestinationRule:** Defines policies that apply to traffic after it has been routed to a destination. These policies can include load balancing settings, connection pool sizes, and outlier detection. 34 | 35 | - **Gateway:** Manages external traffic entering the mesh. It controls how traffic from outside the cluster is directed into the mesh and routed to the appropriate services. 36 | 37 | - **Sidecar:** This resource configures the behavior of the sidecar proxies deployed alongside the microservices. It allows for fine-grained control over traffic management and resource usage. 38 | 39 | ### **Security** 40 | 41 | - **mTLS (Mutual TLS):** Istio supports mTLS to secure service-to-service communication. mTLS ensures that the identity of both the client and the server is authenticated and that the communication between them is encrypted. 42 | 43 | - **Authorization Policies:** These policies define access control rules, determining which services or users can access specific resources. Policies can be applied globally, per namespace, or per workload. 44 | 45 | - **Ingress/Egress Control:** Istio manages both inbound and outbound traffic to ensure that it complies with security policies. 
Ingress controls how external traffic enters the mesh, while egress manages how traffic leaves the mesh. 46 | 47 | ### **Observability** 48 | 49 | - **Telemetry:** Istio collects telemetry data such as metrics, logs, and traces, providing deep insights into the behavior of your microservices. This data is essential for monitoring and debugging applications. 50 | 51 | - **Prometheus:** Istio integrates with Prometheus, a monitoring system that scrapes metrics from the Envoy proxies. These metrics can be visualized using tools like Grafana. 52 | 53 | - **Grafana:** A visualization tool used to create dashboards that display the metrics collected by Prometheus. Istio provides pre-built Grafana dashboards to monitor your service mesh. 54 | 55 | - **Jaeger/Zipkin:** Distributed tracing tools integrated with Istio. They allow you to trace the path of a request as it travels through various services in the mesh, helping to identify performance bottlenecks and errors. 56 | 57 | ### **Advanced Concepts** 58 | 59 | - **Canary Deployments:** Istio enables canary deployments by allowing you to gradually roll out a new version of a service to a small percentage of users while monitoring its performance before fully deploying it. 60 | 61 | - **Traffic Mirroring:** This feature allows you to mirror a portion of live traffic to a new service version without impacting production traffic. It’s useful for testing new versions in a real-world environment. 62 | 63 | - **Circuit Breaking:** Prevents services from being overwhelmed by limiting the number of concurrent connections or requests. If the limit is reached, Istio can return an error or route traffic to a backup service. 64 | 65 | - **Rate Limiting:** Controls the rate at which requests are sent to a service, preventing overloads. Rate limits can be defined based on various factors, such as user identity or source IP. 66 | 67 | - **Ingress/Egress Policies:** These policies control what traffic is allowed to enter or leave the service mesh, enhancing security by restricting access based on predefined rules. 68 | 69 | - **Service Entries:** Extend the mesh to services outside of the mesh, allowing them to be treated as if they were inside the mesh. This is useful for managing and securing external services. 70 | 71 | ### **Example Use Case** 72 | 73 | Consider a microservices architecture where you need to manage traffic between different versions of a service. With Istio, you can: 74 | 75 | 1. **Deploy a New Version:** Use a VirtualService to route 10% of traffic to a new version of your service. 76 | 2. **Monitor the New Version:** Collect telemetry data to ensure the new version behaves as expected. 77 | 3. **Gradually Increase Traffic:** If the new version is stable, gradually increase the traffic percentage. 78 | 4. **Roll Back if Needed:** If issues are detected, quickly route all traffic back to the previous version using Istio’s traffic management capabilities. 79 | -------------------------------------------------------------------------------- /Networking/Linkerd.md: -------------------------------------------------------------------------------- 1 | # Linkerd Cheatsheet 2 | 3 | ![text](https://imgur.com/xyQcgGf.png) 4 | 5 | ## **Overview** 6 | 7 | Linkerd is a lightweight service mesh designed to be simple to operate while providing powerful features for observability, security, and reliability. Unlike some other service meshes, Linkerd focuses on minimal configuration and performance. 
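
Getting a cluster onto Linkerd typically takes only a few CLI steps, which reflects that minimal-configuration philosophy. A rough sketch — the deployment name is a placeholder, and exact flags vary slightly between releases (the separate `--crds` step applies to Linkerd 2.12 and later):

```bash
# Install the CLI, verify the cluster, then install the control plane
curl -sL https://run.linkerd.io/install | sh
linkerd check --pre
linkerd install --crds | kubectl apply -f -
linkerd install | kubectl apply -f -
linkerd check

# Mesh an existing workload by injecting the sidecar proxy into its pod spec
kubectl get deploy my-app -o yaml | linkerd inject - | kubectl apply -f -
```

Once the proxy is injected, the mTLS, metrics, and load-balancing behavior described below applies to that workload automatically.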
8 | 9 | ### **Basic Concepts** 10 | 11 | - **Service Mesh:** Linkerd provides an infrastructure layer that enables secure, reliable, and observable communication between microservices. It operates transparently, requiring minimal changes to your services. 12 | 13 | - **Control Plane:** Linkerd’s control plane manages the configuration and behavior of the service mesh. It includes components for managing policies, collecting telemetry, and issuing certificates. 14 | 15 | - **Data Plane:** The data plane consists of lightweight proxies deployed as sidecars to each service. These proxies handle all inbound and outbound traffic, providing features like mTLS, retries, and load balancing. 16 | 17 | ### **Traffic Management** 18 | 19 | - **Routing:** Linkerd automatically manages routing for service-to-service communication. It handles retries and timeouts, ensuring that requests are routed efficiently and reliably. 20 | 21 | - **Load Balancing:** Linkerd distributes traffic across available service instances to prevent any single instance from being overwhelmed. It uses algorithms like random and least-request to balance traffic effectively. 22 | 23 | - **Traffic Splitting:** Linkerd allows you to split traffic between different versions of a service. This is useful for canary deployments, where a small percentage of traffic is sent to a new version before full rollout. 24 | 25 | ### **Security** 26 | 27 | - **mTLS:** Linkerd provides out-of-the-box mutual TLS (mTLS) for all communication between services. This ensures that all traffic is encrypted and that both the client and server are authenticated. 28 | 29 | - **Identity Service:** Linkerd includes an identity service that issues and renews TLS certificates for the proxies. This service manages the cryptographic identities used for mTLS. 30 | 31 | - **Authorization:** Linkerd’s mTLS also acts as an authorization mechanism, ensuring that only authorized services can communicate with each other. This enhances security by preventing unauthorized access. 32 | 33 | ### **Observability** 34 | 35 | - **Metrics:** Linkerd automatically collects and exposes metrics such as latency, success rates, and request volumes. These metrics are essential for monitoring the health and performance of your services. 36 | 37 | - **Prometheus Integration:** Linkerd integrates seamlessly with Prometheus, allowing you to scrape and visualize metrics. Prometheus can be used to create alerts based on Linkerd’s metrics. 38 | 39 | - **Grafana Dashboards:** Linkerd provides pre-built Grafana dashboards for visualizing metrics. These dashboards offer insights into service performance and help in identifying issues. 40 | 41 | - **Distributed Tracing:** Linkerd supports distributed tracing, allowing you to track requests as they flow through different services. This helps in understanding the service interaction and diagnosing issues. 42 | 43 | ### **Advanced Concepts** 44 | 45 | - **Service Profiles:** Service profiles allow you to define expected behavior for services, such as retries, timeouts, and traffic shaping. They provide fine-grained control over how traffic is handled. 46 | 47 | - **Tap API:** The Tap API provides real-time visibility into live traffic. You can use it to inspect requests and responses, making it a powerful tool for debugging and monitoring. 48 | 49 | - **Traffic Shifting:** Linkerd supports traffic shifting, enabling you to gradually shift traffic from one version of a service to another. This is particularly useful for rolling out updates safely. 
50 | 51 | - **Multicluster Support:** Linkerd can extend its service mesh across multiple Kubernetes clusters, allowing you to manage services that span different environments. This is useful for high availability and disaster recovery. 52 | 53 | - **Policy Enforcement:** Linkerd allows you to define policies that control traffic routing, access control, and rate limiting. These policies help ensure that services behave as expected under various conditions. 54 | 55 | ### **Example Use Case** 56 | 57 | Suppose you are managing a microservices application where you need a lightweight service mesh to provide observability and security with minimal overhead: 58 | 59 | 1. **Simplified Deployment:** Deploy Linkerd with minimal configuration and start benefiting from automatic mTLS and load balancing. 60 | 2. **Canary Releases:** Use traffic splitting to gradually route traffic to a new version of a service, reducing the risk of full deployment. 61 | 3. **Real-time Monitoring:** Utilize the Tap API to monitor live traffic and quickly identify any issues with requests. 62 | 4. **Secure Communication:** Rely on Linkerd’s mTLS to secure all service-to-service communication without the need for complex certificate management. 63 | 5. **Cross-Cluster Management:** Extend Linkerd’s service mesh across multiple Kubernetes clusters to ensure high availability and disaster recovery. 64 | -------------------------------------------------------------------------------- /Security/AquaSec.md: -------------------------------------------------------------------------------- 1 | # AquaSec Cheatsheet 2 | 3 | ![text](https://imgur.com/8MBLV6G.png) 4 | 5 | **1. Introduction:** 6 | 7 | - **AquaSec** (Aqua Security) is a comprehensive security platform for securing containers, Kubernetes, and cloud-native applications throughout the CI/CD pipeline. 8 | 9 | **2. Installation:** 10 | 11 | - **Installing AquaSec:** 12 | - AquaSec is usually deployed as a Kubernetes application. 13 | - Download AquaSec from the [Aqua website](https://www.aquasec.com/) and follow the installation instructions for your environment. 14 | 15 | - **Dockerized Installation:** 16 | - AquaSec components can also be installed using Docker images available on Docker Hub. 17 | 18 | **3. Basic Configuration:** 19 | 20 | - **Aqua Console:** 21 | - The Aqua Console is the central management interface for configuring and monitoring AquaSec. 22 | - Access the Aqua Console at `http://:8080`. 23 | 24 | - **User Management:** 25 | - Create users and assign roles in the Aqua Console under the **Users** section. 26 | 27 | **4. Container Security:** 28 | 29 | - **Image Scanning:** 30 | - AquaSec automatically scans container images for vulnerabilities, malware, and misconfigurations. 31 | - Scans can be initiated via the Aqua Console or automated in CI/CD pipelines. 32 | 33 | - **Runtime Protection:** 34 | - AquaSec provides real-time monitoring of running containers, blocking unauthorized activities based on predefined policies. 35 | 36 | **5. Kubernetes Security:** 37 | 38 | - **Kubernetes Admission Control:** 39 | - AquaSec integrates with Kubernetes admission controllers to enforce security policies during the pod creation process. 40 | - Policies can prevent the deployment of vulnerable or misconfigured containers. 41 | 42 | - **Network Segmentation:** 43 | - AquaSec can segment Kubernetes network traffic using microsegmentation to restrict communication between pods. 44 | 45 | **6. 
Advanced Features:** 46 | 47 | - **Secrets Management:** 48 | - AquaSec integrates with secrets management tools like HashiCorp Vault to secure sensitive data in containers and Kubernetes clusters. 49 | 50 | - **Compliance Auditing:** 51 | - AquaSec provides auditing capabilities to ensure compliance with standards like PCI-DSS, HIPAA, and NIST. 52 | 53 | **7. AquaSec in CI/CD Pipelines:** 54 | 55 | - **Integrating with Jenkins:** 56 | - Use the AquaSec Jenkins plugin to scan images as part of the build process and fail builds that do not meet security criteria. 57 | 58 | - **Automating Policies:** 59 | - Define security policies that are automatically enforced across all stages of the pipeline. 60 | 61 | **8. Monitoring and Reporting:** 62 | 63 | - **Dashboards:** 64 | - AquaSec provides detailed dashboards for monitoring vulnerabilities, policy violations, and runtime security events. 65 | 66 | - **Custom Alerts:** 67 | - Configure alerts for specific security events, such as the detection of high-severity vulnerabilities or unauthorized access attempts. 68 | 69 | **9. Scaling AquaSec:** 70 | 71 | - **High Availability:** 72 | - Deploy AquaSec in a high-availability configuration with multiple Aqua Consoles and databases to ensure resilience. 73 | 74 | - **Integrating with SIEMs:** 75 | - AquaSec integrates with Security Information and Event Management (SIEM) systems like Splunk and IBM QRadar for centralized monitoring. 76 | 77 | **10. Troubleshooting AquaSec:** 78 | 79 | - **Common Issues:** 80 | - **Failed Scans:** Ensure that the Aqua scanner is properly configured and has access to the image registry. 81 | - **Policy Enforcement Issues:** Review policy definitions and ensure they are correctly applied. 82 | 83 | - **Debugging:** 84 | - Check AquaSec logs for detailed error information and troubleshooting steps. 85 | -------------------------------------------------------------------------------- /Security/HashiCorp-Vault.md: -------------------------------------------------------------------------------- 1 | # HashiCorp Vault Cheatsheet 2 | 3 | ![text](https://imgur.com/322q6Pi.png) 4 | 5 | **1. Introduction:** 6 | 7 | - **HashiCorp Vault** is a tool designed to securely store and access secrets. It can manage sensitive data such as passwords, API keys, and certificates. 8 | 9 | **2. Installation:** 10 | 11 | - **Installing Vault:** 12 | - On macOS using Homebrew: 13 | 14 | ```bash 15 | brew install vault 16 | ``` 17 | 18 | - On Linux: 19 | 20 | ```bash 21 | wget https://releases.hashicorp.com/vault/1.9.0/vault_1.9.0_linux_amd64.zip 22 | unzip vault_1.9.0_linux_amd64.zip 23 | sudo mv vault /usr/local/bin/ 24 | ``` 25 | 26 | - On Windows: 27 | - Download the binary from the [official HashiCorp releases](https://www.vaultproject.io/downloads). 28 | 29 | **3. Basic Usage:** 30 | 31 | - **Initializing Vault:** 32 | 33 | ```bash 34 | vault operator init 35 | ``` 36 | 37 | - This command initializes the Vault server, generating unseal keys and a root token. 38 | 39 | - **Unsealing Vault:** 40 | 41 | ```bash 42 | vault operator unseal 43 | vault operator unseal 44 | vault operator unseal 45 | ``` 46 | 47 | - Unseal Vault using the keys provided during initialization. 48 | 49 | - **Storing Secrets:** 50 | 51 | ```bash 52 | vault kv put secret/my-secret password="mypassword" 53 | ``` 54 | 55 | - This command stores a secret in Vault at the path `secret/my-secret`. 
56 | 57 | - **Retrieving Secrets:** 58 | 59 | ```bash 60 | vault kv get secret/my-secret 61 | ``` 62 | 63 | - Retrieves the secret stored at `secret/my-secret`. 64 | 65 | **4. Advanced Usage:** 66 | 67 | - **Dynamic Secrets:** 68 | - Vault can generate secrets dynamically, such as database credentials that are created on-demand. 69 | - Example: Generating MySQL credentials: 70 | 71 | ```bash 72 | vault write database/roles/my-role db_name=mydb creation_statements="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';" default_ttl="1h" max_ttl="24h" 73 | vault read database/creds/my-role 74 | ``` 75 | 76 | - **Secret Engines:** 77 | - Vault supports multiple secret engines like KV, AWS, GCP, and more. 78 | - Enable a secrets engine: 79 | 80 | ```bash 81 | vault secrets enable aws 82 | ``` 83 | 84 | - Configure and use the AWS secrets engine: 85 | 86 | ```bash 87 | vault write aws/config/root access_key= secret_key= 88 | vault write aws/roles/my-role credential_type=iam_user policy_arns=arn:aws:iam::aws:policy/ReadOnlyAccess 89 | ``` 90 | 91 | **5. Authentication Methods:** 92 | 93 | - **Enabling Authentication Methods:** 94 | - Vault supports various authentication methods, including AppRole, LDAP, and AWS. 95 | - Enable an authentication method: 96 | 97 | ```bash 98 | vault auth enable approle 99 | ``` 100 | 101 | - **Configuring AppRole Authentication:** 102 | - Create a role: 103 | 104 | ```bash 105 | vault write auth/approle/role/my-role token_policies="default" token_ttl=1h token_max_ttl=4h 106 | ``` 107 | 108 | - Retrieve the role ID and secret ID: 109 | 110 | ```bash 111 | vault read auth/approle/role/my-role/role-id 112 | vault write -f auth/approle/role/my-role/secret-id 113 | ``` 114 | 115 | **6. Policies and Access Control:** 116 | 117 | - **Creating Policies:** 118 | - Define a policy to control access to secrets: 119 | 120 | ```hcl 121 | path "secret/data/*" { 122 | capabilities = ["create", "read", "update", "delete", "list"] 123 | } 124 | ``` 125 | 126 | - Apply the policy: 127 | 128 | ```bash 129 | vault policy write my-policy my-policy.hcl 130 | ``` 131 | 132 | **7. Vault in Production:** 133 | 134 | - **High Availability (HA):** 135 | - Vault supports HA configurations using storage backends like Consul. 136 | - Example Consul configuration: 137 | 138 | ```bash 139 | storage "consul" { 140 | address = "127.0.0.1:8500" 141 | path = "vault/" 142 | } 143 | ``` 144 | 145 | - **Performance Replication:** 146 | - Vault Enterprise supports performance replication for scaling reads. 147 | 148 | **8. Integrations and Automation:** 149 | 150 | - **Terraform Integration:** 151 | - Use the [Terraform Vault provider](https://registry.terraform.io/providers/hashicorp/vault/latest/docs) to manage Vault resources. 152 | - Example Terraform configuration: 153 | 154 | ```hcl 155 | provider "vault" {} 156 | 157 | resource "vault_generic_secret" "example" { 158 | path = "secret/example" 159 | data_json = < Marketplace** in SonarQube and search for plugins. 74 | - Popular plugins include SonarLint, SonarCSS, and SonarTS. 75 | 76 | - **SonarQube and IDE Integration:** 77 | - **SonarLint** is a plugin that integrates with IDEs like IntelliJ, Eclipse, and VS Code for real-time code quality feedback. 78 | 79 | **6. Advanced Features:** 80 | 81 | - **Code Coverage:** 82 | - SonarQube integrates with code coverage tools like Jacoco for Java and Istanbul for JavaScript to report on test coverage. 
83 | 84 | - **Security Vulnerabilities:** 85 | - SonarQube detects vulnerabilities and provides remediation guidance based on OWASP and SANS standards. 86 | 87 | **7. Managing Users and Permissions:** 88 | 89 | - **User Management:** 90 | - Add users and groups in the **Security** section. 91 | - Assign roles such as **Admin**, **User**, or **Code Viewer**. 92 | 93 | - **LDAP/SSO Integration:** 94 | - Configure LDAP or SSO in `sonar.properties` for centralized user authentication. 95 | 96 | **8. Monitoring and Reporting:** 97 | 98 | - **Project Dashboards:** 99 | - SonarQube provides detailed dashboards for each project, showing metrics like code coverage, duplications, and issues over time. 100 | 101 | - **Custom Reports:** 102 | - Generate custom reports with detailed metrics and trends for management or compliance purposes. 103 | 104 | **9. Scaling SonarQube:** 105 | 106 | - **High Availability:** 107 | - Run SonarQube in a cluster mode by configuring multiple nodes and a load balancer. 108 | - Configure the cluster settings in the `sonar.properties` file. 109 | 110 | - **Optimizing Performance:** 111 | - Use a separate database for larger SonarQube deployments and allocate sufficient resources to the server. 112 | 113 | **10. Troubleshooting SonarQube:** 114 | 115 | - **Common Issues:** 116 | - **Out of Memory:** Increase JVM heap size in `sonar.properties`. 117 | - **Failed Scans:** Check the logs in `logs/` directory for detailed error messages. 118 | 119 | - **Debugging:** 120 | - Enable debug logging in `sonar.properties`: 121 | 122 | ```properties 123 | sonar.log.level=DEBUG 124 | ``` 125 | -------------------------------------------------------------------------------- /Security/Trivy.md: -------------------------------------------------------------------------------- 1 | # Trivy Cheatsheet 2 | 3 | ![text](https://imgur.com/TYu7qw7.png) 4 | 5 | **1. Introduction:** 6 | 7 | - **Trivy** is a comprehensive and easy-to-use security scanner for container images, file systems, and Git repositories, detecting vulnerabilities, misconfigurations, and secrets. 8 | 9 | **2. Installation:** 10 | 11 | - **Installing Trivy:** 12 | - On macOS using Homebrew: 13 | 14 | ```bash 15 | brew install aquasecurity/trivy/trivy 16 | ``` 17 | 18 | - On Linux: 19 | 20 | ```bash 21 | sudo apt-get install wget apt-transport-https gnupg lsb-release 22 | wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo apt-key add - 23 | echo deb https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main | sudo tee -a /etc/apt/sources.list.d/trivy.list 24 | sudo apt-get update 25 | sudo apt-get install trivy 26 | ``` 27 | 28 | - On Windows: 29 | - Download the binary from the [GitHub releases](https://github.com/aquasecurity/trivy/releases). 30 | 31 | **3. Basic Usage:** 32 | 33 | - **Scanning a Docker Image:** 34 | 35 | ```bash 36 | trivy image nginx:latest 37 | ``` 38 | 39 | - This command scans the `nginx:latest` Docker image for known vulnerabilities. 40 | 41 | - **Scanning a File System:** 42 | 43 | ```bash 44 | trivy fs /path/to/directory 45 | ``` 46 | 47 | - This command scans the specified directory for vulnerabilities and misconfigurations. 48 | 49 | - **Scanning a Git Repository:** 50 | 51 | ```bash 52 | trivy repo https://github.com/user/repository 53 | ``` 54 | 55 | - This command scans the entire GitHub repository for vulnerabilities. 56 | 57 | **4. 
Scanning Options:** 58 | 59 | - **Severity Levels:** 60 | - Filter results based on severity: 61 | 62 | ```bash 63 | trivy image --severity HIGH,CRITICAL nginx:latest 64 | ``` 65 | 66 | - This command limits the output to high and critical vulnerabilities only. 67 | 68 | - **Ignore Unfixed Vulnerabilities:** 69 | 70 | ```bash 71 | trivy image --ignore-unfixed nginx:latest 72 | ``` 73 | 74 | - Excludes vulnerabilities that have no known fixes. 75 | 76 | - **Output Formats:** 77 | - JSON: 78 | 79 | ```bash 80 | trivy image -f json -o results.json nginx:latest 81 | ``` 82 | 83 | - Table (default): 84 | 85 | ```bash 86 | trivy image -f table nginx:latest 87 | ``` 88 | 89 | **5. Advanced Usage:** 90 | 91 | - **Customizing Vulnerability Database Update:** 92 | 93 | ```bash 94 | trivy image --skip-update nginx:latest 95 | ``` 96 | 97 | - Skips updating the vulnerability database before scanning. 98 | 99 | - **Using Trivy with Docker:** 100 | - Running Trivy as a Docker container: 101 | 102 | ```bash 103 | docker run --rm -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy image nginx:latest 104 | ``` 105 | 106 | - Scanning an image by directly pulling it from a registry: 107 | 108 | ```bash 109 | trivy image --docker-username --docker-password myregistry.com/myimage:tag 110 | ``` 111 | 112 | - **Trivy in CI/CD Pipelines:** 113 | - Integrate Trivy into CI/CD workflows to automate vulnerability scanning during build stages. 114 | 115 | **6. Trivy Misconfiguration Detection:** 116 | 117 | - **Scanning for Misconfigurations:** 118 | 119 | ```bash 120 | trivy config /path/to/configuration/files 121 | ``` 122 | 123 | - Scans configuration files (e.g., Kubernetes, Terraform) for security misconfigurations. 124 | 125 | **7. Trivy and Secrets Detection:** 126 | 127 | - **Scanning for Secrets:** 128 | 129 | ```bash 130 | trivy fs --security-checks secrets /path/to/code 131 | ``` 132 | 133 | - Detects hardcoded secrets like passwords, API keys, and tokens within the codebase. 134 | 135 | **8. Integration with Other Tools:** 136 | 137 | - **Trivy and Harbor:** 138 | - Trivy can be used as a vulnerability scanner within [Harbor](https://goharbor.io/), a cloud-native registry. 139 | 140 | - **Trivy and Kubernetes:** 141 | - Trivy can scan Kubernetes resources for vulnerabilities and misconfigurations. 142 | 143 | **9. Trivy Reports:** 144 | 145 | - **Generating Reports:** 146 | - HTML Report: 147 | 148 | ```bash 149 | trivy image -f json -o report.json nginx:latest 150 | trivy report --input report.json --format html --output report.html 151 | ``` 152 | 153 | - Detailed Reports with Severity Breakdown: 154 | 155 | ```bash 156 | trivy image --severity HIGH,CRITICAL --format table nginx:latest 157 | ``` 158 | 159 | **10. Troubleshooting Trivy:** 160 | 161 | - **Common Issues:** 162 | - **Slow Scans:** Consider skipping database updates if they are not necessary. 163 | - **Network Issues:** Ensure your network allows access to Trivy’s vulnerability database. 164 | 165 | - **Debugging:** 166 | - Use the `--debug` flag to see detailed logs: 167 | 168 | ```bash 169 | trivy image --debug nginx:latest 170 | ``` 171 | -------------------------------------------------------------------------------- /Version-Control/GitHub.md: -------------------------------------------------------------------------------- 1 | # Github Cheatsheet 2 | 3 | ![text](https://imgur.com/c189VXy.png) 4 | 5 | **GitHub** is a powerful platform for version control, collaboration, CI/CD automation, and DevOps workflows. 
This cheatsheet provides an in-depth guide to using GitHub, covering basic operations to advanced features. 6 | 7 | --- 8 | 9 | ## 1. **Introduction to GitHub** 10 | 11 | ### What is GitHub? 12 | 13 | GitHub is a web-based platform that uses Git for version control and provides tools for: 14 | 15 | - Collaborative software development 16 | - CI/CD automation 17 | - Project management 18 | - Code review and DevOps integration 19 | 20 | ### Key Features 21 | 22 | - **Git Repositories**: Centralized code hosting with Git. 23 | - **Collaboration**: Pull requests, code reviews, and discussions. 24 | - **Actions**: Automate workflows with GitHub Actions. 25 | - **Project Management**: Boards, issues, and milestones for agile workflows. 26 | - **Security**: Dependabot alerts and code scanning for vulnerabilities. 27 | 28 | --- 29 | 30 | ## 2. **Getting Started** 31 | 32 | ### Creating an Account 33 | 34 | 1. Sign up at [GitHub](https://github.com/). 35 | 2. Create or join an organization for team collaboration. 36 | 37 | ### Adding SSH Keys 38 | 39 | 1. Generate an SSH key: 40 | 41 | ```bash 42 | ssh-keygen -t rsa -b 4096 -C "your_email@example.com" 43 | ``` 44 | 45 | 2. Add the key to your GitHub account: 46 | - Go to **Settings** → **SSH and GPG keys** → Add Key. 47 | 48 | ### Creating a Repository 49 | 50 | 1. Go to **Repositories** → **New**. 51 | 2. Configure repository name, description, and visibility. 52 | 3. Add a `.gitignore` file or license if needed. 53 | 54 | --- 55 | 56 | ## 3. **Basic GitHub Operations** 57 | 58 | ### Cloning a Repository 59 | 60 | ```bash 61 | git clone git@github.com:username/repository.git 62 | ``` 63 | 64 | ### Committing and Pushing Changes 65 | 66 | ```bash 67 | # Stage changes 68 | git add . 69 | # Commit changes 70 | git commit -m "Initial commit" 71 | # Push changes 72 | git push origin main 73 | ``` 74 | 75 | ### Pulling Changes 76 | 77 | ```bash 78 | git pull origin main 79 | ``` 80 | 81 | --- 82 | 83 | ## 4. **Branching and Merging** 84 | 85 | ### Creating and Switching Branches 86 | 87 | ```bash 88 | # Create a new branch 89 | git checkout -b feature-branch 90 | # Switch to an existing branch 91 | git checkout main 92 | ``` 93 | 94 | ### Pushing a Branch 95 | 96 | ```bash 97 | git push origin feature-branch 98 | ``` 99 | 100 | ### Merging Branches 101 | 102 | 1. Open a **Pull Request** on GitHub: 103 | - Navigate to the repository → **Pull Requests** → **New Pull Request**. 104 | 2. Review and merge changes. 105 | 106 | ### Deleting a Branch 107 | 108 | ```bash 109 | # Delete locally 110 | git branch -d feature-branch 111 | # Delete on remote 112 | git push origin --delete feature-branch 113 | ``` 114 | 115 | --- 116 | 117 | ## 5. **GitHub Issues and Project Boards** 118 | 119 | ### Creating an Issue 120 | 121 | 1. Go to **Issues** → **New Issue**. 122 | 2. Add title, description, and assign labels or assignees. 123 | 124 | ### Automating Project Boards 125 | 126 | - **Add Issues Automatically**: 127 | 1. Go to the project board. 128 | 2. Set up automation rules like "Add issues in progress." 129 | 130 | ### Linking Pull Requests to Issues 131 | 132 | Use keywords in PR descriptions: 133 | 134 | ```text 135 | Fixes #issue_number 136 | Closes #issue_number 137 | ``` 138 | 139 | --- 140 | 141 | ## 6. **GitHub Actions (CI/CD)** 142 | 143 | GitHub Actions is a workflow automation tool for CI/CD. 
144 | 145 | ### Basics of `.github/workflows/.yml` 146 | 147 | #### Example Workflow: 148 | 149 | ```yaml 150 | name: CI Pipeline 151 | 152 | on: 153 | push: 154 | branches: 155 | - main 156 | 157 | jobs: 158 | build: 159 | runs-on: ubuntu-latest 160 | steps: 161 | - name: Checkout Code 162 | uses: actions/checkout@v3 163 | - name: Install Dependencies 164 | run: npm install 165 | - name: Run Tests 166 | run: npm test 167 | ``` 168 | 169 | ### Workflow Triggers 170 | 171 | - **push**: Runs the workflow when a commit is pushed. 172 | - **pull_request**: Triggers on pull requests. 173 | - **schedule**: Triggers on a cron schedule. 174 | 175 | ### Managing Secrets 176 | 177 | 1. Go to **Settings** → **Secrets and variables** → **Actions**. 178 | 2. Add variables like `AWS_ACCESS_KEY_ID` or `DOCKER_PASSWORD`. 179 | 180 | ### Example with Secrets 181 | 182 | ```yaml 183 | jobs: 184 | deploy: 185 | runs-on: ubuntu-latest 186 | steps: 187 | - name: Deploy to AWS 188 | run: aws s3 sync ./build s3://my-bucket 189 | env: 190 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} 191 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 192 | ``` 193 | 194 | --- 195 | 196 | ## 7. **GitHub Packages** 197 | 198 | ### Using GitHub as a Docker Registry 199 | 200 | 1. Authenticate: 201 | 202 | ```bash 203 | docker login ghcr.io -u USERNAME -p TOKEN 204 | ``` 205 | 206 | 2. Build and Push: 207 | 208 | ```bash 209 | docker build -t ghcr.io/username/image-name:tag . 210 | docker push ghcr.io/username/image-name:tag 211 | ``` 212 | 213 | ### Installing from GitHub Packages 214 | 215 | - Add dependency in `package.json` (Node.js): 216 | 217 | ```json 218 | "dependencies": { 219 | "package-name": "github:username/repository" 220 | } 221 | ``` 222 | 223 | --- 224 | 225 | ## 8. **Advanced GitHub Features** 226 | 227 | ### Protecting Branches 228 | 229 | 1. Go to **Settings** → **Branches**. 230 | 2. Enable branch protection rules (e.g., prevent force-pushes, require PR reviews). 231 | 232 | ### Code Review Automation 233 | 234 | - Use GitHub Apps like **CodeCov** or **LGTM** for automated code review. 235 | 236 | ### Dependency Management with Dependabot 237 | 238 | 1. Enable Dependabot under **Insights** → **Dependency Graph**. 239 | 2. Dependabot creates pull requests to update outdated dependencies. 240 | 241 | --- 242 | 243 | ## 9. **GitHub Security** 244 | 245 | ### Code Scanning 246 | 247 | 1. Enable **Code Scanning Alerts** under **Security**. 248 | 2. Include scanning actions in workflows: 249 | 250 | ```yaml 251 | - name: CodeQL Analysis 252 | uses: github/codeql-action/analyze@v2 253 | ``` 254 | 255 | ### Secret Scanning 256 | 257 | - GitHub scans public repositories for leaked secrets and alerts the repository owner. 258 | 259 | ### Enabling 2FA 260 | 261 | 1. Go to **Settings** → **Account Security** → Enable Two-Factor Authentication. 262 | 263 | --- 264 | 265 | ## 10. **GitHub CLI (gh)** 266 | 267 | ### Installing GitHub CLI 268 | 269 | ```bash 270 | brew install gh # macOS 271 | sudo apt install gh # Linux 272 | ``` 273 | 274 | ### Authenticating 275 | 276 | ```bash 277 | gh auth login 278 | ``` 279 | 280 | ### Common Commands 281 | 282 | - Clone a Repository: 283 | 284 | ```bash 285 | gh repo clone username/repository 286 | ``` 287 | 288 | - Create a Pull Request: 289 | 290 | ```bash 291 | gh pr create --title "Feature Update" --body "Details of PR" 292 | ``` 293 | 294 | - List Issues: 295 | 296 | ```bash 297 | gh issue list 298 | ``` 299 | 300 | --- 301 | 302 | ## 11. 
**GitHub API** 303 | 304 | ### Using the API 305 | 306 | Authenticate using a personal access token: 307 | 308 | ```bash 309 | curl -H "Authorization: token YOUR_TOKEN" https://api.github.com/user/repos 310 | ``` 311 | 312 | ### Example: Creating an Issue 313 | 314 | ```bash 315 | curl -X POST -H "Authorization: token YOUR_TOKEN" \ 316 | -H "Content-Type: application/json" \ 317 | -d '{"title": "Bug Report", "body": "Description of the bug"}' \ 318 | https://api.github.com/repos/username/repository/issues 319 | ``` 320 | 321 | --- 322 | 323 | ## 12. **GitHub Best Practices** 324 | 325 | - **Use Descriptive Commit Messages**: 326 | 327 | ```text 328 | Fix bug in login page #123 329 | ``` 330 | 331 | - **Enable Branch Protections** to enforce review processes. 332 | - **Automate Testing** using GitHub Actions for pull requests. 333 | - **Use Issues and Labels** for effective project tracking. 334 | 335 | --- 336 | 337 | ## References and Resources 338 | 339 | 1. [GitHub Documentation](https://docs.github.com/) 340 | 2. [GitHub CLI Documentation](https://cli.github.com/manual/) 341 | 3. [GitHub Actions Guide](https://docs.github.com/en/actions) 342 | -------------------------------------------------------------------------------- /next-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | /// 3 | 4 | // NOTE: This file should not be edited 5 | // see https://nextjs.org/docs/app/building-your-application/configuring/typescript for more information. 6 | -------------------------------------------------------------------------------- /next.config.mjs: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = { 3 | // Optimize bundle size 4 | compiler: { 5 | removeConsole: process.env.NODE_ENV === "production", 6 | reactRemoveProperties: process.env.NODE_ENV === "production", 7 | swcMinify: true, 8 | }, 9 | // Optimize images 10 | images: { 11 | formats: ["image/avif", "image/webp"], 12 | remotePatterns: [ 13 | { 14 | protocol: "https", 15 | hostname: "**", 16 | }, 17 | ], 18 | deviceSizes: [640, 750, 828, 1080, 1200, 1920, 2048], 19 | imageSizes: [16, 32, 48, 64, 96, 128, 256], 20 | }, 21 | webpack: (config, { dev, isServer }) => { 22 | // Handle markdown files 23 | config.module.rules.push({ 24 | test: /\.md$/, 25 | use: "raw-loader", 26 | }); 27 | 28 | // Handle browser polyfills 29 | if (!isServer) { 30 | config.resolve.fallback = { 31 | fs: false, 32 | path: false, 33 | }; 34 | } 35 | 36 | // Production optimizations 37 | if (!dev) { 38 | config.optimization = { 39 | ...config.optimization, 40 | minimize: true, 41 | moduleIds: "deterministic", 42 | chunkIds: "deterministic", 43 | runtimeChunk: { name: "runtime" }, 44 | splitChunks: { 45 | chunks: "all", 46 | cacheGroups: { 47 | vendor: { 48 | name: "vendor", 49 | test: /[\\/]node_modules[\\/]/, 50 | chunks: "all", 51 | priority: 10, 52 | }, 53 | commons: { 54 | name: "commons", 55 | minChunks: 2, 56 | priority: 5, 57 | reuseExistingChunk: true, 58 | }, 59 | }, 60 | }, 61 | }; 62 | 63 | // Exclude large dependencies from server bundles 64 | if (isServer) { 65 | config.externals = [ 66 | ...(config.externals || []), 67 | "sharp", 68 | "gray-matter", 69 | "react-syntax-highlighter", 70 | ]; 71 | } 72 | } 73 | 74 | return config; 75 | }, 76 | // Disable unnecessary features 77 | productionBrowserSourceMaps: false, 78 | optimizeFonts: true, 79 | poweredByHeader: false, 80 | // Enable static compression 81 | 
compress: true, 82 | // Enhance speed with incremental static regeneration 83 | staticPageGenerationTimeout: 120, 84 | // Reduce bundle size 85 | modularizeImports: { 86 | "lucide-react": { 87 | transform: "lucide-react/dist/esm/icons/{{member}}", 88 | preventFullImport: true, 89 | skipDefaultConversion: true, 90 | }, 91 | "@heroicons/react/24/outline": { 92 | transform: "@heroicons/react/24/outline/{{member}}", 93 | preventFullImport: true, 94 | }, 95 | "react-icons/fa": { 96 | transform: "react-icons/fa/{{member}}", 97 | preventFullImport: true, 98 | }, 99 | "react-icons/hi": { 100 | transform: "react-icons/hi/{{member}}", 101 | preventFullImport: true, 102 | }, 103 | }, 104 | // Output file tracing for serverless 105 | experimental: { 106 | outputFileTracingExcludes: { 107 | "*": [ 108 | "node_modules/@swc/core-linux-x64-gnu", 109 | "node_modules/@swc/core-linux-x64-musl", 110 | "node_modules/esbuild-linux-64/bin", 111 | ], 112 | }, 113 | optimizeCss: true, 114 | scrollRestoration: true, 115 | optimizePackageImports: ["react-icons", "lucide-react", "framer-motion"], 116 | }, 117 | }; 118 | 119 | export default nextConfig; 120 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "devops-cheatsheet", 3 | "version": "0.1.0", 4 | "private": true, 5 | "type": "module", 6 | "scripts": { 7 | "dev": "next dev", 8 | "prebuild": "node scripts/process-markdown.mjs", 9 | "build": "next build", 10 | "start": "next start", 11 | "lint": "next lint" 12 | }, 13 | "dependencies": { 14 | "@tailwindcss/typography": "^0.5.16", 15 | "framer-motion": "^12.23.9", 16 | "gray-matter": "^4.0.3", 17 | "isomorphic-dompurify": "^2.24.0", 18 | "lucide-react": "^0.510.0", 19 | "marked": "^15.0.11", 20 | "next": "^14.1.0", 21 | "react": "^18.3.1", 22 | "react-dom": "^18.3.1", 23 | "react-hot-toast": "^2.5.2", 24 | "react-icons": "^5.5.0", 25 | "react-markdown": "^10.1.0", 26 | "react-syntax-highlighter": "^5.8.0", 27 | "remark-gfm": "^4.0.1" 28 | }, 29 | "devDependencies": { 30 | "@types/node": "^20.11.0", 31 | "@types/react": "^18.2.0", 32 | "@types/react-dom": "^18.2.0", 33 | "autoprefixer": "^10.4.17", 34 | "critters": "^0.0.23", 35 | "eslint": "8.57.1", 36 | "eslint-config-next": "15.4.6", 37 | "postcss": "^8.4.35", 38 | "tailwindcss": "^3.4.1", 39 | "typescript": "^5.3.3" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /postcss.config.js: -------------------------------------------------------------------------------- 1 | export default { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | } -------------------------------------------------------------------------------- /public/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NotHarshhaa/devops-cheatsheet/b4c1a7361f9f167685ca6968ebdbae3b3ef5bfd4/public/android-chrome-192x192.png -------------------------------------------------------------------------------- /public/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NotHarshhaa/devops-cheatsheet/b4c1a7361f9f167685ca6968ebdbae3b3ef5bfd4/public/android-chrome-512x512.png -------------------------------------------------------------------------------- /public/apple-touch-icon.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/NotHarshhaa/devops-cheatsheet/b4c1a7361f9f167685ca6968ebdbae3b3ef5bfd4/public/apple-touch-icon.png -------------------------------------------------------------------------------- /public/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NotHarshhaa/devops-cheatsheet/b4c1a7361f9f167685ca6968ebdbae3b3ef5bfd4/public/favicon-16x16.png -------------------------------------------------------------------------------- /public/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NotHarshhaa/devops-cheatsheet/b4c1a7361f9f167685ca6968ebdbae3b3ef5bfd4/public/favicon-32x32.png -------------------------------------------------------------------------------- /public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NotHarshhaa/devops-cheatsheet/b4c1a7361f9f167685ca6968ebdbae3b3ef5bfd4/public/favicon.ico -------------------------------------------------------------------------------- /public/grid-pattern.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /public/icons/README.md: -------------------------------------------------------------------------------- 1 | # Tool Icons Directory 2 | 3 | This directory contains SVG icons for various DevOps tools used in the cheatsheet. 4 | 5 | ## Required Icons 6 | 7 | Please add the following SVG icons to this directory: 8 | 9 | - github-actions.svg 10 | - gitlab-ci.svg 11 | - circleci.svg 12 | - jenkins.svg 13 | - kubernetes.svg 14 | - docker.svg 15 | - terraform.svg 16 | - ansible.svg 17 | 18 | ## Icon Guidelines 19 | 20 | 1. Use official brand SVGs when possible 21 | 2. Icons should be simple, monochrome SVGs 22 | 3. Preferred size: 24x24 or 48x48 pixels 23 | 4. Use white (#FFFFFF) as the primary color for good contrast 24 | 5. Remove any background elements 25 | 6. 
Optimize SVGs using tools like SVGO 26 | 27 | ## Sources for Official Logos 28 | 29 | - GitHub Actions: https://github.com/logos 30 | - GitLab: https://about.gitlab.com/press/press-kit/ 31 | - CircleCI: https://circleci.com/press/ 32 | - Jenkins: https://www.jenkins.io/artwork/ 33 | - Kubernetes: https://github.com/kubernetes/community/tree/master/icons 34 | - Docker: https://www.docker.com/company/newsroom/media-resources/ 35 | - Terraform: https://www.hashicorp.com/brand 36 | - Ansible: https://www.ansible.com/logos -------------------------------------------------------------------------------- /public/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NotHarshhaa/devops-cheatsheet/b4c1a7361f9f167685ca6968ebdbae3b3ef5bfd4/public/logo.png -------------------------------------------------------------------------------- /public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DevOps Cheatsheet Hub", 3 | "short_name": "DevOps Hub", 4 | "description": "A comprehensive collection of DevOps tools and practices", 5 | "start_url": "/", 6 | "display": "standalone", 7 | "background_color": "#ffffff", 8 | "theme_color": "#3b82f6", 9 | "icons": [ 10 | { 11 | "src": "/android-icon.png", 12 | "sizes": "192x192", 13 | "type": "image/png" 14 | }, 15 | { 16 | "src": "/logo.png", 17 | "sizes": "512x512", 18 | "type": "image/png" 19 | } 20 | ] 21 | } -------------------------------------------------------------------------------- /public/placeholder.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | Image Loading 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /public/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Allow: / 3 | 4 | Sitemap: https://devops-cheatsheet.com/sitemap.xml -------------------------------------------------------------------------------- /public/site.webmanifest: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DevOps Cheatsheet Hub", 3 | "short_name": "DevOps Hub", 4 | "description": "A comprehensive collection of DevOps tools and practices", 5 | "start_url": "/", 6 | "display": "standalone", 7 | "background_color": "#ffffff", 8 | "theme_color": "#3b82f6", 9 | "icons": [ 10 | { 11 | "src": "/android-chrome-192x192.png", 12 | "sizes": "192x192", 13 | "type": "image/png" 14 | }, 15 | { 16 | "src": "/android-chrome-512x512.png", 17 | "sizes": "512x512", 18 | "type": "image/png" 19 | } 20 | ] 21 | } -------------------------------------------------------------------------------- /public/sitemap.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | https://devops-cheatsheet.com/ 5 | daily 6 | 1.0 7 | 8 | 9 | https://devops-cheatsheet.com/categories 10 | weekly 11 | 0.8 12 | 13 | 14 | https://devops-cheatsheet.com/about 15 | monthly 16 | 0.5 17 | 18 | -------------------------------------------------------------------------------- /scripts/categories.js: -------------------------------------------------------------------------------- 1 | export const categories = [ 2 | 'CI-CD', 3 | 'Containerization', 4 | 'Cloud', 5 | 'Infrastructure-Management', 6 | 'Version-Control', 7 | 'Security', 8 | 'Networking', 9 | 'Monitoring', 10 | ]; 
-------------------------------------------------------------------------------- /scripts/categoryData.js: -------------------------------------------------------------------------------- 1 | export const categories = [ 2 | 'CI-CD', 3 | 'Containerization', 4 | 'Cloud', 5 | 'Infrastructure-Management', 6 | 'Version-Control', 7 | 'Security', 8 | 'Networking', 9 | 'Monitoring', 10 | ]; -------------------------------------------------------------------------------- /scripts/process-markdown.mjs: -------------------------------------------------------------------------------- 1 | import fs from 'fs/promises'; 2 | import path from 'path'; 3 | import matter from 'gray-matter'; 4 | import { categories } from './categoryData.js'; 5 | 6 | async function processMarkdownFiles() { 7 | const output = { 8 | metadata: {}, 9 | content: {} 10 | }; 11 | 12 | for (const category of categories) { 13 | try { 14 | const categoryPath = path.join(process.cwd(), category); 15 | const files = await fs.readdir(categoryPath); 16 | 17 | for (const file of files) { 18 | if (file.endsWith('.md')) { 19 | const filePath = path.join(categoryPath, file); 20 | const fileContent = await fs.readFile(filePath, 'utf-8'); 21 | const { data, content } = matter(fileContent); 22 | const slug = file.replace('.md', ''); 23 | 24 | // Store metadata and content separately 25 | output.metadata[`${category}/${slug}`] = { 26 | title: data.title || slug, 27 | description: data.description || '', 28 | category: category, 29 | icon: data.icon || '📄', 30 | slug: slug, 31 | difficulty: data.difficulty || 'Beginner', 32 | popularity: data.popularity || 0, 33 | tags: data.tags || [], 34 | updatedAt: data.updatedAt || new Date().toISOString() 35 | }; 36 | 37 | output.content[`${category}/${slug}`] = content; 38 | } 39 | } 40 | } catch (error) { 41 | console.error(`Error processing category ${category}:`, error); 42 | } 43 | } 44 | 45 | // Create the public directory if it doesn't exist 46 | const publicDir = path.join(process.cwd(), 'public', 'static'); 47 | await fs.mkdir(publicDir, { recursive: true }); 48 | 49 | // Write the processed data 50 | await fs.writeFile( 51 | path.join(publicDir, 'cheatsheets-metadata.json'), 52 | JSON.stringify(output.metadata, null, 2) 53 | ); 54 | await fs.writeFile( 55 | path.join(publicDir, 'cheatsheets-content.json'), 56 | JSON.stringify(output.content, null, 2) 57 | ); 58 | 59 | console.log('Markdown files processed successfully!'); 60 | } 61 | 62 | processMarkdownFiles().catch(console.error); -------------------------------------------------------------------------------- /src/app/[category]/[slug]/CheatsheetPageClient.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | 4 | import { CheatsheetTemplate } from '@/components/CheatsheetTemplate'; 5 | import { marked } from 'marked'; 6 | import { type Cheatsheet } from '@/data/cheatsheets'; 7 | import { useEffect, useState } from 'react'; 8 | import { type Category } from '@/utils/categoryData'; 9 | import { AlertTriangle } from 'lucide-react'; 10 | import Link from 'next/link'; 11 | 12 | interface Props { 13 | params: { 14 | category: string; 15 | slug: string; 16 | }; 17 | } 18 | 19 | type CheatsheetContent = { 20 | title: string; 21 | category: Category; 22 | icon: string; 23 | description: string; 24 | difficulty: Cheatsheet['difficulty']; 25 | popularity: number; 26 | parsedContent: string; 27 | }; 28 | 29 | export function CheatsheetPageClient({ params }: Props) { 30 | const [content, setContent] = 
useState(null); 31 | const [loading, setLoading] = useState(true); 32 | const [error, setError] = useState(null); 33 | 34 | useEffect(() => { 35 | const fetchContent = async () => { 36 | try { 37 | const protocol = process.env.NODE_ENV === 'development' ? 'http' : 'https'; 38 | const host = typeof window === 'undefined' ? 'localhost:3000' : window.location.host; 39 | const baseUrl = `${protocol}://${host}`; 40 | 41 | const response = await fetch(`${baseUrl}/api/cheatsheets/${params.category}/${params.slug}`); 42 | 43 | if (!response.ok) { 44 | if (response.status === 404) { 45 | throw new Error('Cheatsheet not found'); 46 | } 47 | throw new Error('Failed to fetch cheatsheet'); 48 | } 49 | 50 | const cheatsheet = await response.json(); 51 | 52 | if (!cheatsheet || !cheatsheet.content) { 53 | throw new Error('Invalid cheatsheet data'); 54 | } 55 | 56 | const parsedContent = await marked(cheatsheet.content); 57 | 58 | setContent({ 59 | title: cheatsheet.title, 60 | category: cheatsheet.category as Category, 61 | icon: cheatsheet.icon, 62 | description: cheatsheet.description, 63 | difficulty: cheatsheet.difficulty, 64 | popularity: cheatsheet.popularity, 65 | parsedContent, 66 | }); 67 | } catch (error) { 68 | console.error('Error fetching cheatsheet:', error); 69 | setError(error instanceof Error ? error.message : 'An unexpected error occurred'); 70 | } finally { 71 | setLoading(false); 72 | } 73 | }; 74 | 75 | fetchContent(); 76 | }, [params.category, params.slug]); 77 | 78 | if (loading) { 79 | return ( 80 |
81 |
82 |
83 |
84 |
85 | {[1, 2, 3, 4, 5].map((i) => ( 86 |
87 | ))} 88 |
89 |
90 |
91 | ); 92 | } 93 | 94 | if (error) { 95 | return ( 96 |
97 |
98 | 99 |

100 | {error === 'Cheatsheet not found' ? 'Cheatsheet not found!' : 'Something went wrong!'} 101 |

102 |

103 | {error === 'Cheatsheet not found' 104 | ? "We couldn't find the cheatsheet you're looking for. It might have been moved or deleted." 105 | : "We couldn't load the cheatsheet you requested. Please try again later."} 106 |

107 |
108 | 114 | 118 | Go back home 119 | 120 |
121 |
122 |
123 | ); 124 | } 125 | 126 | if (!content) { 127 | return null; 128 | } 129 | 130 | const sections = [ 131 | { 132 | id: 'content', 133 | title: 'Content', 134 | type: 'text' as const, 135 | content: [ 136 | { 137 | type: 'text' as const, 138 | value: content.parsedContent 139 | } 140 | ] 141 | } 142 | ]; 143 | 144 | return ( 145 | 155 | ); 156 | } -------------------------------------------------------------------------------- /src/app/[category]/[slug]/error.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import { useEffect } from 'react'; 4 | import Link from 'next/link'; 5 | import { AlertTriangle } from 'lucide-react'; 6 | 7 | export default function Error({ 8 | error, 9 | reset, 10 | }: { 11 | error: Error & { digest?: string }; 12 | reset: () => void; 13 | }) { 14 | useEffect(() => { 15 | console.error(error); 16 | }, [error]); 17 | 18 | return ( 19 |
20 |
21 | 22 |

23 | Something went wrong! 24 |

25 |

26 | We couldn't load the cheatsheet you requested. Please try again later. 27 |

28 |
29 | 35 | 39 | Go back home 40 | 41 |
42 |
43 |
44 | ); 45 | } -------------------------------------------------------------------------------- /src/app/[category]/[slug]/loading.tsx: -------------------------------------------------------------------------------- 1 | export default function Loading() { 2 | return ( 3 |
4 |
5 |
6 |
7 | 8 |
9 |
10 |
11 |
12 | 13 |
14 | 15 |
16 |
17 |
18 |
19 |
20 |
21 | ); 22 | } -------------------------------------------------------------------------------- /src/app/[category]/[slug]/page.tsx: -------------------------------------------------------------------------------- 1 | import { Metadata } from 'next'; 2 | import { getCheatsheetBySlug } from '@/utils/markdown'; 3 | import { categoryData, type Category } from '@/utils/categoryData'; 4 | import { getAllCheatsheets } from '@/data/cheatsheets'; 5 | import { CheatsheetPageClient } from './CheatsheetPageClient'; 6 | 7 | interface Props { 8 | params: { 9 | category: string; 10 | slug: string; 11 | }; 12 | } 13 | 14 | export async function generateMetadata({ params }: Props): Promise { 15 | const cheatsheet = await getCheatsheetBySlug(params.category as Category, params.slug); 16 | if (!cheatsheet) return {}; 17 | 18 | return { 19 | title: `${cheatsheet.title} | DevOps Cheatsheet`, 20 | description: cheatsheet.description, 21 | }; 22 | } 23 | 24 | export async function generateStaticParams() { 25 | // Get all categories 26 | const categories = Object.keys(categoryData) as Category[]; 27 | 28 | // For each category, get all cheatsheets 29 | const params = await Promise.all( 30 | categories.map(async (category) => { 31 | const cheatsheets = await getAllCheatsheets(category); 32 | // Map each cheatsheet to its params 33 | return cheatsheets.map((cheatsheet) => ({ 34 | category: category, 35 | slug: cheatsheet.slug, 36 | })); 37 | }) 38 | ); 39 | 40 | // Flatten the array of arrays into a single array of params and add the 404 route 41 | return [ 42 | ...params.flat(), 43 | { 44 | category: '404', 45 | slug: '404', 46 | }, 47 | ]; 48 | } 49 | 50 | export default function CheatsheetPage({ params }: Props) { 51 | // If this is the 404 route, return null to trigger the not-found page 52 | if (params.category === '404' || params.slug === '404') { 53 | return null; 54 | } 55 | 56 | // Cast the category to Category type since we know it's valid from generateStaticParams 57 | return ; 58 | } -------------------------------------------------------------------------------- /src/app/[category]/loading.tsx: -------------------------------------------------------------------------------- 1 | export default function Loading() { 2 | return ( 3 |
4 |
5 | {/* Category Header Skeleton */} 6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | 16 | {/* Tools Grid Skeleton */} 17 |
18 | {[...Array(6)].map((_, i) => ( 19 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 | ))} 37 |
38 |
39 |
40 | ); 41 | } -------------------------------------------------------------------------------- /src/app/[category]/not-found.tsx: -------------------------------------------------------------------------------- 1 | import Link from 'next/link'; 2 | 3 | export default function NotFound() { 4 | return ( 5 |
6 |
7 |
8 |

9 | Category Not Found 10 |

11 |

12 | Sorry, we couldn't find the category you're looking for. 13 |

14 |
15 | 19 | Go back home 20 | 26 | 32 | 33 | 34 |
35 |
36 |
37 |
38 | ); 39 | } -------------------------------------------------------------------------------- /src/app/[category]/page.tsx: -------------------------------------------------------------------------------- 1 | import { CategoryPageClient } from './CategoryPageClient'; 2 | import { categories } from "@/utils/categoryData"; 3 | 4 | interface Props { 5 | params: { 6 | category: string; 7 | }; 8 | } 9 | 10 | export function generateStaticParams() { 11 | return categories.map((category) => ({ 12 | category: category, 13 | })); 14 | } 15 | 16 | export default function CategoryPage({ params }: Props) { 17 | // If the category is 404, show the NotFound component 18 | if (params.category === '404') { 19 | return null; // This will trigger the not-found.tsx page 20 | } 21 | return ; 22 | } -------------------------------------------------------------------------------- /src/app/api/cheatsheets/[category]/[slug]/route.ts: -------------------------------------------------------------------------------- 1 | import { getCheatsheetBySlug } from '@/utils/markdown'; 2 | import { NextRequest, NextResponse } from 'next/server'; 3 | import { categoryData } from '@/utils/categoryData'; 4 | import { getAllCheatsheets } from '@/data/cheatsheets'; 5 | 6 | export async function generateStaticParams() { 7 | // Get all categories 8 | const categories = Object.keys(categoryData); 9 | 10 | // For each category, get all cheatsheets 11 | const params = await Promise.all( 12 | categories.map(async (category) => { 13 | const cheatsheets = await getAllCheatsheets(category); 14 | // Map each cheatsheet to its params 15 | return cheatsheets.map((cheatsheet) => ({ 16 | category: category, 17 | slug: cheatsheet.slug, 18 | })); 19 | }) 20 | ); 21 | 22 | // Flatten the array of arrays into a single array of params 23 | return params.flat(); 24 | } 25 | 26 | export async function GET( 27 | request: NextRequest, 28 | { params }: { params: { category: string; slug: string } } 29 | ) { 30 | try { 31 | const { category, slug } = params; 32 | const cheatsheet = await getCheatsheetBySlug(category, slug); 33 | 34 | if (!cheatsheet) { 35 | return NextResponse.json( 36 | { error: 'Cheatsheet not found' }, 37 | { status: 404 } 38 | ); 39 | } 40 | 41 | return NextResponse.json(cheatsheet); 42 | } catch (error) { 43 | console.error('Error fetching cheatsheet:', error); 44 | return NextResponse.json( 45 | { error: 'Failed to fetch cheatsheet' }, 46 | { status: 500 } 47 | ); 48 | } 49 | } -------------------------------------------------------------------------------- /src/app/api/cheatsheets/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | import { cheatsheets } from '@/data/cheatsheets'; 3 | import { categoryData } from '@/utils/categoryData'; 4 | 5 | // For static export, we need to generate all possible combinations of query parameters 6 | export async function generateStaticParams() { 7 | const categories = ['', ...Object.keys(categoryData)]; 8 | const limits = [10, 20, 50, 100, 1000]; // Common page size limits 9 | const pages = [1, 2, 3, 4, 5]; // First 5 pages 10 | 11 | const params = []; 12 | for (const category of categories) { 13 | for (const limit of limits) { 14 | for (const page of pages) { 15 | params.push({ 16 | searchParams: new URLSearchParams({ 17 | ...(category && { category }), 18 | limit: limit.toString(), 19 | page: page.toString(), 20 | }).toString(), 21 | }); 22 | } 23 | } 24 | } 25 | 26 | return params; 27 | } 28 | 29 | 
export async function GET(request: Request) { 30 | const { searchParams } = new URL(request.url); 31 | const limit = parseInt(searchParams.get('limit') || '10'); 32 | const page = parseInt(searchParams.get('page') || '1'); 33 | const query = searchParams.get('q') || ''; 34 | const category = searchParams.get('category') || ''; 35 | 36 | let filteredCheatsheets = [...cheatsheets]; 37 | 38 | // Apply category filter if category exists 39 | if (category) { 40 | filteredCheatsheets = filteredCheatsheets.filter(cheatsheet => 41 | cheatsheet.category === category 42 | ); 43 | } 44 | 45 | // Apply search filter if query exists 46 | if (query) { 47 | filteredCheatsheets = filteredCheatsheets.filter(cheatsheet => 48 | cheatsheet.title.toLowerCase().includes(query.toLowerCase()) || 49 | cheatsheet.description.toLowerCase().includes(query.toLowerCase()) || 50 | cheatsheet.tags.some(tag => tag.toLowerCase().includes(query.toLowerCase())) 51 | ); 52 | } 53 | 54 | // Calculate pagination 55 | const start = (page - 1) * limit; 56 | const end = start + limit; 57 | const paginatedCheatsheets = filteredCheatsheets.slice(start, end); 58 | 59 | return NextResponse.json({ 60 | cheatsheets: paginatedCheatsheets, 61 | total: filteredCheatsheets.length, 62 | page, 63 | limit, 64 | totalPages: Math.ceil(filteredCheatsheets.length / limit) 65 | }); 66 | } -------------------------------------------------------------------------------- /src/app/categories/page.tsx: -------------------------------------------------------------------------------- 1 | import { Tag, ArrowDown } from "lucide-react"; 2 | import { categories, categoryData } from "@/utils/categoryData"; 3 | import { CategoryCard } from "@/components/CategoryCard"; 4 | import { cheatsheets } from "@/data/cheatsheets"; 5 | 6 | export default function CategoriesPage() { 7 | const categoriesMap = cheatsheets.reduce( 8 | (acc, cheatsheet) => { 9 | if (!acc[cheatsheet.category]) { 10 | acc[cheatsheet.category] = []; 11 | } 12 | acc[cheatsheet.category].push(cheatsheet); 13 | return acc; 14 | }, 15 | {} as Record, 16 | ); 17 | 18 | const totalTools = Object.values(categoriesMap).reduce( 19 | (acc, curr) => acc + curr.length, 20 | 0, 21 | ); 22 | 23 | return ( 24 |
25 | {/* Hero Section with Background */} 26 |
27 | {/* Decorative Elements */} 28 |
29 |
30 |
31 | 32 | {/* Grid Pattern */} 33 |
34 |
35 | 36 |
37 |
38 |
39 |
40 |
41 | 42 | 43 | Explore & Discover 44 | 45 |
46 | 47 |

48 | DevOps Categories 49 |

50 |

51 | Explore our comprehensive collection of DevOps tools and 52 | resources, organized by category to supercharge your workflow. 53 |

54 |
55 | 56 |
57 |
58 |
59 |
60 | {totalTools}+ 61 |
62 |
Tools & Resources
63 |
64 | {categories.length} Categories 65 |
66 |
67 |
68 |
69 |
70 | 71 |
72 | 73 |
74 |
75 | 76 | {/* Wave divider */} 77 |
78 | 83 | 84 | 85 |
86 |
87 |
88 | 89 | {/* Categories Grid */} 90 |
91 |
92 | {categories.map((category) => { 93 | const info = categoryData[category]; 94 | return ( 95 |
99 | 105 |
106 | ); 107 | })} 108 |
109 |
110 | 111 | {/* Quick Stats */} 112 |
113 |
114 |
115 |
116 |
117 | {totalTools}+ 118 |
119 |
Total Tools
120 |
121 | Curated resources for DevOps professionals 122 |
123 |
124 | 125 |
126 |
127 | {categories.length} 128 |
129 |
Categories
130 |
131 | Organized for easy discovery 132 |
133 |
134 | 135 |
136 |
24/7
137 |
Community Support
138 |
139 | Always here to help you succeed 140 |
141 |
142 |
143 |
144 |
145 |
146 | ); 147 | } 148 | -------------------------------------------------------------------------------- /src/app/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | @layer base { 6 | :root { 7 | --background: 0 0% 100%; 8 | --foreground: 222.2 84% 4.9%; 9 | --toast-bg: #ffffff; 10 | --toast-color: #111827; 11 | --animation-duration: 1; 12 | } 13 | 14 | .dark { 15 | --toast-bg: #1f2937; 16 | --toast-color: #f9fafb; 17 | --animation-duration: 0.8; 18 | } 19 | 20 | body { 21 | @apply bg-white text-gray-900 dark:bg-black dark:text-gray-100 overflow-x-hidden; 22 | -webkit-font-smoothing: antialiased; 23 | -moz-osx-font-smoothing: grayscale; 24 | } 25 | 26 | /* Improve touch target size on mobile */ 27 | a, 28 | button { 29 | @apply touch-callout-none; 30 | } 31 | 32 | /* Optimize animations for reduced motion preferences */ 33 | @media (prefers-reduced-motion: reduce) { 34 | :root { 35 | --animation-duration: 0; 36 | } 37 | 38 | *, 39 | ::before, 40 | ::after { 41 | animation-duration: 0.01ms !important; 42 | animation-iteration-count: 1 !important; 43 | transition-duration: 0.01ms !important; 44 | scroll-behavior: auto !important; 45 | } 46 | } 47 | } 48 | 49 | /* Custom animations */ 50 | @layer utilities { 51 | .animate-reverse { 52 | animation-direction: reverse; 53 | } 54 | 55 | .animate-delay-150 { 56 | animation-delay: 150ms; 57 | } 58 | 59 | /* Hardware acceleration for animations */ 60 | .backface-hidden { 61 | backface-visibility: hidden; 62 | transform: translateZ(0); 63 | } 64 | 65 | /* Optimize scrolling */ 66 | .scroll-optimize { 67 | -webkit-overflow-scrolling: touch; 68 | scroll-behavior: smooth; 69 | } 70 | 71 | /* Mobile-specific optimizations */ 72 | @media (max-width: 640px) { 73 | .mobile-optimize { 74 | will-change: transform; 75 | } 76 | 77 | /* Prevent text size adjustment on orientation change */ 78 | html, 79 | body { 80 | -webkit-text-size-adjust: 100%; 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import type { Metadata, Viewport } from "next"; 2 | import { Inter } from "next/font/google"; 3 | import "./globals.css"; 4 | import { Header } from "@/components/Header"; 5 | import { Footer } from "@/components/Footer"; 6 | import { ScrollToTop } from "@/components/ScrollToTop"; 7 | import { GlobalLoader } from "@/components/GlobalLoader"; 8 | import { Toaster } from "react-hot-toast"; 9 | import { ThemeProvider } from "@/context/ThemeContext"; 10 | 11 | const inter = Inter({ 12 | subsets: ["latin"], 13 | display: "swap", 14 | preload: true, 15 | fallback: ["system-ui", "sans-serif"], 16 | }); 17 | 18 | // Force static generation 19 | export const dynamic = "force-static"; 20 | export const revalidate = false; 21 | 22 | export const viewport: Viewport = { 23 | width: "device-width", 24 | initialScale: 1, 25 | maximumScale: 5, 26 | userScalable: true, 27 | themeColor: [ 28 | { media: "(prefers-color-scheme: light)", color: "#ffffff" }, 29 | { media: "(prefers-color-scheme: dark)", color: "#000000" }, 30 | ], 31 | }; 32 | 33 | export const metadata: Metadata = { 34 | title: "DevOps Cheatsheet Hub", 35 | description: "A comprehensive collection of DevOps tools and practices", 36 | icons: { 37 | // Favicons 38 | icon: [ 39 | { url: "/favicon.ico" }, 40 | { url: "/favicon-16x16.png", sizes: 
"16x16", type: "image/png" }, 41 | { url: "/favicon-32x32.png", sizes: "32x32", type: "image/png" }, 42 | ], 43 | // Apple Touch Icon 44 | apple: [ 45 | { url: "/apple-touch-icon.png", sizes: "180x180", type: "image/png" }, 46 | ], 47 | // Android Chrome Icons 48 | other: [ 49 | { 50 | url: "/android-chrome-192x192.png", 51 | sizes: "192x192", 52 | type: "image/png", 53 | }, 54 | { 55 | url: "/android-chrome-512x512.png", 56 | sizes: "512x512", 57 | type: "image/png", 58 | }, 59 | ], 60 | }, 61 | manifest: "/site.webmanifest", 62 | // Open Graph metadata 63 | openGraph: { 64 | title: "DevOps Cheatsheet Hub", 65 | description: "A comprehensive collection of DevOps tools and practices", 66 | url: "https://devops-cheatsheet.com", 67 | siteName: "DevOps Cheatsheet Hub", 68 | images: [ 69 | { 70 | url: "/og-image.png", 71 | width: 1200, 72 | height: 630, 73 | }, 74 | ], 75 | locale: "en_US", 76 | type: "website", 77 | }, 78 | // Twitter metadata 79 | twitter: { 80 | card: "summary_large_image", 81 | title: "DevOps Cheatsheet Hub", 82 | description: "A comprehensive collection of DevOps tools and practices", 83 | images: ["/twitter-image.png"], 84 | }, 85 | }; 86 | 87 | export default function RootLayout({ 88 | children, 89 | }: { 90 | children: React.ReactNode; 91 | }) { 92 | return ( 93 | 94 | 95 | 98 | 99 | 100 |
101 |
{children}
102 |