├── .gitignore ├── LICENSE ├── README.md ├── book_store ├── cadastro_de_livros │ ├── Dockerfile │ ├── README.md │ ├── app │ │ ├── __init__.py │ │ ├── databases.py │ │ ├── main.py │ │ └── models.py │ └── requirements.txt ├── ordem_de_compra │ ├── Dockerfile │ ├── README.md │ ├── app │ │ ├── __init__.py │ │ ├── databases.py │ │ ├── main.py │ │ └── models.py │ └── requirements.txt └── pagamento │ ├── Dockerfile │ ├── README.md │ ├── app │ ├── __init__.py │ ├── databases.py │ ├── main.py │ └── models.py │ └── requirements.txt ├── config ├── collector │ └── otelcol-config.yml └── grafana │ ├── dashboard.yml │ ├── dashboards │ ├── app-python.json │ └── otel-collector.json │ ├── datasource.yml │ ├── grafana.ini │ ├── k6_generate_traffic.js │ ├── loki.yaml │ ├── mimir.yaml │ └── tempo.yaml └── docker-compose.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | 170 | # PyPI configuration file 171 | .pypirc 172 | 173 | .vscode 174 | postgres_data 175 | ./postgres_data 176 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Semana DevOps LinuxTips 2 | 3 | Repositório contém os arquivos utilizados na apresentação sobre observabilidade da Semana DevOps LinuxTips apresentada no dia 11/02/2025. 4 | 5 | ## Pré-requisitos 6 | 7 | - [Docker](https://docs.docker.com/get-docker/) 8 | - [Docker Compose](https://docs.docker.com/compose/install/) 9 | 10 | ## Sistema Bookstore 11 | 12 | O sistema Bookstore é formado por microserviços que são responsáveis por gerenciar venda de livro. O sistema é composto por três microserviços: 13 | 14 | - Cadastro de Livros: Serviço responsável por gerenciar o cadastro de livros. 15 | - Ordem de Compra: Serviço responsável por gerenciar a ordem de compra de livros. 16 | - Pagamento: Serviço responsável por gerenciar o pagamento da ordem de compra. 17 | 18 | ### Diagrama de Arquitetura 19 | 20 | Sistema Bookstore é formado por três microserviços: 21 | 22 | ```mermaid 23 | graph TD 24 | subgraph Pagamento 25 | C1[POST /pagamentos] 26 | C2[GET /pagamentos/id] 27 | DB3[(Banco de Dados)] 28 | C1 --> DB3 29 | C2 --> DB3 30 | end 31 | 32 | subgraph Ordem_de_Compra 33 | A1[POST /ordens] --> A2[Validação no Cadastro de Livros] 34 | A2 --> A3[Chamada ao Serviço de Pagamento] 35 | A3 --> A4[Registro da Compra no Banco de Dados] 36 | DB1[(Banco de Dados)] 37 | A4 --> DB1 38 | end 39 | 40 | subgraph Cadastro_de_Livros 41 | B1[POST /livros] 42 | B2[GET /livros] 43 | B3[GET /livros/id] 44 | DB2[(Banco de Dados)] 45 | B1 --> DB2 46 | B2 --> DB2 47 | B3 --> DB2 48 | end 49 | 50 | %% Comunicação entre os serviços 51 | A2 -- "Valida Estoque" --> B3 52 | A3 -- "Processar Pagamento" --> C1 53 | C1 -- "Atualizar status de pagamento" --> A4 54 | ``` 55 | 56 | ## Implentação de Instrumentação Spring Código 57 | 58 | 1. Para implementar a instrumentação sem código, adicione os pacotes `opentelemetry-distro` e `opentelemetry-exporter-otlp` ao arquivo [requirements.txt](../../book_store/cadastro_de_livros/requirements.txt) do microserviço `Cadastro de Livro`. 59 | 60 | ```shell 61 | opentelemetry-distro==0.49b2 62 | opentelemetry-exporter-otlp==1.28.2 63 | opentelemetry-instrumentation-fastapi==0.49b2 64 | ``` 65 | 66 | O conteúdo do arquivo `requirements.txt` deve ser semelhante ao exemplo abaixo: 67 | 68 | ```shell 69 | fastapi==0.109.1 70 | uvicorn==0.15.0 71 | sqlalchemy==2.0.32 72 | sqlalchemy-utils==0.41.2 73 | psycopg2-binary==2.9.10 74 | requests==2.31.0 75 | opentelemetry-distro==0.49b2 76 | opentelemetry-exporter-otlp==1.28.2 77 | opentelemetry-instrumentation-fastapi==0.49b2 78 | ``` 79 | 80 | O [pacote OpenTelemetry distro](https://opentelemetry.io/docs/languages/python/distro/) fornece os mecanismos para configurar automaticamente as opções mais comuns para a instrumentação, como a definição para o `SDK TraceProvider`, `BatchSpanProcessor` e `OTLP SpanExporter`. 81 | 82 | O `opentelemetry-exporter-otlp` é um exportador que envia os dados de telemetria para o OpenTelemetry Collector ou qualquer outro [endpoint OTLP](https://opentelemetry.io/docs/specs/otel/protocol/exporter/). 83 | 84 | 1. O Próximo passo será adicionar o pacote `opentelemetry-bootstrap` ao arquivo [Dockerfile](../../book_store/cadastro_de_livros/Dockerfile) do microserviço `Cadastro de Livro`. 85 | 86 | ```shell 87 | RUN opentelemetry-bootstrap -a install 88 | ``` 89 | > Existe um comentário no arquivo `Dockerfile` que indica onde adicionar o trecho de código. 90 | 91 | O [opentelemetry-bootstrap](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/opentelemetry-instrumentation#opentelemetry-bootstrap) faz a leitura dos pacotes instalados na aplicação e instala as bibliotecas necessárias para instrumentar a aplicação. Por exemplo, estamos utilizando o pacote [FastAPI](https://fastapi.tiangolo.com/) o `opentelemetry-bootstrap` irá instalar o pacote `opentelemetry-instrumentation-fastapi` para instrumentar a aplicação. 92 | 93 | A lista completa de pacotes de instrumentação padrão e detectáveis está definida [aqui](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py). Ou acessando o documentação [OpenTelemetry Python Contrib](https://opentelemetry-python-contrib.readthedocs.io/en/latest/). 94 | 95 | 1. No `entrypoint` do [Dockerfile](../../book_store/cadastro_de_livros/Dockerfile) do microserviço `Cadastro de Livro`, adicione o prefixo `opentelemetry-instrument` ao comando de execução da aplicação. 96 | 97 | > Existe um comentário no arquivo `Dockerfile` que indica onde adicionar o trecho de código. 98 | 99 | ```Dockerfile 100 | CMD ["opentelemetry-instrument",.....] 101 | ``` 102 | 103 | O entrypoint do Dockerfile deve ser semelhante ao exemplo abaixo: 104 | 105 | ```Dockerfile 106 | CMD ["opentelemetry-instrument", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8080"] 107 | ``` 108 | 109 | O comando [opentelemetry-instrument](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/opentelemetry-instrumentation#opentelemetry-instrument) tentará detectar automaticamente os pacotes usados na aplicação e, quando possível, aplicará a instrumentação. 110 | 111 | Utilize as variáveis de ambiente para configurar o `opentelemetry-instrument`. Para isso, adicione as seguintes variáveis de ambiente no arquivo [docker-compose.yaml](../../docker-compose.yaml) no microserviço `Cadastro de Livro`. 112 | 113 | > Existe um comentário no arquivo `docker-compose.yaml` que indica onde adicionar o trecho de código. 114 | 115 | ```yaml 116 | environment: 117 | - OTEL_SERVICE_NAME=cadastro_de_livros 118 | - OTEL_RESOURCE_ATTRIBUTES=service.version=v0.0.1,service.env=dev 119 | - OTEL_EXPORTER_OTLP_ENDPOINT=http://otelcollector:4317 120 | - OTEL_EXPORTER_OTLP_PROTOCOL=grpc 121 | - OTEL_EXPORTER_OTLP_INSECURE=true 122 | - OTEL_PYTHON_LOG_CORRELATION=true 123 | ``` 124 | 125 | Estamos configurando o `OTEL_SERVICE_NAME` com o nome do serviço, `OTEL_RESOURCE_ATTRIBUTES` com os atributos do serviço, `OTEL_EXPORTER_OTLP_ENDPOINT` com o endpoint do OpenTelemetry Collector, `OTEL_EXPORTER_OTLP_INSECURE` para permitir conexões inseguras ao OpenTelemetry Collector e `OTEL_PYTHON_LOG_CORRELATION` para correlacionar os logs com as traces, nesse caso o `TRACE_ID` e `SPAN_ID` serão adicionados como campos no log. 126 | 127 | Para mais informações sobre as variáveis de ambiente, consulte a documentação do [OpenTelemetry](https://opentelemetry.io/docs/languages/sdk-configuration/general/) 128 | 129 | 1. Pronto! Agora, basta executar o comando `docker-compose up` para iniciar a aplicação. 130 | 131 | ```shell 132 | docker-compose up 133 | ``` 134 | 135 | 1. Acesse os endpoints da aplicação para gerar dados de telemetria: 136 | 137 | Ao acessar o endpoint, a aplicação irá listar todos os livros cadastrados. 138 | 139 | ```shell 140 | curl http://localhost:8080/livros/ 141 | ``` 142 | 143 | Ao acessar o endpoint, a aplicação irá adicionar um novo livro ao banco de dados. 144 | 145 | ```shell 146 | curl -X POST http://localhost:8080/livros/ -H "Content-Type: application/json" -d '{"titulo": "Semana DevOps LinuxTips - Containers", "estoque": 100}' 147 | 148 | curl -X POST http://localhost:8080/livros/ -H "Content-Type: application/json" -d '{"titulo": "Semana DevOps LinuxTips - Observabilidade", "estoque": 100}' 149 | 150 | curl -X POST http://localhost:8080/livros/ -H "Content-Type: application/json" -d '{"titulo": "Semana DevOps LinuxTips - Jenkins", "estoque": 0}' 151 | ``` 152 | 153 | Ao acessar o endpoint, a aplicação irá buscar os detalhes de um livro específico pelo ID. 154 | 155 | ```shell 156 | curl -X GET http://localhost:8080/livros/ 157 | ``` 158 | > Substitua `` pelo ID do livro que deseja buscar. 159 | 160 | Ao acessar o endpoint, a aplicação irá retornar todos os livros cadastrados. 161 | 162 | ```shell 163 | curl -X GET http://localhost:8080/livros/ 164 | ``` 165 | 166 | Dica: você pode utilizar o Swagger para testar os endpoints da aplicação. 167 | Acesse http://localhost:8080/docs para visualizar a documentação dos endpoints. 168 | 169 | 1. Acesse o Grafana para visualizar a telemetria gerada em http://localhost:3000. 170 | 171 | No menu `explorer` do Grafana, você pode visualizar as métricas, traces e logs. Selecione `service.name` = `cadastro-de-livros` para visualizar a telemetria gerada pela aplicação. 172 | 173 | ### O Que Esperar? 174 | 175 | Quando você acessar os endpoints da aplicação, o OpenTelemetry irá capturar as requisições e enviar para o OpenTelemetry Collector. O OpenTelemetry Collector irá processar e enviar a telemetria para Tempo, Prometheus e Loki. Por fim, você poderá visualizar a telemetria no Grafana. 176 | 177 | ## Exercício 178 | 179 | Agora que você implementou a instrumentação sem código na aplicação Cadastro de Livros, implemente a instrumentação sem código nas aplicações [Ordem de Compra](../../book_store/ordem_de_compra/) e [Pagamento](../../book_store/pagamento/). 180 | 181 | 1. Adicione os pacotes `opentelemetry-distro` e `opentelemetry-exporter-otlp` ao arquivo `requirements.txt` das aplicações `Ordem de Compra` e `Pagamento`. 182 | 1. Adicione o pacote `opentelemetry-bootstrap` ao arquivo `Dockerfile` das aplicações `Ordem de Compra` e `Pagamento`. 183 | 1. Adicione o prefixo `opentelemetry-instrument` ao comando de execução da aplicação no `entrypoint` do arquivo `Dockerfile` das aplicações `Ordem de Compra` e `Pagamento`. 184 | 1. Adicione as variáveis de ambiente no arquivo `docker-compose.yaml` das aplicações `Ordem de Compra` e `Pagamento`. 185 | 186 | > Dica: Utilize o comando `docker-compose up --build ` para reconstruir as imagens das aplicações `Ordem de Compra` e `Pagamento`. 187 | 188 | ### Resultado Esperado 189 | 190 | Após implementar a instrumentação sem código nas aplicações `Cadastro de Livros`, `Ordem de Compra` e `Pagamento`, você deve ser capaz de visualizar o trace gerado pelas aplicações no Grafana e seu ciclo de vida. 191 | -------------------------------------------------------------------------------- /book_store/cadastro_de_livros/Dockerfile: -------------------------------------------------------------------------------- 1 | # Imagem base 2 | FROM python:3.12-slim 3 | 4 | # Configura o diretório de trabalho 5 | WORKDIR /app 6 | 7 | # Copia os arquivos de dependências e instale-as 8 | COPY requirements.txt . 9 | RUN pip install --no-cache-dir -r requirements.txt 10 | 11 | # Copia o código da aplicação para o contêiner 12 | COPY . . 13 | 14 | ############################################################################################################################################### 15 | #### Adicione na linha abaixo o comando RUN opentelemetry-bootstrap -a install para instalar os pacotes necessário do OpenTelemetry Python #### 16 | 17 | # Expõe a porta 18 | EXPOSE 8080 19 | 20 | ######################################################################################## 21 | #### Adicione na linha abaixo o opentelemetry-instrument para instrumentar o código #### 22 | CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8080"] 23 | -------------------------------------------------------------------------------- /book_store/cadastro_de_livros/README.md: -------------------------------------------------------------------------------- 1 | ## API Cadastro de Livros 2 | 3 | API para gerenciar cadastro de livros. Permite criar, buscar, listar e gerenciar o estoque de livros em uma base de dados. 4 | 5 | ## Endpoints 6 | 7 | URL base: `http://localhost:8080` 8 | 9 | ### Adicionar livro 10 | 11 | POST /livros/ 12 | 13 | Cria um novo livro no banco de dados. 14 | 15 | Requisição: 16 | 17 | - Body (JSON): 18 | ```json 19 | { 20 | "titulo": "string", 21 | "estoque": "number" 22 | } 23 | ``` 24 | 25 | Resposta: 26 | 27 | - Status: 200 OK 28 | ```json 29 | { 30 | "id": "number", 31 | "titulo": "string", 32 | "estoque": "number" 33 | } 34 | ``` 35 | ### Listar livros 36 | 37 | GET /livros/ 38 | 39 | Lista todos os livros cadastrados no banco de dados. 40 | 41 | Resposta: 42 | 43 | - Status: 200 OK 44 | ```json 45 | [ 46 | { 47 | "id": "number", 48 | "titulo": "string", 49 | "estoque": "number" 50 | } 51 | ] 52 | ``` 53 | ### Buscar livro 54 | 55 | GET /livros/{id} 56 | 57 | Busca um livro pelo ID. 58 | 59 | Parametros: 60 | 61 | - `id` (int): ID do livro 62 | 63 | Resposta: 64 | 65 | - Status: 200 OK 66 | ```json 67 | { 68 | "id": "number", 69 | "titulo": "string", 70 | "estoque": "number" 71 | } 72 | ``` 73 | 74 | - Status: 404 Not Found 75 | ```json 76 | { 77 | "message": "Livro não encontrado" 78 | } 79 | ``` 80 | 81 | ## Tratamento de Erros 82 | 83 | Respostas de erro padrão: 84 | 85 | - 404 Not Found: Recurso não encontrado. 86 | - 500 Internal Server Error: Erro interno do servidor. 87 | -------------------------------------------------------------------------------- /book_store/cadastro_de_livros/app/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Indica que o diretório é um pacote Python 3 | """ 4 | import logging 5 | 6 | # Configuração global de logging 7 | logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") 8 | logger = logging.getLogger(__name__) 9 | -------------------------------------------------------------------------------- /book_store/cadastro_de_livros/app/databases.py: -------------------------------------------------------------------------------- 1 | """ 2 | Módulo responsável por criar a conexão com o banco de dados 3 | """ 4 | import os 5 | from sqlalchemy import create_engine 6 | from sqlalchemy.ext.declarative import declarative_base 7 | from sqlalchemy.orm import sessionmaker 8 | from sqlalchemy_utils import database_exists, create_database 9 | from . import logger 10 | 11 | # Obtém as credenciais do banco de dados das variáveis de ambiente 12 | DB_USER = os.getenv("POSTGRES_USER") 13 | DB_PASSWORD = os.getenv("POSTGRES_PASSWORD") 14 | DB_HOST = os.getenv("POSTGRES_HOST") 15 | DB_NAME = os.getenv("POSTGRES_DB") 16 | 17 | # URL de conexão com o banco de dados 18 | DATABASE_URL = f"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:5432/{DB_NAME}" 19 | 20 | # Cria a engine de conexão com o banco de dados 21 | engine = create_engine(DATABASE_URL) 22 | 23 | # Verifica e cria o banco de dados, caso necessário 24 | def initialize_database(): 25 | """ 26 | Inicializa o banco de dados, criando-o se não existir. 27 | """ 28 | try: 29 | if not database_exists(engine.url): 30 | logger.info(f"Banco de dados {DB_NAME} não existe. Criando...") 31 | create_database(engine.url) 32 | else: 33 | logger.info(f"Banco de dados {DB_NAME} já existe.") 34 | except Exception as e: 35 | logger.error(f"Erro ao inicializar o banco de dados: {e}") 36 | raise 37 | 38 | initialize_database() 39 | 40 | # Configura a sessão do banco de dados 41 | SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) 42 | Base = declarative_base() 43 | 44 | # Função que retorna uma sessão do banco de dados 45 | def get_db(): 46 | """ 47 | Obtém uma nova sessão do banco de dados. 48 | """ 49 | db = SessionLocal() 50 | try: 51 | yield db 52 | except Exception as e: 53 | db.rollback() 54 | raise 55 | finally: 56 | db.close() 57 | logger.info("Conexão com o banco de dados encerrada.") 58 | -------------------------------------------------------------------------------- /book_store/cadastro_de_livros/app/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Função principal que cria a aplicação FastAPI 3 | """ 4 | from fastapi import FastAPI, HTTPException, Depends 5 | from sqlalchemy.orm import Session 6 | from . import models 7 | from . import logger 8 | from .databases import engine, get_db 9 | 10 | # Cria as tabelas no banco de dados 11 | models.Base.metadata.create_all(bind=engine) 12 | 13 | # Cria a aplicação FastAPI 14 | app = FastAPI() 15 | 16 | # Define a rota para criar um livro 17 | @app.post("/livros/") 18 | def cria_livro(livro: models.LivroBase, db: Session = Depends(get_db)): 19 | """ 20 | Rota para criar um livro 21 | """ 22 | try: 23 | logger.info(f"Criando livro: {livro}") 24 | novo_livro = models.cria_livro(db=db, livro=livro) 25 | logger.info(f"Livro criado com sucesso: {livro}") 26 | return novo_livro 27 | except Exception as e: 28 | logger.error(f"Erro ao criar livro: {e}") 29 | raise HTTPException(status_code=500, detail="Erro ao criar livro") 30 | 31 | # Define a rota para listar livros por id 32 | @app.get("/livros/{id}") 33 | def busca_livro(id: int, db: Session = Depends(get_db)): 34 | """ 35 | Rota para buscar um livro pelo id 36 | """ 37 | try: 38 | logger.info(f"Buscando livro com id: {id}") 39 | livro = models.busca_livro(db, id) 40 | if livro is None: 41 | logger.warning(f"Livro com id {id} não encontrado") 42 | raise HTTPException(status_code=404, detail="Livro não encontrado") 43 | logger.info(f"Livro com ID: {id} encontrado com sucesso") 44 | return livro 45 | except HTTPException: 46 | raise 47 | except Exception as e: 48 | logger.error(f"Erro ao buscar livro: {e}") 49 | raise HTTPException(status_code=500, detail="Erro ao buscar livro") 50 | 51 | # Define a rota para listar todos os livros 52 | @app.get("/livros/") 53 | def lista_livros(db: Session = Depends(get_db)): 54 | """ 55 | Rota para listar todos os livros 56 | """ 57 | try: 58 | logger.info("Listando todos os livros") 59 | livros = models.lista_livros(db) 60 | logger.info(f"{len(livros)} livros encontrados") 61 | return livros 62 | except Exception as e: 63 | logger.error(f"Erro ao listar livros: {e}") 64 | raise HTTPException(status_code=500, detail="Erro ao listar livros") 65 | -------------------------------------------------------------------------------- /book_store/cadastro_de_livros/app/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Modulo responsável por manipular os dados do banco de dados 3 | """ 4 | from sqlalchemy import Column, Integer, String 5 | from sqlalchemy.orm import Session 6 | from pydantic import BaseModel 7 | from .databases import Base 8 | from . import logger 9 | 10 | # Modelo Pydantic para para entrada livre de dados 11 | class LivroBase(BaseModel): 12 | titulo: str 13 | estoque: int 14 | 15 | class Config: 16 | from_attributes = True 17 | 18 | # Define a classe Book que representa a tabela de livros no banco de dados 19 | class Livros(Base): 20 | __tablename__ = "livros" 21 | id = Column(Integer, primary_key=True, index=True) 22 | titulo = Column(String, index=True) 23 | estoque = Column(Integer) 24 | 25 | # Função que cria um livro no banco de dados 26 | def cria_livro(db: Session, livro: LivroBase): 27 | """ 28 | Função que cria um livro no banco de dados 29 | """ 30 | try: 31 | db_livro = Livros(titulo=livro.titulo, estoque=livro.estoque) 32 | db.add(db_livro) 33 | db.commit() 34 | db.refresh(db_livro) 35 | return db_livro 36 | except Exception as e: 37 | logger.error(f"Erro ao criar livro no banco de dados: {e}") 38 | raise 39 | 40 | # Função que retorna todos os livros do banco de dados 41 | def lista_livros(db: Session): 42 | """ 43 | Função que retorna todos os livros do banco de dados 44 | """ 45 | try: 46 | return db.query(Livros).all() 47 | except Exception as e: 48 | logger.error(f"Erro ao listar livros: {e}") 49 | raise 50 | 51 | # Função que retorna um livro do banco de dados 52 | def busca_livro(db: Session, livro_id: int): 53 | """ 54 | Função que retorna um livro do banco de dados 55 | """ 56 | try: 57 | return db.query(Livros).filter(Livros.id == livro_id).first() 58 | except Exception as e: 59 | logger.error(f"Erro ao buscar livro com id {livro_id}: {e}") 60 | raise 61 | -------------------------------------------------------------------------------- /book_store/cadastro_de_livros/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi==0.109.1 2 | uvicorn==0.15.0 3 | sqlalchemy==2.0.32 4 | sqlalchemy-utils==0.41.2 5 | psycopg2-binary==2.9.10 6 | requests==2.31.0 7 | -------------------------------------------------------------------------------- /book_store/ordem_de_compra/Dockerfile: -------------------------------------------------------------------------------- 1 | # Imagem base 2 | FROM python:3.12-slim 3 | 4 | # Configura o diretório de trabalho 5 | WORKDIR /app 6 | 7 | # Copia os arquivos de dependências e instale-as 8 | COPY requirements.txt . 9 | RUN pip install --no-cache-dir -r requirements.txt 10 | 11 | # Copia o código da aplicação para o contêiner 12 | COPY . . 13 | 14 | ############################################################################################################################################### 15 | #### Adicione na linha abaixo o comando RUN opentelemetry-bootstrap -a install para instalar os pacotes necessário do OpenTelemetry Python #### 16 | 17 | # Expõe a porta 18 | EXPOSE 8081 19 | 20 | ######################################################################################## 21 | #### Adicione na linha abaixo o opentelemetry-instrument para instrumentar o código #### 22 | CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8081"] 23 | -------------------------------------------------------------------------------- /book_store/ordem_de_compra/README.md: -------------------------------------------------------------------------------- 1 | ## API Ordem de Compra 2 | 3 | API para gerenciar ordens de compra de livros. Permite criar e buscar ordens de compra. 4 | 5 | ## Endpoints 6 | 7 | URL base: `http://localhost:8081` 8 | 9 | ### Criar Ordem de Compra 10 | 11 | POST /ordens 12 | 13 | Cria uma nova ordem de compra no banco de dados. 14 | 15 | Requisição: 16 | 17 | - Body (JSON): 18 | ```json 19 | { 20 | "id_livro": "number" 21 | } 22 | ``` 23 | 24 | Resposta: 25 | 26 | - Status: 200 OK 27 | ```json 28 | { 29 | "id": "number", 30 | "id_livro": "number", 31 | "status": "string" 32 | } 33 | ``` 34 | 35 | ### Buscar Ordem de Compra 36 | 37 | GET /ordens/{id} 38 | 39 | Busca uma ordem de compra pelo ID. 40 | 41 | Parâmetros: 42 | 43 | - `id` (int): ID da ordem de compra 44 | 45 | Resposta: 46 | 47 | - Status: 200 OK 48 | ```json 49 | { 50 | "id": "number", 51 | "id_livro": "number", 52 | "status": "string" 53 | } 54 | ``` 55 | 56 | - Status: 404 Not Found 57 | ```json 58 | { 59 | "message": "Ordem não encontrada" 60 | } 61 | ``` 62 | 63 | ## Tratamento de Erros 64 | 65 | Respostas de erro padrão: 66 | 67 | - 404 Not Found: Recurso não encontrado. 68 | - 500 Internal Server Error: Erro interno do servidor. 69 | -------------------------------------------------------------------------------- /book_store/ordem_de_compra/app/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Indica que o diretório é um pacote Python 3 | """ 4 | import logging 5 | 6 | # Configuração global de logging 7 | logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") 8 | logger = logging.getLogger(__name__) 9 | -------------------------------------------------------------------------------- /book_store/ordem_de_compra/app/databases.py: -------------------------------------------------------------------------------- 1 | """ 2 | Módulo responsável por criar a conexão com o banco de dados 3 | """ 4 | import os 5 | from sqlalchemy import create_engine 6 | from sqlalchemy.ext.declarative import declarative_base 7 | from sqlalchemy.orm import sessionmaker 8 | from sqlalchemy_utils import database_exists, create_database 9 | from . import logger 10 | 11 | # Obtém as credenciais do banco de dados das variáveis de ambiente 12 | DB_USER = os.getenv("POSTGRES_USER") 13 | DB_PASSWORD = os.getenv("POSTGRES_PASSWORD") 14 | DB_HOST = os.getenv("POSTGRES_HOST") 15 | DB_NAME = os.getenv("POSTGRES_DB") 16 | 17 | # URL de conexão com o banco de dados 18 | DATABASE_URL = f"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:5432/{DB_NAME}" 19 | 20 | # Cria a engine de conexão com o banco de dados 21 | engine = create_engine(DATABASE_URL) 22 | 23 | # Verifica e cria o banco de dados, caso necessário 24 | def initialize_database(): 25 | """ 26 | Inicializa o banco de dados, criando-o se não existir. 27 | """ 28 | try: 29 | if not database_exists(engine.url): 30 | logger.info(f"Banco de dados {DB_NAME} não existe. Criando...") 31 | create_database(engine.url) 32 | else: 33 | logger.info(f"Banco de dados {DB_NAME} já existe.") 34 | except Exception as e: 35 | logger.error(f"Erro ao inicializar o banco de dados: {e}") 36 | raise 37 | 38 | initialize_database() 39 | 40 | # Configura a sessão do banco de dados 41 | SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) 42 | Base = declarative_base() 43 | 44 | # Função que retorna uma sessão do banco de dados 45 | def get_db(): 46 | """ 47 | Obtém uma nova sessão do banco de dados. 48 | """ 49 | db = SessionLocal() 50 | try: 51 | yield db 52 | except Exception as e: 53 | db.rollback() 54 | raise 55 | finally: 56 | db.close() 57 | logger.info("Conexão com o banco de dados encerrada.") 58 | -------------------------------------------------------------------------------- /book_store/ordem_de_compra/app/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Função principal que cria a aplicação FastAPI 3 | """ 4 | import os 5 | from fastapi import FastAPI, HTTPException, Depends 6 | from sqlalchemy.orm import Session 7 | import requests 8 | from . import models 9 | from .databases import engine, get_db 10 | from . import logger 11 | 12 | # Obetem url dos serviços pagamento e cadastro de livros 13 | PAYMENT_URL = os.getenv("PAYMENT_URL", "http://pagamento:8082") 14 | BOOK_URL = os.getenv("BOOK_URL", "http://cadastro_de_livros:8080") 15 | 16 | # Cria as tabelas no banco de dados 17 | models.Base.metadata.create_all(bind=engine) 18 | 19 | # Cria a aplicação FastAPI 20 | app = FastAPI() 21 | 22 | # Define a rota para criar uma ordem 23 | @app.post("/ordens/", response_model=models.Ordem) 24 | def cria_ordem(ordem: models.OrdemCreate, db: Session = Depends(get_db)): 25 | """ 26 | Rota para criar uma ordem de compra de um livro 27 | """ 28 | try: 29 | # Valida disponibilidade do livro no serviço de cadastro de livros 30 | livro_response = requests.get(f"{BOOK_URL}/livros/{ordem.id_livro}") 31 | if livro_response.status_code != 200: 32 | raise HTTPException(status_code=404, detail="Livro não encontrado") 33 | 34 | # Valida se o livro está disponível em estoque 35 | livro = livro_response.json() 36 | if livro["estoque"] <= 0: 37 | raise HTTPException(status_code=404, detail="Livro esgotado") 38 | 39 | # Cria ordem de compra 40 | db_ordem = models.cria_ordem(db=db, ordem=ordem) 41 | 42 | # Enviar pagamento para o serviço de Pagamento 43 | pagamento_response = requests.post(f"{PAYMENT_URL}/pagamentos", json={"id_ordem": db_ordem.id}) 44 | if pagamento_response.status_code != 200: 45 | raise HTTPException(status_code=400, detail="Falha no processamento do pagamento") 46 | pagamento_response = pagamento_response.json() 47 | 48 | # Atualiza status da ordem 49 | if pagamento_response["status"] == "Aprovado": 50 | db_ordem.status = "Concluído" 51 | else: 52 | db_ordem.status = "Pagamento Recusado" 53 | 54 | db.commit() 55 | db.refresh(db_ordem) 56 | 57 | return db_ordem 58 | except HTTPException: 59 | raise 60 | except Exception as e: 61 | logger.error(f"Erro ao criar ordem: {str(e)}") 62 | raise HTTPException(status_code=500, detail=f"Erro ao criar ordem {str(e)}") 63 | 64 | # Define a rota para listar ordens por id 65 | @app.get("/ordens/{id}", response_model=models.Ordem) 66 | def busca_ordem(id: int, db: Session = Depends(get_db)): 67 | """ 68 | Rota para buscar uma ordem pelo id 69 | """ 70 | try: 71 | logger.info(f"Buscando ordem com id: {id}") 72 | ordem = models.lista_ordem(db=db, id_ordem=id) 73 | if ordem is None: 74 | logger.warning(f"Ordem com id {id} não encontrada") 75 | raise HTTPException(status_code=404, detail="Ordem não encontrada") 76 | return ordem 77 | except HTTPException: 78 | raise 79 | except Exception as e: 80 | logger.error(f"Erro ao buscar ordem: {e}") 81 | raise HTTPException(status_code=500, detail="Erro ao buscar ordem") 82 | -------------------------------------------------------------------------------- /book_store/ordem_de_compra/app/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Modulo responsável por manipular os dados do banco de dados 3 | """ 4 | from sqlalchemy import Column, Integer, String 5 | from sqlalchemy.orm import Session 6 | from pydantic import BaseModel 7 | from .databases import Base 8 | from . import logger 9 | 10 | # Define o atributo comum a todas as classes 11 | class OrdemBase(BaseModel): 12 | id_livro: int 13 | 14 | # Classe que herda os atributos da classe OrdemBase e adiciona os campos id e status 15 | class OrdemCreate(OrdemBase): 16 | pass 17 | 18 | # Classe que representa a entidade completa de uma Ordem 19 | class Ordem(OrdemBase): 20 | id: int # Identificação da ordem 21 | status: str # Status da ordem (ex. Pendente, Aprovado, Cancelado) 22 | 23 | class Config: 24 | from_attributes = True # Permite que a classe seja compatível com objetos ORM (Object Relational Mapping) 25 | 26 | # Define a classe OrdemDB contendo os campos para criação da tabela no banco de dados 27 | class OrdemDB(Base): 28 | __tablename__ = "ordens" # Nome da tabela no banco de dados 29 | id = Column(Integer, primary_key=True, index=True) # Campo de identificação da ordem 30 | id_livro = Column(Integer) # Campo de identificação do livro 31 | status = Column(String) # Campo de status da ordem 32 | 33 | # Função que cria uma ordem no banco de dados 34 | def cria_ordem(db: Session, ordem: OrdemCreate): 35 | """ 36 | Função que cria uma ordem no banco de dados 37 | """ 38 | db_ordem = OrdemDB( 39 | id_livro=ordem.id_livro, 40 | status="Pendente" 41 | ) 42 | db.add(db_ordem) 43 | db.commit() 44 | db.refresh(db_ordem) 45 | return db_ordem 46 | 47 | # Função que retorna ordem do banco de dados 48 | def lista_ordem(db: Session, id_ordem: int): 49 | """ 50 | Função que retorna uma ordem do banco de dados 51 | """ 52 | try: 53 | return db.query(OrdemDB).filter(OrdemDB.id == id_ordem).first() 54 | except Exception as e: 55 | logger.error(f"Erro ao buscar ordem com id {id_ordem}: {e}") 56 | raise 57 | -------------------------------------------------------------------------------- /book_store/ordem_de_compra/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi==0.115.6 2 | uvicorn==0.34.0 3 | sqlalchemy==2.0.37 4 | sqlalchemy-utils==0.41.2 5 | psycopg2-binary==2.9.10 6 | requests==2.32.3 7 | -------------------------------------------------------------------------------- /book_store/pagamento/Dockerfile: -------------------------------------------------------------------------------- 1 | # Imagem base 2 | FROM python:3.12-slim 3 | 4 | # Configura o diretório de trabalho 5 | WORKDIR /app 6 | 7 | # Copia os arquivos de dependências e instale-as 8 | COPY requirements.txt . 9 | RUN pip install --no-cache-dir -r requirements.txt 10 | 11 | # Copia o código da aplicação para o contêiner 12 | COPY . . 13 | 14 | ############################################################################################################################################### 15 | #### Adicione na linha abaixo o comando RUN opentelemetry-bootstrap -a install para instalar os pacotes necessário do OpenTelemetry Python #### 16 | 17 | # Expõe a porta 18 | EXPOSE 8082 19 | 20 | ######################################################################################## 21 | #### Adicione na linha abaixo o opentelemetry-instrument para instrumentar o código #### 22 | CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8082"] 23 | -------------------------------------------------------------------------------- /book_store/pagamento/README.md: -------------------------------------------------------------------------------- 1 | # API Pagamento 2 | 3 | API para gerenciar pagamentos de ordens de compra. Permite processar e consultar pagamentos. 4 | 5 | --- 6 | 7 | ## Endpoints 8 | 9 | **URL base**: `http://localhost:8082` 10 | 11 | --- 12 | 13 | ### Processar Pagamento 14 | 15 | **POST /pagamentos** 16 | 17 | Processa o pagamento de uma ordem de compra e registra o status do pagamento. 18 | 19 | #### Requisição 20 | 21 | - **Body (JSON)**: 22 | ```json 23 | { 24 | "id_ordem": "number", 25 | } 26 | ``` 27 | 28 | #### Resposta 29 | 30 | - **Status: 200 OK** 31 | ```json 32 | { 33 | "id": "number", 34 | "id_ordem": "number", 35 | "status": "string" 36 | } 37 | ``` 38 | 39 | ### Consultar Pagamento 40 | 41 | **GET /pagamentos/{id}** 42 | 43 | Consulta os detalhes de um pagamento pelo ID. 44 | 45 | #### Parâmetros 46 | 47 | - `id` (int): ID do pagamento. 48 | 49 | #### Resposta 50 | 51 | - **Status: 200 OK** 52 | ```json 53 | { 54 | "id": "number", 55 | "id_ordem": "number", 56 | "status": "string" 57 | } 58 | ``` 59 | 60 | - **Status: 404 Not Found** 61 | ```json 62 | { 63 | "message": "Pagamento não encontrado" 64 | } 65 | ``` 66 | 67 | --- 68 | 69 | ## Tratamento de Erros 70 | 71 | Respostas de erro padrão: 72 | 73 | - **404 Not Found**: Recurso não encontrado. 74 | - **500 Internal Server Error**: Erro interno do servidor. 75 | 76 | --- 77 | 78 | ## Notas Adicionais 79 | 80 | - O status de um pagamento pode ser um dos seguintes valores: 81 | - `Aprovado`: O pagamento foi processado com sucesso. 82 | - `Recusado`: O pagamento foi recusado pelo sistema. 83 | -------------------------------------------------------------------------------- /book_store/pagamento/app/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Indica que o diretório é um pacote Python 3 | """ 4 | import logging 5 | 6 | # Configuração global de logging 7 | logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") 8 | logger = logging.getLogger(__name__) 9 | -------------------------------------------------------------------------------- /book_store/pagamento/app/databases.py: -------------------------------------------------------------------------------- 1 | """ 2 | Módulo responsável por criar a conexão com o banco de dados 3 | """ 4 | import os 5 | from sqlalchemy import create_engine 6 | from sqlalchemy.ext.declarative import declarative_base 7 | from sqlalchemy.orm import sessionmaker 8 | from sqlalchemy_utils import database_exists, create_database 9 | from . import logger 10 | 11 | # Obtém as credenciais do banco de dados das variáveis de ambiente 12 | DB_USER = os.getenv("POSTGRES_USER") 13 | DB_PASSWORD = os.getenv("POSTGRES_PASSWORD") 14 | DB_HOST = os.getenv("POSTGRES_HOST") 15 | DB_NAME = os.getenv("POSTGRES_DB") 16 | 17 | # URL de conexão com o banco de dados 18 | DATABASE_URL = f"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:5432/{DB_NAME}" 19 | 20 | # Cria a engine de conexão com o banco de dados 21 | engine = create_engine(DATABASE_URL) 22 | 23 | # Verifica e cria o banco de dados, caso necessário 24 | def initialize_database(): 25 | """ 26 | Inicializa o banco de dados, criando-o se não existir. 27 | """ 28 | try: 29 | if not database_exists(engine.url): 30 | logger.info(f"Banco de dados {DB_NAME} não existe. Criando...") 31 | create_database(engine.url) 32 | else: 33 | logger.info(f"Banco de dados {DB_NAME} já existe.") 34 | except Exception as e: 35 | logger.error(f"Erro ao inicializar o banco de dados: {e}") 36 | raise 37 | 38 | initialize_database() 39 | 40 | # Configura a sessão do banco de dados 41 | SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) 42 | Base = declarative_base() 43 | 44 | # Função que retorna uma sessão do banco de dados 45 | def get_db(): 46 | """ 47 | Obtém uma nova sessão do banco de dados. 48 | """ 49 | db = SessionLocal() 50 | try: 51 | yield db 52 | except Exception as e: 53 | db.rollback() 54 | raise 55 | finally: 56 | db.close() 57 | logger.info("Conexão com o banco de dados encerrada.") 58 | -------------------------------------------------------------------------------- /book_store/pagamento/app/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException, Depends 2 | from sqlalchemy.orm import Session 3 | import random 4 | import requests 5 | import os 6 | from . import models 7 | from .databases import engine, get_db 8 | from . import logger 9 | 10 | # Obetem url dos serviços ordem de compra 11 | ORDER_URL = os.getenv("ORDER_URL", "http://ordem_de_compra:8081") 12 | 13 | # Cria as tabelas no banco de dados 14 | models.Base.metadata.create_all(bind=engine) 15 | 16 | # Cria a aplicação FastAPI 17 | app = FastAPI() 18 | 19 | # Define a rota para processar pagamento 20 | @app.post("/pagamentos", response_model=models.Pagamento) 21 | def processar_pagamento(pagamento: models.PagamentoCreate, db: Session = Depends(get_db)): 22 | """ 23 | Processa um pagamento para a ordem especificada 24 | """ 25 | try: 26 | # Valida se a ordem de compra existe 27 | ordem_response = requests.get(f"{ORDER_URL}/ordens/{pagamento.id_ordem}") 28 | if ordem_response.status_code != 200: 29 | raise HTTPException(status_code=404, detail="Ordem de compra não encontrada") 30 | 31 | # Processa pagamento 32 | status = random.choice(["Aprovado", "Recusado"]) 33 | 34 | # Cria o pagamento no banco 35 | db_pagamento = models.processar_pagamento(db=db, pagamento=pagamento, status=status) 36 | 37 | return db_pagamento 38 | except Exception as e: 39 | logger.error(f"Erro ao processar pagamento: {str(e)}") 40 | raise HTTPException(status_code=500, detail=f"Erro ao processar pagamento: {str(e)}") 41 | 42 | @app.get("/pagamentos/{id_pagamento}", response_model=models.Pagamento) 43 | def lista_pagamentos(id_pagamento: int, db: Session = Depends(get_db)): 44 | """ 45 | Retorna informações de um pagamento pelo ID 46 | """ 47 | db_pagamento = models.lista_pagamentos(db=db, id_pagamento=id_pagamento) 48 | if db_pagamento is None: 49 | raise HTTPException(status_code=404, detail="Pagamento não encontrado") 50 | return db_pagamento 51 | -------------------------------------------------------------------------------- /book_store/pagamento/app/models.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import Column, Integer, String 2 | from sqlalchemy.orm import Session 3 | from pydantic import BaseModel 4 | from .databases import Base 5 | 6 | # Define a classe PagamentoBase 7 | class PagamentoBase(BaseModel): 8 | id_ordem: int 9 | 10 | # Define a classe PagamentoCreate 11 | class PagamentoCreate(PagamentoBase): 12 | pass 13 | 14 | # Define a classe Pagamento 15 | class Pagamento(PagamentoBase): 16 | id: int 17 | status: str 18 | 19 | class Config: 20 | from_attributes = True 21 | 22 | # Define a classe Pagamento 23 | class PagamentoDB(Base): 24 | __tablename__ = 'pagamentos' 25 | id = Column(Integer, primary_key=True, index=True) 26 | id_ordem = Column(Integer, index=True) 27 | status = Column(String) 28 | 29 | # Função que processa o pagamento de uma ordem 30 | def processar_pagamento(db: Session, pagamento: PagamentoCreate, status: str): 31 | """" 32 | Função que cria um pagamento no banco de dados 33 | """ 34 | db_pagamento = PagamentoDB( 35 | id_ordem=pagamento.id_ordem, 36 | status=status 37 | ) 38 | db.add(db_pagamento) 39 | db.commit() 40 | db.refresh(db_pagamento) 41 | return db_pagamento 42 | 43 | # Função que lista os pagamento 44 | def lista_pagamentos(db: Session, id_pagamento: int): 45 | """" 46 | Função que retorna um pagamento do banco de dados 47 | """ 48 | try: 49 | return db.query(PagamentoDB).filter(PagamentoDB.id == id_pagamento).first() 50 | except Exception as e: 51 | logger.error(f"Erro ao buscar pagamento com id {id_pagamento}: {e}") 52 | raise 53 | -------------------------------------------------------------------------------- /book_store/pagamento/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi==0.115.6 2 | uvicorn==0.34.0 3 | sqlalchemy==2.0.37 4 | sqlalchemy-utils==0.41.2 5 | psycopg2-binary==2.9.10 6 | requests==2.32.3 7 | -------------------------------------------------------------------------------- /config/collector/otelcol-config.yml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: # doc. https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver/otlpreceiver 3 | protocols: 4 | grpc: 5 | endpoint: 0.0.0.0:4317 6 | http: 7 | endpoint: 0.0.0.0:4318 8 | 9 | filelog: # doc. https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/filelogreceiver/README.md 10 | poll_interval: "200ms" 11 | start_at: end 12 | include: [/etc/log/*/*.log] 13 | operators: 14 | - type: json_parser 15 | parse_from: body 16 | 17 | prometheus: # doc. https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/prometheusreceiver 18 | config: 19 | scrape_configs: 20 | - job_name: otel-collector-metrics 21 | scrape_interval: 30s 22 | static_configs: 23 | - targets: ['127.0.0.1:8888'] 24 | 25 | processors: 26 | batch: # doc. https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/batchprocessor 27 | send_batch_size: 100 28 | timeout: 5s 29 | memory_limiter: 30 | check_interval: 5s 31 | limit_percentage: 75 32 | 33 | resourcedetection/system: # doc. https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourceprocessor 34 | detectors: [ "system", docker ] 35 | system: 36 | hostname_sources: [ "os" ] 37 | 38 | resource: # doc. https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourceprocessor 39 | attributes: 40 | - key: service.name 41 | value: logs-otel-collector 42 | action: insert 43 | 44 | exporters: 45 | otlp: # doc. https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlpexporter 46 | endpoint: tempo:4317 47 | tls: 48 | insecure: true 49 | 50 | prometheusremotewrite: # doc. https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/prometheusexporter 51 | endpoint: http://mimir:9009/api/v1/push 52 | resource_to_telemetry_conversion: 53 | enabled: true # Convert resource attributes to metric labels 54 | 55 | otlphttp: # doc. https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter 56 | endpoint: http://loki:3100/otlp 57 | tls: 58 | insecure: true 59 | 60 | extensions: # doc. https://github.com/open-telemetry/opentelemetry-collector/tree/main/extension 61 | health_check: 62 | pprof: 63 | zpages: 64 | 65 | service: 66 | extensions: [ pprof, zpages, health_check ] 67 | # Doc. https://opentelemetry.io/docs/collector/internal-telemetry/ 68 | telemetry: 69 | metrics: 70 | address: 127.0.0.1:8888 71 | level: detailed 72 | logs: 73 | level: DEBUG 74 | encoding: json 75 | processors: 76 | - batch: 77 | exporter: 78 | otlp: 79 | protocol: http/protobuf 80 | endpoint: http://otelcollector:4318 81 | traces: 82 | processors: 83 | - batch: 84 | exporter: 85 | otlp: 86 | protocol: grpc/protobuf 87 | endpoint: http://otelcollector:4317 88 | pipelines: 89 | logs: 90 | receivers: [ otlp, filelog ] 91 | processors: [ memory_limiter, resource, resourcedetection/system, batch ] 92 | exporters: [ otlphttp ] 93 | traces: 94 | receivers: [ otlp ] 95 | processors: [ memory_limiter, resourcedetection/system, batch ] 96 | exporters: [ otlp ] 97 | metrics: 98 | receivers: [ otlp, prometheus ] 99 | processors: [ memory_limiter, resourcedetection/system, batch ] 100 | exporters: [ prometheusremotewrite ] 101 | -------------------------------------------------------------------------------- /config/grafana/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: "Dashboard" 5 | orgId: 1 6 | type: file 7 | disableDeletion: false 8 | updateIntervalSeconds: 10 9 | allowUiUpdates: true 10 | options: 11 | path: /var/lib/grafana/dashboards 12 | -------------------------------------------------------------------------------- /config/grafana/dashboards/app-python.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": { 7 | "type": "datasource", 8 | "uid": "grafana" 9 | }, 10 | "enable": true, 11 | "hide": true, 12 | "iconColor": "rgba(0, 211, 255, 1)", 13 | "name": "Annotations & Alerts", 14 | "target": { 15 | "limit": 100, 16 | "matchAny": false, 17 | "tags": [], 18 | "type": "dashboard" 19 | }, 20 | "type": "dashboard" 21 | } 22 | ] 23 | }, 24 | "editable": true, 25 | "fiscalYearStartMonth": 0, 26 | "graphTooltip": 0, 27 | "id": 1, 28 | "links": [], 29 | "panels": [ 30 | { 31 | "datasource": { 32 | "default": false, 33 | "type": "prometheus", 34 | "uid": "PBFA97CFB590B2093" 35 | }, 36 | "fieldConfig": { 37 | "defaults": { 38 | "color": { 39 | "mode": "thresholds" 40 | }, 41 | "mappings": [], 42 | "thresholds": { 43 | "mode": "absolute", 44 | "steps": [ 45 | { 46 | "color": "green", 47 | "value": null 48 | } 49 | ] 50 | } 51 | }, 52 | "overrides": [] 53 | }, 54 | "gridPos": { 55 | "h": 8, 56 | "w": 6, 57 | "x": 0, 58 | "y": 0 59 | }, 60 | "id": 19, 61 | "options": { 62 | "colorMode": "value", 63 | "graphMode": "area", 64 | "justifyMode": "auto", 65 | "orientation": "auto", 66 | "percentChangeColorMode": "standard", 67 | "reduceOptions": { 68 | "calcs": [ 69 | "lastNotNull" 70 | ], 71 | "fields": "", 72 | "values": false 73 | }, 74 | "showPercentChange": false, 75 | "textMode": "auto", 76 | "wideLayout": true 77 | }, 78 | "pluginVersion": "11.4.0", 79 | "targets": [ 80 | { 81 | "datasource": { 82 | "type": "prometheus", 83 | "uid": "PBFA97CFB590B2093" 84 | }, 85 | "disableTextWrap": false, 86 | "editorMode": "builder", 87 | "exemplar": true, 88 | "expr": "sum by(service_name) (http_server_duration_milliseconds_count{service_name=\"$service_name\"})", 89 | "fullMetaSearch": false, 90 | "includeNullMetadata": true, 91 | "interval": "", 92 | "legendFormat": "", 93 | "range": true, 94 | "refId": "A", 95 | "useBackend": false 96 | } 97 | ], 98 | "title": "Total Requests", 99 | "transformations": [ 100 | { 101 | "id": "seriesToRows", 102 | "options": {} 103 | }, 104 | { 105 | "id": "sortBy", 106 | "options": { 107 | "fields": {}, 108 | "sort": [ 109 | { 110 | "field": "Time" 111 | } 112 | ] 113 | } 114 | } 115 | ], 116 | "type": "stat" 117 | }, 118 | { 119 | "datasource": { 120 | "default": false, 121 | "type": "prometheus", 122 | "uid": "PBFA97CFB590B2093" 123 | }, 124 | "fieldConfig": { 125 | "defaults": { 126 | "color": { 127 | "mode": "thresholds" 128 | }, 129 | "mappings": [], 130 | "thresholds": { 131 | "mode": "absolute", 132 | "steps": [ 133 | { 134 | "color": "green", 135 | "value": null 136 | } 137 | ] 138 | } 139 | }, 140 | "overrides": [] 141 | }, 142 | "gridPos": { 143 | "h": 8, 144 | "w": 6, 145 | "x": 6, 146 | "y": 0 147 | }, 148 | "id": 24, 149 | "options": { 150 | "colorMode": "value", 151 | "graphMode": "area", 152 | "justifyMode": "auto", 153 | "orientation": "auto", 154 | "percentChangeColorMode": "standard", 155 | "reduceOptions": { 156 | "calcs": [ 157 | "lastNotNull" 158 | ], 159 | "fields": "", 160 | "values": false 161 | }, 162 | "showPercentChange": false, 163 | "textMode": "auto", 164 | "wideLayout": true 165 | }, 166 | "pluginVersion": "11.4.0", 167 | "targets": [ 168 | { 169 | "datasource": { 170 | "type": "prometheus", 171 | "uid": "PBFA97CFB590B2093" 172 | }, 173 | "disableTextWrap": false, 174 | "editorMode": "builder", 175 | "exemplar": true, 176 | "expr": "sum by(__name__) (http_server_active_requests{service_name=\"$service_name\"})", 177 | "fullMetaSearch": false, 178 | "includeNullMetadata": true, 179 | "interval": "", 180 | "legendFormat": "", 181 | "range": true, 182 | "refId": "A", 183 | "useBackend": false 184 | } 185 | ], 186 | "title": "Server Active Requests", 187 | "transformations": [ 188 | { 189 | "id": "seriesToRows", 190 | "options": {} 191 | }, 192 | { 193 | "id": "sortBy", 194 | "options": { 195 | "fields": {}, 196 | "sort": [ 197 | { 198 | "field": "Time" 199 | } 200 | ] 201 | } 202 | } 203 | ], 204 | "type": "stat" 205 | }, 206 | { 207 | "datasource": { 208 | "default": false, 209 | "type": "prometheus", 210 | "uid": "PBFA97CFB590B2093" 211 | }, 212 | "fieldConfig": { 213 | "defaults": { 214 | "color": { 215 | "mode": "continuous-GrYlRd" 216 | }, 217 | "mappings": [], 218 | "thresholds": { 219 | "mode": "absolute", 220 | "steps": [ 221 | { 222 | "color": "green", 223 | "value": null 224 | } 225 | ] 226 | }, 227 | "unit": "ms" 228 | }, 229 | "overrides": [] 230 | }, 231 | "gridPos": { 232 | "h": 8, 233 | "w": 12, 234 | "x": 12, 235 | "y": 0 236 | }, 237 | "id": 23, 238 | "options": { 239 | "displayMode": "lcd", 240 | "legend": { 241 | "calcs": [], 242 | "displayMode": "list", 243 | "placement": "bottom", 244 | "showLegend": false 245 | }, 246 | "maxVizHeight": 300, 247 | "minVizHeight": 16, 248 | "minVizWidth": 8, 249 | "namePlacement": "auto", 250 | "orientation": "horizontal", 251 | "reduceOptions": { 252 | "calcs": [ 253 | "lastNotNull" 254 | ], 255 | "fields": "", 256 | "values": false 257 | }, 258 | "showUnfilled": true, 259 | "sizing": "auto", 260 | "valueMode": "color" 261 | }, 262 | "pluginVersion": "11.4.0", 263 | "targets": [ 264 | { 265 | "datasource": { 266 | "type": "prometheus", 267 | "uid": "PBFA97CFB590B2093" 268 | }, 269 | "disableTextWrap": false, 270 | "editorMode": "builder", 271 | "exemplar": true, 272 | "expr": "http_server_duration_milliseconds_sum{service_name=\"$service_name\"} / http_server_duration_milliseconds_count{service_name=\"$service_name\"}", 273 | "fullMetaSearch": false, 274 | "includeNullMetadata": true, 275 | "interval": "", 276 | "legendFormat": "{{method}} {{endpoint}}", 277 | "range": true, 278 | "refId": "A", 279 | "useBackend": false 280 | } 281 | ], 282 | "title": "Requests Average Duration", 283 | "type": "bargauge" 284 | }, 285 | { 286 | "datasource": { 287 | "default": false, 288 | "type": "prometheus", 289 | "uid": "PBFA97CFB590B2093" 290 | }, 291 | "fieldConfig": { 292 | "defaults": { 293 | "color": { 294 | "mode": "palette-classic" 295 | }, 296 | "custom": { 297 | "axisBorderShow": false, 298 | "axisCenteredZero": false, 299 | "axisColorMode": "text", 300 | "axisLabel": "", 301 | "axisPlacement": "auto", 302 | "barAlignment": 0, 303 | "barWidthFactor": 0.6, 304 | "drawStyle": "line", 305 | "fillOpacity": 0, 306 | "gradientMode": "none", 307 | "hideFrom": { 308 | "legend": false, 309 | "tooltip": false, 310 | "viz": false 311 | }, 312 | "insertNulls": false, 313 | "lineInterpolation": "linear", 314 | "lineWidth": 1, 315 | "pointSize": 5, 316 | "scaleDistribution": { 317 | "type": "linear" 318 | }, 319 | "showPoints": "auto", 320 | "spanNulls": false, 321 | "stacking": { 322 | "group": "A", 323 | "mode": "none" 324 | }, 325 | "thresholdsStyle": { 326 | "mode": "off" 327 | } 328 | }, 329 | "mappings": [], 330 | "thresholds": { 331 | "mode": "absolute", 332 | "steps": [ 333 | { 334 | "color": "green", 335 | "value": null 336 | }, 337 | { 338 | "color": "red", 339 | "value": 80 340 | } 341 | ] 342 | } 343 | }, 344 | "overrides": [] 345 | }, 346 | "gridPos": { 347 | "h": 8, 348 | "w": 12, 349 | "x": 0, 350 | "y": 8 351 | }, 352 | "id": 10, 353 | "options": { 354 | "legend": { 355 | "calcs": [ 356 | "last" 357 | ], 358 | "displayMode": "list", 359 | "placement": "bottom", 360 | "showLegend": false 361 | }, 362 | "tooltip": { 363 | "mode": "single", 364 | "sort": "none" 365 | } 366 | }, 367 | "pluginVersion": "11.4.0", 368 | "targets": [ 369 | { 370 | "datasource": { 371 | "type": "prometheus", 372 | "uid": "PBFA97CFB590B2093" 373 | }, 374 | "disableTextWrap": false, 375 | "editorMode": "builder", 376 | "exemplar": true, 377 | "expr": "increase(http_server_duration_milliseconds_count{service_name=\"$service_name\"}[$__rate_interval])", 378 | "fullMetaSearch": false, 379 | "includeNullMetadata": true, 380 | "interval": "", 381 | "legendFormat": "__auto", 382 | "range": true, 383 | "refId": "A", 384 | "useBackend": false 385 | } 386 | ], 387 | "title": "Requests Count", 388 | "type": "timeseries" 389 | }, 390 | { 391 | "datasource": { 392 | "default": false, 393 | "type": "prometheus", 394 | "uid": "PBFA97CFB590B2093" 395 | }, 396 | "fieldConfig": { 397 | "defaults": { 398 | "color": { 399 | "mode": "palette-classic" 400 | }, 401 | "custom": { 402 | "axisBorderShow": false, 403 | "axisCenteredZero": false, 404 | "axisColorMode": "text", 405 | "axisLabel": "", 406 | "axisPlacement": "auto", 407 | "fillOpacity": 80, 408 | "gradientMode": "none", 409 | "hideFrom": { 410 | "legend": false, 411 | "tooltip": false, 412 | "viz": false 413 | }, 414 | "lineWidth": 1, 415 | "scaleDistribution": { 416 | "type": "linear" 417 | }, 418 | "thresholdsStyle": { 419 | "mode": "off" 420 | } 421 | }, 422 | "mappings": [], 423 | "min": 0, 424 | "thresholds": { 425 | "mode": "absolute", 426 | "steps": [ 427 | { 428 | "color": "green", 429 | "value": null 430 | }, 431 | { 432 | "color": "red", 433 | "value": 80 434 | } 435 | ] 436 | }, 437 | "unit": "ms" 438 | }, 439 | "overrides": [] 440 | }, 441 | "gridPos": { 442 | "h": 8, 443 | "w": 12, 444 | "x": 12, 445 | "y": 8 446 | }, 447 | "id": 6, 448 | "options": { 449 | "barRadius": 0, 450 | "barWidth": 0.97, 451 | "fullHighlight": false, 452 | "groupWidth": 0.7, 453 | "legend": { 454 | "calcs": [], 455 | "displayMode": "list", 456 | "placement": "bottom", 457 | "showLegend": true 458 | }, 459 | "orientation": "auto", 460 | "showValue": "auto", 461 | "stacking": "none", 462 | "tooltip": { 463 | "mode": "single", 464 | "sort": "none" 465 | }, 466 | "xTickLabelRotation": 0, 467 | "xTickLabelSpacing": 0 468 | }, 469 | "pluginVersion": "11.4.0", 470 | "targets": [ 471 | { 472 | "datasource": { 473 | "type": "prometheus", 474 | "uid": "PBFA97CFB590B2093" 475 | }, 476 | "disableTextWrap": false, 477 | "editorMode": "builder", 478 | "exemplar": true, 479 | "expr": "histogram_quantile(0.95, sum by(http_method, service_version, le) (rate(http_server_duration_milliseconds_bucket{service_name=\"$service_name\"}[$__rate_interval])))", 480 | "fullMetaSearch": false, 481 | "includeNullMetadata": false, 482 | "interval": "", 483 | "legendFormat": "__auto", 484 | "range": true, 485 | "refId": "A", 486 | "useBackend": false 487 | } 488 | ], 489 | "title": "Requests Per Second", 490 | "type": "barchart" 491 | }, 492 | { 493 | "datasource": { 494 | "default": false, 495 | "type": "prometheus", 496 | "uid": "PBFA97CFB590B2093" 497 | }, 498 | "fieldConfig": { 499 | "defaults": { 500 | "color": { 501 | "mode": "palette-classic" 502 | }, 503 | "custom": { 504 | "axisBorderShow": false, 505 | "axisCenteredZero": false, 506 | "axisColorMode": "text", 507 | "axisLabel": "", 508 | "axisPlacement": "auto", 509 | "axisSoftMax": 1, 510 | "barAlignment": 0, 511 | "barWidthFactor": 0.6, 512 | "drawStyle": "line", 513 | "fillOpacity": 0, 514 | "gradientMode": "none", 515 | "hideFrom": { 516 | "legend": false, 517 | "tooltip": false, 518 | "viz": false 519 | }, 520 | "insertNulls": false, 521 | "lineInterpolation": "linear", 522 | "lineWidth": 1, 523 | "pointSize": 5, 524 | "scaleDistribution": { 525 | "type": "linear" 526 | }, 527 | "showPoints": "auto", 528 | "spanNulls": false, 529 | "stacking": { 530 | "group": "A", 531 | "mode": "none" 532 | }, 533 | "thresholdsStyle": { 534 | "mode": "area" 535 | } 536 | }, 537 | "mappings": [], 538 | "thresholds": { 539 | "mode": "percentage", 540 | "steps": [ 541 | { 542 | "color": "dark-red", 543 | "value": null 544 | }, 545 | { 546 | "color": "green", 547 | "value": 80 548 | } 549 | ] 550 | }, 551 | "unit": "percentunit" 552 | }, 553 | "overrides": [] 554 | }, 555 | "gridPos": { 556 | "h": 8, 557 | "w": 12, 558 | "x": 0, 559 | "y": 16 560 | }, 561 | "id": 2, 562 | "options": { 563 | "legend": { 564 | "calcs": [ 565 | "last" 566 | ], 567 | "displayMode": "list", 568 | "placement": "bottom", 569 | "showLegend": true 570 | }, 571 | "tooltip": { 572 | "mode": "single", 573 | "sort": "none" 574 | } 575 | }, 576 | "pluginVersion": "11.4.0", 577 | "targets": [ 578 | { 579 | "datasource": { 580 | "type": "prometheus", 581 | "uid": "PBFA97CFB590B2093" 582 | }, 583 | "disableTextWrap": false, 584 | "editorMode": "builder", 585 | "exemplar": true, 586 | "expr": "sum by(http_status_code, http_method) (http_server_duration_milliseconds_sum{service_name=\"$service_name\", http_status_code=\"200\"}) / sum by(http_status_code, http_method) (http_server_duration_milliseconds_sum{service_name=\"$service_name\"})", 587 | "fullMetaSearch": false, 588 | "hide": false, 589 | "includeNullMetadata": true, 590 | "interval": "", 591 | "legendFormat": "__auto", 592 | "range": true, 593 | "refId": "A", 594 | "useBackend": false 595 | } 596 | ], 597 | "title": "Percent of 2xx Requests", 598 | "type": "timeseries" 599 | }, 600 | { 601 | "datasource": { 602 | "type": "tempo", 603 | "uid": "tempo" 604 | }, 605 | "fieldConfig": { 606 | "defaults": { 607 | "color": { 608 | "mode": "thresholds" 609 | }, 610 | "custom": { 611 | "align": "center", 612 | "cellOptions": { 613 | "type": "auto" 614 | }, 615 | "filterable": true, 616 | "inspect": false 617 | }, 618 | "mappings": [], 619 | "thresholds": { 620 | "mode": "absolute", 621 | "steps": [ 622 | { 623 | "color": "green", 624 | "value": null 625 | }, 626 | { 627 | "color": "red", 628 | "value": 80 629 | } 630 | ] 631 | } 632 | }, 633 | "overrides": [] 634 | }, 635 | "gridPos": { 636 | "h": 8, 637 | "w": 12, 638 | "x": 12, 639 | "y": 16 640 | }, 641 | "id": 25, 642 | "options": { 643 | "cellHeight": "sm", 644 | "footer": { 645 | "countRows": false, 646 | "enablePagination": false, 647 | "fields": "", 648 | "reducer": [ 649 | "sum" 650 | ], 651 | "show": false 652 | }, 653 | "showHeader": true 654 | }, 655 | "pluginVersion": "11.4.0", 656 | "targets": [ 657 | { 658 | "datasource": { 659 | "type": "tempo", 660 | "uid": "tempo" 661 | }, 662 | "filters": [ 663 | { 664 | "id": "616e1d16", 665 | "operator": "=", 666 | "scope": "span" 667 | }, 668 | { 669 | "id": "service-name", 670 | "operator": "=", 671 | "scope": "resource", 672 | "tag": "service.name", 673 | "value": [ 674 | "$service_name" 675 | ], 676 | "valueType": "string" 677 | }, 678 | { 679 | "id": "status", 680 | "operator": "=", 681 | "scope": "intrinsic", 682 | "tag": "status", 683 | "valueType": "keyword" 684 | } 685 | ], 686 | "limit": 20, 687 | "queryType": "traceqlSearch", 688 | "refId": "A", 689 | "tableType": "traces" 690 | } 691 | ], 692 | "title": "Traces", 693 | "type": "table" 694 | }, 695 | { 696 | "datasource": { 697 | "type": "tempo", 698 | "uid": "tempo" 699 | }, 700 | "fieldConfig": { 701 | "defaults": {}, 702 | "overrides": [] 703 | }, 704 | "gridPos": { 705 | "h": 12, 706 | "w": 24, 707 | "x": 0, 708 | "y": 24 709 | }, 710 | "id": 26, 711 | "options": { 712 | "edges": {}, 713 | "nodes": { 714 | "mainStatUnit": "" 715 | } 716 | }, 717 | "pluginVersion": "11.4.0", 718 | "targets": [ 719 | { 720 | "datasource": { 721 | "type": "tempo", 722 | "uid": "tempo" 723 | }, 724 | "filters": [ 725 | { 726 | "id": "8344b05e", 727 | "operator": "=", 728 | "scope": "span" 729 | }, 730 | { 731 | "id": "service-name", 732 | "operator": "=", 733 | "scope": "resource", 734 | "tag": "service.name", 735 | "value": [ 736 | "$service_name" 737 | ], 738 | "valueType": "string" 739 | } 740 | ], 741 | "limit": 20, 742 | "queryType": "serviceMap", 743 | "refId": "A", 744 | "tableType": "traces" 745 | } 746 | ], 747 | "title": "Panel Title", 748 | "type": "nodeGraph" 749 | } 750 | ], 751 | "preload": false, 752 | "refresh": "5s", 753 | "schemaVersion": 40, 754 | "tags": [], 755 | "templating": { 756 | "list": [ 757 | { 758 | "current": { 759 | "text": "cadastro_de_livros", 760 | "value": "cadastro_de_livros" 761 | }, 762 | "datasource": { 763 | "type": "prometheus", 764 | "uid": "PBFA97CFB590B2093" 765 | }, 766 | "definition": "label_values(http_server_duration_milliseconds_sum,service_name)", 767 | "label": "service_name", 768 | "name": "service_name", 769 | "options": [], 770 | "query": { 771 | "qryType": 1, 772 | "query": "label_values(http_server_duration_milliseconds_sum,service_name)", 773 | "refId": "PrometheusVariableQueryEditor-VariableQuery" 774 | }, 775 | "refresh": 1, 776 | "regex": "", 777 | "type": "query" 778 | } 779 | ] 780 | }, 781 | "time": { 782 | "from": "now-15m", 783 | "to": "now" 784 | }, 785 | "timepicker": {}, 786 | "timezone": "", 787 | "title": "App Python", 788 | "uid": "flask-monitoring", 789 | "version": 7, 790 | "weekStart": "" 791 | } 792 | -------------------------------------------------------------------------------- /config/grafana/datasource.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | datasources: 3 | 4 | - name: Tempo 5 | uid: tempo 6 | type: tempo 7 | access: proxy 8 | url: http://tempo:3200 9 | editable: true 10 | basicAuth: false 11 | apiVersion: 1 12 | jsonData: 13 | httpMethod: GET 14 | serviceMap: 15 | datasourceUid: Prometheus 16 | nodeGraph: 17 | enabled: true 18 | tracesToLogsV2: 19 | customQuery: true 20 | query: '{appname="grafana"} |~ "\\\\\"traceID\\\\\":\\\\\"$${__trace.traceId}\\\\"' 21 | spanEndTimeShift: 1m 22 | spanStartTimeShift: -1m 23 | datasourceUid: Loki 24 | filterByTraceID: false 25 | lokiSearch: 26 | datasourceUid: Loki 27 | 28 | - name: Prometheus 29 | type: prometheus 30 | editable: true 31 | access: proxy 32 | orgId: 1 33 | url: http://mimir:9009/prometheus 34 | basicAuth: false 35 | version: 1 36 | jsonData: 37 | timeInterval: 60s 38 | httpMethod: POST 39 | 40 | - name: Loki 41 | type: loki 42 | access: proxy 43 | editable: true 44 | orgId: 1 45 | url: http://loki:3100 46 | jsonData: 47 | derivedFields: 48 | - datasourceUid: Tempo 49 | matcherRegex: 'traceID\\":\\"([a-z0-9]+)\\"' 50 | name: TraceID 51 | url: $${__value.raw} 52 | urlDisplayLabel: Find this trace in Tempo 53 | secureJsonData: 54 | httpHeaderValue1: "tenant1" 55 | -------------------------------------------------------------------------------- /config/grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | [auth] 2 | disable_login_form = true 3 | 4 | [auth.anonymous] 5 | enabled = true 6 | org_name = Main Org. 7 | org_role = Admin 8 | -------------------------------------------------------------------------------- /config/grafana/k6_generate_traffic.js: -------------------------------------------------------------------------------- 1 | import http from 'k6/http'; 2 | import { sleep } from 'k6'; 3 | 4 | // Configurações do teste 5 | export const options = { 6 | stages: [ 7 | { duration: '0s', target: 5 }, // Começa com 5 usuários 8 | { duration: '5m', target: 20 }, // Aumenta para 20 usuários ao longo de 5 minutos 9 | { duration: '5m', target: 30 }, // Aumenta para 30 usuários ao longo de 5 minutos 10 | ], 11 | }; 12 | 13 | // Endpoint base 14 | const BASE_URL = 'http://app:8080/pokemon'; 15 | 16 | // Lista de nomes de Pokémon 17 | const POKEMONS = ["Bulbasaur", "Ivysaur", "Venusaur", "Charmander", "Charmeleon", "Charizard", "Squirtle", "Wartortle", "Blastoise", "Caterpie", "Metapod", "Butterfree", "Weedle", "Kakuna", "Beedrill", "Pidgey", "Pidgeotto", "Pidgeot", "Rattata", "Raticate", "Spearow", "Fearow", "Ekans", "Arbok", "Pikachu", "Raichu", "Sandshrew", "Sandslash", "Nidoran♀", "Nidorina", "Nidoqueen", "Nidoran♂", "Nidorino", "Nidoking", "Clefairy", "Clefable", "Vulpix", "Ninetales", "Jigglypuff", "Wigglytuff", "Zubat", "Golbat", "Oddish", "Gloom", "Vileplume", "Paras", "Parasect", "Venonat", "Venomoth", "Diglett", "Dugtrio", "Meowth", "Persian", "Psyduck", "Golduck", "Machop", "Machoke", "Machamp", "Bellsprout", "Weepinbell", "Victreebel", "Tentacool", "Tentacruel", "Geodude", "Graveler", "Golem", "Ponyta", "Rapidash", "Slowpoke", "Slowbro", "Magnemite", "Magneton", "Farfetch’d", "Doduo", "Dodrio", "Seel", "Dewgong", "Grimer", "Muk", "Shellder", "Cloyster", "Gastly", "Haunter", "Gengar", "Onix", "Drowzee", "Hypno", "Krabby", "Kingler", "Exeggcute", "Exeggutor", "Cubone", "Marowak", "Hitmonlee", "Hitmonchan", "Lickitung", "Lickilicky", "Koffing", "Weezing", "Rhyhorn"]; 18 | 19 | // Função para obter um nome aleatório da lista 20 | function getRandomName(list) { 21 | return list[Math.floor(Math.random() * list.length)]; 22 | } 23 | 24 | // Função para buscar Pokémon na API externa 25 | function fetchPokemon() { 26 | const pokemonName = getRandomName(POKEMONS); 27 | const response = http.get(`${BASE_URL}/fetch/${pokemonName}`); 28 | console.log(`Fetched Pokemon: ${pokemonName}, Status: ${response.status}`); 29 | } 30 | 31 | // Função para listar um Pokémon salvo 32 | function getPokemonByName() { 33 | const pokemonName = getRandomName(POKEMONS); 34 | const response = http.get(`${BASE_URL}/${pokemonName}`); 35 | console.log(`Get Pokemon by Name: ${pokemonName}, Status: ${response.status}`); 36 | } 37 | 38 | // Função para listar todos os Pokémon salvos 39 | function listAllPokemons() { 40 | const response = http.get(BASE_URL); 41 | console.log(`List all Pokemons, Status: ${response.status}`); 42 | } 43 | 44 | // Função para adicionar um Pokémon com nome aleatório 45 | function addPokemonRandom() { 46 | const randomName = getRandomName(POKEMONS); 47 | const payload = JSON.stringify({ 48 | name: randomName, 49 | height: (Math.random() * 2).toFixed(2), // Altura aleatória 50 | weight: (Math.random() * 100).toFixed(2), // Peso aleatório 51 | abilities: ['Run', 'Jump'], 52 | types: ['Normal'], 53 | }); 54 | 55 | const headers = { 'Content-Type': 'application/json' }; 56 | const response = http.post(BASE_URL, payload, { headers }); 57 | console.log(`Added Pokemon: ${randomName}, Status: ${response.status}`); 58 | } 59 | 60 | // Função para deletar um Pokémon salvo 61 | function deletePokemonByName() { 62 | const pokemonName = getRandomName(POKEMONS); 63 | const response = http.del(`${BASE_URL}/${pokemonName}`); 64 | console.log(`Deleted Pokemon: ${pokemonName}, Status: ${response.status}`); 65 | } 66 | 67 | // Função principal 68 | export default function () { 69 | // Define os métodos disponíveis para o teste 70 | const actions = [ 71 | () => fetchPokemon(), // GET /fetch/{name} 72 | () => getPokemonByName(), // GET /{name} 73 | () => listAllPokemons(), // GET / 74 | () => addPokemonRandom(), // POST / 75 | // () => deletePokemonByName(), // DELETE /{name} 76 | ]; 77 | 78 | // Escolhe aleatoriamente uma ação 79 | const action = actions[Math.floor(Math.random() * actions.length)]; 80 | action(); 81 | 82 | // Dorme antes da próxima iteração 83 | sleep(0.5); 84 | } 85 | -------------------------------------------------------------------------------- /config/grafana/loki.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | auth_enabled: false 3 | 4 | limits_config: 5 | allow_structured_metadata: true 6 | 7 | server: 8 | http_listen_address: 0.0.0.0 9 | http_listen_port: 3100 10 | grpc_listen_port: 9095 11 | 12 | common: 13 | compactor_address: 0.0.0.0 14 | instance_addr: localhost 15 | path_prefix: /loki 16 | storage: 17 | filesystem: 18 | chunks_directory: /loki/chunks 19 | rules_directory: /loki/rules 20 | replication_factor: 1 21 | ring: 22 | kvstore: 23 | store: inmemory 24 | 25 | query_range: 26 | results_cache: 27 | cache: 28 | embedded_cache: 29 | enabled: true 30 | max_size_mb: 100 31 | 32 | schema_config: 33 | configs: 34 | - from: 2023-01-01 35 | store: tsdb 36 | object_store: filesystem 37 | schema: v13 38 | index: 39 | prefix: index_ 40 | period: 24h 41 | 42 | ruler: 43 | storage: 44 | type: local 45 | local: 46 | directory: /loki/rules 47 | 48 | compactor: 49 | working_directory: /tmp/compactor 50 | 51 | distributor: 52 | otlp_config: 53 | default_resource_attributes_as_index_labels: 54 | - "cloud.availability_zone" 55 | - "cloud.region" 56 | - "container.name" 57 | - "deployment.environment" 58 | - "k8s.cluster.name" 59 | - "k8s.container.name" 60 | - "k8s.cronjob.name" 61 | - "k8s.daemonset.name" 62 | - "k8s.deployment.name" 63 | - "k8s.job.name" 64 | - "k8s.namespace.name" 65 | - "k8s.node.name" 66 | - "k8s.pod.name" 67 | - "k8s.replicaset.name" 68 | - "k8s.statefulset.name" 69 | - "service.instance.id" 70 | - "service.name" 71 | - "service.namespace" -------------------------------------------------------------------------------- /config/grafana/mimir.yaml: -------------------------------------------------------------------------------- 1 | # Do not use this configuration in production. 2 | # It is for demonstration purposes only. 3 | multitenancy_enabled: false 4 | 5 | blocks_storage: 6 | backend: filesystem 7 | bucket_store: 8 | sync_dir: /tmp/mimir/tsdb-sync 9 | filesystem: 10 | dir: /tmp/mimir/data/tsdb 11 | tsdb: 12 | dir: /tmp/mimir/tsdb 13 | 14 | compactor: 15 | data_dir: /tmp/mimir/compactor 16 | sharding_ring: 17 | kvstore: 18 | store: memberlist 19 | 20 | distributor: 21 | ring: 22 | instance_addr: 127.0.0.1 23 | kvstore: 24 | store: memberlist 25 | 26 | ingester: 27 | ring: 28 | instance_addr: 127.0.0.1 29 | kvstore: 30 | store: memberlist 31 | replication_factor: 1 32 | 33 | ruler_storage: 34 | backend: filesystem 35 | filesystem: 36 | dir: /tmp/mimir/rules 37 | 38 | server: 39 | http_listen_port: 9009 40 | log_level: error 41 | 42 | store_gateway: 43 | sharding_ring: 44 | replication_factor: 1 45 | -------------------------------------------------------------------------------- /config/grafana/tempo.yaml: -------------------------------------------------------------------------------- 1 | stream_over_http_enabled: true 2 | server: 3 | http_listen_port: 3200 4 | log_level: info 5 | 6 | query_frontend: 7 | search: 8 | duration_slo: 5s 9 | throughput_bytes_slo: 1.073741824e+09 10 | metadata_slo: 11 | duration_slo: 5s 12 | throughput_bytes_slo: 1.073741824e+09 13 | trace_by_id: 14 | duration_slo: 5s 15 | 16 | distributor: 17 | receivers: 18 | otlp: 19 | protocols: 20 | grpc: 21 | endpoint: "tempo:4317" 22 | 23 | ingester: 24 | max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally 25 | 26 | compactor: 27 | compaction: 28 | block_retention: 1h # overall Tempo trace retention. set for demo purposes 29 | 30 | metrics_generator: 31 | registry: 32 | external_labels: 33 | source: tempo 34 | cluster: docker-compose 35 | storage: 36 | path: /var/tempo/generator/wal 37 | remote_write: 38 | - url: http://mimir:9009/api/v1/push 39 | send_exemplars: true 40 | traces_storage: 41 | path: /var/tempo/generator/traces 42 | 43 | storage: 44 | trace: 45 | backend: local # backend configuration to use 46 | wal: 47 | path: /var/tempo/wal # where to store the wal locally 48 | local: 49 | path: /var/tempo/blocks 50 | 51 | overrides: 52 | defaults: 53 | metrics_generator: 54 | processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator 55 | generate_native_histograms: both -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | x-logging: &default-logging 2 | driver: "json-file" 3 | options: 4 | max-size: "5m" 5 | max-file: "2" 6 | tag: "{{.Name}}" 7 | 8 | services: 9 | # Microserviço de cadastro de livros 10 | cadastro_de_livros: 11 | build: 12 | context: ./book_store/cadastro_de_livros 13 | dockerfile: Dockerfile 14 | ports: 15 | - "8080:8080" 16 | depends_on: 17 | db: 18 | condition: service_healthy 19 | ###################################################################################################################### 20 | #### Adicione as variáveis de ambiente do OpenTelemetry abaixo para configurar o envio de traces para o collector #### 21 | ###################################################################################################################### 22 | environment: 23 | - POSTGRES_USER=$POSTGRES_USER 24 | - POSTGRES_PASSWORD=$POSTGRES_PASSWORD 25 | - POSTGRES_DB=cadastro-livros 26 | - POSTGRES_HOST=$POSTGRES_HOST 27 | 28 | networks: 29 | - otel 30 | logging: *default-logging 31 | 32 | # Microserviço Ordem de Compra 33 | ordem_de_compra: 34 | build: 35 | context: ./book_store/ordem_de_compra 36 | dockerfile: Dockerfile 37 | ports: 38 | - "8081:8081" 39 | depends_on: 40 | db: 41 | condition: service_healthy 42 | cadastro_de_livros: 43 | condition: service_started 44 | ###################################################################################################################### 45 | #### Adicione as variáveis de ambiente do OpenTelemetry abaixo para configurar o envio de traces para o collector #### 46 | ###################################################################################################################### 47 | environment: 48 | - POSTGRES_USER=$POSTGRES_USER 49 | - POSTGRES_PASSWORD=$POSTGRES_PASSWORD 50 | - POSTGRES_DB=ordem-compra 51 | - POSTGRES_HOST=$POSTGRES_HOST 52 | - BOOK_URL=$BOOK_URL 53 | - PAYMENT_URL=$PAYMENT_URL 54 | networks: 55 | - otel 56 | logging: *default-logging 57 | 58 | # Microserviço de Pagamento 59 | pagamento: 60 | build: 61 | context: ./book_store/pagamento 62 | dockerfile: Dockerfile 63 | ports: 64 | - "8082:8082" 65 | depends_on: 66 | db: 67 | condition: service_healthy 68 | ordem_de_compra: 69 | condition: service_started 70 | ###################################################################################################################### 71 | #### Adicione as variáveis de ambiente do OpenTelemetry abaixo para configurar o envio de traces para o collector #### 72 | ###################################################################################################################### 73 | environment: 74 | - POSTGRES_USER=$POSTGRES_USER 75 | - POSTGRES_PASSWORD=$POSTGRES_PASSWORD 76 | - POSTGRES_DB=pagamento 77 | - POSTGRES_HOST=$POSTGRES_HOST 78 | - ORDER_URL=$ORDER_URL 79 | networks: 80 | - otel 81 | logging: *default-logging 82 | 83 | # Banco de dados Postgres 84 | db: 85 | image: postgres:17.2 86 | environment: 87 | - POSTGRES_USER=$POSTGRES_USER 88 | - POSTGRES_PASSWORD=$POSTGRES_PASSWORD 89 | - POSTGRES_DB=postgres 90 | healthcheck: 91 | test: ["CMD", "pg_isready", "-U", "$POSTGRES_USER", "-d", "postgres"] 92 | interval: 10s 93 | timeout: 5s 94 | retries: 5 95 | ports: 96 | - "5432:5432" 97 | volumes: 98 | - ./postgres_data:/var/lib/postgresql/data 99 | networks: 100 | - otel 101 | logging: *default-logging 102 | 103 | # OpenTelemetry Collector 104 | otelcollector: 105 | privileged: true 106 | user: 0:0 107 | image: otel/opentelemetry-collector-contrib:0.118.0 108 | volumes: 109 | - ./config/collector/otelcol-config.yml:/etc/otel-collector-config.yml 110 | - /var/run/docker.sock:/var/run/docker.sock:ro 111 | - /var/lib/docker/containers:/etc/log/ 112 | command: 113 | - "--config=/etc/otel-collector-config.yml" 114 | - "--set=service.telemetry.logs.level=INFO" 115 | ports: 116 | - "4318:4318" # Porta OTLP HTTP 117 | - "4317:4317" # Porta OTLP gRPC 118 | networks: 119 | - otel 120 | logging: *default-logging 121 | 122 | # Grafana para visualização de métricas, logs e traces 123 | grafana: 124 | image: grafana/grafana:11.4.0 125 | ports: 126 | - 3000:3000 127 | volumes: 128 | - ./config/grafana/grafana.ini:/etc/grafana/grafana.ini 129 | - ./config/grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml 130 | - ./config/grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml 131 | - ./config/grafana/dashboards:/var/lib/grafana/dashboards 132 | networks: 133 | - otel 134 | logging: *default-logging 135 | 136 | # Grafana Tempo para armaazenamento de traces 137 | tempo: 138 | image: grafana/tempo:2.7.0 139 | command: [ "-config.file=/etc/tempo.yaml" ] 140 | volumes: 141 | - ./config/grafana/tempo.yaml:/etc/tempo.yaml 142 | depends_on: 143 | - grafana 144 | networks: 145 | - otel 146 | logging: *default-logging 147 | 148 | # Grafana Mimir para armazenamento de métricas 149 | mimir: 150 | image: grafana/mimir:2.15.0 151 | command: [ "-config.file=/etc/mimir.yaml" ] 152 | volumes: 153 | - ./config/grafana/mimir.yaml:/etc/mimir.yaml 154 | depends_on: 155 | - grafana 156 | networks: 157 | - otel 158 | logging: *default-logging 159 | 160 | # Grafana Loki para armazenamento de logs 161 | loki: 162 | image: grafana/loki:3.3.2 163 | command: [ "-config.file=/etc/loki/loki.yaml" ] 164 | volumes: 165 | - ./config/grafana/loki.yaml:/etc/loki/loki.yaml 166 | depends_on: 167 | - grafana 168 | networks: 169 | - otel 170 | logging: *default-logging 171 | 172 | networks: 173 | otel: 174 | name: otel 175 | driver: bridge 176 | --------------------------------------------------------------------------------