├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── deploy.yml │ └── test-deploy.yml ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── blog ├── authors.yml ├── index.md ├── kratos │ ├── architecture.png │ └── kratos.md ├── oauth │ ├── codeflow.png │ ├── genericflow.png │ └── oauth.md ├── tags.yml ├── user-guide.md └── v0-14-0-release │ ├── grafana.png │ ├── tracing1.png │ ├── tracing2.png │ └── v0-14-0-release.md ├── docs ├── api.md ├── architecture.md ├── authentication.md ├── authorization.md ├── authz-spec.md ├── benchmark.md ├── bootstrap.md ├── certs.md ├── cli.md ├── dev-guide.md ├── diagrams │ ├── architecture.drawio.svg │ ├── channels_roles.svg │ ├── clients_roles.svg │ ├── domain_users.svg │ ├── domain_users_administrator.svg │ ├── domain_users_editor.svg │ ├── domain_users_member.svg │ ├── domain_users_viewer.svg │ ├── group_users_administrator_1.svg │ ├── group_users_administrator_2.svg │ ├── group_users_administrator_3.svg │ ├── group_users_editor.svg │ ├── group_users_member_11.svg │ ├── group_users_member_12.svg │ ├── group_users_member_13.svg │ ├── group_users_member_14.svg │ ├── group_users_member_2.svg │ ├── group_users_member_3.svg │ ├── group_users_viewer.svg │ ├── group_users_viewer_1.svg │ ├── group_users_viewer_2.svg │ ├── group_users_viewer_3.svg │ └── groups_roles.svg ├── edge.md ├── entities.md ├── events.md ├── getting-started.md ├── img │ ├── architecture.svg │ ├── architecture.xml │ ├── bootstrap │ │ ├── 1.png │ │ ├── 2.png │ │ ├── 3.png │ │ ├── 4.png │ │ ├── 5.png │ │ ├── 6.png │ │ └── addconfig.png │ ├── edge │ │ └── edge.png │ ├── gopher-reading.png │ ├── gopherBanner.jpg │ ├── opcua │ │ └── opcua.png │ ├── tracing │ │ ├── messagePub.png │ │ ├── search.png │ │ └── trace.png │ ├── twins │ │ └── architecture.png │ └── ui │ │ ├── dashboard.png │ │ ├── details.png │ │ ├── gateways.png │ │ ├── loraserver.png │ │ ├── things.png │ │ └── tracing.png ├── index.md ├── kubernetes.md ├── lora.md ├── messaging.md ├── opcua.md ├── provision.md ├── roles.md ├── security.md ├── smq-contrib.md ├── storage.md ├── tracing.md └── twins.md ├── docusaurus.config.ts ├── package-lock.json ├── package.json ├── requirements.txt ├── sidebars.ts ├── src └── css │ └── custom.css ├── static ├── .nojekyll ├── CNAME └── img │ ├── favicon.png │ ├── logo-dark.png │ └── logo-light1.png └── tsconfig.json /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @absmach/maintainers 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 8 | 9 | **FEATURE REQUEST** 10 | 11 | 1. Is there an open issue addressing this request? If one exists, please add a "+1" reaction to the 12 | existing issue; otherwise, proceed to step 2. 13 | 14 | 2. Describe the feature you are requesting, as well as the possible use case(s) for it. 15 | 16 | 3. Indicate the importance of this feature to you (must-have, should-have, nice-to-have). 17 | 18 | **BUG REPORT** 19 | 20 | 1. What were you trying to achieve? 21 | 22 | 2. What are the expected results? 23 | 24 | 3. What are the received results? 25 | 26 | 4. What are the steps to reproduce the issue? 27 | 28 | 5. In what environment did you encounter the issue? 29 | 30 | 6.
Additional information you deem important: 31 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Pull request title should be `SMQ-XXX - description` or `NOISSUE - description`, where XXX is the ID of the issue that this PR relates to. 2 | Please review the [CONTRIBUTING.md](./CONTRIBUTING.md) file for detailed contributing guidelines. 3 | 4 | ### What does this do? 5 | 6 | ### Which issue(s) does this PR fix/relate to? 7 | 8 | Add `Resolves #XXX` here to auto-close the issue that your PR fixes (if any). 9 | 10 | ### List any changes that modify/break current functionality 11 | 12 | ### Have you included tests for your changes? 13 | 14 | ### Did you document any new/modified functionality? 15 | 16 | ### Notes 17 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Docusaurus to GitHub Pages 2 | 3 | # Controls when the action will run 4 | on: 5 | push: 6 | branches: 7 | - main 8 | 9 | workflow_dispatch: 10 | permissions: 11 | contents: write 12 | 13 | jobs: 14 | build: 15 | name: Build Docusaurus 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - name: Checkout main 20 | uses: actions/checkout@v4 21 | with: 22 | fetch-depth: 0 23 | 24 | - name: Set up Node.js 25 | uses: actions/setup-node@v3 26 | with: 27 | node-version: 20 28 | cache: "npm" 29 | 30 | - name: Install dependencies 31 | run: npm install 32 | 33 | - name: Lint Markdown Files 34 | run: | 35 | npx markdownlint 'docs/**/*.md' 36 | 37 | - name: Build the website 38 | run: npm run build 39 | 40 | - name: Upload Build Artifact 41 | uses: actions/upload-pages-artifact@v3 42 | with: 43 | path: build 44 | 45 | 46 | deploy: 47 | name: Deploy to GitHub Pages 48 | needs: build 49 | 50 | permissions: 51 | pages: write 52 | id-token: write 53 | 54 | environment: 55 | name: github-pages 56 | 57 | runs-on: ubuntu-latest 58 | 59 | steps: 60 | - name: Deploy to GitHub Pages 61 | id: deployment 62 | uses: actions/deploy-pages@v4 63 | env: 64 | github_token: ${{ secrets.GITHUB_TOKEN }} 65 | -------------------------------------------------------------------------------- /.github/workflows/test-deploy.yml: -------------------------------------------------------------------------------- 1 | name: Test deployment 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | test-deploy: 10 | name: Test deployment 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout main 14 | uses: actions/checkout@v4 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Set up Node.js 19 | uses: actions/setup-node@v4 20 | with: 21 | node-version: 20 22 | cache: "npm" 23 | 24 | - name: Install dependencies 25 | run: npm install 26 | 27 | - name: Lint Markdown Files 28 | run: | 29 | npx markdownlint 'docs/**/*.md' 30 | 31 | - name: Test Build the website 32 | run: npm run build 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /site 2 | 3 | # Dependencies 4 | node_modules 5 | 6 | # Production 7 | /build 8 | 9 | # Generated files 10 | .docusaurus 11 | .cache-loader 12 | 13 | # Misc 14 | .DS_Store 15 | .env.local 16 | .env.development.local 17 | .env.test.local 18 | .env.production.local 19 | 20 | npm-debug.log* 21
| yarn-debug.log* 22 | yarn-error.log* 23 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to SuperMQ 2 | 3 | The following is a set of guidelines to contribute to SuperMQ and its libraries, which are 4 | hosted on the [SuperMQ Organization](https://github.com/absmach/supermq) on GitHub. 5 | 6 | This project adheres to the [Contributor Covenant 1.2](http://contributor-covenant.org/version/1/2/0). 7 | By participating, you are expected to uphold this code. Please report unacceptable behavior to 8 | [abuse@mainflux.com](mailto:abuse@mainflux.com). 9 | 10 | ## Reporting issues 11 | 12 | Reporting issues is a great way to contribute to the project. We are perpetually grateful for a well-written, 13 | thorough bug report. 14 | 15 | Before raising a new issue, check [our issue 16 | list](https://github.com/absmach/magistrala-docs/issues) to determine if it already contains the 17 | problem that you are facing. 18 | 19 | A good bug report shouldn't leave others needing to chase you for more information. Please be as detailed as possible. The following questions might serve as a template for writing a detailed 20 | report: 21 | 22 | - What were you trying to achieve? 23 | - What are the expected results? 24 | - What are the received results? 25 | - What are the steps to reproduce the issue? 26 | - In what environment did you encounter the issue? 27 | 28 | ## Pull requests 29 | 30 | Good pull requests (e.g. patches, improvements, new features) are a fantastic help. They should 31 | remain focused in scope and avoid unrelated commits. 32 | 33 | **Please ask first** before embarking on any significant pull request (e.g. implementing new features, 34 | refactoring code etc.), otherwise you risk spending a lot of time working on something that the 35 | maintainers might not want to merge into the project. 36 | 37 | Please adhere to the coding conventions used throughout the project. If in doubt, consult the 38 | [Effective Go](https://golang.org/doc/effective_go.html) style guide. 39 | 40 | To contribute to the project, [fork](https://help.github.com/articles/fork-a-repo/) it, 41 | clone your fork repository and configure the remotes: 42 | 43 | ```bash 44 | git clone https://github.com/<your-username>/docs.git 45 | cd docs 46 | git remote add upstream https://github.com/absmach/magistrala-docs.git 47 | ``` 48 | 49 | If your cloned repository is behind the upstream commits, then get the latest changes from upstream: 50 | 51 | ```bash 52 | git checkout main 53 | git pull --rebase upstream main 54 | ``` 55 | 56 | Create a new topic branch from `main` using the naming convention `SMQ-[issue-number]` 57 | to help us keep track of your contribution scope: 58 | 59 | ```bash 60 | git checkout -b SMQ-[issue-number] 61 | ``` 62 | 63 | Commit your changes in logical chunks. When you are ready to commit, make sure 64 | to write a Good Commit Message™. Consult the [Erlang's contributing guide](https://github.com/erlang/otp/wiki/Writing-good-commit-messages) 65 | if you're unsure of what constitutes a Good Commit Message™. Use [interactive rebase](https://help.github.com/articles/about-git-rebase) 66 | to group your commits into logical units of work before making it public. 67 | 68 | Note that every commit you make must be signed. By signing off your work you indicate that you 69 | are accepting the [Developer Certificate of Origin](https://developercertificate.org/).
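For example, the sign-off trailer is appended automatically when committing with the `-s` flag (the issue number and message below are purely illustrative):

```bash
git commit -s -m "SMQ-123 - Fix broken links in the getting-started guide"
# The resulting commit message ends with a trailer such as:
# Signed-off-by: Jane Doe <jane.doe@example.com>
```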
70 | 71 | Use your real name (sorry, no pseudonyms or anonymous contributions). If you set your `user.name` 72 | and `user.email` git configs, you can sign your commit automatically with `git commit -s`. 73 | 74 | Locally merge (or rebase) the upstream development branch into your topic branch: 75 | 76 | ```bash 77 | git pull --rebase upstream main 78 | ``` 79 | 80 | Push your topic branch up to your fork: 81 | 82 | ```bash 83 | git push origin SMQ-[issue-number] 84 | ``` 85 | 86 | [Open a Pull Request](https://help.github.com/articles/using-pull-requests/) with a clear title 87 | and detailed description. 88 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | https://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2015-2023 Abstract Machines 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 183 | You may obtain a copy of the License at 184 | 185 | https://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 192 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SuperMQ 2 | 3 | [![license][license]](LICENSE) 4 | [![Build](https://github.com/absmach/supermq-docs/actions/workflows/pages.yaml/badge.svg?branch=main)](https://github.com/absmach/supermq-docs/actions/workflows/pages.yaml) 5 | 6 | This repo collects the collaborative work on SuperMQ documentation. 7 | The official documentation is hosted at [SuperMQ Docs page][docs]. 8 | Documentation is auto-generated from Markdown files in this repo. 9 | 10 | This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. 11 | 12 | > Additional practical information about the SuperMQ system, news and tutorials can be found on the [SuperMQ blog][blog]. 13 | 14 | ## Prerequisites 15 | 16 | Ensure that you have [Docusaurus](https://docusaurus.io/docs/installation) installed on your system.
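Docusaurus itself is installed as a local dependency in the next step, so the practical prerequisite is a recent [Node.js](https://nodejs.org/) runtime (the CI workflows in this repo use Node 20). A quick sanity check:

```bash
# Node.js 18 or newer should work for recent Docusaurus releases.
node --version
npm --version
```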
17 | 18 | ## Installation 19 | 20 | The docs repo can be fetched from GitHub: 21 | 22 | ```bash 23 | git clone git@github.com:absmach/supermq-docs.git 24 | ``` 25 | 26 | Install the required dependencies using: 27 | 28 | ```bash 29 | npm install 30 | ``` 31 | 32 | ## Build 33 | 34 | Build the documentation site using the following command: 35 | 36 | ```bash 37 | npm run build 38 | ``` 39 | 40 | ## Local Development 41 | 42 | Use Docusaurus to serve the documentation locally: 43 | 44 | ```bash 45 | npm start 46 | ``` 47 | 48 | This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. 49 | 50 | ## Contributing 51 | 52 | Thank you for your interest in SuperMQ and your desire to contribute! 53 | 54 | 1. Take a look at our [open issues](https://github.com/absmach/supermq-docs/issues). The [good-first-issue](https://github.com/absmach/supermq-docs/labels/good-first-issue) label is specifically for issues that are great for getting started. 55 | 2. Check out the [contribution guide](CONTRIBUTING.md) to learn more about our style and conventions. 56 | 3. Make your changes compatible with our workflow. 57 | 58 | ## Community 59 | 60 | - [Matrix][matrix] 61 | - [Twitter][twitter] 62 | 63 | ## License 64 | 65 | [Apache-2.0](LICENSE) 66 | 67 | [matrix]: https://matrix.to/#/#Mainflux_mainflux:gitter.im 68 | [license]: https://img.shields.io/badge/license-Apache%20v2.0-blue.svg 69 | [blog]: https://medium.com/abstract-machines-blog 70 | [twitter]: https://twitter.com/absmach 71 | [docs]: https://docs.magistrala.abstractmachines.fr 72 | -------------------------------------------------------------------------------- /blog/authors.yml: -------------------------------------------------------------------------------- 1 | borovcanin: 2 | name: 'Dusan Borovcanin' 3 | title: 'Software Engineer' 4 | url: 'https://github.com/dborovcanin' 5 | 6 | draskovic: 7 | name: 'Drasko Draskovic' 8 | title: 'Software Engineer' 9 | url: 'https://github.com/drasko' 10 | email: 'drasko.draskovic@gmail.com' 11 | 12 | osodo: 13 | name: 'Rodney Osodo' 14 | title: 'Software Engineer' 15 | url: 'http://rodneyosodo.com/' 16 | socials: 17 | twitter: 'https://twitter.com/b1ackd0t' 18 | 19 | musilah: 20 | name: 'Nataly Musilah' 21 | title: 'Software Engineer' 22 | url: 'https://github.com/Musilah' 23 | email: 'nataleigh.nk@gmail.com' 24 | -------------------------------------------------------------------------------- /blog/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Welcome to the SuperMQ Blog 3 | slug: / 4 | authors: osodo 5 | --- 6 | 7 | 8 | # Welcome to the SuperMQ Blog 9 | 10 | This section contains a list of blog posts related to SuperMQ. They have been published to the [Official Abstract Machines blog on Medium](https://medium.com/abstract-machines-blog).
11 | 12 | <!-- truncate --> 13 | 14 | ## Featured Blogs 15 | 16 | - [What's New in SuperMQ v0.14.0](./v0-14-0-release/v0-14-0-release.md) 17 | - [Extending SuperMQ Users Repository](./kratos/kratos.md) 18 | - [Integrating OAuth2.0 with SuperMQ](./oauth/oauth.md) 19 | - [Guide on using SuperMQ](./user-guide.md) 20 | -------------------------------------------------------------------------------- /blog/kratos/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/blog/kratos/architecture.png -------------------------------------------------------------------------------- /blog/kratos/kratos.md: -------------------------------------------------------------------------------- 1 | --- 2 | slug: extending-users-repository 3 | title: Extending Magistrala Users Repository 4 | authors: osodo 5 | description: Learn how Magistrala integrates with Ory Kratos to enhance user management, featuring MFA, password recovery, and admin APIs. 6 | tags: ['Magistrala', 'Kratos', 'User Management', 'Identity'] 7 | --- 8 | 9 | 10 | # Extending Magistrala Users Repository 11 | 12 | For the past few months, we have been working on integrating Magistrala with [Ory Kratos](https://www.ory.sh/docs/kratos/ory-kratos-intro), a cloud-native identity and user management system, as the user management service for Magistrala. 13 | 14 | <!-- truncate --> 15 | 16 | Let's delve deeper into the myriad capabilities that Kratos brings to the forefront: 17 | 18 | - self-service user login and registration. This is the ability to register and log in to the system without the need for an administrator. 19 | - multi-factor authentication with Time-based One-time Passwords (TOTP). 20 | - account verification. This offers the ability to verify a user's email address. 21 | - password recovery. This offers the ability to reset a user's password or recover a forgotten password. 22 | - profile and account management. This offers the ability to update a user's profile and account information. 23 | - admin API for user management. 24 | 25 | Our architectural journey has been anchored by the adoption of Go kit, which made this a seamless process. Fundamentally, we have been using [Go kit](https://gokit.io) as the framework for most of our microservices. 26 | 27 | ## Go kit 28 | 29 | Go kit is a collection of Go packages that help you build robust, reliable, maintainable microservices. It is not an MVC framework; rather, it encourages structuring your microservices in a [clean architecture](https://blog.cleancoder.com/uncle-bob/2011/11/22/Clean-Architecture.html) or [hexagonal architecture](https://alistair.cockburn.us/hexagonal-architecture/): 30 | 31 | - transport layer: responsible for encoding and decoding requests and responses. 32 | - endpoint layer: responsible for mapping requests to business logic. 33 | - service layer: responsible for business logic. 34 | 35 | We added an extra layer to most of our services called the `repository layer`. This crucial layer acts as the intermediary responsible for interfacing with the database, thus fostering a clean separation of concerns wherein the repository layer remains blissfully unaware of the intricacies of the transport layer. 36 | 37 | ![Users Service Architecture](./architecture.png) 38 | 39 | With this architecture, we can decouple the services from the database and make it easier to switch between different databases.
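To make the layering concrete, here is a minimal sketch of the idea (the type and method names are illustrative, not the actual Magistrala types): the service depends only on a repository interface, so the database behind it can be swapped without touching the business logic.

```go
package users

import "context"

// Client is a simplified stand-in for the platform's client entity.
type Client struct {
	ID   string
	Name string
}

// Repository abstracts persistence; a PostgreSQL implementation, a Kratos
// implementation, or an in-memory fake can all satisfy it.
type Repository interface {
	Save(ctx context.Context, c Client) (Client, error)
}

// Service holds the business logic and can be wired with any Repository.
type Service struct {
	repo Repository
}

// Register applies business rules, then delegates persistence to the
// repository without knowing which backend sits behind the interface.
func (s Service) Register(ctx context.Context, c Client) (Client, error) {
	return s.repo.Save(ctx, c)
}
```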
40 | 41 | ## Repository Layer 42 | 43 | The repository layer is a thin layer responsible for communicating with the database. It handles: 44 | 45 | - persisting the data. 46 | - manipulating the data. 47 | - retrieving the data. 48 | - deleting the data. 49 | 50 | The repository is modelled using an interface. The interface defines the methods that the repository layer should implement. 51 | 52 | ## Repository Interface 53 | 54 | The repository interface is defined as follows: 55 | 56 | ```go 57 | // Repository defines the required dependencies for Client repository. 58 | // 59 | //go:generate mockery --name Repository --output=./mocks --filename repository.go --quiet --note "Copyright (c) Abstract Machines" 60 | type Repository interface { 61 | // Save persists the client account. A non-nil error is returned to indicate 62 | // operation failure. 63 | Save(ctx context.Context, client clients.Client) (clients.Client, error) 64 | 65 | // RetrieveByID retrieves the client by its unique ID. 66 | RetrieveByID(ctx context.Context, id string) (Client, error) 67 | 68 | // RetrieveByIdentity retrieves client by its unique credentials 69 | RetrieveByIdentity(ctx context.Context, identity string) (Client, error) 70 | 71 | // RetrieveAll retrieves all clients. 72 | RetrieveAll(ctx context.Context, pm Page) (ClientsPage, error) 73 | 74 | // RetrieveAllBasicInfo lists all clients only with basic information. 75 | RetrieveAllBasicInfo(ctx context.Context, pm Page) (ClientsPage, error) 76 | 77 | // RetrieveAllByIDs retrieves clients for the given client IDs. 78 | RetrieveAllByIDs(ctx context.Context, pm Page) (ClientsPage, error) 79 | 80 | // Update updates the client name and metadata. 81 | Update(ctx context.Context, client Client) (Client, error) 82 | 83 | // UpdateTags updates the client tags. 84 | UpdateTags(ctx context.Context, client Client) (Client, error) 85 | 86 | // UpdateIdentity updates the identity of a client with the given id. 87 | UpdateIdentity(ctx context.Context, client Client) (Client, error) 88 | 89 | // UpdateSecret updates the secret for a client with the given identity. 90 | UpdateSecret(ctx context.Context, client Client) (Client, error) 91 | 92 | // UpdateRole updates the role for a client with the given id. 93 | UpdateRole(ctx context.Context, client Client) (Client, error) 94 | 95 | // ChangeStatus changes client status to enabled or disabled 96 | ChangeStatus(ctx context.Context, client Client) (Client, error) 97 | 98 | 99 | 100 | 101 | // CheckSuperAdmin checks if the user with the given ID is a super admin. 102 | CheckSuperAdmin(ctx context.Context, adminID string) error 103 | } 104 | ``` 105 | 106 | The initial implementation of the repository layer is for PostgreSQL, but it can easily be extended to support other databases. In this case, we decided to switch to Kratos as the user management service.
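Regardless of the backend, the `go:generate` directive in the interface above produces a [mockery](https://github.com/vektra/mockery) mock that satisfies `Repository`, which lets the service layer be tested without any real database. A sketch of such a test using testify's mock API (the import paths are illustrative, and the service constructor is simplified — the real one takes more dependencies):

```go
package users_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"

	// Illustrative paths for the generated mock and the service under test.
	"github.com/absmach/magistrala/users"
	"github.com/absmach/magistrala/users/mocks"
	mgclients "github.com/absmach/magistrala/pkg/clients"
)

func TestRegister(t *testing.T) {
	repo := new(mocks.Repository)
	// Expect Save to be called with any context and any client,
	// and return a canned result instead of touching a database.
	repo.On("Save", mock.Anything, mock.Anything).
		Return(mgclients.Client{ID: "1"}, nil)

	svc := users.NewService(repo) // hypothetical, simplified constructor

	_, err := svc.RegisterClient(context.Background(), mgclients.Client{Name: "c1"})
	assert.Nil(t, err)
	repo.AssertExpectations(t)
}
```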
107 | 108 | ## Comparison 109 | 110 | When saving a client, the [postgresql implementation](https://github.com/absmach/magistrala/blob/2be34c42f4e6acbf98ae8bf9171adb15c031c276/users/postgres/clients.go#L49C1-L76C2) is as follows: 111 | 112 | ```go 113 | func (repo *repository) Save(ctx context.Context, c mgclients.Client) (mgclients.Client, error) { 114 | q := `INSERT INTO clients (id, name, tags, identity, secret, metadata, created_at, status, role) 115 | VALUES (:id, :name, :tags, :identity, :secret, :metadata, :created_at, :status, :role) 116 | RETURNING id, name, tags, identity, metadata, status, created_at` 117 | dbc, err := pgclients.ToDBClient(c) 118 | if err != nil { 119 | return mgclients.Client{}, errors.Wrap(repoerr.ErrCreateEntity, err) 120 | } 121 | 122 | row, err := repo.DB.NamedQueryContext(ctx, q, dbc) 123 | if err != nil { 124 | return mgclients.Client{}, postgres.HandleError(repoerr.ErrCreateEntity, err) 125 | } 126 | 127 | defer row.Close() 128 | row.Next() 129 | dbc = pgclients.DBClient{} 130 | if err := row.StructScan(&dbc); err != nil { 131 | return mgclients.Client{}, errors.Wrap(repoerr.ErrFailedOpDB, err) 132 | } 133 | 134 | client, err := pgclients.ToClient(dbc) 135 | if err != nil { 136 | return mgclients.Client{}, errors.Wrap(repoerr.ErrFailedOpDB, err) 137 | } 138 | 139 | return client, nil 140 | } 141 | ``` 142 | 143 | The [kratos implementation](https://github.com/rodneyosodo/magistrala/blob/efbdbf4ca71d33d03b9e0861d47bf2bff2309a29/users/kratos/repository.go#L60C1-L96C2) is as follows: 144 | 145 | ```go 146 | func (repo *repository) Save(ctx context.Context, user mgclients.Client) (mgclients.Client, error) { 147 | hashedPassword, err := repo.hasher.Hash(user.Credentials.Secret) 148 | if err != nil { 149 | return mgclients.Client{}, errors.Wrap(repoerr.ErrCreateEntity, err) 150 | } 151 | state := mgclients.ToOryState(user.Status) 152 | identity, resp, err := repo.IdentityAPI.CreateIdentity(ctx).CreateIdentityBody( 153 | ory.CreateIdentityBody{ 154 | SchemaId: repo.schemaID, 155 | Traits: map[string]interface{}{ 156 | "email": user.Credentials.Identity, 157 | "username": user.Name, 158 | "enterprise": slices.Contains(user.Tags, "enterprise"), 159 | "newsletter": slices.Contains(user.Tags, "newsletter"), 160 | }, 161 | State: &state, 162 | MetadataPublic: user.Metadata, 163 | MetadataAdmin: map[string]interface{}{ 164 | "role": user.Role, 165 | "permissions": user.Permissions, 166 | }, 167 | Credentials: &ory.IdentityWithCredentials{ 168 | Password: &ory.IdentityWithCredentialsPassword{ 169 | Config: &ory.IdentityWithCredentialsPasswordConfig{ 170 | HashedPassword: &hashedPassword, 171 | Password: &user.Credentials.Secret, 172 | }, 173 | }, 174 | }, 175 | }, 176 | ).Execute() 177 | if err != nil { 178 | return mgclients.Client{}, errors.Wrap(repoerr.ErrCreateEntity, decodeError(resp)) 179 | } 180 | 181 | return toClient(identity), nil 182 | } 183 | ``` 184 | 185 | The PostgreSQL implementation directly inserts client data into the database using a prepared SQL statement. It utilizes a custom type conversion to convert the client object into a format compatible with the database before insertion. 186 | 187 | On the other hand, the Kratos implementation interacts with the Kratos Identity service API to create client identities. It hashes the password before sending it to the API and constructs the identity creation request with various client attributes. 
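For completeness, the `toClient` helper referenced above maps the Ory identity returned by the API back into the platform's client model. A rough sketch of such a mapping (field handling is simplified; the actual helper in the repository is more thorough):

```go
func toClient(identity *ory.Identity) mgclients.Client {
	// Traits is a free-form JSON object in the Ory SDK, so it has to be
	// asserted back into a map before individual traits can be read.
	traits, _ := identity.Traits.(map[string]interface{})
	email, _ := traits["email"].(string)
	username, _ := traits["username"].(string)

	return mgclients.Client{
		ID:       identity.Id,
		Name:     username,
		Metadata: identity.MetadataPublic,
		Credentials: mgclients.Credentials{
			Identity: email,
		},
	}
}
```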
188 | 189 | From a high-level perspective, both implementations achieve the same goal of saving client data, but they differ in their approach. This is the reason behind using interfaces. Interfaces are important in software design because they: 190 | 191 | - Encapsulate abstraction. An interface defines the behavior of an object, but not its implementation. This allows the implementation to be changed without affecting the clients of the interface. 192 | - Promote loose coupling. Loose coupling is a design principle that minimizes the dependencies between modules. This makes the code more flexible and easier to maintain. 193 | - Allow polymorphism. Polymorphism is the ability to treat objects of different types similarly. This is made possible by interfaces, which define a common set of methods for different types of objects. 194 | 195 | With the same service layer with minimal change in logic, the functionality remains the same. 196 | 197 | ## Some pitfalls 198 | 199 | 1. Creating users. Initially, we hashed passwords in the service layer. However, this was not a good idea because it was not possible to change the hashing algorithm without changing the service layer. We moved the hashing logic to the repository layer. 200 | 2. Client Filtering Optimization: In our quest for efficient client filtering, we initially employed a straightforward SQL query. However, upon integrating with Kratos, we encountered a roadblock – the inability to directly filter users. Consequently, we were compelled to retrieve the user data first and subsequently apply filtering criteria. This iterative process extends to pagination as well, adding a layer of complexity. While this approach may introduce a slight overhead, it ensures compatibility with Kratos and enables us to effectively filter clients despite the inherent limitations. 201 | 3. Our experience with PostgreSQL afforded us the luxury of updating clients with a single, succinct query. However, transitioning to Kratos presented a unique challenge. Due to Kratos' utilization of email as the unique identifier for users, updating clients necessitates a different approach. Rather than directly executing an update query, we first retrieve the user data to obtain the email and username, followed by the execution of the update operation. While this two-step process may seem cumbersome compared to the simplicity of PostgreSQL, it ensures compliance with Kratos' user identification methodology. Despite the additional network calls involved, this approach ensures data integrity and compatibility with Kratos' unique architecture. 202 | 203 | ## Conclusion 204 | 205 | As we explored the rich capabilities that Kratos brings to the forefront, including self-service user operations, multi-factor authentication, and comprehensive admin APIs, we also encountered several challenges and pitfalls along the way. 206 | 207 | In essence, our integration with Ory Kratos exemplifies our commitment to adaptability, flexibility, and resilience. By embracing challenges as opportunities for growth and refinement, we pave the way for a robust and scalable user management solution that meets the evolving needs of Magistrala and its users. 
208 | -------------------------------------------------------------------------------- /blog/oauth/codeflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/blog/oauth/codeflow.png -------------------------------------------------------------------------------- /blog/oauth/genericflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/blog/oauth/genericflow.png -------------------------------------------------------------------------------- /blog/tags.yml: -------------------------------------------------------------------------------- 1 | facebook: 2 | label: Facebook 3 | permalink: /facebook 4 | description: Facebook tag description 5 | 6 | hello: 7 | label: Hello 8 | permalink: /hello 9 | description: Hello tag description 10 | 11 | docusaurus: 12 | label: Docusaurus 13 | permalink: /docusaurus 14 | description: Docusaurus tag description 15 | 16 | Magistrala: 17 | label: Magistrala 18 | permalink: /magistrala 19 | description: All posts about Magistrala, the open-source IoT platform. 20 | 21 | IIoT: 22 | label: IIoT 23 | permalink: /iiot 24 | description: Posts focusing on Industrial Internet of Things (IIoT) solutions and use cases. 25 | 26 | User Guide: 27 | label: User Guide 28 | permalink: /user-guide 29 | description: Posts that serve as guides for using Magistrala. 30 | 31 | Industrial Automation: 32 | label: Industrial Automation 33 | permalink: /industrial-automation 34 | description: Content related to automating industrial processes with Magistrala. 35 | 36 | Open Source: 37 | label: Open Source 38 | permalink: /open-source 39 | description: Insights and updates on Magistrala as an open-source platform. 40 | 41 | OAuth2.0: 42 | label: OAuth2.0 43 | permalink: /oauth2 44 | description: Posts about integrating OAuth2.0 with Magistrala for secure authentication. 45 | 46 | Authentication: 47 | label: Authentication 48 | permalink: /authentication 49 | description: Content exploring authentication mechanisms like OAuth2.0. 50 | 51 | Google OAuth: 52 | label: Google OAuth 53 | permalink: /google-oauth 54 | description: Posts about integrating Google OAuth with Magistrala. 55 | 56 | Kratos: 57 | label: Kratos 58 | permalink: /kratos 59 | description: Posts about using Ory Kratos for user management in Magistrala. 60 | 61 | User Management: 62 | label: User Management 63 | permalink: /user-management 64 | description: Posts focused on managing users within Magistrala. 65 | 66 | Identity: 67 | label: Identity 68 | permalink: /identity 69 | description: Content about identity management and integration in Magistrala. 70 | 71 | release: 72 | label: Release 73 | permalink: /release 74 | description: Updates and announcements about new releases of Magistrala. 75 | 76 | SuperMQ: 77 | label: SuperMQ 78 | permalink: /supermq 79 | description: Posts related to SuperMQ, a scalable and secure IoT platform. 80 | 81 | update: 82 | label: Update 83 | permalink: /update 84 | description: Blog posts about updates and changes to Magistrala. 
85 | -------------------------------------------------------------------------------- /blog/v0-14-0-release/grafana.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/blog/v0-14-0-release/grafana.png -------------------------------------------------------------------------------- /blog/v0-14-0-release/tracing1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/blog/v0-14-0-release/tracing1.png -------------------------------------------------------------------------------- /blog/v0-14-0-release/tracing2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/blog/v0-14-0-release/tracing2.png -------------------------------------------------------------------------------- /blog/v0-14-0-release/v0-14-0-release.md: -------------------------------------------------------------------------------- 1 | --- 2 | slug: v0-14-0-release 3 | title: What's New in Magistrala v0.14.0 4 | authors: osodo 5 | description: Discover the latest updates in SuperMQ v0.14.0, featuring enhanced scalability, improved access control, and UI upgrades. 6 | tags: [release, SuperMQ, update] 7 | --- 8 | 9 | # What's New in Magistrala v0.14.0 10 | 11 | We're thrilled to announce the release of Magistrala v0.14.0, packed with exciting new features and improvements that elevate your experience! This update marks a significant leap forward, making Magistrala even more powerful and versatile. 12 | 13 | 14 | <!-- truncate --> 15 | 16 | Here are some of the highlights: 17 | 18 | - Switched to Google Zanzibar Access control approach with SpiceDB 19 | - Implemented Domains for access control 20 | - Added UI as a separate service 21 | - Returned WebSocket support 22 | - Added Invitation service 23 | - Expanded compatibility across various MQTT brokers 24 | - Added support for numerous event stores 25 | - Enabled JetStream for NATS as the default event store and message broker 26 | - Added support for gRPC mTLS 27 | - Added graceful-stop for HTTP and gRPC servers 28 | - Logical and hard entity deletion 29 | - Improved tracing 30 | - Improved event sourcing 31 | - Added telemetry 32 | - Added Prometheus and Grafana for metrics 33 | - Upgraded InfluxDB from 1.x to 2.x 34 | - Upgraded Postgres to 16.1 35 | 36 | This is just a glimpse of the extensive changes in v0.14.0. For a complete list, check out the detailed changelog [here](https://github.com/absmach/magistrala/compare/0.13.0...v0.14.0) for this [release](https://github.com/absmach/magistrala/releases/tag/v0.14.0). 36 | 37 | ## New Features 38 | 39 | ### Switched to Google Zanzibar Access control approach with SpiceDB 40 | 41 | We migrated our access control system from Ory Keto to [SpiceDB](https://github.com/authzed/spicedb). SpiceDB is an open-source, [Google Zanzibar](https://authzed.com/blog/what-is-zanzibar/)-inspired database system for real-time, security-critical application permissions. Initially, we attempted to build our own access control system. However, we quickly recognized the inherent complexity of this task and the importance of domain expertise. We are confident that SpiceDB is the superior solution for our requirements.
The PR for this change can be found [here](https://github.com/absmach/magistrala/pull/1919). 42 | 43 | ### Implemented Domains for access control 44 | 45 | Magistrala now supports multi-tenancy with the introduction of domains! This exciting new feature allows for the separation of data and resources between different users or organizations. While we're still working on further refinements in future releases, we believe this marks a significant step forward for Magistrala. You can find the commit for this change on [GitHub](https://github.com/absmach/magistrala/commit/c294e84d2446f8b99d22e24211da171462ecdd41). 46 | 47 | ### Added UI as a separate service 48 | 49 | With great enthusiasm, we present a significant upgrade to the Magistrala user interface: it's now a standalone service written in Golang! This exciting shift from a single-page Angular application brings several advantages. Firstly, maintaining a separate Golang service aligns better with our existing codebase, simplifying the development and upkeep of the UI. Secondly, this opens doors for easier extension and future functionalities. While there's still room for further development, especially regarding dashboards, this marks a major stepping stone for the UI's potential. We're actively exploring various dashboarding solutions and can't wait to share what's next! We dedicated an entire repository to the UI; it can be found [here](https://github.com/absmach/magistrala-ui). 50 | 51 | 53 | ### Returned WebSocket support 54 | 55 | Remember the WebSocket adapter? We are happy to reveal its return to Magistrala, built from scratch for improved performance and exciting possibilities. This isn't just a revival – we're actively planning future features and even UI integration, allowing your interface to receive real-time messages directly. This opens doors for dynamic and efficient real-time communication, and we can't wait to see what you build! The PR for this change can be found [here](https://github.com/absmach/magistrala/pull/1625). 56 | 57 | ### Added Invitation service 58 | 59 | The Invitation service is a new feature in Magistrala. This service is responsible for sending invitations to users to join a domain, which is essential for multi-tenancy. We currently don't support email invitations, because the Invitation service needs to talk to the Users service to get the user's email and that inter-service communication is not in place yet; we plan to add it in a future release. The commit for this change can be found [here](https://github.com/absmach/magistrala/commit/a07aabe78328d99c176f6bf6034339ce5256f13f) 60 | 61 | ### Expanded compatibility across various MQTT brokers 62 | 63 | The MQTT message broker is responsible for handling MQTT messages. We expanded compatibility across various MQTT brokers: you can use either VerneMQ or NATS as the MQTT message broker. VerneMQ is a very popular MQTT message broker and was initially the only one supported. We decided to add support for NATS as the MQTT message broker because we believe that it is a great message broker and is very easy to use. Using NATS as the MQTT message broker, you can simplify your infrastructure, since it can serve as both the event store and the message broker. The PR for this change can be found [here](https://github.com/absmach/magistrala/pull/1904).
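Which MQTT broker a deployment uses is selected at deployment time through the Docker Compose environment. As a purely illustrative sketch (the variable name below is an assumption — consult the `docker/.env` file shipped with the release for the actual key):

```bash
# Illustrative only; check docker/.env in the release for the real name.
MG_MQTT_BROKER_TYPE=nats   # or: vernemq
```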
64 | 65 | ### Added support for multiple event stores 66 | 67 | Magistrala takes a monumental leap forward in event handling with the introduction of multiple event store support. No longer limited to Redis, you now have the freedom to choose between NATS Jetstream and RabbitMQ alongside the familiar favourite. This decision stems from our unwavering commitment to empowering developers with powerful and user-friendly options. Using NATS or RabbitMQ as the event store you can simplify your infrastructure since it can be used as both the event store and the message broker. The PR for this change can be found [here](https://github.com/absmach/magistrala/pull/1907). 68 | 69 | ### Added support for gRPC mTLS 70 | 71 | gRPC is a modern open-source high-performance Remote Procedure Call (RPC) framework developed by Google. It can efficiently connect services in and across data centres with pluggable support for load balancing, tracing, health checking and authentication. We use gRPC for inter-service communication in Magistrala. For example, if the user service wants to verify that user x can update user y, it will call the auth service to verify that user x can update user y. We use gRPC for this communication. 72 | 73 | In TLS, the client verifies the server's certificate to ensure that it is connecting to the correct server. The server doesn't verify the client's certificate. Anyone who has the server's certificate can connect to the server. Mutual TLS or mTLS is a form of transport security that requires both the client and the server to present certificates to verify their identity. This is a more secure form of transport security than traditional TLS, which only requires the server to present a certificate. mTLS is a good choice for securing communication between services in a microservices architecture, where you want to ensure that both the client and the server are who they say they are. The PR for this change can be found [here](https://github.com/absmach/magistrala/pull/1848). This is enabled out-of-the-box and you don't need to do anything to enable it. 74 | 75 | ### Added graceful-stop for HTTP and GRPC servers 76 | 77 | We added a graceful stop for HTTP and GRPC servers. This means that when you stop the server it will wait for all the requests to finish before it stops. This is a very important feature and we are very happy to have it in Magistrala. The PR for this change can be found [here](https://github.com/absmach/magistrala/pull/1548) 78 | 79 | ### Logical and hard entity deletion 80 | 81 | Logical deletion is a way to mark an entity as deleted without actually deleting it. This is done by changing its status to disabled. Hard deletion is a way to delete an entity from the database. We added support for both logical and hard entity deletion. 82 | 83 | ### Improved tracing 84 | 85 | Distributed tracing is a way to trace a request as it goes through the system. We enhanced support for distributed tracing in Magistrala. We added support for distributed tracing over NATS and gRPC. 86 | 87 | ![Tracing HTTP publish operation](./tracing1.png) 88 | 89 | ![Tracing update group operation](./tracing2.png) 90 | 91 | ### Improved event sourcing 92 | 93 | Event sourcing is a way to store data as events instead of the current state of the system in an append-only log. We improved support for event sourcing in Magistrala by: 94 | 95 | - Adding event sourcing package which can be used to implement event sourcing in the different services. 
This package is used to store events in the event store and to retrieve events from the event store. This change can be found [here](https://github.com/absmach/magistrala/pull/1897). 96 | - Emitting events from [Users service](https://github.com/absmach/magistrala/pull/1835) and [twins service](https://github.com/absmach/magistrala/pull/1852). 97 | - Reliably emitting events to the event store. This is by storing the events in a buffer if the event store is not available and then emitting the events to the event store when it becomes available. This change can be found [here](https://github.com/absmach/magistrala/pull/1836). This was for NATS since the library we were using for NATS did not support this feature. 98 | 99 | ### Added telemetry 100 | 101 | Magistrala is committed to continuously improving its services and ensuring a seamless experience for its users. To achieve this, we collect certain data from your deployments. Rest assured, this data is collected solely to enhance Magistrala and is not used with any malicious intent. The deployment summary can be found on [our website](https://deployments.magistrala.abstractmachines.fr). 102 | 103 | The collected data includes: 104 | 105 | - IP Address - Used for approximate location information on deployments. 106 | - Services Used - To understand which features are popular and prioritize future developments. 107 | - Last Seen Time - To ensure the stability and availability of Magistrala. 108 | - Magistrala Version - To track the software version and deliver relevant updates. 109 | 110 | We take your privacy and data security seriously. All data collected is handled by our stringent privacy policies and industry best practices. 111 | 112 | Data collection is on by default and can be disabled by setting the env variable: `MG_SEND_TELEMETRY=false`. 113 | 114 | ### Added prometheus and grafana for metrics 115 | 116 | Prometheus is an open-source monitoring and alerting toolkit. Grafana is the open-source analytics and monitoring solution for every database. We added support for Prometheus and grafana for metrics. Prometheus is used to collect metrics from the different services and grafana is used to visualize the metrics. The PR for this change can be found [here](https://github.com/absmach/magistrala/pull/1753) 117 | 118 | ![Grafana dashboard](./grafana.png) 119 | 120 | Get ready to experience Magistrala like never before! This release is jam-packed with improvements, making it easier than ever to get started and navigate our features. We've also implemented a new wave of automated testing, ensuring rock-solid stability, and squashed those pesky bugs that were slowing you down. We're thrilled about the progress and can't wait for you to dive in! Don't hesitate to share your thoughts and feedback, as your input is invaluable to us. We're always here to help, so reach out with any questions you may have. Let's explore the exciting world of Magistrala together! 
121 | -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Architecture 3 | --- 4 | 5 | ## Components 6 | 7 | The SuperMQ IoT platform comprises the following services: 8 | 9 | | Service | Description | 10 | | :--------------------------- | :------------------------------------------------------------------------------------------ | 11 | | [auth][auth-service] | Handles authorization and authentication for the platform, as well as managing keys and tokens | 12 | | [users][users-service] | Manages the platform's users and auth concerns with regard to users | 13 | | [groups][groups-service] | Manages the platform's groups and auth concerns with regard to groups | 14 | | [clients][clients-service] | Manages the platform's clients and auth concerns with regard to clients | 15 | | [channels][channels-service] | Manages the platform's channels and auth concerns with regard to channels | 16 | | [domains][domains-service] | Manages the platform's domains and auth concerns with regard to domains | 17 | | [http-adapter][http-adapter] | Provides an HTTP interface for sending messages via HTTP | 18 | | [mqtt-adapter][mqtt-adapter] | Provides an MQTT and MQTT over WS interface for sending and receiving messages via MQTT | 19 | | [ws-adapter][ws-adapter] | Provides a WebSocket interface for sending and receiving messages via WS | 20 | | [coap-adapter][coap-adapter] | Provides a CoAP interface for sending and receiving messages via CoAP | 21 | | [supermq-cli][supermq-cli] | Command line interface | 22 | 23 | ![arch](img/architecture.svg) 24 | 25 | ## Domain Model 26 | 27 | The platform consists of the following core entities: **user**, **client**, **channel**, **group** and **domain**. 28 | 29 | `User` represents the real (human) user of the system. Users are identified by their username and password, which are used as platform access credentials in order to obtain an access token. Once logged into the system, a user can manage their resources (i.e. domains, groups, clients and channels) in CRUD fashion and define access control policies by creating and managing roles for them. 30 | 31 | `Group` represents a logical grouping of clients, channels or other groups. It is used to simplify access control management by allowing these entities to be grouped together. When a user becomes a member of a group's role, they are able to access the entities encompassed by the group. A user can have a role in multiple groups, and each group can have multiple members (users). Groups can have a single parent group and many child groups; this enables a hierarchical structure and shared access to entities. A role created for a group determines what a member (user) of the role can do with the group and the entities associated with it. 32 | 33 | `Client` represents a device (or application) connected to SuperMQ that uses the platform for message exchange with other "clients". Clients have roles, to which users are assigned as members, determining which actions a role member (user) can perform on them. 34 | 35 | `Channel` represents a communication channel. It serves as a message topic that can be consumed by all of the clients connected to it. It also serves as a grouping mechanism for clients. A client can be connected to multiple channels, and a channel can have multiple clients connected to it.
A user can also have access to a channel, allowing them to access the messages published to that channel. As mentioned before, a channel can belong to a group. A client connected to a channel forms a connection in SuperMQ. The connection can be of three types: Publish, where the client can only publish messages to the channel; Subscribe, where the client can only receive messages sent to the channel; and Publish and Subscribe, where the client can both publish and receive messages on the channel. Channels have roles which determine the actions a role member (user) can perform on them. 36 | 37 | `Domain` represents a top-level organizational unit which encompasses entities such as groups, channels and clients. All these entities have to belong to a domain. A user has a role on a domain which determines what actions the user can perform on the domain as well as the entities in the domain. The domain enables access to clients, channels, groups and messages to be shared with other users on the platform. Domains also offer a collaborative space to perform CRUD operations on these entities. 38 | 39 | Additional functionality is provided by the following services: 40 | 41 | `auth` handles authentication and authorization functionality for the platform. The service is used to issue keys and tokens. The service also facilitates fine-grained access control to core entities. 42 | 43 | `protocol-adapters` These include adapters for HTTP, CoAP, WS and MQTT. These services handle bidirectional communication between the platform and devices and applications. The adapters enable message handling in the system, supporting the PubSub model of the platform. 44 | 45 | ## Messaging 46 | 47 | SuperMQ uses [NATS][nats] as its default messaging backbone, due to its lightweight and performant nature. You can treat its _subjects_ as a physical representation of SuperMQ channels, where the subject name is constructed using the channel's unique identifier. SuperMQ also provides the ability to change your default message broker to [RabbitMQ][rabbitmq], [VerneMQ][vernemq] or [Kafka][kafka]. 48 | 49 | In general, there is no constraint put on content that is being exchanged through channels. However, in order to be post-processed and normalized, messages should be formatted using [SenML][senml]. 50 | 51 | ## Edge 52 | 53 | The SuperMQ platform can be run on the edge as well. Deploying SuperMQ on a gateway makes it possible to collect, store and analyze data, and to organize and authenticate devices. To connect SuperMQ instances running on a gateway with SuperMQ in the cloud, we can use two gateway services developed for that purpose: 54 | 55 | - [Agent][agent] 56 | - [Export][export] 57 | 58 | ## Unified IoT Platform 59 | 60 | Running SuperMQ on a gateway moves computation from the cloud towards the edge, thus decentralizing the IoT system. Since the same SuperMQ code can be deployed on a gateway and in the cloud, there are many benefits, the biggest one being easy deployment and adoption - once engineers understand how to deploy and maintain the platform, they will be able to apply those same skills to any part of the edge-fog-cloud continuum. This is because the platform is designed to be consistent, making it easy for engineers to move between environments. This consistency saves engineers time and effort, and it also helps to improve the reliability and security of the platform. The same set of tools can be used and the same patches and bug fixes can be applied. The whole system is much easier to reason about, and maintenance is easier and less costly.
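To make the SenML requirement from the Messaging section concrete, here is a minimal, illustrative Go sketch that builds a SenML pack like the ones used in the publishing examples later in these docs. The `Record` struct is a hypothetical helper covering only the fields these docs use (`bn`, `bt`, `bu`, `n`, `u`, `v`, `t`); it is not a SuperMQ API.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Record models the subset of SenML fields used throughout these docs.
type Record struct {
	BaseName string  `json:"bn,omitempty"`
	BaseTime float64 `json:"bt,omitempty"`
	BaseUnit string  `json:"bu,omitempty"`
	Name     string  `json:"n,omitempty"`
	Unit     string  `json:"u,omitempty"`
	Value    float64 `json:"v,omitempty"`
	Time     float64 `json:"t,omitempty"`
}

func main() {
	// A pack with one absolute reading and one relative-time reading.
	pack := []Record{
		{BaseName: "some-base-name:", BaseTime: 1.276020076001e+09, BaseUnit: "A", Name: "voltage", Unit: "V", Value: 120.1},
		{Name: "current", Time: -5, Value: 1.2},
	}
	payload, err := json.Marshal(pack)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // ready to publish to a channel via any adapter
}
```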
61 | 62 | [auth-service]: https://github.com/absmach/supermq/tree/main/auth 63 | [users-service]: https://github.com/absmach/supermq/tree/main/users 64 | [groups-service]: https://github.com/absmach/supermq/tree/main/groups 65 | [clients-service]: https://github.com/absmach/supermq/tree/main/clients 66 | [channels-service]: https://github.com/absmach/supermq/tree/main/channels 67 | [domains-service]: https://github.com/absmach/supermq/tree/main/domains 68 | [http-adapter]: https://github.com/absmach/supermq/tree/main/http 69 | [mqtt-adapter]: https://github.com/absmach/supermq/tree/main/mqtt 70 | [coap-adapter]: https://github.com/absmach/supermq/tree/main/coap 71 | [ws-adapter]: https://github.com/absmach/supermq/tree/main/ws 72 | [supermq-cli]: https://github.com/absmach/supermq/tree/main/cli 73 | [nats]: https://nats.io/ 74 | [rabbitmq]: https://www.rabbitmq.com/ 75 | [vernemq]: https://vernemq.com/ 76 | [kafka]: https://kafka.apache.org/ 77 | [senml]: https://tools.ietf.org/html/draft-ietf-core-senml-08 78 | [agent]: ./edge.md#agent 79 | [export]: ./edge.md#export 80 | -------------------------------------------------------------------------------- /docs/authentication.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Authentication 3 | --- 4 | 5 | 6 | ## User authentication 7 | 8 | For user authentication SuperMQ uses authentication keys. There are two types of authentication keys: 9 | 10 | - User key - a key issued to the user upon login request 11 | - Recovery key - a password recovery key 12 | 13 | Authentication keys are represented and distributed as the corresponding [JWT][jwt]. User keys are issued when a user logs in. Each user request (other than registration and login) contains a user key that is used to authenticate the user. 14 | 15 | The recovery key is a short-lived token used for the password recovery process. 16 | 17 | The following actions are supported: 18 | 19 | - create (all key types) 20 | - verify (all key types) 21 | 22 | ## Federated authentication 23 | 24 | Federated authentication is a process of authenticating users using external identity providers. SuperMQ supports federated authentication using the [OpenID Connect][oidc] protocol. SuperMQ is a resource provider and it uses [Google Identity Platform][google-identity-platform] as an identity provider. To use federated authentication, you need to create a project in Google Cloud Platform and enable the Google Identity Platform API. After that, you need to create OAuth 2.0 credentials and configure the consent screen. This can be done by following Google's [documentation][google-identity-platform-docs]. Once you have created OAuth 2.0 credentials, you need to set the following environment variables: 25 | 26 | ```bash 27 | SMQ_USERS_GOOGLE_CLIENT_ID=985229335584-m2mft8lqbgfn5gfw9ftrm3r2sgu4tsrw.apps.googleusercontent.com 28 | SMQ_USERS_GOOGLE_CLIENT_SECRET=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YUIpw 29 | SMQ_UI_GOOGLE_REDIRECT_URL=http://localhost/google-callback 30 | SMQ_USERS_GOOGLE_STATE=pGXVNhEeKfycuBzk5InlSfMlEU9UrhlkTUOSqhsgDzXP2Y4RsN 31 | SMQ_USERS_UI_REDIRECT_URL=http://localhost:9090 32 | ``` 33 | 34 | 1. `SMQ_USERS_GOOGLE_CLIENT_ID` - Google OAuth 2.0 client ID 35 | 2. `SMQ_USERS_GOOGLE_CLIENT_SECRET` - Google OAuth 2.0 client secret 36 | 3. `SMQ_UI_GOOGLE_REDIRECT_URL` - Google OAuth 2.0 redirect URL to handle the callback after successful authentication. This URL must be registered in the Google Cloud Platform. 37 | 4.
`SMQ_USERS_GOOGLE_STATE` - Random string used to protect against cross-site request forgery attacks. 38 | 5. `SMQ_USERS_UI_REDIRECT_URL` - URL to redirect the user to after successful authentication. This can be your SuperMQ UI URL. 39 | 40 | SuperMQ handles the authentication callback at the `<base_url>/google-callback` endpoint, where `<base_url>` is the base URL of your SuperMQ instance. This endpoint needs to be registered in the Google Cloud Platform and it must match the value of the `SMQ_UI_GOOGLE_REDIRECT_URL` environment variable. If an error occurs, the error message is sent from the backend using a query parameter with the key `error`. The UI will read the error message from the query parameter and display it to the user. When a user signs up, SuperMQ creates a local copy of the user with an ID provided by Google, the name and email address provided by Google, and an empty password, since the user is authenticated using Google, i.e. it is an external user. The user can be created only once, so if the user already exists, the error will be sent to the UI via the error cookie. Finally, the user is redirected to the URL provided in the `SMQ_USERS_UI_REDIRECT_URL` environment variable upon successful authentication. This should be the base URL of your UI. 41 | 42 | The `SMQ_USERS_GOOGLE_CLIENT_ID`, `SMQ_USERS_GOOGLE_CLIENT_SECRET`, `SMQ_UI_GOOGLE_REDIRECT_URL` and `SMQ_USERS_GOOGLE_STATE` environment variables should be the same for the UI and the users service. The `SMQ_USERS_UI_REDIRECT_URL` environment variable should be the URL of your UI, which is used to redirect the user after successful authentication. 43 | 44 | SuperMQ uses the `access_token` provided by Google only to fetch user information, which includes user ID, name, given name, family name, picture and locale. The `access_token` is not stored in the database and is not used for any other purpose. The `id_token` is not used, as it presents challenges with refreshing; instead, SuperMQ issues its own `access_token` and `refresh_token`, stored in an HTTP-only cookie and used to authenticate the user in subsequent requests. 45 | 46 | ## Authentication with SuperMQ keys 47 | 48 | By default, SuperMQ uses the SuperMQ Client secret for authentication. The Client secret is a secret key that's generated at Client creation. In order to authenticate, the Client needs to send its secret with the message. The way the secret is passed depends on the protocol used to send a message and differs from adapter to adapter. For more details on how this secret is passed around, please check out the [messaging section][messaging]. This is the default SuperMQ authentication mechanism and this method is used if the composition is started using the following command: 50 | 51 | ```bash 52 | docker-compose -f docker/docker-compose.yml up 53 | ``` 54 | 55 | ## Mutual TLS Authentication with X.509 Certificates 56 | 57 | In most of the cases, HTTPS, WSS, MQTTS or secure CoAP are secure enough.
However, sometimes you might need an even more secure connection. SuperMQ supports mutual TLS authentication (_mTLS_) based on [X.509 certificates][rf5280]. By default, the TLS protocol only proves the identity of the server to the client using the X.509 certificate, and the authentication of the client to the server is left to the application layer. TLS also offers client-to-server authentication using client-side X.509 authentication. This is called two-way or mutual authentication. SuperMQ currently supports mTLS over HTTP, WS, MQTT and MQTT over WS protocols. In order to run the Docker composition with mTLS turned on, you can execute the following command from the project root: 58 | 59 | ```bash 60 | AUTH=x509 docker-compose -f docker/docker-compose.yml up -d 61 | ``` 62 | 63 | Mutual authentication includes client-side certificates. Certificates can be generated using the simple script provided [here][ssl-makefile]. In order to create a valid certificate, you need to create a SuperMQ client using the process described in the [provisioning section][provision]. After that, you need to fetch the created client's secret. The client secret will be used to create an x.509 certificate for the corresponding client. To create a certificate, execute the following commands (the `<...>` values are placeholders to replace with your own): 65 | 66 | ```bash 67 | cd docker/ssl 68 | make ca CN=<common_name> O=<organization> OU=<organizational_unit> emailAddress=<email_address> 69 | make server_cert CN=<common_name> O=<organization> OU=<organizational_unit> emailAddress=<email_address> 70 | make client_cert CLIENT_SECRET=<client_secret> CRT_FILE_NAME=<cert_file_name> O=<organization> OU=<organizational_unit> emailAddress=<email_address> 72 | ``` 73 | 74 | These commands use the [OpenSSL][openssl] tool, so please make sure that you have it installed and set up before running them. The default values for the Makefile variables are 75 | 76 | ```env 77 | CRT_LOCATION = certs 78 | CLIENT_SECRET = d7cc2964-a48b-4a6e-871a-08da28e7883d 80 | O = SuperMQ 81 | OU = supermq_ca 82 | EA = info@supermq.com 83 | CN = localhost 84 | CRT_FILE_NAME = client 86 | ``` 87 | 88 | Normally, in order to get clients running, you will need to specify only `CLIENT_SECRET`. The other variables are not mandatory and the termination should work with the default values. 89 | 90 | - Command `make ca` will generate a self-signed certificate that will later be used as a CA to sign other generated certificates. The CA will expire in 3 years. 91 | - Command `make server_cert` will generate and sign (with the previously created CA) a server cert, which will expire after 1000 days. This cert is used as a SuperMQ server-side certificate in the usual TLS flow to establish an HTTPS or MQTTS connection. 92 | - Command `make client_cert` will finally generate and sign a client-side certificate and private key for the client.
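Because the client secret is embedded in the certificate's `CN` field, you can sanity-check a generated certificate from code. The following is a small Go sketch under that assumption; the certificate path is the Makefile's default output and is only illustrative:

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	// Default CRT_FILE_NAME is "client"; adjust the path to your own cert.
	raw, err := os.ReadFile("docker/ssl/certs/client.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		panic("no PEM data found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// For SuperMQ mTLS, the CN should carry the client secret.
	fmt.Println("CN:", cert.Subject.CommonName)
}
```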
94 | 95 | In this example `<client_secret>` represents the secret of the client and `<cert_file_name>` represents the name of the certificate and key file, which will be saved in the `docker/ssl/certs` directory. The generated certificate will expire after 2 years. The key must be stored in the x.509 certificate `CN` field. This script is created for testing purposes and is not meant to be used in production. We strongly recommend avoiding self-signed certificates and using a certificate management tool such as [Vault][vault] in production. 97 | 98 | Once you have created the CA and server-side cert, you can spin up the composition using: 99 | 100 | ```bash 101 | AUTH=x509 docker-compose -f docker/docker-compose.yml up -d 102 | ``` 103 | 104 | Then, you can create a user and provision clients and channels. Now, in order to send a message from a specific client to a channel, you need to connect the client to the channel and generate the corresponding client certificate using the aforementioned commands. To publish a message to the channel, the client should send the following request: 106 | 107 | ### WSS 108 | 109 | ```javascript 110 | const WebSocket = require("ws"); 111 | // Do not verify self-signed certificates if you are using one. 112 | process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0"; 113 | // Replace <channel_id> and <client_secret> with real values. 115 | const ws = new WebSocket( 116 | "wss://localhost/ws/channels/<channel_id>/messages?authorization=<client_secret>", 117 | // This is the ClientOptions object that contains the client cert and client key as strings. You can easily load these strings from the cert and key files. 118 | { 119 | cert: `-----BEGIN CERTIFICATE-----....`, 120 | key: `-----BEGIN RSA PRIVATE KEY-----.....`, 121 | } 122 | ); 123 | ws.on("open", () => { 124 | ws.send("something"); 125 | }); 126 | ws.on("message", (data) => { 127 | console.log(data); 128 | }); 129 | ws.on("error", (e) => { 130 | console.log(e); 131 | }); 132 | ``` 133 | 134 | As you can see, the `Authorization` header does not have to be present in the HTTP request, since the secret is present in the certificate. However, if you pass the `Authorization` header, it _must be the same as the key in the cert_. In the case of MQTTS, the `password` field in the CONNECT message _must match the key from the certificate_. In the case of WSS, the `Authorization` header or `authorization` query parameter _must match the cert key_.
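Besides `curl` and `mosquitto`, the same mTLS flow can be driven from code. Before the protocol-specific examples below, here is a minimal Go sketch of an mTLS HTTP publish, assuming the certificate files generated by the Makefile above and a hypothetical `<channel_id>`; it is an illustration, not an official SuperMQ client library:

```go
package main

import (
	"bytes"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Trust the self-signed CA generated by `make ca`.
	caPEM, err := os.ReadFile("docker/ssl/certs/ca.crt")
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)

	// Client-side certificate and key generated by `make client_cert`.
	cert, err := tls.LoadX509KeyPair("docker/ssl/certs/client.crt", "docker/ssl/certs/client.key")
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool, Certificates: []tls.Certificate{cert}},
		},
	}

	channelID := "<channel_id>" // hypothetical placeholder
	payload := []byte(`[{"n":"voltage","u":"V","v":120.1}]`)
	resp, err := client.Post("https://localhost/http/channels/"+channelID+"/messages", "application/senml+json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```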
135 | 136 | ### HTTPS 137 | 138 | ```bash 139 | curl -s -S -i --cacert docker/ssl/certs/ca.crt --cert docker/ssl/certs/<cert_file_name>.crt --key docker/ssl/certs/<cert_file_name>.key -X POST -H "Content-Type: application/senml+json" https://localhost/http/channels/<channel_id>/messages -d '[{"bn":"some-base-name:","bt":1.276020076001e+09, "bu":"A","bver":5, "n":"voltage","u":"V","v":120.1}, {"n":"current","t":-5,"v":1.2}, {"n":"current","t":-4,"v":1.3}]' 140 | ``` 141 | 142 | ### MQTTS 143 | 144 | #### Publish 145 | 146 | ```bash 147 | mosquitto_pub -u <client_id> -P <client_secret> -t channels/<channel_id>/messages -h localhost -p 8883 --cafile docker/ssl/certs/ca.crt --cert docker/ssl/certs/<cert_file_name>.crt --key docker/ssl/certs/<cert_file_name>.key -m '[{"bn":"some-base-name:","bt":1.276020076001e+09, "bu":"A","bver":5, "n":"voltage","u":"V","v":120.1}, {"n":"current","t":-5,"v":1.2}, {"n":"current","t":-4,"v":1.3}]' 148 | ``` 149 | 150 | #### Subscribe 151 | 152 | ```bash 153 | mosquitto_sub -u <client_id> -P <client_secret> --cafile docker/ssl/certs/ca.crt --cert docker/ssl/certs/<cert_file_name>.crt --key docker/ssl/certs/<cert_file_name>.key -t channels/<channel_id>/messages -h localhost -p 8883 154 | ``` 155 | 156 | [jwt]: https://jwt.io/ 157 | [messaging]: ./messaging.md 158 | [rf5280]: https://tools.ietf.org/html/rfc5280 159 | [ssl-makefile]: https://github.com/absmach/supermq/blob/main/docker/ssl/Makefile 160 | [provision]: ./provision.md#platform-management 161 | [openssl]: https://www.openssl.org/ 162 | [vault]: https://www.vaultproject.io/ 163 | [oidc]: https://openid.net/connect/ 164 | [google-identity-platform]: https://cloud.google.com/identity-platform/docs/ 165 | [google-identity-platform-docs]: https://support.google.com/cloud/answer/6158849?hl=en 166 | -------------------------------------------------------------------------------- /docs/authz-spec.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: SuperMQ Authorization Specification Document 3 | --- 4 | 5 | ## Overview 6 | 7 | SuperMQ consists of the following entities: 8 | 9 | - User 10 | - Client 11 | - Channel 12 | - Group 13 | - Domain 14 | - Platform 15 | 16 | ### Roles 17 | 18 | A role is a collection of actions that a group of users is allowed to perform on an entity. It simplifies the management of user privileges by consolidating related permissions into a single unit. 19 | 20 | #### Specification for Roles 21 | 22 | 1. Roles should have a short ID, name, description, list of permissible actions and list of role members; except for the role ID, all other fields are editable. 23 | 2. Roles are identified by name in the API, because names are human-friendly. 24 | 3. A role's actions list can contain only the actions which are allowed for the entity. 25 | 4. The entity's allowed actions are derived automatically from the authorization schema (SpiceDB schema) when the entity service starts. 26 | 5. A user can be a member of multiple roles across different entities, but for a single entity a user can have only one role, and the user should be a member of the domain. 27 | 28 | #### Entities in SuperMQ which have roles 29 | 30 | - Clients 31 | - Channels 32 | - Groups 33 | - Domains 34 | 35 | #### Role Relations 36 | 37 | - `entity`: Defines the entity type associated with the role (e.g., `domain`, `group`, `channel`, `client`). 38 | - `member`: Indicates that a `user` can be a member of a role, which grants them the associated permissions. 39 | - `built_in_role`: Indicates roles that are built-in or default roles (e.g., `admin`, `member`).
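To make the shape of a role concrete, here is a hypothetical Go sketch of the role record implied by the specification above; the field names are illustrative, not SuperMQ's actual schema:

```go
package main

import "fmt"

// Role is an illustrative model of the role record described above:
// an immutable short ID plus editable name, description, actions and members.
type Role struct {
	ID          string   // immutable short id
	Name        string   // used to identify the role in the API
	Description string
	Actions     []string // must be a subset of the entity's allowed actions
	Members     []string // user IDs assigned to this role
}

func main() {
	admin := Role{
		ID:          "role_01",
		Name:        "admin",
		Description: "Full control over Domain_1",
		Actions:     []string{"read", "update", "delete"},
		Members:     []string{"user_1", "user_2"},
	}
	fmt.Printf("%+v\n", admin)
}
```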
40 | 41 | #### Role Permissions 42 | 43 | Roles can grant permissions like: 44 | 45 | - **Delete**: Allows deleting the entity (e.g., a group or client). 46 | - **Update**: Allows updating the entity. 47 | - **Read**: Grants read access to the entity. 48 | - **Add User**: Grants permission to add users to the entity. 49 | - **Remove User**: Grants permission to remove users from the entity. 50 | - **View Users**: Allows viewing the users associated with the entity. 51 | 52 | **Example:** 53 | Domains have allowed actions **read**, **update**, and **delete**. 54 | 55 | In Domain_1, there are roles called **admin** and **editor**. 56 | 57 | The **Admin** role in Domain_1 grants the following actions: **read**, **update**, and **delete**. 58 | The **Admin** role is assigned to **user_1** and **user_2** as members, granting them the ability to perform all these actions within Domain_1. 59 | 60 | The **Editor** role in Domain_1 grants the following actions: **read** and **update**. 61 | The **Editor** role is assigned to **user_3** and **user_4** as members, granting them the ability to perform **read** and **update** actions within Domain_1. 62 | 63 | ### Role Hierarchy and Inheritance 64 | 65 | Roles are often hierarchical, allowing for inheritance of permissions from parent roles or entities. For instance: 66 | 67 | - A `user` can be a `member` of both a `domain` and a `group`, and can inherit permissions granted to these entities. 68 | - Roles assigned at a higher level (such as a `domain` or `group`) can propagate their permissions to lower levels (e.g., groups, channels, clients) as needed. 69 | 70 | ### Clients 71 | 72 | A `client` is an entity that can represent a software application, an IoT device or a streaming application instance. Clients communicate through channels, allowing them to publish messages, subscribe to receive messages, or perform both actions. 73 | 74 | #### Specification of Clients 75 | 76 | 1. Each client should have its own roles. 77 | 2. A client can have only a single parent group. This parent group defines the client's position in the hierarchy. 78 | 3. The parent group's role actions can be inherited by the client. 79 | 4. A client can connect to multiple channels, with a publish action, a subscribe action, or both. 80 | A client can: 81 | - **Publish**: Send messages to one or more channels. 82 | - **Subscribe**: Receive messages from one or more channels. 83 | - **Publish & Subscribe**: Both send and receive messages on the same or different channels. 84 | 85 | #### Client Actions 86 | 87 | - `update`, `read`, `delete`: The ability to update, read, or delete a client. 88 | - `connect_to_channel`: Allows connecting the client to a channel. 89 | - `manage_role`: Allows managing roles within a client: editing, reading and deleting roles and their actions. 90 | - `add_role_users`: Allows adding a user to a role. 91 | - `remove_role_users`: Allows removing a user from a role. 92 | - `view_role_users`: Allows viewing the users in a role. 93 | 94 | ### Channels 95 | 96 | A `channel` is an entity representing a topic to which clients and users can publish messages, subscribe to messages, or both. 97 | 98 | #### Specification of Channels 99 | 100 | 1. Each channel should have its own roles. 101 | 2. A channel can have only a single parent group. This parent group defines the channel's position in the hierarchy. 102 | 3. The parent group's role actions can be inherited by the channel. 103 | 4.
A channel can be connected to multiple clients, with a publish action, a subscribe action, or both publish and subscribe actions. 104 | 5. Only authorized clients and users should be able to publish and subscribe to channels. 105 | 6. To publish or subscribe to a channel, the topic should start with `m/<domain_id>/c/<channel_id>`; it can also have its own subtopic path, for example: `m/<domain_id>/c/<channel_id>/subtopic1/subtopic2/subtopic3`. 106 | 7. Both clients and users should be able to use channels to interact and exchange data with other clients or users. 107 | 108 | #### Channel Actions 109 | 110 | - `update`, `read`, `delete`: The ability to update, read, or delete a channel. 111 | - `publish`, `subscribe`: These allow a `user` or `client` to publish and subscribe over the channel. 112 | - `connect_to_client`: Allows connecting a client to the channel. 113 | - `manage_role`: Allows managing roles within a channel: editing, reading and deleting roles and their actions. 114 | - `add_role_users`: Allows adding a user to a role. 115 | - `remove_role_users`: Allows removing a user from a role. 116 | - `view_role_users`: Allows viewing the users in a role. 117 | 118 | ### Groups 119 | 120 | A `group` is a hierarchical entity that can contain multiple child groups, clients, and channels. 121 | Each group can have only a single parent, forming a structured hierarchy that defines the relationships between entities. 122 | 123 | #### Specification of Groups 124 | 125 | 1. Each group should have its own roles. 126 | 2. A group can have only one parent group. 127 | 3. A group can have multiple child groups, clients and channels. 128 | 4. A group should have actions that can be used to perform operations like read, update and delete on the group, plus additional actions to read, update and delete its child groups, clients and channels. 129 | 130 | #### Group Actions 131 | 132 | - `update`, `read`, `delete`: The ability to update, read, or delete a group. 133 | - `manage_role`: Allows managing roles within a group: editing, reading and deleting roles and their actions. 134 | - `add_role_users`: Allows adding a user to a role. 135 | - `remove_role_users`: Allows removing a user from a role. 136 | - `view_role_users`: Allows viewing the users in a role. 137 | 138 | - `client_create`: The ability to create child clients in the group. 139 | - `client_update`, `client_read`, `client_delete`: The ability to update, read, or delete all the clients in the group. 140 | - `client_connect_to_channel`: The ability to connect all clients in the group to channels in the same domain. 141 | - `client_manage_role`: The ability to manage the roles of all clients in the group. 142 | - `client_add_role_users`: Allows adding a user to a role for all clients in the group. 143 | - `client_remove_role_users`: Allows removing a user from a role for all clients in the group. 144 | - `client_view_role_users`: Allows viewing the users in a role for all clients in the group. 145 | 146 | - `channel_create`: The ability to create child channels in the group. 147 | - `channel_update`, `channel_read`, `channel_delete`: The ability to update, read, or delete all the channels in the group. 148 | - `channel_publish`, `channel_subscribe`: These allow a `user` to publish and subscribe over all the channels in the group. 149 | - `channel_connect_to_channel`: The ability to connect all channels in the group to clients in the same domain. 150 | - `channel_manage_role`: The ability to manage the roles of all channels in the group. 151 | - `channel_add_role_users`: Allows adding a user to a role for all channels in the group.
152 | - `channel_remove_role_users`: Allows removing a user from a role for all channels in the group. 153 | - `channel_view_role_users`: Allows viewing the users in a role for all channels in the group. 154 | 155 | - `sub_group_create`: The ability to create child groups (sub-groups) in the group. 156 | - `sub_group_update`, `sub_group_read`, `sub_group_delete`: The ability to update, read, or delete all the child groups in the group, including all nested sub-groups. 157 | - `sub_group_manage_role`: The ability to manage the roles of all child groups in the group, including all nested sub-groups. 158 | - `sub_group_add_role_users`: Allows adding a user to a role for all the child groups in the group, including all nested sub-groups. 159 | - `sub_group_remove_role_users`: Allows removing a user from a role for all the child groups in the group, including all nested sub-groups. 160 | - `sub_group_view_role_users`: Allows viewing the users in a role for all the child groups in the group, including all nested sub-groups. 161 | 162 | - `sub_group_client_create`: The ability to create child clients in all sub-groups. 163 | - `sub_group_client_update`, `sub_group_client_read`, `sub_group_client_delete`: The ability to update, read, or delete all the clients in all sub-groups. 164 | - `sub_group_client_connect_to_channel`: The ability to connect all clients in all sub-groups to channels in the same domain. 165 | - `sub_group_client_manage_role`: The ability to manage the roles of all clients in all sub-groups. 166 | - `sub_group_client_add_role_users`: Allows adding a user to a role for all clients in all sub-groups. 167 | - `sub_group_client_remove_role_users`: Allows removing a user from a role for all clients in all sub-groups. 168 | - `sub_group_client_view_role_users`: Allows viewing the users in a role for all clients in all sub-groups. 169 | 170 | - `sub_group_channel_create`: The ability to create child channels in all sub-groups. 171 | - `sub_group_channel_update`, `sub_group_channel_read`, `sub_group_channel_delete`: The ability to update, read, or delete all the channels in all sub-groups. 172 | - `sub_group_channel_publish`, `sub_group_channel_subscribe`: These allow a `user` to publish and subscribe over all the channels in all sub-groups. 173 | - `sub_group_channel_connect_to_channel`: The ability to connect all channels in all sub-groups to clients in the same domain. 174 | - `sub_group_channel_manage_role`: The ability to manage the roles of all channels in all sub-groups. 175 | - `sub_group_channel_add_role_users`: Allows adding a user to a role for all channels in all sub-groups. 176 | - `sub_group_channel_remove_role_users`: Allows removing a user from a role for all channels in all sub-groups. 177 | - `sub_group_channel_view_role_users`: Allows viewing the users in a role for all channels in all sub-groups. 178 | 179 | ### Domain 180 | 181 | A `Domain` is the top-level organizational unit that manages and governs various sub-entities like groups, channels, and clients. 182 | 183 | #### Specification of Domains 184 | 185 | 1. Each domain should have its own roles. 186 | 2. A user can be added to any role in the domain. 187 | 3. A domain should have actions that can be used to perform operations like read, update and delete on the domain, plus additional actions to read, update and delete all its groups, clients and channels. 188 | 189 | #### Domain Actions 190 | 191 | - `update`, `read`, `delete`: The ability to update, read, or delete a domain.
192 | - `manage_role`: Allows managing roles within a domain: editing, reading and deleting roles and their actions. 193 | - `add_role_users`: Allows adding a user to a role. 194 | - `remove_role_users`: Allows removing a user from a role. 195 | - `view_role_users`: Allows viewing the users in a role. 196 | 197 | - `client_create`: The ability to create clients in the domain. 198 | - `client_update`, `client_read`, `client_delete`: The ability to update, read, or delete all the clients in the domain. 199 | - `client_connect_to_channel`: The ability to connect all clients in the domain to channels in the same domain. 200 | - `client_manage_role`: The ability to manage the roles of all clients in the domain. 201 | - `client_add_role_users`: Allows adding a user to a role for all clients in the domain. 202 | - `client_remove_role_users`: Allows removing a user from a role for all clients in the domain. 203 | - `client_view_role_users`: Allows viewing the users in a role for all clients in the domain. 204 | 205 | - `channel_create`: The ability to create channels in the domain. 206 | - `channel_update`, `channel_read`, `channel_delete`: The ability to update, read, or delete all the channels in the domain. 207 | - `channel_publish`, `channel_subscribe`: These allow a `user` to publish and subscribe over all the channels in the domain. 208 | - `channel_connect_to_channel`: The ability to connect all channels in the domain to clients in the same domain. 209 | - `channel_manage_role`: The ability to manage the roles of all channels in the domain. 210 | - `channel_add_role_users`: Allows adding a user to a role for all channels in the domain. 211 | - `channel_remove_role_users`: Allows removing a user from a role for all channels in the domain. 212 | - `channel_view_role_users`: Allows viewing the users in a role for all channels in the domain. 213 | 214 | - `group_create`: The ability to create groups in the domain. 215 | - `group_update`, `group_read`, `group_delete`: The ability to update, read, or delete all the groups in the domain, including sub-groups. 216 | - `group_manage_role`: The ability to manage the roles of all groups in the domain, including sub-groups. 217 | - `group_add_role_users`: Allows adding a user to a role for all groups in the domain, including sub-groups. 218 | - `group_remove_role_users`: Allows removing a user from a role for all groups in the domain, including sub-groups. 219 | - `group_view_role_users`: Allows viewing the users in a role for all groups in the domain, including sub-groups. 220 | 221 | ### Platform 222 | 223 | A `Platform` encompasses multiple domains and defines the global access control policy (typically using the `administrator` role). 224 | -------------------------------------------------------------------------------- /docs/benchmark.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Test Spec 3 | --- 4 | 5 | ## Tools 6 | 7 | - [MZBench][mzbench] 8 | - [vmq_mzbench][vmq_mzbench] 9 | - [mzb_api_ec2_plugin][mzb_api_ec2_plugin] 10 | 11 | ### Setting up MZBench 12 | 13 | MZBench is an open-source tool that can generate heavy traffic and measure application performance. It is a distributed, cloud-aware benchmarking tool that can seamlessly scale to millions of requests. It was originally developed by [satori-com][satori], but we will use the [mzbench][mzbench] fork because it can run with the newest Erlang releases, while the original MZBench repository is no longer maintained.
14 | 15 | We will describe installing the MZBench server on Ubuntu 18.04 (this can be your PC or an external cloud server, like a droplet on Digital Ocean). 16 | 17 | Install the latest OTP/Erlang (version 22.3 at the time of writing): 18 | 19 | ```bash 20 | sudo apt update 21 | sudo apt install erlang 22 | ``` 23 | 24 | For running this tool you will also need the libz-dev package: 25 | 26 | ```bash 27 | sudo apt-get update 28 | sudo apt-get install libz-dev 29 | ``` 30 | 31 | and pip: 32 | 33 | ```bash 34 | sudo apt install python-pip 35 | ``` 36 | 37 | Clone the mzbench tool and install its requirements: 38 | 39 | ```bash 40 | git clone https://github.com/mzbench/mzbench 41 | cd mzbench 42 | sudo pip install -r requirements.txt 43 | ``` 44 | 45 | This should be enough to install MZBench, and you can now start the MZBench server with this CLI command: 46 | 47 | ```bash 48 | ./bin/mzbench start_server 49 | ``` 50 | 51 | The [MZBench CLI][mzbench-cli] lets you control the server and benchmarks from the command line. 52 | 53 | Another way of using MZBench is over the [Dashboard][mzbench-dashboard]. After starting the server, you should check the dashboard at `http://localhost:4800`. 54 | 55 | Note that if you are installing MZBench on an external server (e.g. a Digital Ocean droplet), you'll be able to reach the MZBench dashboard at your server's IP address on port 4800, provided you have previously: 56 | 57 | - changed the default value of `network_interface` from `127.0.0.1` to `0.0.0.0` in the configuration file. The default configuration file location is `~/.config/mzbench/server.config`; create it from the sample configuration file `~/.config/mzbench/server.config.example` 58 | - opened port `4800` with `ufw allow 4800` 59 | 60 | MZBench can run your test scenarios on many nodes simultaneously. For now, you are able to run tests locally, so your nodes will be virtual nodes on the machine where the MZBench server is installed (your PC or DO droplet). You can try one of our [MQTT scenarios][mg-benchmark] that uses the [vmq_mzbench][vmq_mzbench] worker. Copy-paste a scenario into the MZBench dashboard, click the button _Environmental variables_ -> _Add from script_ and add the appropriate values. Because it's running locally, you should try smaller values; for example, for the fan-in scenario use 100 publishers on 2 nodes. Try this before moving forward with setting up the Amazon EC2 plugin. 61 | 62 | ### Setting up Amazon EC2 plugin 63 | 64 | For larger-scale tests we will set up MZBench to run each node as an Amazon EC2 instance with the built-in plugin [mzb_api_ec2_plugin][mzb_api_ec2_plugin]. 65 | 66 | This is the basic architecture when running MZBench: 67 | 68 | ![MZBench Architecture Running](https://github.com/mzbench/mzbench/raw/master/doc/images/scheme_2.png) 69 | 70 | Every node that runs your scenarios will be an Amazon EC2 instance, plus one additional node: the director node. The director doesn't run scenarios; it collects the metrics from the other nodes and runs [post and pre hooks][mzbench-scenarios]. So, if you want to run jobs on 10 nodes, 11 EC2 instances will actually be created. All instances will be automatically terminated when the test finishes. 71 | 72 | We will use one of the ready-to-use Amazon Machine Images (AMIs) with all the necessary dependencies. We will choose an AMI with OTP 22, because that is the version we have on the MZBench server. So, we will search for the `MZBench-erl22` AMI and find one with ID `ami-03a169923be706764`, available in the `us-west-1b` zone. If you have chosen this AMI, everything you do from now on must be in the us-west-1 zone.
We must have an IAM user with the `AmazonEC2FullAccess` and `IAMFullAccess` permission policies, whose `access_key_id` and `secret_access_key` go into the configuration file. In the EC2 dashboard, you must create a new security group `MZbench_cluster` and add inbound rules to open SSH and TCP ports 4801-4804. Also, in the EC2 dashboard go to the `key pairs` section, click `Actions` -> `Import key pair` and upload the public key you have on your MZBench server in `~/.ssh/id_rsa.pub` (if you need to create a new one, run `ssh-keygen` and follow the instructions). Give it a name on the EC2 dashboard, and put that name (`key_name`) and the key path (`keyfile`) in the configuration file. 73 | 74 | ```config 75 | [ 76 | {mzbench_api, [ 77 | {network_interface,"0.0.0.0"}, 78 | {keyfile, "~/.ssh/id_rsa"}, 79 | {cloud_plugins, [ 80 | {local,#{module => mzb_dummycloud_plugin}}, 81 | {ec2, #{module => mzb_api_ec2_plugin, 82 | instance_spec => [ 83 | {image_id, "ami-03a169923be706764"}, 84 | {group_set, ["MZbench_cluster"]}, 85 | {instance_type, "t2.micro"}, 86 | {availability_zone, "us-west-1b"}, 87 | {iam_instance_profile_name, "mzbench"}, 88 | {key_name, "key_pair_name"} 89 | ], 90 | config => [ 91 | {ec2_host, "ec2.us-west-1.amazonaws.com"}, 92 | {access_key_id, "IAM_USER_ACCESS_KEY_ID"}, 93 | {secret_access_key, "IAM_USER_SECRET_ACCESS_KEY"} 94 | ], 95 | instance_user => "ec2-user" 96 | }} 97 | ] 98 | } 99 | ]}]. 100 | ``` 101 | 102 | There are both `local` and `ec2` plugins in this configuration file, so you can choose to run tests on either of them. The default path for the configuration file is `~/.config/mzbench/server.config`; if it's somewhere else, start the server with: 103 | 104 | ```bash 105 | ./bin/mzbench start_server --config <config_file_path> 106 | ``` 107 | 108 | Note that every time you update the configuration you have to restart the server: 109 | 110 | ```bash 111 | ./bin/mzbench restart_server 112 | ``` 113 | 114 | ## Test scenarios 115 | 116 | Testing environment to be determined. 117 | 118 | ### Message publishing 119 | 120 | In this scenario, a large number of requests is sent to the HTTP adapter service every second. This test checks how much time the HTTP adapter needs to respond to each request. 121 | 122 | #### Message Publishing Results 123 | 124 | TBD 125 | 126 | ### Create and get client 127 | 128 | In this scenario, a large number of requests is sent to the clients service to create clients and then to retrieve their data. This test checks how much time the clients service needs to respond to each request. 129 | 130 | #### Create and Get Client Results 131 | 132 | TBD 133 | 134 | [mzbench]: https://github.com/mzbench/mzbench 135 | [vmq_mzbench]: https://github.com/vernemq/vmq_mzbench 136 | [mzb_api_ec2_plugin]: https://github.com/mzbench/mzbench/blob/master/doc/cloud_plugins.md#amazon-ec2 137 | [satori]: https://github.com/satori-com/mzbench 138 | [mzbench-cli]: https://github.com/mzbench/mzbench/blob/master/doc/cli.md 139 | [mzbench-dashboard]: https://github.com/mzbench/mzbench/blob/master/doc/dashboard.md 140 | [mg-benchmark]: https://github.com/absmach/benchmark/tree/master/mzbench 141 | [mzbench-scenarios]: https://github.com/mzbench/mzbench/blob/master/scenarios/spec.md#pre_hook-and-post_hook 142 | -------------------------------------------------------------------------------- /docs/bootstrap.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Bootstrap 3 | --- 4 | 5 | `Bootstrapping` refers to a self-starting process that is supposed to proceed without external input.
The SuperMQ platform supports the bootstrapping process, but some preconditions need to be fulfilled in advance. The device can trigger a bootstrap when: 6 | 7 | - the device contains only bootstrap credentials and no SuperMQ credentials 8 | - the device, for any reason, fails to start communication with the configured SuperMQ services (server not responding, authentication failure, etc.) 9 | - the device, for any reason, wants to update its configuration 10 | 11 | > Bootstrapping and provisioning are two different procedures. Provisioning refers to entity management while bootstrapping is related to entity configuration. 12 | 13 | The bootstrapping procedure is the following: 14 | 15 | ![Configure device](img/bootstrap/1.png) 16 | _1) Configure device with Bootstrap service URL, an external key and external ID_ 17 | 18 | > ![Provision SuperMQ channels](img/bootstrap/2.png) 19 | > 20 | > _Optionally create SuperMQ channels if they don't exist_ 21 | > 22 | > ![Provision SuperMQ clients](img/bootstrap/3.png) 23 | > 24 | > _Optionally create SuperMQ client if it doesn't exist_ 25 | 26 | ![Upload configuration](img/bootstrap/4.png) 27 | _2) Upload configuration for the SuperMQ client_ 28 | 29 | ![Bootstrap](img/bootstrap/5.png) 30 | _3) Bootstrap - send a request for the configuration_ 31 | 32 | ![Update, enable/disable, remove](img/bootstrap/6.png) 33 | _4) Connect/disconnect client from channels, update or remove configuration_ 34 | 35 | ## Configuration 36 | 37 | The configuration of a SuperMQ client consists of three major parts: 38 | 39 | - The list of SuperMQ channels the client is connected to 40 | - Custom configuration related to the specific client 41 | - Client secret and certificate data related to that client 42 | 43 | Also, the configuration contains an external ID and external key, which will be explained later. 44 | In order to enable the client to start the bootstrapping process, the user needs to upload a valid configuration for that specific client. This can be done using the following HTTP request (`<user_token>` is a valid user access token): 45 | 46 | ```bash 47 | curl -s -S -i -X POST -H "Authorization: Bearer <user_token>" -H "Content-Type: application/json" http://localhost:9013/clients/configs -d '{ 48 | "external_id":"09:6:0:sb:sa", 49 | "client_id": "7d63b564-3092-4cda-b441-e65fc1f285f0", 50 | "external_key":"key", 51 | "name":"some", 52 | "channels":[ 53 | "78c9b88c-b2c4-4d58-a973-725c32194fb3", 54 | "c4d6edb2-4e23-49f2-b6ea-df8bc6769591" 55 | ], 56 | "content": "config...", 57 | "client_cert": "PEM cert", 58 | "client_key": "PEM client cert key", 59 | "ca_cert": "PEM CA cert" 60 | }' 61 | ``` 62 | 63 | In this example, the `channels` field represents the list of SuperMQ channel IDs the client is connected to. These channels need to be provisioned before the configuration is uploaded. Field `content` represents custom configuration. This custom configuration contains parameters that can be used to set up the client. It can also be empty if no additional setup is needed. Field `name` is a human-readable name and `client_id` is the ID of the SuperMQ client. The `client_id` field is not required. If `client_id` is empty, a corresponding SuperMQ client will be created implicitly and its ID will be sent as part of the `Location` header of the response. Fields `client_cert`, `client_key` and `ca_cert` represent the PEM or base64-encoded DER client certificate, client certificate key and trusted CA, respectively. 64 | 65 | There are two more fields: `external_id` and `external_key`. External ID represents an ID of the device that corresponds to the given client.
For example, this can be a MAC address or the serial number of the device. The external key represents the device key. This is the secret key that's safely stored on the device and it is used to authorize the client during the bootstrapping process. Please note that the external ID and external key and the SuperMQ ID and SuperMQ key are _completely different concepts_. The external ID and key are only used to authenticate a device that corresponds to the specific SuperMQ client during the bootstrapping procedure. As the configuration optionally contains a client certificate and issuing CA, it's possible that the device is not able to establish TLS-encrypted communication with SuperMQ before bootstrapping. For that purpose, the Bootstrap service exposes an endpoint used for secure bootstrapping which can be used regardless of protocol (HTTP or HTTPS). Both the device and the Bootstrap service use a secret key to encrypt the content. Encryption is done as follows: 66 | 67 | 1. The device uses the secret encryption key to encrypt the value of its external key 68 | 2. The device sends a bootstrap request using the value from 1 as an Authorization header 69 | 3. The Bootstrap service fetches the config by its external ID 70 | 4. The Bootstrap service uses the secret encryption key to decrypt the Authorization header 71 | 5. The Bootstrap service compares the value from 4 with the external key of the config from 3 and proceeds to 6 if they're equal 72 | 6. The Bootstrap service uses the secret encryption key to encrypt the content of the bootstrap response 73 | 74 | > Please keep in mind that the secret key is passed to the Bootstrap service as an environment variable. As a security measure, the Bootstrap service removes this variable once it reads it on startup. However, depending on your deployment, this variable can still be visible as part of your configuration or terminal emulator environment. 75 | 76 | For more details on which encryption mechanisms are used, please take a look at the implementation. 77 | 78 | ### Bootstrapping 79 | 80 | Currently, the bootstrapping procedure is executed over the HTTP protocol. Bootstrapping is nothing more than fetching and applying the configuration that corresponds to the given SuperMQ client. In order to fetch the configuration, _the client_ needs to send a bootstrapping request: 81 | 82 | ```bash 83 | curl -s -S -i -H "Authorization: Client <external_key>" http://localhost:9013/clients/bootstrap/<external_id> 84 | ``` 85 | 86 | The response body should look something like: 87 | 88 | ```json 89 | { 90 | "client_id":"7d63b564-3092-4cda-b441-e65fc1f285f0", 91 | "client_key":"d0f6ff22-f521-4674-9065-e265a9376a78", 92 | "channels":[ 93 | { 94 | "id":"c4d6edb2-4e23-49f2-b6ea-df8bc6769591", 95 | "name":"c1", 96 | "metadata":null 97 | }, 98 | { 99 | "id":"78c9b88c-b2c4-4d58-a973-725c32194fb3", 100 | "name":"c0", 101 | "metadata":null 102 | } 103 | ], 104 | "content":"config...", 105 | "client_cert":"PEM cert", 106 | "client_key":"PEM client cert key", 107 | "ca_cert":"PEM CA cert" 108 | } 109 | ``` 110 | 111 | The response consists of an ID and key of the SuperMQ client, the list of channels and custom configuration (`content` field). The list of channels contains not just channel IDs, but the additional SuperMQ channel data (`name` and `metadata` fields), as well. 112 | 113 | ### Enabling and disabling clients 114 | 115 | Uploading a configuration does not automatically connect the client to the given list of channels.
In order to connect the client to the channels, the user needs to send the following HTTP request: 116 | 117 | ```bash 118 | curl -s -S -i -X PUT -H "Authorization: Bearer <user_token>" -H "Content-Type: application/json" http://localhost:9013/clients/state/<client_id> -d '{"state": 1}' 119 | ``` 120 | 121 | In order to disconnect, the same request should be sent with the value of `state` set to 0. 122 | 123 | ### Using curl request for secure bootstrap configuration 124 | 125 | - _Encrypt the external key._ 126 | 127 | First, encrypt the external key of your client using AES encryption. The encryption key is specified by the `SMQ_BOOTSTRAP_ENCRYPT_KEY` environment variable. Use a library or utility that supports AES encryption to do this. Here's an example of how to encrypt using Go: 128 | 129 | ```go 130 | package main 131 | 132 | import ( 133 | "crypto/aes" 134 | "crypto/cipher" 135 | "crypto/rand" 136 | "fmt" 137 | "io" 138 | ) 139 | 140 | type reader struct { 141 | encKey []byte 142 | } 143 | 144 | func (r reader) encrypt(in []byte) ([]byte, error) { 145 | block, err := aes.NewCipher(r.encKey) 146 | if err != nil { 147 | return nil, err 148 | } 149 | ciphertext := make([]byte, aes.BlockSize+len(in)) 150 | iv := ciphertext[:aes.BlockSize] 151 | if _, err := io.ReadFull(rand.Reader, iv); err != nil { 152 | return nil, err 153 | } 154 | stream := cipher.NewCFBEncrypter(block, iv) 155 | stream.XORKeyStream(ciphertext[aes.BlockSize:], in) 156 | return ciphertext, nil 157 | } 158 | 159 | func main() { 160 | data := []byte("<external_key>") 161 | 162 | r := reader{ 163 | encKey: []byte("<encryption_key>"), 164 | } 165 | 166 | encryptedData, err := r.encrypt(data) 167 | if err != nil { 168 | fmt.Println("Error encrypting data:", err) 169 | return 170 | } 171 | 172 | fmt.Printf("%x\n", encryptedData) 173 | } 174 | ``` 175 | 176 | Replace `<external_key>` and `<encryption_key>` with the client's external key and the value of `SMQ_BOOTSTRAP_ENCRYPT_KEY`, respectively. 177 | 178 | - _Make a request to the bootstrap service._ 179 | 180 | Once the key is encrypted, make a request to the Bootstrap service. Here's how to do this using `curl` (`<external_id>` is the client's external ID and `<encrypted_external_key>` is the hex-encoded output of the previous step): 181 | 182 | ```bash 183 | curl --location 'http://localhost:9013/clients/bootstrap/secure/<external_id>' \ 184 | --header 'Accept: application/json' \ 185 | --header 'authorization: Client <encrypted_external_key>' --output - 186 | ``` 187 | 188 | The response from the Bootstrap service will be in encrypted binary format. Store this response in a file for later use. 189 | 190 | ```bash 191 | curl --location 'http://localhost:9013/clients/bootstrap/secure/<external_id>' \ 192 | --header 'Accept: application/json' \ 193 | --header 'authorization: Client <encrypted_external_key>' --output ~/<path>/<to>/<file> 194 | ``` 195 | 196 | - _Decrypt the response_ 197 | 198 | Finally, decrypt the response using a matching function.
Here's an example of how to do this using Go: 199 | 200 | ```go 201 | package main 202 | 203 | import ( 204 | "crypto/aes" 205 | "crypto/cipher" 206 | "log" 207 | "os" 208 | ) 209 | 210 | func main() { 211 | encodedData, err := os.ReadFile("~/<path>/<to>/<file>") 212 | if err != nil { 213 | log.Fatal(err) 214 | } 215 | 216 | key := []byte("<encryption_key>") 217 | 218 | block, err := aes.NewCipher(key) 219 | if err != nil { 220 | log.Fatal(err) 221 | } 222 | 223 | if len(encodedData) < aes.BlockSize { 224 | log.Fatal("ciphertext too short") 225 | } 226 | 227 | iv := encodedData[:aes.BlockSize] 228 | encodedData = encodedData[aes.BlockSize:] 229 | stream := cipher.NewCFBDecrypter(block, iv) 230 | stream.XORKeyStream(encodedData, encodedData) 231 | 232 | err = os.WriteFile("~/<path>/<to>/<output_file>", encodedData, 0644) 233 | if err != nil { 234 | log.Fatal(err) 235 | } 236 | } 237 | ``` 238 | 239 | ### Using SuperMQ CLI for secure bootstrap configuration 240 | 241 | To use the SuperMQ CLI for the secure bootstrap configuration, use the following command: 242 | 243 | ```bash 244 | supermq-cli bootstrap secure <external_id> <external_key> <encryption_key> 245 | ``` 246 | 247 | For example: 248 | 249 | ```bash 250 | supermq-cli bootstrap bootstrap secure '09:6:0:sb:sa' 'key' 'v7aT0HGxJxt2gULzr3RHwf4WIf6DusPp' 251 | ``` 252 | 253 | For more information about the Bootstrap service API, please check out the [API documentation][api-docs]. 254 | 255 | [api-docs]: https://github.com/absmach/supermq/blob/main/api/openapi/bootstrap.yml 256 | -------------------------------------------------------------------------------- /docs/certs.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Certs 3 | --- 4 | 5 | 6 | Provisioning is a process of configuring an IoT platform in which a system operator creates and sets up the different entities used in the platform - users, groups, channels and clients. 7 | 8 | ## Certs Service 9 | 10 | The `Certs` service issues certificates for clients. It can create certificates to be used when `SuperMQ` is deployed to support mTLS. 11 | The `Certs` service will create a certificate for a valid client ID if a valid user token is passed and the user owns the provided client ID. 12 | 13 | The certificate service can create certificates in PKI mode, where certificates are issued by a PKI: when you deploy `Vault` for PKI certificate management, the `certs` service will proxy requests to `Vault`, first checking access rights and saving info on successfully created certificates. 14 | 15 | ### PKI mode 16 | 17 | When `SMQ_CERTS_VAULT_HOST` is set, it is presumed that `Vault` is installed and the `certs` service will issue certificates using the `Vault` API. 18 | 19 | First you'll need to set up `Vault`. 20 | 21 | To set up `Vault`, follow the steps in [Build Your Own Certificate Authority (CA)][vault-pki-engine]. 22 | 23 | To set up the certs service with `Vault`, the following environment variables must be set: 24 | 25 | ```bash 26 | SMQ_CERTS_VAULT_HOST=vault-domain.com 27 | SMQ_CERTS_VAULT_PKI_PATH=<vault_pki_path> 28 | SMQ_CERTS_VAULT_ROLE=<vault_role> 29 | SMQ_CERTS_VAULT_TOKEN=<vault_token> 30 | ``` 31 | 32 | For lab purposes you can use docker-compose and the script for setting up PKI in [meodor-vault][meodor-vault]. 33 | 34 | Make sure you have an already running instance of `SuperMQ`, `Vault` and the `Certs` service. 35 | 36 | To start SuperMQ run: 37 | 38 | ```bash 39 | make run up args="-d" 40 | ``` 41 | 42 | To start vault run: 43 | 44 | ```bash 45 | make run_addons vault up args="-d" 46 | ``` 47 | 48 | When the vault service is up and running, some initialization steps must be performed to set up clients for the `Certs` service.
For more information about this steps please check [supermq-vault][supermq-vault] 49 | 50 | ```bash 51 | bash docker/addons/vault/vault-init.sh 52 | bash docker/addons/vault/vault-unseal.sh 53 | bash docker/addons/vault/vault-set-pki.sh 54 | ``` 55 | 56 | `vault-init.sh` initializes Vault, generates unseal keys and root tokens, and updates corresponding environment variables in the `.env` file. It's important to securely store these keys as they are required to unseal Vault. 57 | 58 | `vault-unseal.sh` is used to unseal Vault after initialization, but it's typically not needed since Vault can unseal itself when starting the container. 59 | 60 | `vault-set-pki.sh` generates certificates for Vault, including root and intermediate certificates, and copies them to the `docker/ssl/certs` folder. The CA parameters are sourced from environment variables in the `.env` file. 61 | 62 | To start certs service run: 63 | 64 | ```bash 65 | make run_addons certs up args="-d" 66 | ``` 67 | 68 | Provision a client: 69 | 70 | ```bash 71 | supermq-cli provision test 72 | ``` 73 | 74 | To stop certs service run: 75 | 76 | ```bash 77 | make run_addons certs down 78 | ``` 79 | 80 | To stop vault service run: 81 | 82 | ```bash 83 | make run_addons vault down 84 | ``` 85 | 86 | This step can be skipped if you already have a client ID. 87 | 88 | #### 1. Issue a certificate 89 | 90 | ```bash 91 | supermq-cli certs issue [--ttl=8760h] 92 | ``` 93 | 94 | For example: 95 | 96 | ```bash 97 | supermq-cli certs issue f13f0f30-f923-4504-8a7a-6aa45bcb4866 $USER_TOKEN 98 | 99 | { 100 | "cert_serial": "6f:35:d5:9d:47:9d:23:50:08:f7:31:13:82:22:e4:c8:e6:cf:2c:c1", 101 | "client_cert": "-----BEGIN CERTIFICATE-----\nMIIEATCCAumgAwIBAgIUbzXVnUedI1AI9zETgiLkyObPLMEwDQYJKoZIhvcNAQEL\nBQAwLjEsMCoGA1UEAxMjbWFpbmZsdXguY29tIEludGVybWVkaWF0ZSBBdXRob3Jp\ndHkwHhcNMjMwOTE0MTEwOTI5WhcNMjMxMDE0MTEwOTU4WjAvMS0wKwYDVQQDEyRi\nYTFmMmIxNi01MjA3LTQ2MDgtYTRkZS01ZmFiZmI4NjI3YzIwggEiMA0GCSqGSIb3\nDQEBAQUAA4IBDwAwggEKAoIBAQC9RxcHaTzn18vBdWWZf37K8Grc5dLW/m8vhwOJ\n8oe3iPUiE7xFijIXKw236R1NBh8fLT6/2lia/p4acZtls3yFRphooDwP7S2OiJRI\ngGb/r0SYmSnQKjHbdbixauNECGk1TDNSGvmpNSzvAZvYSJAvd5ZpYf/8Db9IBW0N\nvbI7TfIJHay8vC/0rn1BsmC3x+3nEm0W+Z5udC/UT4+pQn7QWrBsxjVT4r5WY0SQ\nkVhA9Wo+Wpzmy1CMC4X6yLmiIHmfRFlktDxKgPpyy/3zhAE2CkBpT7JEQ723Mv+m\n37oM2EJog+tgIZMExxDbw3Epqgo07B9DWpSZSBHCISeN/TzdAgMBAAGjggEUMIIB\nEDAOBgNVHQ8BAf8EBAMCA6gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC\nMB0GA1UdDgQWBBTAoqWVu8ctNmw5CKUBxsUKVDX+PDAfBgNVHSMEGDAWgBS7dmaT\nr5vJJPtV5dReawbYKhxzYzA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0\ndHA6Ly92YXVsdDo4MjAwL3YxL3BraV9pbnQvY2EwLwYDVR0RBCgwJoIkYmExZjJi\nMTYtNTIwNy00NjA4LWE0ZGUtNWZhYmZiODYyN2MyMDEGA1UdHwQqMCgwJqAkoCKG\nIGh0dHA6Ly92YXVsdDo4MjAwL3YxL3BraV9pbnQvY3JsMA0GCSqGSIb3DQEBCwUA\nA4IBAQCKMmDzyWWmuSJPh3O9hppRJ6mkX9gut4jP2rwowNv7haj3iu+hR8+GnTix\nu5oy3bZdmRryhhW0XyJsbCKO/z+wsY/RfVgMxF/c1cbmEzki804+AB4a4yNhQD6g\noEEQBD58b6mFi/vPCRiGZmmo5TqMlA37jBRSVnKO/CoH1CAvjqmfWdSoO4IC4uD4\nJev+QNr9wlOimYcA/usmo7rmqz7IB9R/Laxcdkq9iZelKly/jhftEbKgGf2NR/d7\nEKVONjCEp6fL2iBaQSA/899oJJ7QPqE5X821HhBlXKvNmZnYRyUmAS2h1jnxtovp\nsNGcLFRgIAFdaGl1172C7mBZF4C3\n-----END CERTIFICATE-----", 102 | "client_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEowIBAAKCAQEAvUcXB2k859fLwXVlmX9+yvBq3OXS1v5vL4cDifKHt4j1IhO8\nRYoyFysNt+kdTQYfHy0+v9pYmv6eGnGbZbN8hUaYaKA8D+0tjoiUSIBm/69EmJkp\n0Cox23W4sWrjRAhpNUwzUhr5qTUs7wGb2EiQL3eWaWH//A2/SAVtDb2yO03yCR2s\nvLwv9K59QbJgt8ft5xJtFvmebnQv1E+PqUJ+0FqwbMY1U+K+VmNEkJFYQPVqPlqc\n5stQjAuF+si5oiB5n0RZZLQ8SoD6csv984QBNgpAaU+yREO9tzL/pt+6DNhCaIPr\nYCGTBMcQ28NxKaoKNOwfQ1qUmUgRwiEnjf083QIDAQABAoIBADKd7kSnGgiOJwkn\nUfJIrCmtPYaxVz7zb9xv6LxdRXoJgDSKvpCCMn8LnnGOP623c18tBFjeFU/tw24i\n74G1DBnAFUX1g9pmfQZe8/injePWhSuh2hK3FfowcyHPCdPJxAjixd6xJA7iD5Aj\nCABA934aJvkrof9P1dV2zgEct6sv6GPwUgSZxTYVNyU93T/pmvodvpNTYd3uk71A\nLCC5Ojv2gEOkHUWHhMntz7bl6wcH/atk//uYoYxcjZ811tL7/7xwUbyRxFD/b6kP\niptdoXBv27eWWKOtFMgF9iNkhefSKkmHZZWIL1J5CFE8fUdddeLoOa0e7a9vhYS9\n5TMzC2kCgYEA+TJf60QP3rjEgm6bJw1h48ffkPkZTsdp083GoJB77yXUH7m9Wt9g\nlYSALN+67fnkXPEe/C9SInMDRMp9VoswOHeJCFbCNdx5Klv8KKuMZMk0yCZifhx6\nBl7IsVlmlzq3EhK1ZjOVWMxvwS7MlMpPAcsc8DGhwhv9sXW3k2nMevsCgYEAwnHx\nheuaYgE/HrE/GEcPNAwy/uyBb8wxoKavl8OKEyPH+LK8powo9xss8zi+yEYHfSQP\nnJ45Rdz/HGl5QIwD4CjA3Vrm0sTMh094DPp9KhxcOwIhK/IvUJ0deKwHRWek/+c8\nwbD6HfX2Vtu5RU9z2KS7VtazjU5TkIbKP29LoAcCgYAUKAv0JrQ16rISbsnj9cQm\nPYOK4Ws3oQ+hTzKyyB0OMfwfeNGlKQ5R6b7IYmxnVWAwWFyOP3GgUbdA+DP9LRMA\nbkLKRuI8oxG16GzUCVQ4zsGTMu+ijcEdBMus9LNEpj4qmxLLKn75CMg9UwC/REHx\nvjEgCJOx9LungAMSTGt6wwKBgQCXvSGUt6pvhreCNSGeyX1EyaxWIaxU2U11J/7p\neQ/cJdUc8Cal9cTWKV/nokXHtlaLwsNoHlVlfrOasXiM9XbkzAjN9O0iV6+gfFSc\nFDHu1djnt565U7K2vxVLoTu/XsV1ajeQk5JsJRCK8cbgHsOxscP8XWobAJ/XrkhQ\nPoMOqwKBgD8goECBKj+SofUfqKCnGf3E2MWF3kTZMfPaBcuV8TaGMWRRljMmK8YT\npew6IIkAFrsIaXxQsym2JQ+j/L2AoxQkzlf2VF4SaBfUUByT3NijSBpD/d3xRlWA\n7UUO0d72YFnPTqY98Ch/fbKnaCRL/Usv8c9nCt5IdmnihYnuvxYT\n-----END RSA PRIVATE KEY-----", 103 | "expiration": "2023-10-14T11:09:58Z", 104 | "client_id": "f13f0f30-f923-4504-8a7a-6aa45bcb4866" 105 | } 106 | ``` 107 | 108 | #### 2. 
Retrieve a certificate 109 | 110 | ```bash 111 | supermq-cli certs get [<cert_serial> | client <client_id>] <user_auth_token> 112 | ``` 113 | 114 | For example: 115 | 116 | ```bash 117 | supermq-cli certs get 6f:35:d5:9d:47:9d:23:50:08:f7:31:13:82:22:e4:c8:e6:cf:2c:c1 $USER_TOKEN 118 | { 119 | "cert_serial": "6f:35:d5:9d:47:9d:23:50:08:f7:31:13:82:22:e4:c8:e6:cf:2c:c1", 120 | "client_cert": "-----BEGIN CERTIFICATE-----\nMIIEATCCAumgAwIBAgIUbzXVnUedI1AI9zETgiLkyObPLMEwDQYJKoZIhvcNAQEL\nBQAwLjEsMCoGA1UEAxMjbWFpbmZsdXguY29tIEludGVybWVkaWF0ZSBBdXRob3Jp\ndHkwHhcNMjMwOTE0MTEwOTI5WhcNMjMxMDE0MTEwOTU4WjAvMS0wKwYDVQQDEyRi\nYTFmMmIxNi01MjA3LTQ2MDgtYTRkZS01ZmFiZmI4NjI3YzIwggEiMA0GCSqGSIb3\nDQEBAQUAA4IBDwAwggEKAoIBAQC9RxcHaTzn18vBdWWZf37K8Grc5dLW/m8vhwOJ\n8oe3iPUiE7xFijIXKw236R1NBh8fLT6/2lia/p4acZtls3yFRphooDwP7S2OiJRI\ngGb/r0SYmSnQKjHbdbixauNECGk1TDNSGvmpNSzvAZvYSJAvd5ZpYf/8Db9IBW0N\nvbI7TfIJHay8vC/0rn1BsmC3x+3nEm0W+Z5udC/UT4+pQn7QWrBsxjVT4r5WY0SQ\nkVhA9Wo+Wpzmy1CMC4X6yLmiIHmfRFlktDxKgPpyy/3zhAE2CkBpT7JEQ723Mv+m\n37oM2EJog+tgIZMExxDbw3Epqgo07B9DWpSZSBHCISeN/TzdAgMBAAGjggEUMIIB\nEDAOBgNVHQ8BAf8EBAMCA6gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC\nMB0GA1UdDgQWBBTAoqWVu8ctNmw5CKUBxsUKVDX+PDAfBgNVHSMEGDAWgBS7dmaT\nr5vJJPtV5dReawbYKhxzYzA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0\ndHA6Ly92YXVsdDo4MjAwL3YxL3BraV9pbnQvY2EwLwYDVR0RBCgwJoIkYmExZjJi\nMTYtNTIwNy00NjA4LWE0ZGUtNWZhYmZiODYyN2MyMDEGA1UdHwQqMCgwJqAkoCKG\nIGh0dHA6Ly92YXVsdDo4MjAwL3YxL3BraV9pbnQvY3JsMA0GCSqGSIb3DQEBCwUA\nA4IBAQCKMmDzyWWmuSJPh3O9hppRJ6mkX9gut4jP2rwowNv7haj3iu+hR8+GnTix\nu5oy3bZdmRryhhW0XyJsbCKO/z+wsY/RfVgMxF/c1cbmEzki804+AB4a4yNhQD6g\noEEQBD58b6mFi/vPCRiGZmmo5TqMlA37jBRSVnKO/CoH1CAvjqmfWdSoO4IC4uD4\nJev+QNr9wlOimYcA/usmo7rmqz7IB9R/Laxcdkq9iZelKly/jhftEbKgGf2NR/d7\nEKVONjCEp6fL2iBaQSA/899oJJ7QPqE5X821HhBlXKvNmZnYRyUmAS2h1jnxtovp\nsNGcLFRgIAFdaGl1172C7mBZF4C3\n-----END CERTIFICATE-----", 121 | "expiration": "2023-10-14T11:09:58Z", 122 | "client_id": "f13f0f30-f923-4504-8a7a-6aa45bcb4866" 123 | } 124 | ``` 125 | 126 | ```bash 127 | supermq-cli certs get client f13f0f30-f923-4504-8a7a-6aa45bcb4866 $USER_TOKEN 128 | { 129 | "certs": [ 130 | { 131 | "cert_serial": "6f:35:d5:9d:47:9d:23:50:08:f7:31:13:82:22:e4:c8:e6:cf:2c:c1", 132 | "expiration": "0001-01-01T00:00:00Z" 133 | } 134 | ], 135 | "limit": 10, 136 | "offset": 0, 137 | "total": 1 138 | } 139 | ``` 140 | 141 | #### 3. Revoke a certificate 142 | 143 | ```bash 144 | supermq-cli certs revoke <client_id> <user_auth_token> 145 | ``` 146 | 147 | For example: 148 | 149 | ```bash 150 | supermq-cli certs revoke f13f0f30-f923-4504-8a7a-6aa45bcb4866 $USER_TOKEN 151 | 152 | revoked: 2023-09-14 11:21:44 +0000 UTC 153 | ``` 154 | 155 | For more information about the Certs service API, please check out the [API documentation][api-docs]. 156 | 157 | [vault-pki-engine]: https://learn.hashicorp.com/tutorials/vault/pki-engine 158 | [meodor-vault]: https://github.com/mteodor/vault 159 | [api-docs]: https://github.com/absmach/supermq/blob/main/api/openapi/certs.yml 160 | [supermq-vault]: https://github.com/absmach/supermq/blob/main/docker/addons/vault/README.md#setup 161 | -------------------------------------------------------------------------------- /docs/dev-guide.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Developer's Guide 3 | --- 4 | 5 | 6 | ## Getting SuperMQ 7 | 8 | SuperMQ source can be found in the official [SuperMQ GitHub repository][supermq-repo]. You should fork this repository in order to make changes to the project.
The forked version of the repository should be cloned using the following: 9 | 10 | ```bash 11 | git clone $SOMEPATH/supermq 12 | cd $SOMEPATH/supermq 13 | ``` 14 | 15 | **Note:** If your `$SOMEPATH` is equal to `$GOPATH/src/github.com/absmach/supermq`, make sure that your `$GOROOT` and `$GOPATH` do not overlap (otherwise, go modules won't work). 16 | 17 | ## Building 18 | 19 | ### Building Prerequisites 20 | 21 | Make sure that you have the [Protocol Buffers][protocol-buffers] (version 21.12) compiler (`protoc`) installed. 22 | 23 | [Go Protobuf][golang-protobuf] installation instructions are [here][protobuf-install]. Go Protobuf uses C bindings, so you will need to install [C++ protobuf][protobuf] as a prerequisite. SuperMQ uses `Protocol Buffers for Go with Gadgets` to generate faster marshaling and unmarshaling Go code. Installation instructions for Protocol Buffers for Go with Gadgets can be found [here][google-protobuf]. 24 | 25 | A copy of [Go][go-install] (version 1.19.4) will also need to be installed on your system; the docker-compose files use template version 3.7. 26 | 27 | If any of these versions seem outdated, the latest can always be found in our [CI script][mg-ci-scripts]. 28 | 29 | ### Build All Services 30 | 31 | Use the _GNU Make_ tool to build all SuperMQ services: 32 | 33 | ```bash 34 | make 35 | ``` 36 | 37 | Build artifacts will be put in the `build` directory. 38 | 39 | > N.B. All SuperMQ services are built as statically linked binaries. This way they can be portable (transferred to any platform just by placing them there and running them), as they contain all needed libraries and do not rely on shared system libraries. This helps create [FROM scratch][scratch-docker] dockers. 40 | 41 | ### Build Individual Microservice 42 | 43 | Individual microservices can be built with: 44 | 45 | ```bash 46 | make <microservice_name> 47 | ``` 48 | 49 | For example: 50 | 51 | ```bash 52 | make http 53 | ``` 54 | 55 | will build the HTTP Adapter microservice. 56 | 57 | ### Building Dockers 58 | 59 | Dockers can be built with: 60 | 61 | ```bash 62 | make dockers 63 | ``` 64 | 65 | or individually with: 66 | 67 | ```bash 68 | make docker_<microservice_name> 69 | ``` 70 | 71 | For example: 72 | 73 | ```bash 74 | make docker_http 75 | ``` 76 | 77 | > N.B. SuperMQ creates `FROM scratch` docker containers which are compact and small in size. 78 | > 79 | > N.B. The `clients-db` and `users-db` containers are built from a vanilla PostgreSQL docker image downloaded from docker hub, which does not persist the data when these containers are rebuilt. Thus, **rebuilding all docker containers with `make dockers`, or rebuilding the `clients-db` and `users-db` containers separately with `make docker_clients-db` and `make docker_users-db` respectively, will cause data loss. All your users, clients, channels and connections between them will be lost!** As we use this setup only for development, we don't guarantee any permanent data persistence. However, in order to enable data retention, we have configured persistent volumes for each container that stores data. If you want to update your SuperMQ dockerized installation and want to keep your data, use `make cleandocker` to clean the containers and images while keeping the data (stored in docker persistent volumes), and then `make run` to update the images and the containers. Check the [Cleaning up your dockerized SuperMQ setup][cleanup-docker] section for details. Please note that this kind of updating might not work if there are database changes.
80 | 81 | #### Building Docker images for development 82 | 83 | In order to speed up the build process, you can use commands such as: 84 | 85 | ```bash 86 | make dockers_dev 87 | ``` 88 | 89 | or individually with: 90 | 91 | ```bash 92 | make docker_dev_<microservice_name> 93 | ``` 94 | 95 | Commands `make dockers` and `make dockers_dev` are similar. The main difference is that building images in development mode is done on the local machine rather than in an intermediate image, which makes building images much faster. Before running this command, the corresponding binary needs to be built in order to make changes visible. This can be done using the `make` or `make <microservice_name>` command. Commands `make dockers_dev` and `make docker_dev_<microservice_name>` should be used only for development, to speed up the process of image building. **For deployment images, the commands from the section above should be used.** 96 | 97 | ### Suggested workflow 98 | 99 | When the project is first cloned to your system, you will need to build all of the SuperMQ services: 100 | 101 | ```bash 102 | make 103 | make dockers_dev 104 | ``` 105 | 106 | As you develop and test changes, only the services related to your changes will need to be rebuilt. This reduces compile time and creates a much more enjoyable development experience. 107 | 108 | ```bash 109 | make <service_name> 110 | make docker_dev_<service_name> 111 | make run 112 | ``` 113 | 114 | ### Overriding the default docker-compose configuration 115 | 116 | Sometimes, depending on the use case and the user's needs, it might be useful to override or add some extra parameters to the docker-compose configuration. These configuration changes can be done by specifying multiple compose files with the [docker-compose command line option -f][docker-compose-ref], as described [here][docker-compose-extend]. 117 | The following format of the `docker-compose` command can be used to extend or override the configuration: 118 | 119 | ```bash 120 | docker-compose -f docker/docker-compose.yml -f docker/docker-compose.custom1.yml -f docker/docker-compose.custom2.yml up [-d] 121 | ``` 122 | 123 | In the command above, each successive file overrides the previous parameters. 124 | 125 | A practical example in our case would be to enable debugging and tracing in NATS so that we can better see how the messages are moving around. 126 | 127 | `docker-compose.nats-debugging.yml` 128 | 129 | ```yaml 130 | version: "3" 131 | 132 | services: 133 | nats: 134 | command: --debug -DV 135 | ``` 136 | 137 | When we have the override files in place, to compose the whole infrastructure including the persistent volumes we can execute: 138 | 139 | ```bash 140 | docker-compose -f docker/docker-compose.yml -f docker/docker-compose.nats-debugging.yml up -d 141 | ``` 142 | 143 | **Note:** Please store your customizations in some folder outside SuperMQ's source folder, and maybe add them to some other git repository. You can always apply your customizations by pointing to the right file using `docker-compose -f ...`. 144 | 145 | ### Cleaning up your dockerized SuperMQ setup 146 | 147 | If you want to clean your whole dockerized SuperMQ installation, you can use the `make cleandocker` command. Please note that **by default the `make cleandocker` command will stop and delete all of the containers and images, but will NOT delete persistent volumes**. If you want to delete the gathered data in the system (the persistent volumes), use the `make pv=true cleandocker` command (pv = persistent volumes). This form of the command will stop and delete the containers and the images, and will also delete the persistent volumes.
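Putting the two clean-up variants side by side, a typical update flow looks like this (a sketch using only the make targets described above):

```bash
# Refresh images and containers while keeping the data
# stored in the docker persistent volumes
make cleandocker
make run

# Full reset: also deletes the persistent volumes (all stored data is lost)
make pv=true cleandocker
```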
148 | 149 | ### MQTT Broker 150 | 151 | To build the SuperMQ MQTT message broker Docker image, use the following commands: 152 | 153 | ```bash 154 | cd docker/vernemq 155 | docker build --no-cache . -t supermq/vernemq 156 | ``` 157 | 158 | SuperMQ uses [VerneMQ][vernemq] for the implementation of the MQTT messaging. Therefore, for some questions or problems you can also check out the VerneMQ documentation or reach out to its contributors. 159 | 160 | ### Protobuf 161 | 162 | If you've made any changes to `.proto` files, you should call the `protoc` command prior to compiling individual microservices. 163 | 164 | To do this by hand, execute: 165 | 166 | ```bash 167 | protoc -I. --go_out=. --go_opt=paths=source_relative pkg/messaging/*.proto 168 | protoc -I. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative users/policies/*.proto 169 | protoc -I. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative clients/policies/*.proto 170 | ``` 171 | 172 | A shorthand to do this via the `make` tool is: 173 | 174 | ```bash 175 | make proto 176 | ``` 177 | 178 | > N.B. This must be done once at the beginning in order to generate the protobuf Go structures needed for the build. However, if you don't change any of the `.proto` files, this step is not mandatory, since all generated files are included in the repository (those are the files with the `.pb.go` extension). 179 | 180 | ### Cross-compiling for ARM 181 | 182 | SuperMQ can be compiled for the ARM platform and run on a Raspberry Pi or other similar IoT gateways, by following the instructions [here][go-cross-compile] or [here][go-arm], as well as the information found [here][wiki-go-arm]. The environment variables `GOARCH=arm` and `GOARM=7` must be set for the compilation. 183 | 184 | Cross-compilation for ARM with SuperMQ make: 185 | 186 | ```bash 187 | GOOS=linux GOARCH=arm GOARM=7 make 188 | ``` 189 | 190 | ## Running tests 191 | 192 | To run all of the tests you can execute: 193 | 194 | ```bash 195 | make test 196 | ``` 197 | 198 | Dockertest is used for the tests, so to run them, you will need the Docker daemon/service running. 199 | 200 | ## Installing 201 | 202 | Installing Go binaries is simple: just move them from `build` to `$GOBIN` (do not forget to add `$GOBIN` to your `$PATH`). 203 | 204 | You can execute: 205 | 206 | ```bash 207 | make install 208 | ``` 209 | 210 | which will do this copying of the binaries for you. 211 | 212 | > N.B. Only Go binaries will be installed this way. The MQTT broker is written in Erlang and its build scripts can be found in the `docker/vernemq` dir. 213 | 214 | ## Deployment 215 | 216 | ### Deployment Prerequisites 217 | 218 | SuperMQ depends on several infrastructural services, notably the default message broker, [NATS][nats], and the [PostgreSQL][postgresql] database. 219 | 220 | #### Message Broker 221 | 222 | SuperMQ uses NATS as its default central message bus. For development purposes (when not run via Docker), it expects that NATS is installed on the local system.
223 | 224 | To do this, execute: 225 | 226 | ```bash 227 | go install github.com/nats-io/nats-server/v2@latest 228 | ``` 229 | 230 | This will install the `nats-server` binary, which can simply be run by executing: 231 | 232 | ```bash 233 | nats-server 234 | ``` 235 | 236 | If you want to change the default message broker to [RabbitMQ][rabbitmq], [VerneMQ][vernemq] or [Kafka][kafka], you need to install it on the local system. 237 | To run using a different broker, you need to set the `SMQ_BROKER_TYPE` env variable to `nats`, `rabbitmq` or `vernemq` during the make and run process. 238 | 239 | ```bash 240 | SMQ_BROKER_TYPE=<broker_type> make 241 | SMQ_BROKER_TYPE=<broker_type> make run 242 | ``` 243 | 244 | #### PostgreSQL 245 | 246 | SuperMQ uses PostgreSQL to store metadata (`users`, `clients` and `channels` entities, alongside authorization tokens). It expects that a PostgreSQL DB is installed, set up and running on the local system. 247 | 248 | Information on how to set up (prepare) the PostgreSQL database can be found [here][postgres-roles]; it is done by executing the following commands: 249 | 250 | ```bash 251 | # Create `users` and `clients` databases 252 | sudo -u postgres createdb users 253 | sudo -u postgres createdb clients 254 | 255 | # Set up Postgres roles 256 | sudo su - postgres 257 | psql -U postgres 258 | postgres=# CREATE ROLE supermq WITH LOGIN ENCRYPTED PASSWORD 'supermq'; 259 | postgres=# ALTER USER supermq WITH LOGIN ENCRYPTED PASSWORD 'supermq'; 260 | ``` 261 | 262 | ### SuperMQ Services 263 | 264 | Running the SuperMQ microservices can be tricky, as there are a lot of them and each demands configuration in the form of environment variables. 265 | 266 | The whole system (the set of microservices) can be run with one command: 267 | 268 | ```bash 269 | make rundev 270 | ``` 271 | 272 | which will properly configure and run all microservices. 273 | 274 | Please ensure that the MQTT microservice has `node_modules` installed, as explained in the _MQTT Microservice_ chapter. 275 | 276 | > N.B. `make rundev` actually calls the helper script `scripts/run.sh`, so you can inspect this script for the details.
277 | 278 | [supermq-repo]: https://github.com/absmach/supermq 279 | [protocol-buffers]: https://developers.google.com/protocol-buffers/ 280 | [golang-protobuf]: https://github.com/golang/protobuf 281 | [protobuf-install]: https://github.com/golang/protobuf#installation 282 | [protobuf]: https://github.com/google/protobuf 283 | [google-protobuf]: https://google.golang.org/protobuf/proto 284 | [go-install]: https://golang.org/doc/install 285 | [mg-ci-scripts]: https://github.com/absmach/supermq/blob/main/scripts/ci.sh 286 | [scratch-docker]: https://hub.docker.com/_/scratch/ 287 | [cleanup-docker]: #cleaning-up-your-dockerized-supermq-setup 288 | [docker-compose-ref]: https://docs.docker.com/compose/reference/overview/ 289 | [docker-compose-extend]: https://docs.docker.com/compose/extends/ 290 | [go-cross-compile]: https://dave.cheney.net/2015/08/22/cross-compilation-with-go-1-5 291 | [go-arm]: https://www.alexruf.net/golang/arm/raspberrypi/2016/01/16/cross-compile-with-go-1-5-for-raspberry-pi.html 292 | [wiki-go-arm]: https://go.dev/wiki/GoArm 293 | [nats]: https://www.nats.io/ 294 | [postgresql]: https://www.postgresql.org/ 295 | [rabbitmq]: https://www.rabbitmq.com/download.html 296 | [vernemq]: https://vernemq.com/downloads/ 297 | [kafka]: https://kafka.apache.org/quickstart 298 | [postgres-roles]: https://support.rackspace.com/how-to/postgresql-creating-and-dropping-roles/ 299 | -------------------------------------------------------------------------------- /docs/getting-started.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Getting Started 3 | --- 4 | 5 | 6 | ## Step 1 - Run the System 7 | 8 | Before proceeding, install the following prerequisites: 9 | 10 | - [Docker][docker] (version 20.10.16) 11 | - [Docker compose][docker-compose] (version 1.29.2) 12 | 13 | Once everything is installed, execute the following command from the project root: 14 | 15 | ```bash 16 | make run 17 | ``` 18 | 19 | This will start the SuperMQ docker composition, which will output the logs from the containers. 20 | 21 | ## Step 2 - Install the CLI 22 | 23 | Open a new terminal from which you can interact with the running SuperMQ system. The easiest way to do this is by using the SuperMQ CLI, which can be downloaded as a tarball from GitHub (here we use release `0.14.0`, but be sure to use the [latest CLI release][mg-releases]): 24 | 25 | ```bash 26 | wget -O- https://github.com/absmach/supermq/releases/download/0.14.0/supermq-cli_0.14.0_linux-amd64.tar.gz | tar xvz -C $GOBIN 27 | ``` 28 | 29 | > Make sure that `$GOBIN` is added to your `$PATH` so that the `supermq-cli` command is accessible system-wide. 30 | 31 | ### Build supermq-cli 32 | 33 | Build `supermq-cli` if the pre-built CLI is not compatible with your OS, e.g. macOS. Please see the [CLI][cli] chapter for further details. 34 | 35 | ## Step 3 - Provision the System 36 | 37 | Once installed, you can use the CLI to quick-provision the system for testing: 38 | 39 | ```bash 40 | supermq-cli provision test 41 | ``` 42 | 43 | This command creates a temporary testing user, logs it in, then creates two things and two channels on behalf of this user. This quickly provisions a SuperMQ system with one simple testing scenario.
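Under the hood, `provision test` is roughly equivalent to creating each entity yourself, one CLI call at a time. The sketch below illustrates the idea; the subcommand names and arguments shown are illustrative rather than authoritative, so check the [CLI][cli] chapter for the real forms:

```bash
# Hypothetical step-by-step equivalent of `supermq-cli provision test`
supermq-cli users create <user_name> <user_email> <password>   # create a user
supermq-cli users token <user_email> <password>                # log in and obtain a token
supermq-cli clients create '{"name":"d0"}' $USER_TOKEN         # create a client (thing)
supermq-cli channels create '{"name":"c0"}' $USER_TOKEN        # create a channel
```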
45 | 46 | You can read more about system provisioning in the dedicated [Provisioning][provisioning] chapter 47 | 48 | Output of the command follows this pattern: 49 | 50 | ```json 51 | { 52 | "created_at": "2023-04-04T08:02:47.686337Z", 53 | "credentials": { 54 | "identity": "crazy_feistel@email.com", 55 | "secret": "12345678" 56 | }, 57 | "id": "0216df07-8f08-40ef-ba91-ff0e700f387a", 58 | "name": "crazy_feistel", 59 | "status": "enabled", 60 | "updated_at": "2023-04-04T08:02:47.686337Z" 61 | } 62 | 63 | 64 | { 65 | "access_token": "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw", 66 | "access_type": "Bearer", 67 | "refresh_token": "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA2ODE3NjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJyZWZyZXNoIn0.3xcrkIBbi2a8firNHtnK6I8sBBOgrQ6XBa3x7cybKc6omOuqrkkNjXGjKU9tgShvjpfCWT48AR1VqO_VxJxL8g" 68 | } 69 | 70 | 71 | [ 72 | { 73 | "created_at": "2023-04-04T08:02:47.81865461Z", 74 | "credentials": { 75 | "secret": "fc9473d8-6756-4fcc-968f-ea43cd0b803b" 76 | }, 77 | "id": "5d5e593b-7629-4cc3-bebc-b20d8ab9dbef", 78 | "name": "d0", 79 | "owner": "0216df07-8f08-40ef-ba91-ff0e700f387a", 80 | "status": "enabled", 81 | "updated_at": "2023-04-04T08:02:47.81865461Z" 82 | }, 83 | { 84 | "created_at": "2023-04-04T08:02:47.818661382Z", 85 | "credentials": { 86 | "secret": "56a4b1bd-9750-42b3-a3cb-cf5ee2b86fe4" 87 | }, 88 | "id": "45048a8e-c602-4e91-9556-a9d3af6617fb", 89 | "name": "d1", 90 | "owner": "0216df07-8f08-40ef-ba91-ff0e700f387a", 91 | "status": "enabled", 92 | "updated_at": "2023-04-04T08:02:47.818661382Z" 93 | } 94 | ] 95 | 96 | 97 | [ 98 | { 99 | "created_at": "2023-04-04T08:02:47.857619Z", 100 | "id": "a31e16f8-343c-4366-8b4f-c95e190937f4", 101 | "name": "c0", 102 | "owner_id": "0216df07-8f08-40ef-ba91-ff0e700f387a", 103 | "status": "enabled", 104 | "updated_at": "2023-04-04T08:02:47.857619Z" 105 | }, 106 | { 107 | "created_at": "2023-04-04T08:02:47.867336Z", 108 | "id": "e20ad0bb-c490-47dd-9366-fb8ffd56c5dc", 109 | "name": "c1", 110 | "owner_id": "0216df07-8f08-40ef-ba91-ff0e700f387a", 111 | "status": "enabled", 112 | "updated_at": "2023-04-04T08:02:47.867336Z" 113 | } 114 | ] 115 | 116 | ``` 117 | 118 | In the SuperMQ system terminal (where docker compose is running) you should see following logs: 119 | 120 | ```bash 121 | ... 
122 | supermq-users | {"level":"info","message":"Method register_client with id 0216df07-8f08-40ef-ba91-ff0e700f387a using token took 87.335902ms to complete without errors.","ts":"2023-04-04T08:02:47.722776862Z"} 123 | supermq-users | {"level":"info","message":"Method issue_token of type Bearer for client crazy_feistel@email.com took 55.342161ms to complete without errors.","ts":"2023-04-04T08:02:47.783884818Z"} 124 | supermq-users | {"level":"info","message":"Method identify for token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw with id 0216df07-8f08-40ef-ba91-ff0e700f387a took 1.389463ms to complete without errors.","ts":"2023-04-04T08:02:47.817018631Z"} 125 | supermq-things | {"level":"info","message":"Method create_things 2 things using token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw took 48.137759ms to complete without errors.","ts":"2023-04-04T08:02:47.853310066Z"} 126 | supermq-users | {"level":"info","message":"Method identify for token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw with id 0216df07-8f08-40ef-ba91-ff0e700f387a took 302.571µs to complete without errors.","ts":"2023-04-04T08:02:47.856820523Z"} 127 | supermq-things | {"level":"info","message":"Method create_channel for 2 channels using token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw took 15.340692ms to complete without errors.","ts":"2023-04-04T08:02:47.872089509Z"} 128 | supermq-users | {"level":"info","message":"Method identify for token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw with id 0216df07-8f08-40ef-ba91-ff0e700f387a took 271.162µs to complete without errors.","ts":"2023-04-04T08:02:47.875812318Z"} 129 | supermq-things | {"level":"info","message":"Method add_policy for client with id 5d5e593b-7629-4cc3-bebc-b20d8ab9dbef using token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw took 28.632906ms to complete without 
errors.","ts":"2023-04-04T08:02:47.904041832Z"} 130 | supermq-users | {"level":"info","message":"Method identify for token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw with id 0216df07-8f08-40ef-ba91-ff0e700f387a took 269.959µs to complete without errors.","ts":"2023-04-04T08:02:47.906989497Z"} 131 | supermq-things | {"level":"info","message":"Method add_policy for client with id 5d5e593b-7629-4cc3-bebc-b20d8ab9dbef using token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw took 6.303771ms to complete without errors.","ts":"2023-04-04T08:02:47.910594262Z"} 132 | supermq-users | {"level":"info","message":"Method identify for token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw with id 0216df07-8f08-40ef-ba91-ff0e700f387a took 364.448µs to complete without errors.","ts":"2023-04-04T08:02:47.912905436Z"} 133 | supermq-things | {"level":"info","message":"Method add_policy for client with id 45048a8e-c602-4e91-9556-a9d3af6617fb using token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODA1OTYyNjcsImlhdCI6MTY4MDU5NTM2NywiaWRlbnRpdHkiOiJjcmF6eV9mZWlzdGVsQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjAyMTZkZjA3LThmMDgtNDBlZi1iYTkxLWZmMGU3MDBmMzg3YSIsInR5cGUiOiJhY2Nlc3MifQ.EpaFDcRjYAHwqhejLfay5ju8L1a7VdhXKohUlwTv7YTeOK-ClfNNx6KznV05Swdj6lgvbmVAfe0wz2JMpfMjdw took 7.73352ms to complete without errors.","ts":"2023-04-04T08:02:47.920205467Z"} 134 | ... 135 | 136 | ``` 137 | 138 | This proves that these provisioning commands were sent from the CLI to the SuperMQ system. 139 | 140 | ## Step 4 - Send Messages 141 | 142 | Once system is provisioned, a `thing` can start sending messages on a `channel`: 143 | 144 | ```bash 145 | supermq-cli messages send '[{"bn":"some-base-name:","bt":1.276020076001e+09, "bu":"A","bver":5, "n":"voltage","u":"V","v":120.1}, {"n":"current","t":-5,"v":1.2}, {"n":"current","t":-4,"v":1.3}]' 146 | ``` 147 | 148 | For example: 149 | 150 | ```bash 151 | supermq-cli messages send a31e16f8-343c-4366-8b4f-c95e190937f4 '[{"bn":"some-base-name:","bt":1.276020076001e+09, "bu":"A","bver":5, "n":"voltage","u":"V","v":120.1}, {"n":"current","t":-5,"v":1.2}, {"n":"current","t":-4,"v":1.3}]' fc9473d8-6756-4fcc-968f-ea43cd0b803b 152 | ``` 153 | 154 | In the SuperMQ system terminal you should see following logs: 155 | 156 | ```bash 157 | ... 
158 | supermq-things | {"level":"info","message":"Method authorize_by_key for channel with id a31e16f8-343c-4366-8b4f-c95e190937f4 by client with secret fc9473d8-6756-4fcc-968f-ea43cd0b803b took 7.048706ms to complete without errors.","ts":"2023-04-04T08:06:09.750992633Z"} 159 | supermq-broker | [1] 2023/04/04 08:06:09.753072 [TRC] 192.168.144.11:60616 - cid:10 - "v1.18.0:go" - <<- [PUB channels.a31e16f8-343c-4366-8b4f-c95e190937f4 261] 160 | supermq-broker | [1] 2023/04/04 08:06:09.754037 [TRC] 192.168.144.11:60616 - cid:10 - "v1.18.0:go" - <<- MSG_PAYLOAD: ["\n$a31e16f8-343c-4366-8b4f-c95e190937f4\x1a$5d5e593b-7629-4cc3-bebc-b20d8ab9dbef\"\x04http*\xa6\x01[{\"bn\":\"some-base-name:\",\"bt\":1.276020076001e+09, \"bu\":\"A\",\"bver\":5, \"n\":\"voltage\",\"u\":\"V\",\"v\":120.1}, {\"n\":\"current\",\"t\":-5,\"v\":1.2}, {\"n\":\"current\",\"t\":-4,\"v\":1.3}]0\xd9\xe6\x8b\xc9Ø\xab\xa9\x17"] 161 | supermq-broker | [1] 2023/04/04 08:06:09.755550 [TRC] 192.168.144.13:58572 - cid:8 - "v1.18.0:go" - ->> [MSG channels.a31e16f8-343c-4366-8b4f-c95e190937f4 1 261] 162 | supermq-http | {"level":"info","message":"Method publish to channel a31e16f8-343c-4366-8b4f-c95e190937f4 took 15.979094ms to complete without errors.","ts":"2023-04-04T08:06:09.75232571Z"} 163 | ... 164 | ``` 165 | 166 | This proves that messages have been correctly sent through the system via the protocol adapter (`supermq-http`). 167 | 168 | [docker]: https://docs.docker.com/install/ 169 | [docker-compose]: https://docs.docker.com/compose/install/ 170 | [mg-releases]: https://github.com/absmach/supermq/releases 171 | [cli]: cli.md 172 | [provisioning]: ./provision.md#platform-management 173 | -------------------------------------------------------------------------------- /docs/img/architecture.xml: -------------------------------------------------------------------------------- 1 | 
7Vpbc6M2FP41flwPd+PHtTeb7UyacZt0uvuUkUEGdQVihBzb++srgYQB4YQm+JImyWQCB13gO993dHQZ2fNke01BFv9OQohHlhFuR/aXkWWZjm3wf8KyKy1TyywNEUWhLLQ33KFfUBplvWiNQpg3CjJCMENZ0xiQNIUBa9gApWTTLLYiuNlrBiKoGe4CgHXr3yhkcWn1XWNv/wZRFKueTUM+WYLgZ0TJOpX9jSx7VfyUjxOg2pLl8xiEZFMz2Vcje04JYeVVsp1DLLBVsJX1vh54Wr03hSnrU2FSVngEeC0//fb6t9vv8uXYTgHyCClDHJ8bsIR4QXLEEEn5oyVhjCQjewbyrHTECm0hb3wWswTzW5NfqsqfMYpEJUYybs0ZJT/hnGBCuS0lKe9qRtYMo5RblV8NUTIGmXiPZBsJuo0DxCjajjNKtruHHFLePi+lf7pEQ3QPtzWThOIakgQyuuNF5NOKf5K2lueW95s9CSbeWBrjGgOmsiKQxIuqtvfg8wuJ/wHneZoz/uJfl2vOwMIJ/Is3MWLwLgOBsG84NE3YUVKyHCUR7zrAiGPD+NU1BPTBtPwt/xtnacSLrhDGyhOcsiGA/irQfMSfeIEPl6th0LanTbRNJa8a2qYSSh1sbwCwbVMD+/rPxVzDGoY8MMhbQllMIpICfLW3zgq1C8oXXK3hD7eIfRdmTpjy7ocsxDGju9ojcftD1soZ99JnEcO4IcAgz1GgzF8RVo3/AxnbycAJ1owI7VSvd0OEwopyB/2UkzUNoOK9jLGARlAWs6alTQDwpDcpxIChx2bkfI1rLN01C0oYCQgv5WH+frMl5VeRuAIhyNg7EoljnFAkrn0cPfiThiIqfSwgRfwlRTgvA38/LcA01Atx49HkYutymZxLLa6jqWXkzjIlGPeL5kLOPNb0DIU5+gWWRQGBe0ZQyop3cmeiBS4zOXQHHJditG2P6QkKw8L9he5mVRpUk4VMhLpAVkTTdFGlePLtGmlSl14+GWPDNOQI3Rts2dxCfHetCFmtcu7gtjeqXvuFM1tz0BwjKADWxIV5/MlhLeUJMFmHz0exVmiCZujCSVdomnoTG3jDhKZ2sjTpGZn8ASJTR+LKs1IWUXj3x42Ga4XmjieXYcHfZwBdlkHsZnkAYY6vHzpdCPvW0vYGQtj1WhD7OsTTYyVI1keCtI/4Uz3iq3T9DAnSVHPNfYzS6N0kQV06ON5MwdUHWE0EXRnIAaLWBVDRvE7ykvO9eDnRaamKPUvLGnZuB3TK9tIxVE2p7Vb+2nZJ+TmyVn2lot2Q32rIbDVUgqA19IIR29b19fZSqoq2r06peEZlGX4D/E/m68gxaIJl6+smFy3QvhPr0wjUbAnUm7xQoGZLoJ51PIH6mse/3d8v7t6ELhVbB5nquPYrpzrbzgrD6FLP0bu9dEHi7J3UfYjzkNsdffnuFB5Xkwk1sfiPXOjK8Hsv6nyQ4SAZ9MWPdzBP79rJONY83elY/7t8uak4W5eb4sqFyM21mk612yrpKzd38kxDA8pNn6vefr7XR9xSDWpr22q6vb3dusJkE8ScLOMQUZ6fPISAgQPSbMlv5YrfzuWB4mcg+bluA2DH1Ldtqx36OlPsAfTnTXS5DbAk1mt/pBKoqQs0BHlc9KCtmcmt9vqCmXGGXRTH10OAd7Y1NUefUrxD5bimPnAdTTlOxzJLr5GrQy/6MPTk4CVu22I6LJjDi8y9iN61u64C9YWMdW8xtVSffnr6NJjSFZf7xb/LokA73XGcF1JgYrQaaseKASngH2XsbWxHNRLhjnG2M5aceLxtELIX+dyOtQ81BJ7hDEPHMURCE4D5p1PNxf+TTS1NJ6c82ePpa+bzdS6OcmonrAKS5uvkLEesVn4Ag05HLH3XcZ8Mta84YjXtmD4M5Ah+uz/cW0a8/Qlq++pf -------------------------------------------------------------------------------- /docs/img/bootstrap/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/bootstrap/1.png -------------------------------------------------------------------------------- /docs/img/bootstrap/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/bootstrap/2.png -------------------------------------------------------------------------------- /docs/img/bootstrap/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/bootstrap/3.png -------------------------------------------------------------------------------- /docs/img/bootstrap/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/bootstrap/4.png -------------------------------------------------------------------------------- /docs/img/bootstrap/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/bootstrap/5.png 
-------------------------------------------------------------------------------- /docs/img/bootstrap/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/bootstrap/6.png -------------------------------------------------------------------------------- /docs/img/bootstrap/addconfig.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/bootstrap/addconfig.png -------------------------------------------------------------------------------- /docs/img/edge/edge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/edge/edge.png -------------------------------------------------------------------------------- /docs/img/gopher-reading.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/gopher-reading.png -------------------------------------------------------------------------------- /docs/img/gopherBanner.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/gopherBanner.jpg -------------------------------------------------------------------------------- /docs/img/opcua/opcua.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/opcua/opcua.png -------------------------------------------------------------------------------- /docs/img/tracing/messagePub.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/tracing/messagePub.png -------------------------------------------------------------------------------- /docs/img/tracing/search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/tracing/search.png -------------------------------------------------------------------------------- /docs/img/tracing/trace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/tracing/trace.png -------------------------------------------------------------------------------- /docs/img/twins/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/twins/architecture.png -------------------------------------------------------------------------------- /docs/img/ui/dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/ui/dashboard.png -------------------------------------------------------------------------------- 
/docs/img/ui/details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/ui/details.png -------------------------------------------------------------------------------- /docs/img/ui/gateways.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/ui/gateways.png -------------------------------------------------------------------------------- /docs/img/ui/loraserver.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/ui/loraserver.png -------------------------------------------------------------------------------- /docs/img/ui/things.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/ui/things.png -------------------------------------------------------------------------------- /docs/img/ui/tracing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/docs/img/ui/tracing.png -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Overview 3 | slug: / 4 | --- 5 | 6 | 7 | ## About 8 | 9 | ### What is SuperMQ? 10 | 11 | SuperMQ is a modern, scalable, secure, open-source and patent-free IoT cloud platform written in Go. 12 | 13 | It accepts user and client connections over various network protocols (i.e. HTTP, MQTT, WebSocket, CoAP), thus making a seamless bridge between them. It is used as IoT middleware for building complex IoT solutions. 14 | 15 | ![banner](img/gopherBanner.jpg) 16 | 17 | ### Features 18 | 19 | - Protocol bridging (i.e. HTTP, MQTT, WebSocket, CoAP) 20 | - Device management and provisioning 21 | - Fine-grained access control 22 | - Platform logging and instrumentation support 23 | - Container-based deployment using Docker 24 | - Data ingestion, processing and storage 25 | - Scalable and distributed by design 26 | - Multi-tenancy support 27 | - Extensibility and customization support 28 | 29 | ## Contributing to SuperMQ 30 | 31 | Thank you for your interest in SuperMQ and the desire to contribute! 32 | 33 | Take a look at our [open issues][open-issues]. The [good-first-issue][good-first-issue] label is specifically for issues that are great for getting started. Check out the [contribution guide][contribution-guide] to learn more about our style and conventions, and make your changes compatible with our workflow.
34 | 35 | ## License 36 | 37 | [Apache-2.0][license] 38 | 39 | [open-issues]: https://github.com/absmach/supermq/issues 40 | [good-first-issue]: https://github.com/absmach/supermq/labels/good-first-issue 41 | [contribution-guide]: https://github.com/absmach/supermq/blob/main/CONTRIBUTING.md 42 | [license]: https://github.com/absmach/supermq/blob/main/LICENSE 43 | -------------------------------------------------------------------------------- /docs/kubernetes.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Kubernetes 3 | --- 4 | 5 | 6 | SuperMQ can be easily deployed on the Kubernetes platform using Helm Charts from the official [SuperMQ DevOps GitHub repository](https://github.com/absmach/devops). 7 | 8 | ## Prerequisites 9 | 10 | ### 1. Install Docker 11 | 12 | K3d requires Docker to run Kubernetes clusters inside Docker containers. Follow the official [Docker installation guide](https://docs.docker.com/get-docker/) to install Docker. 13 | 14 | Once installed, verify the installation: 15 | 16 | ```bash 17 | docker --version 18 | ``` 19 | 20 | --- 21 | 22 | ### 2. Install Kubernetes via K3d 23 | 24 | K3d is a lightweight Kubernetes distribution that runs inside Docker, ideal for local development. 25 | 26 | #### Steps to install K3d 27 | 28 | ```bash 29 | curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash 30 | ``` 31 | 32 | For more information on K3d, refer to the official [K3d documentation](https://k3d.io/). 33 | 34 | ### 3. Install kubectl 35 | 36 | `kubectl` is the command-line tool used to interact with your Kubernetes cluster. 37 | 38 | #### Steps to install kubectl 39 | 40 | ```bash 41 | curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" 42 | sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl 43 | ``` 44 | 45 | Verify the installation: 46 | 47 | ```bash 48 | kubectl version --client 49 | ``` 50 | 51 | --- 52 | 53 | ### 4. Install Helm v3 54 | 55 | Helm is a package manager for Kubernetes, simplifying application installation and management. 56 | 57 | #### Steps to install Helm 58 | 59 | ```bash 60 | curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash 61 | ``` 62 | 63 | Verify the installation: 64 | 65 | ```bash 66 | helm version 67 | ``` 68 | 69 | --- 70 | 71 | ### 5. Add Helm Repositories 72 | 73 | #### Add Stable Helm Repository 74 | 75 | The **Helm stable repository** contains Helm charts that you can use to install applications on Kubernetes. 76 | 77 | ```bash 78 | helm repo add stable https://charts.helm.sh/stable 79 | helm repo update 80 | ``` 81 | 82 | #### Add Bitnami Helm Repository 83 | 84 | Bitnami offers a collection of popular Helm charts for various applications. 85 | 86 | ```bash 87 | helm repo add bitnami https://charts.bitnami.com/bitnami 88 | helm repo update 89 | ``` 90 | 91 | --- 92 | 93 | ### 6. Install Nginx Ingress Controller 94 | 95 | The Nginx Ingress Controller manages external access to services within your Kubernetes cluster. 
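Note that the controller needs a running cluster to land on. If you are following the K3d route above, you can create one and map the ingress HTTP/HTTPS ports to your host first (the cluster name is arbitrary):

```bash
# Create a local cluster and expose the ingress ports on the host
k3d cluster create supermq --port "80:80@loadbalancer" --port "443:443@loadbalancer"
```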
96 | 97 | #### Install Nginx Ingress Controller using Helm 98 | 99 | ```bash 100 | helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx 101 | helm repo update 102 | 103 | kubectl create namespace ingress-nginx 104 | 105 | helm install ingress-nginx ingress-nginx/ingress-nginx --version 3.26.0 --create-namespace -n ingress-nginx 106 | ``` 107 | 108 | Verify the installation: 109 | 110 | ```bash 111 | kubectl get pods -n ingress-nginx 112 | ``` 113 | 114 | --- 115 | 116 | ## Deploying SuperMQ (Manual Local Deployment) 117 | 118 | This method involves **manually deploying SuperMQ** by cloning the Helm chart repository to your local machine, making any necessary customizations, and installing the chart from the local directory. 119 | 120 | ### Deploy Use Case 121 | 122 | This approach is useful if you want to: 123 | 124 | - Directly interact with the chart source files. 125 | - Modify the chart before installation. 126 | - Perform development or testing on the chart. 127 | 128 | ### Steps 129 | 130 | #### 1. Clone SuperMQ Helm Chart Repository 131 | 132 | ```bash 133 | git clone https://github.com/absmach/devops.git 134 | cd devops/charts/magistrala 135 | ``` 136 | 137 | #### 2. Add Required Helm Repositories 138 | 139 | ```bash 140 | helm repo add nats https://nats-io.github.io/k8s/helm/charts/ 141 | helm repo add jaegertracing https://jaegertracing.github.io/helm-charts 142 | helm repo add hashicorp https://helm.releases.hashicorp.com 143 | helm repo update 144 | ``` 145 | 146 | This ensures that all necessary repositories are available for dependencies. 147 | 148 | #### 3. Update Dependencies 149 | 150 | Once the repositories have been added, update the on-disk dependencies to match the `Chart.yaml` file by running: 151 | 152 | ```bash 153 | helm dependency update 154 | ``` 155 | 156 | If the repositories are set up correctly, this will resolve and download all chart dependencies to `charts/magistrala/charts`. 157 | 158 | #### 4. Create a Namespace (if needed) 159 | 160 | ```bash 161 | kubectl create namespace mg 162 | ``` 163 | 164 | --- 165 | 166 | #### 5. Deploy SuperMQ 167 | 168 | Deploy the SuperMQ Helm chart into the `mg` namespace: 169 | 170 | ```bash 171 | helm install magistrala . -n mg 172 | ``` 173 | 174 | If you encounter an error related to snippet annotations in Nginx, enable them with: 175 | 176 | ```bash 177 | helm upgrade ingress-nginx ingress-nginx/ingress-nginx -n ingress-nginx --set controller.allowSnippetAnnotations=true 178 | ``` 179 | 180 | Ensure you have the Nginx Ingress repository added: 181 | 182 | ```bash 183 | helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx 184 | ``` 185 | 186 |
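If you applied the snippet-annotations override above, you can confirm that it took effect by inspecting the values supplied to the release (a quick sanity check before verifying the deployment itself):

```bash
# Show the user-supplied values for the ingress-nginx release
helm get values ingress-nginx -n ingress-nginx
```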
#### 6. Verifying the Deployment 187 | 188 | After deploying SuperMQ, verify the services and pods using `kubectl` commands: 189 | 190 | **List all pods:** 191 | 192 | ```bash 193 | kubectl get pods -n mg 194 | ``` 195 | 196 | **List all services:** 197 | 198 | ```bash 199 | kubectl get services -n mg 200 | ``` 201 | 202 | **View logs of a pod:** 203 | 204 | ```bash 205 | kubectl logs <pod-name> -n mg 206 | ``` 207 | 208 | ### Interacting with SuperMQ Services After Deployment 209 | 210 | Once you have successfully deployed SuperMQ, there are three primary ways you can interact with its services: 211 | 212 | - the web-based User Interface (UI) 213 | - the SuperMQ CLI tool (learn more in the [CLI Documentation](https://docs.magistrala.abstractmachines.fr/cli/)) 214 | - HTTP API clients (e.g., cURL, Postman) 215 | 216 | The ingress-nginx-controller handles the routing for your deployed services using Kubernetes Ingress resources. To interact with your SuperMQ UI or any other service exposed through this load balancer, the first step is to retrieve the public IP address of this load balancer. 217 | 218 | You can usually find this IP address in your cloud provider's dashboard (for example, on DigitalOcean under the "Networking" or "Load Balancers" section), or by using the following command in your terminal: 219 | 220 | ```bash 221 | kubectl get svc -A | grep LoadBalancer 222 | ``` 223 | 224 | This command searches all namespaces for services of type `LoadBalancer`. The output looks something like this: 225 | 226 | ```plaintext 227 | ingress-nginx ingress-nginx-controller LoadBalancer 10.245.192.202 138.68.126.8 80:30424/TCP,443:31752/TCP 64d 228 | ``` 229 | 230 | NOTE: The public IP in this case is `138.68.126.8`. 231 | 232 | #### Using the Web-Based UI 233 | 234 | - Once you have the public IP address, open your web browser. 235 | - In the address bar, enter the IP address followed by `/ui/login`, as shown below: 236 | 237 | ```plaintext 238 | http://138.68.126.8/ui/login 239 | ``` 240 | 241 | #### Using Postman 242 | 243 | If you prefer working with APIs, you can also interact with SuperMQ services using Postman by sending requests to the public IP address of your load balancer. For example, to create a user: 244 | 245 | ##### 1. Set Up the Postman Request 246 | 247 | - **Method:** `POST` 248 | - **URL:** `http://138.68.126.8/users` 249 | 250 | This URL points to the endpoint that handles user creation on your SuperMQ deployment. Replace `138.68.126.8` with the actual IP address or domain of your deployment if it differs. 251 | 252 | ##### 2. Set Up the Request Body 253 | 254 | Switch to the `Body` tab in Postman and select `raw` as the format. Choose `JSON` from the dropdown menu, and then enter the following JSON structure in the text area: 255 | 256 | ```json 257 | { 258 | "name": "user1", 259 | "tags": ["tag1", "tag2"], 260 | "credentials": { 261 | "identity": "user1@email.com", 262 | "secret": "12345678" 263 | }, 264 | "metadata": { 265 | "domain": "domain1" 266 | } 267 | } 268 | ``` 269 | 270 | `Send` the request. If successful, the server will respond with the details of the newly created user. 271 | 272 | For more examples, refer to this [Postman Collection](https://elements.getpostman.com/redirect?entityId=38532610-ef9a0562-b353-4d2c-8aca-a5fae35ad0ad&entityType=collection). 273 | 274 |
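If you prefer the command line over Postman, the same request can be sent with cURL, reusing the example load-balancer IP and JSON body from above:

```bash
# Create a user via the HTTP API (mirrors the Postman request above)
curl -X POST http://138.68.126.8/users \
  -H "Content-Type: application/json" \
  -d '{
    "name": "user1",
    "tags": ["tag1", "tag2"],
    "credentials": {
      "identity": "user1@email.com",
      "secret": "12345678"
    },
    "metadata": {
      "domain": "domain1"
    }
  }'
```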
## Install SuperMQ Charts (From Published Helm Repository) 275 | 276 | This method is the **standard installation** approach, where you install the SuperMQ chart directly from a Helm repository. This is quicker and ideal for end-users who do not need to modify the chart manually. 277 | 278 | ### Install Use Case 279 | 280 | This approach is suitable for: 281 | 282 | - End-users who simply want to install SuperMQ without modifying the source code. 283 | - Production environments where the chart is deployed directly from a hosted Helm repository. 284 | 285 | ### Install Steps 286 | 287 | #### 1. Add the SuperMQ Helm Repository 288 | 289 | The Helm charts are published via GitHub Pages. After installing Helm, add the SuperMQ DevOps Helm repository by running: 290 | 291 | ```bash 292 | helm repo add magistrala-devops https://absmach.github.io/devops/ 293 | helm repo update 294 | ``` 295 | 296 | For a complete list of all available flags to use with the `helm repo add [NAME] [URL] [flags]` command, run `helm repo add --help`. 297 | 298 | #### 2. Install the SuperMQ Chart 299 | 300 | ```bash 301 | helm install <release-name> magistrala-devops/magistrala [flags] 302 | ``` 303 | 304 | Replace `<release-name>` with your desired release name. For the complete list of available flags to use with the above command, run `helm install --help`. 305 | 306 | Example with release name and flag: 307 | 308 | ```bash 309 | helm install my-magistrala magistrala-devops/magistrala --version 0.14.0 310 | ``` 311 | 312 | --- 313 | 314 | #### 3. Upgrading the SuperMQ Chart 315 | 316 | To upgrade the chart to a new version or update configurations: 317 | 318 | ```bash 319 | helm upgrade <release-name> magistrala-devops/magistrala 320 | ``` 321 | 322 | --- 323 | 324 | #### 4. Uninstalling SuperMQ 325 | 326 | To uninstall the SuperMQ release: 327 | 328 | ```bash 329 | helm uninstall <release-name> -n mg 330 | ``` 331 | 332 | This will remove the SuperMQ release from the previously created `mg` namespace. Use the `--dry-run` flag to see which releases will be uninstalled without actually uninstalling them. 333 | 334 | --- 335 | 336 | ### Customizing SuperMQ Installation 337 | 338 | To override values in the chart, use either the `--values` flag and pass in a file, or use the `--set` flag and pass configuration from the command line; to force a string value, use `--set-string`. You can use `--set-file` to set individual values from a file when the value itself is too long for the command line or is dynamically generated. You can also use `--set-json` to set json values (scalars/objects/arrays) from the command line. 339 | 340 | For example, if you want to set a custom hostname for the ingress (like `example.com`) and ensure you're using the latest version of the `users` image, you can do this during installation with the following command: 341 | 342 | ```bash 343 | helm install magistrala magistrala-devops/magistrala -n mg --set ingress.hostname='example.com' --set users.image.tag='latest' 344 | ``` 345 | 346 | If SuperMQ is already installed and you want to update it with new settings (for example, changing the ingress hostname or image tag), you can use the `helm upgrade` command: 347 | 348 | ```bash 349 | helm upgrade magistrala magistrala-devops/magistrala -n mg --set ingress.hostname='example.com' --set users.image.tag='latest' 350 | ``` 351 | 352 | This will apply your changes to the existing installation. For a complete table of the configurable parameters and their default values, see [configurable parameters and their default values](https://github.com/absmach/devops/blob/master/charts/magistrala/README.md). For changes to any of the configurable parameters, equally update the documentation at `charts/magistrala/README.md` using `helm-docs`, as described in [Autogenerating Helm Chart Documentation](https://github.com/absmach/devops/blob/master/README.md). 353 | 354 |
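As an alternative to chaining `--set` flags, the same overrides can be collected in a small values file and passed with `--values`. A minimal sketch, reusing the two parameters from the commands above (the file name is arbitrary):

```yaml
# my-values.yaml - overrides for the magistrala chart
ingress:
  hostname: example.com
users:
  image:
    tag: latest
```

```bash
helm upgrade magistrala magistrala-devops/magistrala -n mg --values my-values.yaml
```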
### SuperMQ Core 355 | 356 | The SuperMQ Core includes the essential services that are installed by default: 357 | 358 | - authn 359 | - users 360 | - clients 361 | - adapter_http 362 | - adapter_mqtt 363 | - adapter_coap 364 | - ui 365 | 366 | These are the minimum required services to run SuperMQ. 367 | 368 | ### SuperMQ Add-ons 369 | 370 | SuperMQ Add-ons are optional services that are not installed by default. To enable an add-on, you need to specify it during installation. For example, to enable the InfluxDB reader and writer, you would use the following command: 371 | 372 | ```bash 373 | helm install magistrala . -n mg --set influxdb=true 374 | ``` 375 | 376 | Here's a list of available add-ons: 377 | 378 | - bootstrap 379 | - influxdb.writer 380 | - influxdb.reader 381 | - adapter_opcua 382 | - adapter_lora 383 | - twins 384 | 385 | ### Scaling Services 386 | 387 | By default, the MQTT adapter, clients, Envoy, Authn and the Message Broker services are set to scale with a replica count of 3. It's recommended to set these values according to the number of nodes in your Kubernetes cluster. For example, you can adjust the replica count with the following command: 388 | 389 | ```bash 390 | helm install magistrala . -n mg --set defaults.replicaCount=3 --set messageBroker.replicaCount=3 391 | ``` 392 | 393 | This ensures that your services scale appropriately for your environment. 394 | 395 | ### Additional Steps to Configure Ingress Controller 396 | 397 | To allow your host to send MQTT messages on ports `1883` and `8883`, you need to configure the NGINX Ingress Controller with some additional steps. 398 | 399 | #### Step 1: Configure TCP and UDP Services 400 | 401 | The NGINX Ingress Controller uses ConfigMaps to expose TCP and UDP services. The necessary ConfigMaps are included in the Helm chart in the [ingress.yaml][ingress-yaml] file, under the assumption that the ConfigMaps are located at `ingress-nginx/tcp-services` and `ingress-nginx/udp-services`. These locations are set with the `--tcp-services-configmap` and `--udp-services-configmap` flags; you can check this in the deployment of the Ingress Controller, or add them in the [args section for nginx-ingress-controller][ingress-controller-args] if they are not already specified. This is explained in the [NGINX Ingress documentation][ingress-controller-tcp-udp]. 402 | 403 | #### Step 2: Expose the Required Ports in the Ingress Service 404 | 405 | You need to expose the MQTT ports (`1883` for unencrypted and `8883` for encrypted messages) and the CoAP port (`5683` for UDP) in the NGINX Ingress Controller service. You can do that with the following command, which edits your service: 406 | 407 | `kubectl edit svc -n ingress-nginx nginx-ingress-ingress-nginx-controller` 408 | 409 | and add the following entries under `spec.ports`: 410 | 411 | ```yaml 412 | - name: mqtt 413 | port: 1883 414 | protocol: TCP 415 | targetPort: 1883 416 | - name: mqtts 417 | port: 8883 418 | protocol: TCP 419 | targetPort: 8883 420 | - name: coap 421 | port: 5683 422 | protocol: UDP 423 | targetPort: 5683 424 | ``` 425 | 426 | ## Configuring TLS & mTLS 427 | 428 | ### Generating Certificates 429 | 430 | For testing purposes, you can generate the necessary TLS certificates.
425 | 426 | ## Configuring TLS & mTLS 427 | 428 | ### Generating Certificates 429 | 430 | For testing purposes, you can generate the necessary TLS certificates. Detailed instructions are provided in the [authentication][authentication] chapter of this document. You can use [this script][makefile] to generate the certificates. After replacing all instances of `localhost` with your actual hostname, run the following commands: 431 | 432 | ```bash 433 | make ca 434 | make server_cert 435 | make client_cert KEY=<client_secret> 436 | ``` 437 | 438 | This will generate the following certificates in the `certs` folder, which you'll use to set up TLS and mTLS: 439 | 440 | ```bash 441 | ca.crt 442 | ca.key 443 | ca.srl 444 | magistrala-server.crt 445 | magistrala-server.key 446 | client.crt 447 | client.key 448 | ``` 449 | 450 | ### Creating Kubernetes Secrets 451 | 452 | Create Kubernetes secrets using those certificates by running the commands from the [secrets script][secrets]. In this example, the secrets are created in the `mg` namespace: 453 | 454 | ```bash 455 | kubectl -n mg create secret tls magistrala-server --key magistrala-server.key --cert magistrala-server.crt 456 | 457 | kubectl -n mg create secret generic ca --from-file=ca.crt 458 | ``` 459 | 460 | You can check that they were successfully created: 461 | 462 | ```bash 463 | kubectl get secrets -n mg 464 | ``` 465 | 466 | ### Configuring Ingress for TLS 467 | 468 | To secure your ingress with a TLS certificate, set the following values in your Helm configuration: 469 | 470 | - `ingress.hostname` to your hostname 471 | - `ingress.tls.hostname` to your hostname 472 | - `ingress.tls.secret` to `magistrala-server` 473 | 474 | After updating your Helm chart, your ingress will be secured with the TLS certificate. 475 | 476 | ### Configuring Ingress for mTLS 477 | 478 | For mTLS you need to set `nginx_internal.mtls.tls="magistrala-server"` and `nginx_internal.mtls.intermediate_crt="ca"`. 479 | 480 | ### Testing MQTT with mTLS 481 | 482 | You can test sending an MQTT message with the following command: 483 | 484 | ```bash 485 | mosquitto_pub -d -L mqtts://<client_id>:<client_secret>@example.com:8883/m/<domain_id>/c/<channel_id> --cert client.crt --key client.key --cafile ca.crt -m "test-message" 486 | ``` 487 | 488 | [ingress-yaml]: https://github.com/absmach/devops/blob/master/charts/mainflux/templates/ingress.yaml#L141 489 | [ingress-controller-args]: https://kubernetes.github.io/ingress-nginx/user-guide/cli-arguments/ 490 | [ingress-controller-tcp-udp]: https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/ 491 | [authentication]: ./authentication.md 492 | [makefile]: https://github.com/absmach/magistrala/blob/master/docker/ssl/Makefile 493 | [secrets]: https://github.com/absmach/devops/blob/master/charts/mainflux/secrets/secrets.sh 494 | -------------------------------------------------------------------------------- /docs/lora.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: LoRa 3 | --- 4 | 5 | 6 | Bridging with LoRaWAN Networks can be done over the [lora-adapter][lora-adapter]. This service sits between SuperMQ and [LoRa Server][lora-server] and forwards messages from one system to the other over MQTT, using the appropriate MQTT topics and message formats (JSON and SenML), i.e. respecting the APIs of both systems. 7 | 8 | LoRa Server provides the connectivity layer, especially through the [LoRa Gateway Bridge][lora-gateway] service, which abstracts the [SemTech packet-forwarder UDP protocol][semtech] into JSON over MQTT.
It also relies on the [LoRa Server][lora-server] service, which is responsible for de-duplication and handling of uplink frames received by the gateway(s), handling of the LoRaWAN MAC layer and scheduling of downlink data transmissions. Finally, the [LoRa App Server][lora-app-server] service is used to interact with the system. 9 | 10 | ## Run Lora Server 11 | 12 | Before running the `lora-adapter` you must install and run LoRa Server. First, execute the following command: 13 | 14 | ```bash 15 | go get github.com/brocaar/loraserver-docker 16 | ``` 17 | 18 | Once everything is installed, execute the following command from the LoRa Server project root: 19 | 20 | ```bash 21 | docker-compose up 22 | ``` 23 | 24 | **Troubleshooting:** SuperMQ and LoRa Server use their own MQTT brokers, which by default occupy MQTT port `1883`. If both run on the same machine, different ports must be used. You can fix this on the SuperMQ side by configuring the environment variable `SMQ_MQTT_ADAPTER_MQTT_PORT`. 25 | 26 | ## Setup LoRa Server 27 | 28 | Now that both systems are running, you must provision LoRa Server, which offers RESTful and gRPC APIs for integration with external services. You can also do it over the [LoRa App Server][lora-app-server], which is a good example of such an integration. 29 | 30 | - **Create an Organization:** To add your own Gateways to the network you must have an Organization. 31 | - **Create a Network:** Set the address of your Network-Server API that is used by LoRa App Server or other custom components interacting with LoRa Server (by default loraserver:8000). 32 | - **Create a Gateways-Profile:** In this profile you can select the radio LoRa channels and the LoRa Network Server to use. 33 | - **Create a Service-profile:** A service-profile connects an organization to a network-server and defines the features that an organization can use on this Network-Server. 34 | - **Create a Gateway:** You must set a proper ID in order for it to be discovered by LoRa Server. 35 | - **Create an Application:** This allows you to create Devices by connecting them to this application. This is equivalent to Devices connected to channels in SuperMQ. 36 | - **Create a Device-Profile:** Before creating a Device you must create a Device-Profile, where you define parameters such as the LoRaWAN MAC version (format of the device address) and the LoRaWAN regional parameters (frequency band). This allows you to create many devices using this profile. 37 | - **Create a Device:** Finally, you can create a Device. You must configure the `network session key` and `application session key` of your Device. You can generate and copy them from your device configuration, or you can use your own pre-generated keys and set them using the LoRa App Server UI. 38 | Devices connect through OTAA. Make sure that the LoRa Server device-profile uses the same release as the device. If the MAC version is 1.0.X, `application key = app_key` and `app_eui = deviceEUI`. If the MAC version is 1.1, or for ABP, both parameters will be needed: the application key and the network key. 39 | 40 | ## SuperMQ and LoRa Server 41 | 42 | Once everything is running and the LoRa Server is provisioned, execute the following command from the SuperMQ project root to run the lora-adapter: 43 | 44 | ```bash 45 | docker-compose -f docker/addons/lora-adapter/docker-compose.yml up -d 46 | ``` 47 | 48 | **Troubleshooting:** The lora-adapter subscribes to the LoRa Server MQTT broker and will fail if the connection is not established. You must ensure that the environment variable `SMQ_LORA_ADAPTER_MESSAGES_URL` is properly configured.
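For example, assuming your LoRa Server broker is reachable on `localhost:1884` and that the adapter's compose file substitutes this variable from the shell environment, the adapter could be pointed at it like this:

```bash
# override the LoRa Server broker address before starting the adapter
SMQ_LORA_ADAPTER_MESSAGES_URL=tcp://localhost:1884 \
  docker-compose -f docker/addons/lora-adapter/docker-compose.yml up -d
```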
49 | 50 | **Remark:** By default, `SMQ_LORA_ADAPTER_MESSAGES_URL` is set to `tcp://lora.mqtt.supermq.io:1883` in the [docker-compose.yml][lora-docker-compose] file of the adapter. If you run the composition without configuring this variable, you will start receiving messages from our demo server. 51 | 52 | ### Route Map 53 | 54 | The lora-adapter uses the [Redis][redis] database to create a route map between both systems. Just as SuperMQ uses Channels to connect clients, LoRa Server uses Applications to connect Devices. 55 | 56 | The lora-adapter uses the metadata of provision events emitted by the SuperMQ system to update its route map. For that, you must provision SuperMQ Channels and Clients with an extra metadata key in the JSON body of the HTTP request. It must be a JSON object with the key `lora` whose value is another JSON object. This nested JSON object should contain an `app_id` or `dev_eui` field. In this case `app_id` or `dev_eui` must be an existing LoRa application ID or device EUI: 57 | 58 | **Channel structure:** 59 | 60 | ```json 61 | { 62 | "name": "<channel_name>", 63 | "metadata": { 64 | "lora": { 65 | "app_id": "<application_id>" 66 | } 67 | } 68 | } 69 | ``` 70 | 71 | **Client structure:** 72 | 73 | ```json 74 | { 75 | "type": "device", 76 | "name": "<client_name>", 77 | "metadata": { 78 | "lora": { 79 | "dev_eui": "<device_eui>" 80 | } 81 | } 82 | } 83 | ``` 84 | 85 | #### Messaging 86 | 87 | To forward LoRa messages, the lora-adapter subscribes to the topics `applications/+/devices/+` of the LoRa Server MQTT broker. It verifies the `app_id` and the `dev_eui` of received messages. If the mapping exists, it uses the corresponding `Channel ID` and `Client ID` to sign and forward the content of the LoRa message to the SuperMQ message broker. 88 | 89 | [lora-adapter]: https://github.com/absmach/smq-contrib/tree/main/lora 90 | [lora-server]: https://www.loraserver.io 91 | [lora-gateway]: https://www.loraserver.io/lora-gateway-bridge/overview/ 92 | [semtech]: https://github.com/Lora-net/packet_forwarder/blob/master/PROTOCOL.TXT 93 | [lora-app-server]: https://www.loraserver.io/lora-app-server/overview/ 94 | [lora-docker-compose]: https://github.com/absmach/smq-contrib/blob/main/docker/lora-adapter/docker-compose.yml 95 | [redis]: https://redis.io/ 96 | -------------------------------------------------------------------------------- /docs/opcua.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: OPC-UA 3 | --- 4 | 5 | 6 | OPC Unified Architecture (OPC-UA) is a communication protocol and framework that is widely used in industrial automation and the Industrial Internet of Things (IIoT). It provides a standard platform for connecting industrial devices and systems, allowing them to share data and information seamlessly. Data from the devices is sent to the OPC-UA Server where a client can consume it. 7 | 8 | Bridging with an OPC-UA Server can be done over the [opcua-adapter][opcua-adapter], which is typically an OPC-UA client built on top of SuperMQ. This service sits between SuperMQ and an [OPC-UA Server][opcua-arch] and consumes/sends messages to and from the OPC-UA server. 9 | 10 | ## Run OPC-UA Server 11 | 12 | The OPC-UA Server provides the connectivity layer. It allows various methods to read information from the OPC-UA server and its nodes.
The current version of the opcua-adapter only supports the `Browse` and `Subscribe` methods, which are typically used to perform singular browse commands to get data from the OPC-UA server, or continuous subscriptions to data on the nodes. [Public OPC-UA test servers][public-opcua] are available for testing of OPC-UA clients and can be used for development and test purposes. [Open62541][open62541] is recommended if you would like to build a simple custom server or to use some of their pre-built servers. 13 | 14 | To get data from a custom device to SuperMQ's OPC-UA adapter, the server has to be configured and the node ID of the device has to be known. The node ID is a unique identifier of the device's data point. The node ID is used to subscribe to the data point and to get the data from the specific device. Additionally, the server URI is needed to connect to the specified server. The server URI is the address of the OPC-UA server, and has a prefix of `opc.tcp://` followed by the server's IP address and port number. 15 | 16 | ## SuperMQ OPC-UA Adapter 17 | 18 | Execute the following command from the SuperMQ project root to run the opcua-adapter: 19 | 20 | ```bash 21 | docker-compose -f docker/addons/opcua-adapter/docker-compose.yml up -d 22 | ``` 23 | 24 | The OPC-UA adapter can connect to multiple OPC-UA servers and subscribe to multiple nodes on each server, as long as the unique node ID of the device is provided along with the server URI. 25 | 26 | ### Architecture 27 | 28 | | ![OPC-UA](./img/opcua/opcua.png) | 29 | | :--------------------------------------: | 30 | | Figure 1 - OPC-UA Adapter Architecture | 31 | 32 | SuperMQ's OPC-UA adapter is a custom-built OPC-UA client that can perform `Browse` and `Subscribe` operations on an OPC-UA server. Browse operations are used to get instantaneous data from a node within the server. Once data is acquired, it is forwarded to the NATS message broker. Additionally, the OPC-UA adapter exposes a `/browse` HTTP endpoint that users in SuperMQ can use to trigger the adapter to perform `Browse` commands on an OPC-UA server, whose configuration is passed as part of the query. 33 | 34 | The adapter sources client and channel events from the events store and uses these to create route-maps between SuperMQ and the OPC-UA server. The route-map is important as it maps the OPC-UA server details and devices to the clients and channels on SuperMQ. Once route-maps are successfully created and stored in the Redis database, the adapter can subscribe to the OPC-UA server and forward the messages to the NATS message broker. Subscriptions are maintained in the Redis DB and are updated when new events are sourced from the clients service. 35 | 36 | ### Route Map 37 | 38 | The opcua-adapter uses the [Redis][redis] database to create a route-map between SuperMQ and an OPC-UA Server. While SuperMQ uses Client and Channel IDs to sign messages, OPC-UA uses the node ID (a node namespace and node identifier combination) and the server URI. The adapter route-map associates a `Client ID` with a `Node ID` and a `Channel ID` with a `Server URI`. 39 | 40 | **Note:** The SuperMQ OPC-UA adapter parses the node ID in a string format, which may not work with all OPC-UA servers. The node ID should be in the format of `ns=0;i=84`, where `ns` is the namespace and `i` is the identifier. If your OPC-UA server uses a different format, you may need to modify the adapter to parse the node ID correctly. This can be done by modifying the query parameters of the browse request, specifically the value `identifierType`.
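For reference, standard OPC-UA node ID notation supports several identifier types; the values below are illustrative examples, not nodes that exist on any particular server:

```txt
ns=0;i=84                                      # numeric identifier (i); the conventional root node
ns=1;s=myUintValue                             # string identifier (s)
ns=2;g=09087e75-8e5e-499b-954f-f2a9603db28a    # GUID identifier (g)
ns=3;b=M/RbKBsRVkePCePcx24oRA==                # opaque/ByteString identifier (b)
```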
41 | 42 | The opcua-adapter uses the metadata of provision events emitted by the SuperMQ system to update its route map. For that, you must provision SuperMQ Channels and Clients with an extra metadata key in the JSON body of the HTTP request. It must be a JSON object with the key `opcua` whose value is another JSON object. This nested JSON object should contain `node_id` or `server_uri` that corresponds to an existing OPC-UA `Node ID` or `Server URI`: 43 | 44 | **Channel structure:** 45 | 46 | The created channel should have metadata containing the OPC-UA server URI from which data should be fetched. Once a channel is created, the adapter automatically creates the channel route map and stores it in the Redis DB. 47 | 48 | ```json 49 | { 50 | "name": "<channel_name>", 51 | "metadata": { 52 | "opcua": { 53 | "server_uri": "<server_uri>" 54 | } 55 | } 56 | } 57 | ``` 58 | 59 | **Client structure:** 60 | 61 | Similar to a channel, when a client is created, metadata should be created and it should contain the node ID of the specific device that will send data to the server. This is also stored in the Redis DB as a route map. 62 | 63 | ```json 64 | { 65 | "name": "<client_name>", 66 | "metadata": { 67 | "opcua": { 68 | "node_id": "<node_id>" 69 | } 70 | } 71 | } 72 | ``` 73 | 74 | ### Subscribe 75 | 76 | To create an OPC-UA subscription, the user should connect the Client to the Channel. This triggers an event through the event store which in turn causes the adapter to automatically create the connection, enable the Redis route-map and run a subscription to the `server_uri` and `node_id` defined in the Client and Channel metadata. 77 | 78 | The subscription details are stored locally and will be maintained until the Client or Channel is deleted, or the channel-client connection is disabled. The adapter will also listen for any changes in the Client and Channel metadata and update the subscription accordingly. Once data is available from the OPC-UA server, it is published to the OPC-UA adapter which then forwards it to the NATS message broker. 79 | 80 | ### Browse 81 | 82 | The opcua-adapter exposes a `/browse` HTTP endpoint accessible with method `GET` and configurable through the HTTP query parameters `server`, `namespace`, `identifier`, and `identifierType`. The server URI, the node namespace and the node identifier represent the parent node and are used to fetch the list of available children nodes starting from the given one. By default the root node ID (node namespace and node identifier combination) of an OPC-UA server is `ns=0;i=84`. It's also the default value used by the opcua-adapter to do the browsing if only the server URI is specified in the HTTP query. 83 | 84 | **Note:** Since different OPC-UA servers use different types of node IDs, the `identifierType` parameter is used to specify the type of the node identifier. The default value is `string`, which is used to parse the node ID in the format of `ns=0;i=84`. If the node ID is in a different format, the `identifierType` parameter should be set to the appropriate value.
85 | 86 | Sample request: 87 | 88 | ```bash 89 | curl -X GET "http://supermq-opcua:8188/browse?server=opc.tcp://192.168.1.12:4840&namespace=1&identifier=myUintValue&identifierType=string" 90 | ``` 91 | 92 | Expected response: 93 | 94 | ```json 95 | { 96 | "nodes":[ 97 | { 98 | "NodeID":"ns=1;s=myUintValue", 99 | "DataType":"uint32", 100 | "Description":"myUintValue", 101 | "Unit":"", 102 | "Scale":"", 103 | "BrowseName":"myUintValue" 104 | } 105 | ] 106 | } 107 | ``` 108 | 109 | ### Messaging 110 | 111 | To forward OPC-UA messages, the opcua-adapter subscribes to the Node ID of an OPC-UA Server URI. It verifies the `server_uri` and the `node_id` of received messages. If the mapping exists, it uses the corresponding `Channel ID` and `Client ID` to sign and forward the content of the OPC-UA message to the SuperMQ message broker. If the mapping or the connection between the Client and the Channel does not exist, the subscription stops. 112 | 113 | ### Sample Use Case 114 | 115 | The OPC-UA adapter can be used in an industrial setup to monitor process values from the different industrial devices and machines. The industrial devices which are controlled by controllers such as PLCs (Programmable Logic Controllers) send data to the OPC-UA server over TCP/IP, with each device containing a specific node ID. 116 | 117 | Clients on SuperMQ can be created to represent these devices and the channels can be created to represent the data points on the devices. The OPC-UA adapter can then be used to subscribe to the OPC-UA server and forward the data to the NATS message broker. This data can then be consumed by other services in the SuperMQ system, and further processing can be done if needed. 118 | 119 | [opcua-adapter]: https://github.com/absmach/smq-contrib/tree/main/opcua 120 | [opcua-arch]: https://en.wikipedia.org/wiki/OPC_Unified_Architecture 121 | [public-opcua]: https://github.com/node-opcua/node-opcua/wiki/publicly-available-OPC-UA-Servers-and-Clients 122 | [redis]: https://redis.io/ 123 | [open62541]: https://www.open62541.org/doc/master/index.html 124 | -------------------------------------------------------------------------------- /docs/roles.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Roles, Role Members, and Role Actions 3 | --- 4 | 5 | 6 | ## **Roles** 7 | 8 | A role is a collection of permissions (actions) that define what members of the role are allowed to do within a specific entity. Each entity (like a `Client`, `Channel`, `Group`, or `Domain`) can have multiple roles, each with its own members and actions. 9 | 10 | - **Role Members**: These are users assigned to a role. Members are the only users allowed to perform the role's actions on the entity. 11 | - **Role Actions**: These are permissions defining what members of the role can do. For example, actions can include `read`, `update`, `delete`, or more specific actions like `publish` or `connect_to_channel`. **Refer to `authz-spec.md` for the available actions for each entity type.** 12 | 13 | --- 14 | 15 | ## **Base URL** 16 | 17 | All API requests use the base URL: 18 | `http://localhost/<entity_type>/<entity_id>/` 19 | 20 | Replace `<entity_type>` with one of the entity types (`clients`, `channels`, `groups`, `domains`) and `<entity_id>` with the ID of the specific entity.
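As a concrete skeleton, a request against a channel's roles API then looks like this; the `Bearer` authorization header is an assumption based on the other SuperMQ HTTP APIs, the IDs are placeholders, and the payload echoes the create-role example below:

```bash
curl -X POST "http://localhost/channels/<channel_id>/role" \
  -H "Authorization: Bearer <user_token>" \
  -H "Content-Type: application/json" \
  -d '{"role_name": "member", "optional_actions": ["read"], "optional_members": ["user_1"]}'
```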
21 | 22 | --- 23 | 24 | ### **Endpoints and Examples** 25 | 26 | ### **1. Create a Role** 27 | 28 | **POST /role** 29 | Creates a role for the given entity. 30 | 31 | **Request Body**: 32 | 33 | ```json 34 | { 35 | "role_name": "member", 36 | "optional_actions": ["read"], 37 | "optional_members": ["user_1"] 38 | } 39 | ``` 40 | 41 | #### Example for a `Channel` 42 | 43 | **Request**: 44 | `POST http://localhost/channels/<channel_id>/role` 45 | 46 | **Response**: 47 | 48 | ```json 49 | { 50 | "role_id": "id_xxxxx", 51 | "role_name": "member", 52 | "actions": ["read"], 53 | "members": ["user_1"] 54 | } 55 | ``` 56 | 57 | --- 58 | 59 | ### **2. List Roles** 60 | 61 | **GET /roles** 62 | Retrieves all roles for the given entity. 63 | 64 | #### List Roles Example for a `Client` 65 | 66 | **Request**: 67 | `GET http://localhost/clients/<client_id>/roles` 68 | 69 | **Response**: 70 | 71 | ```json 72 | [ 73 | { 74 | "role_id": "xxxxx", 75 | "role_name": "Admin", 76 | "actions": ["read", "update", "delete"], 77 | "members": ["user_1", "user_2"] 78 | }, 79 | { 80 | "role_id": "xxxxx", 81 | "role_name": "Viewer", 82 | "actions": ["read"], 83 | "members": ["user_3"] 84 | } 85 | ] 86 | ``` 87 | 88 | --- 89 | 90 | ### **3. Retrieve a Role** 91 | 92 | **GET /roles/`role_id`** 93 | Fetches details of a specific role. 94 | 95 | #### Retrieve a Role Example for a `Group` 96 | 97 | **Request**: 98 | `GET http://localhost/groups/<group_id>/roles/<role_id>` 99 | 100 | **Response**: 101 | 102 | ```json 103 | { 104 | "role_id": "xxxxx", 105 | "role_name": "Admin", 106 | "actions": ["read", "update", "delete"], 107 | "members": ["user_1", "user_2"] 108 | } 109 | ``` 110 | 111 | --- 112 | 113 | ### **4. Delete a Role** 114 | 115 | **DELETE /roles/`role_id`** 116 | Deletes the specified role. 117 | 118 | #### Delete a Role Example for a `Domain` 119 | 120 | **Request**: 121 | `DELETE http://localhost/domains/<domain_id>/roles/<role_id>` 122 | 123 | --- 124 | 125 | ### **5. Add Role Members** 126 | 127 | **POST /roles/`role_id`/members** 128 | Adds members to the specified role. 129 | 130 | **Request Body**: 131 | 132 | ```json 133 | { 134 | "members": ["user_4"] 135 | } 136 | ``` 137 | 138 | #### Add Role Members Example for a `Client` 139 | 140 | **Request**: 141 | `POST http://localhost/clients/<client_id>/roles/<role_id>/members` 142 | 143 | **Request Body**: 144 | 145 | ```json 146 | { 147 | "members": ["user_4"] 148 | } 149 | ``` 150 | 151 | --- 152 | 153 | ### **6. List Role Members** 154 | 155 | **GET /roles/`role_id`/members** 156 | Retrieves all members of the specified role. 157 | 158 | #### List Role Members Example for a `Channel` 159 | 160 | **Request**: 161 | `GET http://localhost/channels/<channel_id>/roles/<role_id>/members` 162 | 163 | **Response**: 164 | 165 | ```json 166 | 167 | ``` 168 | 169 | --- 170 | 171 | ### **7. Delete Specific Role Members** 172 | 173 | **POST /roles/`role_id`/members/delete** 174 | Deletes specific members from the role. 175 | 176 | **Request Body**: 177 | 178 | ```json 179 | { 180 | "members": ["user_4"] 181 | } 182 | ``` 183 | 184 | #### Delete Specific Role Members Example for a `Group` 185 | 186 | **Request**: 187 | `POST http://localhost/groups/<group_id>/roles/<role_id>/members/delete` 188 | 189 | **Response**: 190 | 191 | ```json 192 | { 193 | "message": "Members removed successfully" 194 | } 195 | ``` 196 | 197 | --- 198 | 199 | ### **8. Delete All Role Members** 200 | 201 | **POST /roles/`role_id`/members/delete-all** 202 | Removes all members from the role. 203 | 204 | #### Delete All Role Members Example for a `Domain` 205 | 206 | **Request**: 207 | `POST http://localhost/domains/<domain_id>/roles/<role_id>/members/delete-all` 208 | 209 | --- 210 | 211 | ### **9. Add Role Actions**
212 | 213 | **POST /roles/`role_id`/actions** 214 | Adds actions to the specified role. 215 | 216 | **Request Body**: 217 | 218 | ```json 219 | { 220 | "actions": ["publish"] 221 | } 222 | ``` 223 | 224 | #### Add Role Actions Example for a `Client` 225 | 226 | **Request**: 227 | `POST http://localhost/clients/<client_id>/roles/<role_id>/actions` 228 | 229 | --- 230 | 231 | ### **10. List Role Actions** 232 | 233 | **GET /roles/`role_id`/actions** 234 | Retrieves all actions of the specified role. 235 | 236 | #### List Role Actions Example for a `Channel` 237 | 238 | **Request**: 239 | `GET http://localhost/channels/<channel_id>/roles/<role_id>/actions` 240 | 241 | **Response**: 242 | 243 | ```json 244 | ["read", "update", "publish"] 245 | ``` 246 | 247 | --- 248 | 249 | ### **11. Delete Specific Role Actions** 250 | 251 | **POST /roles/`role_id`/actions/delete** 252 | Deletes specific actions from the role. 253 | 254 | #### Delete Specific Role Actions Example for a `Group` 255 | 256 | **Request**: 257 | `POST http://localhost/groups/<group_id>/roles/<role_id>/actions/delete` 258 | 259 | --- 260 | 261 | ### **12. Delete All Role Actions** 262 | 263 | **POST /roles/`role_id`/actions/delete-all** 264 | Removes all actions from the role. 265 | 266 | #### Delete All Role Actions Example for a `Domain` 267 | 268 | **Request**: 269 | `POST http://localhost/domains/<domain_id>/roles/<role_id>/actions/delete-all` 270 | -------------------------------------------------------------------------------- /docs/security.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Security 3 | --- 4 | 5 | 6 | ## Server Configuration 7 | 8 | ### Users 9 | 10 | If either the cert or key is not set, the server will use insecure transport. 11 | 12 | `SMQ_USERS_SERVER_CERT` the path to the server certificate in PEM format. 13 | 14 | `SMQ_USERS_SERVER_KEY` the path to the server key in PEM format. 15 | 16 | ### Clients 17 | 18 | If either the cert or key is not set, the server will use insecure transport. 19 | 20 | `SMQ_CLIENTS_SERVER_CERT` the path to the server certificate in PEM format. 21 | 22 | `SMQ_CLIENTS_SERVER_KEY` the path to the server key in PEM format. 23 | 24 | ### Standalone mode 25 | 26 | Sometimes it makes sense to run Clients as a standalone service to reduce network traffic or simplify deployment. This means that the Clients service operates using only a single user and is able to authorize it without gRPC communication with the Auth service. When running Clients in standalone mode, the `Auth` and `Users` services can be omitted from the deployment. 27 | To run the service in standalone mode, set `SMQ_CLIENTS_STANDALONE_EMAIL` and `SMQ_CLIENTS_STANDALONE_TOKEN`. 28 | 29 | ## Client Configuration 30 | 31 | If you wish to secure the gRPC connection to the `Clients` and `Users` services, you must define the CAs that you trust. This does not support mutual certificate authentication. 32 | 33 | ### Adapter Configuration 34 | 35 | `SMQ_HTTP_ADAPTER_CA_CERTS`, `SMQ_MQTT_ADAPTER_CA_CERTS`, `SMQ_WS_ADAPTER_CA_CERTS`, `SMQ_COAP_ADAPTER_CA_CERTS` - the path to a file that contains the CAs in PEM format. If not set, the default connection will be insecure. If it fails to read the file, the adapter will fail to start up. 36 | 37 | ### Clients Configuration 38 | 39 | `SMQ_CLIENTS_CA_CERTS` - the path to a file that contains the CAs in PEM format. If not set, the default connection will be insecure. If it fails to read the file, the service will fail to start up.
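To illustrate how these settings fit together, the following sketch enables TLS on the Clients service and points the adapters at the signing CA; the file paths are placeholders, not defaults:

```bash
# server side: the Clients service presents this certificate
export SMQ_CLIENTS_SERVER_CERT=/etc/supermq/certs/clients-server.crt
export SMQ_CLIENTS_SERVER_KEY=/etc/supermq/certs/clients-server.key

# client side: adapters and services must trust the CA that signed it
export SMQ_HTTP_ADAPTER_CA_CERTS=/etc/supermq/certs/ca.crt
export SMQ_MQTT_ADAPTER_CA_CERTS=/etc/supermq/certs/ca.crt
export SMQ_CLIENTS_CA_CERTS=/etc/supermq/certs/ca.crt
```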
40 | 41 | ## Securing PostgreSQL Connections 42 | 43 | By default, SuperMQ will connect to Postgres using insecure transport. 44 | If a secured connection is required, you can select the SSL mode and set paths to any extra certificates and keys needed. 45 | 46 | `SMQ_USERS_DB_SSL_MODE` the SSL connection mode for Users. 47 | `SMQ_USERS_DB_SSL_CERT` the path to the certificate file for Users. 48 | `SMQ_USERS_DB_SSL_KEY` the path to the key file for Users. 49 | `SMQ_USERS_DB_SSL_ROOT_CERT` the path to the root certificate file for Users. 50 | 51 | `SMQ_CLIENTS_DB_SSL_MODE` the SSL connection mode for Clients. 52 | `SMQ_CLIENTS_DB_SSL_CERT` the path to the certificate file for Clients. 53 | `SMQ_CLIENTS_DB_SSL_KEY` the path to the key file for Clients. 54 | `SMQ_CLIENTS_DB_SSL_ROOT_CERT` the path to the root certificate file for Clients. 55 | 56 | Supported database connection modes are: `disabled` (default), `required`, `verify-ca` and `verify-full`. 57 | 58 | ## Securing gRPC 59 | 60 | By default, gRPC communication is not secure, as the SuperMQ system is most often run in a private network behind a reverse proxy. 61 | 62 | However, TLS can be activated and configured. 63 | -------------------------------------------------------------------------------- /docs/smq-contrib.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: SMQ-Contrib Repository 3 | --- 4 | 5 | 6 | The **SuperMQ-Contrib** repository serves as a collection of additional services, tools, and extensions that complement the SuperMQ platform. 7 | These contributions include features that did not make it into the main SuperMQ platform but are invaluable for specific use cases. 8 | The repository acts as a playground for exploring and testing new ideas and contributions to the ecosystem. 9 | 10 | This repository is an excellent starting point for developers looking to contribute new features or experiment with custom services and integrations for SuperMQ. 11 | 12 | ## Available Services in smq-contrib 13 | 14 | ### LoRa 15 | 16 | The **[LoRa Adapter][lora]** bridges SuperMQ with LoRaWAN networks. 17 | It forwards messages between SuperMQ and a LoRaWAN Server using MQTT while adhering to JSON and SenML formats. 18 | This adapter is ideal for users integrating low-power, wide-area devices into the SuperMQ ecosystem. 19 | 20 | - [Learn more about the LoRa Adapter](./lora.md) 21 | 22 | --- 23 | 24 | ### OPC-UA 25 | 26 | The **[OPC-UA Adapter][opcua]** serves as a bridge between SuperMQ and OPC-UA servers, enabling seamless communication with industrial devices. 27 | It supports browse and subscription methods for retrieving data from nodes on the OPC-UA server and forwards this data to the SuperMQ platform. 28 | 29 | - [Learn more about the OPC-UA Adapter](./opcua.md) 30 | 31 | --- 32 | 33 | ### Twins Service 34 | 35 | The **[Twins Service][twins]** introduces the concept of digital twins to SuperMQ. 36 | Digital twins provide a unified, abstract representation of complex real-world systems. 37 | The Twins Service maintains a history of system states, definitions, and metadata, offering an enhanced way to monitor and manage interconnected devices. 38 | 39 | - [Learn more about the Twins Service](./twins.md) 40 | 41 | --- 42 | 43 | ### Readers 44 | 45 | The **[Readers Service][readers]** is designed to persist SuperMQ messages into various databases, providing robust storage options for IoT data.
Currently, the following readers are available: 46 | 47 | - **[Cassandra Reader][cassandra]**: Integrates with Apache Cassandra to store and query large volumes of IoT data in a distributed, highly available manner. 48 | - **[InfluxDB Reader][influx]**: Connects with InfluxDB for efficient time-series data storage and real-time queries. 49 | - **[MongoDB Reader][mongodb]**: Leverages MongoDB for storing structured and semi-structured IoT data with powerful query capabilities. 50 | 51 | These readers are implemented as independent services, each designed to interface with the respective database while maintaining compatibility with the SuperMQ messaging infrastructure. 52 | 53 | - [Learn more about the Readers Service](./messaging.md) 54 | 55 | --- 56 | 57 | ### Consumers 58 | 59 | **[Consumers][consumers]** work as specialized services that extract messages from the SuperMQ platform and act upon them, helping integrate SuperMQ with broader data-processing workflows. 60 | 61 | --- 62 | 63 | ## Contribute to smq-contrib 64 | 65 | The **SuperMQ-Contrib** repository is a dynamic and collaborative space for developers. If you have an idea for an additional service or integration, this is the perfect place to start. 66 | 67 | - **Add your own contributions**: Developers are encouraged to fork the repository, experiment with ideas, and submit pull requests for review. 68 | - **Collaborate with the community**: Join the discussion and help improve existing contributions. 69 | 70 | Visit the [SuperMQ-Contrib GitHub Repository](https://github.com/absmach/smq-contrib) to get started! 71 | 72 | ## Conclusion 73 | 74 | The **SuperMQ-Contrib** repository complements the SuperMQ platform by extending its functionality and fostering innovation. Whether you are a contributor or an end-user, the tools and services in this repository enable you to enhance your IoT infrastructure and explore advanced use cases. 75 | 76 | [lora]: https://github.com/absmach/smq-contrib/tree/main/lora 77 | [opcua]: https://github.com/absmach/smq-contrib/tree/main/opcua 78 | [twins]: https://github.com/absmach/smq-contrib/tree/main/twins 79 | [mongodb]: https://github.com/absmach/smq-contrib/tree/main/readers/mongodb 80 | [cassandra]: https://github.com/absmach/smq-contrib/tree/main/readers/cassandra 81 | [influx]: https://github.com/absmach/smq-contrib/tree/main/readers/influxdb 82 | [readers]: https://github.com/absmach/smq-contrib/tree/main/readers 83 | [consumers]: https://github.com/absmach/smq-contrib/tree/main/consumers 84 | -------------------------------------------------------------------------------- /docs/storage.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Storage 3 | --- 4 | 5 | 6 | SuperMQ supports various storage databases in which messages are stored: 7 | 8 | - CassandraDB 9 | - MongoDB 10 | - InfluxDB 11 | - PostgreSQL 12 | - Timescale 13 | 14 | These storages are activated via docker-compose add-ons. 15 | 16 | The `/docker` folder contains an `addons` directory. This directory is used for various services that are not core to the SuperMQ platform but could be used for providing additional features. 17 | 18 | In order to run these services, core services, as well as the network from the core composition, should already be running.
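For example, assuming the standard repository layout, the core composition can be brought up first, after which any add-on composition can be started (the compose file path is an assumption):

```bash
# start the SuperMQ core services and network
docker-compose -f docker/docker-compose.yml up -d
```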
19 | 20 | ## Writers 21 | 22 | Writers provide an implementation of various `message writers`. Message writers are services that consume SuperMQ messages, transform them to the desired format and store them in a specific data store. The path of the configuration file can be set using the following environment variables: `SMQ_CASSANDRA_WRITER_CONFIG_PATH`, `SMQ_POSTGRES_WRITER_CONFIG_PATH`, `SMQ_INFLUX_WRITER_CONFIG_PATH`, `SMQ_MONGO_WRITER_CONFIG_PATH` and `SMQ_TIMESCALE_WRITER_CONFIG_PATH`. 23 | 24 | ### Subscriber config 25 | 26 | Each writer can filter messages based on the subjects list that is set in the `config.toml` configuration file. If you want to listen on all subjects, just set the field `subjects` in the `[subscriber]` section to `["channels.>"]`; otherwise, pass the list of subjects. Here is an example: 27 | 28 | ```toml 29 | [subscriber] 30 | subjects = ["channels.*.messages.bedroom.temperature","channels.*.messages.bedroom.humidity"] 31 | ``` 32 | 33 | Regarding the [Subtopics Section][subtopic] in the messaging page, the example `channels/<channel_id>/messages/bedroom/temperature` can be filtered as `"channels.*.messages.bedroom.temperature"`. The formatting of this filtering list is determined by the format of the default message broker, NATS ([Subject-Based Messaging][nats-subject] & [Wildcards][nats-wildcards]). 34 | 35 | ### Transformer config 36 | 37 | There are two types of transformers: SenML and JSON. The transformer type is set in the configuration file. 38 | 39 | For the SenML transformer, the supported message payload formats are SenML+CBOR and SenML+JSON. They are configurable over the `content_type` field in the `[transformer]` section and expect the `application/senml+json` or `application/senml+cbor` formats. Here is an example: 40 | 41 | ```toml 42 | [transformer] 43 | format = "senml" 44 | content_type = "application/senml+json" 45 | ``` 46 | 47 | Usually, the payload of an IoT message contains the message time. It can be in different formats (like base time and record time in the case of SenML) and the time field can be under an arbitrary key. We would usually want to map that time to the SuperMQ Message field `Created`, and for that reason we need to configure the Transformer to read the field, parse it using the proper format and location (if the device's time differs from the service time), and map it to the SuperMQ Message. 48 | 49 | For the JSON transformer you can configure `time_fields` in the `[transformer]` section to use arbitrary fields from the JSON message payload as the timestamp. `time_fields` is represented by an array of objects with the fields `field_name`, `field_format` and `location`, which represent respectively the name of the JSON key to use as the timestamp, the time format to use for the field value, and the time location. Here is an example: 50 | 51 | ```toml 52 | [transformer] 53 | format = "json" 54 | time_fields = [{ field_name = "seconds_key", field_format = "unix", location = "UTC"}, 55 | { field_name = "millis_key", field_format = "unix_ms", location = "UTC"}, 56 | { field_name = "micros_key", field_format = "unix_us", location = "UTC"}, 57 | { field_name = "nanos_key", field_format = "unix_ns", location = "UTC"}] 58 | ``` 59 | 60 | The JSON transformer can be used for any JSON payload. For messages that contain a _JSON array as the root element_, the JSON Transformer normalizes the data: it creates a separate JSON message for each JSON object in the root. In order to be processed and stored properly, JSON messages need to contain message format information. For the sake of simplicity, nested JSON objects are flattened to a single JSON object in InfluxDB, using composite keys separated by the `/` separator. This implies that the separator character (`/`) _is not allowed in the JSON object key_ while using InfluxDB.
Outside of InfluxDB, the separator character (`/`) is permitted in JSON object keys, since the other [Writer][writers] types do not flatten nested JSON objects. For example, the following JSON object: 61 | 62 | ```json 63 | { 64 | "name": "name", 65 | "id": 8659456789564231564, 66 | "in": 3.145, 67 | "alarm": true, 68 | "ts": 1571259850000, 69 | "d": { 70 | "tmp": 2.564, 71 | "hmd": 87, 72 | "loc": { 73 | "x": 1, 74 | "y": 2 75 | } 76 | } 77 | } 78 | ``` 79 | 80 | for InfluxDB will be transformed to: 81 | 82 | ```json 83 | { 84 | "name": "name", 85 | "id": 8659456789564231564, 86 | "in": 3.145, 87 | "alarm": true, 88 | "ts": 1571259850000, 89 | "d/tmp": 2.564, 90 | "d/hmd": 87, 91 | "d/loc/x": 1, 92 | "d/loc/y": 2 93 | } 94 | ``` 95 | 96 | while for other Writers it will preserve its original format. 97 | 98 | The message format is stored in _the subtopic_. It's the last part of the subtopic. In the example: 99 | 100 | ```txt 101 | http://localhost:8008/channels/<channel_id>/messages/home/temperature/myFormat 102 | ``` 103 | 104 | the message format is `myFormat`. It can be any valid subtopic name; the JSON transformer is format-agnostic. The format is used by the JSON message consumers so that they can process the message properly. If the format is not present (i.e. the message subtopic is empty), the JSON Transformer will report an error. Message writers will store the message(s) in the table/collection/measurement (depending on the underlying database) with the name of the format (which in the example is `myFormat`). SuperMQ writers will try to save any format received (whether it will be successful depends on the writer implementation and the underlying database), but it's recommended that publishers don't send different formats to the same subtopic. 105 | 106 | ### InfluxDB, InfluxDB Writer 107 | 108 | From the project root execute the following command: 109 | 110 | ```bash 111 | docker-compose -f docker/addons/influxdb-writer/docker-compose.yml up -d 112 | ``` 113 | 114 | This will install and start: 115 | 116 | - [InfluxDB][influxdb] - time series database 117 | - InfluxDB writer - message repository implementation for InfluxDB 118 | 119 | These new services will take up some additional ports: 120 | 121 | - 8086 by InfluxDB 122 | - 9006 by the InfluxDB writer service 123 | 124 | To access the Influx UI, navigate to `http://localhost:8086` and log in with username `supermq` and password `supermq`. 125 | 126 | ### Cassandra and Cassandra Writer 127 | 128 | ```bash 129 | ./docker/addons/cassandra-writer/init.sh 130 | ``` 131 | 132 | _Please note that Cassandra may not be suitable for your testing environment because of its high system requirements._ 133 | 134 | ### MongoDB and MongoDB Writer 135 | 136 | ```bash 137 | docker-compose -f docker/addons/mongodb-writer/docker-compose.yml up -d 138 | ``` 139 | 140 | MongoDB default port (27017) is exposed, so you can use various tools for database inspection and data visualization. 141 | 142 | ### PostgreSQL and PostgreSQL Writer 143 | 144 | ```bash 145 | docker-compose -f docker/addons/postgres-writer/docker-compose.yml up -d 146 | ``` 147 | 148 | Postgres default port (5432) is exposed, so you can use various tools for database inspection and data visualization. 149 | 150 | ### Timescale and Timescale Writer 151 | 152 | ```bash 153 | docker-compose -f docker/addons/timescale-writer/docker-compose.yml up -d 154 | ``` 155 | 156 | Timescale default port (5432) is exposed, so you can use various tools for database inspection and data visualization.
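Pulling the subscriber and transformer settings together, a complete writer `config.toml` might look like the following sketch, built only from the fields documented above:

```toml
[subscriber]
# listen on all channels; narrow this list to filter by subtopic
subjects = ["channels.>"]

[transformer]
format = "senml"
content_type = "application/senml+json"
```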
157 | 158 | ## Readers 159 | 160 | Readers provide an implementation of various `message readers`. Message readers are services that consume normalized (`SenML`-formatted) SuperMQ messages from data storage and open an HTTP API for message consumption. It is assumed that the corresponding writer is installed before the reader. 161 | 162 | Each of the Reader services exposes the same [HTTP API][readers-api] for fetching messages on its default port. 163 | 164 | To read sent messages on the channel with ID `channel_id`, send a `GET` request to `/channels/<channel_id>/messages` with a client access token in the `Authorization` header. That client must be connected to the channel with `channel_id`. 165 | 166 | The response should look like this: 167 | 168 | ```http 169 | HTTP/1.1 200 OK 170 | Content-Type: application/json 171 | Date: Tue, 18 Sep 2018 18:56:19 GMT 172 | Content-Length: 228 173 | 174 | { 175 | "messages": [ 176 | { 177 | "Channel": 1, 178 | "Publisher": 2, 179 | "Protocol": "mqtt", 180 | "Name": "name:voltage", 181 | "Unit": "V", 182 | "Value": 5.6, 183 | "Time": 48.56 184 | }, 185 | { 186 | "Channel": 1, 187 | "Publisher": 2, 188 | "Protocol": "mqtt", 189 | "Name": "name:temperature", 190 | "Unit": "C", 191 | "Value": 24.3, 192 | "Time": 48.56 193 | } 194 | ] 195 | } 196 | ``` 197 | 198 | Note that you will receive only those messages that were sent by the authorization token's owner. You can specify the `offset` and `limit` parameters in order to fetch a specific group of messages. An example HTTP request looks like: 199 | 200 | ```bash 201 | curl -s -S -i -H "Authorization: Client <client_token>" http://localhost:<service_port>/channels/<channel_id>/messages?offset=0&limit=5&format=<format> 202 | ``` 203 | 204 | If you don't provide the `offset` and `limit` parameters, default values will be used instead: 0 for `offset` and 10 for `limit`. The `format` parameter indicates the last subtopic of the message. As indicated under the [`Writers`][writers] section, the message format is stored in the subtopic as the last part of the subtopic. In the example: 205 | 206 | ```txt 207 | http://localhost:<service_port>/channels/<channel_id>/messages/home/temperature/myFormat 208 | ``` 209 | 210 | the message format is `myFormat` and the value for `format=` is `format=myFormat`.
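For instance, to page through the five newest `myFormat` messages from the example above, with the port, channel ID and token as placeholders to fill in:

```bash
curl -s -S -i -H "Authorization: Client <client_token>" \
  "http://localhost:<service_port>/channels/<channel_id>/messages?offset=0&limit=5&format=myFormat"
```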
211 | 212 | ### InfluxDB Reader 213 | 214 | To start InfluxDB reader, execute the following command: 215 | 216 | ```bash 217 | docker-compose -f docker/addons/influxdb-reader/docker-compose.yml up -d 218 | ``` 219 | 220 | ### Cassandra Reader 221 | 222 | To start Cassandra reader, execute the following command: 223 | 224 | ```bash 225 | docker-compose -f docker/addons/cassandra-reader/docker-compose.yml up -d 226 | ``` 227 | 228 | ### MongoDB Reader 229 | 230 | To start MongoDB reader, execute the following command: 231 | 232 | ```bash 233 | docker-compose -f docker/addons/mongodb-reader/docker-compose.yml up -d 234 | ``` 235 | 236 | ### PostgreSQL Reader 237 | 238 | To start PostgreSQL reader, execute the following command: 239 | 240 | ```bash 241 | docker-compose -f docker/addons/postgres-reader/docker-compose.yml up -d 242 | ``` 243 | 244 | ### Timescale Reader 245 | 246 | To start Timescale reader, execute the following command: 247 | 248 | ```bash 249 | docker-compose -f docker/addons/timescale-reader/docker-compose.yml up -d 250 | ``` 251 | 252 | [subtopic]: ./messaging.md#subtopics 253 | [nats-subject]: https://docs.nats.io/nats-concepts/subjects 254 | [nats-wildcards]: https://docs.nats.io/nats-concepts/subjects#wildcards 255 | [writers]: ./storage.md#writers 256 | [influxdb]: https://docs.influxdata.com/influxdb 257 | [readers-api]: https://github.com/absmach/supermq/blob/main/api/openapi/readers.yml 258 | -------------------------------------------------------------------------------- /docs/tracing.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Tracing 3 | --- 4 | 5 | 6 | Distributed tracing is a method of profiling and monitoring applications. It can provide valuable insight when optimizing and debugging an application. SuperMQ includes the [Jaeger][jaegertracing] open tracing framework as a service with its stack by default. 7 | 8 | ## Launch 9 | 10 | The Jaeger service will launch with the rest of the SuperMQ services. All services can be launched using: 11 | 12 | ```bash 13 | make run 14 | ``` 15 | 16 | The Jaeger UI can then be accessed at `http://localhost:16686` from a browser. Details about the UI can be found in [Jaeger's official documentation][jaeger-ui]. 17 | 18 | ## Configure 19 | 20 | The Jaeger service can be disabled by using the `scale` flag with `docker-compose up` and setting the jaeger container to 0. 21 | 22 | ```bash 23 | --scale jaeger=0 24 | ``` 25 | 26 | Jaeger uses 5 ports within the SuperMQ framework. These ports can be edited in the `.env` file. 27 | 28 | | Variable | Description | Default | 29 | | ------------------- | ------------------------------------------------- | ----------- | 30 | | SMQ_JAEGER_PORT | Agent port for compact jaeger.thrift protocol | 6831 | 31 | | SMQ_JAEGER_FRONTEND | UI port | 16686 | 32 | | SMQ_JAEGER_COLLECTOR | Collector for jaeger.thrift directly from clients | 14268 | 33 | | SMQ_JAEGER_CONFIGS | Configuration server | 5778 | 34 | | SMQ_JAEGER_URL | Jaeger access from within SuperMQ | jaeger:6831 | 35 | 36 | ## Message Tracing 37 | 38 | SuperMQ provides for tracing of messages ingested into the SuperMQ platform. The message metadata such as topic, sub-topic, subscriber and publisher is also included in traces. ![HTTP Message Publishing trace](img/tracing/messagePub.png) 39 | 40 | The messages are tracked from end to end from the point they are published to the consumers where they are stored. 
![Influx DB consumer trace][consumer-trace] 41 | 42 | ## Example 43 | 44 | As an example of using Jaeger, we can look at the traces generated after provisioning the system. Make sure you have run the provisioning script that is part of the [Getting Started][getting-started] step. 45 | 46 | Before getting started with Jaeger, there are a few terms that are important to define. A `trace` can be thought of as one transaction within the system. A trace is made up of one or more `spans`. These are the individual steps that must be taken for a trace to perform its action. A span has `tags` and `logs` associated with it. Tags are key-value pairs that provide information such as a database type or HTTP method. Tags are useful when filtering traces in the Jaeger UI. Logs are structured messages used at specific points in the trace's transaction. These are typically used to indicate an error. 47 | 48 | When you first navigate to the Jaeger UI, it presents a search page with an empty results section. There are multiple fields to search from, including service, operation, tags and time frames. Clicking `Find Traces` will fill the results section with traces containing the selected fields. 49 | 50 | ![Search page with results](img/tracing/search.png) 51 | 52 | The top of the results page includes a scatter plot of the traces and their durations. This can be very useful for finding a trace with a prolonged runtime. Clicking on one of the points will open the trace page of that trace. 53 | 54 | Below the graph is a list of all the traces with a summary of their information. Each trace shows a unique identifier, the overall runtime, the spans it is composed of and when it was run. Clicking on one of the traces will open the trace page of that trace. 55 | 56 | ![Trace page with expanded spans](img/tracing/trace.png) 57 | 58 | The trace page provides a more detailed breakdown of the individual span calls. The top of the page shows a chart breaking down what spans the trace is spending its time in. Below the chart are the individual spans and their details. Expanding the spans shows any tags associated with that span and process information. This is also where any errors or logs seen while running the span will be reported. 59 | 60 | This is just a brief overview of the possibilities of Jaeger and its UI. For more information, check out [Jaeger's official documentation][jaeger-ui].
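The Jaeger UI is backed by an HTTP query API. It is internal and unversioned, so treat the sketch below as a convenience for quick checks rather than a stable interface; the service name is an example:

```bash
# fetch up to 5 traces for a service over the last hour
curl "http://localhost:16686/api/traces?service=users&limit=5&lookback=1h"
```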
61 | 62 | [jaegertracing]: https://www.jaegertracing.io/ 63 | [jaeger-ui]: https://www.jaegertracing.io/docs/1.14/frontend-ui/ 64 | [consumer-trace]: https://user-images.githubusercontent.com/44265300/241806789-a56f368c-a89f-4b5d-88fe-25b971ca4718.png 65 | [getting-started]: ./getting-started.md 66 | -------------------------------------------------------------------------------- /docusaurus.config.ts: -------------------------------------------------------------------------------- 1 | import { themes as prismThemes } from 'prism-react-renderer'; 2 | import type { Config } from '@docusaurus/types'; 3 | import type * as Preset from '@docusaurus/preset-classic'; 4 | 5 | const config: Config = { 6 | title: 'SuperMQ', 7 | favicon: 'img/favicon.png', 8 | 9 | url: 'https://docs.supermq.abstractmachines.fr', 10 | baseUrl: '/', 11 | 12 | organizationName: 'absmach', 13 | projectName: 'supermq', 14 | 15 | onBrokenLinks: 'throw', 16 | onBrokenMarkdownLinks: 'warn', 17 | 18 | i18n: { 19 | defaultLocale: 'en', 20 | locales: ['en'], 21 | }, 22 | 23 | presets: [ 24 | [ 25 | 'classic', 26 | { 27 | docs: { 28 | routeBasePath: '/', 29 | sidebarPath: 'sidebars.ts', 30 | editUrl: 31 | 'https://github.com/absmach/supermq-docs/blob/main', 32 | }, 33 | blog: { 34 | showReadingTime: true, 35 | feedOptions: { 36 | type: ['rss', 'atom'], 37 | xslt: true, 38 | }, 39 | editUrl: 40 | 'https://github.com/absmach/supermq-docs/blob/main', 41 | onInlineTags: 'warn', 42 | onInlineAuthors: 'warn', 43 | onUntruncatedBlogPosts: 'warn', 44 | blogSidebarTitle: 'All Blog Posts', 45 | blogSidebarCount: 'ALL', 46 | }, 47 | theme: { 48 | customCss: './src/css/custom.css', 49 | }, 50 | } satisfies Preset.Options, 51 | ], 52 | ], 53 | 54 | plugins: [ 55 | 'docusaurus-plugin-drawio', 56 | ], 57 | markdown: { 58 | mermaid: true, 59 | }, 60 | themes: ['@docusaurus/theme-mermaid'], 61 | 62 | themeConfig: { 63 | navbar: { 64 | logo: { 65 | alt: 'SuperMQ Logo', 66 | srcDark: 'img/logo-dark.png', 67 | src: 'img/logo-light1.png', 68 | }, 69 | items: [ 70 | { 71 | type: 'docSidebar', 72 | sidebarId: 'smqSidebar', 73 | position: 'left', 74 | label: 'Docs', 75 | }, 76 | { to: '/blog', label: 'Blog', position: 'left' }, 77 | { 78 | href: 'https://github.com/absmach/supermq', 79 | label: 'GitHub', 80 | position: 'right', 81 | }, 82 | ], 83 | }, 84 | footer: { 85 | style: 'dark', 86 | links: [ 87 | { 88 | title: 'Docs', 89 | items: [ 90 | { 91 | label: 'Overview', 92 | to: '/', 93 | }, 94 | ], 95 | }, 96 | { 97 | title: 'Community', 98 | items: [ 99 | { 100 | label: 'GitHub', 101 | href: 'https://github.com/absmach/supermq', 102 | }, 103 | { 104 | label: 'Gitter', 105 | href: 'https://app.gitter.im/#/room/#absmach_supermq:gitter.im?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge', 106 | }, 107 | { 108 | label: 'Google Group', 109 | href: 'https://groups.google.com/forum/#!forum/mainflux', 110 | }, 111 | { 112 | label: 'Twitter', 113 | href: 'https://twitter.com/absmach', 114 | }, 115 | ], 116 | }, 117 | { 118 | title: 'More', 119 | items: [ 120 | { 121 | label: 'Blog', 122 | to: '/blog', 123 | }, 124 | ], 125 | }, 126 | ], 127 | copyright: `Copyright © ${new Date().getFullYear()} Abstract Machines.`, 128 | }, 129 | prism: { 130 | theme: prismThemes.github, 131 | darkTheme: prismThemes.dracula, 132 | }, 133 | typography: { 134 | fontFamily: 'Inter, sans-serif', 135 | }, 136 | } satisfies Preset.ThemeConfig, 137 | }; 138 | 139 | export default config; 140 |
-------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "super-mq", 3 | "version": "0.1.0", 4 | "private": false, 5 | "scripts": { 6 | "docusaurus": "docusaurus", 7 | "start": "docusaurus start", 8 | "build": "docusaurus build", 9 | "swizzle": "docusaurus swizzle", 10 | "deploy": "docusaurus deploy", 11 | "clear": "docusaurus clear", 12 | "serve": "docusaurus serve", 13 | "write-translations": "docusaurus write-translations", 14 | "write-heading-ids": "docusaurus write-heading-ids", 15 | "typecheck": "tsc" 16 | }, 17 | "dependencies": { 18 | "@docusaurus/core": "3.6.3", 19 | "@docusaurus/preset-classic": "3.6.3", 20 | "@docusaurus/theme-mermaid": "^3.6.3", 21 | "@mdx-js/react": "^3.0.0", 22 | "clsx": "^2.0.0", 23 | "docusaurus-plugin-drawio": "^0.4.0", 24 | "prism-react-renderer": "^2.3.0", 25 | "react": "^18.0.0", 26 | "react-dom": "^18.0.0" 27 | }, 28 | "devDependencies": { 29 | "@docusaurus/module-type-aliases": "3.6.3", 30 | "@docusaurus/tsconfig": "3.6.3", 31 | "@docusaurus/types": "3.6.3", 32 | "@types/node": "^22.10.1", 33 | "image-size": "~1.0.2", 34 | "markdownlint": "^0.37.1", 35 | "markdownlint-cli": "^0.43.0", 36 | "typescript": "~5.6.2" 37 | }, 38 | "browserslist": { 39 | "production": [ 40 | ">0.5%", 41 | "not dead", 42 | "not op_mini all" 43 | ], 44 | "development": [ 45 | "last 3 chrome version", 46 | "last 3 firefox version", 47 | "last 5 safari version" 48 | ] 49 | }, 50 | "engines": { 51 | "node": ">=18.0" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | mkdocs-drawio-file 2 | -------------------------------------------------------------------------------- /sidebars.ts: -------------------------------------------------------------------------------- 1 | import type {SidebarsConfig} from '@docusaurus/plugin-content-docs'; 2 | 3 | const sidebars: SidebarsConfig = { 4 | smqSidebar: [ 5 | { 6 | type: 'doc', 7 | id: 'index', 8 | label: 'Overview', 9 | }, 10 | { 11 | type: 'doc', 12 | id: 'architecture', 13 | label: 'Architecture', 14 | }, 15 | { 16 | type: 'category', 17 | label: 'Concepts', 18 | items: [ 19 | {type: 'doc', id: 'entities', label: 'Entities'}, 20 | {type: 'doc', id: 'authentication', label: 'Authentication'}, 21 | {type: 'doc', id: 'authorization', label: 'Authorization'}, 22 | {type: 'doc', id: 'roles', label: 'Roles'}, 23 | {type: 'doc', id: 'authz-spec', label: 'SuperMQ Authorization Specification Document'}, 24 | {type: 'doc', id: 'security', label: 'Security'}, 25 | {type: 'doc', id: 'messaging', label: 'Messaging'}, 26 | ], 27 | }, 28 | { 29 | type: 'category', 30 | label: 'Quick Start', 31 | items: [ 32 | {type: 'doc', id: 'getting-started', label: 'Getting Started'}, 33 | {type: 'doc', id: 'api', label: 'API'}, 34 | {type: 'doc', id: 'cli', label: 'CLI'}, 35 | ], 36 | }, 37 | { 38 | type: 'category', 39 | label: 'Development Tools', 40 | items: [ 41 | {type: 'doc', id: 'dev-guide', label: 'Developers Guide'}, 42 | {type: 'doc', id: 'events', label: 'Events'}, 43 | {type: 'doc', id: 'tracing', label: 'Tracing'}, 44 | ], 45 | }, 46 | { 47 | type: 'doc', 48 | id: 'storage', 49 | label: 'Storage', 50 | }, 51 | { 52 | type: 'doc', 53 | id: 'edge', 54 | label: 'Edge', 55 | }, 56 | { 57 | type: 'doc', 58 | id: 'certs', 59 | label: 'Certs', 60 | }, 61 | { 62 | type: 
'doc', 63 | id: 'kubernetes', 64 | label: 'Kubernetes', 65 | }, 66 | { 67 | type: 'category', 68 | label: 'Extensions', 69 | items: [ 70 | {type: 'doc', id: 'smq-contrib', label: 'SMQ-Contrib Repository'}, 71 | {type: 'doc', id: 'lora', label: 'LoRa'}, 72 | {type: 'doc', id: 'opcua', label: 'OPC-UA'}, 73 | {type: 'doc', id: 'provision', label: 'Provisioning'}, 74 | {type: 'doc', id: 'twins', label: 'Twins Service'}, 75 | {type: 'doc', id: 'bootstrap', label: 'Bootstrap'}, 76 | ], 77 | }, 78 | { 79 | type: 'doc', 80 | id: 'benchmark', 81 | label: 'Test Spec', 82 | }, 83 | ], 84 | }; 85 | 86 | export default sidebars; 87 | -------------------------------------------------------------------------------- /src/css/custom.css: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap'); 2 | 3 | /** 4 | * Any CSS included here will be global. The classic template 5 | * bundles Infima by default. Infima is a CSS framework designed to 6 | * work well for content-centric websites. 7 | */ 8 | 9 | /* You can override the default Infima variables here. */ 10 | :root { 11 | --ifm-color-primary: #2e8555; 12 | --ifm-color-primary-dark: #29784c; 13 | --ifm-color-primary-darker: #277148; 14 | --ifm-color-primary-darkest: #205d3b; 15 | --ifm-color-primary-light: #33925d; 16 | --ifm-color-primary-lighter: #359962; 17 | --ifm-color-primary-lightest: #3cad6e; 18 | --ifm-code-font-size: 95%; 19 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); 20 | } 21 | 22 | /* For readability concerns, you should choose a lighter palette in dark mode. */ 23 | [data-theme='dark'] { 24 | --ifm-color-primary: #25c2a0; 25 | --ifm-color-primary-dark: #21af90; 26 | --ifm-color-primary-darker: #1fa588; 27 | --ifm-color-primary-darkest: #1a8870; 28 | --ifm-color-primary-light: #29d5b0; 29 | --ifm-color-primary-lighter: #32d8b4; 30 | --ifm-color-primary-lightest: #4fddbf; 31 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); 32 | } 33 | 34 | body, h1, h2, h3, h4, h5, h6, p, li, a, span, code { 35 | font-family: 'Inter', sans-serif; 36 | } 37 | 38 | h1, h2, h3, h4, h5, h6 { 39 | font-weight: 600; 40 | } 41 | -------------------------------------------------------------------------------- /static/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/static/.nojekyll -------------------------------------------------------------------------------- /static/CNAME: -------------------------------------------------------------------------------- 1 | docs.supermq.abstractmachines.fr -------------------------------------------------------------------------------- /static/img/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/static/img/favicon.png -------------------------------------------------------------------------------- /static/img/logo-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/static/img/logo-dark.png -------------------------------------------------------------------------------- /static/img/logo-light1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/absmach/supermq-docs/e39bd5838deeb900ee74596f10da4ddf8e198c5c/static/img/logo-light1.png -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | // This file is not used in compilation. It is here just for a nice editor experience. 3 | "extends": "@docusaurus/tsconfig", 4 | "compilerOptions": { 5 | "baseUrl": "." 6 | }, 7 | "exclude": [".docusaurus", "build"] 8 | } 9 | --------------------------------------------------------------------------------