├── .github └── workflows │ └── node.js.yml ├── .gitignore ├── .serverless └── cloudformation-template-create-stack.json ├── DockerFile ├── LICENSE ├── README.md ├── __test__ ├── adapters │ ├── event-adapter.no.js │ └── serverless.test.js ├── controllers │ └── post-model.test.js ├── datasources │ └── datasource-file.js ├── lambda │ ├── apigateway-getid.json │ ├── apigateway-post.json │ ├── lambda-get-by-id.json │ ├── lambda-html.json │ └── lambda-post.json ├── models │ ├── circuit-breaker.js │ ├── mixins.js │ ├── model-factory.test.js │ └── model.test.js ├── services │ └── event-service.js └── use-cases │ ├── add-model.js │ └── execute-command.js ├── cert ├── README.md └── mesh │ ├── keys.js │ ├── privateKey.pem │ └── publicKey.pem ├── dotenv.example ├── mongo.sh ├── package-lock.json ├── package.json ├── proxy.js ├── public ├── .well-known │ └── acme-challenge │ │ ├── README.md │ │ ├── RK65fkXKs7LTryMYAc4u4RCSboUBRLFsg8TWCk9ldHo │ │ ├── SMzetSJCRZfeNEV1lCstszji9iDIIqS1WZYokbcGSiQ │ │ └── sR6_IXaiFeIhYjp91yxSezMLMqPPiZ23PvfTWr6Pl-A ├── aegis-logo.png ├── aegis.config.json ├── app.js ├── arch.drawio ├── arch.html ├── arch.svg ├── c4.svg ├── hot-reload-complete.html ├── hot-reload.html ├── index.html ├── wsapp.html └── wsapp.js ├── rustup-init.sh ├── serverless.yml ├── src ├── bootstrap.js ├── host-container.js ├── middleware.js ├── server-less.js └── server.js ├── start.sh ├── status.sh ├── stop.sh ├── target └── npmlist.json ├── webpack.client.config.js ├── webpack.config.js └── webpack ├── fetch-remotes.js ├── remote-entries-type.js ├── remote-entries-util.js └── remote-entries ├── README.md ├── bli.js ├── cache-local.js ├── cache.js ├── customer.js ├── fdp.js ├── go.js ├── index.js ├── local.js ├── localhost-8000--remoteEntry.js ├── localhost-8001--remoteEntry.js ├── order.js ├── python.js ├── wasm-local.js ├── wasm.js └── worker.js /.github/workflows/node.js.yml: -------------------------------------------------------------------------------- 1 | # This workflow 
will do a clean install of node dependencies, build the source code and run tests across different versions of node 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-nodejs-with-github-actions 3 | 4 | name: Node.js CI 5 | 6 | on: 7 | push: 8 | branches: [ master ] 9 | pull_request: 10 | branches: [ master ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | strategy: 18 | matrix: 19 | node-version: [16.x] 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | - name: Use Node.js ${{ matrix.node-version }} 24 | uses: actions/setup-node@v1 25 | with: 26 | node-version: ${{ matrix.node-version }} 27 | - run: npm install 28 | - run: npm run build 29 | - run: npm test 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | dist 3 | .vscode 4 | *.cpuprofile 5 | src/.DS_Store 6 | .env* 7 | coverage 8 | src/domain/.DS_Store 9 | .DS_Store 10 | accessToken.sh 11 | public/token.json 12 | .serverless 13 | #amplify 14 | amplify/\#current-cloud-backend 15 | amplify/.config/local-* 16 | amplify/logs 17 | amplify/mock-data 18 | amplify/backend/amplify-meta.json 19 | amplify/backend/awscloudformation 20 | amplify/backend/.temp 21 | build/ 22 | dist/ 23 | node_modules/ 24 | aws-exports.js 25 | awsconfiguration.json 26 | amplifyconfiguration.json 27 | amplifyconfiguration.dart 28 | amplify-build-config.json 29 | amplify-gradle-config.json 30 | amplifytools.xcconfig 31 | .secret-* 32 | lambda.out 33 | .aws/config 34 | .aws/credentials 35 | nohup.out 36 | public/user.json 37 | public/order.json 38 | public/customer.json 39 | public/product.json 40 | public/wasm.json 41 | public/aegis.log 42 | public/inventory.json 43 | public/catalog.log 44 | yarn-error.log 45 | aegis.pid 46 | cert/certificate.pem 47 | cert/privatekey.pem 48 | cert/.prev 49 | aegis.pid 50 | 
public/.well-known/acme-challenge/* 51 | install.log 52 | public/hexanimation.gif 53 | public/mockup.png 54 | public/mockup1.png 55 | public/mockup3.pnf 56 | public/mobile.yaml 57 | -------------------------------------------------------------------------------- /.serverless/cloudformation-template-create-stack.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "The AWS CloudFormation template for this Serverless application", 4 | "Resources": { 5 | "ServerlessDeploymentBucket": { 6 | "Type": "AWS::S3::Bucket", 7 | "Properties": { 8 | "BucketEncryption": { 9 | "ServerSideEncryptionConfiguration": [ 10 | { 11 | "ServerSideEncryptionByDefault": { 12 | "SSEAlgorithm": "AES256" 13 | } 14 | } 15 | ] 16 | } 17 | } 18 | }, 19 | "ServerlessDeploymentBucketPolicy": { 20 | "Type": "AWS::S3::BucketPolicy", 21 | "Properties": { 22 | "Bucket": { 23 | "Ref": "ServerlessDeploymentBucket" 24 | }, 25 | "PolicyDocument": { 26 | "Statement": [ 27 | { 28 | "Action": "s3:*", 29 | "Effect": "Deny", 30 | "Principal": "*", 31 | "Resource": [ 32 | { 33 | "Fn::Join": [ 34 | "", 35 | [ 36 | "arn:", 37 | { 38 | "Ref": "AWS::Partition" 39 | }, 40 | ":s3:::", 41 | { 42 | "Ref": "ServerlessDeploymentBucket" 43 | }, 44 | "/*" 45 | ] 46 | ] 47 | }, 48 | { 49 | "Fn::Join": [ 50 | "", 51 | [ 52 | "arn:", 53 | { 54 | "Ref": "AWS::Partition" 55 | }, 56 | ":s3:::", 57 | { 58 | "Ref": "ServerlessDeploymentBucket" 59 | } 60 | ] 61 | ] 62 | } 63 | ], 64 | "Condition": { 65 | "Bool": { 66 | "aws:SecureTransport": false 67 | } 68 | } 69 | } 70 | ] 71 | } 72 | } 73 | } 74 | }, 75 | "Outputs": { 76 | "ServerlessDeploymentBucketName": { 77 | "Value": { 78 | "Ref": "ServerlessDeploymentBucket" 79 | } 80 | } 81 | } 82 | } -------------------------------------------------------------------------------- /DockerFile: -------------------------------------------------------------------------------- 1 | # 
Dockerized nvm development environment 2 | # 3 | # This Dockerfile is for building nvm development environment only, 4 | # not for any distribution/production usage. 5 | # 6 | # Please note that it'll use about 1.2 GB disk space and about 15 minutes to 7 | # build this image, it depends on your hardware. 8 | 9 | # Use Ubuntu Trusty Tahr as base image as we're using on Travis CI 10 | # I also tested with Ubuntu 16.04, should be good with it! 11 | FROM ubuntu:14.04 12 | LABEL maintainer="Peter Dave Hello " 13 | LABEL name="nvm-dev-env" 14 | LABEL version="latest" 15 | 16 | # Set the SHELL to bash with pipefail option 17 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 18 | 19 | # Prevent dialog during apt install 20 | ENV DEBIAN_FRONTEND noninteractive 21 | 22 | # ShellCheck version 23 | ENV SHELLCHECK_VERSION=0.5.0 24 | 25 | # Pick a Ubuntu apt mirror site for better speed 26 | # ref: https://launchpad.net/ubuntu/+archivemirrors 27 | ENV UBUNTU_APT_SITE ubuntu.cs.utah.edu 28 | 29 | # Disable src package source 30 | RUN sed -i 's/^deb-src\ /\#deb-src\ /g' /etc/apt/sources.list 31 | 32 | # Replace origin apt package site with the mirror site 33 | RUN sed -E -i "s/([a-z]+.)?archive.ubuntu.com/$UBUNTU_APT_SITE/g" /etc/apt/sources.list 34 | RUN sed -i "s/security.ubuntu.com/$UBUNTU_APT_SITE/g" /etc/apt/sources.list 35 | 36 | # Install apt packages 37 | RUN apt update && \ 38 | apt upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" && \ 39 | apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \ 40 | coreutils \ 41 | util-linux \ 42 | bsdutils \ 43 | file \ 44 | openssl \ 45 | ca-certificates \ 46 | ssh \ 47 | wget \ 48 | patch \ 49 | sudo \ 50 | htop \ 51 | dstat \ 52 | vim \ 53 | tmux \ 54 | curl \ 55 | git \ 56 | jq \ 57 | realpath \ 58 | zsh \ 59 | ksh \ 60 | gcc-4.8 \ 61 | g++-4.8 \ 62 | xz-utils \ 63 | build-essential \ 64 | bash-completion && \ 65 | apt-get clean 66 | 67 | # ShellCheck with Ubuntu 
14.04 container workaround 68 | RUN wget https://storage.googleapis.com/shellcheck/shellcheck-v$SHELLCHECK_VERSION.linux.x86_64.tar.xz -O- | \ 69 | tar xJvf - shellcheck-v$SHELLCHECK_VERSION/shellcheck && \ 70 | mv shellcheck-v$SHELLCHECK_VERSION/shellcheck /bin && \ 71 | rmdir shellcheck-v$SHELLCHECK_VERSION && \ 72 | touch /tmp/libc.so.6 && \ 73 | echo "alias shellcheck='LD_LIBRARY_PATH=/tmp /bin/shellcheck'" >> /etc/bash.bashrc 74 | RUN LD_LIBRARY_PATH=/tmp shellcheck -V 75 | 76 | # Set locale 77 | RUN locale-gen en_US.UTF-8 78 | 79 | # Print tool versions 80 | RUN bash --version | head -n 1 81 | RUN zsh --version 82 | RUN ksh --version || true 83 | RUN dpkg -s dash | grep ^Version | awk '{print $2}' 84 | RUN git --version 85 | RUN curl --version 86 | RUN wget --version 87 | 88 | # Add user "nvm" as non-root user 89 | RUN useradd -ms /bin/bash nvm 90 | 91 | # Copy and set permission for nvm directory 92 | COPY . /home/nvm/.nvm/ 93 | RUN chown nvm:nvm -R "/home/nvm/.nvm" 94 | 95 | # Set sudoer for "nvm" 96 | RUN echo 'nvm ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers 97 | 98 | # Switch to user "nvm" from now 99 | USER nvm 100 | 101 | # nvm 102 | RUN echo 'export NVM_DIR="$HOME/.nvm"' >> "$HOME/.bashrc" 103 | RUN echo '[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm' >> "$HOME/.bashrc" 104 | RUN echo '[ -s "$NVM_DIR/bash_completion" ] && . "$NVM_DIR/bash_completion" # This loads nvm bash_completion' >> "$HOME/.bashrc" 105 | 106 | # nodejs and tools 107 | RUN bash -c 'source $HOME/.nvm/nvm.sh && \ 108 | nvm install node && \ 109 | npm install -g doctoc urchin eclint dockerfile_lint && \ 110 | npm install --prefix "$HOME/.nvm/"' 111 | 112 | # Set WORKDIR to nvm directory 113 | WORKDIR /home/nvm/.nvm 114 | 115 | ENTRYPOINT ["/bin/bash"] 116 | 117 | COPY dist dist/ 118 | COPY package.json .
119 | # FIXME(review): incomplete COPY instruction — missing destination path (original line was `COPY dest`; no `dest` directory exists in this repo) 120 | COPY webpack webpack/ 121 | ENV PORT 8070 122 | EXPOSE 8070 123 | 124 | 125 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types.
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | [![aegis](https://user-images.githubusercontent.com/38910830/128654405-93098731-3c31-4f52-bda0-efe95d77c5fe.png)](https://blog.federated-microservices.com) 3 | 4 | 5 | 6 | # ÆGIS formerly _microlib_ 7 | 8 | The ÆGIS federation host deploys, runs, integrates and persists federated application components (or federated microservice libraries) in a distributed middleware fabric. Multiple, polyglot services can run together on the same host instance while remaining independently deployable. This allows organizations to reduce their footprint and simplify their operations without losing any of the autonomy and loose coupling they've come to expect from the microservice architectural style. Conversely, services can just as easily be distributed across the self-forming ÆGIS fabric, which can reach any part of the IT landscape: datacenter, edge, frontend, backend, mobile, embedded, IoT, phone or drone. In either case, the development experience is the same.
Component deployment, integration and persistence is automated and transparent, freeing developers to concentrate on what pays the bills, the business logic. And paying the bills gets easier when you eliminate the need for bespoke deployment automation. Because federated deployment always works the same way, regardless of vendor or compute primitive, there's no deployment automation to deal with beyond the federation host itself. Considering ÆGIS runs on just about any platform and supports both a server and serverless execution mode, you can wave goodbye to vendor lock-in. 9 | 10 | This repo contains the host code. The library can be found [here](https://github.com/module-federation/aegis). An example of a federated app that runs on ÆGIS can be found [here](https://github.com/module-federation/aegis-app). 11 | 12 | [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/github.com/module-federation/aegis) 13 | 14 | Note: to avoid rate-limiting, create a variable in your Gitpod [profile](https://gitpod.io/account) called GITHUB_TOKEN with the value of a Github personal access [token](https://github.com/settings/apps). 15 | 16 | 17 | ## Open Branch: 18 | [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/t/module-federation/aegis-app) 19 | 20 | ## [Federated Microservices](https://trmidboe.medium.com/federated-applications-ba396d6925b1) 21 | 22 | - loaded from multiple network locations and repositories at runtime; 23 | - developed by multiple teams; 24 | - run together in a single process; 25 | - can be reloaded at any time without restarting the process or interrupting other components running in the process; 26 | - are _not_ _installed_ on the server but _streamed_ over the network as needed. 
27 | 28 | ![mono-micro-fed 001](https://user-images.githubusercontent.com/38910830/126571702-0cd570fd-2a94-4560-86b0-18d514d7cb65.jpeg) 29 | 30 | 31 | ## TL;DR 32 | 33 | ```shell 34 | git clone https://github.com/module-federation/aegis-host 35 | cd aegis-host 36 | cp dotenv.example .env 37 | yarn 38 | yarn build 39 | yarn start 40 | yarn demo 41 | ``` 42 | 43 | Note: you no longer need to run the aegis-Example project, as the host has been configured to stream the federated 44 | modules directly from GitHub. 45 | 46 | [Importing your own app](#importing-your-own-repo) 47 | 48 | ## Purpose 49 | 50 | Stop paying the "microservices premium." 51 | 52 | When evaluating microservices as a candidate architecture, the most important fact to consider is that you are building a distributed application. Microservices are the components of distributed applications - and distribution is what enables their chief virtue, deployment independence. Unfortunately, relative to the traditional alternative, monoliths, distributed apps are much harder to build and manage. So much so, that many microservice implementations fail. 53 | 54 | This trade-off, dealing with the increased scope, cost and risk that stems from distribution, is called paying the "microservices premium". Sometimes the premium is well worth it. But in many cases it does more harm than good, leading many experts to advise against starting with microservices, but to instead introduce them gradually as scope or demand increases. 55 | 56 | That said, in cases where the implementation does succeed, organizations generally prefer microservices to monoliths because of the increased speed and agility that deployment independence brings. So one could make the argument that if the premium were somehow discounted, microservices would be appropriate for a much wider audience. 
57 | 58 | _**Consider, then, what would happen if we could eliminate the need for distribution and still allow for independent deployment.**_ 59 | 60 | Is there such an alternative? [Fowler](https://martinfowler.com/articles/microservices.html) describes the implicit premise behind the distribution/deployment trade-off: 61 | 62 | > "One main reason for using services as components (rather than libraries) is that services are independently deployable. If you have an application that consists of multiple libraries in a single process, a change to any single component results in having to redeploy the entire application.” 63 | 64 | While technologies that support hot deployment have been around for some time (such as [OSGi](https://www.osgi.org/)), it would appear, up until now anyway, perhaps due to complexity, labor intensity, or skills scarcity, they haven't been considered a viable option. Whatever the reason, with the advent of module federation, this view is no longer warranted. 65 | 66 | Using module federation, it is possible to dynamically and efficiently import remote libraries, just as if they had been installed locally, with only a few, simple configuration steps. aegis exploits this technology to support a framework for building application components as independently deployable libraries, call them **microservice libraries**. 67 | 68 | Using webpack dependency graphs, code splitting and code streaming, Aegis supports hot deployment of federated modules, as well as any dependencies not present on the host, allowing development teams to deploy whenever they choose, without disrupting other components, and without having to coordinate with other teams. To simplify integration, promote composability and ensure components remain decoupled, aegis implements the port-adapter paradigm from hexagonal architecture to standardize the way modules communicate, so intra- and interprocess communication is transparent. 
I.e., whether deployed locally to the same aegis host instance or remotely, it's all the same to the module developer. 69 | 70 | With Aegis, then, you get the best of both worlds. You are no longer forced to choose between manageability and autonomy. Rather, you avoid the microservices premium altogether by building truly modular and independently deployable component libraries that run together in the same process (or cluster of processes), in what you might call a _"polylith"_ - a monolith comprised of multiple (what would otherwise be) microservices. 71 | 72 | --- 73 | 74 | ## Features 75 | 76 | One of the main goals of Aegis is to provide an alternative to distributed systems and the performance and operational challenges that come with them, while preserving the benefits of deployment independence. To this end, Aegis organizes components according to hexagonal architecture, such that the boundaries of, and relations between, federated components are clear and useful. 77 | 78 | In addition to zero-install, hot deployment and local eventing, aegis promotes strong boundaries between, and prevents coupling of, collocated components through the formalism of the port-adapter paradigm and the use of code generation to automate boilerplate integration tasks. Features include: 79 | 80 | ### Highlights 81 | 82 | - Deployment independence without distribution 83 | - Language independence without distribution 84 | - Self-deployment (_no deployment automation required!_) 85 | - Run on: any compute primitive: vm, container, raspberry pi...
86 | - Run as: single process, cluster, or serverless function 87 | - Containerless, secure, near-native performance using WebAssembly 88 | - Zero downtime, zero storage, zero installation runtime (using code streaming) 89 | - Transparent integration and persistence (same code works whether components are local or remote) 90 | - Self-forming, high-speed, in-process service mesh (no side car) 91 | - Runtime binding of services and adapters (add, modify features and fixes live in prod) 92 | - Multithreading for CPU-bound workloads (e.g. AI inference) 93 | - Distributed data / object fabric across datacenter, edge, mobile, IoT / embedded 94 | - [Fractal, hexagonal architecture](https://trmidboe.medium.com/fractal-architecture-56a1d2d6a599) for high composability and strong component boundaries 95 | 96 | ### Detail 97 | - [Dynamic API generation for federated modules](#zero-downtime---zero-install-deployment-api-generation) 98 | - Dynamic, individualized storage adapter generation 99 | - Automatic persistence of federated modules 100 | - Runtime port generation 101 | - Runtime binding (port-adapter binding, adapter-service binding) 102 | - Zero deployment automation required (install to any compute primitive) 103 | - Self-forming Service Mesh 104 | - Runtime linking of WebAssembly modules 105 | - [Hot deployment of federated modules](#zero-downtime---zero-install-deployment-api-generation) 106 | - Configuration-based service integration 107 | - Configuration-based service orchestration 108 | - Built-in error handling (circuit breaker, undo) 109 | - Common broker for locally shared (in-memory) events 110 | - Persistence API for cached datasources 111 | - Datasource relations for federated schemas and objects 112 | - Object broker for retrieving external model instances 113 | - Dependency/control inversion (IoC) 114 | - [Zero downtime, "zero install" deployment](#zero-downtime---zero-install-deployment-api-generation) 115 | - Evergreen deployment and semantic versioning
116 | - Dynamic A/B testing 117 | - Exhaustive deployment options (run as a Server, Cluster or Serverless Function) 118 | - Vendor-agnostic serverless deployment (no vendor lock-in) 119 | - Fast deployment - no-install deployment is the shortest path 120 | - Self-deployment - built-in deployment automation 121 | - Configurable serialization for network and storage I/O 122 | - Clustering for availability and scalability 123 | - Cluster cache synchronization 124 | - Polyrepo code reuse (the answer to the shared code question) 125 | - Automated CA certificate setup and renewal with zero downtime 126 | - Self-forming, built-in, pluggable service mesh 127 | - Support for WebAssembly modules as models, adapters, services 128 | - WebAssembly workflow - pipe modules together to form control flows 129 | - Polyglossia - write components in any lang with a Wasm compile target 130 | - Eventually MLOps - ci/cd pipeline for machine learning deployment 131 | - Sooner than later AIOps - deep learning for adaptive, lights-out operations 132 | 133 | 134 | --- 135 | 136 | ports-adapters 137 | 138 | ## Components 139 | 140 | Aegis uses a modified version of [Webpack Module Federation](https://webpack.js.org/concepts/module-federation/) to import remote modules over the network into the host framework at runtime. Aegis modules fall into three categories: `model`, `adapter` and `service`. 141 | 142 | A [model](https://github.com/module-federation/aegis-application/blob/master/src/config/order.js) is a domain entity/service - or in [polylith](https://polylith.gitbook.io/) architecture, a component - that implements all or part of the service’s core logic. Each model has a corresponding ModelSpecification object which is used to define and configure core properties of the model and their values or bindings, such as URL endpoint of the service or external dependencies to be injected.
The object implements an interface that has many options but only a few simple requirements, so developers can use as much, or as little, of the framework's capabilities as they choose. 143 | 144 | One such capability is port generation. In a hexagonal or port-adapter architecture, ports handle I/O between the application and domain layers. An [adapter](https://github.com/module-federation/aegis-application/blob/master/src/adapters/event-adapter.js) implements the port’s interface, facilitating communication with the outside world. As a property of models, ports are configurable and can be added, modified and deleted on the fly at runtime. Same goes for adapters. The framework automatically rebinds adapters to ports as needed, again with no downtime or restart required. 145 | 146 | A [service](https://github.com/module-federation/aegis-application/blob/master/src/services/event-service.js) provides an optional layer of abstraction for adapters and usually implements a client library. When an adapter is written to satisfy a common integration pattern, a service implements a particular instance of that pattern. For example, an event adapter implements pub/sub functionality, which works with Kafka, Nats, or RabbitMQ. Simply bind the corresponding service to the outside-facing end of the adapter to enable the desired messaging provider. Like adapters to ports, the framework dynamically imports and binds services to adapters at runtime, which means, in our example, you change from Kafka to Nats, or add Nats and use both, without ever taking the system offline. 147 | 148 | --- 149 | 150 | persistence 151 | 152 | ## Persistence 153 | 154 | The framework automatically persists domain models as JSON documents either using the default adapter configured for the server or an adapter specifically configured for the model in the _ModelSpec_. In-memory, filesystem, and MongoDB adapters are provided. Adapters can be extended and individualized per model. 
Additionally, de/serialization can be customized. Finally, every write operation generates an event that can be forwarded to an external event or data sink. 155 | 156 | A common datasource factory manages adapters and provides access to each service’s individual datasource. The factory supports federated schemas (think GraphQL) through relations defined between datasources in the _ModelSpec_. With local caching, not only are data federated, **but so are related domain models**. 157 | 158 | ```js 159 | const customer = order.customer(); // relation `customer` defined in ModelSpec 160 | 161 | const creditCard = customer.decrypt().creditCardNumber; 162 | ``` 163 | 164 | Access to data and objects requires explicit permission, otherwise services cannot access one another’s code or data. Queries execute against an in-memory copy of the data. Datasources leverage this cache by extending the in-memory adapter. 165 | 166 | Note that the non-local cache or distributed cache is itself a storage option depending on the number of nodes in the service mesh and/or their attached storage. See the section on service mesh below. 167 | 168 | ### A note about external services and the Aegis fabric. 169 | When you deploy the same application model to multiple aegis instances, the application becomes a distributed application. If each application integrates with a different instance of a particular service, that service effectively becomes a single clustered instance. For example, deploying two Aegis instances that talk to two separate MongoDb instances, will cause the db instances to be synchronized. 
170 | 171 | ### Data fabric (distributed object cache) vs custom adapter 172 | It's important to note that this automatic persistence feature, while providing fairly sophisticated extensibility in and of itself, does not limit you from creating your own solution using ports and adapters, which more demanding use cases might call for; rather, it is merely an optional convenience that should prove effective in many common scenarios, saving you from boilerplate code. However, when writing a custom adapter, to be consistent with the design of the framework, local caching and object-relational APIs should be used to make your data available to the distributed cache (or data fabric), which supports performant, federated data access, as well as transparent integration. 173 | 174 | --- 175 | eventing 176 | 177 | ## Integration 178 | 179 | ### Ports & Adapters 180 | 181 | When ports are configured in the `ModelSpecification`, the framework dynamically generates methods on the domain model to invoke them. Each port is assigned an adapter, which either invokes the port (inbound) or is invoked by it (outbound). 182 | 183 | Ports can be instrumented for exceptions and timeouts to extend the framework’s circuit breaker, retry and compensation logic. They can also be piped together in control flows by specifying the output event of one port as the input or triggering event of another. 184 | 185 | An adapter either implements an external interface or exposes an interface for external clients to consume. On the port end, an adapter always implements the port interface; never the other way around. Ports are a function of the domain logic, which is orthogonal to environment-specific implementation details. By design, the domain has no knowledge of anything beyond the port. That which invokes an inbound port or that which is invoked by an outbound port - where the data comes from or where it goes - is irrelevant. 
Only the shape of the data (as defined by the domain interface) matters. 186 | 187 | Ports optionally specify a callback to process data received on the port before control is returned to the caller. The callback is passed as an argument to the port function. Ports can be configured to run on receipt of an event, API request, or called directly from code. 188 | 189 | Ports also have an undo callback for implementing compensating logic in the event of a downstream transaction failure. The framework remembers the order in which ports are invoked and runs the undo callback of each port in reverse order, starting at the point of failure. This allows transactions across multiple services to be rolled back. 190 | 191 | ### Local & Remote Events 192 | 193 | In addition to in-memory function calls, federated objects and ports, services can communicate with one another locally the same way they do remotely: by publishing and subscribing to events. Using local events, microservice libraries are virtually as decoupled as they would be running remotely. 194 | 195 | The framework provides a common broker for local service events and injects pub/sub functions into each model: 196 | 197 | ```js 198 | modelA.listen(event, callback); 199 | 200 | modelB.notify(event, data); 201 | ``` 202 | 203 | Local events can also be forwarded to remote event targets. Like any external integration remote ports must be configured for external event sources/sinks. Adapters are provided for **Kafka** and **WebSockets**. 204 | 205 | --- 206 | 207 | workflow 208 | 209 | ## Orchestration 210 | 211 | Service orchestration is built on the framework’s port-adapter implementation. As mentioned, ports both produce and consume events, allowing them to be piped together in control flows by specifying the output event of one port as the input event of another. Because events are shared internally and can be forwarded externally, this implementation works equally well whether services are local or remote. 
212 | 213 | Callbacks specified for ports in the _ModelSpec_ can process data received on a port before its output event is fired and the next port runs. If not specified, the framework nevertheless saves the port output to the model. Of course, you can implement your own event handlers or adapter logic to customize the flow. 214 | 215 | --- 216 | 217 | ## Service Mesh 218 | 219 | Aegis provides an in-process service mesh that ties Aegis instances together forming a data/object fabric, where data is federated, and workload can be distributed and deployed dynamically in response to functional or non-functional requirements and conditions. As opposed to a sidecar, the service mesh is built directly into the federation host and runs on the same port as the API, but uses the Websockets protocol. External clients can connect to the ws mesh interface to integrate with, observe or control any aegis component. The mesh enables federated data access and transparent integration of aegis components, such that component developers can write business logic that is valid regardless of where components are deployed. The service mesh itself is pluggable, allowing different implementations to be turned on and off (and to coexist in different parts of the mesh). The default implementation, "webswitch", is a self-forming, switched mesh based on websockets. Nats- and QUIC-based implementations are planned. 220 | 221 | See 222 | 223 | ```shell 224 | public/aegis.config.json 225 | ``` 226 | 227 | ## Running the Application 228 | 229 | See above TL;DR section for a simplified install. Get up and running in about 60 seconds. 230 | 231 | ### Datasources 232 | 233 | In the default configuration, aegis uses the local filesystem for default persistence. Alternatively, you can install MongoDB and update the .env accordingly to change the default to Mongo. You can also update an individual model's datasource in the ModelSpec. 
234 | 235 | ```shell 236 | brew tap mongodb/brew 237 | mongod 238 | ``` 239 | 240 | .env 241 | 242 | ```shell 243 | DATASOURCE_ADAPTER=DataSourceMongoDb 244 | MONGODB_URL=mongodb://localhost:27017 245 | ``` 246 | 247 | ### Clustering 248 | 249 | Aegis supports clustering with automatic cache synchronization and rolling restart for increased stability, scalability and efficiency with zero downtime. To enable: 250 | 251 | .env 252 | 253 | ``` 254 | CLUSTER_ENABLED=true 255 | ``` 256 | 257 | ### Serverless 258 | 259 | Alternatively, Aegis can run as a serverless function. It's rated for AWS. Support can be extended to other platforms and vendors by writing a message parser that simply maps the input and output to request and response objects, indicating the HTTP method. See /adapters/serverless/parsers 260 | 261 | ### Authorization 262 | 263 | ÆGIS supports JSON Web Tokens for authorization of protected routes. To enable, you must provide a JSON Web Key URI to retrieve the public key of the signer of the JSON Web Token. You can set up an account with Auth0 for testing purposes. You update the key set configuration in the `public/aegis.config.json` file. 264 | 265 | public/aegis.config.json 266 | 267 | ```json 268 | { 269 | "cache": true, 270 | "rateLimit": true, 271 | "jwksRequestsPerMinute": 5, 272 | "jwksUri": "https://dev-2fe2iar6.us.auth0.com/.well-known/jwks.json", 273 | "audience": "https://aegis.io/", 274 | "issuer": "https://dev-2fe2iar6.us.auth0.com/", 275 | "algorithms": ["RS256"] 276 | } 277 | ``` 278 | 279 | .env 280 | 281 | ```shell 282 | AUTH_ENABLED=true 283 | ``` 284 | 285 | ### Transport Layer Security (HTTPS) 286 | 287 | When ÆGIS starts (in server mode), it will check for the presence of `certificate.pem` and `privatekey.pem` files in the cert folder. If not there, it will automatically provision an x509 certificate for your domain using the [ACME standard](https://datatracker.ietf.org/doc/html/rfc8555) and write the files to the `cert` directory. 
The following environment must be set. Note: if `NODE_ENV` is set to anything other than `prod` the systems will provision a test certificate. 288 | 289 | .env 290 | 291 | ```shell 292 | NODE_ENV=prod 293 | DOMAIN=aegis.module-federation.org 294 | SSL_ENABLED=true 295 | ``` 296 | 297 | ### Importing your own repo 298 | 299 | Two options are available: EASY BUTTON or DIY. 300 | 301 | #### EASY BUTTON 302 | 303 | Click [here](https://github.com/module-federation/aegis-app/generate) to generate the scaffolding for building a federated application with Aegis. 304 | 305 | #### DIY 306 | 307 | To import your own models, update the `webpack/remote-entries.js` to point to your remoteEntry.js file and change owner, repo, filedir, and branch accordingly, if using GitHub as a remote. You must specify these same attributes in your repo, only in webpack.config.js [publicPath as URL params](https://github.com/module-federation/aegis-Example/blob/master/webpack.config.js). Also from aegis-example, you'll need the same version of webpack and the [extensions in the webpack dir](https://github.com/module-federation/aegis-app/tree/master/webpack). 308 | 309 | ### Installation 310 | 311 | [![install movie](https://img.youtube.com/vi/sHZgpIA_iWY/maxresdefault.jpg)](https://youtu.be/sHZgpIA_iWY) 312 | 313 | ### Zero Downtime - Zero Install Deployment, API Generation 314 | 315 | [![hot deployment](https://img.youtube.com/vi/WqRlSnBxLYw/mqdefault.jpg)](https://youtu.be/WqRlSnBxLYw) 316 | 317 | ### Reference Architecture 318 | 319 | [![ref arch](https://img.youtube.com/vi/6GJYX9cmk2Q/maxresdefault.jpg)](https://youtu.be/6GJYX9cmk2Q) 320 | 321 | Aegis prevents vendor lock-in by providing a layer of abstraction on top of vendor serverless frameworks. A vendors API gateway simply proxies requests to the Aegis serverless function, which is the only function adapted to the vendor's platform. From that point on, Aegis handles the "deployment" of functions as federated modules. 
Developers don't even need to know what cloud is hosting their software! 322 | 323 | ## Further Reading 324 | 325 | [Federated Microservices: Manageable Autonomy](https://trmidboe.medium.com/federated-applications-ba396d6925b1) 326 | 327 | [Stop Paying the Microservice Premium: Eliminating the Microservices Deployment/Distribution Trade-Off](https://trmidboe.medium.com/discounting-the-microservice-premium-a95311c61367) 328 | 329 | [Federated Applications: E Pluribus Unum](https://trmidboe.medium.com/federated-applications-e-plurbus-unum-2cc7850250a0?sk=08d98f5ae22695c2296fad382fb6006f) 330 | 331 | [Self-Deploying Applications: Deployment Automation That Works Anywhere](https://trmidboe.medium.com/what-is-a-self-deploying-or-installation-free-application-658f4d79082d?sk=3e27745b6660fa2d6837545c8e075ad3) 332 | 333 | [Cell-based Architecture and Federation](https://trmidboe.medium.com/cell-based-architecture-and-federated-microservices-4fc0cf3df5a6?sk=d50a09dcec880da26378f5e7522eb0b6) 334 | 335 | [Clean Microservices: Building Composable Microservices with Module Federation](https://trmidboe.medium.com/clean-microservices-building-composable-microservices-with-module-federation-f1d2b03d2b27) 336 | 337 | [Webpack 5 Module Federation: A game-changer in JavaScript architecture](https://medium.com/swlh/webpack-5-module-federation-a-game-changer-to-javascript-architecture-bcdd30e02669) 338 | 339 | [Microservice trade-offs](https://martinfowler.com/articles/microservice-trade-offs.html) 340 | 341 | [Microservice Library Videos](https://www.youtube.com/channel/UCT-3YJ2Ilgcjebqvs40Qz2A) 342 | 343 | 344 | 345 | . 
346 | -------------------------------------------------------------------------------- /__test__/adapters/event-adapter.no.js: -------------------------------------------------------------------------------- 1 | // var assert = require('assert'); 2 | // import Model from '../../src/domain/model'; 3 | // import { listen } from '../../src/adapters/event-adapter'; 4 | // import { Event } from '../services/event-service'; 5 | 6 | // describe('event-adapter', function () { 7 | // // describe('listen()', async function () { 8 | // it('should automatically unsubscribe on receipt of message', async function () { 9 | // const id = {}; 10 | // function make() { 11 | // return (...b) => ({ a: 'a', b }); 12 | // } 13 | // const adapters = { 14 | // listen: listen(Event), 15 | // async test({ model }) { 16 | // const subscription = await model.listen({ 17 | // topic: 'test', 18 | // id: id, 19 | // filter: 'test', 20 | // once: true, 21 | // model, 22 | // callback: ({ subscription }) => subscription 23 | // }); 24 | // console.log({ subscriptions: subscription.getSubscriptions()[0] }); 25 | // } 26 | // } 27 | // const model = await Model.create({ 28 | // spec: { 29 | // modelName: 'ABC', 30 | // factory: make(), 31 | // ports: { 32 | // listen: { 33 | // type: 'outbound' 34 | // }, 35 | // test: { 36 | // type: 'outbound' 37 | // } 38 | // }, 39 | // dependencies: adapters 40 | // }, 41 | // args: [{ c: 'c' }] 42 | // }); 43 | // const subscription = await model.test(); 44 | // console.log({ subscriptions: subscription.getSubscriptions()[0] }); 45 | // assert.strictEqual( 46 | // false, subscription.getSubscriptions()[0][1].delete(id) 47 | // ); 48 | // }); 49 | // }); 50 | // }); 51 | -------------------------------------------------------------------------------- /__test__/adapters/serverless.test.js: -------------------------------------------------------------------------------- 1 | const assert = require('assert') 2 | const { handleServerless } = 
require('../../src/server-less') 3 | 4 | describe('adapters', function () { 5 | describe('serverless', function () { 6 | it('should return a response', async function () { 7 | const resp = await handleServerless({ 8 | httpMethod: 'get', 9 | path: '/aegis/api/config', 10 | query: { isCached: false }, 11 | headers: { 'idempotency-key': '123' } 12 | }) 13 | if (resp) { 14 | assert.ok(resp) 15 | console.log(resp) 16 | } else { 17 | assert.fail('no resp') 18 | } 19 | }) 20 | }) 21 | }) 22 | -------------------------------------------------------------------------------- /__test__/controllers/post-model.test.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { 4 | AppError, 5 | ThreadPoolFactory, 6 | EventBrokerFactory, 7 | DataSourceFactory, 8 | DomainEvents, 9 | default: ModelFactory 10 | } = require('@module-federation/aegis/lib/domain') 11 | 12 | var assert = require('assert') 13 | 14 | const makeCreateModel = require('@module-federation/aegis/lib/domain/use-cases/create-model') 15 | .default 16 | const postModelFactory = require('@module-federation/aegis/lib/adapters/controllers/post-model') 17 | .default 18 | 19 | describe('Controllers', function () { 20 | describe('postModel()', function () { 21 | it('should add new model', async function () { 22 | ModelFactory.registerModel({ 23 | modelName: 'ABC', 24 | factory: ({ a }) => ({ a, b: 'c' }), 25 | endpoint: 'abcs', 26 | dependencies: {} 27 | }) 28 | ModelFactory.registerEvent( 29 | ModelFactory.EventTypes.CREATE, 30 | 'ABC', 31 | model => ({ model }) 32 | ) 33 | const createModel = makeCreateModel({ 34 | modelName: 'ABC', 35 | models: ModelFactory, 36 | repository: DataSourceFactory.getDataSource('ABC'), 37 | //broker: EventBrokerFactory.getInstance(), 38 | domainEvents: DomainEvents, 39 | AppError: AppError, 40 | threadpool: ThreadPoolFactory.getThreadPool('ABC') 41 | }) 42 | const resp = await postModelFactory(createModel)({ 43 | body: { a: 'a' }, 44 
| headers: { 'User-Agent': 'test' }, 45 | ip: '127.0.0.1', 46 | log: () => 1 47 | }) 48 | console.log('resp.status', resp.statusCode) 49 | assert.strictEqual(resp.statusCode, 201) 50 | }) 51 | }) 52 | }) 53 | -------------------------------------------------------------------------------- /__test__/datasources/datasource-file.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | process.env.DATASOURCE_ADAPTER = "DataSourceFile"; 5 | const { 6 | default: DataSourceFactory, 7 | } = require("@module-federation/aegis/lib/domain/datasource-factory"); 8 | 9 | describe("datasources", function () { 10 | var ds = DataSourceFactory.getDataSource("test"); 11 | ds.load({ 12 | name: "test", 13 | }); 14 | ds.save(1, "data"); 15 | console.log("record", ds.find(1)); 16 | it("read from file", function () {}); 17 | }); 18 | -------------------------------------------------------------------------------- /__test__/lambda/apigateway-getid.json: -------------------------------------------------------------------------------- 1 | { 2 | "resource": "/Aegis/api/domain/{any+}", 3 | "path": "/aegis/api/models/orders/9f1e89a7-2c7d-4021-94b8-1648ea0410d8", 4 | "httpMethod": "GET", 5 | "headers": { 6 | "Accept-Encoding": "gzip, deflate, br" 7 | }, 8 | "multiValueHeaders": { 9 | "Accept": "[Array]" 10 | }, 11 | "queryStringParameters": { "count": "all" }, 12 | "pathParameters": { "id": "2039895f-d9dd-427f-a52e-4e85f1f0c7d1" }, 13 | "body": null, 14 | "isBase64Encoded": false 15 | } 16 | -------------------------------------------------------------------------------- /__test__/lambda/apigateway-post.json: -------------------------------------------------------------------------------- 1 | { 2 | "resource": "/Aegis/api/models/{any+}", 3 | "path": "/Aegis/api/models/orders", 4 | "httpMethod": "POST", 5 | "headers": { 6 | "Accept-Encoding": "gzip, deflate, br" 7 | }, 8 | "multiValueHeaders": { 9 | "Accept": 
"[Array]" 10 | }, 11 | "queryStringParameters": { "count": "all" }, 12 | "pathParameters": { "id": "9f1e89a7-2c7d-4021-94b8-1648ea0410d8" }, 13 | "body": { 14 | "firstName": "Uncle, \n ", 15 | "lastName": "Bob, \n", 16 | "email": "bob@email.com ", 17 | "creditCardNumber": "378282246310005", 18 | "shippingAddress": "123 Park Ave. NY, NY 45678 \n", 19 | "billingAddress": "123 Park Ave. NY, NY 45678 \n", 20 | "orderItems": [ 21 | { 22 | "itemId": "item1", 23 | "price": 329.95 24 | }, 25 | { 26 | "itemId": "item2 ", 27 | "price": 39.95 28 | } 29 | ] 30 | }, 31 | "isBase64Encoded": false 32 | } 33 | -------------------------------------------------------------------------------- /__test__/lambda/lambda-get-by-id.json: -------------------------------------------------------------------------------- 1 | { 2 | "path": "/aegis/api/models/orders", 3 | "httpMethod": "post", 4 | "pathParameters": { "id": "9753f3ca-2de9-462f-9bf6-a5d8061c87d6" }, 5 | "isBase64Encoded": false 6 | } 7 | -------------------------------------------------------------------------------- /__test__/lambda/lambda-html.json: -------------------------------------------------------------------------------- 1 | { 2 | "resource": "", 3 | "path": "public", 4 | "httpMethod": "GET", 5 | "headers": { 6 | "Accept": "application/x-javascript,text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8", 7 | "Accept-Encoding": "gzip, deflate, br", 8 | "Accept-Language": "en-GB,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4", 9 | "cache-control": "max-age=0", 10 | "CloudFront-Forwarded-Proto": "https", 11 | "CloudFront-Is-Desktop-Viewer": "true", 12 | "CloudFront-Is-Mobile-Viewer": "false", 13 | "CloudFront-Is-SmartTV-Viewer": "false", 14 | "CloudFront-Is-Tablet-Viewer": "false", 15 | "CloudFront-Viewer-Country": "GB", 16 | "content-type": "application/x-www-form-urlencoded", 17 | "upgrade-insecure-requests": "1", 18 | "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 
(KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36", 19 | "X-Forwarded-Port": "443", 20 | "X-Forwarded-Proto": "https" 21 | }, 22 | "queryStringParameters": null, 23 | "pathParameters": null, 24 | "stageVariables": null, 25 | "requestContext": { 26 | "path": "/dev/", 27 | "resourcePath": "/", 28 | "httpMethod": "GET" 29 | }, 30 | 31 | "multiValueHeaders": {}, 32 | 33 | "multiValueQueryStringParameters": {}, 34 | 35 | "body": {}, 36 | "isBase64Encoded": false 37 | } 38 | -------------------------------------------------------------------------------- /__test__/lambda/lambda-post.json: -------------------------------------------------------------------------------- 1 | { 2 | "firstName": "Uncle", 3 | "lastName": "Bob", 4 | "email": "bob@email.com", 5 | "creditCardNumber": "378282246310005", 6 | "shippingAddress": "123 Park Ave. NY, NY 45678", 7 | "billingAddress": "123 Park Ave. NY, NY 45678", 8 | "orderItems": [ 9 | { 10 | "itemId": "item1", 11 | "price": 329.95 12 | }, 13 | { 14 | "itemId": "item2", 15 | "price": 59, 16 | "qty": 4 17 | } 18 | ] 19 | } 20 | -------------------------------------------------------------------------------- /__test__/models/circuit-breaker.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | 5 | import CircuitBreaker from "@module-federation/aegis/lib/domain/circuit-breaker"; 6 | import Model from "@module-federation/aegis/lib/domain/circuit-breaker"; 7 | 8 | describe("Model", function () { 9 | const breaker = CircuitBreaker( 10 | "myfunc", 11 | x => { 12 | if (x === 1) { 13 | throw Error("error"); 14 | } 15 | }, 16 | { 17 | default: { 18 | errorRate: 100, 19 | callVolume: 2, 20 | intervalMs: 1000, 21 | testDelay: 2000, 22 | }, 23 | } 24 | ); 25 | 26 | for (const i = 0; i > 10; i++) breaker.invoke(1); 27 | }); 28 | -------------------------------------------------------------------------------- /__test__/models/mixins.js: 
-------------------------------------------------------------------------------- 1 | // "use strict"; 2 | 3 | // var assert = require("assert"); 4 | // const { 5 | // fromSymbol, 6 | // toSymbol, 7 | // fromTimestamp, 8 | // withSerializers, 9 | // withDeserializers, 10 | // } = require("../../src/domain/mixins"); 11 | 12 | // describe("Mixins", function () { 13 | // it("should return strings in place of symbols", function () { 14 | // const ID = Symbol("id"); 15 | // const CREATETIME = Symbol("createTime"); 16 | 17 | // const keyMap = { 18 | // id: ID, 19 | // createTime: CREATETIME, 20 | // }; 21 | // var time = new Date().getTime(); 22 | // var obj1 = { 23 | // [ID]: "123", 24 | // [CREATETIME]: time, 25 | // id: "123", 26 | // createTime: time, 27 | // }; 28 | // // console.log("obj1", obj1); 29 | // var obj2 = fromSymbol(keyMap)(obj1); 30 | // // console.log("obj2", obj2); 31 | // assert.strictEqual(JSON.stringify(obj1), JSON.stringify(obj2)); 32 | // }); 33 | // it("should return Symbols in place of strings", function () { 34 | // const ID = Symbol("id"); 35 | // const CREATETIME = Symbol("createTime"); 36 | 37 | // const keyMap = { 38 | // id: ID, 39 | // createTime: CREATETIME, 40 | // }; 41 | // var time = new Date().getTime(); 42 | // var obj1 = { 43 | // id: "123", 44 | // createTime: time, 45 | // }; 46 | // // console.log("obj1", obj1); 47 | // var obj2 = toSymbol(keyMap)(obj1); 48 | // // console.log("obj2", obj2); 49 | // assert.strictEqual(JSON.stringify(obj1), JSON.stringify(obj2)); 50 | // }); 51 | // it("should return utc in place of timestamp", function () { 52 | // var time = new Date().getTime(); 53 | // var obj1 = { 54 | // createTime: time, 55 | // updateTime: time, 56 | // }; 57 | // // console.log("obj1", obj1); 58 | // var obj2 = fromTimestamp(["createTime", "updateTime"])(obj1); 59 | // // console.log("obj2", obj2); 60 | // assert.strictEqual( 61 | // new Date(obj1.createTime).toUTCString(), 62 | // obj2.createTime 63 | // ); 64 | // 
assert.strictEqual( 65 | // new Date(obj1.updateTime).toUTCString(), 66 | // obj2.updateTime 67 | // ); 68 | // }); 69 | // it("should return serialized output", function () { 70 | // var time = new Date().getTime(); 71 | // const ID = Symbol("id"); 72 | // const CREATETIME = Symbol("createTime"); 73 | // const keyMap = { 74 | // id: ID, 75 | // createTime: CREATETIME, 76 | // }; 77 | // var obj1 = { 78 | // [ID]: "123", 79 | // [CREATETIME]: time, 80 | // }; 81 | // var serialize = withSerializers( 82 | // fromSymbol(keyMap), 83 | // fromTimestamp(["createTime"]) 84 | // ); 85 | // var obj2 = serialize(obj1); 86 | // var obj3 = { 87 | // createTime: new Date(time).toUTCString(), 88 | // id: "123", 89 | // }; 90 | // // console.log(fromTimestamps(["createTime"]).toString()); 91 | // // console.log(fromSymbols(keyMap).toString()); 92 | // // console.log(withSerializer( 93 | // // fromSymbols(keyMap), 94 | // // fromTimestamps(["createTime"]) 95 | // // ).toString()); 96 | // //console.log(makeModel.toString()); 97 | // // console.log("obj1", obj1); 98 | // // console.log("obj2", obj2); 99 | // // console.log("obj3", obj3); 100 | // // console.log("stringify(obj1)", JSON.stringify(obj1)); 101 | // // console.log("stringify(obj2)", JSON.stringify(obj2)); 102 | // // console.log("stringify(obj3)", JSON.stringify(obj3)); 103 | // assert.strictEqual(JSON.stringify(obj2), JSON.stringify(obj3)); 104 | // }); 105 | 106 | // it("should return deserialized output", function () { 107 | // var time = new Date().getTime(); 108 | // const ID = Symbol("id"); 109 | // const CREATETIME = Symbol("createTime"); 110 | // const keyMap = { 111 | // id: ID, 112 | // createTime: CREATETIME, 113 | // }; 114 | // var obj1 = { 115 | // [ID]: "123", 116 | // [CREATETIME]: time, 117 | // }; 118 | // var serialize = withSerializers( 119 | // fromSymbol(keyMap), 120 | // fromTimestamp(["createTime"]) 121 | // ); 122 | // var deserialize = withDeserializers( 123 | // toSymbol(keyMap), 124 | // ); 
125 | // var obj2 = deserialize(serialize(obj1)); 126 | // var obj3 = { 127 | // createTime: new Date(time).toUTCString(), 128 | // id: "123", 129 | // }; 130 | // // console.log(fromTimestamps(["createTime"]).toString()); 131 | // // console.log(fromSymbols(keyMap).toString()); 132 | // // console.log(withSerializer( 133 | // // fromSymbols(keyMap), 134 | // // fromTimestamps(["createTime"]) 135 | // // ).toString()); 136 | // //console.log(makeModel.toString()); 137 | // console.log("obj1", obj1); 138 | // console.log("obj2", obj2); 139 | // console.log("obj3", obj3); 140 | // console.log("parse(obj1)", JSON.parse(JSON.stringify(obj1))); 141 | // console.log("parse(obj2)", JSON.parse(JSON.stringify(obj2))); 142 | // console.log("parse(obj3)", JSON.parse(JSON.stringify(obj3))); 143 | // assert.strictEqual(JSON.parse(JSON.stringify(obj2)), JSON.parse(JSON.stringify(obj))); 144 | // }); 145 | // }); 146 | -------------------------------------------------------------------------------- /__test__/models/model-factory.test.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | 5 | import ModelFactory from "@module-federation/aegis/lib/domain"; 6 | 7 | describe("ModelFactory", function () { 8 | describe("#createModel()", function () { 9 | it("should register & create model", async function () { 10 | ModelFactory.registerModel({ 11 | modelName: "ABC", 12 | factory: ({ a }) => ({ a, b: "c" }), 13 | endpoint: "abcs", 14 | dependencies: {}, 15 | }); 16 | const spec = ModelFactory.getModelSpec("ABC"); 17 | assert.strictEqual(spec.modelName, "ABC"); 18 | }); 19 | // it("should have props from args", async function () { 20 | // ModelFactory.registerModel({ 21 | // modelName: "ABC", 22 | // factory: ({ a }) => ({ a: "a", b: "c" }), 23 | // endpoint: "abcs", 24 | // dependencies: {}, 25 | // }); 26 | 27 | // const model = await ModelFactory.createModel("ABC"); 28 | // 
assert.strictEqual(model.a, "a"); 29 | // }); 30 | }); 31 | }); 32 | -------------------------------------------------------------------------------- /__test__/models/model.test.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var assert = require("assert"); 4 | 5 | import Model from "@module-federation/aegis/lib/domain/model"; 6 | 7 | describe("Model", function () { 8 | describe("#create()", function () { 9 | it("should return new model", async function () { 10 | const model = await Model.create({ 11 | spec: { 12 | modelName: "ABC", 13 | factory: ({ b }) => ({ a: "a", b }), 14 | }, 15 | args: [{ c: "c" }], 16 | }); 17 | assert.ok(model); 18 | }); 19 | }); 20 | describe("#injection()", function () { 21 | it("dependency injection should work", async function () { 22 | function make(dependencies) { 23 | return async (...b) => ({ 24 | a: "a", 25 | b, 26 | injection: dependencies.injection, 27 | }); 28 | } 29 | const dependencies = { 30 | injection() { 31 | return this; 32 | }, 33 | }; 34 | const model = await Model.create({ 35 | spec: { 36 | modelName: "ABC", 37 | factory: make(dependencies), 38 | dependencies, 39 | }, 40 | args: [{ c: "c" }], 41 | }); 42 | assert.strictEqual(model, model.injection()); 43 | }); 44 | }); 45 | describe("#port1()", function () { 46 | it("should generate port and attach to adapter", async function () { 47 | const adapters = { 48 | async port1({ model }) { 49 | console.log(model); 50 | }, 51 | }; 52 | 53 | function make() { 54 | return (...b) => ({ a: "a", b }); 55 | } 56 | 57 | const model = await Model.create({ 58 | spec: { 59 | modelName: "ABC", 60 | factory: make(), 61 | ports: { 62 | port1: { 63 | type: "outbound", 64 | }, 65 | }, 66 | dependencies: { ...adapters }, 67 | }, 68 | args: [{ c: "c" }], 69 | }); 70 | assert.ok(model.port1()); 71 | }); 72 | }); 73 | describe("#getName()", function () { 74 | it("should return model name", async function () { 75 | const model = 
await Model.create({ 76 | spec: { 77 | modelName: "ABC", 78 | factory: ({ b }) => ({ a: "a", b }), 79 | }, 80 | args: [{ b: "c" }], 81 | }); 82 | assert.ok(Model.getId(model)); 83 | assert.strictEqual(Model.getName(model), "ABC"); 84 | }); 85 | }); 86 | describe("#a", function () { 87 | it("should return model prop", async function () { 88 | const model = await Model.create({ 89 | spec: { 90 | modelName: "ABC", 91 | factory: ({ b }) => ({ a: "a", b }), 92 | }, 93 | args: [{ b: "c" }], 94 | }); 95 | assert.strictEqual(model.a, "a"); 96 | }); 97 | }); 98 | describe("#b", function () { 99 | it("should return model prop with args value", async function () { 100 | const model = await Model.create({ 101 | spec: { 102 | modelName: "ABC", 103 | factory: ({ b }) => ({ a: "a", b }), 104 | }, 105 | args: [{ b: "c" }], 106 | }); 107 | assert.strictEqual(model.b, "c"); 108 | }); 109 | }); 110 | describe("#getKey()", function () { 111 | it("should return key", async function () { 112 | const model = await Model.create({ 113 | spec: { 114 | modelName: "ABC", 115 | factory: ({ b }) => ({ a: "a", b }), 116 | }, 117 | args: [{ b: "c" }], 118 | }); 119 | assert.ok(Model.getKey("onUpdate")); 120 | }); 121 | }); 122 | describe("#onUpdate()", function () { 123 | it("should return updated model", async function () { 124 | const model = await Model.create({ 125 | spec: { 126 | modelName: "ABC", 127 | factory: ({ b }) => ({ a: "a", b }), 128 | }, 129 | args: [{ b: "c" }], 130 | }); 131 | assert.strictEqual(model.a, "a"); 132 | const updated = model[Model.getKey("onUpdate")]({ a: "b" }); 133 | assert.strictEqual(updated.a, "b"); 134 | }); 135 | }); 136 | }); 137 | -------------------------------------------------------------------------------- /__test__/services/event-service.js: -------------------------------------------------------------------------------- 1 | 2 | 3 | export const Event = { 4 | 5 | async listen(topic, callback) { 6 | setTimeout(() => callback({ topic, message: 'test' }), 
1000); 7 | } 8 | 9 | } -------------------------------------------------------------------------------- /__test__/use-cases/add-model.js: -------------------------------------------------------------------------------- 1 | // 'use strict' 2 | 3 | // var assert = require('assert'); 4 | 5 | // import addModelFactory from '../../src/use-cases/add-model' 6 | // import DataSourceFactory from '../../src/datasources' 7 | // import ModelFactory from '../../src/domain'; 8 | // import ObserverFactory from '../../src/lib/observer'; 9 | 10 | // describe('Use-Cases', function () { 11 | // describe('addModel()', function () { 12 | // it('should add new model', async function () { 13 | // ModelFactory.registerModel({ 14 | // modelName: 'ABC', 15 | // factory: ({ a }) => ({ a, b: 'c' }), 16 | // endpoint: 'abcs', 17 | // dependencies: {} 18 | // }); 19 | // ModelFactory.registerEvent( 20 | // ModelFactory.EventTypes.CREATE, 21 | // 'ABC', 22 | // (model) => ({model}) 23 | // ); 24 | // const model = await addModelFactory({ 25 | // modelName: 'ABC', 26 | // models: ModelFactory, 27 | // repository: DataSourceFactory.getDataSource('ABC'), 28 | // observer: ObserverFactory.getInstance() 29 | // })({ a: 'a' }); 30 | // assert.strictEqual(model.a, { a: 'a' }.a); 31 | // }); 32 | // }); 33 | // }); 34 | -------------------------------------------------------------------------------- /__test__/use-cases/execute-command.js: -------------------------------------------------------------------------------- 1 | // var assert = require("assert"); 2 | // import DataSourceFactory from "../../src/datasources"; 3 | // import ObserverFactory from "../../src/lib/observer"; 4 | // import ModelFactory from "../../src/models/model-factory"; 5 | // import checkAcl from "../../src/lib/check-acl"; 6 | 7 | // const { 8 | // default: executeCommand, 9 | // } = require("../../src/use-cases/execute-command"); 10 | 11 | // describe("executeCommand()", function () { 12 | // it("should add new model", async 
function () { 13 | // const spec = { 14 | // modelName: "test", 15 | // endpoint: "tests", 16 | // factory: () => ({ decrypt: () => console.log("decrypted") }), 17 | // commands: { 18 | // decrypt: { 19 | // command: "decrypt", 20 | // acl: "read", 21 | // }, 22 | // }, 23 | // }; 24 | 25 | // ModelFactory.registerModel(spec); 26 | 27 | // var m = await ModelFactory.createModel( 28 | // ObserverFactory.getInstance(), 29 | // DataSourceFactory.getDataSource("TEST"), 30 | // "TEST", 31 | // [1, 2, 3] 32 | // ); 33 | // //checkAcl(spec.commands[query.command].acl, permission) 34 | // console.log(m); 35 | // await executeCommand(ModelFactory, m, { command: "decrypt" }, "*"); 36 | // }); 37 | // }); 38 | -------------------------------------------------------------------------------- /cert/README.md: -------------------------------------------------------------------------------- 1 | This is here for the folder structure. The private key and cert for TLS go here. The system handles this programmatically. No manual effort to get certs or renew them. -------------------------------------------------------------------------------- /cert/mesh/keys.js: -------------------------------------------------------------------------------- 1 | //import the methods 2 | const { generateKeyPair, createSign, createVerify } = require('crypto') 3 | const fs = require('fs') 4 | const path = require('path') 5 | //generate the key pair 6 | generateKeyPair( 7 | 'rsa', 8 | { 9 | modulusLength: 2048, // It holds a number. It is the key size in bits and is applicable for RSA, and DSA algorithm only. 10 | publicKeyEncoding: { 11 | type: 'pkcs1', //Note the type is pkcs1 not spki 12 | format: 'pem' 13 | }, 14 | privateKeyEncoding: { 15 | type: 'pkcs1', //Note again the type is set to pkcs1 16 | format: 'pem' 17 | //cipher: "aes-256-cbc", //Optional 18 | //passphrase: "", //Optional 19 | } 20 | }, 21 | (err, publicKey, privateKey) => { 22 | // Handle errors and use the generated key pair. 
23 | if (err) console.log('Error!', err) 24 | console.log({ 25 | publicKey, 26 | privateKey 27 | }) //Print the keys to the console or save them to a file. 28 | 29 | fs.writeFileSync(path.join(__dirname, 'publicKey.pem'), publicKey, 'utf-8') 30 | fs.writeFileSync( 31 | path.join(__dirname, 'privateKey.pem'), 32 | privateKey, 33 | 'utf-8' 34 | ) 35 | /* 36 | * At this point you will have to pem files, 37 | * the public key which will start with 38 | * '-----BEGIN RSA PUBLIC KEY-----\n' + 39 | * and the private key which will start with 40 | * '-----BEGIN RSA PRIVATE KEY-----\n' + 41 | */ 42 | //Verify it works by signing some data and verifying it. 43 | //Create some sample data that we want to sign 44 | const verifiableData = 'this need to be verified' 45 | 46 | // The signature method takes the data we want to sign, the 47 | // hashing algorithm, and the padding scheme, and generates 48 | // a signature in the form of bytes 49 | const signature = require('crypto').sign( 50 | 'sha256', 51 | Buffer.from(verifiableData), 52 | { 53 | key: privateKey, 54 | padding: require('crypto').constants.RSA_PKCS1_PSS_PADDING 55 | } 56 | ) 57 | //Convert the signature to base64 for storage. 
58 | console.log(signature.toString('base64')) 59 | 60 | // To verify the data, we provide the same hashing algorithm and 61 | // padding scheme we provided to generate the signature, along 62 | // with the signature itself, the data that we want to 63 | // verify against the signature, and the public key 64 | const isVerified = require('crypto').verify( 65 | 'sha256', 66 | Buffer.from(verifiableData), 67 | { 68 | key: publicKey, 69 | padding: require('crypto').constants.RSA_PKCS1_PSS_PADDING 70 | }, 71 | Buffer.from(signature.toString('base64'), 'base64') 72 | ) 73 | 74 | // isVerified should be `true` if the signature is valid 75 | console.log('signature verified: ', isVerified) 76 | } 77 | ) 78 | -------------------------------------------------------------------------------- /cert/mesh/privateKey.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEAxaYqvTbWM+0aRsbJz4VIol6vxJMEFOyvAYBQtzM7yxRPsxjF 3 | VG43SvdW6L6p9/k56r4xZKUg46/rA8fRorWvd713MRfRmEj9HqsfrEq5cZfHFJb4 4 | gGBszpUY+g5aLMEYKXUjsPAxAhHWupN+ZW8jydDny/0dVRTIXf5Y0bDWg3BfuNx0 5 | 3e57VQZ1BYjyok9acz+B0vPETXK5fpqD0iX4gCniXyHeP+ACgsHRz8fp68eGaEw3 6 | ++0FL91Ytds61JriyAVGk8aVOVWWQBQHaI703K30exygAuboVhksNXJuwC6uKImc 7 | MThkp+f22ueKa2xiA6X0iiQhBrUTWImoE2aIQwIDAQABAoIBAB3aknwQC1y+hIiK 8 | QDNbM3RrDuuHNUef1HNKmWaCk5aCE+O+axfsqedl4re3DN9KwsfFQJquIoChssLY 9 | bL7B6KYSdS1yJpN0+t6mBFu20bnprgVbJF/nsKend6VVg6Nc6zRf4Cs6rHxei2uV 10 | GbNijSpPpqN2MPcH85fW8j2CE3S4wUONStiKkSLH3IZzPO9+zsRCz/CIKMlk44Tw 11 | j3d4s31FsrZMcDp9lIDxr3wvb5zfBs91BvdAbc0EGPiNho56UvNPR39YVWBid2+b 12 | 2T5+X5w8+pKcKON1fip2hMTjOiYIpe8SNOEH0cZsmbO2GAfmt5/lqinay9ahcoHr 13 | suY77YkCgYEA+FFDdOy6Ro/SLjatZKxzO+pPThQXhx73hV3Ebmb8Ogbs65unHtJA 14 | vn2KyN0WWm8pwjruwsWsHn2nChdAOxisl7cEoAPwbCb4JPOoBIc1rLDSdK7uSDSj 15 | 5Rncq8+LbDQNGluon/3SMptSVBgQ96VomxgAeRXRadT6Dj9yQlzvJQUCgYEAy8OY 16 | 8BFWtpxEGIIo0VyPsVJAs+48R3xiMrXYr2U17PkDcvL7jHvF7agBntfGLVXpUKso 17 | 
e7Ik8Z+5WJxrl7HFPYzsaANqJTnIEquir4VRkpWC03gc+iwTiorPkXS4j/MdnxVC 18 | veUgCgJ9KvbF0msEBaTUuqVOJWdVWyDypziNeqcCgYBu5yxvjkLrvNQsW7M5YWPe 19 | AJoGzyZq7MOzxtsyS8Ce0fUCjXQoadMMwNk5OLUBEuDdpKB6UPq8a3YtJZFEU5LR 20 | 96WTKe84/naZWlFSJGkuhZKZFn/Js+j3K4PJIFquqtinSwSW4C/+9ivg7xa05CVC 21 | D4g2jC4JTFuZ/XA0f3k3WQKBgQDG8Ev0Lul7VmwBhaq2ZWmBVW4IfhNqM91n5zUM 22 | YYNZvvEIHIVN7lYbJwmV/NLm1lb5xHG8+vuo0CnUgrtlGlIZOupgztymRZi56crn 23 | FQHEXKajMkUQV1Io1Ee9mrjLCjCkbcyAvCAgxbdQ6CfRvVvq6rgrFlOKny2t0Cg4 24 | 7TTsRwKBgQCKNEm66X32OfYl/052EHX1OvMvDW4jWaqd7Bsg+oCAzYnQhiUAtS5Q 25 | eCCarZDBDQyMCrvGtPRJszE0q6BD0ui4gXjqAPkLlwju+hR6tMtcZLvqrIzqqdl4 26 | zwDdN5aojL1CY75RW3Q93WtnDA9MXpdJA/f1Yj5q0zJ7l1mk4Y4Q3g== 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /cert/mesh/publicKey.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PUBLIC KEY----- 2 | MIIBCgKCAQEAxaYqvTbWM+0aRsbJz4VIol6vxJMEFOyvAYBQtzM7yxRPsxjFVG43 3 | SvdW6L6p9/k56r4xZKUg46/rA8fRorWvd713MRfRmEj9HqsfrEq5cZfHFJb4gGBs 4 | zpUY+g5aLMEYKXUjsPAxAhHWupN+ZW8jydDny/0dVRTIXf5Y0bDWg3BfuNx03e57 5 | VQZ1BYjyok9acz+B0vPETXK5fpqD0iX4gCniXyHeP+ACgsHRz8fp68eGaEw3++0F 6 | L91Ytds61JriyAVGk8aVOVWWQBQHaI703K30exygAuboVhksNXJuwC6uKImcMThk 7 | p+f22ueKa2xiA6X0iiQhBrUTWImoE2aIQwIDAQAB 8 | -----END RSA PUBLIC KEY----- 9 | -------------------------------------------------------------------------------- /dotenv.example: -------------------------------------------------------------------------------- 1 | 2 | # Example .env file - cp to .env and update as needed 3 | 4 | # Target environment: local, dev, test, prod 5 | NODE_ENV=local 6 | 7 | # is this instance an ægis webswitch? 
8 | SWITCH=false 9 | 10 | # override multicast dns and explicitly set switch url 11 | # SWITCH_OVERRIDE=true 12 | # SWITCH_HOST=8888-modulefederat-aegishost-njnlhwopdp4.ws-us65.gitpod.io 13 | # SWITCH_PORT=443 14 | # SWITCH_PROTO=wss 15 | 16 | # system provisions cert for this domain 17 | DOMAIN=localhost 18 | 19 | # Run in serverless mode (no web server) 20 | SERVERLESS=false 21 | 22 | # Create child processes, one per core, which share server 23 | # socket and take turns (round-robin) handling requests 24 | CLUSTER_ENABLED=false 25 | 26 | # Token-based authorization (JSON Web Tokens) 27 | # Secure protected routes. See key-set in aegis.config. 28 | AUTH_ENABLED=false 29 | 30 | # Use Secure Sockets Layer / TLS / HTTPS 31 | SSL_ENABLED=false 32 | SSL_PORT=443 33 | PORT=80 34 | 35 | # URL root level path 36 | API_ROOT=/aegis/api 37 | 38 | # Shared secret for crypto 39 | ENCRYPTION_PWD=b!gSecr3t 40 | 41 | # Cloud serverless provider name 42 | # PROVIDER_NAME=azure 43 | # PROVIDER_NAME=google 44 | PROVIDER_NAME=aws 45 | 46 | # Get external IP 47 | CHECKIPHOST=checkip.amazonaws.com 48 | 49 | # Required by 3rd party client APIs 50 | MONGODB_URL=mongodb://localhost:27017 51 | KAFKA_GROUP_ID=aegis-host 52 | # AWS_ACCESS_KEY_ID=xxxxxx 53 | # AWS_SECRET_ACCESS_KEY=xxxxxx 54 | # GITHUB_TOKEN=xxxxxx 55 | # SQUARE_TOKEN=xxxxxx 56 | # PAYPAL_TOKEN=xxxxxx 57 | -------------------------------------------------------------------------------- /mongo.sh: -------------------------------------------------------------------------------- 1 | mkdir -p /tmp/mongodb && \ 2 | cd /tmp/mongodb && \ 3 | wget -qOmongodb.tgz https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2004-5.0.2.tgz && \ 4 | tar xf mongodb.tgz 5 | pwd && ls && \ 6 | cd mongodb-* && \ 7 | sudo cp bin/* /usr/local/bin/ && \ 8 | cd /workspace && rm -rf /tmp/mongodb && \ 9 | sudo mkdir -p /workspace/db && \ 10 | sudo chown gitpod:gitpod -R /workspace/db && \ 11 | mongod --dbpath /workspace/db 12 | 
-------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aegis-host", 3 | "version": "1.2.0", 4 | "description": "Federated Application Host", 5 | "installConfig": { 6 | "pnp": false 7 | }, 8 | "config": { 9 | "s3BucketName": "aegisserverless2", 10 | "region": "us-east-1", 11 | "cloudFormationStackName": "ServerlessExpressHttpApiStack", 12 | "functionName": "", 13 | "domain": "" 14 | }, 15 | "repository": { 16 | "url": "https://github.com/module-federation/aegis-host" 17 | }, 18 | "scripts": { 19 | "build": "yarn clean && webpack --config webpack.config.js", 20 | "test": "jest", 21 | "transpile": "babel src --out-dir dist", 22 | "start": "node --title aegis --trace-warnings src/bootstrap.js", 23 | "demo": "open http://localhost", 24 | "reload": "curl -s http://localhost/reload", 25 | "stop": "PID=$(lsof -P -i | grep aegis | grep LISTEN | awk '{print $2}'); kill $PID", 26 | "restart": "yarn build && yarn start", 27 | "webswitch": "cd ../aegis && export SWITCH=true && export PORT=8888 && ./start.sh ", 28 | "gen-keys": "node cert/mesh/keys.js", 29 | "login-status": "echo \"/workspace/aegis-host/status.sh\" >> ~/.aegis && echo \". 
~/.aegis\"", 30 | "cmd-log": "echo \"alias log='view ~/aegis-host/public/aegis.log'\" >> ~/.aegis", 31 | "cmd-tail": "echo \"alias tail='tail -f ~/aegis-host/public/aegis.log'\" >> ~/.aegis", 32 | "cmd-start": "echo \"alias start='/workspace/aegis-host/start.sh'\" >> ~/.aegis", 33 | "cmd-status": "echo \"alias status='~/aegis-host/status.sh'\" >> ~/.aegis", 34 | "cmd-stop": "echo \"alias stop='/workspace/aegis-host/stop.sh'\" >> ~/.aegis", 35 | "cmd-env": "cp /workspace/aegis-host/dotenv.example ~/aegis-host/.env", 36 | "cmd-backupcfg": "cp public/aegis.config.json ../", 37 | "cmd-restorecfg": "cp ../aegis.config.json public/aegis.config.json", 38 | "cmd-backupremote": "cp ~/aegis-host/webpack/remote-entries/index.js ~", 39 | "cmd-restoreremote": "cp ~/index.js ~/aegis-host/webpack/remote-entries", 40 | "cmd-backup": "echo \"alias backup='yarn cmd-backupcfg && yarn cmd-backupremote'\" >> ~/.aegis", 41 | "cmd-restore": "echo \"alias restore='yarn cmd-restorecfg && yarn cmd-restoreremote'\" >> ~/.aegis", 42 | "stats": "git ls-files | xargs wc -l", 43 | "deploy-lambda": "yarn clean npm run transpile && webpack --config webpack.config.js --env serverless && serverless package && serverless deploy", 44 | "clean": "rimraf dist", 45 | "test-mocha": "mocha --recursive -r esm", 46 | "demo-ssl": "open https://localhost:443", 47 | "demo-aws": "open https://aegis.module-federation.org", 48 | "refresh-token": "./accessToken.sh", 49 | "serverless-deploy": "serverless deploy", 50 | "serverless-invoke-local-api-post": "serverless invoke local --function aegis --path ./test/lambda/apigateway-post.json", 51 | "serverless-invoke-local-api-getid": "serverless invoke local --function aegis --path ./test/lambda/apigateway-getid.json", 52 | "serverless-invoke-local-html": "serverless invoke local --function html --path ./test/lambda/lambda-html.json", 53 | "call-aegis-on-lambda": "aws lambda invoke --function arn:aws:lambda:us-east-1:758072746799:function:aegis-dev-aegis --payload 
file://test/lambda/lambda-post.json lambda.out --cmd-binary-format raw-in-base64-out && cat lambda.out", 54 | "create-bucket": "cross-var aws s3 mb s3://$npm_package_config_s3BucketName --region $npm_package_config_region", 55 | "delete-bucket": "cross-var aws s3 rb s3://$npm_package_config_s3BucketName --region $npm_package_config_region", 56 | "aws-package": "cross-var aws cloudformation package --template ./sam-template.yaml --s3-bucket $npm_package_config_s3BucketName --output-template sam-template.packaged.yaml --region $npm_package_config_region", 57 | "aws-deploy": "cross-var aws cloudformation deploy --template-file sam-template.packaged.yaml --stack-name $npm_package_config_cloudFormationStackName --capabilities CAPABILITY_IAM --region $npm_package_config_region --parameter-overrides DomainName=$npm_package_config_domain", 58 | "aws-package-deploy": "npm run build && npm run package && npm run deploy", 59 | "aws-delete-stack": "cross-var aws cloudformation delete-stack --stack-name $npm_package_config_cloudFormationStackName --region $npm_package_config_region", 60 | "aws-setup": "npm install && (cross-var aws s3api get-bucket-location --bucket $npm_package_config_s3BucketName --region $npm_package_config_region || npm run create-bucket) && npm run package-deploy" 61 | }, 62 | "author": "Tyson Midboe", 63 | "license": "Apache-2.0", 64 | "dependencies": { 65 | "@module-federation/aegis": "1.4.2-beta", 66 | "bufferutil": "^4.0.6", 67 | "cors": "^2.8.5", 68 | "dotenv": "16.0.3", 69 | "express": "^4.18.1", 70 | "express-attack": "^0.5.3", 71 | "helmet": "^6.0.1", 72 | "path-to-regexp": "^6.2.1", 73 | "pino": "^8.5.0", 74 | "pretty": "^2.0.0", 75 | "pretty-cli": "^0.0.14", 76 | "regenerator-runtime": "^0.13.9", 77 | "require-fresh": "^1.1.0", 78 | "rimraf": "^3.0.2", 79 | "rustwasmc": "^0.1.29", 80 | "ws": "^8.8.0", 81 | "yarn": "^1.22.19" 82 | }, 83 | "devDependencies": { 84 | "@babel/cli": "^7.18.6", 85 | "@babel/core": "^7.15.5", 86 | "babel-loader": 
"^8.2.2", 87 | "chalk": "^2.4.2", 88 | "cross-var": "^1.1.0", 89 | "esm": "^3.2.25", 90 | "express-cli": "0.0.1", 91 | "jest": "^27.0.6", 92 | "mocha": "^8.2.0", 93 | "split": "^1.0.1", 94 | "webpack": "5.47.1", 95 | "webpack-cli": "^4.10.0", 96 | "webpack-node-externals": "^3.0.0" 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /proxy.js: -------------------------------------------------------------------------------- 1 | var proxy = require('express-http-proxy') 2 | var app = require('express')() 3 | 4 | app.use('/', proxy(process.argv[2])) 5 | app.listen(process.argv[3], () => 6 | console.log( 7 | 'proxy redirect to', 8 | process.argv[2], 9 | 'from port', 10 | process.argv[3] 11 | ) 12 | ) 13 | -------------------------------------------------------------------------------- /public/.well-known/acme-challenge/README.md: -------------------------------------------------------------------------------- 1 | # Automatic Certificate Management Environment 2 | This folder is required by the ACME HTTP test. 
3 | See the RFC [here.](https://datatracker.ietf.org/doc/html/rfc8555) -------------------------------------------------------------------------------- /public/.well-known/acme-challenge/RK65fkXKs7LTryMYAc4u4RCSboUBRLFsg8TWCk9ldHo: -------------------------------------------------------------------------------- 1 | RK65fkXKs7LTryMYAc4u4RCSboUBRLFsg8TWCk9ldHo.La2nskmjZgP9FFp-K_vO-CDkJ-kPIaoXc-_bLG-_7FE -------------------------------------------------------------------------------- /public/.well-known/acme-challenge/SMzetSJCRZfeNEV1lCstszji9iDIIqS1WZYokbcGSiQ: -------------------------------------------------------------------------------- 1 | SMzetSJCRZfeNEV1lCstszji9iDIIqS1WZYokbcGSiQ.iPhdfXpTI6oJ03NMjunfDMyaQFOjYnuV0lGLsSfhkbM -------------------------------------------------------------------------------- /public/.well-known/acme-challenge/sR6_IXaiFeIhYjp91yxSezMLMqPPiZ23PvfTWr6Pl-A: -------------------------------------------------------------------------------- 1 | sR6_IXaiFeIhYjp91yxSezMLMqPPiZ23PvfTWr6Pl-A.nbjAXLIYFZCfi0m-gwmJOSNjhxl4FVtCrJyxFEqDh1Q -------------------------------------------------------------------------------- /public/aegis-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/module-federation/aegis-host/662098161c37105046862744c312e2740ae1f565/public/aegis-logo.png -------------------------------------------------------------------------------- /public/aegis.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "desc": "hot-reloadable configuration variables, see https://github.com/module-federation/aegis", 3 | "general": { 4 | "desc": "live-updateable environmental vars", 5 | "fqdn": "localhost", 6 | "remoteServices": "aegis-services", 7 | "resumeWorkflow": true, 8 | "checkIpHost": "https://checkip.amazonaws.com", 9 | "useIdempotencyKey": false, 10 | "defaultCircuitBreaker": { 11 | "errorRate": 25, 12 | "callVolume": 
100, 13 | "intervalMs": 6000, 14 | "fallbackFn": null 15 | } 16 | }, 17 | "adapters": { 18 | "desc": "adapter config", 19 | "cacheSize": 3000, 20 | "enableFederatedQueries": false, 21 | "defaultDatasource": "DataSourceMongoDb", 22 | "datasources": { 23 | "DataSourceMemory": { 24 | "desc": "Non-persistent, in-memory storage", 25 | "enabled": true 26 | }, 27 | "DataSourceFile": { 28 | "desc": "Persistent storage on local file system", 29 | "enabled": true 30 | }, 31 | "DataSourceMongoDb": { 32 | "desc": "Persistent NoSQL, JSON document storage", 33 | "enabled": true, 34 | "url": "mongodb://localhost:27017", 35 | "cacheSize": 3000 36 | }, 37 | "DataSourceIpfs": { 38 | "desc": "Decentralized p2p Internet-wide storage network", 39 | "enabled": false 40 | }, 41 | "DataSourceSolidPod": { 42 | "desc": "Sir Tim Berners Lee's redesign of the Web for data privacy", 43 | "enabled": false 44 | }, 45 | "DataSourceEtherium": { 46 | "desc": "blockchain storage based on solidity", 47 | "enabled": false 48 | }, 49 | "DataSourceAppFabric": { 50 | "desc": "Aegis ", 51 | "enabled": false 52 | } 53 | } 54 | }, 55 | "services": { 56 | "desc": "services config", 57 | "activeServiceMesh": "WebSwitch", 58 | "serviceMesh": { 59 | "WebSwitch": { 60 | "desc": "Default implementation. Switched mesh over web sockets.", 61 | "enabled": true, 62 | "isSwitch": false, 63 | "heartbeat": 3000, 64 | "uplink": null, 65 | "debug": false, 66 | "isBackupSwitch": false 67 | }, 68 | "MeshLink": { 69 | "desc": "Fast UDP-based, peer-to-peer mesh with shared Redis cache.", 70 | "enabled": true, 71 | "config": { 72 | "redis": { 73 | "host": "127.0.0.1", 74 | "port": 6379 75 | }, 76 | "ttl": 10000, 77 | "prefix": "aegis", 78 | "strict": false, 79 | "relayLimit": 1, 80 | "relayDelay": 0, 81 | "updateInterval": 1000 82 | } 83 | }, 84 | "NatsMesh": { 85 | "desc": "Use NATS at layer 7", 86 | "enabled": false 87 | }, 88 | "QuicMesh": { 89 | "desc": "Uses QUIC transport protocol (replacing TCP in HTTP/3). 
Optimized for streaming.", 90 | "enabled": false 91 | } 92 | }, 93 | "auth": { 94 | "keySet": { 95 | "cache": true, 96 | "rateLimit": true, 97 | "jwksRequestsPerMinute": 5, 98 | "jwksUri": "https://dev-2fe2iar6.us.auth0.com/.well-known/jwks.json", 99 | "audience": "https://microlib.io/", 100 | "issuer": "https://dev-2fe2iar6.us.auth0.com/", 101 | "algorithms": ["RS256"] 102 | } 103 | }, 104 | "token": { 105 | "desc": "info to acquire token", 106 | "authEnabled": false, 107 | "oauthUri": "https://server.us.auth0.com/oauth/token", 108 | "client_id": "__client_id__", 109 | "client_secret": "__client_secret__", 110 | "audience": "https://aegis.io/", 111 | "grant_type": "__client_credentials__" 112 | }, 113 | "cert": { 114 | "webRoot": "public", 115 | "certDir": "cert", 116 | "domain": "localhost", 117 | "domainEmail": "admin@federated-microservices.com" 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /public/app.js: -------------------------------------------------------------------------------- 1 | ;(function () { 2 | const apiRoot = 'aegis/api' 3 | const modelApiPath = apiRoot + '/models' 4 | const messages = document.querySelector('#messages') 5 | const postButton = document.querySelector('#post') 6 | const patchButton = document.querySelector('#patch') 7 | const getButton = document.querySelector('#get') 8 | const deleteButton = document.querySelector('#delete') 9 | const clearButton = document.querySelector('#clear') 10 | const modelInput = document.querySelector('#model') 11 | const modelIdInput = document.querySelector('#modelId') 12 | const queryInput = document.querySelector('#query') 13 | const paramInput = document.querySelector('#parameter') 14 | const portInput = document.querySelector('#port') 15 | const copyButton = document.querySelector('#copyButton') 16 | const clearIdButton = document.querySelector('#clearIdButton') 17 | const clearQueryButton = document.querySelector('#clearQueryButton') 
18 | const clearModelButton = document.querySelector('#clearModelButton') 19 | const clearParamButton = document.querySelector('#clearParamButton') 20 | const clearPortsButton = document.querySelector('#clearPortsButton') 21 | const reloadModelButton = document.querySelector('#reloadModelButton') 22 | 23 | let models 24 | class ProgressBar { 25 | constructor (events) { 26 | this.progresscntrl = document.getElementById('progresscntrl') 27 | this.progressbar = document.getElementById('progressbar') 28 | this.collapse = new bootstrap.Collapse(this.progresscntrl, { 29 | toggle: false 30 | }) 31 | this.events = events 32 | this.progress = 0 33 | this.progressbar.style.width = '0%' 34 | this.progressbar.setAttribute('aria-valuenow', 0) 35 | const context = this 36 | this.events.forEach(function (event) { 37 | // add events 38 | window.addEventListener(event, function (e) { 39 | context.makeProgress(e.detail.progress) 40 | }) 41 | }) 42 | } 43 | show () { 44 | this.collapse.show() 45 | } 46 | 47 | hide () { 48 | this.collapse.hide() 49 | } 50 | 51 | makeProgress (progress) { 52 | this.progressbar.style.width = progress + '%' 53 | this.progressbar.setAttribute('aria-valuenow', progress) 54 | } 55 | } 56 | 57 | async function instrumentedFetch (url, options) { 58 | window.dispatchEvent( 59 | new CustomEvent('fetch-connect', { detail: { progress: 35 } }) 60 | ) 61 | 62 | const response = await fetch(url, options) 63 | await handleError(response) 64 | 65 | window.dispatchEvent( 66 | new CustomEvent('fetch-connect', { detail: { progress: 50 } }) 67 | ) 68 | 69 | const reader = response.body.getReader() 70 | const contentLength = response.headers.get('Content-Length') 71 | let receivedLength = 0 72 | let ratio = 0 73 | let chunks = [] 74 | 75 | while (true) { 76 | const { done, value } = await reader.read() 77 | if (done) break 78 | chunks.push(value) 79 | receivedLength += value.length 80 | ratio = (contentLength / receivedLength) * 100 81 | window.dispatchEvent( 82 | new 
CustomEvent('fetch-read', { detail: { progress: ratio / 2 + 50 } }) 83 | ) 84 | } 85 | 86 | let chunksAll = new Uint8Array(receivedLength) 87 | let position = 0 88 | for (let chunk of chunks) { 89 | chunksAll.set(chunk, position) 90 | position += chunk.length 91 | } 92 | 93 | let result = new TextDecoder('utf-8').decode(chunksAll) 94 | window.dispatchEvent( 95 | new CustomEvent('fetch-done', { detail: { progress: 100 } }) 96 | ) 97 | return JSON.parse(result) 98 | } 99 | 100 | // Include JWT access token in header for auth check 101 | let authHeader = {} 102 | let useIdempotencyKey = false 103 | 104 | function generateUuid () { 105 | // Public Domain/MIT 106 | var d = new Date().getTime() //Timestamp 107 | var d2 = 108 | (typeof performance !== 'undefined' && 109 | performance.now && 110 | performance.now() * 1000) || 111 | 0 //Time in microseconds since page-load or 0 if unsupported 112 | return 'yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) { 113 | var r = Math.random() * 16 //random number between 0 and 16 114 | if (d > 0) { 115 | //Use timestamp until depleted 116 | r = (d + r) % 16 | 0 117 | d = Math.floor(d / 16) 118 | } else { 119 | //Use microseconds since page-load if supported 120 | r = (d2 + r) % 16 | 0 121 | d2 = Math.floor(d2 / 16) 122 | } 123 | return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16) 124 | }) 125 | } 126 | 127 | /** 128 | * Returns headers, including auth header if auth is enabled. 129 | * @returns {{ 130 | * "Content-Type":"application/json", 131 | * "Authorization": "bearer " 132 | * }} 133 | */ 134 | function getHeaders () { 135 | const content = { 'Content-Type': 'application/json' } 136 | const headers = { 137 | ...content, 138 | ...authHeader 139 | } 140 | return useIdempotencyKey 141 | ? 
{ ...headers, 'idempotency-key': generateUuid() } 142 | : headers 143 | } 144 | 145 | async function refreshAccessToken (conf) { 146 | const token = conf.services.token 147 | let jwtToken = { access_token: '' } 148 | if (token.authEnabled) { 149 | const data = await fetch(token.oauthUri, { 150 | method: 'POST', 151 | headers: getHeaders(), 152 | body: JSON.stringify({ 153 | client_id: token.client_id, 154 | client_secret: token.client_secret, 155 | audience: token.audience, 156 | grant_type: token.grant_type 157 | }) 158 | }) 159 | jwtToken = await data.json() 160 | // add json web token to authentication header 161 | authHeader = { Authorization: `bearer ${jwtToken.access_token}` } 162 | } 163 | } 164 | 165 | function setIdempotency (conf) { 166 | useIdempotencyKey = conf.general.useIdempotencyKey 167 | } 168 | 169 | /** 170 | * Get config file. If auth is enabled, request new 171 | * JSON Web Token and set `authHeader` accordingly. 172 | * Need CORS for this. 173 | */ 174 | async function updateConfigSettings () { 175 | const file = await fetch('aegis.config.json') 176 | const text = await file.text() 177 | const conf = JSON.parse(text) 178 | await refreshAccessToken(conf) 179 | setIdempotency(conf) 180 | } 181 | 182 | function displayUrl (url) { 183 | document.getElementById( 184 | 'url' 185 | ).value = `${location.protocol}//${location.host}/${url}` 186 | } 187 | 188 | function getUrl () { 189 | if (customUrl) return document.getElementById('url').value 190 | 191 | const id = modelIdInput.value 192 | const model = document.getElementById('model').value 193 | const param = document.getElementById('parameter').value 194 | const query = document.getElementById('query').value 195 | const port = document.getElementById('port').value 196 | 197 | let url = `${modelApiPath}/${model}` 198 | if (id) url += `/${id}` 199 | if (param) url += `/${param}` 200 | if (port) url += `/service/ports/${port}` 201 | if (query) url += `?${query}` 202 | 203 | displayUrl(url) 204 | 
return url 205 | } 206 | 207 | let customUrl = false 208 | 209 | function makeCustUrl () { 210 | customUrl = true 211 | } 212 | 213 | function makeAutoUrl () { 214 | customUrl = false 215 | } 216 | 217 | const urlInput = document.getElementById('url') 218 | 219 | urlInput.onfocus = makeCustUrl 220 | modelInput.onfocus = makeAutoUrl 221 | modelIdInput.onfocus = makeAutoUrl 222 | queryInput.onfocus = makeAutoUrl 223 | paramInput.onfocus = makeAutoUrl 224 | portInput.onfocus = makeAutoUrl 225 | 226 | modelInput.onchange = getUrl 227 | modelIdInput.onchange = getUrl 228 | queryInput.onchange = getUrl 229 | paramInput.onchange = getUrl 230 | portInput.onchange = getUrl 231 | 232 | modelInput.addEventListener('change', updatePorts) 233 | modelInput.addEventListener('change', updateQueryList) 234 | modelIdInput.addEventListener('change', updateQueryList) 235 | 236 | function removeAllChildNodes (parent) { 237 | while (parent.firstChild) { 238 | parent.removeChild(parent.firstChild) 239 | } 240 | } 241 | 242 | function updatePorts () { 243 | const endpoint = modelInput.value.toUpperCase() 244 | 245 | portInput.value = '' 246 | removeAllChildNodes(document.querySelector('#portList')) 247 | 248 | const model = models.find( 249 | model => model.endpoint.toUpperCase() === endpoint 250 | ) 251 | 252 | if (model?.ports) 253 | Object.keys(model.ports).forEach(port => { 254 | if (model.ports[port].type === 'inbound') 255 | portList.appendChild(new Option(port)) 256 | }) 257 | 258 | getUrl() 259 | } 260 | 261 | function updateQueryList () { 262 | const endpoint = modelInput.value.toUpperCase() 263 | 264 | queryInput.value = '' 265 | removeAllChildNodes(document.querySelector('#queryList')) 266 | 267 | if (modelIdInput.value === '') { 268 | queryList.appendChild(new Option('__count=all')) 269 | queryList.appendChild(new Option('__cached=true')) 270 | return 271 | } 272 | 273 | const model = models.find( 274 | model => model.endpoint.toUpperCase() === endpoint 275 | ) 276 | 277 | if 
(model?.relations) { 278 | Object.keys(model.relations).forEach(rel => { 279 | queryList.appendChild(new Option(`relation=${rel}`)) 280 | }) 281 | } 282 | if (model?.commands) { 283 | Object.keys(model.commands).forEach(cmd => { 284 | queryList.appendChild(new Option(`command=${cmd}`)) 285 | }) 286 | } 287 | queryList.appendChild(new Option('html=true')) 288 | queryList.appendChild(new Option('__cached=true')) 289 | 290 | getUrl() 291 | } 292 | 293 | let modelId 294 | 295 | function setModelId (id) { 296 | modelId = id 297 | } 298 | 299 | function modelNameFromEndpoint () { 300 | const endpoint = document.getElementById('model').value 301 | return models.find(model => model.endpoint === endpoint).modelName 302 | } 303 | 304 | const fetchEvents = ['fetch-connect', 'fetch-read', 'fetch-done'] 305 | 306 | window.addEventListener('fetch-connect', function (e) { 307 | reloadModelButton.disabled = true 308 | reloadModelButton.ariaBusy = true 309 | }) 310 | 311 | window.addEventListener('fetch-done', function (e) { 312 | reloadModelButton.disabled = false 313 | reloadModelButton.ariaBusy = false 314 | }) 315 | 316 | /** 317 | * Increase or decreae value to adjust how long 318 | * one should keep pressing down before the pressHold 319 | * event fires 320 | * 321 | * @param {*} item 322 | * @param {*} action 323 | * @param {*} pressHoldDuration 324 | */ 325 | function pressAndHold (item, action, pressHoldDuration = 20) { 326 | let timerID 327 | let counter = 0 328 | 329 | let pressHoldEvent = new CustomEvent('pressHold') 330 | 331 | // Listening for the mouse and touch events 332 | item.addEventListener('mousedown', pressingDown, false) 333 | item.addEventListener('mouseup', notPressingDown, false) 334 | item.addEventListener('mouseleave', notPressingDown, false) 335 | item.addEventListener('touchstart', pressingDown, false) 336 | item.addEventListener('touchend', notPressingDown, false) 337 | // Listening for our custom pressHold event 338 | 
item.addEventListener('pressHold', action, false) 339 | 340 | function pressingDown (e) { 341 | // Start the timer 342 | requestAnimationFrame(timer) 343 | e.preventDefault() 344 | console.log('Pressing!') 345 | } 346 | 347 | function notPressingDown (e) { 348 | // Stop the timer 349 | cancelAnimationFrame(timerID) 350 | counter = 0 351 | console.log('Not pressing!') 352 | } 353 | 354 | function timer () { 355 | console.log('Timer tick!') 356 | 357 | if (counter < pressHoldDuration) { 358 | timerID = requestAnimationFrame(timer) 359 | counter++ 360 | } else { 361 | console.log('Press threshold reached!') 362 | item.dispatchEvent(pressHoldEvent) 363 | } 364 | } 365 | } 366 | 367 | function showMessage (message, style = 'pretty') { 368 | const styles = { 369 | pretty: message => `\n${prettifyJson(message)}`, 370 | error: message => 371 | `\n${message}`, 372 | plain: message => `\n${message}` 373 | } 374 | //const msg = message === typeof Object ? JSON.stringify(message) : message 375 | document.getElementById('jsonCode').innerHTML += styles[style](message) 376 | messages.scrollTop = messages.scrollHeight 377 | } 378 | 379 | async function handleError (response) { 380 | let msg = null 381 | if (response.status > 199 && response.status < 300) { 382 | return response 383 | } 384 | try { 385 | msg = JSON.stringify(await response.json(), null, 2) 386 | } catch (error) { 387 | msg = `${response.status}: ${response.statusText}` 388 | } 389 | throw new Error(msg) 390 | } 391 | 392 | async function handleResponse (response) { 393 | return response.json() 394 | } 395 | 396 | let endpoints = [] 397 | 398 | window.addEventListener('change', () => (endpoints = [])) 399 | 400 | function prettifyJson (json) { 401 | let endpoint = modelInput.value 402 | if (!json) return 403 | if (typeof json !== 'string') { 404 | jsonArr = [json].flat() 405 | json = JSON.stringify(json, null, 2) 406 | } else { 407 | jsonArr = [JSON.parse(json)].flat() 408 | } 409 | let nextId = false 410 | let 
nextEndpoint = false 411 | return json.replace( 412 | /("(\\u[a-zA-Z0-9]{4}|\\[^u]|[^\\"])*"(\s*:)?|\b(true|false|null)\b|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?)/g, 413 | function (match) { 414 | if (nextEndpoint) { 415 | nextEndpoint = false 416 | endpoints.push(match.replaceAll('"', '')) 417 | } 418 | if (nextId) { 419 | nextId = false 420 | console.log(location.search) 421 | const id = match.replaceAll('"', '') 422 | const ep = jsonArr.filter(x => x.id === id).map(x => x.endpoint)[0] 423 | endpoint = ep || endpoint 424 | return `` 426 | } 427 | let cls = '' 428 | if (/^"/.test(match)) { 429 | if (/:$/.test(match)) { 430 | if (/"id"|"modelId"/.test(match)) { 431 | nextId = true 432 | } 433 | if (/"endpoint"/.test(match)) { 434 | nextEndpoint = true 435 | } 436 | cls = "" 437 | } else { 438 | cls = '' 439 | } 440 | } else if (/true|false/.test(match)) { 441 | cls = "" 442 | } else if (/null/.test(match)) { 443 | cls = "" 444 | } 445 | return cls + match + '' 446 | } 447 | ) 448 | } 449 | 450 | window.addEventListener('getId', e => { 451 | modelInput.value = endpoints.pop() || e.detail.endpoint 452 | modelIdInput.value = e.detail.id 453 | getButton.click() 454 | }) 455 | 456 | reloadModelButton.onclick = async function () { 457 | try { 458 | const bar = new ProgressBar(fetchEvents) 459 | bar.show() 460 | const modelName = modelNameFromEndpoint() 461 | const jsonObj = await instrumentedFetch( 462 | `${modelApiPath}/reload?modelName=${modelName}`, 463 | { 464 | method: 'GET', 465 | headers: getHeaders() 466 | } 467 | ) 468 | showMessage(jsonObj) 469 | setTimeout(() => bar.hide(), 1000) 470 | } catch (error) { 471 | showMessage(error.message, 'error') 472 | } 473 | } 474 | 475 | pressAndHold(postButton, () => (modelIdInput.value = '')) 476 | 477 | postButton.onclick = async function post () { 478 | try { 479 | const jsonObj = await instrumentedFetch(getUrl(), { 480 | method: 'POST', 481 | body: document.getElementById('payload').value, 482 | headers: getHeaders() 483 | }) 
484 | setModelId(jsonObj.id) 485 | showMessage(jsonObj) 486 | } catch (err) { 487 | showMessage(err.message, 'error') 488 | } 489 | } 490 | 491 | patchButton.onclick = function () { 492 | queryInput.value = '' 493 | fetch(getUrl(), { 494 | method: 'PATCH', 495 | body: document.getElementById('payload').value, 496 | headers: getHeaders() 497 | }) 498 | .then(handleError) 499 | .then(handleResponse) 500 | .then(showMessage) 501 | .catch(function (err) { 502 | showMessage(err.message, 'error') 503 | }) 504 | } 505 | 506 | getButton.onclick = function () { 507 | if (/html=true/i.test(queryInput.value)) { 508 | window.open(getUrl()) 509 | return 510 | } 511 | document.getElementById('parameter').value = '' 512 | fetch(getUrl(), { headers: getHeaders() }) 513 | .then(handleError) 514 | .then(handleResponse) 515 | .then(showMessage) 516 | .catch(function (err) { 517 | showMessage(err.message, 'error') 518 | }) 519 | } 520 | 521 | deleteButton.onclick = function () { 522 | fetch(getUrl(), { method: 'DELETE', headers: getHeaders() }) 523 | .then(handleError) 524 | .then(handleResponse) 525 | .then(showMessage) 526 | .catch(function (err) { 527 | showMessage(err.message, 'error') 528 | }) 529 | } 530 | 531 | clearButton.onclick = function () { 532 | document.getElementById('jsonCode').innerHTML = '' 533 | } 534 | 535 | copyButton.addEventListener('click', function () { 536 | modelIdInput.select() 537 | navigator.clipboard.writeText(modelIdInput.value) 538 | }) 539 | 540 | clearModelButton.addEventListener('click', function () { 541 | setModelId(null) 542 | modelInput.value = '' 543 | modelIdInput.value = '' 544 | queryInput.value = '' 545 | paramInput.value = '' 546 | portInput.value = '' 547 | updatePorts() 548 | updateQueryList() 549 | getUrl() 550 | }) 551 | 552 | clearIdButton.addEventListener('click', function () { 553 | modelIdInput.value = '' 554 | paramInput.value = '' 555 | updateQueryList() 556 | updatePorts() 557 | getUrl() 558 | }) 559 | 560 | 
clearParamButton.addEventListener('click', function () { 561 | paramInput.value = '' 562 | getUrl() 563 | }) 564 | 565 | clearQueryButton.addEventListener('click', function () { 566 | queryInput.value = '' 567 | updateQueryList() 568 | getUrl() 569 | }) 570 | 571 | clearPortsButton.addEventListener('click', function () { 572 | portInput.value = '' 573 | updatePorts() 574 | getUrl() 575 | }) 576 | 577 | async function loadModels () { 578 | const modelJson = await fetch('aegis/api/config?isCached=false', { 579 | headers: getHeaders() 580 | }) 581 | models = await modelJson.json() 582 | models.forEach(m => { 583 | modelList.appendChild(new Option(m.endpoint)) 584 | }) 585 | return models 586 | } 587 | 588 | window.addEventListener('load', async function () { 589 | // if enabled, request fresh access token and store in auth header 590 | await updateConfigSettings() 591 | // get list of all models and add to datalist for model input control 592 | await loadModels() 593 | }) 594 | })() 595 | -------------------------------------------------------------------------------- /public/arch.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 9 | 60 | 61 | 62 |
63 | architecture 64 |
65 | C4 66 | 67 |
68 |
69 | 70 | 71 | -------------------------------------------------------------------------------- /public/hot-reload-complete.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 13 | 14 | 15 |

hot reload complete

16 | 100% 17 |

back

18 | 19 | 20 | -------------------------------------------------------------------------------- /public/hot-reload.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 13 | hot reload in progress... 14 | 15 | 16 |

hot reload in progress...

17 | 25 | reloading... 26 | 27 | 28 | -------------------------------------------------------------------------------- /public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 14 | 15 | ÆGIS Domain Model API 16 | 17 | 18 | 19 | 24 | 25 | 129 | 130 |
131 |
132 |
133 |
134 |
143 | hot deployment running... 144 |
145 |
146 |
147 | 153 | 154 | 155 | 163 | 170 |
171 | 172 |
173 | 178 | 179 | 182 | 189 |
190 | 191 |
192 | 199 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 215 |
216 | 217 |
218 | 225 | 226 | 227 | 234 |
235 | 236 |
237 | 243 | 244 | 245 | 252 |
253 | 254 |
255 | 286 | 287 |
288 | 289 |
290 | 293 | 296 | 299 | 302 | 305 |
306 |
307 | 308 |
309 | 315 |
321 |           
322 |         
323 |
324 |
325 |
326 | 327 | 328 | 329 | -------------------------------------------------------------------------------- /public/wsapp.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 16 | 17 | ÆGIS Service Mesh Listener 18 | 19 | 20 | 21 | 22 | 27 | 28 | 39 | > 40 | 41 |
42 |
43 |
44 |
45 | 46 | 49 | 52 | 55 |
56 |
57 |
58 |
59 |
60 |
65 |
66 | 70 |
71 |
72 | 73 | 74 | 75 | 76 | 77 | 81 | -------------------------------------------------------------------------------- /public/wsapp.js: -------------------------------------------------------------------------------- 1 | ;(function () { 2 | const messages = document.querySelector('#messages') 3 | const wsButton = document.querySelector('#wsButton') 4 | const statusButton = document.querySelector('#status') 5 | const clearButton = document.querySelector('#clear') 6 | 7 | function prettifyJson (json) { 8 | if (typeof json !== 'string') { 9 | json = JSON.stringify(json, null, 2) 10 | } 11 | return json.replace( 12 | /("(\\u[a-zA-Z0-9]{4}|\\[^u]|[^\\"])*"(\s*:)?|\b(true|false|null)\b|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?)/g, 13 | function (match) { 14 | let cls = '' 15 | if (/^"/.test(match)) { 16 | if (/:$/.test(match)) { 17 | cls = "" 18 | } else { 19 | cls = '' 20 | } 21 | } else if (/true|false/.test(match)) { 22 | cls = "" 23 | } else if (/null/.test(match)) { 24 | cls = "" 25 | } 26 | return cls + match + '' 27 | } 28 | ) 29 | } 30 | 31 | function showMessage (message) { 32 | document.getElementById('jsonCode').innerHTML += `\n${prettifyJson( 33 | message 34 | )}` 35 | messages.scrollTop = messages.scrollHeight 36 | } 37 | 38 | let ws 39 | 40 | wsButton.onclick = function () { 41 | if (ws) { 42 | ws.close() 43 | ws.onerror = ws.onopen = ws.onclose = null 44 | } 45 | 46 | const proto = /https/i.test(location.protocol) ? 
'wss' : 'ws' 47 | ws = new WebSocket(`${proto}://${location.hostname}:${location.port}`, [ 48 | 'webswitch' 49 | ]) 50 | ws.binaryType = 'arraybuffer' 51 | 52 | ws.onerror = function (e) { 53 | showMessage('WebSocket error', e) 54 | } 55 | 56 | ws.onopen = function () { 57 | showMessage('WebSocket connection established') 58 | // ws.send({ proto: 'webswitch', pid: 1, role: 'browser' }) 59 | } 60 | 61 | ws.onclose = function () { 62 | showMessage('WebSocket connection closed') 63 | ws = null 64 | } 65 | 66 | ws.onmessage = function (event) { 67 | try { 68 | if (event.data instanceof ArrayBuffer) { 69 | showMessage(JSON.parse(new TextDecoder().decode(event.data))) 70 | return 71 | } 72 | showMessage(JSON.parse(event)) 73 | } catch (err) { 74 | showMessage(err.message) 75 | console.error('onmessage', event, err.message) 76 | } 77 | } 78 | } 79 | 80 | statusButton.onclick = function () { 81 | ws.send(JSON.stringify('status')) 82 | } 83 | 84 | clearButton.onclick = function () { 85 | document.getElementById('jsonCode').innerHTML = '' 86 | } 87 | })() 88 | -------------------------------------------------------------------------------- /rustup-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # shellcheck shell=dash 3 | 4 | # This is just a little script that can be downloaded from the internet to 5 | # install rustup. It just does platform detection, downloads the installer 6 | # and runs it. 7 | 8 | # It runs on Unix shells like {a,ba,da,k,z}sh. It uses the common `local` 9 | # extension. Note: Most shells limit `local` to 1 var per line, contra bash. 10 | 11 | if [ "$KSH_VERSION" = 'Version JM 93t+ 2010-03-05' ]; then 12 | # The version of ksh93 that ships with many illumos systems does not 13 | # support the "local" extension. Print a message rather than fail in 14 | # subtle ways later on: 15 | echo 'rustup does not work with this ksh93 version; please try bash!' 
>&2
    exit 1
fi


set -u

# If RUSTUP_UPDATE_ROOT is unset or empty, default it.
RUSTUP_UPDATE_ROOT="${RUSTUP_UPDATE_ROOT:-https://static.rust-lang.org/rustup}"

#XXX: If you change anything here, please make the same changes in setup_mode.rs
usage() {
    # NOTE(review): part of this usage text was lost in extraction (the
    # angle-bracketed placeholders were stripped); reconstructed minimally
    # from the surviving option descriptions.
    cat 1>&2 <<EOF
USAGE:
    rustup-init [FLAGS] [OPTIONS]

OPTIONS:
        --default-host <default-host>              Choose a default host triple
        --default-toolchain <default-toolchain>    Choose a default toolchain to install
        --default-toolchain none                   Do not install any toolchains
        --profile [minimal|default|complete]       Choose a profile
    -c, --component <components>...                Component name to also install
    -t, --target <targets>...                      Target name to also install
EOF
}

main() {
    downloader --check
    need_cmd uname
    need_cmd mktemp
    need_cmd chmod
    need_cmd mkdir
    need_cmd rm
    need_cmd rmdir

    get_architecture || return 1
    local _arch="$RETVAL"
    assert_nz "$_arch" "arch"

    local _ext=""
    case "$_arch" in
        *windows*)
            _ext=".exe"
            ;;
    esac

    local _url="${RUSTUP_UPDATE_ROOT}/dist/${_arch}/rustup-init${_ext}"

    local _dir
    _dir="$(ensure mktemp -d)"
    local _file="${_dir}/rustup-init${_ext}"

    local _ansi_escapes_are_valid=false
    if [ -t 2 ]; then
        if [ "${TERM+set}" = 'set' ]; then
            case "$TERM" in
                xterm*|rxvt*|urxvt*|linux*|vt*)
                    _ansi_escapes_are_valid=true
                    ;;
            esac
        fi
    fi

    # check if we have to use /dev/tty to prompt the user
    local need_tty=yes
    for arg in "$@"; do
        case "$arg" in
            --help)
                usage
                exit 0
                ;;
            *)
                OPTIND=1
                if [ "${arg%%--*}" = "" ]; then
                    # Long option (other than --help);
                    # don't attempt to interpret it.
                    continue
                fi
                while getopts :hy sub_arg "$arg"; do
                    case "$sub_arg" in
                        h)
                            usage
                            exit 0
                            ;;
                        y)
                            # user wants to skip the prompt --
                            # we don't need /dev/tty
                            need_tty=no
                            ;;
                        *)
                            ;;
                    esac
                done
                ;;
        esac
    done

    if $_ansi_escapes_are_valid; then
        printf "\33[1minfo:\33[0m downloading installer\n" 1>&2
    else
        printf '%s\n' 'info: downloading installer' 1>&2
    fi

    ensure mkdir -p "$_dir"
    ensure downloader "$_url" "$_file" "$_arch"
    ensure chmod u+x "$_file"
    if [ ! -x "$_file" ]; then
        printf '%s\n' "Cannot execute $_file (likely because of mounting /tmp as noexec)." 1>&2
        printf '%s\n' "Please copy the file to a location where you can execute binaries and run ./rustup-init${_ext}." 1>&2
        exit 1
    fi

    if [ "$need_tty" = "yes" ] && [ ! -t 0 ]; then
        # The installer is going to want to ask for confirmation by
        # reading stdin. This script was piped into `sh` though and
        # doesn't have stdin to pass to its children. Instead we're going
        # to explicitly connect /dev/tty to the installer's stdin.
        if [ ! -t 1 ]; then
            err "Unable to run interactively. Run with -y to accept defaults, --help for additional options"
        fi

        ignore "$_file" "$@" < /dev/tty
    else
        ignore "$_file" "$@"
    fi

    local _retval=$?

    ignore rm "$_file"
    ignore rmdir "$_dir"

    return "$_retval"
}

check_proc() {
    # Check for /proc by looking for the /proc/self/exe link
    # This is only run on Linux
    if ! test -L /proc/self/exe ; then
        err "fatal: Unable to find /proc/self/exe. Is /proc mounted? Installation cannot proceed without /proc."
    fi
}

get_bitness() {
    need_cmd head
    # Architecture detection without dependencies beyond coreutils.
    # ELF files start out "\x7fELF", and the following byte is
    #   0x01 for 32-bit and
    #   0x02 for 64-bit.
    # The printf builtin on some shells like dash only supports octal
    # escape sequences, so we use those.
    local _current_exe_head
    _current_exe_head=$(head -c 5 /proc/self/exe )
    if [ "$_current_exe_head" = "$(printf '\177ELF\001')" ]; then
        echo 32
    elif [ "$_current_exe_head" = "$(printf '\177ELF\002')" ]; then
        echo 64
    else
        err "unknown platform bitness"
    fi
}

is_host_amd64_elf() {
    need_cmd head
    need_cmd tail
    # ELF e_machine detection without dependencies beyond coreutils.
    # Two-byte field at offset 0x12 indicates the CPU,
    # but we're interested in it being 0x3E to indicate amd64, or not that.
    local _current_exe_machine
    _current_exe_machine=$(head -c 19 /proc/self/exe | tail -c 1)
    [ "$_current_exe_machine" = "$(printf '\076')" ]
}

get_endianness() {
    local cputype=$1
    local suffix_eb=$2
    local suffix_el=$3

    # detect endianness without od/hexdump, like get_bitness() does.
    need_cmd head
    need_cmd tail

    local _current_exe_endianness
    _current_exe_endianness="$(head -c 6 /proc/self/exe | tail -c 1)"
    if [ "$_current_exe_endianness" = "$(printf '\001')" ]; then
        echo "${cputype}${suffix_el}"
    elif [ "$_current_exe_endianness" = "$(printf '\002')" ]; then
        echo "${cputype}${suffix_eb}"
    else
        err "unknown platform endianness"
    fi
}

get_architecture() {
    local _ostype _cputype _bitness _arch _clibtype
    _ostype="$(uname -s)"
    _cputype="$(uname -m)"
    _clibtype="gnu"

    if [ "$_ostype" = Linux ]; then
        if [ "$(uname -o)" = Android ]; then
            _ostype=Android
        fi
        if ldd --version 2>&1 | grep -q 'musl'; then
            _clibtype="musl"
        fi
    fi

    if [ "$_ostype" = Darwin ] && [ "$_cputype" = i386 ]; then
        # Darwin `uname -m` lies
        if sysctl hw.optional.x86_64 | grep -q ': 1'; then
            _cputype=x86_64
        fi
    fi

    if [ "$_ostype" = SunOS ]; then
        # Both Solaris and illumos presently announce as "SunOS" in "uname -s"
        # so use "uname -o" to disambiguate. We use the full path to the
        # system uname in case the user has coreutils uname first in PATH,
        # which has historically sometimes printed the wrong value here.
        if [ "$(/usr/bin/uname -o)" = illumos ]; then
            _ostype=illumos
        fi

        # illumos systems have multi-arch userlands, and "uname -m" reports the
        # machine hardware name; e.g., "i86pc" on both 32- and 64-bit x86
        # systems. Check for the native (widest) instruction set on the
        # running kernel:
        if [ "$_cputype" = i86pc ]; then
            _cputype="$(isainfo -n)"
        fi
    fi

    case "$_ostype" in

        Android)
            _ostype=linux-android
            ;;

        Linux)
            check_proc
            _ostype=unknown-linux-$_clibtype
            _bitness=$(get_bitness)
            ;;

        FreeBSD)
            _ostype=unknown-freebsd
            ;;

        NetBSD)
            _ostype=unknown-netbsd
            ;;

        DragonFly)
            _ostype=unknown-dragonfly
            ;;

        Darwin)
            _ostype=apple-darwin
            ;;

        illumos)
            _ostype=unknown-illumos
            ;;

        MINGW* | MSYS* | CYGWIN* | Windows_NT)
            _ostype=pc-windows-gnu
            ;;

        *)
            err "unrecognized OS type: $_ostype"
            ;;

    esac

    case "$_cputype" in

        i386 | i486 | i686 | i786 | x86)
            _cputype=i686
            ;;

        xscale | arm)
            _cputype=arm
            if [ "$_ostype" = "linux-android" ]; then
                _ostype=linux-androideabi
            fi
            ;;

        armv6l)
            _cputype=arm
            if [ "$_ostype" = "linux-android" ]; then
                _ostype=linux-androideabi
            else
                _ostype="${_ostype}eabihf"
            fi
            ;;

        armv7l | armv8l)
            _cputype=armv7
            if [ "$_ostype" = "linux-android" ]; then
                _ostype=linux-androideabi
            else
                _ostype="${_ostype}eabihf"
            fi
            ;;

        aarch64 | arm64)
            _cputype=aarch64
            ;;

        x86_64 | x86-64 | x64 | amd64)
            _cputype=x86_64
            ;;

        mips)
            _cputype=$(get_endianness mips '' el)
            ;;

        mips64)
            if [ "$_bitness" -eq 64 ]; then
                # only n64 ABI is supported for now
                _ostype="${_ostype}abi64"
                _cputype=$(get_endianness mips64 '' el)
            fi
            ;;

        ppc)
            _cputype=powerpc
            ;;

        ppc64)
            _cputype=powerpc64
            ;;

        ppc64le)
            _cputype=powerpc64le
            ;;

        s390x)
            _cputype=s390x
            ;;
        riscv64)
            _cputype=riscv64gc
            ;;
        *)
            err "unknown CPU type: $_cputype"

    esac

    # Detect 64-bit linux with 32-bit userland
    if [ "${_ostype}" = unknown-linux-gnu ] && [ "${_bitness}" -eq 32 ]; then
        case $_cputype in
            x86_64)
                if [ -n "${RUSTUP_CPUTYPE:-}" ]; then
                    _cputype="$RUSTUP_CPUTYPE"
                else {
                    # 32-bit executable for amd64 = x32
                    if is_host_amd64_elf; then {
                         echo "This host is running an x32 userland; as it stands, x32 support is poor," 1>&2
                         echo "and there isn't a native toolchain -- you will have to install" 1>&2
                         echo "multiarch compatibility with i686 and/or amd64, then select one" 1>&2
                         echo "by re-running this script with the RUSTUP_CPUTYPE environment variable" 1>&2
                         echo "set to i686 or x86_64, respectively." 1>&2
                         echo 1>&2
                         echo "You will be able to add an x32 target after installation by running" 1>&2
                         echo "  rustup target add x86_64-unknown-linux-gnux32" 1>&2
                         exit 1
                    }; else
                        _cputype=i686
                    fi
                }; fi
                ;;
            mips64)
                _cputype=$(get_endianness mips '' el)
                ;;
            powerpc64)
                _cputype=powerpc
                ;;
            aarch64)
                _cputype=armv7
                if [ "$_ostype" = "linux-android" ]; then
                    _ostype=linux-androideabi
                else
                    _ostype="${_ostype}eabihf"
                fi
                ;;
            riscv64gc)
                err "riscv64 with 32-bit userland unsupported"
                ;;
        esac
    fi

    # Detect armv7 but without the CPU features Rust needs in that build,
    # and fall back to arm.
    # See https://github.com/rust-lang/rustup.rs/issues/587.
    if [ "$_ostype" = "unknown-linux-gnueabihf" ] && [ "$_cputype" = armv7 ]; then
        if ensure grep '^Features' /proc/cpuinfo | grep -q -v neon; then
            # At least one processor does not have NEON.
            _cputype=arm
        fi
    fi

    _arch="${_cputype}-${_ostype}"

    RETVAL="$_arch"
}

say() {
    printf 'rustup: %s\n' "$1"
}

err() {
    say "$1" >&2
    exit 1
}

need_cmd() {
    if ! check_cmd "$1"; then
        err "need '$1' (command not found)"
    fi
}

check_cmd() {
    command -v "$1" > /dev/null 2>&1
}

assert_nz() {
    if [ -z "$1" ]; then err "assert_nz $2"; fi
}

# Run a command that should never fail. If the command fails execution
# will immediately terminate with an error showing the failing
# command.
ensure() {
    if ! "$@"; then err "command failed: $*"; fi
}

# This is just for indicating that commands' results are being
# intentionally ignored. Usually, because it's being executed
# as part of error handling.
ignore() {
    "$@"
}

# This wraps curl or wget. Try curl first, if not installed,
# use wget instead.
downloader() {
    local _dld
    local _ciphersuites
    local _err
    local _status
    local _retry
    if check_cmd curl; then
        _dld=curl
    elif check_cmd wget; then
        _dld=wget
    else
        _dld='curl or wget' # to be used in error message of need_cmd
    fi

    if [ "$1" = --check ]; then
        need_cmd "$_dld"
    elif [ "$_dld" = curl ]; then
        check_curl_for_retry_support
        _retry="$RETVAL"
        get_ciphersuites_for_curl
        _ciphersuites="$RETVAL"
        if [ -n "$_ciphersuites" ]; then
            _err=$(curl $_retry --proto '=https' --tlsv1.2 --ciphers "$_ciphersuites" --silent --show-error --fail --location "$1" --output "$2" 2>&1)
            _status=$?
        else
            echo "Warning: Not enforcing strong cipher suites for TLS, this is potentially less secure"
            if ! check_help_for "$3" curl --proto --tlsv1.2; then
                echo "Warning: Not enforcing TLS v1.2, this is potentially less secure"
                _err=$(curl $_retry --silent --show-error --fail --location "$1" --output "$2" 2>&1)
                _status=$?
            else
                _err=$(curl $_retry --proto '=https' --tlsv1.2 --silent --show-error --fail --location "$1" --output "$2" 2>&1)
                _status=$?
            fi
        fi
        if [ -n "$_err" ]; then
            echo "$_err" >&2
            if echo "$_err" | grep -q 404$; then
                err "installer for platform '$3' not found, this may be unsupported"
            fi
        fi
        return $_status
    elif [ "$_dld" = wget ]; then
        if [ "$(wget -V 2>&1|head -2|tail -1|cut -f1 -d" ")" = "BusyBox" ]; then
            echo "Warning: using the BusyBox version of wget. Not enforcing strong cipher suites for TLS or TLS v1.2, this is potentially less secure"
            _err=$(wget "$1" -O "$2" 2>&1)
            _status=$?
        else
            get_ciphersuites_for_wget
            _ciphersuites="$RETVAL"
            if [ -n "$_ciphersuites" ]; then
                _err=$(wget --https-only --secure-protocol=TLSv1_2 --ciphers "$_ciphersuites" "$1" -O "$2" 2>&1)
                _status=$?
            else
                echo "Warning: Not enforcing strong cipher suites for TLS, this is potentially less secure"
                if ! check_help_for "$3" wget --https-only --secure-protocol; then
                    echo "Warning: Not enforcing TLS v1.2, this is potentially less secure"
                    _err=$(wget "$1" -O "$2" 2>&1)
                    _status=$?
                else
                    _err=$(wget --https-only --secure-protocol=TLSv1_2 "$1" -O "$2" 2>&1)
                    _status=$?
                fi
            fi
        fi
        if [ -n "$_err" ]; then
            echo "$_err" >&2
            if echo "$_err" | grep -q ' 404 Not Found$'; then
                err "installer for platform '$3' not found, this may be unsupported"
            fi
        fi
        return $_status
    else
        err "Unknown downloader"   # should not reach here
    fi
}

check_help_for() {
    local _arch
    local _cmd
    local _arg
    _arch="$1"
    shift
    _cmd="$1"
    shift

    local _category
    if "$_cmd" --help | grep -q 'For all options use the manual or "--help all".'; then
      _category="all"
    else
      _category=""
    fi

    case "$_arch" in

        *darwin*)
        if check_cmd sw_vers; then
            case $(sw_vers -productVersion) in
                10.*)
                    # If we're running on macOS, older than 10.13, then we always
                    # fail to find these options to force fallback
                    if [ "$(sw_vers -productVersion | cut -d. -f2)" -lt 13 ]; then
                        # Older than 10.13
                        echo "Warning: Detected macOS platform older than 10.13"
                        return 1
                    fi
                    ;;
                11.*)
                    # We assume Big Sur will be OK for now
                    ;;
                *)
                    # Unknown product version, warn and continue
                    echo "Warning: Detected unknown macOS major version: $(sw_vers -productVersion)"
                    echo "Warning TLS capabilities detection may fail"
                    ;;
            esac
        fi
        ;;

    esac

    for _arg in "$@"; do
        if ! "$_cmd" --help $_category | grep -q -- "$_arg"; then
            return 1
        fi
    done

    true # not strictly needed
}

# Check if curl supports the --retry flag, then pass it to the curl invocation.
check_curl_for_retry_support() {
  local _retry_supported=""
  # "unspecified" is for arch, allows for possibility old OS using macports, homebrew, etc.
  if check_help_for "notspecified" "curl" "--retry"; then
    _retry_supported="--retry 3"
  fi

  RETVAL="$_retry_supported"

}

# Return cipher suite string specified by user, otherwise return strong TLS 1.2-1.3 cipher suites
# if support by local tools is detected. Detection currently supports these curl backends:
# GnuTLS and OpenSSL (possibly also LibreSSL and BoringSSL). Return value can be empty.
get_ciphersuites_for_curl() {
    if [ -n "${RUSTUP_TLS_CIPHERSUITES-}" ]; then
        # user specified custom cipher suites, assume they know what they're doing
        RETVAL="$RUSTUP_TLS_CIPHERSUITES"
        return
    fi

    local _openssl_syntax="no"
    local _gnutls_syntax="no"
    local _backend_supported="yes"
    if curl -V | grep -q ' OpenSSL/'; then
        _openssl_syntax="yes"
    elif curl -V | grep -iq ' LibreSSL/'; then
        _openssl_syntax="yes"
    elif curl -V | grep -iq ' BoringSSL/'; then
        _openssl_syntax="yes"
    elif curl -V | grep -iq ' GnuTLS/'; then
        _gnutls_syntax="yes"
    else
        _backend_supported="no"
    fi

    local _args_supported="no"
    if [ "$_backend_supported" = "yes" ]; then
        # "unspecified" is for arch, allows for possibility old OS using macports, homebrew, etc.
        if check_help_for "notspecified" "curl" "--tlsv1.2" "--ciphers" "--proto"; then
            _args_supported="yes"
        fi
    fi

    local _cs=""
    if [ "$_args_supported" = "yes" ]; then
        if [ "$_openssl_syntax" = "yes" ]; then
            _cs=$(get_strong_ciphersuites_for "openssl")
        elif [ "$_gnutls_syntax" = "yes" ]; then
            _cs=$(get_strong_ciphersuites_for "gnutls")
        fi
    fi

    RETVAL="$_cs"
}

# Return cipher suite string specified by user, otherwise return strong TLS 1.2-1.3 cipher suites
# if support by local tools is detected. Detection currently supports these wget backends:
# GnuTLS and OpenSSL (possibly also LibreSSL and BoringSSL). Return value can be empty.
get_ciphersuites_for_wget() {
    if [ -n "${RUSTUP_TLS_CIPHERSUITES-}" ]; then
        # user specified custom cipher suites, assume they know what they're doing
        RETVAL="$RUSTUP_TLS_CIPHERSUITES"
        return
    fi

    local _cs=""
    if wget -V | grep -q '\-DHAVE_LIBSSL'; then
        # "unspecified" is for arch, allows for possibility old OS using macports, homebrew, etc.
        if check_help_for "notspecified" "wget" "TLSv1_2" "--ciphers" "--https-only" "--secure-protocol"; then
            _cs=$(get_strong_ciphersuites_for "openssl")
        fi
    elif wget -V | grep -q '\-DHAVE_LIBGNUTLS'; then
        # "unspecified" is for arch, allows for possibility old OS using macports, homebrew, etc.
        if check_help_for "notspecified" "wget" "TLSv1_2" "--ciphers" "--https-only" "--secure-protocol"; then
            _cs=$(get_strong_ciphersuites_for "gnutls")
        fi
    fi

    RETVAL="$_cs"
}

# Return strong TLS 1.2-1.3 cipher suites in OpenSSL or GnuTLS syntax. TLS 1.2
# excludes non-ECDHE and non-AEAD cipher suites. DHE is excluded due to bad
# DH params often found on servers (see RFC 7919). Sequence matches or is
# similar to Firefox 68 ESR with weak cipher suites disabled via about:config.
# $1 must be openssl or gnutls.
get_strong_ciphersuites_for() {
    if [ "$1" = "openssl" ]; then
        # OpenSSL is forgiving of unknown values, no problems with TLS 1.3 values on versions that don't support it yet.
        echo "TLS_AES_128_GCM_SHA256:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_256_GCM_SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384"
    elif [ "$1" = "gnutls" ]; then
        # GnuTLS isn't forgiving of unknown values, so this may require a GnuTLS version that supports TLS 1.3 even if wget doesn't.
        # Begin with SECURE128 (and higher) then remove/add to build cipher suites. Produces same 9 cipher suites as OpenSSL but in slightly different order.
        echo "SECURE128:-VERS-SSL3.0:-VERS-TLS1.0:-VERS-TLS1.1:-VERS-DTLS-ALL:-CIPHER-ALL:-MAC-ALL:-KX-ALL:+AEAD:+ECDHE-ECDSA:+ECDHE-RSA:+AES-128-GCM:+CHACHA20-POLY1305:+AES-256-GCM"
    fi
}

main "$@" || exit 1
# ---- /serverless.yml ----
name: aegis-lambda
component: aws-lambda

provider:
  name: aws
  runtime: nodejs14.x

inputs:
  src: dist
  handler: dist/bootstrap.handleServerless
# ---- /src/bootstrap.js (truncated at the end of this excerpt) ----
'use strict'

require('dotenv').config()
require('regenerator-runtime')
const { loadMiddleware } = require('./middleware')
const importFresh = require('import-fresh')
const express = require('express')
const server = require('./server')
const cors = require('cors')
const app = express()

function clearRoutes () {
  app._router.stack = app._router.stack.filter(
    k => !(k && k.route && k.route.path)
  )
}

async function load (aegis = null) {
  if (aegis) {
    await aegis.dispose()
    clearRoutes()
  }

  const remote =
importFresh('../dist/remoteEntry.js') 25 | return remote.get('./hostContainer').then(async factory => { 26 | const aegis = factory() 27 | const remotes = (await remote.get('./remoteEntries'))() 28 | const handle = await aegis.init(remotes) 29 | 30 | app.use(cors()) 31 | app.use(express.json()) 32 | app.use(express.static('public')) 33 | loadMiddleware(app) 34 | 35 | app.use('/reload', async (req, res) => { 36 | await load(aegis) 37 | res.send('

reload complete

back') 38 | }) 39 | 40 | app.all('*', (req, res) => handle(req.path, req.method, req, res)) 41 | }) 42 | } 43 | 44 | load().then(() => server.start(app)) 45 | -------------------------------------------------------------------------------- /src/host-container.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { aegis } = __non_webpack_require__('@module-federation/aegis') 4 | 5 | async function init (remotes) { 6 | return aegis.init(remotes) 7 | } 8 | 9 | async function dispose () { 10 | await aegis.dispose() 11 | 12 | Object.keys(__non_webpack_require__.cache).forEach(k => { 13 | delete __non_webpack_require__.cache[k] 14 | }) 15 | } 16 | 17 | module.exports = { init, dispose } 18 | -------------------------------------------------------------------------------- /src/middleware.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const helmet = require('helmet') 4 | const expressAttack = require('express-attack') 5 | const http = require('http') 6 | 7 | function throttleStore () { 8 | const store = {} 9 | const get = async function (key) { 10 | return store[key] 11 | } 12 | const set = async function (key, timestamp, period) { 13 | store[key] = timestamp 14 | } 15 | 16 | return { 17 | get, 18 | set 19 | } 20 | } 21 | 22 | /** 23 | * 24 | * @param {http.ClientRequest} req 25 | * @returns 26 | */ 27 | function throttleByHost (req) { 28 | return { 29 | key: req.host, 30 | burst: 2, 31 | emissionInterval: 100 32 | } 33 | } 34 | 35 | exports.loadMiddleware = function (app) { 36 | app.use(helmet()) 37 | app.disable('x-powered-by') 38 | 39 | app.use( 40 | expressAttack({ 41 | throttles: [throttleByHost], 42 | store: throttleStore() 43 | }) 44 | ) 45 | } 46 | -------------------------------------------------------------------------------- /src/server-less.js: -------------------------------------------------------------------------------- 1 | 'use 
strict' 2 | 3 | const { aegis, adapters } = require('@module-federation/aegis') 4 | const remotes = require('../dist/remoteEntry') 5 | const { ServerlessAdapter } = adapters 6 | 7 | /** @type {Promise} */ 8 | const remoteEntries = remotes.get('./remoteEntries').then(factory => factory()) 9 | 10 | const adapter = remoteEntries 11 | .then(remotes => aegis.init(remotes)) 12 | .then(handle => ServerlessAdapter()(handle)) 13 | 14 | /** 15 | * Serverless entry point. Configure the serverless platform 16 | * to call this function. 17 | */ 18 | exports.handleServerless = async function (...args) { 19 | return adapter.then(handle => handle(...args)) 20 | } 21 | -------------------------------------------------------------------------------- /src/server.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Handle options and start the service. 3 | * Options: 4 | * 5 | * - run as serverless function or as web server 6 | * - http or https 7 | * - authorized routes enabled or disabled (json web token) 8 | * - clustering enabled or disabled 9 | * - hot reload as rolling restart or module cache refresh 10 | */ 11 | 12 | 'use strict' 13 | 14 | // aegis library 15 | const { 16 | AuthorizationService, 17 | CertificateService, 18 | ClusterService, 19 | ServiceMeshPlugin 20 | } = require('@module-federation/aegis').services 21 | 22 | const fs = require('node:fs/promises') 23 | const tls = require('tls') 24 | const http = require('http') 25 | const https = require('https') 26 | 27 | const port = process.argv[2] ? process.argv[2] : process.env.PORT || 80 28 | const sslPort = process.argv[2] ? 
process.argv[2] : process.env.SSL_PORT || 443 29 | const keyFile = 'cert/privatekey.pem' 30 | const certFile = 'cert/certificate.pem' 31 | const forceTimeout = 3000 // time to wait for conn to drop before closing server 32 | const certLoadPath = process.env.CERTLOAD_PATH || '/aegis/load-cert' 33 | const clusterEnabled = /true/i.test(process.env.CLUSTER_ENABLED) 34 | const checkIpHostname = process.env.CHECKIPHOST || 'checkip.amazonaws.com' 35 | 36 | const domain = 37 | require('../public/aegis.config.json').general.fqdn || process.env.DOMAIN 38 | 39 | const sslEnabled = // required in production 40 | /prod/i.test(process.env.NODE_ENV) || /true/i.test(process.env.SSL_ENABLED) 41 | 42 | /** 43 | * @param {import('express').Application} app 44 | */ 45 | exports.start = async function (app) { 46 | /** Authorize routes with JSON Web Tokens*/ 47 | AuthorizationService.protectRoutes(app, '/') 48 | 49 | const greeting = (proto, host, port) => 50 | `\n 🌎 ÆGIS listening on ${proto}://${host}:${port} \n` 51 | 52 | /** 53 | * Ping a public server for our public address. 54 | */ 55 | function checkPublicIpAddress () { 56 | const bytes = [] 57 | if (!/local/i.test(process.env.NODE_ENV)) { 58 | try { 59 | http.get( 60 | { 61 | hostname: checkIpHostname, 62 | method: 'get' 63 | }, 64 | response => { 65 | response.on('data', chunk => bytes.push(chunk)) 66 | response.on('end', function () { 67 | const ipAddr = bytes.join('').trim() 68 | console.log(greeting('http', ipAddr, port)) 69 | }) 70 | } 71 | ) 72 | return 73 | } catch (e) { 74 | console.error('checkip', e.message) 75 | } 76 | } else { 77 | console.log(greeting('http', 'localhost', port)) 78 | } 79 | } 80 | 81 | /** 82 | * Shutdown gracefully. 
Return 503 during shutdown to prevent new connections 83 | * @param {*} server 84 | * @param {*} [options] 85 | * @returns 86 | */ 87 | function shutdown (server) { 88 | let shuttingDown = false 89 | const devTimeout = 3000 90 | 91 | // Graceful shutdown taken from: http://blog.argteam.com/ 92 | process.on('SIGTERM', () => { 93 | // shorter timeout in dev 94 | const timeout = !/^prod.*/i.test(process.env.NODE_ENV) 95 | ? devTimeout 96 | : forceTimeout 97 | 98 | if (shuttingDown) return 99 | shuttingDown = true 100 | console.info('Received kill signal (SIGTERM), shutting down') 101 | 102 | setTimeout(function () { 103 | console.error( 104 | 'Taking too long to close connections, forcefully shutting down' 105 | ) 106 | process.exit(1) 107 | }, timeout).unref() 108 | 109 | server.close(function () { 110 | console.info('Closed out remaining connections.') 111 | process.exit() 112 | }) 113 | }) 114 | 115 | function middleware (req, res, next) { 116 | if (!shuttingDown) return next() 117 | res.set('Connection', 'close') 118 | res.status(503).send('Server is in the process of restarting.') 119 | } 120 | 121 | return middleware 122 | } 123 | 124 | /** 125 | * Programmatically provision CA cert using RFC 126 | * https://datatracker.ietf.org/doc/html/rfc8555 127 | * 128 | * {@link CertificateService} kicks off automated 129 | * id challenge test, conducted by the issuing CA. 130 | * If test passes or if cert already exists, hand 131 | * back the cert and private key. 
132 | * 133 | * @param {string} domain domain for which cert will be created 134 | * @param {boolean} [renewal] false by default, set true to renew 135 | * @returns {{ key:string, cert:string }} the CA cert and private 136 | * key used to sign it 137 | */ 138 | async function requestTrustedCert (domain, renewal = false) { 139 | try { 140 | const [key, cert] = await Promise.all([ 141 | fs.readFile(keyFile, 'utf8'), 142 | fs.readFile(certFile, 'utf-8') 143 | ]) 144 | return { key, cert } 145 | } catch {} 146 | 147 | // call service to acquire or renew x509 certificate from PKI 148 | const { key, cert } = await CertificateService.provisionCert(domain) 149 | 150 | await Promise.all([ 151 | fs.writeFile(keyFile, key, 'utf-8'), 152 | fs.writeFile(certFile, cert, 'utf-8') 153 | ]) 154 | return { key, cert } 155 | } 156 | 157 | /** 158 | * redirect (from 80) to secure port (443)? 159 | */ 160 | let redirect = true 161 | 162 | /** 163 | * Using {@link tls.createSecureContext} to create/renew 164 | * certs without restarting the server 165 | * 166 | * @param {boolean} renewal 167 | * @returns 168 | */ 169 | async function createSecureContext (renewal = false) { 170 | // turn off redirect 171 | redirect = false 172 | // get cert 173 | const cert = await requestTrustedCert(domain, renewal) 174 | // turn redirect back on 175 | redirect = true 176 | // return cert 177 | return tls.createSecureContext(cert) 178 | } 179 | 180 | /** 181 | * Listen on unsecured port (80). Redirect 182 | * to secure port (443) if SSL is enabled. 183 | * Don't redirect while cert challenge is 184 | * in progress. 
Challenge requires port 80 185 | */ 186 | async function startHttpServer () { 187 | const httpServer = http.createServer(app) 188 | app.use(shutdown(httpServer)) 189 | 190 | if (sslEnabled) { 191 | /** 192 | * if {@link redirect} is true, redirect 193 | * all requests for http to https port 194 | */ 195 | app.all(function (req, res) { 196 | if (redirect && req.protocol === 'http:') { 197 | const redirectUrl = `https://${domain}:${sslPort}${req.url}` 198 | res.redirect(301, redirectUrl) 199 | } 200 | }) 201 | } else { 202 | // https disabled, so attach to http 203 | /** @type {ServiceMeshAdapter} */ 204 | ServiceMeshPlugin.attachServer(httpServer) 205 | } 206 | httpServer.listen(port, checkPublicIpAddress) 207 | } 208 | 209 | /** the current cert/key pair */ 210 | let secureCtx 211 | 212 | /** 213 | * Start the web server. Programmatically 214 | * provision CA cert if SSL (TLS) is enabled 215 | * and no cert is found in /cert directory. 216 | */ 217 | async function startWebServer () { 218 | startHttpServer() 219 | 220 | if (sslEnabled) { 221 | // provision or renew cert and key 222 | secureCtx = await createSecureContext() 223 | /** 224 | * provide cert via {@link secureCtx} - provision & 225 | * renew certs without having to restart the server 226 | */ 227 | const httpsServer = https.createServer( 228 | { 229 | SNICallback: (_, cb) => cb(null, secureCtx) 230 | }, 231 | app 232 | ) 233 | // update secureCtx to refresh certificate 234 | app.use( 235 | certLoadPath, 236 | async () => (secureCtx = await createSecureContext(true)) 237 | ) 238 | // graceful shutdown prevents new clients from connecting 239 | app.use(shutdown(httpsServer)) 240 | // service mesh uses same port 241 | ServiceMeshPlugin.attachServer(httpsServer, secureCtx) 242 | 243 | // listen on ssl port 244 | httpsServer.listen(sslPort, () => 245 | console.info(greeting('https', domain, sslPort)) 246 | ) 247 | } 248 | } 249 | 250 | /** 251 | * Handle rolling restart request if running in cluster mode 252 
| */ 253 | function rollingRestart () { 254 | if (clusterEnabled) { 255 | // Manual reset if left in wrong state 256 | app.use(`${hotReloadPath}-reset`, function (_req, res) { 257 | process.send({ cmd: 'reload-reset' }) 258 | res.send('reload status reset...try again') 259 | }) 260 | app.use(hotReloadPath, function (_req, res) { 261 | res.send('

starting cluster reload

') 262 | process.send({ cmd: 'reload' }) 263 | }) 264 | } 265 | } 266 | 267 | /** 268 | * start aegis and the webserver 269 | * 270 | * this function isn't called if running in serverless mode 271 | */ 272 | async function startService () { 273 | try { 274 | rollingRestart() 275 | startWebServer() 276 | } catch (e) { 277 | console.error(startService.name, e) 278 | } 279 | } 280 | 281 | /** 282 | * Start a single instance or a cluster 283 | */ 284 | if (clusterEnabled) { 285 | // Fork child processes (one per core), 286 | // which share socket descriptor (round-robin) 287 | ClusterService.startCluster(startService) 288 | } else { 289 | startService() 290 | } 291 | } 292 | -------------------------------------------------------------------------------- /start.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # downgrade TLS version - problem with websockets lib 4 | # export NODE_OPTIONS=--openssl-legacy-provider 5 | # Use sudo since we need to bind to ports 80, 443, run as daemon and log to webroot 6 | sudo nohup ${NVM_BIN}/node --title aegis $HOME/aegis-host/src/bootstrap.js >$HOME/aegis-host/public/aegis.log 2>&1 & 7 | # display result of command 8 | echo "checking status..." 9 | sleep 2 10 | $HOME/aegis-host/status.sh 11 | -------------------------------------------------------------------------------- /status.sh: -------------------------------------------------------------------------------- 1 | # lookup the domain's ip and compare to external ip for this host 2 | # list the aegis host process/es and listener sockets; 3 | # otherwise, display a message that the server is down. 
4 | 5 | GREEN='\033[0;32m' 6 | RED='\033[0;31m' 7 | NC='\033[0m' 8 | 9 | DOMAIN1=aegis.module-federation.org 10 | DOMAIN2=aegis2.module-federation.org 11 | 12 | # Get public IP of this host 13 | IPADDR_PUBLIC=$(curl -s checkip.amazonaws.com) 14 | # Get IP of domain1 15 | IPADDR_DOMAIN1=$(nslookup -recurse $DOMAIN1 | grep Address | grep -v "#" | awk '{print $2}') 16 | # Get IP of domain2 17 | IPADDR_DOMAIN2=$(nslookup -recurse $DOMAIN2 | grep Address | grep -v "#" | awk '{print $2}') 18 | 19 | # print the public IP and fully qualified domain name of this host 20 | 21 | echo "public address $IPADDR_PUBLIC" 22 | 23 | if [ "$IPADDR_PUBLIC" == "$IPADDR_DOMAIN1" ]; then 24 | echo -e "domain${GREEN} $DOMAIN1 $NC" 25 | fi 26 | 27 | if [ "$IPADDR_PUBLIC" == "$IPADDR_DOMAIN2" ]; then 28 | echo -e "domain${GREEN} $DOMAIN2 $NC" 29 | fi 30 | 31 | # print current running process 32 | sudo lsof -P -i | grep LISTEN | grep aegis 33 | 34 | # get process ID of aegis 35 | PID=$(sudo lsof -P -i | grep LISTEN | grep aegis | awk '{print $2}') 36 | 37 | if [[ ${PID} ]]; then 38 | echo -e "${GREEN}server is up $NC" 39 | else 40 | echo -e "${RED}server is down $NC" 41 | fi 42 | -------------------------------------------------------------------------------- /stop.sh: -------------------------------------------------------------------------------- 1 | # Get PID of Aegis process 2 | PID=$(sudo lsof -P -i | grep aegis | grep LISTEN | awk '{ print $2 }') 3 | # display command to execute 4 | echo "sudo kill $PID" 5 | # kill process gracefully (sigterm) 6 | sudo kill $PID 7 | # display status 8 | ~/aegis-host/status.sh 9 | -------------------------------------------------------------------------------- /target/npmlist.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0.0", 3 | "name": "micro-lib", 4 | "dependencies": { 5 | "@module-federation/aegis": { 6 | "version": "0.0.1-beta.12" 7 | }, 8 | "@octokit/core": { 9 | "version": "3.4.0" 
10 | }, 11 | "@octokit/rest": { 12 | "version": "18.5.3" 13 | }, 14 | "dotenv": { 15 | "version": "8.6.0" 16 | }, 17 | "express-graceful-shutdown": { 18 | "version": "1.1.3" 19 | }, 20 | "express-jwt": { 21 | "version": "6.0.0" 22 | }, 23 | "express": { 24 | "version": "4.17.1" 25 | }, 26 | "greenlock": { 27 | "version": "4.0.4" 28 | }, 29 | "import-fresh": { 30 | "version": "3.3.0" 31 | }, 32 | "jwks-rsa": { 33 | "version": "2.0.3" 34 | }, 35 | "mongodb-client-encryption": { 36 | "version": "1.2.3" 37 | }, 38 | "mongodb": { 39 | "version": "3.6.6" 40 | }, 41 | "nanoid": { 42 | "version": "3.1.20" 43 | }, 44 | "regenerator-runtime": { 45 | "version": "0.13.7" 46 | }, 47 | "serverless-aws-static-file-handler": { 48 | "version": "2.0.8" 49 | }, 50 | "whois": { 51 | "version": "2.13.5" 52 | } 53 | } 54 | } -------------------------------------------------------------------------------- /webpack.client.config.js: -------------------------------------------------------------------------------- 1 | require('dotenv').config() 2 | const path = require('path') 3 | const { ModuleFederationPlugin } = require('webpack').container 4 | const nodeExternals = require('webpack-node-externals') 5 | const NodePolyfillPlugin = require('node-polyfill-webpack-plugin') 6 | const fetchRemotes = require('./webpack/fetch-remotes') 7 | const remoteEntries = require('./webpack/remote-entries') 8 | const port = process.env.PORT || 80 9 | const sslPort = process.env.SSL_PORT || 443 10 | const sslEnabled = /true/i.test(process.env.SSL_ENABLED) 11 | const publicPort = sslEnabled ? 
sslPort : port 12 | const chalk = require('chalk') 13 | 14 | const server = env => { 15 | console.log(env) 16 | if (env.serverless) { 17 | remoteEntries.forEach(e => (e.path = 'webpack')) 18 | console.log(chalk.yellow('serverless build')) 19 | } 20 | return new Promise(resolve => { 21 | fetchRemotes(remoteEntries).then(remotes => { 22 | console.info(remotes) 23 | resolve({ 24 | externals: [nodeExternals()], 25 | target: 'async-node', 26 | mode: 'development', 27 | devtool: 'source-map', 28 | entry: ['@babel/polyfill', path.resolve(__dirname, 'src/server.js')], 29 | output: { 30 | publicPath: `http://localhost:${publicPort}`, 31 | path: path.resolve(__dirname, 'dist'), 32 | libraryTarget: 'commonjs' 33 | }, 34 | resolve: { 35 | extensions: ['.js'] 36 | }, 37 | module: { 38 | rules: [ 39 | { 40 | test: /\.js?$/, 41 | exclude: /node_modules/, 42 | use: { 43 | loader: 'babel-loader', 44 | options: { 45 | presets: ['@babel/preset-env'] 46 | } 47 | } 48 | } 49 | ] 50 | }, 51 | plugins: [ 52 | new ModuleFederationPlugin({ 53 | name: 'Aegis', 54 | filename: 'remoteEntry.js', 55 | library: { 56 | name: 'Aegis', 57 | type: 'commonjs-module' 58 | }, 59 | remoteType: 'commonjs-module', 60 | remotes, 61 | exposes: { 62 | './server': './src/server', 63 | './domain': '@module-federation/aegis/lib/domain', 64 | './remoteEntries': './webpack/remote-entries' 65 | } 66 | }) 67 | ] 68 | }) 69 | }) 70 | }) 71 | } 72 | 73 | const client = env => { 74 | console.log(env) 75 | if (env.serverless) { 76 | remoteEntries.forEach(e => (e.path = 'webpack')) 77 | console.log(chalk.yellow('serverless build')) 78 | } 79 | return new Promise(resolve => { 80 | fetchRemotes(remoteEntries).then(remotes => { 81 | console.info(remotes) 82 | resolve({ 83 | target: 'web', 84 | mode: 'development', 85 | devtool: 'source-map', 86 | entry: [path.resolve(__dirname, 'src/server.js')], 87 | output: { 88 | publicPath: `http://localhost:${publicPort}`, 89 | path: path.resolve(__dirname, 'dist'), 90 | 
libraryTarget: 'commonjs' 91 | }, 92 | resolve: { 93 | extensions: ['.js'] 94 | }, 95 | module: { 96 | rules: [ 97 | { 98 | test: /\.js?$/, 99 | exclude: /node_modules/, 100 | use: { 101 | loader: 'babel-loader', 102 | options: { 103 | presets: ['@babel/preset-env'] 104 | } 105 | } 106 | } 107 | ] 108 | }, 109 | plugins: [ 110 | new NodePolyfillPlugin(), 111 | new ModuleFederationPlugin({ 112 | name: 'Aegis', 113 | filename: 'remoteEntry.js', 114 | library: { 115 | name: 'Aegis', 116 | type: 'commonjs-module' 117 | }, 118 | remoteType: 'commonjs-module', 119 | remotes, 120 | exposes: { 121 | './server': './src/server', 122 | './domain': '@module-federation/aegis/lib/domain', 123 | './remoteEntries': './webpack/remote-entries' 124 | } 125 | }) 126 | ] 127 | }) 128 | }) 129 | }) 130 | } 131 | 132 | module.exports = [server] 133 | -------------------------------------------------------------------------------- /webpack.config.js: -------------------------------------------------------------------------------- 1 | require('dotenv').config() 2 | const path = require('path') 3 | const chalk = require('chalk') 4 | const { ModuleFederationPlugin } = require('webpack').container 5 | const nodeExternals = require('webpack-node-externals') 6 | const fetchRemotes = require('./webpack/fetch-remotes') 7 | let remoteEntries = require('./webpack/remote-entries') 8 | 9 | const server = env => { 10 | handleEnv(env) 11 | exitAfterBuild() 12 | return new Promise(resolve => { 13 | fetchRemotes(remoteEntries).then(remotes => { 14 | console.info(remotes) 15 | resolve({ 16 | externals: [nodeExternals(), 'mongodb-client-encryption'], 17 | target: 'async-node', 18 | //stats: 'verbose', 19 | mode: 'development', 20 | devtool: 'hidden-source-map', 21 | entry: path.resolve(__dirname, 'src/bootstrap.js'), 22 | output: { 23 | publicPath: `http://localhost`, 24 | path: path.resolve(__dirname, 'dist'), 25 | libraryTarget: 'commonjs', 26 | filename: '[name].js' 27 | }, 28 | resolve: { 29 | 
extensions: ['.js', '.mjs', '.cjs', '.jsx'] 30 | }, 31 | module: { 32 | rules: [ 33 | { 34 | test: /\.js?$/, 35 | loader: 'babel-loader', 36 | exclude: /node_modules/ 37 | } 38 | ] 39 | }, 40 | plugins: [ 41 | new ModuleFederationPlugin({ 42 | name: 'hostContainer', 43 | filename: 'remoteEntry.js', 44 | library: { type: 'commonjs' }, 45 | remotes, 46 | exposes: { 47 | './hostContainer': './src/host-container.js', 48 | './remoteEntries': './webpack/remote-entries' 49 | } 50 | }) 51 | ] 52 | }) 53 | }) 54 | }) 55 | } 56 | 57 | function handleEnv (env) { 58 | console.log(env) 59 | if (env.serverless) { 60 | remoteEntries.forEach(e => (e.path = 'webpack')) 61 | console.log(chalk.yellow('serverless build')) 62 | } 63 | } 64 | 65 | function exitAfterBuild () { 66 | setTimeout(() => process.exit(0), 4000) 67 | } 68 | 69 | module.exports = [server] 70 | -------------------------------------------------------------------------------- /webpack/fetch-remotes.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const { Octokit } = require('@octokit/rest') 4 | const fs = require('fs') 5 | const path = require('path') 6 | const { RemoteEntriesUtil } = require('./remote-entries-util') 7 | 8 | // Use developer token for github api 9 | const octokit = new Octokit({ auth: process.env.GITHUB_TOKEN }) 10 | 11 | /** 12 | * Download remote entry from github. Will be a blob (> 1MB). 13 | * File is base64 encoded. Decode to utf-8 and write to `path`. 
14 | * 15 | * @param {*} entry remote entry record 16 | * @param {*} path where to write file contents 17 | * @returns 18 | */ 19 | async function octoGet (entry, path) { 20 | return octokit 21 | .request( 22 | 'GET https://api.github.com/repos/{owner}/{repo}/contents/{filedir}?ref={branch}', 23 | { 24 | owner: entry.owner, 25 | repo: entry.repo, 26 | filedir: entry.filedir, 27 | branch: entry.branch 28 | } 29 | ) 30 | .then(function (rest) { 31 | const file = rest.data.find(f => f.name === 'remoteEntry.js') 32 | return file.sha 33 | }) 34 | .then(function (sha) { 35 | return octokit.request('GET /repos/{owner}/{repo}/git/blobs/{sha}', { 36 | owner: entry.owner, 37 | repo: entry.repo, 38 | sha 39 | }) 40 | }) 41 | .then(function (rest) { 42 | fs.writeFileSync( 43 | path, 44 | Buffer.from(rest.data.content, 'base64').toString('utf-8') 45 | ) 46 | }) 47 | } 48 | 49 | function httpGet (entry, path, done) { 50 | const url = new URL(entry.url) 51 | require(url.protocol.replace(':', '')).get( 52 | entry.url, 53 | { rejectUnauthorized: false }, 54 | function (response) { 55 | response.pipe(fs.createWriteStream(path)) 56 | response.on('end', done) 57 | } 58 | ) 59 | } 60 | 61 | /** 62 | * Allow multiple entry points from different owners, repos, etc on github. 63 | * @param {*} entry 64 | * @param {*} url 65 | * @returns 66 | */ 67 | function generatePath (entry, url) { 68 | if (entry.owner) 69 | return `${entry.owner}-${entry.repo}-${entry.filedir 70 | .split('/') 71 | .join('-')}-${entry.branch}` 72 | return url.pathname.split('/').join('-') 73 | } 74 | 75 | function generateFilename (entry) { 76 | const url = new URL(entry.url) 77 | const hostpart = url.hostname.split('.').join('-') 78 | const portpart = url.port ? 
url.port : 80 79 | const pathpart = generatePath(entry, url) 80 | if (/remoteEntry/i.test(pathpart)) 81 | return `${hostpart}-${portpart}-${pathpart}` 82 | return `${hostpart}-${portpart}-${pathpart}-remoteEntry.js` 83 | } 84 | 85 | function getPath (entry) { 86 | var entry 87 | if (!entry || !entry.path) { 88 | entry.path = path.join(process.cwd(), 'webpack') 89 | } 90 | console.debug(getPath.name, entry) 91 | const filename = generateFilename(entry) 92 | let basedir = entry.path 93 | if (entry.path && entry.path.charAt(entry.path.length - 1) !== '/') { 94 | basedir = entry.path.concat('/') 95 | } 96 | return basedir.concat(filename) 97 | } 98 | 99 | /** 100 | * If streaming from github, owner, repo etc contribute to uniqueness. 101 | * @param {*} entry 102 | * @returns 103 | */ 104 | function uniqueUrl (entry) { 105 | return `${entry.url}${entry.owner}${entry.repo}${entry.filedir}${entry.branch}` 106 | } 107 | 108 | function deduplicate (remoteEntries) { 109 | if (!remoteEntries || remoteEntries.length < 1) return {} 110 | return remoteEntries 111 | .map(function (e) { 112 | return { 113 | [uniqueUrl(e)]: { 114 | ...e, 115 | name: uniqueUrl(e) 116 | } 117 | } 118 | }) 119 | .reduce((p, c) => ({ ...p, ...c }), remoteEntries) 120 | } 121 | 122 | /** 123 | * Download each unique remote entry file. 
124 | * @param {{ 125 | * name: string, 126 | * url: string, 127 | * path: string 128 | * }[]} remoteEntries `name` of file, `url` of file, download file to `path` 129 | * 130 | * @returns {Promise<{[index: string]: string}>} local paths to downloaded entries 131 | */ 132 | module.exports = async remoteEntries => { 133 | const entries = RemoteEntriesUtil(remoteEntries) 134 | .validateEntries() 135 | .removeWasmEntries() 136 | .entries() 137 | 138 | console.log(entries) 139 | const remotes = await Promise.all( 140 | Object.values(deduplicate(entries)).map(function (entry) { 141 | const path = getPath(entry) 142 | console.log('downloading file to', path) 143 | 144 | return new Promise(async function (resolve) { 145 | const resolvePath = () => resolve({ [entry.name]: path }) 146 | 147 | if (/^https:\/\/api.github.com.*/i.test(entry.url)) { 148 | // Download from github. 149 | await octoGet(entry, path) 150 | resolvePath() 151 | } else { 152 | httpGet(entry, path, resolvePath) 153 | } 154 | }) 155 | }) 156 | ) 157 | 158 | return entries.map(e => ({ 159 | [e.name]: remotes.find(r => r[uniqueUrl(e)])[uniqueUrl(e)] 160 | })) 161 | } 162 | -------------------------------------------------------------------------------- /webpack/remote-entries-type.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @typedef {object} remoteEntry points to a manifest (remoteEntry.js) of remote modules 3 | * exposed for consumption at runtime. Note .wasm files are not contained in these bundles 4 | * as they are already optimized for size and statically linked to their dependencies. 5 | * It can, however, be dynamically "linked" to other modules at runtime via the Aegis 6 | * framework, i.e. 
events, ports/adapters, relations, commands, etc 7 | * @property {string} name descriptive name of the entry 8 | * @property {string} url location of the remoteEntry.js or .wasm file to be imported 9 | * - if using github, specify `https://api.github.com` 10 | * @property {string} path local path where compiled files are written 11 | * @property {"model"|"adapter"|"service"} type the type of components in the module 12 | * * @property {function():Promise} importRemote the function used to import the module 13 | * - `await import("microservices/models")` imports models based on the below webpack.config.js 14 | * 15 | * ```js 16 | * new ModuleFederationPlugin({ 17 | * name: "microservices", 18 | * filename: "remoteEntry.js", 19 | * library: { 20 | * name: "microservices", 21 | * type: "commonjs-module", 22 | * }, 23 | * remoteType: "commonjs-module", 24 | * exposes: { 25 | * "./models": "./src/domain", 26 | * "./adapters": "./src/adapters", 27 | * "./services": "./src/services", 28 | * }, 29 | * }), 30 | * ``` 31 | * @property {string} [repo] if using github, name of the github repo 32 | * @property {string} [owner] if using gitbub, owner of the repo 33 | * @property {string} [filedir] if using gitub, path to the remoteEntry.js file 34 | * @property {string} [branch] if using github, branch of the repo to use, e.g. "master" or "main" 35 | * @property {boolean} [wasm] is this a WebAssembly module? 36 | * @property {string} [serviceName] optional name of the service to which the module belongs 37 | * - use to group model, adapaters and services together 38 | * at startup instead of waiting until a request for the model has been received 39 | * model, adapter or service 40 | * @property {string} [worker] Creates a model that is controlled by a custom worker 41 | * instead of the system default worker. Developers can do whatever they want with the worker 42 | * and needn't use the associated model at all. 
That said, developers may want to make use of 43 | * the auto-generated APIs and storage, which are exposed at a lower level, allowing 44 | * for more extensive customization. 45 | * 46 | */ 47 | 48 | const { 49 | importWebAssembly 50 | } = require('@module-federation/aegis').adapters.webassembly 51 | 52 | /** 53 | * Example entries showing 54 | * 55 | * @type {remoteEntry[]} 56 | */ 57 | exports.crowdcontrol = [ 58 | { 59 | name: 'crowdcontrol', 60 | url: 'https://api.github.com', 61 | repo: 'crowdcontrol', 62 | owner: 'smartdistrict', 63 | filedir: 'dist', 64 | branch: 'beta', 65 | path: __dirname, 66 | type: 'model', 67 | importRemote: () => import('crowdcontrol/models') 68 | }, 69 | { 70 | name: 'livestream', 71 | url: 'https://cctv.local/streams.wasm', 72 | path: __dirname, 73 | type: 'adapter', 74 | wasm: true, 75 | importRemote () { 76 | return importWebAssembly(this) 77 | } 78 | }, 79 | { 80 | name: 'computervision', 81 | url: 'https://machinelearning.cdn?asset=vision.wasm', 82 | path: __dirname, 83 | type: 'adapter', 84 | wasm: true, 85 | importRemote () { 86 | return importWebAssembly(this) 87 | } 88 | } 89 | ] 90 | -------------------------------------------------------------------------------- /webpack/remote-entries-util.js: -------------------------------------------------------------------------------- 1 | /** 2 | * 3 | * @param {{name:string,path:sting,filedir:string,branch:string,url:string}[]} remoteEntries 4 | */ 5 | exports.RemoteEntriesUtil = function (remoteEntries) { 6 | console.info(remoteEntries) 7 | const entries = Object.values(remoteEntries) 8 | .map(e => Object.values(e)) 9 | .flat(2) 10 | 11 | return { 12 | validateEntries () { 13 | if (!entries || entries.length < 1) 14 | throw new Error('entries missing or invalid') 15 | return this 16 | }, 17 | 18 | removeWasmEntries () { 19 | if (Array.isArray(entries)) 20 | entries.forEach((e, i, a) => !e.wasm || a.splice(i, 1)) 21 | return this 22 | }, 23 | 24 | entries () { 25 | return entries 
26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /webpack/remote-entries/README.md: -------------------------------------------------------------------------------- 1 | ## This directory contains remote-entry.js files -------------------------------------------------------------------------------- /webpack/remote-entries/bli.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | /** 4 | * @typedef {import("../remote-entries-type").remoteEntry} entry 5 | */ 6 | 7 | /** @type {entry[]} */ 8 | exports.bli = [ 9 | { 10 | name: 'bli', 11 | url: 'http://localhost:8000/remoteEntry.js', 12 | path: __dirname, 13 | type: 'model', 14 | importRemote: async () => import('bli/models') 15 | }, 16 | { 17 | name: 'adapters', 18 | url: 'http://localhost:8000/remoteEntry.js', 19 | path: __dirname, 20 | type: 'adapter', 21 | importRemote: async () => import('bli/adapters') 22 | }, 23 | { 24 | name: 'services', 25 | url: 'http://localhost:8000/remoteEntry.js', 26 | path: __dirname, 27 | type: 'service', 28 | importRemote: async () => import('bli/services') 29 | }, 30 | { 31 | name: 'ports', 32 | url: 'http://localhost:8000/remoteEntry.js', 33 | path: __dirname, 34 | type: 'port', 35 | importRemote: async () => import('bli/ports') 36 | } 37 | ] 38 | -------------------------------------------------------------------------------- /webpack/remote-entries/cache-local.js: -------------------------------------------------------------------------------- 1 | exports.cache = [ 2 | { 3 | name: 'distributed-cache', 4 | url: 'http://localhost:8001/remoteEntry.js', 5 | path: __dirname, 6 | type: 'model-cache', 7 | importRemote: async () => import('distributed-cache/model-cache') 8 | }, 9 | { 10 | name: 'adapter-cache', 11 | url: 'http://localhost:8001/remoteEntry.js', 12 | path: __dirname, 13 | type: 'adapter-cache', 14 | importRemote: async () => 
import('distributed-cache/adapter-cache') 15 | }, 16 | { 17 | name: 'service-cache', 18 | url: 'http://localhost:8001/remoteEntry.js', 19 | path: __dirname, 20 | type: 'service-cache', 21 | importRemote: async () => import('distributed-cache/service-cache') 22 | }, 23 | { 24 | name: 'port-cache', 25 | url: 'http://localhost:8001/remoteEntry.js', 26 | path: __dirname, 27 | type: 'port-cache', 28 | importRemote: async () => import('distributed-cache/port-cache') 29 | } 30 | ] 31 | -------------------------------------------------------------------------------- /webpack/remote-entries/cache.js: -------------------------------------------------------------------------------- 1 | exports.cache = [ 2 | { 3 | name: 'distributed-cache', 4 | url: 'https://api.github.com', 5 | repo: 'aegis-app', 6 | owner: 'module-federation', 7 | filedir: 'dist', 8 | branch: 'cache', 9 | path: __dirname, 10 | type: 'model-cache', 11 | importRemote: async () => import('distributed-cache/model-cache') 12 | }, 13 | { 14 | name: 'adapter-cache', 15 | url: 'https://api.github.com', 16 | repo: 'aegis-app', 17 | owner: 'module-federation', 18 | filedir: 'dist', 19 | branch: 'cache', 20 | path: __dirname, 21 | type: 'adapter-cache', 22 | importRemote: async () => import('distributed-cache/adapter-cache') 23 | }, 24 | { 25 | name: 'service-cache', 26 | url: 'https://api.github.com', 27 | repo: 'aegis-app', 28 | owner: 'module-federation', 29 | filedir: 'dist', 30 | branch: 'cache', 31 | path: __dirname, 32 | type: 'service-cache', 33 | importRemote: async () => import('distributed-cache/service-cache') 34 | }, 35 | { 36 | name: 'port-cache', 37 | url: 'https://api.github.com', 38 | repo: 'aegis-app', 39 | owner: 'module-federation', 40 | filedir: 'dist', 41 | branch: 'cache', 42 | path: __dirname, 43 | type: 'port-cache', 44 | importRemote: async () => import('distributed-cache/port-cache') 45 | } 46 | ] 47 | -------------------------------------------------------------------------------- 
/webpack/remote-entries/customer.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @typedef {import("../remote-entries-type").remoteEntry} entry 3 | */ 4 | 5 | /** @type {entry[]} */ 6 | exports.customer = [ 7 | { 8 | name: 'customer', 9 | url: 'https://api.github.com', 10 | repo: 'aegis-application', 11 | owner: 'module-federation', 12 | filedir: 'dist', 13 | branch: 'customer', 14 | path: __dirname, 15 | type: 'model', 16 | importRemote: async () => import('customer/models') 17 | }, 18 | { 19 | name: 'adapters', 20 | url: 'https://api.github.com', 21 | repo: 'aegis-application', 22 | owner: 'module-federation', 23 | filedir: 'dist', 24 | branch: 'customer', 25 | path: __dirname, 26 | type: 'adapter', 27 | importRemote: async () => import('customer/adapters') 28 | }, 29 | { 30 | name: 'services', 31 | url: 'https://api.github.com', 32 | repo: 'aegis-application', 33 | owner: 'module-federation', 34 | filedir: 'dist', 35 | branch: 'customer', 36 | path: __dirname, 37 | type: 'service', 38 | importRemote: async () => import('customer/services') 39 | } 40 | ] 41 | -------------------------------------------------------------------------------- /webpack/remote-entries/fdp.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | /** 4 | * @typedef {import("../remote-entries-type").remoteEntry} entry 5 | */ 6 | 7 | /** @type {entry[]} */ 8 | exports.fdp = [ 9 | { 10 | name: 'fdp', 11 | url: 'http://localhost:8000/remoteEntry.js', 12 | path: __dirname, 13 | type: 'model', 14 | importRemote: async () => import('fdp/models') 15 | }, 16 | { 17 | name: 'adapters', 18 | url: 'http://localhost:8000/remoteEntry.js', 19 | path: __dirname, 20 | type: 'adapter', 21 | importRemote: async () => import('fdp/adapters') 22 | }, 23 | { 24 | name: 'services', 25 | url: 'http://localhost:8000/remoteEntry.js', 26 | path: __dirname, 27 | type: 'service', 28 | importRemote: async () => 
import('fdp/services') 29 | }, 30 | { 31 | name: 'ports', 32 | url: 'http://localhost:8000/remoteEntry.js', 33 | path: __dirname, 34 | type: 'port', 35 | importRemote: async () => import('fdp/ports') 36 | } 37 | ] 38 | -------------------------------------------------------------------------------- /webpack/remote-entries/go.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | /** 4 | * @typedef {import("../remote-entries-type").remoteEntry} entries 5 | */ 6 | 7 | const { 8 | importWebAssembly 9 | } = require('@module-federation/aegis').adapters.webassembly 10 | 11 | /** @type {entries[]} */ 12 | exports.go = [ 13 | { 14 | name: 'go', 15 | url: 'http://localhost:8000/main.wasm', 16 | wasm: true, 17 | path: __dirname, 18 | type: 'model', 19 | importRemote () { 20 | return importWebAssembly(this) 21 | } 22 | } 23 | ] 24 | -------------------------------------------------------------------------------- /webpack/remote-entries/index.js: -------------------------------------------------------------------------------- 1 | exports.local = require('./local') 2 | exports.cache = require('./cache-local') 3 | exports.wasm = require('./wasm-local') 4 | 5 | // exports.github = require('./gitpod') 6 | // exports.cache = require('./cache-github') 7 | -------------------------------------------------------------------------------- /webpack/remote-entries/local.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | /** 4 | * @typedef {import("../remote-entries-type").remoteEntry} entry 5 | */ 6 | 7 | /** @type {entry[]} */ 8 | exports.local = [ 9 | { 10 | name: 'local', 11 | url: 'http://localhost:8000/remoteEntry.js', 12 | path: __dirname, 13 | type: 'model', 14 | importRemote: async () => import('local/models') 15 | }, 16 | { 17 | name: 'adapters', 18 | url: 'http://localhost:8000/remoteEntry.js', 19 | path: __dirname, 20 | type: 'adapter', 21 | importRemote: async () 
=> import('local/adapters') 22 | }, 23 | { 24 | name: 'services', 25 | url: 'http://localhost:8000/remoteEntry.js', 26 | path: __dirname, 27 | type: 'service', 28 | importRemote: async () => import('local/services') 29 | }, 30 | { 31 | name: 'ports', 32 | url: 'http://localhost:8000/remoteEntry.js', 33 | path: __dirname, 34 | type: 'port', 35 | importRemote: async () => import('local/ports') 36 | } 37 | ] 38 | -------------------------------------------------------------------------------- /webpack/remote-entries/order.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | /** 4 | * @typedef {import("../remote-entries-type").remoteEntry} entry 5 | */ 6 | 7 | /** @type {entry[]} */ 8 | exports.order = [ 9 | { 10 | name: 'order', 11 | url: 'https://api.github.com', 12 | repo: 'aegis-application', 13 | owner: 'module-federation', 14 | filedir: 'dist', 15 | branch: 'order', 16 | path: __dirname, 17 | type: 'model', 18 | importRemote: async () => import('order/models') 19 | }, 20 | { 21 | name: 'adapters', 22 | url: 'https://api.github.com', 23 | repo: 'aegis-application', 24 | owner: 'module-federation', 25 | filedir: 'dist', 26 | branch: 'order', 27 | path: __dirname, 28 | type: 'adapter', 29 | importRemote: async () => import('order/adapters') 30 | }, 31 | { 32 | name: 'services', 33 | url: 'https://api.github.com', 34 | repo: 'aegis-application', 35 | owner: 'module-federation', 36 | filedir: 'dist', 37 | branch: 'order', 38 | path: __dirname, 39 | type: 'service', 40 | importRemote: async () => import('order/services') 41 | } 42 | ] 43 | -------------------------------------------------------------------------------- /webpack/remote-entries/python.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | 'use strict' 4 | 5 | /** 6 | * @typedef {import("../remote-entries-type").remoteEntry} entries 7 | */ 8 | 9 | const { importPython } = 
require('@module-federation/aegis').adapters.python 10 | 11 | /** 12 | * @typedef {import("../remote-entries-type").remoteEntry} entry 13 | */ 14 | 15 | /** @type {entry[]} */ 16 | exports.order = [ 17 | { 18 | name: 'python', 19 | url: 'https://api.github.com', 20 | repo: 'rustpython', 21 | owner: 'module-federation', 22 | filedir: 'dist', 23 | branch: 'main', 24 | path: __dirname, 25 | wasm: true, 26 | type: 'model', 27 | importRemote () { 28 | return importPython(this) 29 | } 30 | } 31 | ] 32 | -------------------------------------------------------------------------------- /webpack/remote-entries/wasm-local.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | /** 4 | * @typedef {import("../remote-entries-type").remoteEntry} entries 5 | */ 6 | 7 | const { 8 | importWebAssembly 9 | } = require('@module-federation/aegis').adapters.webassembly 10 | 11 | /** @type {entries[]} */ 12 | exports.wasm = [ 13 | { 14 | name: 'go', 15 | url: 'http://localhost:8000/main.wasm', 16 | wasm: true, 17 | path: __dirname, 18 | type: 'model', 19 | importRemote () { 20 | return importWebAssembly(this) 21 | } 22 | } 23 | ] 24 | -------------------------------------------------------------------------------- /webpack/remote-entries/wasm.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | /** 4 | * @typedef {import("../remote-entries-type").remoteEntry} entries 5 | */ 6 | 7 | const { 8 | importWebAssembly 9 | } = require('@module-federation/aegis').adapters.webassembly 10 | 11 | /** @type {entries[]} */ 12 | exports.wasm = [ 13 | { 14 | name: 'wasm', 15 | url: 'https://api.github.com', 16 | repo: 'aegis', 17 | owner: 'module-federation', 18 | filedir: 'wasm/build', 19 | branch: 'main', 20 | wasm: true, 21 | path: __dirname, 22 | type: 'model', 23 | importRemote () { 24 | return importWebAssembly(this) 25 | } 26 | } 27 | ] 28 | 
-------------------------------------------------------------------------------- /webpack/remote-entries/worker.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @typedef {import("../remote-entries-type").remoteEntry} entries 3 | */ 4 | 5 | /** 6 | * This creates a model that is controlled by a custom worker 7 | * instead of the system default worker. Developers do whatever 8 | * they want with the worker and needn't use the associated model. 9 | * 10 | * @type {entries[]} 11 | */ 12 | exports.worker = [ 13 | { 14 | name: 'worker', 15 | url: 'https://api.github.com', 16 | repo: 'aegis', 17 | owner: 'module-federation', 18 | filedir: 'dist', 19 | branch: 'worker', 20 | path: __dirname, 21 | type: 'model', 22 | worker: 'worker.js', 23 | importRemote () { 24 | return import('workers/examples/simple') 25 | } 26 | } 27 | ] 28 | --------------------------------------------------------------------------------