├── .github └── workflows │ ├── alibabacloud.yml │ ├── aws.yml │ ├── codacy.yml │ ├── codeql.yml │ ├── generator-generic-ossf-slsa3-publish.yml │ ├── google.yml │ ├── greetings.yml │ ├── ibm.yml │ ├── jekyll.yml │ ├── objective-c-xcode.yml │ ├── pylint.yml │ ├── python-app.yml │ └── tencent.yml ├── .gitignore ├── .whitesource ├── LICENSE ├── LICENSE.md ├── README.md ├── docs ├── api.md ├── configuration.md ├── installation.md └── usage.md ├── requirements.txt ├── scripts ├── deploy.sh └── start.sh ├── src ├── ai │ ├── monitoring.py │ ├── network.py │ └── optimization.py ├── blockchain │ ├── block.py │ ├── chain.py │ └── transaction.py ├── encryption │ ├── asymmetric.py │ └── symmetric.py ├── ml │ ├── monitoring.py │ ├── network.py │ └── optimization.py └── network.py └── tests ├── test_ai.py ├── test_blockchain.py ├── test_encryption.py └── test_ml.py /.github/workflows/alibabacloud.yml: -------------------------------------------------------------------------------- 1 | # This workflow will build and push a new container image to Alibaba Cloud Container Registry (ACR), 2 | # and then will deploy it to Alibaba Cloud Container Service for Kubernetes (ACK), when there is a push to the "main" branch. 3 | # 4 | # To use this workflow, you will need to complete the following set-up steps: 5 | # 6 | # 1. Create an ACR repository to store your container images. 7 | # You can use ACR EE instance for more security and better performance. 8 | # For instructions see https://www.alibabacloud.com/help/doc-detail/142168.htm 9 | # 10 | # 2. Create an ACK cluster to run your containerized application. 11 | # You can use ACK Pro cluster for more security and better performance. 12 | # For instructions see https://www.alibabacloud.com/help/doc-detail/95108.htm 13 | # 14 | # 3. Store your AccessKey pair in GitHub Actions secrets named `ACCESS_KEY_ID` and `ACCESS_KEY_SECRET`. 15 | # For instructions on setting up secrets see: https://developer.github.com/actions/managing-workflows/storing-secrets/ 16 | # 17 | # 4. Change the values for the REGION_ID, REGISTRY, NAMESPACE, IMAGE, ACK_CLUSTER_ID, and ACK_DEPLOYMENT_NAME. 18 | # 19 | 20 | name: Build and Deploy to ACK 21 | 22 | on: 23 | push: 24 | branches: [ "main" ] 25 | 26 | # Environment variables available to all jobs and steps in this workflow. 27 | env: 28 | REGION_ID: cn-hangzhou 29 | REGISTRY: registry.cn-hangzhou.aliyuncs.com 30 | NAMESPACE: namespace 31 | IMAGE: repo 32 | TAG: ${{ github.sha }} 33 | ACK_CLUSTER_ID: clusterID 34 | ACK_DEPLOYMENT_NAME: nginx-deployment 35 | 36 | ACR_EE_REGISTRY: myregistry.cn-hangzhou.cr.aliyuncs.com 37 | ACR_EE_INSTANCE_ID: instanceID 38 | ACR_EE_NAMESPACE: namespace 39 | ACR_EE_IMAGE: repo 40 | ACR_EE_TAG: ${{ github.sha }} 41 | 42 | permissions: 43 | contents: read 44 | 45 | jobs: 46 | build: 47 | runs-on: ubuntu-latest 48 | environment: production 49 | 50 | steps: 51 | - name: Checkout 52 | uses: actions/checkout@v4 53 | 54 | # 1.1 Login to ACR 55 | - name: Login to ACR with the AccessKey pair 56 | uses: aliyun/acr-login@v1 57 | with: 58 | region-id: "${{ env.REGION_ID }}" 59 | access-key-id: "${{ secrets.ACCESS_KEY_ID }}" 60 | access-key-secret: "${{ secrets.ACCESS_KEY_SECRET }}" 61 | 62 | # 1.2 Build and push image to ACR 63 | - name: Build and push image to ACR 64 | run: | 65 | docker build --tag "$REGISTRY/$NAMESPACE/$IMAGE:$TAG" . 
66 | docker push "$REGISTRY/$NAMESPACE/$IMAGE:$TAG" 67 | 68 | # 1.3 Scan image in ACR 69 | - name: Scan image in ACR 70 | uses: aliyun/acr-scan@v1 71 | with: 72 | region-id: "${{ env.REGION_ID }}" 73 | access-key-id: "${{ secrets.ACCESS_KEY_ID }}" 74 | access-key-secret: "${{ secrets.ACCESS_KEY_SECRET }}" 75 | repository: "${{ env.NAMESPACE }}/${{ env.IMAGE }}" 76 | tag: "${{ env.TAG }}" 77 | 78 | # 2.1 (Optional) Login to ACR EE 79 | - uses: actions/checkout@v4 80 | - name: Login to ACR EE with the AccessKey pair 81 | uses: aliyun/acr-login@v1 82 | with: 83 | login-server: "https://${{ env.ACR_EE_REGISTRY }}" 84 | region-id: "${{ env.REGION_ID }}" 85 | access-key-id: "${{ secrets.ACCESS_KEY_ID }}" 86 | access-key-secret: "${{ secrets.ACCESS_KEY_SECRET }}" 87 | instance-id: "${{ env.ACR_EE_INSTANCE_ID }}" 88 | 89 | # 2.2 (Optional) Build and push image ACR EE 90 | - name: Build and push image to ACR EE 91 | run: | 92 | docker build -t "$ACR_EE_REGISTRY/$ACR_EE_NAMESPACE/$ACR_EE_IMAGE:$TAG" . 93 | docker push "$ACR_EE_REGISTRY/$ACR_EE_NAMESPACE/$ACR_EE_IMAGE:$TAG" 94 | # 2.3 (Optional) Scan image in ACR EE 95 | - name: Scan image in ACR EE 96 | uses: aliyun/acr-scan@v1 97 | with: 98 | region-id: "${{ env.REGION_ID }}" 99 | access-key-id: "${{ secrets.ACCESS_KEY_ID }}" 100 | access-key-secret: "${{ secrets.ACCESS_KEY_SECRET }}" 101 | instance-id: "${{ env.ACR_EE_INSTANCE_ID }}" 102 | repository: "${{ env.ACR_EE_NAMESPACE}}/${{ env.ACR_EE_IMAGE }}" 103 | tag: "${{ env.ACR_EE_TAG }}" 104 | 105 | # 3.1 Set ACK context 106 | - name: Set K8s context 107 | uses: aliyun/ack-set-context@v1 108 | with: 109 | access-key-id: "${{ secrets.ACCESS_KEY_ID }}" 110 | access-key-secret: "${{ secrets.ACCESS_KEY_SECRET }}" 111 | cluster-id: "${{ env.ACK_CLUSTER_ID }}" 112 | 113 | # 3.2 Deploy the image to the ACK cluster 114 | - name: Set up Kustomize 115 | run: |- 116 | curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash /dev/stdin 3.8.6 117 | - name: Deploy 118 | run: |- 119 | ./kustomize edit set image REGISTRY/NAMESPACE/IMAGE:TAG=$REGISTRY/$NAMESPACE/$IMAGE:$TAG 120 | ./kustomize build . | kubectl apply -f - 121 | kubectl rollout status deployment/$ACK_DEPLOYMENT_NAME 122 | kubectl get services -o wide 123 | -------------------------------------------------------------------------------- /.github/workflows/aws.yml: -------------------------------------------------------------------------------- 1 | # This workflow will build and push a new container image to Amazon ECR, 2 | # and then will deploy a new task definition to Amazon ECS, when there is a push to the "main" branch. 3 | # 4 | # To use this workflow, you will need to complete the following set-up steps: 5 | # 6 | # 1. Create an ECR repository to store your images. 7 | # For example: `aws ecr create-repository --repository-name my-ecr-repo --region us-east-2`. 8 | # Replace the value of the `ECR_REPOSITORY` environment variable in the workflow below with your repository's name. 9 | # Replace the value of the `AWS_REGION` environment variable in the workflow below with your repository's region. 10 | # 11 | # 2. Create an ECS task definition, an ECS cluster, and an ECS service. 12 | # For example, follow the Getting Started guide on the ECS console: 13 | # https://us-east-2.console.aws.amazon.com/ecs/home?region=us-east-2#/firstRun 14 | # Replace the value of the `ECS_SERVICE` environment variable in the workflow below with the name you set for the Amazon ECS service. 
15 | # Replace the value of the `ECS_CLUSTER` environment variable in the workflow below with the name you set for the cluster. 16 | # 17 | # 3. Store your ECS task definition as a JSON file in your repository. 18 | # The format should follow the output of `aws ecs register-task-definition --generate-cli-skeleton`. 19 | # Replace the value of the `ECS_TASK_DEFINITION` environment variable in the workflow below with the path to the JSON file. 20 | # Replace the value of the `CONTAINER_NAME` environment variable in the workflow below with the name of the container 21 | # in the `containerDefinitions` section of the task definition. 22 | # 23 | # 4. Store an IAM user access key in GitHub Actions secrets named `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. 24 | # See the documentation for each action used below for the recommended IAM policies for this IAM user, 25 | # and best practices on handling the access key credentials. 26 | 27 | name: Deploy to Amazon ECS 28 | 29 | on: 30 | push: 31 | branches: [ "main" ] 32 | 33 | env: 34 | AWS_REGION: MY_AWS_REGION # set this to your preferred AWS region, e.g. us-west-1 35 | ECR_REPOSITORY: MY_ECR_REPOSITORY # set this to your Amazon ECR repository name 36 | ECS_SERVICE: MY_ECS_SERVICE # set this to your Amazon ECS service name 37 | ECS_CLUSTER: MY_ECS_CLUSTER # set this to your Amazon ECS cluster name 38 | ECS_TASK_DEFINITION: MY_ECS_TASK_DEFINITION # set this to the path to your Amazon ECS task definition 39 | # file, e.g. .aws/task-definition.json 40 | CONTAINER_NAME: MY_CONTAINER_NAME # set this to the name of the container in the 41 | # containerDefinitions section of your task definition 42 | 43 | permissions: 44 | contents: read 45 | 46 | jobs: 47 | deploy: 48 | name: Deploy 49 | runs-on: ubuntu-latest 50 | environment: production 51 | 52 | steps: 53 | - name: Checkout 54 | uses: actions/checkout@v4 55 | 56 | - name: Configure AWS credentials 57 | uses: aws-actions/configure-aws-credentials@v1 58 | with: 59 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 60 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 61 | aws-region: ${{ env.AWS_REGION }} 62 | 63 | - name: Login to Amazon ECR 64 | id: login-ecr 65 | uses: aws-actions/amazon-ecr-login@v1 66 | 67 | - name: Build, tag, and push image to Amazon ECR 68 | id: build-image 69 | env: 70 | ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }} 71 | IMAGE_TAG: ${{ github.sha }} 72 | run: | 73 | # Build a docker container and 74 | # push it to ECR so that it can 75 | # be deployed to ECS. 76 | docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG . 
77 | docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG 78 | echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> $GITHUB_OUTPUT 79 | 80 | - name: Fill in the new image ID in the Amazon ECS task definition 81 | id: task-def 82 | uses: aws-actions/amazon-ecs-render-task-definition@v1 83 | with: 84 | task-definition: ${{ env.ECS_TASK_DEFINITION }} 85 | container-name: ${{ env.CONTAINER_NAME }} 86 | image: ${{ steps.build-image.outputs.image }} 87 | 88 | - name: Deploy Amazon ECS task definition 89 | uses: aws-actions/amazon-ecs-deploy-task-definition@v1 90 | with: 91 | task-definition: ${{ steps.task-def.outputs.task-definition }} 92 | service: ${{ env.ECS_SERVICE }} 93 | cluster: ${{ env.ECS_CLUSTER }} 94 | wait-for-service-stability: true 95 | -------------------------------------------------------------------------------- /.github/workflows/codacy.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 5 | 6 | # This workflow checks out code, performs a Codacy security scan 7 | # and integrates the results with the 8 | # GitHub Advanced Security code scanning feature. For more information on 9 | # the Codacy security scan action usage and parameters, see 10 | # https://github.com/codacy/codacy-analysis-cli-action. 11 | # For more information on Codacy Analysis CLI in general, see 12 | # https://github.com/codacy/codacy-analysis-cli. 13 | 14 | name: Codacy Security Scan 15 | 16 | on: 17 | push: 18 | branches: [ "main" ] 19 | pull_request: 20 | # The branches below must be a subset of the branches above 21 | branches: [ "main" ] 22 | schedule: 23 | - cron: '43 16 * * 5' 24 | 25 | permissions: 26 | contents: read 27 | 28 | jobs: 29 | codacy-security-scan: 30 | permissions: 31 | contents: read # for actions/checkout to fetch code 32 | security-events: write # for github/codeql-action/upload-sarif to upload SARIF results 33 | actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status 34 | name: Codacy Security Scan 35 | runs-on: ubuntu-latest 36 | steps: 37 | # Checkout the repository to the GitHub Actions runner 38 | - name: Checkout code 39 | uses: actions/checkout@v4 40 | 41 | # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis 42 | - name: Run Codacy Analysis CLI 43 | uses: codacy/codacy-analysis-cli-action@d840f886c4bd4edc059706d09c6a1586111c540b 44 | with: 45 | # Check https://github.com/codacy/codacy-analysis-cli#project-token to get your project token from your Codacy repository 46 | # You can also omit the token and run the tools that support default configurations 47 | project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} 48 | verbose: true 49 | output: results.sarif 50 | format: sarif 51 | # Adjust severity of non-security issues 52 | gh-code-scanning-compat: true 53 | # Force 0 exit code to allow SARIF file generation 54 | # This will handover control about PR rejection to the GitHub side 55 | max-allowed-issues: 2147483647 56 | 57 | # Upload the SARIF file generated in the previous step 58 | - name: Upload SARIF results file 59 | uses: github/codeql-action/upload-sarif@v2 60 | with: 61 | sarif_file: results.sarif 62 | -------------------------------------------------------------------------------- 
/.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ "main" ] 17 | pull_request: 18 | branches: [ "main" ] 19 | schedule: 20 | - cron: '33 19 * * 2' 21 | 22 | jobs: 23 | analyze: 24 | name: Analyze (${{ matrix.language }}) 25 | # Runner size impacts CodeQL analysis time. To learn more, please see: 26 | # - https://gh.io/recommended-hardware-resources-for-running-codeql 27 | # - https://gh.io/supported-runners-and-hardware-resources 28 | # - https://gh.io/using-larger-runners (GitHub.com only) 29 | # Consider using larger runners or machines with greater resources for possible analysis time improvements. 30 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} 31 | timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} 32 | permissions: 33 | # required for all workflows 34 | security-events: write 35 | 36 | # required to fetch internal or private CodeQL packs 37 | packages: read 38 | 39 | # only required for workflows in private repositories 40 | actions: read 41 | contents: read 42 | 43 | strategy: 44 | fail-fast: false 45 | matrix: 46 | include: 47 | - language: python 48 | build-mode: none 49 | # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' 50 | # Use `c-cpp` to analyze code written in C, C++ or both 51 | # Use 'java-kotlin' to analyze code written in Java, Kotlin or both 52 | # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both 53 | # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, 54 | # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. 55 | # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how 56 | # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages 57 | steps: 58 | - name: Checkout repository 59 | uses: actions/checkout@v4 60 | 61 | # Initializes the CodeQL tools for scanning. 62 | - name: Initialize CodeQL 63 | uses: github/codeql-action/init@v3 64 | with: 65 | languages: ${{ matrix.language }} 66 | build-mode: ${{ matrix.build-mode }} 67 | # If you wish to specify custom queries, you can do so here or in a config file. 68 | # By default, queries listed here will override any specified in a config file. 69 | # Prefix the list here with "+" to use these queries and those in the config file. 
70 | 71 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 72 | # queries: security-extended,security-and-quality 73 | 74 | # If the analyze step fails for one of the languages you are analyzing with 75 | # "We were unable to automatically build your code", modify the matrix above 76 | # to set the build mode to "manual" for that language. Then modify this step 77 | # to build your code. 78 | # ℹ️ Command-line programs to run using the OS shell. 79 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 80 | - if: matrix.build-mode == 'manual' 81 | shell: bash 82 | run: | 83 | echo 'If you are using a "manual" build mode for one or more of the' \ 84 | 'languages you are analyzing, replace this with the commands to build' \ 85 | 'your code, for example:' 86 | echo ' make bootstrap' 87 | echo ' make release' 88 | exit 1 89 | 90 | - name: Perform CodeQL Analysis 91 | uses: github/codeql-action/analyze@v3 92 | with: 93 | category: "/language:${{matrix.language}}" 94 | -------------------------------------------------------------------------------- /.github/workflows/generator-generic-ossf-slsa3-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 5 | 6 | # This workflow lets you generate SLSA provenance file for your project. 7 | # The generation satisfies level 3 for the provenance requirements - see https://slsa.dev/spec/v0.1/requirements 8 | # The project is an initiative of the OpenSSF (openssf.org) and is developed at 9 | # https://github.com/slsa-framework/slsa-github-generator. 10 | # The provenance file can be verified using https://github.com/slsa-framework/slsa-verifier. 11 | # For more information about SLSA and how it improves the supply-chain, visit slsa.dev. 12 | 13 | name: SLSA generic generator 14 | on: 15 | workflow_dispatch: 16 | release: 17 | types: [created] 18 | 19 | jobs: 20 | build: 21 | runs-on: ubuntu-latest 22 | outputs: 23 | digests: ${{ steps.hash.outputs.digests }} 24 | 25 | steps: 26 | - uses: actions/checkout@v4 27 | 28 | # ======================================================== 29 | # 30 | # Step 1: Build your artifacts. 31 | # 32 | # ======================================================== 33 | - name: Build artifacts 34 | run: | 35 | # These are some amazing artifacts. 36 | echo "artifact1" > artifact1 37 | echo "artifact2" > artifact2 38 | 39 | # ======================================================== 40 | # 41 | # Step 2: Add a step to generate the provenance subjects 42 | # as shown below. Update the sha256 sum arguments 43 | # to include all binaries that you generate 44 | # provenance for. 45 | # 46 | # ======================================================== 47 | - name: Generate subject for provenance 48 | id: hash 49 | run: | 50 | set -euo pipefail 51 | 52 | # List the artifacts the provenance will refer to. 53 | files=$(ls artifact*) 54 | # Generate the subjects (base64 encoded). 55 | echo "hashes=$(sha256sum $files | base64 -w0)" >> "${GITHUB_OUTPUT}" 56 | 57 | provenance: 58 | needs: [build] 59 | permissions: 60 | actions: read # To read the workflow path. 
61 | id-token: write # To sign the provenance. 62 | contents: write # To add assets to a release. 63 | uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.4.0 64 | with: 65 | base64-subjects: "${{ needs.build.outputs.digests }}" 66 | upload-assets: true # Optional: Upload to a new release 67 | -------------------------------------------------------------------------------- /.github/workflows/google.yml: -------------------------------------------------------------------------------- 1 | # This workflow will build a docker container, publish it to Google Container Registry, and deploy it to GKE when there is a push to the "main" branch. 2 | # 3 | # To configure this workflow: 4 | # 5 | # 1. Ensure that your repository contains the necessary configuration for your Google Kubernetes Engine cluster, including deployment.yml, kustomization.yml, service.yml, etc. 6 | # 7 | # 2. Create and configure a Workload Identity Provider for GitHub (https://github.com/google-github-actions/auth#setting-up-workload-identity-federation) 8 | # 9 | # 3. Change the values for the GAR_LOCATION, GKE_ZONE, GKE_CLUSTER, IMAGE, REPOSITORY and DEPLOYMENT_NAME environment variables (below). 10 | # 11 | # For more support on how to run the workflow, please visit https://github.com/google-github-actions/setup-gcloud/tree/master/example-workflows/gke-kustomize 12 | 13 | name: Build and Deploy to GKE 14 | 15 | on: 16 | push: 17 | branches: [ "main" ] 18 | 19 | env: 20 | PROJECT_ID: ${{ secrets.GKE_PROJECT }} 21 | GAR_LOCATION: us-central1 # TODO: update region of the Artifact Registry 22 | GKE_CLUSTER: cluster-1 # TODO: update to cluster name 23 | GKE_ZONE: us-central1-c # TODO: update to cluster zone 24 | DEPLOYMENT_NAME: gke-test # TODO: update to deployment name 25 | REPOSITORY: samples # TODO: update to Artifact Registry docker repository 26 | IMAGE: static-site 27 | 28 | jobs: 29 | setup-build-publish-deploy: 30 | name: Setup, Build, Publish, and Deploy 31 | runs-on: ubuntu-latest 32 | environment: production 33 | 34 | permissions: 35 | contents: 'read' 36 | id-token: 'write' 37 | 38 | steps: 39 | - name: Checkout 40 | uses: actions/checkout@v4 41 | 42 | # Configure Workload Identity Federation and generate an access token. 43 | - id: 'auth' 44 | name: 'Authenticate to Google Cloud' 45 | uses: 'google-github-actions/auth@v0' 46 | with: 47 | token_format: 'access_token' 48 | workload_identity_provider: 'projects/123456789/locations/global/workloadIdentityPools/my-pool/providers/my-provider' 49 | service_account: 'my-service-account@my-project.iam.gserviceaccount.com' 50 | 51 | # Alternative option - authentication via credentials json 52 | # - id: 'auth' 53 | # uses: 'google-github-actions/auth@v0' 54 | # with: 55 | # credentials_json: '${{ secrets.GCP_CREDENTIALS }}' 56 | 57 | - name: Docker configuration 58 | run: |- 59 | echo ${{steps.auth.outputs.access_token}} | docker login -u oauth2accesstoken --password-stdin https://$GAR_LOCATION-docker.pkg.dev 60 | # Get the GKE credentials so we can deploy to the cluster 61 | - name: Set up GKE credentials 62 | uses: google-github-actions/get-gke-credentials@v0 63 | with: 64 | cluster_name: ${{ env.GKE_CLUSTER }} 65 | location: ${{ env.GKE_ZONE }} 66 | 67 | # Build the Docker image 68 | - name: Build 69 | run: |- 70 | docker build \ 71 | --tag "$GAR_LOCATION-docker.pkg.dev/$PROJECT_ID/$REPOSITORY/$IMAGE:$GITHUB_SHA" \ 72 | --build-arg GITHUB_SHA="$GITHUB_SHA" \ 73 | --build-arg GITHUB_REF="$GITHUB_REF" \ 74 | . 
75 | # Push the Docker image to Google Artifact Registry 76 | - name: Publish 77 | run: |- 78 | docker push "$GAR_LOCATION-docker.pkg.dev/$PROJECT_ID/$REPOSITORY/$IMAGE:$GITHUB_SHA" 79 | # Set up kustomize 80 | - name: Set up Kustomize 81 | run: |- 82 | curl -sfLo kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v3.1.0/kustomize_3.1.0_linux_amd64 83 | chmod u+x ./kustomize 84 | # Deploy the Docker image to the GKE cluster 85 | - name: Deploy 86 | run: |- 87 | # replacing the image name in the k8s template 88 | ./kustomize edit set image LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY/IMAGE:TAG=$GAR_LOCATION-docker.pkg.dev/$PROJECT_ID/$REPOSITORY/$IMAGE:$GITHUB_SHA 89 | ./kustomize build . | kubectl apply -f - 90 | kubectl rollout status deployment/$DEPLOYMENT_NAME 91 | kubectl get services -o wide 92 | -------------------------------------------------------------------------------- /.github/workflows/greetings.yml: -------------------------------------------------------------------------------- 1 | name: Greetings 2 | 3 | on: [pull_request_target, issues] 4 | 5 | jobs: 6 | greeting: 7 | runs-on: ubuntu-latest 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | steps: 12 | - uses: actions/first-interaction@v1 13 | with: 14 | repo-token: ${{ secrets.GITHUB_TOKEN }} 15 | issue-message: "Message that will be displayed on users' first issue" 16 | pr-message: "Message that will be displayed on users' first pull request" 17 | -------------------------------------------------------------------------------- /.github/workflows/ibm.yml: -------------------------------------------------------------------------------- 1 | # This workflow will build a docker container, publish it to IBM Container Registry, and deploy it to IKS when there is a push to the "main" branch. 2 | # 3 | # To configure this workflow: 4 | # 5 | # 1. Ensure that your repository contains a Dockerfile 6 | # 2. Setup secrets in your repository by going to settings: Create ICR_NAMESPACE and IBM_CLOUD_API_KEY 7 | # 3. 
Change the values for the IBM_CLOUD_REGION, REGISTRY_HOSTNAME, IMAGE_NAME, IKS_CLUSTER, DEPLOYMENT_NAME, and PORT 8 | 9 | name: Build and Deploy to IKS 10 | 11 | on: 12 | push: 13 | branches: [ "main" ] 14 | 15 | # Environment variables available to all jobs and steps in this workflow 16 | env: 17 | GITHUB_SHA: ${{ github.sha }} 18 | IBM_CLOUD_API_KEY: ${{ secrets.IBM_CLOUD_API_KEY }} 19 | IBM_CLOUD_REGION: us-south 20 | ICR_NAMESPACE: ${{ secrets.ICR_NAMESPACE }} 21 | REGISTRY_HOSTNAME: us.icr.io 22 | IMAGE_NAME: iks-test 23 | IKS_CLUSTER: example-iks-cluster-name-or-id 24 | DEPLOYMENT_NAME: iks-test 25 | PORT: 5001 26 | 27 | jobs: 28 | setup-build-publish-deploy: 29 | name: Setup, Build, Publish, and Deploy 30 | runs-on: ubuntu-latest 31 | environment: production 32 | steps: 33 | 34 | - name: Checkout 35 | uses: actions/checkout@v4 36 | 37 | # Download and Install IBM Cloud CLI 38 | - name: Install IBM Cloud CLI 39 | run: | 40 | curl -fsSL https://clis.cloud.ibm.com/install/linux | sh 41 | ibmcloud --version 42 | ibmcloud config --check-version=false 43 | ibmcloud plugin install -f kubernetes-service 44 | ibmcloud plugin install -f container-registry 45 | 46 | # Authenticate with IBM Cloud CLI 47 | - name: Authenticate with IBM Cloud CLI 48 | run: | 49 | ibmcloud login --apikey "${IBM_CLOUD_API_KEY}" -r "${IBM_CLOUD_REGION}" -g default 50 | ibmcloud cr region-set "${IBM_CLOUD_REGION}" 51 | ibmcloud cr login 52 | 53 | # Build the Docker image 54 | - name: Build with Docker 55 | run: | 56 | docker build -t "$REGISTRY_HOSTNAME"/"$ICR_NAMESPACE"/"$IMAGE_NAME":"$GITHUB_SHA" \ 57 | --build-arg GITHUB_SHA="$GITHUB_SHA" \ 58 | --build-arg GITHUB_REF="$GITHUB_REF" . 59 | 60 | # Push the image to IBM Container Registry 61 | - name: Push the image to ICR 62 | run: | 63 | docker push $REGISTRY_HOSTNAME/$ICR_NAMESPACE/$IMAGE_NAME:$GITHUB_SHA 64 | 65 | # Deploy the Docker image to the IKS cluster 66 | - name: Deploy to IKS 67 | run: | 68 | ibmcloud ks cluster config --cluster $IKS_CLUSTER 69 | kubectl config current-context 70 | kubectl create deployment $DEPLOYMENT_NAME --image=$REGISTRY_HOSTNAME/$ICR_NAMESPACE/$IMAGE_NAME:$GITHUB_SHA --dry-run -o yaml > deployment.yaml 71 | kubectl apply -f deployment.yaml 72 | kubectl rollout status deployment/$DEPLOYMENT_NAME 73 | kubectl create service loadbalancer $DEPLOYMENT_NAME --tcp=80:$PORT --dry-run -o yaml > service.yaml 74 | kubectl apply -f service.yaml 75 | kubectl get services -o wide 76 | -------------------------------------------------------------------------------- /.github/workflows/jekyll.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 5 | 6 | # Sample workflow for building and deploying a Jekyll site to GitHub Pages 7 | name: Deploy Jekyll site to Pages 8 | 9 | on: 10 | # Runs on pushes targeting the default branch 11 | push: 12 | branches: ["main"] 13 | 14 | # Allows you to run this workflow manually from the Actions tab 15 | workflow_dispatch: 16 | 17 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 18 | permissions: 19 | contents: read 20 | pages: write 21 | id-token: write 22 | 23 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 
24 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 25 | concurrency: 26 | group: "pages" 27 | cancel-in-progress: false 28 | 29 | jobs: 30 | # Build job 31 | build: 32 | runs-on: ubuntu-latest 33 | steps: 34 | - name: Checkout 35 | uses: actions/checkout@v4 36 | - name: Setup Ruby 37 | uses: ruby/setup-ruby@8575951200e472d5f2d95c625da0c7bec8217c42 # v1.161.0 38 | with: 39 | ruby-version: '3.1' # Not needed with a .ruby-version file 40 | bundler-cache: true # runs 'bundle install' and caches installed gems automatically 41 | cache-version: 0 # Increment this number if you need to re-download cached gems 42 | - name: Setup Pages 43 | id: pages 44 | uses: actions/configure-pages@v5 45 | - name: Build with Jekyll 46 | # Outputs to the './_site' directory by default 47 | run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" 48 | env: 49 | JEKYLL_ENV: production 50 | - name: Upload artifact 51 | # Automatically uploads an artifact from the './_site' directory by default 52 | uses: actions/upload-pages-artifact@v3 53 | 54 | # Deployment job 55 | deploy: 56 | environment: 57 | name: github-pages 58 | url: ${{ steps.deployment.outputs.page_url }} 59 | runs-on: ubuntu-latest 60 | needs: build 61 | steps: 62 | - name: Deploy to GitHub Pages 63 | id: deployment 64 | uses: actions/deploy-pages@v4 65 | -------------------------------------------------------------------------------- /.github/workflows/objective-c-xcode.yml: -------------------------------------------------------------------------------- 1 | name: Xcode - Build and Analyze 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | jobs: 10 | build: 11 | name: Build and analyse default scheme using xcodebuild command 12 | runs-on: macos-latest 13 | 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | - name: Set Default Scheme 18 | run: | 19 | scheme_list=$(xcodebuild -list -json | tr -d "\n") 20 | default=$(echo $scheme_list | ruby -e "require 'json'; puts JSON.parse(STDIN.gets)['project']['targets'][0]") 21 | echo $default | cat >default 22 | echo Using default scheme: $default 23 | - name: Build 24 | env: 25 | scheme: ${{ 'default' }} 26 | run: | 27 | if [ $scheme = default ]; then scheme=$(cat default); fi 28 | if [ "`ls -A | grep -i \\.xcworkspace\$`" ]; then filetype_parameter="workspace" && file_to_build="`ls -A | grep -i \\.xcworkspace\$`"; else filetype_parameter="project" && file_to_build="`ls -A | grep -i \\.xcodeproj\$`"; fi 29 | file_to_build=`echo $file_to_build | awk '{$1=$1;print}'` 30 | xcodebuild clean build analyze -scheme "$scheme" -"$filetype_parameter" "$file_to_build" | xcpretty && exit ${PIPESTATUS[0]} 31 | -------------------------------------------------------------------------------- /.github/workflows/pylint.yml: -------------------------------------------------------------------------------- 1 | name: Pylint 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python-version: ["3.8", "3.9", "3.10"] 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: Set up Python ${{ matrix.python-version }} 14 | uses: actions/setup-python@v3 15 | with: 16 | python-version: ${{ matrix.python-version }} 17 | - name: Install dependencies 18 | run: | 19 | python -m pip install --upgrade pip 20 | pip install pylint 21 | - name: Analysing the code with pylint 22 | run: | 23 | pylint $(git ls-files '*.py') 24 | 
-------------------------------------------------------------------------------- /.github/workflows/python-app.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | name: Python application 5 | 6 | on: 7 | push: 8 | branches: [ "main" ] 9 | pull_request: 10 | branches: [ "main" ] 11 | 12 | permissions: 13 | contents: read 14 | 15 | jobs: 16 | build: 17 | 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - uses: actions/checkout@v4 22 | - name: Set up Python 3.10 23 | uses: actions/setup-python@v3 24 | with: 25 | python-version: "3.10" 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install flake8 pytest 30 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 31 | - name: Lint with flake8 32 | run: | 33 | # stop the build if there are Python syntax errors or undefined names 34 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 35 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 36 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 37 | - name: Test with pytest 38 | run: | 39 | pytest 40 | -------------------------------------------------------------------------------- /.github/workflows/tencent.yml: -------------------------------------------------------------------------------- 1 | # This workflow will build a docker container, publish and deploy it to Tencent Kubernetes Engine (TKE) when there is a push to the "main" branch. 2 | # 3 | # To configure this workflow: 4 | # 5 | # 1. Ensure that your repository contains the necessary configuration for your Tencent Kubernetes Engine cluster, 6 | # including deployment.yml, kustomization.yml, service.yml, etc. 7 | # 8 | # 2. Set up secrets in your workspace: 9 | # - TENCENT_CLOUD_SECRET_ID with Tencent Cloud secret id 10 | # - TENCENT_CLOUD_SECRET_KEY with Tencent Cloud secret key 11 | # - TENCENT_CLOUD_ACCOUNT_ID with Tencent Cloud account id 12 | # - TKE_REGISTRY_PASSWORD with TKE registry password 13 | # 14 | # 3. Change the values for the TKE_IMAGE_URL, TKE_REGION, TKE_CLUSTER_ID and DEPLOYMENT_NAME environment variables (below). 15 | 16 | name: Tencent Kubernetes Engine 17 | 18 | on: 19 | push: 20 | branches: [ "main" ] 21 | 22 | # Environment variables available to all jobs and steps in this workflow 23 | env: 24 | TKE_IMAGE_URL: ccr.ccs.tencentyun.com/demo/mywebapp 25 | TKE_REGION: ap-guangzhou 26 | TKE_CLUSTER_ID: cls-mywebapp 27 | DEPLOYMENT_NAME: tke-test 28 | 29 | permissions: 30 | contents: read 31 | 32 | jobs: 33 | setup-build-publish-deploy: 34 | name: Setup, Build, Publish, and Deploy 35 | runs-on: ubuntu-latest 36 | environment: production 37 | steps: 38 | 39 | - name: Checkout 40 | uses: actions/checkout@v4 41 | 42 | # Build 43 | - name: Build Docker image 44 | run: | 45 | docker build -t ${TKE_IMAGE_URL}:${GITHUB_SHA} . 
46 | 47 | - name: Login TKE Registry 48 | run: | 49 | docker login -u ${{ secrets.TENCENT_CLOUD_ACCOUNT_ID }} -p '${{ secrets.TKE_REGISTRY_PASSWORD }}' ${TKE_IMAGE_URL} 50 | 51 | # Push the Docker image to TKE Registry 52 | - name: Publish 53 | run: | 54 | docker push ${TKE_IMAGE_URL}:${GITHUB_SHA} 55 | 56 | - name: Set up Kustomize 57 | run: | 58 | curl -o kustomize --location https://github.com/kubernetes-sigs/kustomize/releases/download/v3.1.0/kustomize_3.1.0_linux_amd64 59 | chmod u+x ./kustomize 60 | 61 | - name: Set up ~/.kube/config for connecting TKE cluster 62 | uses: TencentCloud/tke-cluster-credential-action@v1 63 | with: 64 | secret_id: ${{ secrets.TENCENT_CLOUD_SECRET_ID }} 65 | secret_key: ${{ secrets.TENCENT_CLOUD_SECRET_KEY }} 66 | tke_region: ${{ env.TKE_REGION }} 67 | cluster_id: ${{ env.TKE_CLUSTER_ID }} 68 | 69 | - name: Switch to TKE context 70 | run: | 71 | kubectl config use-context ${TKE_CLUSTER_ID}-context-default 72 | 73 | # Deploy the Docker image to the TKE cluster 74 | - name: Deploy 75 | run: | 76 | ./kustomize edit set image ${TKE_IMAGE_URL}:${GITHUB_SHA} 77 | ./kustomize build . | kubectl apply -f - 78 | kubectl rollout status deployment/${DEPLOYMENT_NAME} 79 | kubectl get services -o wide 80 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Avoid including Experiment files: they can be created and edited locally to test the ipf files 2 | *.pxp 3 | *.pxt 4 | *.uxp 5 | *.uxt 6 | -------------------------------------------------------------------------------- /.whitesource: -------------------------------------------------------------------------------- 1 | { 2 | "scanSettings": { 3 | "baseBranches": [] 4 | }, 5 | "checkRunSettings": { 6 | "vulnerableCheckRunConclusionLevel": "failure", 7 | "displayMode": "diff", 8 | "useMendCheckNames": true 9 | }, 10 | "issueSettings": { 11 | "minSeverityLevel": "LOW", 12 | "issueType": "DEPENDENCY" 13 | } 14 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 KOSASIH 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | PiOS License 2 | 3 | Copyright (C) 2024 KOSASIH 4 | 5 | Permission is hereby granted by the application software developer (“Software Developer”), free 6 | of charge, to any person obtaining a copy of this application, software and associated 7 | documentation files (the “Software”), which was developed by the Software Developer for use on 8 | Pi Network, whereby the purpose of this license is to permit the development of derivative works 9 | based on the Software, including the right to use, copy, modify, merge, publish, distribute, 10 | sub-license, and/or sell copies of such derivative works and any Software components incorporated 11 | therein, and to permit persons to whom such derivative works are furnished to do so, in each case, 12 | solely to develop, use and market applications for the official Pi Network. For purposes of this 13 | license, Pi Network shall mean any application, software, or other present or future platform 14 | developed, owned or managed by Pi Community Company, and its parents, affiliates or subsidiaries, 15 | for which the Software was developed, or on which the Software continues to operate. However, 16 | you are prohibited from using any portion of the Software or any derivative works thereof in any 17 | manner (a) which infringes on any Pi Network intellectual property rights, (b) to hack any of Pi 18 | Network’s systems or processes or (c) to develop any product or service which is competitive with 19 | the Pi Network. 20 | 21 | The above copyright notice and this permission notice shall be included in all copies or 22 | substantial portions of the Software. 23 | 24 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 25 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE 26 | AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS, PUBLISHERS, OR COPYRIGHT HOLDERS OF THIS 27 | SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL 28 | DAMAGES (INCLUDING, BUT NOT LIMITED TO BUSINESS INTERRUPTION, LOSS OF USE, DATA OR PROFITS) 29 | HOWEVER CAUSED AND UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 30 | TORT (INCLUDING NEGLIGENCE) ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE 31 | OR OTHER DEALINGS IN THE SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 32 | 33 | Pi, Pi Network and the Pi logo are trademarks of the Pi Community Company. 34 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NeuralMesh-Pi-Network 2 | 3 | The NeuralMesh-Pi-Network repository is a comprehensive resource for developers interested in building the next-generation Pi network. The repository includes detailed documentation, source code, and tools for building and deploying the NeuralMesh Pi network. 4 | 5 | # *NeuralMesh-Pi-Network* : NeuralMesh-Pi-Network is a machine learning project that uses a neural network to generate 3D meshes for 3D printing. The project is designed to run on a Raspberry Pi, making it accessible and affordable for hobbyists and educators. 6 | 7 | # Overview 8 | 9 | NeuralMesh-Pi-Network uses a convolutional neural network (CNN) to generate 3D meshes from 2D images. 
The network is trained on a dataset of 2D images and corresponding 3D meshes. Once trained, the network can generate 3D meshes for new 2D images. 10 | 11 | The project includes a Raspberry Pi image with the neural network and supporting software pre-installed. The image can be written to an SD card and used to boot the Raspberry Pi. Once booted, the user can use a web interface to upload 2D images and generate 3D meshes. 12 | 13 | # Requirements 14 | 15 | - Raspberry Pi 4 (4GB or 8GB recommended) 16 | - SD card (32GB or larger recommended) 17 | - Power supply 18 | - Monitor, keyboard, and mouse (for initial setup) 19 | - 3D printer (for generating physical objects from the 3D meshes) 20 | 21 | # Installation 22 | 23 | 1. Download the NeuralMesh-Pi-Network image from the GitHub repository. 24 | 2. Write the image to an SD card using a tool such as Etcher. 25 | 3. Insert the SD card into the Raspberry Pi and power it on. 26 | 4. Wait for the system to boot and the web interface to load. 27 | 5. Use the web interface to upload 2D images and generate 3D meshes. 28 | 29 | # Usage 30 | 31 | 1. Open a web browser and navigate to the NeuralMesh-Pi-Network web interface. 32 | 2. Click the "Choose File" button and select a 2D image. 33 | 3. Click the "Generate Mesh" button to generate a 3D mesh. 34 | 4. Download the 3D mesh as an STL file. 35 | 5. Use a 3D printer to print the object. 36 | 37 | ## Training 38 | 39 | The neural network can be trained on a larger dataset of 2D images and corresponding 3D meshes. The training process can be run on a more powerful computer and the trained model can be transferred to the Raspberry Pi. 40 | 41 | 1. Collect a dataset of 2D images and corresponding 3D meshes. 42 | 2. Preprocess the dataset and split it into training, validation, and test sets. 43 | 3. Train the neural network on the training set. 44 | 4. Evaluate the neural network on the validation set. 45 | 5. Save the trained model. 46 | 6. Transfer the trained model to the Raspberry Pi. 47 | 48 | # Contributing 49 | 50 | Contributions to NeuralMesh-Pi-Network are welcome! If you have an idea for a new feature or have found a bug, please open an issue or submit a pull request. 51 | 52 | # License 53 | 54 | NeuralMesh-Pi-Network is released under the MIT License. See the LICENSE file for details. 55 | 56 | # Acknowledgments 57 | 58 | NeuralMesh-Pi-Network was inspired by the work of NVIDIA Research and Google Research. The project was developed as part of the Raspberry Pi Foundation's Pi Wars competition. 59 | 60 | # Contact 61 | 62 | For questions or comments, please contact info@neuralmesh_pi_network.com. 63 | 64 | This package uses Flask and Factory Boy to provide a web interface for uploading images and generating 3D meshes. It also uses pytest for testing. 65 | 66 | ## Here's a simple example of how to use the package: 67 | 68 | ```python 69 | 1. from neuralmesh_pi_network import create_app 70 | 2. 71 | 3. app = create_app() 72 | 4. 73 | 5. if __name__ == "__main__": 74 | 6. app.run(debug=True) 75 | ``` 76 | In this example, create_app is a function that initializes the Flask application and Factory Boy factory. The application is then run in debug mode. 77 | 78 | ## To test the package, you can use pytest: 79 | 80 | ```bash 81 | 1. $ pytest neuralmesh_pi_network 82 | ``` 83 | 84 | This command will run all the tests in the neuralmesh_pi_network package. 85 | 86 | Note: You will need to install the necessary dependencies using pip: 87 | 88 | ```bash 89 | 1. 
$ pip install pytest flask factory-boy pytest-flask pytest-factoryboy 90 | ``` 91 | 92 | ## To train the neural network on a new dataset, you can use the following code: 93 | 94 | ```python 95 | 1. from neuralmesh_pi_network import train_model 96 | 2. 97 | 3. # Assuming 'dataset' is a preprocessed dataset and 'output_model_path' is the path to save the trained model 98 | 4. model = train_model(dataset, output_model_path) 99 | ``` 100 | 101 | This function will train the neural network on the given dataset and save the trained model to the specified output path. 102 | -------------------------------------------------------------------------------- /docs/api.md: -------------------------------------------------------------------------------- 1 | # api.md 2 | 3 | NeuralMesh-Pi-Network provides a REST API for generating 3D meshes from 2D images. 4 | 5 | ## Here are the available API endpoints: 6 | 7 | - `POST /predict`: Generates a 3D mesh from a 2D image. 8 | 9 | The `POST /predict` endpoint accepts a POST request with an image field containing the 2D image. 10 | 11 | The response contains a mesh field with the generated 3D mesh in STL format. 12 | 13 | Here is an example request using curl: 14 | 15 | ```bash 16 | 1. $ curl -X POST -F image=@path/to/image.png http://localhost:5000/predict 17 | ``` 18 | 19 | The response will contain the generated 3D mesh in STL format. 20 | 21 | Please note that the `POST /predict` endpoint requires authentication. To authenticate, you can use the Authorization header with a valid API key. 22 | 23 | To obtain an API key, please contact the NeuralMesh-Pi-Network team. 24 | 25 | For more information on the API, please refer to the OpenAPI specification in the docs directory. 26 | -------------------------------------------------------------------------------- /docs/configuration.md: -------------------------------------------------------------------------------- 1 | # configuration.md 2 | 3 | To configure NeuralMesh-Pi-Network, you can modify the config.py file in the project directory. 4 | 5 | ## Here are the available configuration options: 6 | 7 | 1. UPLOAD_FOLDER: The path to the folder where uploaded images are stored. 8 | 2. ALLOWED_EXTENSIONS: The allowed file extensions for uploaded images. 9 | 3. MAX_CONTENT_LENGTH: The maximum size of uploaded images in bytes. 10 | 4. MODEL_PATH: The path to the trained neural network model. 11 | 5. PREDICTION_THRESHOLD: The prediction threshold for generating 3D meshes. 12 | -------------------------------------------------------------------------------- /docs/installation.md: -------------------------------------------------------------------------------- 1 | # installation.md: To install NeuralMesh-Pi-Network, follow these steps: 2 | 3 | 1. Clone the NeuralMesh-Pi-Network repository from GitHub. 4 | 5 | ```bash 6 | 1. $ git clone https://github.com/KOSASIH/NeuralMesh-Pi-Network.git 7 | ``` 8 | 9 | 2. Change into the project directory. 10 | 11 | ```bash 12 | 1. $ cd NeuralMesh-Pi-Network 13 | ``` 14 | 15 | 3. Install the required dependencies using pip. 16 | 17 | ```bash 18 | 1. $ pip install -r requirements.txt 19 | ``` 20 | 21 | 4. Run the Flask application using the provided code example. 22 | 23 | ```bash 24 | 1. $ python -m neuralmesh_pi_network 25 | ``` 26 | 27 | This will start the Flask web server and open the web interface in your default web browser.
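Once the server is running, a quick way to confirm the installation is to call the `POST /predict` endpoint described in `docs/api.md`. The sketch below is illustrative only: it assumes the server is listening on `http://localhost:5000` (as in the curl example), that the `requests` package is installed (it is not listed in `requirements.txt`), and that you already have an API key for the `Authorization` header. The image path and key value are placeholders.

```python
import requests

# Illustrative check against the endpoint documented in docs/api.md.
# "path/to/image.png" and "YOUR_API_KEY" are placeholders.
with open("path/to/image.png", "rb") as f:
    response = requests.post(
        "http://localhost:5000/predict",
        files={"image": f},          # the docs name the upload field "image"
        headers={"Authorization": "YOUR_API_KEY"},
    )

response.raise_for_status()
# Per docs/api.md the response carries the generated mesh in STL format;
# adjust this if your build returns JSON with a separate "mesh" field.
with open("mesh.stl", "wb") as out:
    out.write(response.content)
```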
28 | -------------------------------------------------------------------------------- /docs/usage.md: -------------------------------------------------------------------------------- 1 | # usage.md 2 | 3 | To use NeuralMesh-Pi-Network, you can run the Flask application using the provided code example. 4 | 5 | ```bash 6 | 1. $ python -m neuralmesh_pi_network 7 | ``` 8 | 9 | This will start the Flask web server and open the web interface in your default web browser. 10 | 11 | From the web interface, you can upload a 2D image and generate a 3D mesh. The generated 3D mesh will be displayed in the web interface and can be downloaded as an STL file. 12 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==2.2.5 2 | Flask-SQLAlchemy==2.5.1 3 | Flask-Migrate==3.1.0 4 | Flask-Script==2.0.6 5 | Flask-Testing==0.12.1 6 | psycopg2==2.9.1 7 | SQLAlchemy==1.4.15 8 | 9 | pytest==6.2.4 10 | pytest-cov==2.12.1 11 | pytest-flask==1.2.0 12 | pytest-factoryboy==2.4.1 13 | factory-boy==3.2.0 14 | -------------------------------------------------------------------------------- /scripts/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Set environment variables 4 | export APP_NAME=myapp 5 | export APP_VERSION=1.0.0 6 | export APP_PORT=8080 7 | export APP_HOST=0.0.0.0 8 | export APP_USER=myuser 9 | export APP_GROUP=mygroup 10 | export APP_DIR=/var/www/$APP_NAME 11 | export APP_LOG_DIR=$APP_DIR/logs 12 | export APP_PID_FILE=$APP_DIR/run/$APP_NAME.pid 13 | export APP_ENV=production 14 | 15 | # Install dependencies 16 | sudo apt-get update 17 | sudo apt-get install -y python3 python3-pip 18 | sudo pip3 install -r requirements.txt 19 | 20 | # Create user and group 21 | sudo groupadd $APP_GROUP 22 | sudo useradd -g $APP_GROUP $APP_USER 23 | 24 | # Create directories 25 | sudo mkdir -p $APP_DIR $APP_LOG_DIR $APP_DIR/run 26 | 27 | # Copy files 28 | sudo cp -r src $APP_DIR 29 | sudo cp scripts/start.sh $APP_DIR 30 | sudo cp scripts/stop.sh $APP_DIR 31 | sudo cp scripts/restart.sh $APP_DIR 32 | sudo cp scripts/status.sh $APP_DIR 33 | sudo cp scripts/logs.sh $APP_DIR 34 | 35 | # Set permissions 36 | sudo chown -R $APP_USER:$APP_GROUP $APP_DIR 37 | sudo chmod 755 $APP_DIR/scripts/*.sh 38 | 39 | # Create systemd service 40 | sudo tee /etc/systemd/system/$APP_NAME.service < $APP_LOG_DIR/app.log 2>&1 & 9 | echo $! 
> $APP_PID_FILE

--------------------------------------------------------------------------------
/src/ai/monitoring.py:
--------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import networkx as nx
from sklearn.ensemble import IsolationForest

class NetworkMonitor:
    def __init__(self, num_nodes, num_edges):
        self.num_nodes = num_nodes
        self.num_edges = num_edges

    def detect_anomalies(self, X, y):
        # Detect anomalies using an isolation forest (unsupervised, so y is not used)
        clf = IsolationForest(n_estimators=100)
        clf.fit(X)
        anomalies = clf.predict(X)
        return anomalies

    def predict_node_failure(self, X, y):
        # Predict node failure using gradient boosting
        from sklearn.ensemble import GradientBoostingClassifier
        clf = GradientBoostingClassifier(n_estimators=100)
        clf.fit(X, y)
        proba = clf.predict_proba(X)
        return proba[:, 1]

    def predict_edge_failure(self, X, y):
        # Predict edge failure using logistic regression
        from sklearn.linear_model import LogisticRegression
        clf = LogisticRegression()
        clf.fit(X, y)
        proba = clf.predict_proba(X)
        return proba[:, 1]

    def visualize_network(self, X, y):
        # Visualize the network using matplotlib, highlighting failed nodes in red
        import matplotlib.pyplot as plt
        G = nx.Graph()
        G.add_nodes_from(range(self.num_nodes))
        G.add_edges_from([(i, j) for i, j in zip(X[:, 0], X[:, 1]) if y[i] == 1])
        pos = nx.spring_layout(G)
        nx.draw_networkx_nodes(G, pos, node_color='lightblue')
        nx.draw_networkx_edges(G, pos, edge_color='gray')
        nx.draw_networkx_nodes(G, pos, nodelist=[i for i, j in enumerate(y) if j == 1], node_color='red')
        plt.show()

--------------------------------------------------------------------------------
/src/ai/network.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
from tensorflow import keras

class NeuralNetworkManager:
    def __init__(self, num_nodes, num_edges):
        self.num_nodes = num_nodes
        self.num_edges = num_edges
        self.graph = self.create_graph()
        self.model = self.create_model()

    def create_graph(self):
        # Create a graph with nodes and edges using NetworkX
        import networkx as nx
        G = nx.Graph()
        G.add_nodes_from(range(self.num_nodes))
        G.add_edges_from([(i, i+1) for i in range(self.num_nodes-1)])
        return G

    def create_model(self):
        # Create a deep neural network model using Keras
        model = keras.Sequential([
            keras.layers.Dense(64, activation='relu', input_shape=(self.num_nodes,)),
            keras.layers.Dense(32, activation='relu'),
            keras.layers.Dense(1, activation='sigmoid')
        ])
        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
        return model

    def train_model(self, X, y):
        # Train the model using the training data
        self.model.fit(X, y, epochs=10, batch_size=32, verbose=0)

    def predict(self, X):
        # Make predictions using the trained model
        return self.model.predict(X)

    def optimize_network(self, X, y):
        # Optimize the network using reinforcement learning.
        # 'NeuralMesh-v0' must be a custom Gym environment registered elsewhere;
        # a random policy is used here as a placeholder agent.
        import gym
        env = gym.make('NeuralMesh-v0')
        for episode in range(10):
            state = env.reset()
            done = False
            rewards = 0
            while not done:
                action = env.action_space.sample()
                next_state, reward, done, _ = env.step(action)
                rewards += reward
                state = next_state
            print(f'Episode {episode+1}, Reward: {rewards}')

    def monitor_network(self, X, y):
        # Monitor the network using anomaly detection
        from sklearn.ensemble import IsolationForest
        clf = IsolationForest(contamination=0.1)
        clf.fit(X)
        anomalies = clf.predict(X)
        return anomalies
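
# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the NeuralNetworkManager above is wired together. The synthetic
# data shapes below are assumptions made purely for the example: one feature
# per node and a binary label per sample.
if __name__ == "__main__":
    manager = NeuralNetworkManager(num_nodes=8, num_edges=7)

    X = np.random.rand(100, 8).astype("float32")   # placeholder feature matrix
    y = np.random.randint(0, 2, size=(100,))        # placeholder binary labels

    manager.train_model(X, y)                       # fits the Keras model for 10 epochs
    scores = manager.predict(X)                     # sigmoid outputs in [0, 1]
    anomalies = manager.monitor_network(X, y)       # +1 = normal, -1 = anomaly (IsolationForest)
    print(scores[:5].ravel(), anomalies[:5])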

--------------------------------------------------------------------------------
/src/ai/optimization.py:
--------------------------------------------------------------------------------
import numpy as np
import scipy.optimize as optimize

class NetworkOptimizer:
    def __init__(self, num_nodes, num_edges):
        self.num_nodes = num_nodes
        self.num_edges = num_edges

    def optimize_topology(self, X, y):
        # Optimize the network topology as a binary assignment problem solved with GLPK.
        # The objective assumes X[i]*y[j] is the score for pairing node i with node j.
        import pyomo.environ as pe
        model = pe.ConcreteModel()
        model.x = pe.Var(range(self.num_nodes), range(self.num_nodes), within=pe.Binary)
        model.obj = pe.Objective(
            expr=sum(X[i] * y[j] * model.x[i, j]
                     for i in range(self.num_nodes)
                     for j in range(self.num_nodes)),
            sense=pe.maximize)
        model.c1 = pe.ConstraintList()
        for i in range(self.num_nodes):
            model.c1.add(sum([model.x[i, j] for j in range(self.num_nodes)]) == 1)
        model.c2 = pe.ConstraintList()
        for i in range(self.num_nodes):
            model.c2.add(sum([model.x[j, i] for j in range(self.num_nodes)]) == 1)
        solver = pe.SolverFactory('glpk')
        results = solver.solve(model)
        return results

    def optimize_routing(self, X, y):
        # Optimize the network routing as a boolean linear program.
        # X is assumed to be an (n x n) score matrix for using edge (i, j).
        import cvxpy as cp
        x = cp.Variable((self.num_nodes, self.num_nodes), boolean=True)
        obj = cp.Maximize(cp.sum(cp.multiply(X, x)))
        constraints = [cp.sum(x, axis=0) == 1, cp.sum(x, axis=1) == 1]
        prob = cp.Problem(obj, constraints)
        prob.solve()
        return x.value

--------------------------------------------------------------------------------
/src/blockchain/block.py:
--------------------------------------------------------------------------------
import hashlib
import time
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding

class Block:
    def __init__(self, index, previous_hash, transactions, timestamp, nonce):
        self.index = index
        self.previous_hash = previous_hash
        self.transactions = transactions
        self.timestamp = timestamp
        self.nonce = nonce
        self.hash = self.calculate_hash()

    def calculate_hash(self):
        data_string = str(self.index) + self.previous_hash + str(self.transactions) + str(self.timestamp) + str(self.nonce)
        return hashlib.sha256(data_string.encode()).hexdigest()

    def sign_block(self, private_key):
        # private_key is expected to be PEM-encoded private key bytes
        signer = serialization.load_pem_private_key(private_key, password=None)
        signature = signer.sign(self.hash.encode(), padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256())
        return signature

    def verify_block(self, public_key, signature):
        # public_key is expected to be PEM-encoded public key bytes; raises InvalidSignature on mismatch
        verifier = serialization.load_pem_public_key(public_key)
        verifier.verify(signature, self.hash.encode(), padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256())
        return True
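
# --- Illustrative usage sketch (not part of the original module) ---
# Shows how a Block can be created, signed with a PEM-encoded RSA key, and
# verified. The key parameters mirror those used elsewhere in this repository.
if __name__ == "__main__":
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    pem_private = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
    pem_public = key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )

    genesis = Block(0, "0", [], int(time.time()), 0)
    signature = genesis.sign_block(pem_private)
    print("hash:", genesis.hash)
    print("signature valid:", genesis.verify_block(pem_public, signature))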
padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256()) 31 | return True 32 | -------------------------------------------------------------------------------- /src/blockchain/chain.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from block import Block 3 | 4 | class Blockchain: 5 | def __init__(self): 6 | self.chain = [self.create_genesis_block()] 7 | self.pending_transactions = [] 8 | self.mining_reward = 10 9 | 10 | def create_genesis_block(self): 11 | return Block(0, "0", [], int(time.time()), 0) 12 | 13 | def get_latest_block(self): 14 | return self.chain[-1] 15 | 16 | def add_transaction(self, transaction): 17 | self.pending_transactions.append(transaction) 18 | 19 | def mine_pending_transactions(self, miner_address): 20 | if len(self.pending_transactions) < 1: 21 | return False 22 | new_block = Block(len(self.chain), self.get_latest_block().hash, self.pending_transactions, int(time.time()), 0) 23 | new_block.sign_block(miner_address) 24 | self.chain.append(new_block) 25 | self.pending_transactions = [] 26 | return True 27 | 28 | def get_balance_of_address(self, address): 29 | balance = 0 30 | for block in self.chain: 31 | for transaction in block.transactions: 32 | if transaction['from'] == address: 33 | balance -= transaction['amount'] 34 | elif transaction['to'] == address: 35 | balance += transaction['amount'] 36 | return balance 37 | 38 | def is_chain_valid(self): 39 | for i in range(1, len(self.chain)): 40 | current_block = self.chain[i] 41 | previous_block = self.chain[i - 1] 42 | if current_block.hash != current_block.calculate_hash(): 43 | return False 44 | if current_block.previous_hash != previous_block.hash: 45 | return False 46 | return True 47 | -------------------------------------------------------------------------------- /src/blockchain/transaction.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | from cryptography.hazmat.primitives import serialization 3 | from cryptography.hazmat.primitives.asymmetric import rsa 4 | from cryptography.hazmat.primitives import hashes 5 | from cryptography.hazmat.primitives.asymmetric import padding 6 | 7 | class Transaction: 8 | def __init__(self, from_address, to_address, amount): 9 | self.from_address = from_address 10 | self.to_address = to_address 11 | self.amount = amount 12 | self.timestamp = int(time.time()) 13 | self.hash = self.calculate_hash() 14 | 15 | def calculate_hash(self): 16 | data_string = self.from_address + self.to_address + str(self.amount) + str(self.timestamp) 17 | return hashlib.sha256(data_string.encode()).hexdigest() 18 | 19 | def sign_transaction(self, private_key): 20 | signer = serialization.load_pem_private_key(private_key, password=None) 21 | signature = signer.sign(self.hash.encode(), padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256()) 22 | return signature 23 | 24 | def verify_transaction(self, public_key, signature): 25 | verifier = serialization.load_pem_public_key(public_key) 26 | verifier.verify(signature, self.hash.encode(), padding.PSS(mgf=padding.MGF1(algorithm=hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256()) 27 | return True 28 | -------------------------------------------------------------------------------- /src/encryption/asymmetric.py: -------------------------------------------------------------------------------- 1 | import os 2 | import 
base64 3 | from cryptography.hazmat.primitives import serialization 4 | from cryptography.hazmat.primitives.asymmetric import rsa 5 | 6 | def generate_key_pair(): 7 | private_key = rsa.generate_private_key( 8 | public_exponent=65537, 9 | key_size=2048 10 | ) 11 | public_key = private_key.public_key() 12 | return private_key, public_key 13 | 14 | def encrypt_data(public_key, data): 15 | encrypted_data = public_key.encrypt( 16 | data, 17 | padding.OAEP( 18 | mgf=padding.MGF1(algorithm=hashes.SHA256()), 19 | algorithm=hashes.SHA256(), 20 | label=None 21 | ) 22 | ) 23 | return encrypted_data 24 | 25 | def decrypt_data(private_key, encrypted_data): 26 | decrypted_data = private_key.decrypt( 27 | encrypted_data, 28 | padding.OAEP( 29 | mgf=padding.MGF1(algorithm=hashes.SHA256()), 30 | algorithm=hashes.SHA256(), 31 | label=None 32 | ) 33 | ) 34 | return decrypted_data 35 | 36 | def encrypt_file(public_key, file_path): 37 | with open(file_path, 'rb') as file: 38 | data = file.read() 39 | encrypted_data = public_key.encrypt( 40 | data, 41 | padding.OAEP( 42 | mgf=padding.MGF1(algorithm=hashes.SHA256()), 43 | algorithm=hashes.SHA256(), 44 | label=None 45 | ) 46 | ) 47 | with open(file_path, 'wb') as file: 48 | file.write(encrypted_data) 49 | 50 | def decrypt_file(private_key, file_path): 51 | with open(file_path, 'rb') as file: 52 | encrypted_data = file.read() 53 | decrypted_data = private_key.decrypt( 54 | encrypted_data, 55 | padding.OAEP( 56 | mgf=padding.MGF1(algorithm=hashes.SHA256()), 57 | algorithm=hashes.SHA256(), 58 | label=None 59 | ) 60 | ) 61 | with open(file_path, 'wb') as file: 62 | file.write(decrypted_data) 63 | -------------------------------------------------------------------------------- /src/encryption/symmetric.py: -------------------------------------------------------------------------------- 1 | **symmetric.py** 2 | ```python 3 | import os 4 | import base64 5 | from cryptography.fernet import Fernet 6 | 7 | def generate_key(): 8 | return Fernet.generate_key() 9 | 10 | def encrypt_data(key, data): 11 | f = Fernet(key) 12 | encrypted_data = f.encrypt(data.encode()) 13 | return encrypted_data 14 | 15 | def decrypt_data(key, encrypted_data): 16 | f = Fernet(key) 17 | decrypted_data = f.decrypt(encrypted_data) 18 | return decrypted_data 19 | 20 | def encrypt_file(key, file_path): 21 | with open(file_path, 'rb') as file: 22 | data = file.read() 23 | encrypted_data = encrypt_data(key, data) 24 | with open(file_path, 'wb') as file: 25 | file.write(encrypted_data) 26 | 27 | def decrypt_file(key, file_path): 28 | with open(file_path, 'rb') as file: 29 | encrypted_data = file.read() 30 | decrypted_data = decrypt_data(key, encrypted_data) 31 | with open(file_path, 'wb') as file: 32 | file.write(decrypted_data) 33 | -------------------------------------------------------------------------------- /src/ml/monitoring.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from sklearn.metrics import accuracy_score 4 | 5 | class AnomalyDetector: 6 | def __init__(self, model, threshold=0.5): 7 | self.model = model 8 | self.threshold = threshold 9 | 10 | def detect_anomalies(self, X): 11 | y_pred = self.model.predict(X) 12 | anomalies = np.where(y_pred < self.threshold)[0] 13 | return anomalies 14 | 15 | class PerformanceMonitor: 16 | def __init__(self, model): 17 | self.model = model 18 | self.accuracy_history = [] 19 | 20 | def monitor_performance(self, X, y): 21 | y_pred = self.model.predict(X) 22 | 
accuracy = accuracy_score(y, np.argmax(y_pred, axis=1)) 23 | self.accuracy_history.append(accuracy) 24 | return accuracy 25 | -------------------------------------------------------------------------------- /src/ml/network.py: -------------------------------------------------------------------------------- 1 | **network.py** 2 | ```python 3 | import numpy as np 4 | import tensorflow as tf 5 | from tensorflow import keras 6 | from sklearn.model_selection import train_test_split 7 | 8 | class NeuralNetwork: 9 | def __init__(self, input_shape, num_classes): 10 | self.model = keras.Sequential([ 11 | keras.layers.Dense(64, activation='relu', input_shape=input_shape), 12 | keras.layers.Dense(32, activation='relu'), 13 | keras.layers.Dense(num_classes, activation='softmax') 14 | ]) 15 | self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) 16 | 17 | def train(self, X, y, epochs=10, batch_size=32): 18 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) 19 | self.model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_test, y_test)) 20 | 21 | def predict(self, X): 22 | return self.model.predict(X) 23 | 24 | class ConvolutionalNeuralNetwork: 25 | def __init__(self, input_shape, num_classes): 26 | self.model = keras.Sequential([ 27 | keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape), 28 | keras.layers.MaxPooling2D((2, 2)), 29 | keras.layers.Flatten(), 30 | keras.layers.Dense(64, activation='relu'), 31 | keras.layers.Dense(num_classes, activation='softmax') 32 | ]) 33 | self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) 34 | 35 | def train(self, X, y, epochs=10, batch_size=32): 36 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) 37 | self.model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_test, y_test)) 38 | 39 | def predict(self, X): 40 | return self.model.predict(X) 41 | -------------------------------------------------------------------------------- /src/ml/optimization.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow import keras 4 | 5 | class GradientDescentOptimizer: 6 | def __init__(self, learning_rate=0.01): 7 | self.learning_rate = learning_rate 8 | 9 | def optimize(self, model, X, y): 10 | with tf.GradientTape() as tape: 11 | y_pred = model(X) 12 | loss = keras.losses.categorical_crossentropy(y, y_pred) 13 | gradients = tape.gradient(loss, model.trainable_variables) 14 | for gradient, variable in zip(gradients, model.trainable_variables): 15 | variable.assign_sub(gradient * self.learning_rate) 16 | 17 | class AdamOptimizer: 18 | def __init__(self, learning_rate=0.01, beta1=0.9, beta2=0.999): 19 | self.learning_rate = learning_rate 20 | self.beta1 = beta1 21 | self.beta2 = beta2 22 | self.m = None 23 | self.v = None 24 | 25 | def optimize(self, model, X, y): 26 | with tf.GradientTape() as tape: 27 | y_pred = model(X) 28 | loss = keras.losses.categorical_crossentropy(y, y_pred) 29 | gradients = tape.gradient(loss, model.trainable_variables) 30 | if self.m is None: 31 | self.m = [tf.zeros_like(grad) for grad in gradients] 32 | self.v = [tf.zeros_like(grad) for grad in gradients] 33 | for i, (gradient, variable) in enumerate(zip(gradients, model.trainable_variables)): 34 | self.m[i] = self.beta1 * self.m[i] + (1 - self.beta1) * gradient 35 | 
self.v[i] = self.beta2 * self.v[i] + (1 - self.beta2) * tf.square(gradient) 36 | variable.assign_sub(self.learning_rate * self.m[i] / (tf.sqrt(self.v[i]) + 1e-8)) 37 | -------------------------------------------------------------------------------- /src/network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow import keras 4 | from sklearn.model_selection import train_test_split 5 | from sklearn.metrics import accuracy_score, classification_report, confusion_matrix 6 | 7 | class NeuralNetwork: 8 | def __init__(self, input_shape, num_classes, architecture='dense'): 9 | self.input_shape = input_shape 10 | self.num_classes = num_classes 11 | self.architecture = architecture 12 | self.model = self.build_model() 13 | 14 | def build_model(self): 15 | if self.architecture == 'dense': 16 | model = keras.Sequential([ 17 | keras.layers.Dense(64, activation='relu', input_shape=self.input_shape), 18 | keras.layers.Dense(32, activation='relu'), 19 | keras.layers.Dense(self.num_classes, activation='softmax') 20 | ]) 21 | elif self.architecture == 'conv': 22 | model = keras.Sequential([ 23 | keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=self.input_shape), 24 | keras.layers.MaxPooling2D((2, 2)), 25 | keras.layers.Flatten(), 26 | keras.layers.Dense(64, activation='relu'), 27 | keras.layers.Dense(self.num_classes, activation='softmax') 28 | ]) 29 | else: 30 | raise ValueError('Invalid architecture') 31 | model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) 32 | return model 33 | 34 | def train(self, X, y, epochs=10, batch_size=32, validation_split=0.2): 35 | X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=validation_split, random_state=42) 36 | self.model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_val, y_val)) 37 | 38 | def predict(self, X): 39 | return self.model.predict(X) 40 | 41 | def evaluate(self, X, y): 42 | y_pred = self.model.predict(X) 43 | y_pred_class = np.argmax(y_pred, axis=1) 44 | accuracy = accuracy_score(y, y_pred_class) 45 | report = classification_report(y, y_pred_class) 46 | matrix = confusion_matrix(y, y_pred_class) 47 | return accuracy, report, matrix 48 | 49 | class NeuralNetworkEnsemble: 50 | def __init__(self, input_shape, num_classes, num_models=5): 51 | self.input_shape = input_shape 52 | self.num_classes = num_classes 53 | self.num_models = num_models 54 | self.models = [NeuralNetwork(input_shape, num_classes) for _ in range(num_models)] 55 | 56 | def train(self, X, y, epochs=10, batch_size=32, validation_split=0.2): 57 | for model in self.models: 58 | model.train(X, y, epochs=epochs, batch_size=batch_size, validation_split=validation_split) 59 | 60 | def predict(self, X): 61 | predictions = [model.predict(X) for model in self.models] 62 | return np.mean(predictions, axis=0) 63 | 64 | def evaluate(self, X, y): 65 | predictions = self.predict(X) 66 | y_pred_class = np.argmax(predictions, axis=1) 67 | accuracy = accuracy_score(y, y_pred_class) 68 | report = classification_report(y, y_pred_class) 69 | matrix = confusion_matrix(y, y_pred_class) 70 | return accuracy, report, matrix 71 | 72 | class NeuralNetworkStacking: 73 | def __init__(self, input_shape, num_classes, num_models=5): 74 | self.input_shape = input_shape 75 | self.num_classes = num_classes 76 | self.num_models = num_models 77 | self.models = [NeuralNetwork(input_shape, num_classes) for _ in range(num_models)] 78 
| self.stacking_model = NeuralNetwork(input_shape, num_classes) 79 | 80 | def train(self, X, y, epochs=10, batch_size=32, validation_split=0.2): 81 | for model in self.models: 82 | model.train(X, y, epochs=epochs, batch_size=batch_size, validation_split=validation_split) 83 | stacking_X = np.concatenate([model.predict(X) for model in self.models], axis=1) 84 | self.stacking_model.train(stacking_X, y, epochs=epochs, batch_size=batch_size, validation_split=validation_split) 85 | 86 | def predict(self, X): 87 | predictions = [model.predict(X) for model in self.models] 88 | stacking_X = np.concatenate(predictions, axis=1) 89 | return self.stacking_model.predict(stacking_X) 90 | 91 | def evaluate(self, X, y): 92 | predictions = self.predict(X) 93 | y_pred_class = np.argmax(predictions, axis=1) 94 | accuracy = accuracy_score(y, y_pred_class) 95 | report = classification_report(y, y_pred_class) 96 | matrix = confusion_matrix(y, y_pred_class) 97 | return accuracy, report, matrix 98 | -------------------------------------------------------------------------------- /tests/test_ai.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src.ai import AI 3 | 4 | class TestAI(unittest.TestCase): 5 | def test_init(self): 6 | ai = AI() 7 | self.assertIsInstance(ai, AI) 8 | 9 | def test_make_decision(self): 10 | ai = AI() 11 | decision = ai.make_decision() 12 | self.assertIsInstance(decision, str) 13 | 14 | if __name__ == '__main__': 15 | unittest.main() 16 | -------------------------------------------------------------------------------- /tests/test_blockchain.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src.blockchain import Blockchain 3 | 4 | class TestBlockchain(unittest.TestCase): 5 | def test_init(self): 6 | blockchain = Blockchain() 7 | self.assertIsInstance(blockchain, Blockchain) 8 | 9 | def test_add_block(self): 10 | blockchain = Blockchain() 11 | blockchain.add_block('data') 12 | self.assertEqual(len(blockchain.chain), 2) 13 | 14 | if __name__ == '__main__': 15 | unittest.main() 16 | -------------------------------------------------------------------------------- /tests/test_encryption.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src.encryption import Encryption 3 | 4 | class TestEncryption(unittest.TestCase): 5 | def test_init(self): 6 | encryption = Encryption() 7 | self.assertIsInstance(encryption, Encryption) 8 | 9 | def test_encrypt(self): 10 | encryption = Encryption() 11 | data = 'secret data' 12 | encrypted_data = encryption.encrypt(data) 13 | self.assertNotEqual(data, encrypted_data) 14 | 15 | def test_decrypt(self): 16 | encryption = Encryption() 17 | data = 'secret data' 18 | encrypted_data = encryption.encrypt(data) 19 | decrypted_data = encryption.decrypt(encrypted_data) 20 | self.assertEqual(data, decrypted_data) 21 | 22 | if __name__ == '__main__': 23 | unittest.main() 24 | -------------------------------------------------------------------------------- /tests/test_ml.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from src.ml import NeuralNetwork 3 | 4 | class TestML(unittest.TestCase): 5 | def test_init(self): 6 | nn = NeuralNetwork((10,), 2) 7 | self.assertIsInstance(nn, NeuralNetwork) 8 | 9 | def test_train(self): 10 | nn = NeuralNetwork((10,), 2) 11 | X = np.random.rand(100, 10) 12 | y = np.random.randint(0, 2, 100) 13 | 
nn.train(X, y) 14 | 15 | def test_predict(self): 16 | nn = NeuralNetwork((10,), 2) 17 | X = np.random.rand(100, 10) 18 | nn.train(X, np.random.randint(0, 2, 100)) 19 | y_pred = nn.predict(X) 20 | self.assertEqual(y_pred.shape, (100, 2)) 21 | 22 | if __name__ == '__main__': 23 | unittest.main() 24 | --------------------------------------------------------------------------------