├── .github └── workflows │ └── codeql.yml ├── .gitignore ├── CodingLog └── v1.0.0.md ├── Docs └── connectFileSystem.md ├── ProjectDesign ├── ClassDesign │ └── fileServerModel │ │ ├── FileClass.drawio │ │ └── FileServer.pdm ├── README.md └── 系统架构图.drawio ├── README.md ├── SECURITY.md ├── app ├── CloudStorageBackend │ ├── FileServer │ │ ├── README.md │ │ ├── api │ │ │ ├── etc │ │ │ │ └── file-api.yaml │ │ │ ├── file.go │ │ │ ├── fileServer.api │ │ │ ├── internal │ │ │ │ ├── DFSClient │ │ │ │ │ ├── DFSClient.go │ │ │ │ │ ├── DFSClientPool.go │ │ │ │ │ ├── DFSClientPool_test.go │ │ │ │ │ ├── IClient │ │ │ │ │ │ └── ClientInterface.go │ │ │ │ │ ├── IPFSClient.go │ │ │ │ │ ├── fastDFS_test.go │ │ │ │ │ └── localFSClient.go │ │ │ │ ├── Model │ │ │ │ │ ├── File │ │ │ │ │ │ ├── FileFactory.go │ │ │ │ │ │ ├── IFile │ │ │ │ │ │ │ ├── DFileInterface.go │ │ │ │ │ │ │ └── FileMeta.go │ │ │ │ │ │ ├── IPFSFile.go │ │ │ │ │ │ ├── fastDFile.go │ │ │ │ │ │ ├── fileModel_test.go │ │ │ │ │ │ └── localFile.go │ │ │ │ │ └── errors.go │ │ │ │ ├── config │ │ │ │ │ └── config.go │ │ │ │ ├── handler │ │ │ │ │ ├── deletefilehandler.go │ │ │ │ │ ├── downloadfilehandler.go │ │ │ │ │ ├── fileHandler_test.go │ │ │ │ │ ├── getfileinfohandler.go │ │ │ │ │ ├── queryfileinfohandler.go │ │ │ │ │ ├── routes.go │ │ │ │ │ ├── updatefilehandler.go │ │ │ │ │ └── uploadfilehandler.go │ │ │ │ ├── logic │ │ │ │ │ ├── deletefilelogic.go │ │ │ │ │ ├── downloadfilelogic.go │ │ │ │ │ ├── getfileinfologic.go │ │ │ │ │ ├── queryfileinfologic.go │ │ │ │ │ └── updatefilelogic.go │ │ │ │ ├── svc │ │ │ │ │ └── servicecontext.go │ │ │ │ ├── types │ │ │ │ │ └── types.go │ │ │ │ └── util │ │ │ │ │ ├── JWT_token_generate_test.go │ │ │ │ │ ├── metaInfoUtil.go │ │ │ │ │ └── metaInfoUtil_test.go │ │ │ └── readme.md │ │ ├── fileServer.Dockefile │ │ ├── go.mod │ │ ├── go.sum │ │ ├── model │ │ │ └── PojoDB │ │ │ │ ├── fileMetaPojo │ │ │ │ ├── filemetatablemodel.go │ │ │ │ ├── filemetatablemodel_gen.go │ │ │ │ └── vars.go │ │ │ │ └── usergroupPojo │ │ │ │ ├── usergroupmodel.go │ │ │ │ ├── usergroupmodel_gen.go │ │ │ │ └── vars.go │ │ ├── mq │ │ │ ├── Job │ │ │ │ ├── asynqserver.go │ │ │ │ ├── etc │ │ │ │ │ └── asynqserver-api.yaml │ │ │ │ ├── internal │ │ │ │ │ ├── config │ │ │ │ │ │ └── config.go │ │ │ │ │ ├── logic │ │ │ │ │ │ ├── deleteFileMeta.go │ │ │ │ │ │ └── routes.go │ │ │ │ │ └── svc │ │ │ │ │ │ ├── asynqServer.go │ │ │ │ │ │ └── servicecontext.go │ │ │ │ └── jobtype │ │ │ │ │ └── JobType.go │ │ │ └── scheduleClient │ │ │ │ ├── etc │ │ │ │ └── mq-api.yaml │ │ │ │ ├── internal │ │ │ │ ├── config │ │ │ │ │ └── config.go │ │ │ │ ├── svc │ │ │ │ │ ├── asynqClient.go │ │ │ │ │ └── serverContext.go │ │ │ │ └── task │ │ │ │ │ ├── hardDeleteJob.go │ │ │ │ │ └── register.go │ │ │ │ └── mq.go │ │ └── rpc │ │ │ ├── README.md │ │ │ ├── etc │ │ │ └── fileserver.yaml │ │ │ ├── fileserver.go │ │ │ ├── fileserver │ │ │ └── fileserver.go │ │ │ ├── internal │ │ │ ├── config │ │ │ │ └── config.go │ │ │ ├── logic │ │ │ │ ├── deletehardlogic.go │ │ │ │ ├── deleteonelogic.go │ │ │ │ ├── findonelogic.go │ │ │ │ ├── inertonelogic.go │ │ │ │ ├── pinglogic.go │ │ │ │ └── queryfileslogic.go │ │ │ ├── server │ │ │ │ └── fileserverserver.go │ │ │ └── svc │ │ │ │ └── servicecontext.go │ │ │ └── pb │ │ │ ├── fileRpcUtil.go │ │ │ ├── fileServer.pb.go │ │ │ ├── fileServer.proto │ │ │ └── fileServer_grpc.pb.go │ └── UserServer │ │ ├── api │ │ ├── etc │ │ │ └── user-api.yaml │ │ ├── internal │ │ │ ├── config │ │ │ │ └── config.go │ │ │ ├── handler │ │ │ │ ├── captcha │ │ │ │ │ 
├── captchahandler.go │ │ │ │ │ └── verifyhandler.go │ │ │ │ ├── routes.go │ │ │ │ └── user │ │ │ │ │ ├── loginhandler.go │ │ │ │ │ └── registerhandler.go │ │ │ ├── logic │ │ │ │ ├── captcha │ │ │ │ │ ├── captchalogic.go │ │ │ │ │ └── verifylogic.go │ │ │ │ └── user │ │ │ │ │ ├── loginlogic.go │ │ │ │ │ └── registerlogic.go │ │ │ ├── svc │ │ │ │ └── servicecontext.go │ │ │ └── types │ │ │ │ └── types.go │ │ ├── model │ │ │ ├── Css_user.sql │ │ │ ├── cssusermodel.go │ │ │ ├── cssusermodel_gen.go │ │ │ └── vars.go │ │ ├── units │ │ │ ├── func_test.go │ │ │ └── units.go │ │ ├── user.api │ │ └── user.go │ │ ├── go.mod │ │ └── go.sum └── CloudStorageFront │ ├── .gitignore │ ├── README.md │ ├── index.html │ ├── package-lock.json │ ├── package.json │ ├── public │ └── favicon.ico │ ├── src │ ├── App.vue │ ├── assets │ │ ├── logo.svg │ │ └── main.css │ ├── components │ │ ├── icons │ │ │ ├── IconCommunity.vue │ │ │ ├── IconDocumentation.vue │ │ │ ├── IconEcosystem.vue │ │ │ ├── IconSupport.vue │ │ │ └── IconTooling.vue │ │ └── topPersonalInformation.vue │ ├── main.js │ ├── router │ │ ├── GlobalAxios.js │ │ ├── RouteUtil.js │ │ └── index.js │ └── views │ │ ├── Home.vue │ │ └── Login.vue │ └── vite.config.js ├── deploy ├── DB │ └── mysql │ │ ├── 1_scheme.sql │ │ └── 2_init.sql ├── filebeat │ └── conf │ │ └── filebeat.yml ├── go-stash │ └── etc │ │ └── config.yaml ├── kubernetes │ ├── kompose │ │ ├── Prometheus-service.yaml │ │ ├── cloudstorage-cloudstoragesystem-networkpolicy.yaml │ │ ├── elasticsearch-claim0-persistentvolumeclaim.yaml │ │ ├── elasticsearch-deployment.yaml │ │ ├── elasticsearch-service.yaml │ │ ├── etcd-deployment.yaml │ │ ├── etcd-service.yaml │ │ ├── filebeat-claim0-persistentvolumeclaim.yaml │ │ ├── filebeat-claim1-persistentvolumeclaim.yaml │ │ ├── filebeat-deployment.yaml │ │ ├── fileserverapi-deployment.yaml │ │ ├── fileserverapi-service.yaml │ │ ├── fileserverrpc-deployment.yaml │ │ ├── fileserverrpc-service.yaml │ │ ├── go-stash-claim0-persistentvolumeclaim.yaml │ │ ├── go-stash-deployment.yaml │ │ ├── grafana-claim0-persistentvolumeclaim.yaml │ │ ├── grafana-deployment.yaml │ │ ├── grafana-service.yaml │ │ ├── jaeger-deployment.yaml │ │ ├── jaeger-service.yaml │ │ ├── kafka-claim0-persistentvolumeclaim.yaml │ │ ├── kafka-deployment.yaml │ │ ├── kafka-service.yaml │ │ ├── kibana-deployment.yaml │ │ ├── kibana-service.yaml │ │ ├── mysql-claim0-persistentvolumeclaim.yaml │ │ ├── mysql-claim1-persistentvolumeclaim.yaml │ │ ├── mysql-deployment.yaml │ │ ├── mysql-service.yaml │ │ ├── nginx-gateway-claim0-persistentvolumeclaim.yaml │ │ ├── nginx-gateway-claim1-persistentvolumeclaim.yaml │ │ ├── nginx-gateway-deployment.yaml │ │ ├── nginx-gateway-service.yaml │ │ ├── prometheus-claim0-persistentvolumeclaim.yaml │ │ ├── prometheus-claim1-persistentvolumeclaim.yaml │ │ ├── prometheus-deployment.yaml │ │ ├── redis-claim0-persistentvolumeclaim.yaml │ │ ├── redis-deployment.yaml │ │ ├── redis-service.yaml │ │ ├── zookeeper-deployment.yaml │ │ └── zookeeper-service.yaml │ └── services │ │ └── fileServer.yaml ├── nginx │ └── conf.d │ │ └── cloudStorage-gateway.conf └── prometheus │ └── server │ └── prometheus.yml ├── docker-compose-env.yaml └── docker-compose.yaml /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 
3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ "main" ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ "main" ] 20 | schedule: 21 | - cron: '19 19 * * 3' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} 27 | timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} 28 | permissions: 29 | actions: read 30 | contents: read 31 | security-events: write 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | language: [ 'go', 'javascript' ] 37 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby', 'swift' ] 38 | # Use only 'java' to analyze code written in Java, Kotlin or both 39 | # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both 40 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support 41 | 42 | steps: 43 | - name: Checkout repository 44 | uses: actions/checkout@v3 45 | 46 | # Initializes the CodeQL tools for scanning. 47 | - name: Initialize CodeQL 48 | uses: github/codeql-action/init@v2 49 | with: 50 | languages: ${{ matrix.language }} 51 | # If you wish to specify custom queries, you can do so here or in a config file. 52 | # By default, queries listed here will override any specified in a config file. 53 | # Prefix the list here with "+" to use these queries and those in the config file. 54 | 55 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 56 | # queries: security-extended,security-and-quality 57 | 58 | 59 | # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). 60 | # If this step fails, then you should remove it and run the build manually (see below) 61 | - name: Autobuild 62 | uses: github/codeql-action/autobuild@v2 63 | 64 | # ℹ️ Command-line programs to run using the OS shell. 65 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 66 | 67 | # If the Autobuild fails above, remove it and uncomment the following three lines. 68 | # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
69 | 70 | # - run: | 71 | # echo "Run, Build Application using script" 72 | # ./location_of_script_within_repo/buildscript.sh 73 | 74 | - name: Perform CodeQL Analysis 75 | uses: github/codeql-action/analyze@v2 76 | with: 77 | category: "/language:${{matrix.language}}" 78 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | HELP.md 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | .settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | !**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | 35 | 36 | ### data ### 37 | 38 | data/* -------------------------------------------------------------------------------- /CodingLog/v1.0.0.md: -------------------------------------------------------------------------------- 1 | ## v1.0.0 2 | 3 | 4 | + Downgraded elasticsearch (8.5.3->7.17.8) so that go-stash can connect to elasticsearch without requiring authentication. 5 | + To allow customization when the service runs in a container, several settings are now configured through environment variables. -------------------------------------------------------------------------------- /Docs/connectFileSystem.md: -------------------------------------------------------------------------------- 1 | # Connecting FileServer to the fastDFS file system 2 | 3 | 4 | ## fileserver supports containerized deployment, so you can tell fileserver where the fastDFS cluster is located by setting an environment variable 5 | 6 | + default-DFSCluster-Location (the location of the DFS cluster); a minimal sketch of how this variable is read follows the FileClass.drawio file below -------------------------------------------------------------------------------- /ProjectDesign/ClassDesign/fileServerModel/FileClass.drawio: -------------------------------------------------------------------------------- 1 | 
7V1rc9o4F/41zLQf6NjGGPgYyKXZTbLdTft2308dgQU4NZYriwb661ey5atkIMS3dNRkCjqWbOmco0fnIiu9wWyzu8HAX98jG7o9Q7N3vcFlzzCMga7RD0bZR5SJYUaEFXbsiKSnhEfnF+RE3m61dWwY5CoShFzi+HniAnkeXJAcDWCMnvPVlsjNP9UHKygQHhfAFalfHZusI+rYGKX0j9BZreMn69YkurIBcWU+kmANbPScIQ2ueoMZRohE3za7GXQZ82K+fL3df3Xvvls3f/wd/ABfpn9+fvhfP7rZ9UuaJEPA0CNn39p8/Ptqs9rYV7tgNDPHv25v11qfcyEg+5hf0Kbs40WEyRqtkAfcq5Q6xWjr2ZDdVaOltM4dQj4l6pT4BAnZc10AW4IoaU02Lr964nD4sAO0xQt4YAzDqB7reEbknAk3EG0gwXtaAUMXEOdnXi8AV69VUi9p+gk5tH+GxqfCYGxETfhE0Mda/hYE4BUkvFUqCPol042UFIrnBaLiw/wJ3C0fwqUTEOzMt4QKw9CuHSodQ7v1CMRLQDlWlOzZEvgJMYG7g7yNeaSZOR6NOIue05mnD62Its7MOksrl0aOjy9lmt6KglOu4v2/rP2HYVz8P79dWLjc5Up7Xqp4Yow53IZ6eYhJg6pn0KtkZhi/ASidzPtxp3g/FkDmYk5BBiwYEF5GEFOQTfDsbFzgMSEskUdiMTExLNaOa9+BPdoyFgQELL7HpekaYecXrQ9iAdDLmHDxGBa7m+O6M+QiTAkeCh+QNnpkN+OPwTCgzT7FotMLpHuwy1W8AwGJO4hcF/iBMw+7zBpuqMAcb4oIQZuzZuULwFIvWVCyaGlqIlrqel1wORHE3zOmzOpyIDO7LphZBAm49ZZI0AM6ahKKEaPvsCA3iSiB66w8WnThkjVjbHOo1XbByYRN22ngg4Xjre7COpdmSvmHc4OREG27dEPLbO3YNvRCOCCAgHmiiT5blUNuDaf0l/JvxrB5SDs+o2U9LdNfVh2TGfKY5juh0CHVmWfI9EaiDgdn0nF14OI3rNOkH9erfq3Uj0vfplxVkq9Y8kPjNMkPx3VJfiBI/g5RkSRWpYL8aiBfH+Uh3xycCvmxYV297E2F+VXN/GQevSHQF91KQfxUkvALdpT0q5b+qcBfn/QtteS3I/rRuG3Ri67eko5b+XiVLvhmPMU7tOArJ6+6ef/2vLw4m3FI/GtC/C/YVdKvWvqtL/iG8vFbEn3rC77xO+T6DG5oHY+r8+WzI3F1Y9wG8+HOIZk0FC0lWSj6PU1CsUJNOajTBTZpS2BPP25+ff32MHWfvI/2zTe0usbTvvYbTZa6M+NWnI+Ioc5oNjNuiOHLnmG5DM79nAytH1u2WYM7ANFqp/k7+n/IfC2i98PViV0zM9fYAtjnyxq7toAs0Z6/bMMFwpSTiNdh6oBdh66IyaPptxX/DDs4jwmJyT242ACfrlost++t6IrlpBn9qA1l0rx4H0rzi7Q1LlLOYke0hrOLY3axOJIFhoCwtf+C9/hAj2p7Pryhk88X+jAXOEAtDC+meWADs03Ahlkl3jzw84zNtmlndN+Is2GbO9gH9aE3ftPdoFjF/HKyz/BLe2fDJdi6DAks03zfdJ/I3odoGVVoR/W2vp0IJ6zVmnyCcElhlylaWGbTj3eCS4chwBwht+lnl8911YNiDwomjdwtK/huGQeMOXo9HgKMbBjd4OVrsHFctv5/hO5PyO76GtPm9ABbssWM2x6WJMBmxNZ3LsBW3L5Xna1/QoBtq9Ipte+nSXYVHnW5K8ir99cPD59d+Pzv0wzdzcZ3+uqb1ZbX96rdhwVn4Tx3ROpSxbu/M56glGtRSrItx08WIIug1HZ+SqF7jnZ9uv4y+yOc23OEqdnfp+QExbkGsqu6RjVwmFyZg8X3VSjy6HLPGECT/SQ1fGDbyb0NGfqzlM09dR8+h/M1Bf6ww/nlIJrSslGUd5HBez81MHS4yXaBOkKum/Qxc0NDqMWhJ1dLK1/MyBzZe4EorHDEjimf/swMPiWLFQ8ul4caxr5OzY9RDVXDRhoyojihDkwxhy5NuxAHBG+5zHuuZZzvQt/dmGWc//ft8baz8lUNf4+GL56n0FuxsKN0otY6sPu9E4BNx9inGqqGFU48RpTZpozOTXBOrybe0kAYpZjCsWTvoowlvvOkgiiK1A8zBD/sfh/8EPekBGvgs69s10JAbXPmQj+vHQIffRC6o88Y+M0wcWDkmTiRvP5oSXhYxduPck/abDX+0Nc+aPqolw1BfBjoSZWSMERY+gSxQ3kAcfWxCTmnxNjEgVRjA7EJeTxp0m48KS/K0RFB1i0zTZRZOZK0JTJxt92XAOLQYeHhmiKeNbHVlkWZNPbTa2m/bTzK63ynluG/zJrIs+z5nIOZKJeAzaeqVymG9yd5DB9JFkJDk76UWVcUWYxIsoCh/xB5wFuqTtHXkoWRMoQ4wP0HLgjwViEkhHZHKIfI5ogEycfCvs9jWemifVI0YjaObbslOYp8+mFg9rqQf7AxonOPg4ZWmGfDnpjfOknXymf/S/YCS1VrUJd9INpYLIT6Sj2aZ+f5y/NWiTrVlLo6ph1smFzxT9YDfnYNf2YvOTHmqH6UYFFGIQZN6oO4jyqDNJSMF2uA3xna+3rARjuuHZHH9LaRpgFkGZ6ILFVsNpZ2T/ZSabu4Ums+vFOgUuKiHAGV2lRBfME0NVkUprwdTImPv2oNU2TvqipMaQZTSrZktIUpsh1WCj26jB66Jr7/JPejawuGyl58VPjRDH5MzsKP2nQhflhLgdSk0I1Aahy56nYkdSiC/sJFW/uRIMyOfN0HhO1MK4iVb6oV4omvCzuUxVJrRO6mAqDGiC/acRLLNAXg1k3JhuraFnvJETWFHY8qhN6lELqlDXMaNLZEz0GfyOJah1SIPy5dktPnFdKusueNTrBOgUv55AECpwzoA0FvD76piOZPMDwk0wVz6HJ44rtFZ0bvIk5W7X341zK6HPu++pD7vodngZXhAp82KtrfqWh/2cb1A0d/SKfBy60eWuQK+BJcHQm4qsL/zYb/LbnCtBX/l5wJlex4V6G6LjnbR6BGEv+XQk19NlsNERrlbJ+qHOOzYKU2ZZAcOZV9aV9By1uCFkkaoFlokR5hpaClGWgxSqK8rUGLISiDl88sDhSmdB5TZMmBhkFFdoSQApWGQKUkrtwaqIi7V9LzbzLIooClImDJR2lrQ5mBJJItDcDUpliyw9YVyjSEMuZZKFObMpiiMvDocAoxSWhYYUx3jRfZH7lr1Hgx1c6o9mDF7NjWKFOM6odntxja3FnFB3AqPOkwnkzaDt6aYhpA4UljeDLqGJ6IkfzotEd20BUqfx9bwUk34MSIJ29rcDJUGy9bhJPzdl7Wpwxi9N6GLpQecayQpdvIYrYdtY3/nIpCljZeCTnv5dX6lEEM4WfP51bI8oaQZXRipLY+ZVIvsLaILF17g1UMqZT+WQaFLJ1GloHWdrB2qIIrLSJLY8EVWsSIHWudXLvBwF/fI5u9HXD1Hw== 
-------------------------------------------------------------------------------- /ProjectDesign/README.md: -------------------------------------------------------------------------------- 1 | # fileServer table construct 2 | 3 | 4 | + open the file which suffixs drawio 5 | 6 | software address: https://github.com/jgraph/drawio-desktop 7 | 8 | online use: https://app.diagrams.net/ 9 | 10 | + open the file which suffixs pdm 11 | 12 | PowerDesigner: maybe you can connect with 1577002722@qq.com to obtain the software 13 | -------------------------------------------------------------------------------- /ProjectDesign/系统架构图.drawio: -------------------------------------------------------------------------------- 1 | 7V3bdto4FP2aPOIlyZZsPzaXdi6dWV3NdNo8ZQmsgBuDqS0S6NePBDbYkggmtQxD4CVYloWsfc4+F11y4V6N5x8yOh39lUYsuUAgml+41xdIfIAv/siSxaqEeEXBMIujVRHcFNzGP1lRCIrSWRyxvFaRp2nC42m9cJBOJmzAa2U0y9LnerWHNKn/6pQOmVZwO6CJXvo1jvioKIUk3Nz4jcXDUfHTASreb0zLysWb5CMapc+VIvfmwr3K0pSvvo3nVyyRg1eOy+q591vurjuWsQlv8sDl8O6DNx59efzn5/VHPH/68vHzYw8GRef4onxjFokBKC7TjI/SYTqhyc2m9DJLZ5OIyWaBuNrU+ZimU1EIReF3xvmiQJPOeCqKRnycFHf1rhdvk6ezbMBe6C9e1ZN9rDxYvPAHlo4ZzxaiQsYSyuOnOoi0kIXhut760U9pLLqCQCG3rleAVkith30HhRiUH6VBTrMh40UbGwjEl0qnNkVLYPYAqXjpJ5rMihfSQKP5dCX9D/FcInM5lZ1h2c2TGOK8GPa1BErYIpqP1hhWsMl5lj6yqzRJM1EySScS8YT2WfIpzWMepxNRPGCycXHjiWU8FtryUanQTzlPx5UK75J4KG9wKSKXtLhat/MQJ0n5mxfIFSMcBNerLk/lK47nQ8kvDv05y5iTpDS679OETgYsux+yCcviwVIwOS060AvBS4Imu8XmL4pQeRcrolAy0/OGDvyiaFQhgtBzcLhd7moysq9AhIdQWjGE2eKbfN7B5eVd0dzy4npeu1pUrz4JhMS7S6yvXwRmJwOQwggsVa5LpvglxMhZhUsVHucDypzlU9MszpmTs+yposYtqCxwPFdRWuJgpOmtGzoQ66obQAdjS4JQskdNEkjC5QCmSxO0EQnyY5aWN3r5Ui/fiQpidOabm+LbUP4Vg8vZM12UrYnerRpc3dbETYwlr4uNhqgK/DiOohWXMNEb2l82BUpJXY4UvrzAEnRJH6sev2zwm4PaQ3UedkNPwxMCAxGXz7UPJbQEpTRvhXU7VTQhwrvRRF2iSQ7iCa+N6vrirm5huzGq0G9oVQmy4oC/yzLJXOsKhQhu9c+R6p8TUhWAnfUJchWBWfWgVbcdAnwQkZrH/Fvle0WgxNVGnuTFojXJsR2RBX4dQUwUItgSg+0rWerveAR2ISkH8eiPH/UwUNEATuDjRsi3Bo5vMPMKWHUonkcxZ7dTuhzCZ+H01oddcZQjyoKHgea2iztkELD+QzsGV81ouFgPYz3XZHARdNQRb8+FCneP7c74pDK0DUMVY4CkxS+XN+/R1hTEeJH/SO4jymmf5qwdjHygcByEThDqMBlQwrbcImSKV86R6ypypdNpItqXP3u/imJRG6oaIscP69rqi5BUEwSMnVAXBWJLWZEp3jmLgl1RcB3ogc3Hq5vDADkorHyMMoINKQ5iMTvpNaAMNoneydkZOfYJzfNl+raJa7IzQvGChq5JdZBeYNRf9GA8bwNASetIDn7lU29z5Xxp7ozecqC1LOXB7dQ58hpwwlvG2oPeK9H19bbUpKRtbIMz32/l+9LzazFtTYjiovuBTufIwZ4uwL6teQtsK13N+CB6fXJzpwNfFDVPgppitnpU1wrCxCnXB7wUhxkoyrXl4OP9GLwYWOO8oM0U5k5bUFY8EluAlMwFViO0pobAVYNCNQNue/EBssQAM+Em3y495TfHA0hNZuJNjmUHD1ib/8AN0lwNecABKKxzAQpIx2wAjosN8Jbs275soDXUNRuYfMI22OB9nLBfZYMjn/JUlN51dcsPYacq3yD7+urg7Vg0L3SU6Bu8VvW0loJmurf35Kb2Qy7Ys2ek9oCdSSvSQmiAt6xk6Z8qCSA1FURMGX7TekJrLOD7utJ3uZrQZtSwezUhauhQ+HZWHu/LDW49Aez7XSi6KQI454FsLV7EThgodkYPDVyiM4S1WVriWnL6xKizPqP8VNm+pyQBiGHteNAp17eA5Juz2dBVjHbYbAeAPRTPG0K6JGRl5YxviNvcwDFk5dX4oD0BMG0naIOQH+nDIz1ZPRZxEqpDCXQoSZeaXPr+b3LRcbnG5VC+t+Zcw1DR9I5XGoaG9MubicQaS0PodiMNvsr7frepVr9Bdu5s5duy8m5AnHrY5SPDjIxbWpDaVk9r5sHW7HuSDns5F6JwqrbeVdbOmCx9t7vFwsPuLzosuQdN02zlgvAj2bZb9nsvDq4Mv0HUVbWJx8sTKJZ6W8AI0ab8Oh4PRdeTuC9fQNKg+HtFByN2/5lFcX7/KUuj2YA7+dPwwjj33YI2YXW+GmJT2to0jeMhW9C0MBNpTGWIIeSnOwuJvdDx65uj147OoaKg0NZM0nQmf/w0gVyv838BRZNC2kOxhe3QW1A8XT+lpwYZQHc6O80thofZs1q6KT3gAA8d1Fcp48vdgShp21cxB6IhOWwgGpomDRQZOc5Neu1uzwvQ7kRwaNBVa2ng8syE1hn3JqG5gOiW0WxwstRL1GyfgXpht9zbQlrfiOdj3KeT083rA9WIkmaKae9kGNAgbty1ss50jkP53XyKg7xozw4e2RJaX2Ff7QCIpuv4MFGWV3S8hBaCBvZ0p3BUHSZ3vbi6cJi8V7lMa4FzAKrIHHB8EWt3KHdN13iXyZlTE1Bfy7Z0LaC2ppe/UzY83dSKr275OfQ6EQj+Z6e7QtDZaULQtAV7PfPTtcI12Htznupra6ovwCpRm49Z6XauD1o7GXKayS6O2Cw/VeYN1cONDDunuz1NEEKDT3fU1FueZml9kQ0wnekVgAOdrQ2Np3ieudcS92oHusmZRF1ZO+deWyvcxbg+
nHTqJai7Th4GJjw7XeQOoSkvukul7c/a/znrs2wiYuTc6mR9DyrzBcgwMwiB55QHG1ZBgeruyNeg8jWNeqiPxr/f/Ysf//768OXq5x+m08P+kdJ+BCqgja0Bge0qUC7GLvd4+AZXsh3xF5ebfxeyMoCbf7ri3vwH -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CloudStorageSystem 2 | 3 | ## Structure(暂定) 4 | 5 | + language:golang(1.17.6) 6 | 7 | + backend: 8 | 9 | |Environment|Version| 10 | |--|--| 11 | |golang|1.17.6| 12 | |etcd|3.5.0| 13 | |go-zero|1.4.2| 14 | |go-stash|1.0.8| 15 | |elasticsearch|7.17.8| 16 | |filebeat|7.17.8| 17 | |kibana|7.17.8| 18 | |asynq|0.24.0| 19 | |zookeeper|3.8.0| 20 | |kafka|3.3.1| 21 | |nginx|1.21.5| 22 | |docker|20.10.21| 23 | |docker-compose|2.13.0| 24 | |kubernetes|暂定| 25 | |DTM|暂定| 26 | |jaeger|1.41.0| 27 | |Prometheus|2.41.0| 28 | |grafana|9.3.2| 29 | |mysql|8.0.28| 30 | |redis|6.2.5| 31 | 32 | + frontend: 33 | 34 | |Environment|Version| 35 | |--|--| 36 | |vue3|| 37 | |element-plus|| 38 | |bootstrap|| 39 | |axios|| 40 | 41 | 42 | + Mysql DB Name:cloudStorageSystem 43 | 44 | 45 | 46 | ## v1.0.0(2023.1.2-预期(2023.1.22)) 47 | 48 | ### 功能 49 | + 用户登录,注册(邮件注册),文件上传、下载、预览,用户信息修改,不同权限登录(管理员、用户),页面美化(axios前后的进度条,文件目录的树结构,文件下载界面,首页结构和组件需要改善) 50 | 51 | + 文件服务(fastdfs客户端、文件上传、下载、预览、session验证、文件元数据管理(mysql、redis)) 52 | 53 | + 用户服务(登录、注册、修改、发邮件、不同权限的表设计、session、验证码)go-zero(自动降载、自动熔断、鉴权、数据记录、监控报警、超时控制、链路追踪...) 54 | + 要求:对于微服务系统,我们使用DTM进行分布式事务管理,同时集成日志管理系统(报警日志、链路追踪日志、服务日志、nginx网关日志)(filebeat、kafka、go-stash、elasticsearch、kibana、jeager、Prometheus),服务数据方面使用传统的mysql、redis集群, 55 | 对于redis来说使用主从控制+哨兵模型来追求并发量,同时对于mysql来说,根据情况使用分库分表使用mysql集群。前期使用docker-compose进行容器化部署,使用nginx网关进行反向代理,后期升级为kubernetes做自动拆箱装箱,横向扩展,负载均衡 -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Use this section to tell people about which versions of your project are 6 | currently being supported with security updates. 7 | 8 | | Version | Supported | 9 | | ------- | ------------------ | 10 | | 5.1.x | :white_check_mark: | 11 | | 5.0.x | :x: | 12 | | 4.0.x | :white_check_mark: | 13 | | < 4.0 | :x: | 14 | 15 | ## Reporting a Vulnerability 16 | 17 | Use this section to tell people how to report a vulnerability. 18 | 19 | Tell them where to go, how often they can expect to get an update on a 20 | reported vulnerability, what to expect if the vulnerability is accepted or 21 | declined, etc. 
22 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/README.md: -------------------------------------------------------------------------------- 1 | # File Server 2 | 3 | + For now, only files transferred over the https or http protocol are supported; a URI is used to locate each file 4 | + Reasonable design patterns make it easier to extend the set of supported file systems 5 | + Local file streams are wrapped in buffered readers, and the File model fetches its data on demand (lazily) to improve performance 6 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/etc/file-api.yaml: -------------------------------------------------------------------------------- 1 | Name: file-api 2 | Host: 0.0.0.0 3 | Port: 8889 4 | 5 | Auth: 6 | AccessSecret: "cloudstoragesystem" 7 | AccessExpire: 600 8 | 9 | Log: 10 | ServiceName: fileServer 11 | Mode: console 12 | Level: info 13 | 14 | Prometheus: 15 | Host: 0.0.0.0 16 | Port: 4005 17 | Path: /metrics 18 | 19 | Telemetry: 20 | Name: fileServer-api 21 | Endpoint: http://jaeger:14268/api/traces 22 | Sampler: 1.0 23 | Batcher: jaeger 24 | ### dtm Transaction manager address 25 | #DtmServerConfig: http://localhost:36789/api/dtmsvr 26 | 27 | FileServerRpcConfig: 28 | Etcd: 29 | Hosts: 30 | - etcd:2379 31 | Key: fileserver.rpc 32 | InsecureSkipVerify: true 33 | 34 | 35 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/file.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fileServer/api/internal/config" 5 | "fileServer/api/internal/handler" 6 | "fileServer/api/internal/svc" 7 | "flag" 8 | "fmt" 9 | "github.com/zeromicro/go-zero/core/conf" 10 | "github.com/zeromicro/go-zero/rest" 11 | ) 12 | 13 | var configFile = flag.String("f", "etc/file-api.yaml", "the config file") 14 | 15 | func main() { 16 | flag.Parse() 17 | var c config.Config 18 | conf.MustLoad(*configFile, &c) 19 | server := rest.MustNewServer(c.RestConf) 20 | defer server.Stop() 21 | 22 | ctx := svc.NewServiceContext(c) 23 | handler.RegisterHandlers(server, ctx) 24 | fmt.Printf("Starting server at %s:%d...\n", c.Host, c.Port) 25 | server.Start() 26 | } 27 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/fileServer.api: -------------------------------------------------------------------------------- 1 | syntax = "v1" 2 | 3 | info( 4 | title: "file server api" 5 | author: "JBossBC" 6 | email: "1577002722@qq.com" 7 | version: "v1.0.0" 8 | ) 9 | 10 | /** 11 | the metaInfo regulations:(map[string]interface) 12 | "owner": string(owner name) 13 | "location":string(default:/ownerName/user indicating location) 14 | "authority":string(default 644) //currently unused 15 | "create_time" timestamp(default "") 16 | **/ 17 | type ( 18 | baseResponse { 19 | Result string `json:"result"` 20 | Message string `json:"message"` 21 | Data interface{} `json:"data"` 22 | } 23 | downloadReq { 24 | MetaInfo map[string]interface{} `json:"metaInfo"` //must include the location key and the 25 | } 26 | findReq { 27 | MetaInfo map[string]interface{} `json:"metaInfo"` //must include the location key 28 | } 29 | queryReq { 30 | MetaInfo map[string]interface{} `json:"metaInfo"` //must include the owner name 31 | } 32 | ) 33 | 34 | @server( 35 | handler: "/file" 36 | jwt: Auth 37 | prefix:"file" 38 | ) 39 | service file-api { 40 | @doc "downloadFile" 41 | @handler downloadFile 42 | get /api/file/download(downloadReq) returns(baseResponse) 43 | @doc "getFileInfo" 44 | @handler getFileInfo 45
| get /api/file/get(findReq)returns(baseResponse) 46 | @doc "queryFileInfo" 47 | @handler queryFileInfo 48 | get /api/file/query(queryReq)returns(baseResponse) 49 | @handler deleteFile 50 | get /api/file/delete(findReq)returns(baseResponse) 51 | @handler updateFile 52 | get /api/file/update(findReq)returns(baseResponse) 53 | } -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/DFSClient/DFSClient.go: -------------------------------------------------------------------------------- 1 | package DFSClient 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "github.com/eventials/go-tus" 7 | "io" 8 | "net/http" 9 | "os" 10 | "strings" 11 | ) 12 | 13 | var FastDFSCluster string = "http://127.0.0.1:8080/" 14 | 15 | func init() { 16 | getenv := os.Getenv("default-DFSCluster-Location") 17 | if getenv == "" && FastDFSCluster == "" { 18 | panic("Cant find the fastDFS cluster location,running fileserver failed") 19 | } 20 | if FastDFSCluster == "" { 21 | FastDFSCluster = getenv 22 | } 23 | } 24 | 25 | type ( 26 | fastDFSClient struct { 27 | Client *tus.Client 28 | } 29 | 30 | FastDFSOption func(client *fastDFSClient) 31 | ) 32 | 33 | /** 34 | url set nil,beacuse the url decide the upload file location,需要单例模式 35 | */ 36 | func newFastDFSClient(option ...FastDFSOption) (result *fastDFSClient) { 37 | client, _ := tus.NewClient("", nil) 38 | if !strings.HasSuffix(FastDFSCluster, "/") { 39 | FastDFSCluster = FastDFSCluster + "/" 40 | } 41 | result = &fastDFSClient{ 42 | Client: client, 43 | } 44 | for i, _ := range option { 45 | option[i](result) 46 | } 47 | return result 48 | } 49 | 50 | func (FC *fastDFSClient) upload(extraData map[string]interface{}, data []byte) (eventuallyURL string, err error) { 51 | filename, ok := extraData["filename"].(string) 52 | if !ok { 53 | return "", errors.New("cant find the filename") 54 | } 55 | user, ok := extraData["user"].(string) 56 | if !ok { 57 | return "", errors.New("cant find the user ") 58 | } 59 | restURL := FC.buildUploadURL("group1") 60 | //set restURL to transport 61 | FC.Client.Url = restURL 62 | defer func() { 63 | FC.Client.Url = FastDFSCluster 64 | }() 65 | upload, err := FC.convertFileUpload(filename, user, data) 66 | if err != nil { 67 | return "", err 68 | } 69 | err = upload.Upload() 70 | if err != nil { 71 | return "", err 72 | } 73 | return upload.Url(), nil 74 | } 75 | func (FC *fastDFSClient) convertFileUpload(targetFilename string, user string, data []byte) (result *tus.Uploader, err error) { 76 | upload := tus.NewUploadFromBytes(data) 77 | upload.Metadata["filename"] = targetFilename 78 | upload.Metadata["path"] = user 79 | result, err = FC.Client.CreateOrResumeUpload(upload) 80 | return result, err 81 | } 82 | func (FC *fastDFSClient) download(url string) (io.Reader, error) { 83 | req, _ := http.NewRequest(http.MethodGet, url, nil) 84 | data, err := FC.Client.Do(req) 85 | if err != nil { 86 | return nil, err 87 | } 88 | reader := bufio.NewReader(data.Body) 89 | 90 | return reader, nil 91 | } 92 | 93 | func (FC *fastDFSClient) buildUploadURL(servers string) string { 94 | builder := strings.Builder{} 95 | builder.WriteString(FastDFSCluster) 96 | builder.WriteString(servers) 97 | builder.WriteString("/big/upload/") 98 | return builder.String() 99 | } 100 | func WithChunkSize(size int64) FastDFSOption { 101 | return func(client *fastDFSClient) { 102 | client.Client.Config.ChunkSize = size 103 | } 104 | } 105 | func WithResume(flag bool) FastDFSOption { 106 | return func(client 
*fastDFSClient) { 107 | client.Client.Config.Resume = flag 108 | } 109 | } 110 | func WithOverridePatchMethod(flag bool) FastDFSOption { 111 | return func(client *fastDFSClient) { 112 | client.Client.Config.OverridePatchMethod = flag 113 | 114 | } 115 | } 116 | func WithStore(store tus.Store) FastDFSOption { 117 | return func(client *fastDFSClient) { 118 | client.Client.Config.Store = store 119 | } 120 | } 121 | func WithHttpHeader(header map[string]interface{}) FastDFSOption { 122 | return func(client *fastDFSClient) { 123 | for key, value := range header { 124 | client.Client.Config.Header.Set(key, value.(string)) 125 | } 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/DFSClient/DFSClientPool.go: -------------------------------------------------------------------------------- 1 | package DFSClient 2 | 3 | import ( 4 | "io" 5 | "sync" 6 | ) 7 | 8 | // improve memory and defend the file system is break 9 | type fastDFSPool struct { 10 | schedulerMachine chan *fastDFSClient //number is equal to cacheNums,to avoid the producer waitting 11 | cacheNums int //default is 100 12 | } 13 | type PoolOption func(pool *fastDFSPool) 14 | 15 | var ( 16 | //exposed to the outside world to get fastdfs client 17 | fastClientpool *fastDFSPool 18 | once sync.Once 19 | lock sync.Mutex 20 | ) 21 | 22 | // you should must init the fastClientPool when you use it 23 | func InitFastDFSPool(cacheNums int, fastDFSOption ...FastDFSOption) { 24 | once.Do(func() { 25 | fastClientpool = &fastDFSPool{} 26 | //init pool object for system 27 | fastClientpool = &fastDFSPool{cacheNums: cacheNums} 28 | fastClientpool.buildPool(fastDFSOption...) 29 | }) 30 | } 31 | 32 | // concurrency is unsafe 33 | func GetFastDFSPool() *fastDFSPool { 34 | // if you cant use initFastDFSPool to init the fastClientPool before you use GetFastDFSPool function,system considers to help you init this variable by default way 35 | lock.Lock() 36 | if fastClientpool == nil { 37 | InitFastDFSPool(100) 38 | } 39 | lock.Unlock() 40 | return fastClientpool 41 | } 42 | 43 | // according to the feature of channel to complete cache queue 44 | func (pool *fastDFSPool) Upload(extraData map[string]interface{}, data []byte) (string, error) { 45 | client := <-pool.schedulerMachine 46 | defer func() { 47 | pool.schedulerMachine <- client 48 | }() 49 | URL, err := client.upload(extraData, data) 50 | if err != nil { 51 | return "", err 52 | } 53 | return URL, nil 54 | } 55 | 56 | /** 57 | 后期封装为自定义的IFile obj 58 | */ 59 | func (pool *fastDFSPool) Download(uri string) (io.Reader, error) { 60 | client := <-pool.schedulerMachine 61 | defer func() { 62 | pool.schedulerMachine <- client 63 | }() 64 | download, err := client.download(uri) 65 | if err != nil { 66 | return nil, err 67 | } 68 | return download, nil 69 | } 70 | 71 | func (dfsPool *fastDFSPool) buildPool(options ...FastDFSOption) { 72 | dfsPool.schedulerMachine = make(chan *fastDFSClient, dfsPool.cacheNums) 73 | for i := 0; i < dfsPool.cacheNums; i++ { 74 | protoClient := newFastDFSClient(options...) 
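// park each pre-built client in the buffered channel; Upload and Download later borrow a client from this channel and return it when they finish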
75 | dfsPool.schedulerMachine <- protoClient 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/DFSClient/DFSClientPool_test.go: -------------------------------------------------------------------------------- 1 | package DFSClient 2 | 3 | import ( 4 | "io/ioutil" 5 | "sync" 6 | "testing" 7 | ) 8 | 9 | import _ "net/http/pprof" 10 | 11 | func TestUploadConcurrency(t *testing.T) { 12 | const MaxTestTimes = 10000000 13 | group := sync.WaitGroup{} 14 | for i := 0; i < MaxTestTimes; i++ { 15 | go func() { 16 | group.Add(1) 17 | defer func() { 18 | group.Done() 19 | }() 20 | GetFastDFSPool().Upload(nil, nil) 21 | }() 22 | 23 | } 24 | group.Wait() 25 | 26 | } 27 | 28 | func BenchmarkConcurrencyUpload(b *testing.B) { 29 | group := sync.WaitGroup{} 30 | for i := 0; i < b.N; i++ { 31 | go func() { 32 | group.Add(1) 33 | defer func() { 34 | group.Done() 35 | }() 36 | //IO 用不到CPU 37 | GetFastDFSPool().Upload(nil, nil) 38 | }() 39 | } 40 | group.Wait() 41 | } 42 | func BenchmarkUpload(t *testing.B) { 43 | for i := 0; i < t.N; i++ { 44 | GetFastDFSPool().Upload(nil, nil) 45 | } 46 | } 47 | func BenchmarkDownload(t *testing.B) { 48 | group := sync.WaitGroup{} 49 | group.Add(t.N) 50 | for i := 0; i < t.N; i++ { 51 | go func() { 52 | defer func() { 53 | group.Done() 54 | }() 55 | download, err := GetFastDFSPool().Download("http://127.0.0.1:8080/group1/big/upload/9bb7f4ea7472846e226a716c35ca8fb4") 56 | if err != nil { 57 | print(err.Error()) 58 | return 59 | } 60 | _, err = ioutil.ReadAll(download) 61 | if err != nil { 62 | print(err.Error()) 63 | return 64 | } 65 | }() 66 | } 67 | group.Wait() 68 | } 69 | 70 | func BenchmarkDownloadByRoutinue(t *testing.B) { 71 | group := sync.WaitGroup{} 72 | group.Add(t.N) 73 | for i := 0; i < t.N; i++ { 74 | go func() { 75 | defer func() { 76 | group.Done() 77 | }() 78 | download, err := newFastDFSClient().download("http://127.0.0.1:8080/group1/big/upload/d825b677594299b6f15cd58ebeb09530") 79 | if err != nil { 80 | //print(err.Error()) 81 | return 82 | } 83 | _, err = ioutil.ReadAll(download) 84 | if err != nil { 85 | //print(err.Error()) 86 | return 87 | } 88 | }() 89 | 90 | } 91 | group.Wait() 92 | } 93 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/DFSClient/IClient/ClientInterface.go: -------------------------------------------------------------------------------- 1 | package IClient 2 | 3 | type ( 4 | IDFSClient interface { 5 | //this map is extraData need be use 6 | Upload(map[string]interface{}, []byte) (string, error) 7 | Download(map[string]interface{}) error 8 | } 9 | ) 10 | 11 | type DFSClientFactory struct { 12 | } 13 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/DFSClient/IPFSClient.go: -------------------------------------------------------------------------------- 1 | package DFSClient 2 | 3 | import "github.com/libp2p/go-libp2p/core/host" 4 | 5 | type IPFSClient struct { 6 | host.Host 7 | } 8 | 9 | func (*IPFSClient) Download(map[string]interface{}) error { 10 | return nil 11 | } 12 | 13 | func (*IPFSClient) Upload(map[string]interface{}, []byte) (string, error) { 14 | return "", nil 15 | } 16 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/DFSClient/fastDFS_test.go: 
-------------------------------------------------------------------------------- 1 | package DFSClient 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "testing" 7 | ) 8 | 9 | func TestTransportFile(t *testing.T) { 10 | url, err := GetFastDFSPool().Upload(map[string]interface{}{"filename": "ss", "user": "xiyang"}, []byte("jiangxiyanssssg")) 11 | if err != nil { 12 | print(err.Error()) 13 | return 14 | } 15 | fmt.Println(url) 16 | } 17 | 18 | func TestFastDFSPool_Download(t *testing.T) { 19 | download, err := GetFastDFSPool().Download("http://127.0.0.1:8080/group1/big/upload/d825b677594299b6f15cd58ebeb09530") 20 | if err != nil { 21 | print(err.Error()) 22 | return 23 | } 24 | all, err := ioutil.ReadAll(download) 25 | if err != nil { 26 | print(err.Error()) 27 | return 28 | } 29 | print(string(all)) 30 | } 31 | func BenchmarkDownloadClient(t *testing.B) { 32 | for i := 0; i < t.N; i++ { 33 | download, err := newFastDFSClient().download("http://127.0.0.1:8080/group1/big/upload/d825b677594299b6f15cd58ebeb09530") 34 | if err != nil { 35 | print(err.Error()) 36 | return 37 | } 38 | _, err = ioutil.ReadAll(download) 39 | if err != nil { 40 | print(err.Error()) 41 | return 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/DFSClient/localFSClient.go: -------------------------------------------------------------------------------- 1 | package DFSClient 2 | 3 | import "fileServer/api/internal/DFSClient/IClient" 4 | 5 | type LocalFSClient struct { 6 | IClient.IDFSClient 7 | } 8 | 9 | func (*LocalFSClient) Download(metaInfo map[string]interface{}) error { 10 | return nil 11 | } 12 | func (*LocalFSClient) Upload(map[string]interface{}, []byte) (string, error) { 13 | return "", nil 14 | } 15 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/Model/File/FileFactory.go: -------------------------------------------------------------------------------- 1 | package File 2 | 3 | import ( 4 | "errors" 5 | "fileServer/api/internal/Model/File/IFile" 6 | 7 | "strings" 8 | "sync" 9 | ) 10 | 11 | const ( 12 | HTTP_PROTOCOL = "http" 13 | HTTPS_PROTOCOL = "https" 14 | FILE_PROTOCOL = "file" 15 | ) 16 | 17 | var SupportProtocol map[string]interface{} 18 | 19 | func init() { 20 | SupportProtocol = map[string]interface{}{HTTP_PROTOCOL: nil, HTTPS_PROTOCOL: nil, FILE_PROTOCOL: nil} 21 | } 22 | 23 | type fileFactory struct { 24 | } 25 | 26 | var ( 27 | fileFactorySingle *fileFactory 28 | once sync.Once 29 | ) 30 | 31 | func GetFileFactory() *fileFactory { 32 | once.Do(func() { 33 | if fileFactorySingle == nil { 34 | fileFactorySingle = &fileFactory{} 35 | } 36 | }) 37 | return fileFactorySingle 38 | } 39 | 40 | /** 41 | create file model default local file,use the uri create the fileModel. 
42 | 取数据支持延迟调用 43 | 目前只支持本地文件系统还有使用http/https协议的uri 44 | */ 45 | func (*fileFactory) CreateFile(uri string, deferCall bool) (result IFile.IFile, err error) { 46 | var protocolName = strings.Split(uri, ":")[0] 47 | if _, ok := SupportProtocol[protocolName]; !ok { 48 | return nil, errors.New("不支持此类文件协议") 49 | } 50 | switch protocolName { 51 | case "file": 52 | result = &LocalFile{} 53 | err = result.InitFile(strings.SplitN(uri, "///", 2)[1], deferCall) 54 | case "http", "https": 55 | result = &fastDFile{} 56 | err = result.InitFile(uri, deferCall) 57 | default: 58 | err = errors.New("不能识别的文件协议") 59 | } 60 | return result, err 61 | } 62 | func (ff *fileFactory) CreateFileDefer(uri string) (IFile.IFile, error) { 63 | return ff.CreateFile(uri, true) 64 | } 65 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/Model/File/IFile/DFileInterface.go: -------------------------------------------------------------------------------- 1 | package IFile 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | "sync" 7 | ) 8 | 9 | type IFile interface { 10 | InitFile(uri string, deferCall bool) error 11 | GetFileData() (io.Reader, error) 12 | } 13 | 14 | // File 尽量使用充血模型进行优化,data设为私有变量的原因提供延迟注入数据,同时保证开闭原则 / 15 | type File struct { 16 | // seem the file descriptor 17 | MetaInfo *FileMeta 18 | //support the file protocol 19 | Uri string 20 | Lock sync.Mutex 21 | DeferWrapData func() error 22 | WrapOnce sync.Once 23 | DataSource io.Reader 24 | IFile 25 | } 26 | 27 | func (file *File) isEmpty() bool { 28 | if file.MetaInfo.size <= 0 { 29 | return true 30 | } 31 | return false 32 | } 33 | func (file *File) GetFileLocation() string { 34 | return file.Uri 35 | } 36 | func (file *File) InitFile(uri string) error { 37 | return errors.New("不能使用抽象类") 38 | } 39 | 40 | /** 41 | maybe throws the panic from DeferWrapData func 42 | */ 43 | func (file *File) GetFileData() (data io.Reader, err error) { 44 | file.WrapOnce.Do(func() { 45 | err = file.DeferWrapData() 46 | }) 47 | return file.DataSource, err 48 | } 49 | func (file *File) SetFileData(data io.Reader) { 50 | file.DataSource = data 51 | } 52 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/Model/File/IFile/FileMeta.go: -------------------------------------------------------------------------------- 1 | package IFile 2 | 3 | import ( 4 | "os" 5 | "time" 6 | ) 7 | 8 | /** 9 | 文件系统中文件的状态 10 | */ 11 | type FileMeta struct { 12 | name string 13 | create_time time.Time 14 | update_time time.Time 15 | isDir bool 16 | typeof string 17 | authority string 18 | size int64 19 | } 20 | 21 | func (fm *FileMeta) GetSize() int64 { 22 | return fm.size 23 | } 24 | 25 | /** 26 | 伪适配,貌似标准库中的filestat不支持运行时调用,只能一个一个匹配 27 | */ 28 | func CreateFileMeta(fileInfo os.FileInfo) (metaData *FileMeta) { 29 | metaData = &FileMeta{} 30 | metaData.size = fileInfo.Size() 31 | metaData.update_time = fileInfo.ModTime() 32 | metaData.isDir = fileInfo.IsDir() 33 | metaData.name = fileInfo.Name() 34 | var fileMode = fileInfo.Mode() 35 | metaData.authority = fileMode.Perm().String() 36 | metaData.typeof = fileMode.Type().String() 37 | return metaData 38 | } 39 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/Model/File/IPFSFile.go: -------------------------------------------------------------------------------- 1 | package File 2 | 3 | import "io" 4 | 5 | type IPFSFile struct { 
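// placeholder for an IPFS-backed file model; InitFile and GetFileData below are currently empty stubs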
6 | } 7 | 8 | func (*IPFSFile) InitFile(uri string, deferCall bool) error { 9 | return nil 10 | } 11 | func (*IPFSFile) GetFileData() (io.Reader, error) { 12 | return nil, nil 13 | } 14 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/Model/File/fastDFile.go: -------------------------------------------------------------------------------- 1 | package File 2 | 3 | import ( 4 | "fileServer/api/internal/DFSClient" 5 | "fileServer/api/internal/Model/File/IFile" 6 | ) 7 | 8 | type fastDFile struct { 9 | IFile.File 10 | } 11 | 12 | func (df *fastDFile) InitFile(uri string, deferCall bool) (err error) { 13 | df.DeferWrapData = func() error { 14 | download, wrapErr := DFSClient.GetFastDFSPool().Download(uri) 15 | if wrapErr != nil { 16 | return wrapErr 17 | } 18 | df.DataSource = download 19 | return nil 20 | } 21 | if !deferCall { 22 | err = df.DeferWrapData() 23 | } 24 | return err 25 | } 26 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/Model/File/fileModel_test.go: -------------------------------------------------------------------------------- 1 | package File 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestFileFactory_CreateFile(t *testing.T) { 8 | file, err := GetFileFactory().CreateFile("file:///D:/CloudStorage/ProjectDesign/ClassDesign/fileServerModel/README.md", true) 9 | if err != nil { 10 | println(err.Error()) 11 | return 12 | } 13 | println(file.GetFileData()) 14 | } 15 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/Model/File/localFile.go: -------------------------------------------------------------------------------- 1 | package File 2 | 3 | import ( 4 | "bufio" 5 | "fileServer/api/internal/Model/File/IFile" 6 | 7 | "github.com/zeromicro/go-zero/core/logx" 8 | "os" 9 | ) 10 | 11 | type LocalFile struct { 12 | IFile.File 13 | } 14 | 15 | func (lf *LocalFile) InitFile(uri string, deferCall bool) (err error) { 16 | file, err := os.OpenFile(uri, os.O_APPEND, 644) 17 | if err != nil { 18 | return err 19 | } 20 | defer func() { 21 | lf.DataSource = file 22 | //register the deferred data-wrapping function 23 | lf.DeferWrapData = func() error { 24 | lf.Lock.Lock() 25 | defer lf.Lock.Unlock() 26 | lf.DataSource = bufio.NewReader(lf.DataSource) 27 | return nil 28 | } 29 | if !deferCall { 30 | lf.WrapOnce.Do(func() { 31 | err = lf.DeferWrapData() 32 | }) 33 | } 34 | //recover from any panic during init 35 | if panicErr := recover(); panicErr != nil { 36 | logx.Errorf("localFile %s init error:%s", uri, panicErr) 37 | } 38 | }() 39 | lf.Uri = uri 40 | fileInfo, err := file.Stat() 41 | if err != nil { 42 | return err 43 | } 44 | lf.MetaInfo = IFile.CreateFileMeta(fileInfo) 45 | return err 46 | } 47 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/Model/errors.go: -------------------------------------------------------------------------------- 1 | package Model 2 | 3 | import "errors" 4 | 5 | var SystemIsNotUse error = errors.New("系统崩溃") 6 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "github.com/zeromicro/go-zero/rest" 5 | "github.com/zeromicro/go-zero/zrpc" 6 | ) 7 | 8 | type Config struct { 9 | 
rest.RestConf 10 | Auth struct { 11 | AccessSecret string 12 | AccessExpire int64 13 | } 14 | FileServerRpcConfig zrpc.RpcClientConf 15 | DtmServerConfig string 16 | } 17 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/handler/deletefilehandler.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "net/http" 5 | 6 | "fileServer/api/internal/logic" 7 | "fileServer/api/internal/svc" 8 | "fileServer/api/internal/types" 9 | "github.com/zeromicro/go-zero/rest/httpx" 10 | ) 11 | 12 | func deleteFileHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 13 | return func(w http.ResponseWriter, r *http.Request) { 14 | var req types.FindReq 15 | if err := httpx.Parse(r, &req); err != nil { 16 | httpx.Error(w, err) 17 | return 18 | } 19 | 20 | l := logic.NewDeleteFileLogic(r.Context(), svcCtx) 21 | resp, err := l.DeleteFile(&req) 22 | if err != nil { 23 | httpx.Error(w, err) 24 | } else { 25 | httpx.OkJson(w, resp) 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/handler/downloadfilehandler.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "bufio" 5 | "fileServer/api/internal/logic" 6 | "fileServer/api/internal/svc" 7 | "fileServer/api/internal/types" 8 | "fmt" 9 | "io/ioutil" 10 | "net/http" 11 | 12 | "github.com/zeromicro/go-zero/rest/httpx" 13 | ) 14 | 15 | func downloadFileHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 16 | return func(w http.ResponseWriter, r *http.Request) { 17 | var req types.DownloadReq 18 | if err := httpx.Parse(r, &req); err != nil { 19 | httpx.Error(w, err) 20 | return 21 | } 22 | l := logic.NewDownloadFileLogic(r.Context(), svcCtx) 23 | resp := l.DownloadFile(&req) 24 | if resp.Result != "true" { 25 | httpx.OkJson(w, resp) 26 | return 27 | } 28 | header := w.Header() 29 | header.Set("Content-Disposition", fmt.Sprintf("attachment;fileName=%s", req.MetaInfo["filename"].(string))) 30 | reader := resp.Data.(*bufio.Reader) 31 | all, _ := ioutil.ReadAll(reader) 32 | w.Write(all) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/handler/fileHandler_test.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "net/http" 8 | "testing" 9 | ) 10 | 11 | func BenchmarkDownLoad(t *testing.B) { 12 | //header := http.Header{} 13 | //header.Add("Authorization", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjMwMDAwMDAwMTY3NDQ3NTg0OCwiaWF0IjoxNjc0NDc1ODQ4LCJ1c2VyIjoieGl5YW5nIn0.yJ4r9Cxs8OnlbQTuPjvgmat95SuIXvA5nxDMsYfjb3U") 14 | var JSONObj = make(map[string]interface{}) 15 | JSONObj["metaInfo"] = map[string]interface{}{"filename": "README.md"} 16 | marshal, _ := json.Marshal(JSONObj) 17 | //request := &http.Request{ 18 | // Method: http.MethodGet, 19 | // URL: net.FormatURL("http", "localhost", 8889, "/file/api/file/download"), 20 | // Header: header, 21 | // Body: body, 22 | //} 23 | request, _ := http.NewRequest(http.MethodGet, "http://localhost:8889/file/api/file/download", bytes.NewReader(marshal)) 24 | request.Header.Set("Content-Length", fmt.Sprintf("%d", len(marshal))) 25 | request.Header.Set("Content-Type", "application/json") 26 | 
request.Header.Set("Connection", "keep-alive") 27 | request.Header.Set("Authorization", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjMwMDAwMDAwMTY3NDQ3NTg0OCwiaWF0IjoxNjc0NDc1ODQ4LCJ1c2VyIjoieGl5YW5nIn0.yJ4r9Cxs8OnlbQTuPjvgmat95SuIXvA5nxDMsYfjb3U") 28 | client := http.Client{} 29 | t.N = 1 30 | for i := 0; i < t.N; i++ { 31 | do, err := client.Do(request) 32 | if err != nil && do == nil { 33 | fmt.Errorf("error:%s", err.Error()) 34 | return 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/handler/getfileinfohandler.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "fileServer/api/internal/logic" 5 | "fileServer/api/internal/svc" 6 | "fileServer/api/internal/types" 7 | "net/http" 8 | 9 | "github.com/zeromicro/go-zero/rest/httpx" 10 | ) 11 | 12 | func getFileInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 13 | return func(w http.ResponseWriter, r *http.Request) { 14 | var req types.FindReq 15 | if err := httpx.Parse(r, &req); err != nil { 16 | httpx.Error(w, err) 17 | return 18 | } 19 | l := logic.NewGetFileInfoLogic(r.Context(), svcCtx) 20 | resp := l.GetFileInfo(&req) 21 | httpx.OkJson(w, resp) 22 | 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/handler/queryfileinfohandler.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "fileServer/api/internal/logic" 5 | "fileServer/api/internal/svc" 6 | "fileServer/api/internal/types" 7 | "net/http" 8 | 9 | "github.com/zeromicro/go-zero/rest/httpx" 10 | ) 11 | 12 | func queryFileInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 13 | return func(w http.ResponseWriter, r *http.Request) { 14 | var req types.QueryReq 15 | if err := httpx.Parse(r, &req); err != nil { 16 | httpx.Error(w, err) 17 | return 18 | } 19 | 20 | l := logic.NewQueryFileInfoLogic(r.Context(), svcCtx) 21 | resp := l.QueryFileInfo(&req) 22 | httpx.OkJson(w, resp) 23 | 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/handler/routes.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT. 
2 | package handler 3 | 4 | import ( 5 | "net/http" 6 | "time" 7 | 8 | "fileServer/api/internal/svc" 9 | 10 | "github.com/zeromicro/go-zero/rest" 11 | ) 12 | 13 | func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) { 14 | server.AddRoutes( 15 | []rest.Route{ 16 | { 17 | Method: http.MethodPost, 18 | Path: "/api/file/upload", 19 | Handler: uploadFileHandler(serverCtx), 20 | }, 21 | { 22 | Method: http.MethodGet, 23 | Path: "/api/file/download", 24 | Handler: downloadFileHandler(serverCtx), 25 | }, 26 | { 27 | Method: http.MethodGet, 28 | Path: "/api/file/get", 29 | Handler: getFileInfoHandler(serverCtx), 30 | }, 31 | { 32 | Method: http.MethodGet, 33 | Path: "/api/file/query", 34 | Handler: queryFileInfoHandler(serverCtx), 35 | }, 36 | { 37 | Method: http.MethodGet, 38 | Path: "/api/file/delete", 39 | Handler: deleteFileHandler(serverCtx), 40 | }, 41 | { 42 | Method: http.MethodGet, 43 | Path: "/api/file/update", 44 | Handler: updateFileHandler(serverCtx), 45 | }, 46 | }, 47 | rest.WithJwt(serverCtx.Config.Auth.AccessSecret), 48 | rest.WithPrefix("/file"), 49 | rest.WithTimeout(3*time.Second), 50 | rest.WithPriority(), 51 | ) 52 | } 53 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/handler/updatefilehandler.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "net/http" 5 | 6 | "fileServer/api/internal/logic" 7 | "fileServer/api/internal/svc" 8 | "fileServer/api/internal/types" 9 | "github.com/zeromicro/go-zero/rest/httpx" 10 | ) 11 | 12 | func updateFileHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 13 | return func(w http.ResponseWriter, r *http.Request) { 14 | var req types.FindReq 15 | if err := httpx.Parse(r, &req); err != nil { 16 | httpx.Error(w, err) 17 | return 18 | } 19 | l := logic.NewUpdateFileLogic(r.Context(), svcCtx) 20 | resp, err := l.UpdateFile(&req) 21 | if err != nil { 22 | httpx.Error(w, err) 23 | } else { 24 | httpx.OkJson(w, resp) 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/handler/uploadfilehandler.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fileServer/api/internal/DFSClient" 7 | "fileServer/api/internal/svc" 8 | "fileServer/api/internal/types" 9 | "fileServer/rpc/pb" 10 | "fmt" 11 | "github.com/zeromicro/go-zero/rest/httpx" 12 | "io/ioutil" 13 | "mime/multipart" 14 | "net/http" 15 | "strings" 16 | "sync" 17 | "time" 18 | ) 19 | 20 | const MaxTransportByte int64 = 209715200 21 | 22 | func uploadFileHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 23 | return func(w http.ResponseWriter, r *http.Request) { 24 | var resp = types.NewDefaultRes() 25 | defer func() { 26 | if panicHandler := recover(); panicHandler != nil { 27 | resp.GetFailedRep("系统错误") 28 | httpx.OkJson(w, resp) 29 | } 30 | }() 31 | 32 | //support max transport size for a request 33 | err := r.ParseMultipartForm(MaxTransportByte) 34 | if err != nil { 35 | resp.GetFailedRep("未识别到文件") 36 | httpx.OkJson(w, resp) 37 | return 38 | } 39 | form := r.MultipartForm 40 | err = handlerFiles(svcCtx, form, r) 41 | if err != nil { 42 | resp.GetFailedRep(err.Error()) 43 | httpx.OkJson(w, resp) 44 | return 45 | } 46 | //logic because the r.MultipartForm data is large,consider to avoid multiple copy ,so reduce the 
times of convey,and 47 | // i must do it because the go-zero logic currently cant support the logic of existing file 48 | httpx.OkJson(w, resp) 49 | 50 | } 51 | } 52 | func handlerFiles(handler *svc.ServiceContext, form *multipart.Form, r *http.Request) (err error) { 53 | group := sync.WaitGroup{} 54 | group.Add(len(form.File)) 55 | errUrl := make(chan string, len(form.File)) 56 | user := r.Context().Value("user").(string) 57 | for index, _ := range form.File { 58 | file, m, err := r.FormFile(index) 59 | //maybe is unsafe 60 | defer file.Close() 61 | if err != nil { 62 | return err 63 | } 64 | data, err := ioutil.ReadAll(file) 65 | if err != nil { 66 | return err 67 | } 68 | // high performance 69 | go func() { 70 | defer func() { 71 | group.Done() 72 | }() 73 | extraData := map[string]interface{}{"user": user, "filename": m.Filename} 74 | upload, err := DFSClient.GetFastDFSPool().Upload(extraData, data) 75 | if err != nil { 76 | errUrl <- fmt.Sprintf("filename %s upload failed:%s", m.Filename, err.Error()) 77 | return 78 | } 79 | //combine rpc pb.FileMetaInfo 80 | info := &pb.FileMetaInfo{ 81 | Creator: user, 82 | CreateGroup: user, 83 | Name: m.Filename, 84 | CreateTime: time.Now().Format(time.RFC850), 85 | Authority: "644", 86 | TypeOf: "file", 87 | UpdateTime: time.Now().Format(time.RFC850), 88 | Size: m.Size, 89 | IsDir: false, 90 | DeleteTime: "", 91 | Description: upload, 92 | } 93 | //spin lock,reduce concurrent load using exponential dispersion algorithm 94 | var retryTimes = 5 95 | var recoveryTime = 1 * time.Second 96 | for i := 0; i < retryTimes; i++ { 97 | ctx, _ := context.WithTimeout(context.Background(), recoveryTime) 98 | one, err := handler.FileRpc.InertOne(ctx, info) 99 | if err == nil && one.Result == "true" { 100 | break 101 | } 102 | <-ctx.Done() 103 | recoveryTime = time.Duration(int64(float64(recoveryTime.Nanoseconds()) * 1.5)) 104 | time.Sleep(recoveryTime) 105 | } 106 | // default In retryTimes,the database operation must be successful 107 | }() 108 | } 109 | group.Wait() 110 | close(errUrl) 111 | errLength := len(errUrl) 112 | if errLength != 0 { 113 | builder := strings.Builder{} 114 | for i := 0; i < errLength; i++ { 115 | builder.WriteString(<-errUrl) 116 | } 117 | return errors.New(builder.String()) 118 | } 119 | return nil 120 | } 121 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/logic/deletefilelogic.go: -------------------------------------------------------------------------------- 1 | package logic 2 | 3 | import ( 4 | "context" 5 | 6 | "fileServer/api/internal/svc" 7 | "fileServer/api/internal/types" 8 | 9 | "github.com/zeromicro/go-zero/core/logx" 10 | ) 11 | 12 | type DeleteFileLogic struct { 13 | logx.Logger 14 | ctx context.Context 15 | svcCtx *svc.ServiceContext 16 | } 17 | 18 | func NewDeleteFileLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteFileLogic { 19 | return &DeleteFileLogic{ 20 | Logger: logx.WithContext(ctx), 21 | ctx: ctx, 22 | svcCtx: svcCtx, 23 | } 24 | } 25 | 26 | func (l *DeleteFileLogic) DeleteFile(req *types.FindReq) (resp *types.BaseResponse, err error) { 27 | // todo: add your logic here and delete this line 28 | defer func() { 29 | if panicErr := recover(); panicErr != nil { 30 | resp.GetFailedRep("系统出错") 31 | } 32 | }() 33 | inputMap := req.MetaInfo 34 | if _, ok := inputMap["creator"]; !ok { 35 | resp.GetFailedRep("请指定用户") 36 | return resp, err 37 | } 38 | if _, ok := inputMap["name"]; !ok { 39 | resp.GetFailedRep("没有要删除的文件") 
40 | return resp, err 41 | } 42 | //center logic 43 | return 44 | } 45 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/logic/downloadfilelogic.go: -------------------------------------------------------------------------------- 1 | package logic 2 | 3 | import ( 4 | "context" 5 | "fileServer/api/internal/DFSClient" 6 | "fileServer/api/internal/svc" 7 | "fileServer/api/internal/types" 8 | "fileServer/rpc/pb" 9 | 10 | "github.com/zeromicro/go-zero/core/logx" 11 | ) 12 | 13 | type DownloadFileLogic struct { 14 | logx.Logger 15 | ctx context.Context 16 | svcCtx *svc.ServiceContext 17 | } 18 | 19 | func NewDownloadFileLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DownloadFileLogic { 20 | return &DownloadFileLogic{ 21 | Logger: logx.WithContext(ctx), 22 | ctx: ctx, 23 | svcCtx: svcCtx, 24 | } 25 | } 26 | 27 | func (l *DownloadFileLogic) DownloadFile(req *types.DownloadReq) (resp *types.BaseResponse) { 28 | // todo: add your logic here and delete this line 29 | resp = types.NewDefaultRes() 30 | defer func() { 31 | if panicErr := recover(); panicErr != nil { 32 | logx.Error(panicErr) 33 | resp.GetFailedRep("系统错误") 34 | } 35 | }() 36 | user, ok := l.ctx.Value("user").(string) 37 | filename, ok := req.MetaInfo["filename"].(string) 38 | if !ok { 39 | resp.GetFailedRep("未指定文件名") 40 | return resp 41 | } 42 | one, err := l.svcCtx.FileRpc.FindOne(l.ctx, &pb.FindFileReq{Owner: user, Name: filename}) 43 | if err != nil { 44 | resp.GetFailedRep("查询失败") 45 | return resp 46 | } 47 | if one == nil { 48 | resp.GetFailedRep("文件不存在") 49 | return resp 50 | } 51 | download, err := DFSClient.GetFastDFSPool().Download(one.Description) 52 | if err != nil { 53 | resp.GetFailedRep("文件下载失败") 54 | return resp 55 | } 56 | resp.AddData(download) 57 | return resp 58 | } 59 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/logic/getfileinfologic.go: -------------------------------------------------------------------------------- 1 | package logic 2 | 3 | import ( 4 | "context" 5 | "fileServer/api/internal/svc" 6 | "fileServer/api/internal/types" 7 | "fileServer/api/internal/util" 8 | "fileServer/rpc/pb" 9 | 10 | "github.com/zeromicro/go-zero/core/logx" 11 | ) 12 | 13 | type GetFileInfoLogic struct { 14 | logx.Logger 15 | ctx context.Context 16 | svcCtx *svc.ServiceContext 17 | } 18 | 19 | func NewGetFileInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetFileInfoLogic { 20 | return &GetFileInfoLogic{ 21 | Logger: logx.WithContext(ctx), 22 | ctx: ctx, 23 | svcCtx: svcCtx, 24 | } 25 | } 26 | 27 | func (l *GetFileInfoLogic) GetFileInfo(req *types.FindReq) (resp *types.BaseResponse) { 28 | // todo: add your logic here and delete this line 29 | resp = types.NewDefaultRes() 30 | defer func() { 31 | if errHandler := recover(); errHandler != nil { 32 | logx.Error(errHandler) 33 | resp.GetFailedRep("系统出错") 34 | } 35 | }() 36 | owner, ok := req.MetaInfo["creator"].(string) 37 | if !ok { 38 | resp.GetFailedRep("未指定用户") 39 | return resp 40 | } 41 | name, ok := req.MetaInfo["name"].(string) 42 | if !ok { 43 | resp.GetFailedRep("没有指定文件") 44 | return resp 45 | } 46 | result, rpcErr := l.svcCtx.FileRpc.FindOne(l.ctx, &pb.FindFileReq{Owner: owner, Name: name}) 47 | if rpcErr != nil { 48 | resp.GetFailedRep("获取文件失败") 49 | return resp 50 | } 51 | meta, convertErr := util.ConvertRpcFileMeta(result) 52 | if convertErr != nil { 53 | logx.Error(convertErr.Error()) 
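// NOTE: in DownloadFile above, the first `ok` from the "user" assertion is
// immediately overwritten by the "filename" assertion, so a missing JWT user
// claim is never detected. A sketch of checking both assertions separately,
// assuming the token carries a "user" claim as generated in
// util/JWT_token_generate_test.go:
//
//	user, userOK := l.ctx.Value("user").(string)
//	filename, nameOK := req.MetaInfo["filename"].(string)
//	if !userOK || !nameOK {
//		resp.GetFailedRep("未指定文件名或用户")
//		return resp
//	}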
54 | resp.GetFailedRep("系统出错") 55 | return resp 56 | } 57 | resp.AddData(meta) 58 | return resp 59 | } 60 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/logic/queryfileinfologic.go: -------------------------------------------------------------------------------- 1 | package logic 2 | 3 | import ( 4 | "context" 5 | "fileServer/api/internal/svc" 6 | "fileServer/api/internal/types" 7 | "fileServer/api/internal/util" 8 | "fileServer/rpc/pb" 9 | 10 | "github.com/zeromicro/go-zero/core/logx" 11 | ) 12 | 13 | type QueryFileInfoLogic struct { 14 | logx.Logger 15 | ctx context.Context 16 | svcCtx *svc.ServiceContext 17 | } 18 | 19 | func NewQueryFileInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *QueryFileInfoLogic { 20 | return &QueryFileInfoLogic{ 21 | Logger: logx.WithContext(ctx), 22 | ctx: ctx, 23 | svcCtx: svcCtx, 24 | } 25 | } 26 | 27 | func (l *QueryFileInfoLogic) QueryFileInfo(req *types.QueryReq) (resp *types.BaseResponse) { 28 | // todo: add your logic here and delete this line 29 | resp = types.NewDefaultRes() 30 | defer func() { 31 | if errHandler := recover(); errHandler != nil { 32 | logx.Error(errHandler) 33 | resp.GetFailedRep("系统出错") 34 | } 35 | }() 36 | owner, ok := req.MetaInfo["creator"].(string) 37 | if !ok { 38 | resp.GetFailedRep("请指定用户") 39 | return resp 40 | } 41 | files, rpcErr := l.svcCtx.FileRpc.QueryFiles(l.ctx, &pb.QueryFileReq{Owner: owner}) 42 | if rpcErr != nil { 43 | logx.Error(rpcErr.Error()) 44 | resp.GetFailedRep("查询失败") 45 | return resp 46 | } 47 | list, convertErr := util.ConvertRpcFileMetaList(files.List) 48 | if convertErr != nil { 49 | logx.Error(convertErr.Error()) 50 | resp.GetFailedRep("系统出错") 51 | return resp 52 | } 53 | resp.AddData(list) 54 | return resp 55 | } 56 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/logic/updatefilelogic.go: -------------------------------------------------------------------------------- 1 | package logic 2 | 3 | import ( 4 | "context" 5 | 6 | "fileServer/api/internal/svc" 7 | "fileServer/api/internal/types" 8 | 9 | "github.com/zeromicro/go-zero/core/logx" 10 | ) 11 | 12 | type UpdateFileLogic struct { 13 | logx.Logger 14 | ctx context.Context 15 | svcCtx *svc.ServiceContext 16 | } 17 | 18 | func NewUpdateFileLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateFileLogic { 19 | return &UpdateFileLogic{ 20 | Logger: logx.WithContext(ctx), 21 | ctx: ctx, 22 | svcCtx: svcCtx, 23 | } 24 | } 25 | 26 | func (l *UpdateFileLogic) UpdateFile(req *types.FindReq) (resp *types.BaseResponse, err error) { 27 | // todo: add your logic here and delete this line 28 | 29 | return 30 | } 31 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/svc/servicecontext.go: -------------------------------------------------------------------------------- 1 | package svc 2 | 3 | import ( 4 | "fileServer/api/internal/config" 5 | "fileServer/rpc/fileserver" 6 | "github.com/zeromicro/go-zero/zrpc" 7 | ) 8 | 9 | type ServiceContext struct { 10 | Config config.Config 11 | FileRpc fileserver.FileServer 12 | DtmServer string 13 | } 14 | 15 | func NewServiceContext(c config.Config) *ServiceContext { 16 | return &ServiceContext{ 17 | Config: c, 18 | FileRpc: fileserver.NewFileServer(zrpc.MustNewClient(c.FileServerRpcConfig)), 19 | DtmServer: c.DtmServerConfig, 20 | } 21 | } 22 | 
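// For reference, the api-side config.Config consumed by this ServiceContext and
// by handler/routes.go needs at least the fields sketched below; the field names
// are assumptions inferred from those call sites, since api/internal/config is
// not shown in this dump:
//
//	type Config struct {
//		rest.RestConf
//		Auth                struct{ AccessSecret string }
//		FileServerRpcConfig zrpc.RpcClientConf
//		DtmServerConfig     string
//	}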
-------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/types/types.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT. 2 | package types 3 | 4 | import "os" 5 | 6 | type BaseResponse struct { 7 | Result string `json:"result"` 8 | Message string `json:"message"` 9 | Data interface{} `json:"data"` 10 | } 11 | func NewDefaultRes()*BaseResponse{ 12 | return &BaseResponse{ 13 | Result: "true", 14 | Message: "操作成功", 15 | } 16 | } 17 | func (br *BaseResponse)AddData(data interface{}){ 18 | br.Data=data 19 | } 20 | func( br *BaseResponse) GetFailedRep(msg string){ 21 | br.Result= "fail" 22 | br.Message=msg 23 | } 24 | 25 | type UploadReq struct { 26 | MetaInfo map[string]interface{} `json:"metaInfo"` 27 | Data []os.File `form:"fileData"` 28 | } 29 | 30 | type DownloadReq struct { 31 | MetaInfo map[string]interface{} `json:"metaInfo"` //must include the location key and the 32 | } 33 | 34 | type FindReq struct { 35 | MetaInfo map[string]interface{} `json:"metaInfo"` //must include the location key 36 | } 37 | 38 | type QueryReq struct { 39 | MetaInfo map[string]interface{} `json:"metaInfo"` //mush include the owner name 40 | } 41 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/util/JWT_token_generate_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | "github.com/golang-jwt/jwt/v4" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestGenerateToken(t *testing.T) { 11 | cliams := make(jwt.MapClaims) 12 | cliams["exp"] = time.Now().Unix() + 300000000000000000 13 | cliams["iat"] = time.Now().Unix() 14 | cliams["user"] = "xiyang" 15 | token := jwt.New(jwt.SigningMethodHS256) 16 | token.Claims = cliams 17 | fmt.Println(token.SignedString([]byte("cloudstoragesystem"))) 18 | } 19 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/util/metaInfoUtil.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "errors" 5 | "fileServer/rpc/pb" 6 | "reflect" 7 | ) 8 | 9 | /** 10 | maybe has some question 11 | */ 12 | func ConvertRpcFileMeta(info *pb.FileMetaInfo) (result map[string]interface{}, err error) { 13 | result = make(map[string]interface{}, 10) 14 | refValueOf := reflect.ValueOf(*info) 15 | refTypeOf := reflect.TypeOf(*info) 16 | fieldNumber := refValueOf.NumField() 17 | for i := 0; i < fieldNumber; i++ { 18 | field := refValueOf.Field(i) 19 | fieldName := refTypeOf.Field(i).Name 20 | switch field.Kind() { 21 | case reflect.Invalid: 22 | err = errors.New("系统出错") 23 | break 24 | case reflect.Int, reflect.Int64: 25 | result[fieldName] = field.Int() 26 | case reflect.String: 27 | result[fieldName] = field.String() 28 | case reflect.Bool: 29 | result[fieldName] = field.Bool() 30 | case reflect.Float64, reflect.Float32: 31 | result[fieldName] = field.Float() 32 | default: 33 | continue 34 | } 35 | } 36 | return result, err 37 | } 38 | 39 | func ConvertRpcFileMetaList(info []*pb.FileMetaInfo) (result []map[string]interface{}, err error) { 40 | result = make([]map[string]interface{}, 10) 41 | for i := 0; i < len(info); i++ { 42 | result[i], err = ConvertRpcFileMeta(info[i]) 43 | if err != nil { 44 | return nil, err 45 | } 46 | } 47 | return result, err 48 
| } 49 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/internal/util/metaInfoUtil_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fileServer/rpc/pb" 5 | "fmt" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestConvertRpcFileMeta(t *testing.T) { 11 | meta, err := ConvertRpcFileMeta(&pb.FileMetaInfo{ 12 | Creator: "xiyang", 13 | CreateGroup: "xiyang", 14 | Name: "hello", 15 | CreateTime: time.Now().String(), 16 | Authority: "644", 17 | TypeOf: "file", 18 | UpdateTime: time.Now().String(), 19 | Size: 0, 20 | IsDir: false, 21 | DeleteTime: time.Now().String(), 22 | }) 23 | if err != nil { 24 | fmt.Println(err.Error()) 25 | return 26 | } 27 | for index, value := range meta { 28 | fmt.Printf("%s %v\n", index, value) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/api/readme.md: -------------------------------------------------------------------------------- 1 | ### 1. "uploadFile" 2 | 3 | 1. route definition 4 | 5 | - Url: /file/api/file/upload 6 | - Method: POST 7 | - Request: `uploadReq` 8 | - Response: `baseResponse` 9 | 10 | 2. request definition 11 | 12 | 13 | 14 | ```golang 15 | type UploadReq struct { 16 | MetaInfo map[string]interface{} `json:"metaInfo"` 17 | Data []byte `form:"fileData"` 18 | } 19 | ``` 20 | 21 | 22 | 3. response definition 23 | 24 | 25 | 26 | ```golang 27 | type BaseResponse struct { 28 | Result string `json:"result"` 29 | Message string `json:"message"` 30 | Data map[string]interface{} `json:"data"` 31 | } 32 | ``` 33 | 34 | ### 2. "downloadFile" 35 | 36 | 1. route definition 37 | 38 | - Url: /file/api/file/download 39 | - Method: GET 40 | - Request: `downloadReq` 41 | - Response: `baseResponse` 42 | 43 | 2. request definition 44 | 45 | 46 | 47 | ```golang 48 | type DownloadReq struct { 49 | MetaInfo map[string]interface{} `json:"metaInfo"` //must include the location key and the 50 | } 51 | ``` 52 | 53 | 54 | 3. response definition 55 | 56 | 57 | 58 | ```golang 59 | type BaseResponse struct { 60 | Result string `json:"result"` 61 | Message string `json:"message"` 62 | Data map[string]interface{} `json:"data"` 63 | } 64 | ``` 65 | 66 | ### 3. "getFileInfo" 67 | 68 | 1. route definition 69 | 70 | - Url: /file/api/file/get 71 | - Method: GET 72 | - Request: `findReq` 73 | - Response: `baseResponse` 74 | 75 | 2. request definition 76 | 77 | 78 | 79 | ```golang 80 | type FindReq struct { 81 | MetaInfo map[string]interface{} `json:"metaInfo"` //must include the location key 82 | } 83 | ``` 84 | 85 | 86 | 3. response definition 87 | 88 | 89 | 90 | ```golang 91 | type BaseResponse struct { 92 | Result string `json:"result"` 93 | Message string `json:"message"` 94 | Data map[string]interface{} `json:"data"` 95 | } 96 | ``` 97 | 98 | ### 4. "queryFileInfo" 99 | 100 | 1. route definition 101 | 102 | - Url: /file/api/file/query 103 | - Method: GET 104 | - Request: `queryReq` 105 | - Response: `baseResponse` 106 | 107 | 2. request definition 108 | 109 | 110 | 111 | ```golang 112 | type QueryReq struct { 113 | MetaInfo map[string]interface{} `json:"metaInfo"` //mush include the owner name 114 | } 115 | ``` 116 | 117 | 118 | 3. 
response definition 119 | 120 | 121 | 122 | ```golang 123 | type BaseResponse struct { 124 | Result string `json:"result"` 125 | Message string `json:"message"` 126 | Data map[string]interface{} `json:"data"` 127 | } 128 | ``` 129 | 130 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/fileServer.Dockefile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine AS builder 2 | 3 | ENV CGO_ENABLED 0 4 | ENV GOPROXY https://goproxy.cn,direct 5 | 6 | 7 | WORKDIR /build 8 | 9 | ADD go.mod . 10 | ADD go.sum . 11 | RUN go mod download 12 | COPY . . 13 | RUN mkdir -p /app/fileServer/etc 14 | COPY rpc/etc/fileserver.yaml /app/fileServer/etc 15 | COPY api/etc/file-api.yaml /app/fileServer/etc 16 | WORKDIR /build/api 17 | RUN go build -ldflags="-s -w" -o /app/fileServer/api ./file.go 18 | WORKDIR /build/rpc 19 | RUN go build -ldflags="-s -w" -o /app/fileServer/rpc ./fileserver.go 20 | 21 | 22 | 23 | 24 | FROM scratch AS fileserverapi 25 | 26 | ENV TZ Asia/Shanghai 27 | 28 | 29 | WORKDIR /app 30 | COPY --from=builder /app/fileServer/etc/file-api.yaml /app/file-api.yaml 31 | COPY --from=builder /app/fileServer/api /app/api 32 | EXPOSE 8889 33 | CMD ["./api","-f","file-api.yaml"] 34 | 35 | 36 | FROM scratch AS fileserverrpc 37 | ENV TZ Asia/shanghai 38 | 39 | WORKDIR /app 40 | COPY --from=builder /app/fileServer/etc/fileserver.yaml /app/fileserver.yaml 41 | COPY --from=builder /app/fileServer/rpc /app/rpc 42 | EXPOSE 8001 43 | CMD ["./rpc","-f","fileserver.yaml"] 44 | 45 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/go.mod: -------------------------------------------------------------------------------- 1 | module fileServer 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/eventials/go-tus v0.0.0-20220610120217-05d0564bb571 7 | github.com/golang-jwt/jwt/v4 v4.4.3 8 | github.com/hibiken/asynq v0.24.0 9 | github.com/lib/pq v1.10.7 10 | github.com/libp2p/go-libp2p v0.24.2 11 | github.com/zeromicro/go-zero v1.4.3 12 | google.golang.org/grpc v1.52.0 13 | google.golang.org/protobuf v1.30.0 14 | 15 | ) 16 | 17 | require ( 18 | github.com/beorn7/perks v1.0.1 // indirect 19 | github.com/cenkalti/backoff/v4 v4.1.3 // indirect 20 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 21 | github.com/coreos/go-semver v0.3.0 // indirect 22 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 23 | github.com/davecgh/go-spew v1.1.1 // indirect 24 | github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect 25 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 26 | github.com/fatih/color v1.13.0 // indirect 27 | github.com/felixge/fgprof v0.9.3 // indirect 28 | github.com/go-logr/logr v1.2.3 // indirect 29 | github.com/go-logr/stdr v1.2.2 // indirect 30 | github.com/go-redis/redis/v8 v8.11.5 // indirect 31 | github.com/go-sql-driver/mysql v1.7.0 // indirect 32 | github.com/gogo/protobuf v1.3.2 // indirect 33 | github.com/golang/mock v1.6.0 // indirect 34 | github.com/golang/protobuf v1.5.2 // indirect 35 | github.com/google/go-cmp v0.5.9 // indirect 36 | github.com/google/gofuzz v1.2.0 // indirect 37 | github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect 38 | github.com/google/uuid v1.3.0 // indirect 39 | github.com/googleapis/gnostic v0.5.5 // indirect 40 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect 41 | github.com/ipfs/go-cid v0.3.2 // indirect 42 | 
github.com/json-iterator/go v1.1.12 // indirect 43 | github.com/klauspost/cpuid/v2 v2.2.4 // indirect 44 | github.com/kr/pretty v0.3.0 // indirect 45 | github.com/libp2p/go-buffer-pool v0.1.0 // indirect 46 | github.com/libp2p/go-openssl v0.1.0 // indirect 47 | github.com/mattn/go-colorable v0.1.12 // indirect 48 | github.com/mattn/go-isatty v0.0.19 // indirect 49 | github.com/mattn/go-pointer v0.0.1 // indirect 50 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 51 | github.com/minio/sha256-simd v1.0.0 // indirect 52 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 53 | github.com/modern-go/reflect2 v1.0.2 // indirect 54 | github.com/mr-tron/base58 v1.2.0 // indirect 55 | github.com/multiformats/go-base32 v0.1.0 // indirect 56 | github.com/multiformats/go-base36 v0.2.0 // indirect 57 | github.com/multiformats/go-multiaddr v0.8.0 // indirect 58 | github.com/multiformats/go-multibase v0.1.1 // indirect 59 | github.com/multiformats/go-multicodec v0.7.0 // indirect 60 | github.com/multiformats/go-multihash v0.2.1 // indirect 61 | github.com/multiformats/go-varint v0.0.7 // indirect 62 | github.com/onsi/gomega v1.24.0 // indirect 63 | github.com/openzipkin/zipkin-go v0.4.0 // indirect 64 | github.com/pelletier/go-toml/v2 v2.0.8 // indirect 65 | github.com/prometheus/client_golang v1.14.0 // indirect 66 | github.com/prometheus/client_model v0.3.0 // indirect 67 | github.com/prometheus/common v0.37.0 // indirect 68 | github.com/prometheus/procfs v0.8.0 // indirect 69 | github.com/robfig/cron/v3 v3.0.1 // indirect 70 | github.com/rogpeppe/go-internal v1.8.1 // indirect 71 | github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect 72 | github.com/spaolacci/murmur3 v1.1.0 // indirect 73 | github.com/spf13/cast v1.4.1 // indirect 74 | go.etcd.io/etcd/api/v3 v3.5.5 // indirect 75 | go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect 76 | go.etcd.io/etcd/client/v3 v3.5.5 // indirect 77 | go.opentelemetry.io/otel v1.10.0 // indirect 78 | go.opentelemetry.io/otel/exporters/jaeger v1.10.0 // indirect 79 | go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect 80 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect 81 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect 82 | go.opentelemetry.io/otel/exporters/zipkin v1.10.0 // indirect 83 | go.opentelemetry.io/otel/sdk v1.10.0 // indirect 84 | go.opentelemetry.io/otel/trace v1.10.0 // indirect 85 | go.opentelemetry.io/proto/otlp v0.19.0 // indirect 86 | go.uber.org/atomic v1.10.0 // indirect 87 | go.uber.org/automaxprocs v1.5.1 // indirect 88 | go.uber.org/multierr v1.8.0 // indirect 89 | go.uber.org/zap v1.24.0 // indirect 90 | golang.org/x/crypto v0.9.0 // indirect 91 | golang.org/x/net v0.10.0 // indirect 92 | golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect 93 | golang.org/x/sys v0.8.0 // indirect 94 | golang.org/x/term v0.8.0 // indirect 95 | golang.org/x/text v0.9.0 // indirect 96 | golang.org/x/time v0.3.0 // indirect 97 | google.golang.org/appengine v1.6.7 // indirect 98 | google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect 99 | gopkg.in/inf.v0 v0.9.1 // indirect 100 | gopkg.in/yaml.v2 v2.4.0 // indirect 101 | gopkg.in/yaml.v3 v3.0.1 // indirect 102 | k8s.io/api v0.23.0 // indirect 103 | k8s.io/apimachinery v0.23.0 // indirect 104 | k8s.io/client-go v0.23.0 // indirect 105 | k8s.io/klog/v2 v2.80.1 // indirect 106 | k8s.io/kube-openapi 
v0.0.0-20211115234752-e816edb12b65 // indirect 107 | k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 // indirect 108 | lukechampine.com/blake3 v1.1.7 // indirect 109 | sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect 110 | sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect 111 | sigs.k8s.io/yaml v1.3.0 // indirect 112 | ) 113 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/model/PojoDB/fileMetaPojo/filemetatablemodel.go: -------------------------------------------------------------------------------- 1 | package fileMetaPojo 2 | 3 | import ( 4 | "github.com/zeromicro/go-zero/core/stores/cache" 5 | "github.com/zeromicro/go-zero/core/stores/sqlx" 6 | ) 7 | 8 | var _ FilemetatableModel = (*customFilemetatableModel)(nil) 9 | 10 | type ( 11 | // FilemetatableModel is an interface to be customized, add more methods here, 12 | // and implement the added methods in customFilemetatableModel. 13 | FilemetatableModel interface { 14 | filemetatableModel 15 | } 16 | 17 | customFilemetatableModel struct { 18 | *defaultFilemetatableModel 19 | } 20 | ) 21 | 22 | // NewFilemetatableModel returns a model for the database table. 23 | func NewFilemetatableModel(conn sqlx.SqlConn, c cache.CacheConf) FilemetatableModel { 24 | return &customFilemetatableModel{ 25 | defaultFilemetatableModel: newFilemetatableModel(conn, c), 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/model/PojoDB/fileMetaPojo/filemetatablemodel_gen.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT! 2 | 3 | package fileMetaPojo 4 | 5 | import ( 6 | "context" 7 | "database/sql" 8 | "fmt" 9 | "github.com/zeromicro/go-zero/core/logx" 10 | "strings" 11 | "time" 12 | 13 | "github.com/zeromicro/go-zero/core/stores/builder" 14 | "github.com/zeromicro/go-zero/core/stores/cache" 15 | "github.com/zeromicro/go-zero/core/stores/sqlc" 16 | "github.com/zeromicro/go-zero/core/stores/sqlx" 17 | "github.com/zeromicro/go-zero/core/stringx" 18 | ) 19 | 20 | var ( 21 | filemetatableFieldNames = builder.RawFieldNames(&Filemetatable{}) 22 | filemetatableRows = strings.Join(filemetatableFieldNames, ",") 23 | filemetatableRowsExpectAutoSet = strings.Join(stringx.Remove(filemetatableFieldNames, "`updated_at`", "`update_time`", "`create_at`", "`created_at`", "`create_time`", "`update_at`"), ",") 24 | filemetatableRowsWithPlaceHolder = strings.Join(stringx.Remove(filemetatableFieldNames, "`creator`", "`updated_at`", "`update_time`", "`create_at`", "`created_at`", "`create_time`", "`update_at`"), "=?,") + "=?" 
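// With the Filemetatable struct below, the two joined strings expand roughly to
// the following (assuming builder.RawFieldNames keeps struct-field order;
// create_time and update_time are stripped as auto-set columns):
//
//	filemetatableRowsExpectAutoSet:
//	    `creator`,`createGroup`,`name`,`description`,`authority`,`typeOf`,`size`,`isDir`,`delete_time`
//	filemetatableRowsWithPlaceHolder:
//	    `createGroup`=?,`name`=?,`description`=?,`authority`=?,`typeOf`=?,`size`=?,`isDir`=?,`delete_time`=?
//
// Insert below binds nine values for the nine insert columns; Update binds seven
// values plus the creator key, so the `description` placeholder appears to be
// left without an argument.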
25 | 26 | cacheCloudStorageSystemFilemetatableCreatorPrefix = "cache:cloudStorageSystem:filemetatable:creator:" 27 | ) 28 | 29 | type ( 30 | filemetatableModel interface { 31 | Insert(ctx context.Context, data *Filemetatable) (sql.Result, error) 32 | FindOne(ctx context.Context, creator string, name string) (*Filemetatable, error) 33 | Update(ctx context.Context, data *Filemetatable) error 34 | Delete(ctx context.Context, creator string, name string) error 35 | Query(ctx context.Context, creator string) ([]*Filemetatable, error) 36 | DeleteHard(ctx context.Context, timeInt time.Time) error 37 | } 38 | 39 | defaultFilemetatableModel struct { 40 | sqlc.CachedConn 41 | table string 42 | } 43 | 44 | Filemetatable struct { 45 | Creator string `db:"creator"` 46 | CreateGroup string `db:"createGroup"` 47 | Name string `db:"name"` 48 | Description string `db:"description"` 49 | CreateTime time.Time `db:"create_time"` 50 | Authority string `db:"authority"` 51 | TypeOf string `db:"typeOf"` 52 | UpdateTime time.Time `db:"update_time"` 53 | Size int64 `db:"size"` 54 | IsDir int `db:"isDir"` 55 | DeleteTime sql.NullTime `db:"delete_time"` 56 | } 57 | ) 58 | 59 | func newFilemetatableModel(conn sqlx.SqlConn, c cache.CacheConf) *defaultFilemetatableModel { 60 | return &defaultFilemetatableModel{ 61 | CachedConn: sqlc.NewConn(conn, c), 62 | table: "`filemetatable`", 63 | } 64 | } 65 | 66 | func (m *defaultFilemetatableModel) Delete(ctx context.Context, creator string, name string) error { 67 | cloudStorageSystemFilemetatableCreatorKey := fmt.Sprintf("%s%v%v", cacheCloudStorageSystemFilemetatableCreatorPrefix, creator, name) 68 | _, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { 69 | query := fmt.Sprintf("delete from %s where `creator` = ? and `name`= ? ", m.table) 70 | return conn.ExecCtx(ctx, query, creator, name) 71 | }, cloudStorageSystemFilemetatableCreatorKey) 72 | return err 73 | } 74 | 75 | func (m *defaultFilemetatableModel) FindOne(ctx context.Context, creator string, name string) (*Filemetatable, error) { 76 | cloudStorageSystemFilemetatableCreatorKey := fmt.Sprintf("%s%v%v", cacheCloudStorageSystemFilemetatableCreatorPrefix, creator, name) 77 | var resp Filemetatable 78 | err := m.QueryRowCtx(ctx, &resp, cloudStorageSystemFilemetatableCreatorKey, func(ctx context.Context, conn sqlx.SqlConn, v interface{}) error { 79 | query := fmt.Sprintf("select %s from %s where `creator` = ? and `name` = ? 
limit 1", filemetatableRows, m.table) 80 | return conn.QueryRowCtx(ctx, v, query, creator, name) 81 | }) 82 | switch err { 83 | case nil: 84 | return &resp, nil 85 | case sqlc.ErrNotFound: 86 | return nil, ErrNotFound 87 | default: 88 | return nil, err 89 | } 90 | } 91 | func (m *defaultFilemetatableModel) Query(ctx context.Context, creator string) ([]*Filemetatable, error) { 92 | var resp []*Filemetatable 93 | err := m.QueryRowsNoCacheCtx(ctx, &resp, fmt.Sprintf("select %s from %s where `creator` = '%s'", filemetatableRows, m.table, creator)) 94 | switch err { 95 | case nil: 96 | return resp, nil 97 | case sqlc.ErrNotFound: 98 | return nil, ErrNotFound 99 | default: 100 | return nil, err 101 | } 102 | } 103 | func (m *defaultFilemetatableModel) DeleteHard(ctx context.Context, timeInt time.Time) error { 104 | err := m.CachedConn.TransactCtx(ctx, func(ctx context.Context, session sqlx.Session) error { 105 | var resp []*Filemetatable 106 | err := session.QueryRowsCtx(ctx, &resp, fmt.Sprintf("select %s from %s where `delete_time` < \"%s\" for update", filemetatableRows, m.table, timeInt)) 107 | if err != nil { 108 | if err == sqlc.ErrNotFound { 109 | return nil 110 | } 111 | return err 112 | } 113 | for _, value := range resp { 114 | _, err := session.ExecCtx(ctx, fmt.Sprintf("delete from %s where 'creator'= %s and 'name' = %s ", m.table, value.Creator, value.Name)) 115 | if err != nil { 116 | fmt.Printf("delete hard the filemeta info error:%v", err) 117 | return err 118 | } 119 | logx.Info(fmt.Sprintf("successful delete hard the filemetinfo------> creator:%s , filename:%s", value.Creator, value.Name)) 120 | } 121 | return nil 122 | }) 123 | if err != nil { 124 | return err 125 | } 126 | return nil 127 | } 128 | func (m *defaultFilemetatableModel) Insert(ctx context.Context, data *Filemetatable) (sql.Result, error) { 129 | cloudStorageSystemFilemetatableCreatorKey := fmt.Sprintf("%s%v:%v", cacheCloudStorageSystemFilemetatableCreatorPrefix, data.Creator, data.Name) 130 | ret, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { 131 | query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?, ?, ?, ?, ?)", m.table, filemetatableRowsExpectAutoSet) 132 | return conn.ExecCtx(ctx, query, data.Creator, data.CreateGroup, data.Name, data.Description, data.Authority, data.TypeOf, data.Size, data.IsDir, data.DeleteTime) 133 | }, cloudStorageSystemFilemetatableCreatorKey) 134 | return ret, err 135 | } 136 | 137 | func (m *defaultFilemetatableModel) Update(ctx context.Context, data *Filemetatable) error { 138 | cloudStorageSystemFilemetatableCreatorKey := fmt.Sprintf("%s%v:%v", cacheCloudStorageSystemFilemetatableCreatorPrefix, data.Creator, data.Name) 139 | _, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { 140 | query := fmt.Sprintf("update %s set %s where `creator` = ?", m.table, filemetatableRowsWithPlaceHolder) 141 | return conn.ExecCtx(ctx, query, data.CreateGroup, data.Name, data.Authority, data.TypeOf, data.Size, data.IsDir, data.DeleteTime, data.Creator) 142 | }, cloudStorageSystemFilemetatableCreatorKey) 143 | return err 144 | } 145 | func (m *defaultFilemetatableModel) query(ctx context.Context, creator string) ([]Filemetatable, error) { 146 | return nil, nil 147 | } 148 | func (m *defaultFilemetatableModel) formatPrimary(primary interface{}) string { 149 | return fmt.Sprintf("%s%v", cacheCloudStorageSystemFilemetatableCreatorPrefix, primary) 150 | } 151 | 152 | func (m 
*defaultFilemetatableModel) queryPrimary(ctx context.Context, conn sqlx.SqlConn, v, primary interface{}) error { 153 | query := fmt.Sprintf("select %s from %s where `creator` = ? limit 1", filemetatableRows, m.table) 154 | return conn.QueryRowCtx(ctx, v, query, primary) 155 | } 156 | 157 | func (m *defaultFilemetatableModel) tableName() string { 158 | return m.table 159 | } 160 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/model/PojoDB/fileMetaPojo/vars.go: -------------------------------------------------------------------------------- 1 | package fileMetaPojo 2 | 3 | import "github.com/zeromicro/go-zero/core/stores/sqlx" 4 | 5 | var ErrNotFound = sqlx.ErrNotFound 6 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/model/PojoDB/usergroupPojo/usergroupmodel.go: -------------------------------------------------------------------------------- 1 | package usergroupPojo 2 | 3 | import ( 4 | "github.com/lib/pq" 5 | "github.com/zeromicro/go-zero/core/stores/cache" 6 | "github.com/zeromicro/go-zero/core/stores/sqlx" 7 | ) 8 | 9 | var _ UsergroupModel = (*customUsergroupModel)(nil) 10 | 11 | type ( 12 | // UsergroupModel is an interface to be customized, add more methods here, 13 | // and implement the added methods in customUsergroupModel. 14 | UsergroupModel interface { 15 | usergroupModel 16 | } 17 | 18 | customUsergroupModel struct { 19 | *defaultUsergroupModel 20 | } 21 | ) 22 | 23 | // NewUsergroupModel returns a model for the database table. 24 | func NewUsergroupModel(conn sqlx.SqlConn, c cache.CacheConf) UsergroupModel { 25 | return &customUsergroupModel{ 26 | defaultUsergroupModel: newUsergroupModel(conn, c), 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/model/PojoDB/usergroupPojo/usergroupmodel_gen.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT! 2 | 3 | package usergroupPojo 4 | 5 | import ( 6 | "context" 7 | "database/sql" 8 | "fmt" 9 | "strings" 10 | 11 | "github.com/zeromicro/go-zero/core/stores/builder" 12 | "github.com/zeromicro/go-zero/core/stores/cache" 13 | "github.com/zeromicro/go-zero/core/stores/sqlc" 14 | "github.com/zeromicro/go-zero/core/stores/sqlx" 15 | "github.com/zeromicro/go-zero/core/stringx" 16 | ) 17 | 18 | var ( 19 | usergroupFieldNames = builder.RawFieldNames(&Usergroup{}) 20 | usergroupRows = strings.Join(usergroupFieldNames, ",") 21 | usergroupRowsExpectAutoSet = strings.Join(stringx.Remove(usergroupFieldNames, "`update_time`", "`create_at`", "`created_at`", "`create_time`", "`update_at`", "`updated_at`"), ",") 22 | usergroupRowsWithPlaceHolder = strings.Join(stringx.Remove(usergroupFieldNames, "`groupName`", "`update_time`", "`create_at`", "`created_at`", "`create_time`", "`update_at`", "`updated_at`"), "=?,") + "=?" 
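// The usergroup table is keyed by (groupName, userName), and the FindOne /
// Delete implementations further down take both columns. The interface declared
// just below lists only groupName, which does not appear to satisfy the
// `var _ UsergroupModel` assertion in usergroupmodel.go; a sketch of matching
// signatures:
//
//	FindOne(ctx context.Context, groupName, userName string) (*Usergroup, error)
//	Delete(ctx context.Context, groupName, userName string) error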
23 | 24 | cacheCloudstoragesystemUsergroupGroupNamePrefix = "cache:cloudstoragesystem:usergroup:groupName:" 25 | ) 26 | 27 | type ( 28 | usergroupModel interface { 29 | Insert(ctx context.Context, data *Usergroup) (sql.Result, error) 30 | FindOne(ctx context.Context, groupName string) (*Usergroup, error) 31 | Update(ctx context.Context, data *Usergroup) error 32 | Delete(ctx context.Context, groupName string) error 33 | } 34 | 35 | defaultUsergroupModel struct { 36 | sqlc.CachedConn 37 | table string 38 | } 39 | 40 | Usergroup struct { 41 | GroupName string `db:"groupName"` 42 | UserName string `db:"userName"` 43 | } 44 | ) 45 | 46 | func newUsergroupModel(conn sqlx.SqlConn, c cache.CacheConf) *defaultUsergroupModel { 47 | return &defaultUsergroupModel{ 48 | CachedConn: sqlc.NewConn(conn, c), 49 | table: "`usergroup`", 50 | } 51 | } 52 | 53 | func (m *defaultUsergroupModel) Delete(ctx context.Context, groupName string, userName string) error { 54 | cloudstoragesystemUsergroupGroupNameKey := fmt.Sprintf("%s%v%v", cacheCloudstoragesystemUsergroupGroupNamePrefix, groupName, userName) 55 | _, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { 56 | query := fmt.Sprintf("delete from %s where `groupName` = ? and `userName` = ? ", m.table) 57 | return conn.ExecCtx(ctx, query, groupName, userName) 58 | }, cloudstoragesystemUsergroupGroupNameKey) 59 | return err 60 | } 61 | 62 | func (m *defaultUsergroupModel) FindOne(ctx context.Context, groupName string, userName string) (*Usergroup, error) { 63 | cloudstoragesystemUsergroupGroupNameKey := fmt.Sprintf("%s%v%v", cacheCloudstoragesystemUsergroupGroupNamePrefix, groupName, userName) 64 | var resp Usergroup 65 | err := m.QueryRowCtx(ctx, &resp, cloudstoragesystemUsergroupGroupNameKey, func(ctx context.Context, conn sqlx.SqlConn, v interface{}) error { 66 | query := fmt.Sprintf("select %s from %s where `groupName` = ? and `userName`= ? limit 1", usergroupRows, m.table) 67 | return conn.QueryRowCtx(ctx, v, query, groupName, userName) 68 | }) 69 | switch err { 70 | case nil: 71 | return &resp, nil 72 | case sqlc.ErrNotFound: 73 | return nil, ErrNotFound 74 | default: 75 | return nil, err 76 | } 77 | } 78 | 79 | //TODO need the query method 80 | 81 | func (m *defaultUsergroupModel) Insert(ctx context.Context, data *Usergroup) (sql.Result, error) { 82 | cloudstoragesystemUsergroupGroupNameKey := fmt.Sprintf("%s%v%v", cacheCloudstoragesystemUsergroupGroupNamePrefix, data.GroupName, data.UserName) 83 | ret, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { 84 | query := fmt.Sprintf("insert into %s (%s) values (?, ?)", m.table, usergroupRowsExpectAutoSet) 85 | return conn.ExecCtx(ctx, query, data.GroupName, data.UserName) 86 | }, cloudstoragesystemUsergroupGroupNameKey) 87 | return ret, err 88 | } 89 | 90 | func (m *defaultUsergroupModel) Update(ctx context.Context, data *Usergroup) error { 91 | cloudstoragesystemUsergroupGroupNameKey := fmt.Sprintf("%s%v%v", cacheCloudstoragesystemUsergroupGroupNamePrefix, data.GroupName, data.UserName) 92 | _, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { 93 | query := fmt.Sprintf("update %s set %s where `groupName` = ? and `userName` = ? 
", m.table, usergroupRowsWithPlaceHolder) 94 | return conn.ExecCtx(ctx, query, data.UserName, data.GroupName, data.UserName) 95 | }, cloudstoragesystemUsergroupGroupNameKey) 96 | return err 97 | } 98 | 99 | func (m *defaultUsergroupModel) formatPrimary(primary interface{}) string { 100 | return fmt.Sprintf("%s%v", cacheCloudstoragesystemUsergroupGroupNamePrefix, primary) 101 | } 102 | 103 | func (m *defaultUsergroupModel) queryPrimary(ctx context.Context, conn sqlx.SqlConn, v, primary interface{}) error { 104 | query := fmt.Sprintf("select %s from %s where `groupName` = ? limit 1", usergroupRows, m.table) 105 | return conn.QueryRowCtx(ctx, v, query, primary) 106 | } 107 | 108 | func (m *defaultUsergroupModel) tableName() string { 109 | return m.table 110 | } 111 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/model/PojoDB/usergroupPojo/vars.go: -------------------------------------------------------------------------------- 1 | package usergroupPojo 2 | 3 | import "github.com/zeromicro/go-zero/core/stores/sqlx" 4 | 5 | var ErrNotFound = sqlx.ErrNotFound 6 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/Job/asynqserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fileServer/mq/Job/internal/config" 6 | "fileServer/mq/Job/internal/logic" 7 | "fileServer/mq/Job/internal/svc" 8 | "flag" 9 | "fmt" 10 | "github.com/zeromicro/go-zero/core/logx" 11 | "os" 12 | 13 | "github.com/zeromicro/go-zero/core/conf" 14 | ) 15 | 16 | var configFile = flag.String("f", "etc/asynqserver-api.yaml", "the config file") 17 | 18 | func main() { 19 | flag.Parse() 20 | 21 | var c config.Config 22 | conf.MustLoad(*configFile, &c) 23 | 24 | serverContext := svc.NewServiceContext(c) 25 | ctx := context.Background() 26 | cronJob := logic.NewCronJob(ctx, serverContext) 27 | mux := cronJob.Register() 28 | if err := serverContext.Scheduler.Run(mux); err != nil { 29 | logx.WithContext(ctx).Errorf("!!!CronJobErr!!! 
run err:%+v", err) 30 | os.Exit(1) 31 | } 32 | fmt.Printf("Starting cronJob server at %s:%d...\n", c.Host, c.Port) 33 | } 34 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/Job/etc/asynqserver-api.yaml: -------------------------------------------------------------------------------- 1 | Name: asynqserver-api 2 | Host: 0.0.0.0 3 | Port: 3002 4 | 5 | #监控 6 | Prometheus: 7 | Host: 0.0.0.0 8 | Port: 4010 9 | Path: /metrics 10 | 11 | #Link Tracking 12 | Telemetry: 13 | Name: mqueue-job 14 | Endpoint: http://jaeger:14268/api/traces 15 | Sampler: 1.0 16 | Batcher: jaeger 17 | 18 | Log: 19 | ServiceName: mqueue-fileServer 20 | Level: error 21 | 22 | Redis: 23 | Host: localhost:6379 24 | Type: node 25 | Pass: root 26 | 27 | 28 | FileserverClient: 29 | Etcd: 30 | Hosts: [ localhost:2379 ] 31 | Key: fileserver.rpc 32 | 33 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/Job/internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "github.com/zeromicro/go-zero/core/stores/redis" 5 | "github.com/zeromicro/go-zero/rest" 6 | "github.com/zeromicro/go-zero/zrpc" 7 | ) 8 | 9 | type Config struct { 10 | rest.RestConf 11 | Redis redis.RedisConf 12 | FileserverClient zrpc.RpcClientConf 13 | } 14 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/Job/internal/logic/deleteFileMeta.go: -------------------------------------------------------------------------------- 1 | package logic 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fileServer/mq/Job/internal/svc" 7 | "fileServer/rpc/pb" 8 | "github.com/hibiken/asynq" 9 | "github.com/zeromicro/go-zero/core/logx" 10 | "time" 11 | ) 12 | 13 | type FileMetaDeleteHandler struct { 14 | svcCtx *svc.ServiceContext 15 | } 16 | 17 | func NewFileMetaDeleteHandler(ctx *svc.ServiceContext) *FileMetaDeleteHandler { 18 | return &FileMetaDeleteHandler{svcCtx: ctx} 19 | } 20 | func (l *FileMetaDeleteHandler) ProcessTask(ctx context.Context, _ *asynq.Task) error { 21 | expireTime := time.UnixMilli(time.Now().UnixMilli() - time.Hour.Milliseconds()) 22 | hard, err := l.svcCtx.FileserverRpc.DeleteHard(ctx, &pb.BaseTime{Date: expireTime.String()}) 23 | if err != nil || hard.Result != "true" { 24 | logx.Errorf("定时删除filemeta错误:%v", err) 25 | return errors.New("delete task failed,maybe should retry") 26 | } 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/Job/internal/logic/routes.go: -------------------------------------------------------------------------------- 1 | package logic 2 | 3 | import ( 4 | "context" 5 | "fileServer/mq/Job/internal/svc" 6 | "fileServer/mq/Job/jobtype" 7 | "github.com/hibiken/asynq" 8 | ) 9 | 10 | type CronJob struct { 11 | ctx context.Context 12 | svcCtx *svc.ServiceContext 13 | } 14 | 15 | func NewCronJob(ctx context.Context, svcCtx *svc.ServiceContext) *CronJob { 16 | return &CronJob{ctx: ctx, svcCtx: svcCtx} 17 | } 18 | 19 | func (l *CronJob) Register() *asynq.ServeMux { 20 | mux := asynq.NewServeMux() 21 | mux.Handle(jobtype.ScheduleDeleteMetaInfo, NewFileMetaDeleteHandler(l.svcCtx)) 22 | return mux 23 | } 24 | -------------------------------------------------------------------------------- 
/app/CloudStorageBackend/FileServer/mq/Job/internal/svc/asynqServer.go: -------------------------------------------------------------------------------- 1 | package svc 2 | 3 | import ( 4 | "fileServer/mq/Job/internal/config" 5 | "fmt" 6 | "github.com/hibiken/asynq" 7 | ) 8 | 9 | func NewAsynqServer(c config.Config) *asynq.Server { 10 | return asynq.NewServer( 11 | asynq.RedisClientOpt{Addr: c.Redis.Host, Password: c.Redis.Pass}, 12 | asynq.Config{ 13 | Concurrency: 20, 14 | IsFailure: func(err error) bool { 15 | fmt.Printf("asynq server exec task isFailure ========>>>>>> err: %+v \n", err) 16 | return true 17 | }, 18 | }, 19 | ) 20 | } 21 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/Job/internal/svc/servicecontext.go: -------------------------------------------------------------------------------- 1 | package svc 2 | 3 | import ( 4 | "fileServer/mq/Job/internal/config" 5 | "fileServer/rpc/fileserver" 6 | "github.com/hibiken/asynq" 7 | "github.com/zeromicro/go-zero/zrpc" 8 | ) 9 | 10 | type ServiceContext struct { 11 | Config config.Config 12 | Scheduler *asynq.Server 13 | FileserverRpc fileserver.FileServer 14 | } 15 | 16 | func NewServiceContext(c config.Config) *ServiceContext { 17 | return &ServiceContext{ 18 | Config: c, 19 | FileserverRpc: fileserver.NewFileServer(zrpc.MustNewClient(c.FileserverClient)), 20 | Scheduler: NewAsynqServer(c), 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/Job/jobtype/JobType.go: -------------------------------------------------------------------------------- 1 | package jobtype 2 | 3 | const ScheduleDeleteMetaInfo = "schedule:metainfo:delete" 4 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/scheduleClient/etc/mq-api.yaml: -------------------------------------------------------------------------------- 1 | Name: mq-api 2 | Host: 0.0.0.0 3 | Port: 3003 4 | 5 | 6 | #监控 7 | Prometheus: 8 | Host: 0.0.0.0 9 | Port: 4010 10 | Path: /metrics 11 | 12 | #Link Tracking 13 | Telemetry: 14 | Name: mqueue-job 15 | Endpoint: http://jaeger:14268/api/traces 16 | Sampler: 1.0 17 | Batcher: jaeger 18 | 19 | Log: 20 | ServiceName: mqueue-fileServer 21 | Level: error 22 | 23 | Redis: 24 | Host: localhost:6379 25 | Type: node 26 | Pass: root 27 | 28 | 29 | FileserverClient: 30 | Etcd: 31 | Hosts: [ localhost:2379 ] 32 | Key: fileserver.rpc 33 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/scheduleClient/internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "github.com/zeromicro/go-zero/core/stores/redis" 5 | "github.com/zeromicro/go-zero/rest" 6 | "github.com/zeromicro/go-zero/zrpc" 7 | ) 8 | 9 | type Config struct { 10 | rest.RestConf 11 | Redis redis.RedisConf 12 | FileserverClient zrpc.RpcClientConf 13 | } 14 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/scheduleClient/internal/svc/asynqClient.go: -------------------------------------------------------------------------------- 1 | package svc 2 | 3 | import ( 4 | "fileServer/mq/scheduleClient/internal/config" 5 | "fmt" 6 | "github.com/hibiken/asynq" 7 | "time" 8 | ) 9 | 10 | func NewAsynqClient(c config.Config) *asynq.Scheduler { 11 | 
location, _ := time.LoadLocation("Asia/Shanghai") 12 | return asynq.NewScheduler( 13 | asynq.RedisClientOpt{ 14 | Addr: c.Redis.Host, Password: c.Redis.Pass, 15 | }, &asynq.SchedulerOpts{ 16 | Location: location, 17 | EnqueueErrorHandler: func(task *asynq.Task, opts []asynq.Option, err error) { 18 | fmt.Printf("Scheduler EnqueueErrorHandler err : %+v , task : %+v", err, task) 19 | }, 20 | }, 21 | ) 22 | } 23 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/scheduleClient/internal/svc/serverContext.go: -------------------------------------------------------------------------------- 1 | package svc 2 | 3 | import ( 4 | "fileServer/mq/scheduleClient/internal/config" 5 | "fileServer/rpc/fileserver" 6 | "github.com/hibiken/asynq" 7 | "github.com/zeromicro/go-zero/zrpc" 8 | ) 9 | 10 | type ServiceContext struct { 11 | Config config.Config 12 | Scheduler *asynq.Scheduler 13 | FileserverRpc fileserver.FileServer 14 | } 15 | 16 | func NewServiceContext(c config.Config) *ServiceContext { 17 | return &ServiceContext{ 18 | Config: c, 19 | FileserverRpc: fileserver.NewFileServer(zrpc.MustNewClient(c.FileserverClient)), 20 | Scheduler: NewAsynqClient(c), 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/scheduleClient/internal/task/hardDeleteJob.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fileServer/mq/Job/jobtype" 5 | "github.com/hibiken/asynq" 6 | ) 7 | 8 | func (l *MqueueScheduler) hardDeleteFileMeta() { 9 | task := asynq.NewTask(jobtype.ScheduleDeleteMetaInfo, nil) 10 | //重试两次,避免排他锁造成性能下降的很快,这里避免使用指数退避算法,考虑到删除这个可能会成为一个大体量的操作,会造成 其他服务的服务停止从而影响系统可用性 11 | l.svcCtx.Scheduler.Register("*/1 * * * *", task, asynq.MaxRetry(2), asynq.Queue("FileDeleteSchedule")) 12 | } 13 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/scheduleClient/internal/task/register.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "fileServer/mq/scheduleClient/internal/svc" 6 | ) 7 | 8 | type MqueueScheduler struct { 9 | ctx context.Context 10 | svcCtx *svc.ServiceContext 11 | } 12 | 13 | func NewCronScheduler(ctx context.Context, svcCtx *svc.ServiceContext) *MqueueScheduler { 14 | return &MqueueScheduler{ 15 | ctx: ctx, 16 | svcCtx: svcCtx, 17 | } 18 | } 19 | 20 | func (l *MqueueScheduler) Register() { 21 | l.hardDeleteFileMeta() 22 | } 23 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/mq/scheduleClient/mq.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fileServer/mq/scheduleClient/internal/config" 6 | "fileServer/mq/scheduleClient/internal/svc" 7 | "fileServer/mq/scheduleClient/internal/task" 8 | "flag" 9 | "github.com/zeromicro/go-zero/core/conf" 10 | "github.com/zeromicro/go-zero/core/logx" 11 | "os" 12 | ) 13 | 14 | var configFile = flag.String("f", "etc/mq-api.yaml", "the config file") 15 | 16 | func main() { 17 | flag.Parse() 18 | 19 | var c config.Config 20 | conf.MustLoad(*configFile, &c) 21 | logx.DisableStat() 22 | if err := c.SetUp(); err != nil { 23 | panic(err) 24 | } 25 | server := svc.NewServiceContext(c) 26 | serContext := 
task.NewCronScheduler(context.Background(), server) 27 | serContext.Register() 28 | if err := server.Scheduler.Run(); err != nil { 29 | logx.Errorf("!!!MqueueSchedulerErr!!! run err:%+v", err) 30 | os.Exit(1) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/rpc/README.md: -------------------------------------------------------------------------------- 1 | ## fileserver rpc 2 | 3 | 4 | port 8001 5 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/rpc/etc/fileserver.yaml: -------------------------------------------------------------------------------- 1 | Name: fileserver.rpc 2 | ListenOn: 0.0.0.0:8001 3 | Etcd: 4 | Hosts: 5 | - etcd:2379 6 | Key: fileserver.rpc 7 | #Telemetry: 8 | # Name: fileServer-api 9 | # Endpoint: http://jaeger:14268/api/traces 10 | # Sampler: 1.0 11 | # Batcher: jaeger 12 | 13 | DB: 14 | DataSource: root:root@tcp(mysql:3306)/cloudstoragesystem?charset=utf8&parseTime=true 15 | Cache: 16 | - Host: redis:6379 17 | Pass: root 18 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/rpc/fileserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fileServer/rpc/internal/config" 5 | "fileServer/rpc/internal/server" 6 | "fileServer/rpc/internal/svc" 7 | "fileServer/rpc/pb" 8 | "flag" 9 | "fmt" 10 | "github.com/zeromicro/go-zero/core/conf" 11 | "github.com/zeromicro/go-zero/core/service" 12 | "github.com/zeromicro/go-zero/zrpc" 13 | "google.golang.org/grpc" 14 | "google.golang.org/grpc/reflection" 15 | ) 16 | 17 | var configFile = flag.String("f", "etc/fileserver.yaml", "the config file") 18 | 19 | func main() { 20 | flag.Parse() 21 | 22 | var c config.Config 23 | conf.MustLoad(*configFile, &c) 24 | ctx := svc.NewServiceContext(c) 25 | 26 | s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) { 27 | pb.RegisterFileServerServer(grpcServer, server.NewFileServerServer(ctx)) 28 | 29 | if c.Mode == service.DevMode || c.Mode == service.TestMode { 30 | reflection.Register(grpcServer) 31 | } 32 | }) 33 | defer s.Stop() 34 | fmt.Printf("Starting rpc server at %s...\n", c.ListenOn) 35 | s.Start() 36 | } 37 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/rpc/fileserver/fileserver.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT! 
2 | // Source: fileServer.proto 3 | 4 | package fileserver 5 | 6 | import ( 7 | "context" 8 | 9 | "fileServer/rpc/pb" 10 | 11 | "github.com/zeromicro/go-zero/zrpc" 12 | "google.golang.org/grpc" 13 | ) 14 | 15 | type ( 16 | BaseRes = pb.BaseRes 17 | BaseTime = pb.BaseTime 18 | FileMetaInfo = pb.FileMetaInfo 19 | FindFileReq = pb.FindFileReq 20 | QueryFileReq = pb.QueryFileReq 21 | QueryFileRes = pb.QueryFileRes 22 | Request = pb.Request 23 | Response = pb.Response 24 | 25 | FileServer interface { 26 | // 就绪性探针 27 | Ping(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) 28 | FindOne(ctx context.Context, in *FindFileReq, opts ...grpc.CallOption) (*FileMetaInfo, error) 29 | QueryFiles(ctx context.Context, in *QueryFileReq, opts ...grpc.CallOption) (*QueryFileRes, error) 30 | InertOne(ctx context.Context, in *FileMetaInfo, opts ...grpc.CallOption) (*BaseRes, error) 31 | DeleteOne(ctx context.Context, in *FileMetaInfo, opts ...grpc.CallOption) (*BaseRes, error) 32 | DeleteHard(ctx context.Context, in *BaseTime, opts ...grpc.CallOption) (*BaseRes, error) 33 | } 34 | 35 | defaultFileServer struct { 36 | cli zrpc.Client 37 | } 38 | ) 39 | 40 | func NewFileServer(cli zrpc.Client) FileServer { 41 | return &defaultFileServer{ 42 | cli: cli, 43 | } 44 | } 45 | 46 | // 就绪性探针 47 | func (m *defaultFileServer) Ping(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) { 48 | client := pb.NewFileServerClient(m.cli.Conn()) 49 | return client.Ping(ctx, in, opts...) 50 | } 51 | 52 | func (m *defaultFileServer) FindOne(ctx context.Context, in *FindFileReq, opts ...grpc.CallOption) (*FileMetaInfo, error) { 53 | client := pb.NewFileServerClient(m.cli.Conn()) 54 | return client.FindOne(ctx, in, opts...) 55 | } 56 | 57 | func (m *defaultFileServer) QueryFiles(ctx context.Context, in *QueryFileReq, opts ...grpc.CallOption) (*QueryFileRes, error) { 58 | client := pb.NewFileServerClient(m.cli.Conn()) 59 | return client.QueryFiles(ctx, in, opts...) 60 | } 61 | 62 | func (m *defaultFileServer) InertOne(ctx context.Context, in *FileMetaInfo, opts ...grpc.CallOption) (*BaseRes, error) { 63 | client := pb.NewFileServerClient(m.cli.Conn()) 64 | return client.InertOne(ctx, in, opts...) 65 | } 66 | 67 | func (m *defaultFileServer) DeleteOne(ctx context.Context, in *FileMetaInfo, opts ...grpc.CallOption) (*BaseRes, error) { 68 | client := pb.NewFileServerClient(m.cli.Conn()) 69 | return client.DeleteOne(ctx, in, opts...) 70 | } 71 | 72 | func (m *defaultFileServer) DeleteHard(ctx context.Context, in *BaseTime, opts ...grpc.CallOption) (*BaseRes, error) { 73 | client := pb.NewFileServerClient(m.cli.Conn()) 74 | return client.DeleteHard(ctx, in, opts...) 
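// A minimal caller sketch for this generated client, wired up as in
// api/internal/svc/servicecontext.go (fileRpc stands for the FileServer value
// returned by NewFileServer); note that context.WithTimeout returns a cancel
// func that should be released, whereas the retry loop in uploadfilehandler.go
// discards it and waits on <-ctx.Done() between attempts:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
//	defer cancel()
//	res, err := fileRpc.InertOne(ctx, &pb.FileMetaInfo{Creator: "xiyang", Name: "demo.txt"})
//	if err != nil || res.Result != "true" {
//		// hand the write off to a retry queue instead of spinning in a loop
//	}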
75 | }
76 | 
--------------------------------------------------------------------------------
/app/CloudStorageBackend/FileServer/rpc/internal/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 | 
3 | import (
4 | 	"github.com/zeromicro/go-zero/core/stores/cache"
5 | 	"github.com/zeromicro/go-zero/zrpc"
6 | )
7 | 
8 | type Config struct {
9 | 	zrpc.RpcServerConf
10 | 	DB struct {
11 | 		DataSource string
12 | 	}
13 | 	Cache cache.CacheConf
14 | }
15 | 
--------------------------------------------------------------------------------
/app/CloudStorageBackend/FileServer/rpc/internal/logic/deletehardlogic.go:
--------------------------------------------------------------------------------
1 | package logic
2 | 
3 | import (
4 | 	"context"
5 | 	"time"
6 | 
7 | 	"fileServer/rpc/internal/svc"
8 | 	"fileServer/rpc/pb"
9 | 
10 | 	"github.com/zeromicro/go-zero/core/logx"
11 | )
12 | 
13 | type DeleteHardLogic struct {
14 | 	ctx    context.Context
15 | 	svcCtx *svc.ServiceContext
16 | 	logx.Logger
17 | }
18 | 
19 | func NewDeleteHardLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteHardLogic {
20 | 	return &DeleteHardLogic{
21 | 		ctx:    ctx,
22 | 		svcCtx: svcCtx,
23 | 		Logger: logx.WithContext(ctx),
24 | 	}
25 | }
26 | 
27 | func (l *DeleteHardLogic) DeleteHard(in *pb.BaseTime) (res *pb.BaseRes, err error) {
28 | 	// Build the response before registering the recover handler, so a panic can never
29 | 	// reach GetFailedRes on a nil *pb.BaseRes.
30 | 	res = pb.NewDefaultBaseRes()
31 | 	defer func() {
32 | 		if errCatch := recover(); errCatch != nil {
33 | 			res.GetFailedRes("系统出错")
34 | 			logx.Error(errCatch)
35 | 			err = nil
36 | 		}
37 | 	}()
38 | 	baseTime, err := time.Parse(time.RFC850, in.Date)
39 | 	if err != nil {
40 | 		logx.Error(err)
41 | 		res.GetFailedRes("时间转换出错")
42 | 		return res, nil
43 | 	}
44 | 	l.svcCtx.FileModel.DeleteHard(l.ctx, baseTime)
45 | 	return res, nil
46 | }
47 | 
--------------------------------------------------------------------------------
/app/CloudStorageBackend/FileServer/rpc/internal/logic/deleteonelogic.go:
--------------------------------------------------------------------------------
1 | package logic
2 | 
3 | import (
4 | 	"context"
5 | 	"time"
6 | 
7 | 	"fileServer/rpc/internal/svc"
8 | 	"fileServer/rpc/pb"
9 | 
10 | 	"github.com/zeromicro/go-zero/core/logx"
11 | )
12 | 
13 | type DeleteOneLogic struct {
14 | 	ctx    context.Context
15 | 	svcCtx *svc.ServiceContext
16 | 	logx.Logger
17 | }
18 | 
19 | func NewDeleteOneLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteOneLogic {
20 | 	return &DeleteOneLogic{
21 | 		ctx:    ctx,
22 | 		svcCtx: svcCtx,
23 | 		Logger: logx.WithContext(ctx),
24 | 	}
25 | }
26 | 
27 | func (l *DeleteOneLogic) DeleteOne(in *pb.FileMetaInfo) (result *pb.BaseRes, err error) {
28 | 	info, err := pb.ConvertFileMetaInfo(in)
29 | 	result = pb.NewDefaultBaseRes()
30 | 	if err != nil {
31 | 		result.GetFailedRes(err.Error())
32 | 		return result, err
33 | 	}
34 | 	// Soft delete: mark the NullTime as valid, otherwise delete_time is written back as NULL.
35 | 	info.DeleteTime.Time = time.Now()
36 | 	info.DeleteTime.Valid = true
37 | 	l.svcCtx.FileModel.Update(l.ctx, info)
38 | 	return result, nil
39 | }
40 | 
--------------------------------------------------------------------------------
/app/CloudStorageBackend/FileServer/rpc/internal/logic/findonelogic.go:
--------------------------------------------------------------------------------
1 | package logic
2 | 
3 | import (
4 | 	"context"
5 | 	"fileServer/rpc/internal/svc"
6 | 	"fileServer/rpc/pb"
7 | 	"github.com/zeromicro/go-zero/core/stores/sqlc"
8 | 
9 | 	"github.com/zeromicro/go-zero/core/logx"
10 | )
11 | 
12 | type FindOneLogic struct {
13 | 	ctx    context.Context
14 | 	svcCtx *svc.ServiceContext
15 | 	logx.Logger
16 | }
17 | 
18 | func NewFindOneLogic(ctx context.Context, svcCtx *svc.ServiceContext) *FindOneLogic {
19 | 	return &FindOneLogic{
20 | 		ctx:    ctx,
21 | 		svcCtx: svcCtx,
22 | 		Logger: logx.WithContext(ctx),
23 | 	}
24 | }
25 | 
26 | func (l *FindOneLogic) FindOne(in *pb.FindFileReq) (*pb.FileMetaInfo, error) {
27 | 	var owner = in.Owner
28 | 	var name = in.Name
29 | 	if owner == "" || name == "" {
30 | 		return nil, sqlc.ErrNotFound
31 | 	}
32 | 	fileModel, err := l.svcCtx.FileModel.FindOne(l.ctx, owner, name)
33 | 	if err != nil {
34 | 		return nil, err
35 | 	}
36 | 	return pb.GetFileMetaInfo(fileModel), nil
37 | }
38 | 
--------------------------------------------------------------------------------
/app/CloudStorageBackend/FileServer/rpc/internal/logic/inertonelogic.go:
--------------------------------------------------------------------------------
1 | package logic
2 | 
3 | import (
4 | 	"context"
5 | 	"fileServer/rpc/internal/svc"
6 | 	"fileServer/rpc/pb"
7 | 	"fmt"
8 | 
9 | 	"github.com/zeromicro/go-zero/core/logx"
10 | )
11 | 
12 | type InertOneLogic struct {
13 | 	ctx    context.Context
14 | 	svcCtx *svc.ServiceContext
15 | 	logx.Logger
16 | }
17 | 
18 | func NewInertOneLogic(ctx context.Context, svcCtx *svc.ServiceContext) *InertOneLogic {
19 | 	return &InertOneLogic{
20 | 		ctx:    ctx,
21 | 		svcCtx: svcCtx,
22 | 		Logger: logx.WithContext(ctx),
23 | 	}
24 | }
25 | 
26 | func (l *InertOneLogic) InertOne(in *pb.FileMetaInfo) (result *pb.BaseRes, err error) {
27 | 	info, err := pb.ConvertFileMetaInfo(in)
28 | 	result = pb.NewDefaultBaseRes()
29 | 	if err != nil {
30 | 		result.GetFailedRes(err.Error())
31 | 		return result, err
32 | 	}
33 | 	// Check the insert error before touching the sql.Result: it may be nil on failure.
34 | 	insert, err := l.svcCtx.FileModel.Insert(l.ctx, info)
35 | 	if err != nil {
36 | 		result.GetFailedRes(fmt.Sprintf("fileRPCServer insertOne error:%v", err))
37 | 		return result, err
38 | 	}
39 | 	if row, _ := insert.RowsAffected(); row <= 0 {
40 | 		result.GetFailedRes(fmt.Sprintf("fileRPCServer insertOne 影响行数:%d", row))
41 | 		return result, nil
42 | 	}
43 | 	return result, nil
44 | }
45 | 
--------------------------------------------------------------------------------
/app/CloudStorageBackend/FileServer/rpc/internal/logic/pinglogic.go:
--------------------------------------------------------------------------------
1 | package logic
2 | 
3 | import (
4 | 	"context"
5 | 
6 | 	"fileServer/rpc/internal/svc"
7 | 	"fileServer/rpc/pb"
8 | 
9 | 	"github.com/zeromicro/go-zero/core/logx"
10 | )
11 | 
12 | type PingLogic struct {
13 | 	ctx    context.Context
14 | 	svcCtx *svc.ServiceContext
15 | 	logx.Logger
16 | }
17 | 
18 | func NewPingLogic(ctx context.Context, svcCtx *svc.ServiceContext) *PingLogic {
19 | 	return &PingLogic{
20 | 		ctx:    ctx,
21 | 		svcCtx: svcCtx,
22 | 		Logger: logx.WithContext(ctx),
23 | 	}
24 | }
25 | 
26 | // 就绪性探针
27 | func (l *PingLogic) Ping(in *pb.Request) (*pb.Response, error) {
28 | 	// todo: add your logic here and delete this line
29 | 
30 | 	return &pb.Response{}, nil
31 | }
32 | 
--------------------------------------------------------------------------------
/app/CloudStorageBackend/FileServer/rpc/internal/logic/queryfileslogic.go:
--------------------------------------------------------------------------------
1 | package logic
2 | 
3 | import (
4 | 	"context"
5 | 	"fileServer/rpc/internal/svc"
6 | 	"fileServer/rpc/pb"
7 | 	"github.com/zeromicro/go-zero/core/stores/sqlx"
8 | 
9 | 	"github.com/zeromicro/go-zero/core/logx"
10 | )
11 | 
12 | type QueryFilesLogic struct {
13 | 	ctx    context.Context
14 | 	svcCtx *svc.ServiceContext
15 | 	logx.Logger
16 | 
} 17 | 18 | func NewQueryFilesLogic(ctx context.Context, svcCtx *svc.ServiceContext) *QueryFilesLogic { 19 | return &QueryFilesLogic{ 20 | ctx: ctx, 21 | svcCtx: svcCtx, 22 | Logger: logx.WithContext(ctx), 23 | } 24 | } 25 | 26 | func (l *QueryFilesLogic) QueryFiles(in *pb.QueryFileReq) (*pb.QueryFileRes, error) { 27 | // todo: add your logic here and delete this line 28 | var owner = in.Owner 29 | if owner == "" { 30 | return nil, sqlx.ErrNotFound 31 | } 32 | query, err := l.svcCtx.FileModel.Query(l.ctx, owner) 33 | if err != nil { 34 | return nil, err 35 | } 36 | return &pb.QueryFileRes{ 37 | Result: &pb.BaseRes{Result: "true", Message: "处理成功"}, 38 | List: pb.GetFileMetaList(query), 39 | }, nil 40 | } 41 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/rpc/internal/server/fileserverserver.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT! 2 | // Source: fileServer.proto 3 | 4 | package server 5 | 6 | import ( 7 | "context" 8 | 9 | "fileServer/rpc/internal/logic" 10 | "fileServer/rpc/internal/svc" 11 | "fileServer/rpc/pb" 12 | ) 13 | 14 | type FileServerServer struct { 15 | svcCtx *svc.ServiceContext 16 | pb.UnimplementedFileServerServer 17 | } 18 | 19 | func NewFileServerServer(svcCtx *svc.ServiceContext) *FileServerServer { 20 | return &FileServerServer{ 21 | svcCtx: svcCtx, 22 | } 23 | } 24 | 25 | // 就绪性探针 26 | func (s *FileServerServer) Ping(ctx context.Context, in *pb.Request) (*pb.Response, error) { 27 | l := logic.NewPingLogic(ctx, s.svcCtx) 28 | return l.Ping(in) 29 | } 30 | 31 | func (s *FileServerServer) FindOne(ctx context.Context, in *pb.FindFileReq) (*pb.FileMetaInfo, error) { 32 | l := logic.NewFindOneLogic(ctx, s.svcCtx) 33 | return l.FindOne(in) 34 | } 35 | 36 | func (s *FileServerServer) QueryFiles(ctx context.Context, in *pb.QueryFileReq) (*pb.QueryFileRes, error) { 37 | l := logic.NewQueryFilesLogic(ctx, s.svcCtx) 38 | return l.QueryFiles(in) 39 | } 40 | 41 | func (s *FileServerServer) InertOne(ctx context.Context, in *pb.FileMetaInfo) (*pb.BaseRes, error) { 42 | l := logic.NewInertOneLogic(ctx, s.svcCtx) 43 | return l.InertOne(in) 44 | } 45 | 46 | func (s *FileServerServer) DeleteOne(ctx context.Context, in *pb.FileMetaInfo) (*pb.BaseRes, error) { 47 | l := logic.NewDeleteOneLogic(ctx, s.svcCtx) 48 | return l.DeleteOne(in) 49 | } 50 | 51 | func (s *FileServerServer) DeleteHard(ctx context.Context, in *pb.BaseTime) (*pb.BaseRes, error) { 52 | l := logic.NewDeleteHardLogic(ctx, s.svcCtx) 53 | return l.DeleteHard(in) 54 | } 55 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/rpc/internal/svc/servicecontext.go: -------------------------------------------------------------------------------- 1 | package svc 2 | 3 | import ( 4 | "fileServer/model/PojoDB/fileMetaPojo" 5 | "fileServer/rpc/internal/config" 6 | "github.com/zeromicro/go-zero/core/stores/sqlx" 7 | ) 8 | 9 | type ServiceContext struct { 10 | Config config.Config 11 | FileModel fileMetaPojo.FilemetatableModel 12 | } 13 | 14 | func NewServiceContext(c config.Config) *ServiceContext { 15 | sqlConn := sqlx.NewMysql(c.DB.DataSource) 16 | return &ServiceContext{ 17 | Config: c, 18 | FileModel: fileMetaPojo.NewFilemetatableModel(sqlConn, c.Cache), 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/FileServer/rpc/pb/fileRpcUtil.go: 
--------------------------------------------------------------------------------
1 | package pb
2 | 
3 | import (
4 | 	"errors"
5 | 	"fileServer/model/PojoDB/fileMetaPojo"
6 | 	"fmt"
7 | 	"time"
8 | )
9 | 
10 | func ConvertFileMetaInfo(filemeta *FileMetaInfo) (result *fileMetaPojo.Filemetatable, err error) {
11 | 	var createTime, updateTime, deleteTime time.Time
12 | 	createStr := filemeta.CreateTime
13 | 	updateStr := filemeta.UpdateTime
14 | 	deleteStr := filemeta.DeleteTime
15 | 	if createStr != "" {
16 | 		createTime, err = time.Parse(time.RFC850, createStr)
17 | 		if err != nil {
18 | 			return nil, errors.New(fmt.Sprintf("convert createTime error:%s", createStr))
19 | 		}
20 | 	}
21 | 	if updateStr != "" {
22 | 		updateTime, err = time.Parse(time.RFC850, updateStr)
23 | 		if err != nil {
24 | 			return nil, errors.New(fmt.Sprintf("convert updateTime error:%s", updateStr))
25 | 		}
26 | 	}
27 | 	if deleteStr != "" {
28 | 		deleteTime, err = time.Parse(time.RFC850, deleteStr)
29 | 		if err != nil {
30 | 			return nil, errors.New(fmt.Sprintf("convert deleteTime error:%s", deleteStr))
31 | 		}
32 | 	}
33 | 	isDirInt := 0
34 | 	if filemeta.IsDir {
35 | 		isDirInt = 1
36 | 	}
37 | 	result = &fileMetaPojo.Filemetatable{
38 | 		Creator:     filemeta.Creator,
39 | 		CreateGroup: filemeta.CreateGroup,
40 | 		Name:        filemeta.Name,
41 | 		CreateTime:  createTime,
42 | 		Authority:   filemeta.Authority,
43 | 		TypeOf:      filemeta.TypeOf,
44 | 		UpdateTime:  updateTime,
45 | 		Size:        filemeta.Size,
46 | 		IsDir:       isDirInt,
47 | 		Description: filemeta.Description,
48 | 	}
49 | 	// Only mark delete_time as valid when a delete time was actually supplied,
50 | 	// otherwise the NullTime stays NULL and the record is not treated as deleted.
51 | 	if deleteStr != "" {
52 | 		result.DeleteTime.Time = deleteTime
53 | 		result.DeleteTime.Valid = true
54 | 	}
55 | 	return result, err
56 | }
57 | func GetFileMetaInfo(filemetatable *fileMetaPojo.Filemetatable) (result *FileMetaInfo) {
58 | 	result = &FileMetaInfo{
59 | 		Creator:     filemetatable.Creator,
60 | 		CreateGroup: filemetatable.CreateGroup,
61 | 		Name:        filemetatable.Name,
62 | 		CreateTime:  filemetatable.CreateTime.String(),
63 | 		Authority:   filemetatable.Authority,
64 | 		TypeOf:      filemetatable.TypeOf,
65 | 		UpdateTime:  filemetatable.UpdateTime.String(),
66 | 		Size:        filemetatable.Size,
67 | 		Description: filemetatable.Description,
68 | 	}
69 | 	// isDir is stored as 0/1 in MySQL; a non-zero value means the entry is a directory.
70 | 	result.IsDir = filemetatable.IsDir != 0
71 | 	deleteTime := filemetatable.DeleteTime
72 | 	if deleteTime.Valid {
73 | 		result.DeleteTime = deleteTime.Time.String()
74 | 	}
75 | 	return result
76 | }
77 | func GetFileMetaList(filemetatable []*fileMetaPojo.Filemetatable) (result []*FileMetaInfo) {
78 | 	result = make([]*FileMetaInfo, len(filemetatable))
79 | 	for index, value := range filemetatable {
80 | 		result[index] = GetFileMetaInfo(value)
81 | 	}
82 | 	return result
83 | }
84 | 
85 | //原型模式
86 | func NewDefaultBaseRes() *BaseRes {
87 | 	return &BaseRes{Result: "true", Message: "处理成功"}
88 | }
89 | 
90 | func (br *BaseRes) GetFailedRes(message string) {
91 | 	br.Result = "false"
92 | 	br.Message = message
93 | }
94 | 
--------------------------------------------------------------------------------
/app/CloudStorageBackend/FileServer/rpc/pb/fileServer.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | option go_package = "./pb";
3 | package pb;
4 | 
5 | 
6 | message FileMetaInfo{
7 |   string creator=1;
8 |   string createGroup=2;
9 |   string name=3;
10 |   string create_time=4;
11 |   string authority=5;
12 |   string typeOf=6;
13 |   string update_time=7;
14 |   int64 size=8;
15 |   bool isDir=9;
16 |   string delete_time=10;
17 |   string description=11;
18 | }
19 | 
20 | message FindFileReq{
21 |   string owner=1;
22 |   string name=2;
23 | }
24 | message 
QueryFileReq{ 25 | string owner=1; 26 | } 27 | message QueryFileRes{ 28 | baseRes result=1; 29 | repeated FileMetaInfo list=2; 30 | } 31 | 32 | message baseTime{ 33 | string Date=1; 34 | } 35 | 36 | message baseRes{ 37 | string result=1; 38 | string message=2; 39 | } 40 | message Request { 41 | string ping = 1; 42 | } 43 | 44 | message Response { 45 | string pong = 1; 46 | } 47 | service fileServer{ 48 | //就绪性探针 49 | rpc Ping(Request) returns(Response); 50 | rpc FindOne(FindFileReq)returns(FileMetaInfo); 51 | rpc QueryFiles(QueryFileReq)returns(QueryFileRes); 52 | rpc InertOne(FileMetaInfo)returns(baseRes); 53 | rpc DeleteOne(FileMetaInfo)returns(baseRes); 54 | rpc DeleteHard(baseTime)returns(baseRes); 55 | } -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/etc/user-api.yaml: -------------------------------------------------------------------------------- 1 | Name: user-api 2 | Host: 0.0.0.0 3 | Port: 8888 4 | 5 | 6 | DB: 7 | DataSource: root:123456@tcp(localhost:3307)/cloudstoragesystem?charset=utf8&parseTime=true 8 | Cache: 9 | - Host: redis:6379 10 | Pass: 123456 11 | 12 | Auth: 13 | AccessSecret: "cloudstoragesystem" 14 | AccessExpire: 600 -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "github.com/zeromicro/go-zero/core/stores/cache" 5 | "github.com/zeromicro/go-zero/rest" 6 | ) 7 | 8 | type Config struct { 9 | rest.RestConf 10 | Auth struct { 11 | AccessSecret string 12 | AccessExpire int64 13 | } 14 | DB struct { 15 | DataSource string 16 | } 17 | Cache cache.CacheConf 18 | } 19 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/handler/captcha/captchahandler.go: -------------------------------------------------------------------------------- 1 | package captcha 2 | 3 | import ( 4 | "UserServer/api/internal/logic/captcha" 5 | "net/http" 6 | 7 | "UserServer/api/internal/svc" 8 | "github.com/zeromicro/go-zero/rest/httpx" 9 | ) 10 | 11 | func CaptchaHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 12 | return func(w http.ResponseWriter, r *http.Request) { 13 | l := captcha.NewCaptchaLogic(r.Context(), svcCtx) 14 | err := l.Captcha(w) 15 | if err != nil { 16 | httpx.ErrorCtx(r.Context(), w, err) 17 | } else { 18 | httpx.Ok(w) 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/handler/captcha/verifyhandler.go: -------------------------------------------------------------------------------- 1 | package captcha 2 | 3 | import ( 4 | "UserServer/api/internal/logic/captcha" 5 | "net/http" 6 | 7 | "UserServer/api/internal/svc" 8 | "UserServer/api/internal/types" 9 | "github.com/zeromicro/go-zero/rest/httpx" 10 | ) 11 | 12 | func VerifyHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 13 | return func(w http.ResponseWriter, r *http.Request) { 14 | var req types.CaptchaVerifyReq 15 | if err := httpx.Parse(r, &req); err != nil { 16 | httpx.ErrorCtx(r.Context(), w, err) 17 | return 18 | } 19 | 20 | l := captcha.NewVerifyLogic(r.Context(), svcCtx) 21 | resp, err := l.Verify(&req) 22 | if err != nil { 23 | httpx.ErrorCtx(r.Context(), w, err) 24 | } else { 25 | httpx.OkJsonCtx(r.Context(), w, resp) 26 | } 27 | } 28 | } 
29 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/handler/routes.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT. 2 | package handler 3 | 4 | import ( 5 | "net/http" 6 | 7 | captcha "UserServer/api/internal/handler/captcha" 8 | user "UserServer/api/internal/handler/user" 9 | "UserServer/api/internal/svc" 10 | 11 | "github.com/zeromicro/go-zero/rest" 12 | ) 13 | 14 | func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) { 15 | server.AddRoutes( 16 | []rest.Route{ 17 | { 18 | Method: http.MethodPost, 19 | Path: "/user/login", 20 | Handler: user.LoginHandler(serverCtx), 21 | }, 22 | { 23 | Method: http.MethodPost, 24 | Path: "/user/register", 25 | Handler: user.RegisterHandler(serverCtx), 26 | }, 27 | }, 28 | ) 29 | 30 | server.AddRoutes( 31 | []rest.Route{ 32 | { 33 | Method: http.MethodGet, 34 | Path: "/captcha/get", 35 | Handler: captcha.CaptchaHandler(serverCtx), 36 | }, 37 | { 38 | Method: http.MethodGet, 39 | Path: "/captcha/verify", 40 | Handler: captcha.VerifyHandler(serverCtx), 41 | }, 42 | }, 43 | ) 44 | } 45 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/handler/user/loginhandler.go: -------------------------------------------------------------------------------- 1 | package user 2 | 3 | import ( 4 | "UserServer/api/internal/logic/user" 5 | "UserServer/api/internal/svc" 6 | "UserServer/api/internal/types" 7 | "net/http" 8 | 9 | "github.com/zeromicro/go-zero/rest/httpx" 10 | ) 11 | 12 | func LoginHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 13 | return func(w http.ResponseWriter, r *http.Request) { 14 | var req types.LoginReq 15 | if err := httpx.Parse(r, &req); err != nil { 16 | httpx.ErrorCtx(r.Context(), w, err) 17 | return 18 | } 19 | 20 | l := user.NewLoginLogic(r.Context(), svcCtx) 21 | resp, err := l.Login(&req) 22 | if err != nil { 23 | httpx.ErrorCtx(r.Context(), w, err) 24 | } else { 25 | httpx.OkJsonCtx(r.Context(), w, resp) 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/handler/user/registerhandler.go: -------------------------------------------------------------------------------- 1 | package user 2 | 3 | import ( 4 | "UserServer/api/internal/logic/user" 5 | "UserServer/api/internal/svc" 6 | "UserServer/api/internal/types" 7 | "net/http" 8 | 9 | "github.com/zeromicro/go-zero/rest/httpx" 10 | ) 11 | 12 | func RegisterHandler(svcCtx *svc.ServiceContext) http.HandlerFunc { 13 | return func(w http.ResponseWriter, r *http.Request) { 14 | var req types.RegisterReq 15 | if err := httpx.Parse(r, &req); err != nil { 16 | httpx.ErrorCtx(r.Context(), w, err) 17 | return 18 | } 19 | 20 | l := user.NewRegisterLogic(r.Context(), svcCtx) 21 | resp, err := l.Register(&req) 22 | if err != nil { 23 | httpx.ErrorCtx(r.Context(), w, err) 24 | } else { 25 | httpx.OkJsonCtx(r.Context(), w, resp) 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/logic/captcha/captchalogic.go: -------------------------------------------------------------------------------- 1 | package captcha 2 | 3 | import ( 4 | "context" 5 | "github.com/dchest/captcha" 6 | "net/http" 7 | 8 | "UserServer/api/internal/svc" 9 | 
"github.com/zeromicro/go-zero/core/logx" 10 | ) 11 | 12 | type CaptchaLogic struct { 13 | logx.Logger 14 | ctx context.Context 15 | svcCtx *svc.ServiceContext 16 | } 17 | 18 | func NewCaptchaLogic(ctx context.Context, svcCtx *svc.ServiceContext) *CaptchaLogic { 19 | return &CaptchaLogic{ 20 | Logger: logx.WithContext(ctx), 21 | ctx: ctx, 22 | svcCtx: svcCtx, 23 | } 24 | } 25 | 26 | func (l *CaptchaLogic) Captcha(w http.ResponseWriter) error { 27 | // todo: add your logic here and delete this line 28 | // 生成验证码id 29 | id := captcha.NewLen(6) 30 | w.Header().Add("id", id) 31 | // 生成验证码并写入`w` 32 | err := captcha.WriteImage(w, id, 120, 50) 33 | return err 34 | } 35 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/logic/captcha/verifylogic.go: -------------------------------------------------------------------------------- 1 | package captcha 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/dchest/captcha" 7 | 8 | "UserServer/api/internal/svc" 9 | "UserServer/api/internal/types" 10 | 11 | "github.com/zeromicro/go-zero/core/logx" 12 | ) 13 | 14 | type VerifyLogic struct { 15 | logx.Logger 16 | ctx context.Context 17 | svcCtx *svc.ServiceContext 18 | } 19 | 20 | func NewVerifyLogic(ctx context.Context, svcCtx *svc.ServiceContext) *VerifyLogic { 21 | return &VerifyLogic{ 22 | Logger: logx.WithContext(ctx), 23 | ctx: ctx, 24 | svcCtx: svcCtx, 25 | } 26 | } 27 | 28 | func (l *VerifyLogic) Verify(req *types.CaptchaVerifyReq) (resp *types.CaptchaVerifyRes, err error) { 29 | // todo: add your logic here and delete this line 30 | result := captcha.VerifyString(req.Id, req.Value) 31 | return &types.CaptchaVerifyRes{ 32 | Result: fmt.Sprint(result), 33 | }, nil 34 | } 35 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/logic/user/loginlogic.go: -------------------------------------------------------------------------------- 1 | package user 2 | 3 | import ( 4 | "UserServer/api/internal/svc" 5 | "UserServer/api/internal/types" 6 | "UserServer/api/units" 7 | "context" 8 | "github.com/golang-jwt/jwt/v4" 9 | "time" 10 | 11 | "github.com/zeromicro/go-zero/core/logx" 12 | ) 13 | 14 | type LoginLogic struct { 15 | logx.Logger 16 | ctx context.Context 17 | svcCtx *svc.ServiceContext 18 | } 19 | 20 | func NewLoginLogic(ctx context.Context, svcCtx *svc.ServiceContext) *LoginLogic { 21 | return &LoginLogic{ 22 | Logger: logx.WithContext(ctx), 23 | ctx: ctx, 24 | svcCtx: svcCtx, 25 | } 26 | } 27 | 28 | func (l *LoginLogic) Login(req *types.LoginReq) (resp *types.LoginRes, err error) { 29 | // todo: add your logic here and delete this line 30 | user, err := l.svcCtx.CssUserModel.FindOneByAccount(l.ctx, req.Name, units.Md5(req.Password)) 31 | token, err := l.getJwtToken(l.svcCtx.Config.Auth.AccessSecret, user.Id, user.Account, user.Name) 32 | 33 | return &types.LoginRes{ 34 | AccessToken: token, 35 | Result: "true", 36 | }, nil 37 | } 38 | 39 | func (l *LoginLogic) getJwtToken(secretKey string, id int64, account string, name string) (string, error) { 40 | claims := make(jwt.MapClaims) 41 | claims["exp"] = time.Now().Unix() + 60000 42 | claims["id"] = id 43 | claims["account"] = account 44 | claims["name"] = name 45 | token := jwt.New(jwt.SigningMethodHS256) 46 | token.Claims = claims 47 | return token.SignedString([]byte(secretKey)) 48 | } 49 | -------------------------------------------------------------------------------- 
/app/CloudStorageBackend/UserServer/api/internal/logic/user/registerlogic.go: -------------------------------------------------------------------------------- 1 | package user 2 | 3 | import ( 4 | "UserServer/api/internal/svc" 5 | "UserServer/api/internal/types" 6 | "context" 7 | 8 | "github.com/zeromicro/go-zero/core/logx" 9 | ) 10 | 11 | type RegisterLogic struct { 12 | logx.Logger 13 | ctx context.Context 14 | svcCtx *svc.ServiceContext 15 | } 16 | 17 | func NewRegisterLogic(ctx context.Context, svcCtx *svc.ServiceContext) *RegisterLogic { 18 | return &RegisterLogic{ 19 | Logger: logx.WithContext(ctx), 20 | ctx: ctx, 21 | svcCtx: svcCtx, 22 | } 23 | } 24 | 25 | func (l *RegisterLogic) Register(req *types.RegisterReq) (resp *types.RegisterRes, err error) { 26 | // todo: add your logic here and delete this line 27 | 28 | return 29 | } 30 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/svc/servicecontext.go: -------------------------------------------------------------------------------- 1 | package svc 2 | 3 | import ( 4 | "UserServer/api/internal/config" 5 | "UserServer/api/model" 6 | "github.com/zeromicro/go-zero/core/stores/sqlx" 7 | ) 8 | 9 | type ServiceContext struct { 10 | Config config.Config 11 | model.CssUserModel 12 | } 13 | 14 | func NewServiceContext(c config.Config) *ServiceContext { 15 | return &ServiceContext{ 16 | Config: c, 17 | CssUserModel: model.NewCssUserModel(sqlx.NewMysql(c.DB.DataSource), c.Cache), 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/internal/types/types.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT. 
2 | package types 3 | 4 | type LoginReq struct { 5 | Name string `form:"name"` 6 | Password string `form:"password"` 7 | } 8 | 9 | type LoginRes struct { 10 | AccessToken string `json:"accessToken"` 11 | AccessExpire int64 `json:"accessExpire"` 12 | Result string `json:"result"` 13 | } 14 | 15 | type RegisterReq struct { 16 | Name string `from:"name"` 17 | Password string `from:"password"` 18 | Email string `from:"email"` 19 | } 20 | 21 | type RegisterRes struct { 22 | Result string `json:"result"` 23 | Data string `json:"data"` 24 | } 25 | 26 | type CaptchaVerifyReq struct { 27 | Id string `form:"id"` 28 | Value string `form:"value"` 29 | } 30 | 31 | type CaptchaVerifyRes struct { 32 | Result string `json:"result"` 33 | } 34 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/model/Css_user.sql: -------------------------------------------------------------------------------- 1 | 2 | SET NAMES utf8mb4; 3 | SET FOREIGN_KEY_CHECKS = 0; 4 | 5 | -- ---------------------------- 6 | -- Table structure for Css_user 7 | -- ---------------------------- 8 | DROP TABLE IF EXISTS `Css_user`; 9 | CREATE TABLE `Css_user` ( 10 | `id` int UNSIGNED NOT NULL AUTO_INCREMENT, 11 | `account` varchar(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL, 12 | `name` varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL, 13 | `email` varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL, 14 | `password` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL, 15 | `role` char(1) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL COMMENT '0:管理员1:普通用户', 16 | PRIMARY KEY (`id`) USING BTREE 17 | ) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_0900_ai_ci ROW_FORMAT = Dynamic; 18 | 19 | SET FOREIGN_KEY_CHECKS = 1; 20 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/model/cssusermodel.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "github.com/zeromicro/go-zero/core/stores/cache" 5 | "github.com/zeromicro/go-zero/core/stores/sqlx" 6 | ) 7 | 8 | var _ CssUserModel = (*customCssUserModel)(nil) 9 | 10 | type ( 11 | // CssUserModel is an interface to be customized, add more methods here, 12 | // and implement the added methods in customCssUserModel. 13 | CssUserModel interface { 14 | cssUserModel 15 | } 16 | 17 | customCssUserModel struct { 18 | *defaultCssUserModel 19 | } 20 | ) 21 | 22 | // NewCssUserModel returns a model for the database table. 23 | func NewCssUserModel(conn sqlx.SqlConn, c cache.CacheConf) CssUserModel { 24 | return &customCssUserModel{ 25 | defaultCssUserModel: newCssUserModel(conn, c), 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/model/cssusermodel_gen.go: -------------------------------------------------------------------------------- 1 | // Code generated by goctl. DO NOT EDIT. 
2 | 
3 | package model
4 | 
5 | import (
6 | 
7 | 	"context"
8 | 	"database/sql"
9 | 	"fmt"
10 | 	"strings"
11 | 
12 | 	"github.com/zeromicro/go-zero/core/stores/builder"
13 | 	"github.com/zeromicro/go-zero/core/stores/cache"
14 | 	"github.com/zeromicro/go-zero/core/stores/sqlc"
15 | 	"github.com/zeromicro/go-zero/core/stores/sqlx"
16 | 	"github.com/zeromicro/go-zero/core/stringx"
17 | )
18 | 
19 | var (
20 | 	cssUserFieldNames          = builder.RawFieldNames(&CssUser{})
21 | 	cssUserRows                = strings.Join(cssUserFieldNames, ",")
22 | 	cssUserRowsExpectAutoSet   = strings.Join(stringx.Remove(cssUserFieldNames, "`id`", "`update_time`", "`create_at`", "`created_at`", "`create_time`", "`update_at`", "`updated_at`"), ",")
23 | 	cssUserRowsWithPlaceHolder = strings.Join(stringx.Remove(cssUserFieldNames, "`id`", "`update_time`", "`create_at`", "`created_at`", "`create_time`", "`update_at`", "`updated_at`"), "=?,") + "=?"
24 | 
25 | 	cacheCssUserIdPrefix = "cache:cssUser:id:"
26 | )
27 | 
28 | type (
29 | 	cssUserModel interface {
30 | 		Insert(ctx context.Context, data *CssUser) (sql.Result, error)
31 | 		FindOne(ctx context.Context, id int64) (*CssUser, error)
32 | 		Update(ctx context.Context, data *CssUser) error
33 | 		Delete(ctx context.Context, id int64) error
34 | 		FindOneByAccount(ctx context.Context, username string, password string) (*CssUser, error)
35 | 	}
36 | 
37 | 	defaultCssUserModel struct {
38 | 		sqlc.CachedConn
39 | 		table string
40 | 	}
41 | 
42 | 	CssUser struct {
43 | 		Id       int64  `db:"id"`
44 | 		Account  string `db:"account"`
45 | 		Name     string `db:"name"`
46 | 		Email    string `db:"email"`
47 | 		Password string `db:"password"`
48 | 		Role     string `db:"role"` // 0:管理员1:普通用户
49 | 	}
50 | )
51 | 
52 | func newCssUserModel(conn sqlx.SqlConn, c cache.CacheConf) *defaultCssUserModel {
53 | 	return &defaultCssUserModel{
54 | 		CachedConn: sqlc.NewConn(conn, c),
55 | 		table:      "`Css_user`",
56 | 	}
57 | }
58 | 
59 | // FindOneByAccount looks a user up by name and password.
60 | // The query deliberately bypasses the id-keyed cache: the previous fixed cache key
61 | // would return the same cached row for every account lookup.
62 | func (m *defaultCssUserModel) FindOneByAccount(ctx context.Context, username string, password string) (*CssUser, error) {
63 | 	var resp CssUser
64 | 	query := fmt.Sprintf("select %s from %s where `name` = ? and `password` = ? limit 1", cssUserRows, m.table)
65 | 	err := m.QueryRowNoCacheCtx(ctx, &resp, query, username, password)
66 | 
67 | 	switch err {
68 | 	case nil:
69 | 		return &resp, nil
70 | 	case sqlc.ErrNotFound:
71 | 		return nil, ErrNotFound
72 | 	default:
73 | 		return nil, err
74 | 	}
75 | }
76 | 
77 | func (m *defaultCssUserModel) Delete(ctx context.Context, id int64) error {
78 | 	cssUserIdKey := fmt.Sprintf("%s%v", cacheCssUserIdPrefix, id)
79 | 	_, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
80 | 		query := fmt.Sprintf("delete from %s where `id` = ?", m.table)
81 | 		return conn.ExecCtx(ctx, query, id)
82 | 	}, cssUserIdKey)
83 | 	return err
84 | }
85 | 
86 | func (m *defaultCssUserModel) FindOne(ctx context.Context, id int64) (*CssUser, error) {
87 | 	cssUserIdKey := fmt.Sprintf("%s%v", cacheCssUserIdPrefix, id)
88 | 	var resp CssUser
89 | 	err := m.QueryRowCtx(ctx, &resp, cssUserIdKey, func(ctx context.Context, conn sqlx.SqlConn, v interface{}) error {
90 | 		query := fmt.Sprintf("select %s from %s where `id` = ?
limit 1", cssUserRows, m.table) 91 | return conn.QueryRowCtx(ctx, v, query, id) 92 | }) 93 | switch err { 94 | case nil: 95 | return &resp, nil 96 | case sqlc.ErrNotFound: 97 | return nil, ErrNotFound 98 | default: 99 | return nil, err 100 | } 101 | } 102 | 103 | func (m *defaultCssUserModel) Insert(ctx context.Context, data *CssUser) (sql.Result, error) { 104 | cssUserIdKey := fmt.Sprintf("%s%v", cacheCssUserIdPrefix, data.Id) 105 | ret, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { 106 | query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?)", m.table, cssUserRowsExpectAutoSet) 107 | return conn.ExecCtx(ctx, query, data.Account, data.Name, data.Email, data.Password, data.Role) 108 | }, cssUserIdKey) 109 | return ret, err 110 | } 111 | 112 | func (m *defaultCssUserModel) Update(ctx context.Context, data *CssUser) error { 113 | cssUserIdKey := fmt.Sprintf("%s%v", cacheCssUserIdPrefix, data.Id) 114 | _, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) { 115 | query := fmt.Sprintf("update %s set %s where `id` = ?", m.table, cssUserRowsWithPlaceHolder) 116 | return conn.ExecCtx(ctx, query, data.Account, data.Name, data.Email, data.Password, data.Role, data.Id) 117 | }, cssUserIdKey) 118 | return err 119 | } 120 | 121 | func (m *defaultCssUserModel) formatPrimary(primary interface{}) string { 122 | return fmt.Sprintf("%s%v", cacheCssUserIdPrefix, primary) 123 | } 124 | 125 | func (m *defaultCssUserModel) queryPrimary(ctx context.Context, conn sqlx.SqlConn, v, primary interface{}) error { 126 | query := fmt.Sprintf("select %s from %s where `id` = ? limit 1", cssUserRows, m.table) 127 | return conn.QueryRowCtx(ctx, v, query, primary) 128 | } 129 | 130 | func (m *defaultCssUserModel) tableName() string { 131 | return m.table 132 | } 133 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/model/vars.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import "github.com/zeromicro/go-zero/core/stores/sqlx" 4 | 5 | var ErrNotFound = sqlx.ErrNotFound 6 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/units/func_test.go: -------------------------------------------------------------------------------- 1 | package units 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestMd5(t *testing.T) { 9 | cp := Md5("123456") 10 | fmt.Println(cp) 11 | } 12 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/units/units.go: -------------------------------------------------------------------------------- 1 | package units 2 | 3 | import ( 4 | "crypto/md5" 5 | "fmt" 6 | "github.com/golang-jwt/jwt/v4" 7 | ) 8 | 9 | func Md5(str string) string { 10 | return fmt.Sprintf("%x", md5.Sum([]byte(str))) 11 | } 12 | 13 | // 生成`Token` 14 | func GenerateToken(id int64, account string, name string) (string, error) { 15 | uc := jwt.MapClaims{ // 声明 payload 16 | } 17 | token := jwt.NewWithClaims(jwt.SigningMethodHS256, uc) // 创建使用HS256签名的JWT 18 | signedToken, err := token.SignedString("123") // 签名,组装token 19 | return signedToken, err 20 | } 21 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/user.api: 
-------------------------------------------------------------------------------- 1 | type ( 2 | LoginReq { 3 | Name string `form:"name"` 4 | Password string `form:"password"` 5 | } 6 | 7 | LoginRes { 8 | AccessToken string `json:"accessToken"` 9 | AccessExpire int64 `json:"accessExpire"` 10 | Result string `json:"result"` 11 | } 12 | 13 | registerReq { 14 | Name string `from:"name"` 15 | Password string `from:"password"` 16 | Email string `from:"email"` 17 | } 18 | 19 | registerRes { 20 | Result string `json:"result"` 21 | Data string `json:"data"` 22 | } 23 | 24 | captchaVerifyReq { 25 | Id string `form:"id"` 26 | Value string `form:"value"` 27 | } 28 | 29 | captchaVerifyRes { 30 | Result string `json:"result"` 31 | } 32 | ) 33 | 34 | @server( 35 | group: user 36 | ) 37 | service user-api { 38 | @doc "user" 39 | @handler login 40 | post /user/login (LoginReq) returns (LoginRes) 41 | @doc "user" 42 | @handler register 43 | post /user/register (registerReq) returns (registerRes) 44 | } 45 | 46 | @server( 47 | group: captcha 48 | ) 49 | service user-api { 50 | @doc "captcha" 51 | @handler captcha 52 | get /captcha/get () returns () 53 | 54 | @doc "captcha" 55 | @handler verify 56 | get /captcha/verify (captchaVerifyReq) returns (captchaVerifyRes) 57 | } -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/api/user.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | 7 | "UserServer/api/internal/config" 8 | "UserServer/api/internal/handler" 9 | "UserServer/api/internal/svc" 10 | 11 | "github.com/zeromicro/go-zero/core/conf" 12 | "github.com/zeromicro/go-zero/rest" 13 | ) 14 | 15 | var configFile = flag.String("f", "etc/user-api.yaml", "the config file") 16 | 17 | func main() { 18 | flag.Parse() 19 | 20 | var c config.Config 21 | conf.MustLoad(*configFile, &c) 22 | 23 | server := rest.MustNewServer(c.RestConf) 24 | defer server.Stop() 25 | 26 | ctx := svc.NewServiceContext(c) 27 | handler.RegisterHandlers(server, ctx) 28 | 29 | fmt.Printf("Starting server at %s:%d...\n", c.Host, c.Port) 30 | server.Start() 31 | } 32 | -------------------------------------------------------------------------------- /app/CloudStorageBackend/UserServer/go.mod: -------------------------------------------------------------------------------- 1 | module UserServer 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/dchest/captcha v1.0.0 7 | github.com/golang-jwt/jwt/v4 v4.4.3 8 | github.com/zeromicro/go-zero v1.4.3 9 | ) 10 | 11 | require ( 12 | github.com/beorn7/perks v1.0.1 // indirect 13 | github.com/cenkalti/backoff/v4 v4.1.3 // indirect 14 | github.com/cespare/xxhash/v2 v2.1.2 // indirect 15 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 16 | github.com/fatih/color v1.13.0 // indirect 17 | github.com/felixge/fgprof v0.9.3 // indirect 18 | github.com/go-logr/logr v1.2.3 // indirect 19 | github.com/go-logr/stdr v1.2.2 // indirect 20 | github.com/go-redis/redis/v8 v8.11.5 // indirect 21 | github.com/go-sql-driver/mysql v1.7.0 // indirect 22 | github.com/golang/protobuf v1.5.2 // indirect 23 | github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect 24 | github.com/google/uuid v1.3.0 // indirect 25 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect 26 | github.com/mattn/go-colorable v0.1.9 // indirect 27 | github.com/mattn/go-isatty v0.0.14 // indirect 28 | github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369 // indirect 29 | github.com/openzipkin/zipkin-go v0.4.0 // indirect 30 | github.com/pelletier/go-toml/v2 v2.0.6 // indirect 31 | github.com/prometheus/client_golang v1.13.0 // indirect 32 | github.com/prometheus/client_model v0.2.0 // indirect 33 | github.com/prometheus/common v0.37.0 // indirect 34 | github.com/prometheus/procfs v0.8.0 // indirect 35 | github.com/spaolacci/murmur3 v1.1.0 // indirect 36 | go.opentelemetry.io/otel v1.10.0 // indirect 37 | go.opentelemetry.io/otel/exporters/jaeger v1.10.0 // indirect 38 | go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect 39 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect 40 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect 41 | go.opentelemetry.io/otel/exporters/zipkin v1.10.0 // indirect 42 | go.opentelemetry.io/otel/sdk v1.10.0 // indirect 43 | go.opentelemetry.io/otel/trace v1.10.0 // indirect 44 | go.opentelemetry.io/proto/otlp v0.19.0 // indirect 45 | go.uber.org/automaxprocs v1.5.1 // indirect 46 | golang.org/x/net v0.0.0-20221014081412-f15817d10f9b // indirect 47 | golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect 48 | golang.org/x/text v0.4.0 // indirect 49 | google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e // indirect 50 | google.golang.org/grpc v1.50.1 // indirect 51 | google.golang.org/protobuf v1.28.1 // indirect 52 | gopkg.in/yaml.v2 v2.4.0 // indirect 53 | ) 54 | -------------------------------------------------------------------------------- /app/CloudStorageFront/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | .DS_Store 12 | dist 13 | dist-ssr 14 | coverage 15 | *.local 16 | 17 | /cypress/videos/ 18 | /cypress/screenshots/ 19 | 20 | # Editor directories and files 21 | .vscode/* 22 | !.vscode/extensions.json 23 | .idea 24 | *.suo 25 | *.ntvs* 26 | *.njsproj 27 | *.sln 28 | *.sw? 29 | -------------------------------------------------------------------------------- /app/CloudStorageFront/README.md: -------------------------------------------------------------------------------- 1 | # CloudStorageFront 2 | 3 | This template should help get you started developing with Vue 3 in Vite. 4 | 5 | ## Recommended IDE Setup 6 | 7 | [VSCode](https://code.visualstudio.com/) + [Volar](https://marketplace.visualstudio.com/items?itemName=Vue.volar) (and 8 | disable Vetur) 9 | + [TypeScript Vue Plugin (Volar)](https://marketplace.visualstudio.com/items?itemName=Vue.vscode-typescript-vue-plugin). 10 | 11 | ## Customize configuration 12 | 13 | See [Vite Configuration Reference](https://vitejs.dev/config/). 14 | 15 | ## Project Setup 16 | 17 | ```sh 18 | npm install 19 | ``` 20 | 21 | ### Compile and Hot-Reload for Development 22 | 23 | ```sh 24 | npm run dev 25 | ``` 26 | 27 | ### Compile and Minify for Production 28 | 29 | ```sh 30 | npm run build 31 | ``` 32 | -------------------------------------------------------------------------------- /app/CloudStorageFront/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | Vite App 9 | 10 | 11 |
12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /app/CloudStorageFront/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cloudstoragefront", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "vite", 7 | "build": "vite build", 8 | "preview": "vite preview" 9 | }, 10 | "dependencies": { 11 | "@element-plus/icons-vue": "^2.0.10", 12 | "axios": "^1.2.1", 13 | "bootstrap": "^5.2.3", 14 | "element-plus": "^2.2.27", 15 | "nprogress": "^0.2.0", 16 | "vue": "^3.2.45", 17 | "vue-router": "^4.1.6", 18 | "vuex": "^4.1.0" 19 | }, 20 | "devDependencies": { 21 | "@vitejs/plugin-vue": "^4.0.0", 22 | "babel-plugin-component": "^1.1.1", 23 | "vite": "^4.0.0" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /app/CloudStorageFront/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JBossBC/CloudStorageSystem/f587eb5add7154e519727f88187a72e1b9a52a27/app/CloudStorageFront/public/favicon.ico -------------------------------------------------------------------------------- /app/CloudStorageFront/src/App.vue: -------------------------------------------------------------------------------- 1 | 6 | 7 | 12 | 13 | 20 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/assets/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/assets/main.css: -------------------------------------------------------------------------------- 1 | body { 2 | width: 100%; 3 | height: 100%; 4 | border: 0px; 5 | margin: 0px; 6 | } 7 | 8 | html { 9 | height: 100%; 10 | width: 100%; 11 | } -------------------------------------------------------------------------------- /app/CloudStorageFront/src/components/icons/IconCommunity.vue: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/components/icons/IconDocumentation.vue: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/components/icons/IconEcosystem.vue: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/components/icons/IconSupport.vue: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/components/icons/IconTooling.vue: -------------------------------------------------------------------------------- 1 | 2 | 20 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/components/topPersonalInformation.vue: -------------------------------------------------------------------------------- 1 | 2 | 64 | 81 | 82 | 83 | 183 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/main.js: 
-------------------------------------------------------------------------------- 1 | import {createApp} from 'vue' 2 | import App from './App.vue' 3 | import router from './router' 4 | import 'element-plus/dist/index.css' 5 | import Vuex, {createStore} from "vuex"; 6 | import "./assets/main.css"; 7 | import ElementPlus from "element-plus" 8 | import axiosApi from "@/router/GlobalAxios"; 9 | const app =createApp(App) 10 | app.use(ElementPlus) 11 | app.config.globalProperties.$axios=axiosApi 12 | app.use(router) 13 | app.mount('#app') 14 | const store = createStore({ 15 | state() { 16 | return { 17 | userName: "", 18 | } 19 | }, 20 | mutations: { 21 | setUserName(state, username) { 22 | state.userName = username; 23 | } 24 | } 25 | }) 26 | app.use(store); 27 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/router/GlobalAxios.js: -------------------------------------------------------------------------------- 1 | import axios from 'axios' 2 | import NProgress from 'nprogress' 3 | import 'nprogress/nprogress.css' 4 | // // set the global baseurl 5 | // axios.defaults.baseURL="" 6 | // //allow to carry the cookie 7 | // axios.defaults.withCredentials=true 8 | const axiosApi=axios.create({ 9 | // set the global baseurl 10 | baseURL:"http://localhost:8080/", 11 | withCredentials:true 12 | }) 13 | 14 | axios.interceptors.request.use(function(config){ 15 | NProgress.start(); 16 | return config; 17 | },error => { 18 | return Promise.reject(error); 19 | }) 20 | 21 | 22 | axios.interceptors.response.use(function(config){ 23 | NProgress.done(); 24 | return config; 25 | },error=>{ 26 | NProgress.done(); 27 | return Promise.reject(error); 28 | }) 29 | 30 | 31 | export default axiosApi; 32 | 33 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/router/RouteUtil.js: -------------------------------------------------------------------------------- 1 | const CookieUtil=new Object({ 2 | getCookie: function (name) { 3 | let cookieList = document.cookie.split(";"); 4 | for (let i = 0; i < cookieList.length; i++) { 5 | let keyValueList = cookieList[i].split("="); 6 | let key = keyValueList[0]; 7 | let value = keyValueList[1]; 8 | if (key == name) { 9 | return value; 10 | } 11 | } 12 | return ""; 13 | }, 14 | //expireTime base on milliSecond 15 | setCookie:function(key,value,expireTime){ 16 | let expire=";expires="+new Date(new Date().getTime()+1000*expireTime).toDateString(); 17 | document.cookie=key+"="+value+expire; 18 | }, 19 | deleteCookie:function(key,value){ 20 | let overdueTime=";expires="+new Date(0); 21 | document.cookie=key+"="+value+overdueTime; 22 | } 23 | } 24 | ) 25 | export default CookieUtil; -------------------------------------------------------------------------------- /app/CloudStorageFront/src/router/index.js: -------------------------------------------------------------------------------- 1 | import {createRouter, createWebHistory} from 'vue-router' 2 | import CookieUtil from "@/router/RouteUtil"; 3 | const router = createRouter({ 4 | history: createWebHistory(import.meta.env.BASE_URL), 5 | routes: [ 6 | { 7 | path: '/Login', 8 | name: 'Login', 9 | component: () => import('../views/Login.vue') 10 | }, 11 | { 12 | path: "/", 13 | name: "index", 14 | component: () => import('../views/Login.vue') 15 | }, 16 | { 17 | path: '/home', 18 | name: 'home', 19 | // route level code-splitting 20 | // this generates a separate chunk (About.[hash].js) for this route 21 | // which is 
lazy-loaded when the route is visited. 22 | component: () => import('../views/Home.vue') 23 | } 24 | ] 25 | }) 26 | 27 | router.beforeEach(function (to, from, next) { 28 | let cookie = CookieUtil.getCookie("isLogin"); 29 | if (to.path === "/Login") { 30 | if (cookie) { 31 | return next("/Home"); 32 | } else { 33 | return next(); 34 | } 35 | } else { 36 | if (cookie) { 37 | return next(); 38 | } else { 39 | return next("/Login"); 40 | } 41 | } 42 | }) 43 | 44 | export default router -------------------------------------------------------------------------------- /app/CloudStorageFront/src/views/Home.vue: -------------------------------------------------------------------------------- 1 | 49 | 50 | 183 | 184 | -------------------------------------------------------------------------------- /app/CloudStorageFront/src/views/Login.vue: -------------------------------------------------------------------------------- 1 | 30 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /app/CloudStorageFront/vite.config.js: -------------------------------------------------------------------------------- 1 | import {fileURLToPath, URL} from 'node:url' 2 | 3 | import {defineConfig} from 'vite' 4 | import vue from '@vitejs/plugin-vue' 5 | 6 | // https://vitejs.dev/config/ 7 | export default defineConfig({ 8 | plugins: [vue()], 9 | resolve: { 10 | alias: { 11 | '@': fileURLToPath(new URL('./src', import.meta.url)) 12 | } 13 | } 14 | }) 15 | -------------------------------------------------------------------------------- /deploy/DB/mysql/1_scheme.sql: -------------------------------------------------------------------------------- 1 | 2 | create database if not exists cloudStorageSystem; 3 | use cloudStorageSystem; 4 | 5 | drop table if exists FileMetaTable; 6 | create table FileMetaTable( 7 | creator varchar(20) NOT NULL, 8 | createGroup varchar(20) NOT NULL, 9 | name varchar(30) NOT NULL, 10 | description varchar(30) NOT NULL , /*set the uri from this file or folder*/ 11 | create_time timestamp default NOW(), 12 | authority varchar(3) NOT NULL DEFAULT 644, 13 | typeOf varchar(15) NOT NULL DEFAULT 'file', 14 | update_time timestamp NOT NULL DEFAULT NOW(), 15 | size bigint NOT NULL DEFAULT 0, 16 | isDir bool NOT NULL default false, 17 | delete_time timestamp, 18 | primary key(creator,name), 19 | index(name,createGroup) 20 | )ENGINE=myisam; 21 | 22 | drop table if exists UserGroup; 23 | 24 | create table UserGroup( 25 | groupName varchar(20), 26 | userName varchar(20), 27 | primary key(groupName,userName) 28 | )engine=innodb; 29 | 30 | /** 31 | 开放数据库root访问权限 32 | */ 33 | use mysql; 34 | update user set `Host`='%' where `User`='root'; 35 | flush privileges; 36 | -------------------------------------------------------------------------------- /deploy/DB/mysql/2_init.sql: -------------------------------------------------------------------------------- 1 | 2 | use cloudStorageSystem; 3 | insert into FileMetaTable values("xiyang","xiyang","hello","this is test file",NOW(),"644","file",NOW(),0,0,null); 4 | insert into usergroup values("xiyang","xiyang"); -------------------------------------------------------------------------------- /deploy/filebeat/conf/filebeat.yml: -------------------------------------------------------------------------------- 1 | filebeat.inputs: 2 | - type: log 3 | enabled: true 4 | paths: 5 | - /var/lib/docker/containers/*/*-json.log 6 | 7 | filebeat.config: 8 | modules: 9 | path: ${path.config}/modules.d/*.yml 10 | reload.enabled: false 11 | 12 | 
processors: 13 | - add_cloud_metadata: ~ 14 | - add_docker_metadata: ~ 15 | 16 | output.kafka: 17 | enabled: true 18 | hosts: ["kafka:9092"] 19 | #要提前创建topic 20 | topic: "cloudStorage-log" 21 | partition.hash: 22 | reachable_only: true 23 | compression: gzip 24 | max_message_bytes: 1000000 25 | required_acks: 1 26 | -------------------------------------------------------------------------------- /deploy/go-stash/etc/config.yaml: -------------------------------------------------------------------------------- 1 | Clusters: 2 | - Input: 3 | Kafka: 4 | Name: gostash 5 | Brokers: 6 | - "kafka:9092" 7 | Topics: 8 | - "cloudStorage-log" 9 | Group: pro 10 | Consumers: 16 11 | Filters: 12 | - Action: drop 13 | Conditions: 14 | - Key: k8s_container_name 15 | Value: "-rpc" 16 | Type: contains 17 | - Key: level 18 | Value: info 19 | Type: match 20 | Op: and 21 | - Action: remove_field 22 | Fields: 23 | # - message 24 | - _source 25 | - _type 26 | - _score 27 | - _id 28 | - "@version" 29 | - topic 30 | - index 31 | - beat 32 | - docker_container 33 | - offset 34 | - prospector 35 | - source 36 | - stream 37 | - "@metadata" 38 | - Action: transfer 39 | Field: message 40 | Target: data 41 | Output: 42 | ElasticSearch: 43 | Hosts: 44 | - "http://elasticsearch:9200" 45 | Index: "cloudStorage-{{yyyy-MM-dd}}" 46 | 47 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/Prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: prometheus 10 | name: Prometheus 11 | spec: 12 | ports: 13 | - name: "9090" 14 | port: 9090 15 | targetPort: 9090 16 | selector: 17 | io.kompose.service: prometheus 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/cloudstorage-cloudstoragesystem-networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | creationTimestamp: null 5 | name: cloudstorage-cloudstoragesystem 6 | spec: 7 | ingress: 8 | - from: 9 | - podSelector: 10 | matchLabels: 11 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 12 | podSelector: 13 | matchLabels: 14 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/elasticsearch-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: elasticsearch-claim0 7 | name: elasticsearch-claim0 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/elasticsearch-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | 
creationTimestamp: null 8 | labels: 9 | io.kompose.service: elasticsearch 10 | name: elasticsearch 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: elasticsearch 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert -f docker-compose-env.yaml 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: elasticsearch 27 | spec: 28 | containers: 29 | - env: 30 | - name: ES_JAVA_OPTS 31 | value: -Xms512m -Xmx512m 32 | - name: TZ 33 | value: Asia/Shanghai 34 | - name: discovery.type 35 | value: single-node 36 | image: elasticsearch:7.17.8 37 | name: elasticsearch 38 | ports: 39 | - containerPort: 9300 40 | resources: {} 41 | volumeMounts: 42 | - mountPath: /use/share/elasticsearch/data 43 | name: elasticsearch-claim0 44 | restartPolicy: Always 45 | volumes: 46 | - name: elasticsearch-claim0 47 | persistentVolumeClaim: 48 | claimName: elasticsearch-claim0 49 | status: {} 50 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/elasticsearch-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: elasticsearch 10 | name: elasticsearch 11 | spec: 12 | ports: 13 | - name: "9200" 14 | port: 9200 15 | targetPort: 9300 16 | - name: "9300" 17 | port: 9300 18 | targetPort: 9300 19 | selector: 20 | io.kompose.service: elasticsearch 21 | status: 22 | loadBalancer: {} 23 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/etcd-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: etcd 10 | name: etcd 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: etcd 16 | strategy: {} 17 | template: 18 | metadata: 19 | annotations: 20 | kompose.cmd: kompose convert -f docker-compose-env.yaml 21 | kompose.version: 1.28.0 (c4137012e) 22 | creationTimestamp: null 23 | labels: 24 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 25 | io.kompose.service: etcd 26 | spec: 27 | containers: 28 | - env: 29 | - name: ALLOW_NONE_AUTHENTICATION 30 | value: "yes" 31 | image: bitnami/etcd:3.5 32 | name: etcd 33 | ports: 34 | - containerPort: 2379 35 | resources: {} 36 | hostname: etcd 37 | restartPolicy: Always 38 | status: {} 39 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/etcd-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: etcd 10 | name: etcd 11 | spec: 12 | ports: 13 | - name: "2379" 14 | port: 2379 15 | targetPort: 2379 16 | selector: 17 | io.kompose.service: etcd 18 | status: 19 
| loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/filebeat-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: filebeat-claim0 7 | name: filebeat-claim0 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/filebeat-claim1-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: filebeat-claim1 7 | name: filebeat-claim1 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/filebeat-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: filebeat 10 | name: filebeat 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: filebeat 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert -f docker-compose-env.yaml 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: filebeat 27 | spec: 28 | containers: 29 | - command: 30 | - filebeat 31 | - -e 32 | - -strict.perms=false 33 | env: 34 | - name: TZ 35 | value: Asia/Shanghai 36 | image: elastic/filebeat:7.17.8 37 | name: filebeat 38 | resources: {} 39 | volumeMounts: 40 | - mountPath: /usr/share/filebeat/filebeat.yml 41 | name: filebeat-claim0 42 | - mountPath: /var/lib/docker/containers 43 | name: filebeat-claim1 44 | restartPolicy: Always 45 | volumes: 46 | - name: filebeat-claim0 47 | persistentVolumeClaim: 48 | claimName: filebeat-claim0 49 | - name: filebeat-claim1 50 | persistentVolumeClaim: 51 | claimName: filebeat-claim1 52 | status: {} 53 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/fileserverapi-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: fileserverapi 10 | name: fileserverapi 11 | spec: 12 | replicas: 3 13 | selector: 14 | matchLabels: 15 | io.kompose.service: fileserverapi 16 | strategy: {} 17 | template: 18 | metadata: 19 | annotations: 20 | kompose.cmd: kompose convert 21 | kompose.version: 1.28.0 (c4137012e) 22 | creationTimestamp: null 23 | labels: 24 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 25 | io.kompose.service: fileserverapi 26 | spec: 27 | containers: 28 | - image: jiangxiyang/fileserverapi:1.0 29 | name: 
fileserverapi 30 | ports: 31 | - containerPort: 8889 32 | resources: {} 33 | hostname: fileserverapi 34 | restartPolicy: Always 35 | status: {} 36 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/fileserverapi-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: fileserverapi 10 | name: fileserverapi 11 | spec: 12 | ports: 13 | - name: "8889" 14 | port: 8889 15 | targetPort: 8889 16 | selector: 17 | io.kompose.service: fileserverapi 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/fileserverrpc-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: fileserverrpc 10 | name: fileserverrpc 11 | spec: 12 | replicas: 3 13 | selector: 14 | matchLabels: 15 | io.kompose.service: fileserverrpc 16 | strategy: {} 17 | template: 18 | metadata: 19 | annotations: 20 | kompose.cmd: kompose convert 21 | kompose.version: 1.28.0 (c4137012e) 22 | creationTimestamp: null 23 | labels: 24 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 25 | io.kompose.service: fileserverrpc 26 | spec: 27 | containers: 28 | - image: jiangxiyang/fileserverrpc:1.0 29 | name: fileserverrpc 30 | ports: 31 | - containerPort: 8001 32 | resources: {} 33 | hostname: fileserverrpc 34 | restartPolicy: Always 35 | status: {} 36 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/fileserverrpc-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: fileserverrpc 10 | name: fileserverrpc 11 | spec: 12 | ports: 13 | - name: "8001" 14 | port: 8001 15 | targetPort: 8001 16 | selector: 17 | io.kompose.service: fileserverrpc 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/go-stash-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: go-stash-claim0 7 | name: go-stash-claim0 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/go-stash-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: go-stash 10 | name: go-stash 11 | spec: 12 | replicas: 1 13 | 
selector: 14 | matchLabels: 15 | io.kompose.service: go-stash 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert -f docker-compose-env.yaml 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: go-stash 27 | spec: 28 | containers: 29 | - env: 30 | - name: TZ 31 | value: Asia/Shanghai 32 | image: kevinwan/go-stash:1.0.8 33 | name: go-stash 34 | resources: {} 35 | volumeMounts: 36 | - mountPath: /app/etc 37 | name: go-stash-claim0 38 | restartPolicy: Always 39 | volumes: 40 | - name: go-stash-claim0 41 | persistentVolumeClaim: 42 | claimName: go-stash-claim0 43 | status: {} 44 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/grafana-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: grafana-claim0 7 | name: grafana-claim0 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/grafana-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: grafana 10 | name: grafana 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: grafana 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert -f docker-compose-env.yaml 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: grafana 27 | spec: 28 | containers: 29 | - env: 30 | - name: TZ 31 | value: Asia/Shanghai 32 | image: grafana/grafana:9.3.2 33 | name: grafana 34 | ports: 35 | - containerPort: 3001 36 | resources: {} 37 | volumeMounts: 38 | - mountPath: /var/lib/grafana 39 | name: grafana-claim0 40 | hostname: grafana 41 | restartPolicy: Always 42 | volumes: 43 | - name: grafana-claim0 44 | persistentVolumeClaim: 45 | claimName: grafana-claim0 46 | status: {} 47 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: grafana 10 | name: grafana 11 | spec: 12 | ports: 13 | - name: "3001" 14 | port: 3001 15 | targetPort: 3001 16 | selector: 17 | io.kompose.service: grafana 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/jaeger-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | 
annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: jaeger 10 | name: jaeger 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: jaeger 16 | strategy: {} 17 | template: 18 | metadata: 19 | annotations: 20 | kompose.cmd: kompose convert -f docker-compose-env.yaml 21 | kompose.version: 1.28.0 (c4137012e) 22 | creationTimestamp: null 23 | labels: 24 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 25 | io.kompose.service: jaeger 26 | spec: 27 | containers: 28 | - env: 29 | - name: ES_SERVER_URLS 30 | value: http://elasticsearch:9200 31 | - name: LOG_LEVEL 32 | value: debug 33 | - name: SPAN_STORAGE_TYPE 34 | value: elasticsearch 35 | image: jaegertracing/all-in-one:1.41.0 36 | name: jaeger 37 | ports: 38 | - containerPort: 5775 39 | protocol: UDP 40 | - containerPort: 6831 41 | protocol: UDP 42 | - containerPort: 6832 43 | protocol: UDP 44 | - containerPort: 5778 45 | - containerPort: 16686 46 | - containerPort: 14268 47 | - containerPort: 9411 48 | resources: {} 49 | restartPolicy: Always 50 | status: {} 51 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/jaeger-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: jaeger 10 | name: jaeger 11 | spec: 12 | ports: 13 | - name: "5775" 14 | port: 5775 15 | protocol: UDP 16 | targetPort: 5775 17 | - name: "6831" 18 | port: 6831 19 | protocol: UDP 20 | targetPort: 6831 21 | - name: "6832" 22 | port: 6832 23 | protocol: UDP 24 | targetPort: 6832 25 | - name: "5778" 26 | port: 5778 27 | targetPort: 5778 28 | - name: "16686" 29 | port: 16686 30 | targetPort: 16686 31 | - name: "14268" 32 | port: 14268 33 | targetPort: 14268 34 | - name: "9411" 35 | port: 9411 36 | targetPort: 9411 37 | selector: 38 | io.kompose.service: jaeger 39 | status: 40 | loadBalancer: {} 41 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/kafka-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: kafka-claim0 7 | name: kafka-claim0 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/kafka-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: kafka 10 | name: kafka 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: kafka 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert -f docker-compose-env.yaml 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | 
io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: kafka 27 | spec: 28 | containers: 29 | - env: 30 | - name: ALLOW_PLAINTEXT_LISTENER 31 | value: "yes" 32 | - name: KAFKA_ADVERTISED_HOST_NAME 33 | value: kafka 34 | - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE 35 | value: "false" 36 | - name: KAFKA_ZOOKEEPER_CONNECT 37 | value: zookeeper:2181 38 | - name: TZ 39 | value: Asia/Shanghai 40 | image: bitnami/kafka:3.3.1 41 | name: kafka 42 | ports: 43 | - containerPort: 9092 44 | resources: {} 45 | volumeMounts: 46 | - mountPath: /var/run/docker.sock 47 | name: kafka-claim0 48 | restartPolicy: Always 49 | volumes: 50 | - name: kafka-claim0 51 | persistentVolumeClaim: 52 | claimName: kafka-claim0 53 | status: {} 54 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/kafka-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: kafka 10 | name: kafka 11 | spec: 12 | ports: 13 | - name: "9092" 14 | port: 9092 15 | targetPort: 9092 16 | selector: 17 | io.kompose.service: kafka 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/kibana-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: kibana 10 | name: kibana 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: kibana 16 | strategy: {} 17 | template: 18 | metadata: 19 | annotations: 20 | kompose.cmd: kompose convert -f docker-compose-env.yaml 21 | kompose.version: 1.28.0 (c4137012e) 22 | creationTimestamp: null 23 | labels: 24 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 25 | io.kompose.service: kibana 26 | spec: 27 | containers: 28 | - env: 29 | - name: TZ 30 | value: Asia/Shanghai 31 | image: kibana:7.17.8 32 | name: kibana 33 | ports: 34 | - containerPort: 5601 35 | resources: {} 36 | restartPolicy: Always 37 | status: {} 38 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/kibana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: kibana 10 | name: kibana 11 | spec: 12 | ports: 13 | - name: "5601" 14 | port: 5601 15 | targetPort: 5601 16 | selector: 17 | io.kompose.service: kibana 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/mysql-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: mysql-claim0 7 | name: mysql-claim0 8 | spec: 9 | accessModes: 10 | -
ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/mysql-claim1-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: mysql-claim1 7 | name: mysql-claim1 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/mysql-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: mysql 10 | name: mysql 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: mysql 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert -f docker-compose-env.yaml 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: mysql 27 | spec: 28 | containers: 29 | - args: 30 | - --default-authentication-plugin=mysql_native_password 31 | - --character-set-server=utf8mb4 32 | - --collation-server=utf8mb4_general_ci 33 | - --explicit_defaults_for_timestamp=true 34 | - --lower_case_table_names=1 35 | env: 36 | - name: MYSQL_ROOT_PASSWORD 37 | value: root 38 | - name: TZ 39 | value: Asia/Shanghai 40 | image: mysql/mysql-server:8.0.28 41 | name: mysql 42 | ports: 43 | - containerPort: 3306 44 | resources: {} 45 | securityContext: 46 | privileged: true 47 | volumeMounts: 48 | - mountPath: /docker-entrypoint-initdb.d 49 | name: mysql-claim0 50 | - mountPath: /var/lib/mysql 51 | name: mysql-claim1 52 | restartPolicy: Always 53 | volumes: 54 | - name: mysql-claim0 55 | persistentVolumeClaim: 56 | claimName: mysql-claim0 57 | - name: mysql-claim1 58 | persistentVolumeClaim: 59 | claimName: mysql-claim1 60 | status: {} 61 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/mysql-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: mysql 10 | name: mysql 11 | spec: 12 | ports: 13 | - name: "3306" 14 | port: 3306 15 | targetPort: 3306 16 | selector: 17 | io.kompose.service: mysql 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/nginx-gateway-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: nginx-gateway-claim0 7 | name: nginx-gateway-claim0 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 
15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/nginx-gateway-claim1-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: nginx-gateway-claim1 7 | name: nginx-gateway-claim1 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/nginx-gateway-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: nginx-gateway 10 | name: nginx-gateway 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: nginx-gateway 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: nginx-gateway 27 | spec: 28 | containers: 29 | - env: 30 | - name: TZ 31 | value: Asia/Shanghai 32 | image: nginx:1.21.5 33 | name: nginx-gateway 34 | ports: 35 | - containerPort: 8081 36 | resources: {} 37 | securityContext: 38 | privileged: true 39 | volumeMounts: 40 | - mountPath: /etc/nginx/conf.d 41 | name: nginx-gateway-claim0 42 | - mountPath: /var/log/nginx 43 | name: nginx-gateway-claim1 44 | restartPolicy: Always 45 | volumes: 46 | - name: nginx-gateway-claim0 47 | persistentVolumeClaim: 48 | claimName: nginx-gateway-claim0 49 | - name: nginx-gateway-claim1 50 | persistentVolumeClaim: 51 | claimName: nginx-gateway-claim1 52 | status: {} 53 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/nginx-gateway-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: nginx-gateway 10 | name: nginx-gateway 11 | spec: 12 | ports: 13 | - name: "8888" 14 | port: 8888 15 | targetPort: 8081 16 | selector: 17 | io.kompose.service: nginx-gateway 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/prometheus-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: prometheus-claim0 7 | name: prometheus-claim0 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/prometheus-claim1-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: 
null 5 | labels: 6 | io.kompose.service: prometheus-claim1 7 | name: prometheus-claim1 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/prometheus-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: prometheus 10 | name: prometheus 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: prometheus 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert -f docker-compose-env.yaml 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: prometheus 27 | spec: 28 | containers: 29 | - args: 30 | - --config.file=/etc/prometheus/prometheus.yml 31 | - --storage.tsdb.path=/prometheus 32 | env: 33 | - name: TZ 34 | value: Asia/Shanghai 35 | image: bitnami/prometheus:2.41.0 36 | name: prometheus 37 | ports: 38 | - containerPort: 9090 39 | resources: {} 40 | volumeMounts: 41 | - mountPath: /etc/prometheus/prometheus.yml 42 | name: prometheus-claim0 43 | - mountPath: /prometheus 44 | name: prometheus-claim1 45 | restartPolicy: Always 46 | volumes: 47 | - name: prometheus-claim0 48 | persistentVolumeClaim: 49 | claimName: prometheus-claim0 50 | - name: prometheus-claim1 51 | persistentVolumeClaim: 52 | claimName: prometheus-claim1 53 | status: {} 54 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/redis-claim0-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | io.kompose.service: redis-claim0 7 | name: redis-claim0 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Mi 14 | status: {} 15 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/redis-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: redis 10 | name: redis 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: redis 16 | strategy: 17 | type: Recreate 18 | template: 19 | metadata: 20 | annotations: 21 | kompose.cmd: kompose convert -f docker-compose-env.yaml 22 | kompose.version: 1.28.0 (c4137012e) 23 | creationTimestamp: null 24 | labels: 25 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 26 | io.kompose.service: redis 27 | spec: 28 | containers: 29 | - args: 30 | - redis-server 31 | - --requirepass 32 | - root 33 | - --appendonly 34 | - "yes" 35 | image: redis:6.2.5 36 | name: redis 37 | ports: 38 | - containerPort: 6379 39 | resources: {} 40 | securityContext: 41 | privileged: true 42 | volumeMounts: 43 | - mountPath: /data 44 | name: 
redis-claim0 45 | restartPolicy: Always 46 | volumes: 47 | - name: redis-claim0 48 | persistentVolumeClaim: 49 | claimName: redis-claim0 50 | status: {} 51 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/redis-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: redis 10 | name: redis 11 | spec: 12 | ports: 13 | - name: "6379" 14 | port: 6379 15 | targetPort: 6379 16 | selector: 17 | io.kompose.service: redis 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/zookeeper-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: zookeeper 10 | name: zookeeper 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | io.kompose.service: zookeeper 16 | strategy: {} 17 | template: 18 | metadata: 19 | annotations: 20 | kompose.cmd: kompose convert -f docker-compose-env.yaml 21 | kompose.version: 1.28.0 (c4137012e) 22 | creationTimestamp: null 23 | labels: 24 | io.kompose.network/cloudstorage-cloudstoragesystem: "true" 25 | io.kompose.service: zookeeper 26 | spec: 27 | containers: 28 | - env: 29 | - name: TZ 30 | value: Asia/Shanghai 31 | image: zookeeper:3.8.0 32 | name: zookeeper 33 | ports: 34 | - containerPort: 2181 35 | resources: {} 36 | restartPolicy: Always 37 | status: {} 38 | -------------------------------------------------------------------------------- /deploy/kubernetes/kompose/zookeeper-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert -f docker-compose-env.yaml 6 | kompose.version: 1.28.0 (c4137012e) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: zookeeper 10 | name: zookeeper 11 | spec: 12 | ports: 13 | - name: "2181" 14 | port: 2181 15 | targetPort: 2181 16 | selector: 17 | io.kompose.service: zookeeper 18 | status: 19 | loadBalancer: {} 20 | -------------------------------------------------------------------------------- /deploy/kubernetes/services/fileServer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: fs-deployment 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchExpressions: 9 | - key: "fileserver" 10 | operator: "In" 11 | values: 12 | - "ready" 13 | template: 14 | metadata: 15 | name: fileserver 16 | labels: 17 | fileserver: ready 18 | spec: 19 | restartPolicy: Always 20 | containers: 21 | - image: "jiangxiyang/fileserverapi:latest" 22 | imagePullPolicy: IfNotPresent 23 | name: fileserver-api 24 | ports: [ { containerPort: 8889 } ] 25 | resources: 26 | limits: 27 | cpu: 8 28 | memory: 4Gi 29 | requests: 30 | cpu: 4 31 | memory: 100Mi 32 | - image: "jiangxiyang/fileserverrpc:latest" 33 | imagePullPolicy: IfNotPresent 34 | name: fileserver-rpc 35 | ports: [ { containerPort: 8001 } ] 36 | resources: 37 | limits: 38 | cpu: 8 39 | memory: 4Gi 40
| requests: 41 | cpu: 4 42 | memory: 100Mi 43 | readinessProbe: 44 | tcpSocket: 45 | port: 8001 46 | initialDelaySeconds: 5 47 | periodSeconds: 10 48 | timeoutSeconds: 3 49 | 50 | --- 51 | 52 | apiVersion: v1 53 | kind: Service 54 | metadata: 55 | name: file-service 56 | spec: 57 | selector: 58 | fileserver: ready 59 | ## currently exposed as a NodePort for the internal environment 60 | type: NodePort 61 | ports: 62 | - port: 8889 63 | 64 | -------------------------------------------------------------------------------- /deploy/nginx/conf.d/cloudStorage-gateway.conf: -------------------------------------------------------------------------------- 1 | server{ 2 | listen 8081; 3 | access_log /var/log/nginx/cloudstorage.com_access.log; 4 | error_log /var/log/nginx/cloudstorage.com_error.log; 5 | 6 | location ~ /file/ { 7 | proxy_set_header Host $http_host; 8 | proxy_set_header X-Real-IP $remote_addr; 9 | proxy_set_header REMOTE-HOST $remote_addr; 10 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 11 | proxy_pass http://fileserverapi:8889; 12 | } 13 | 14 | } 15 | -------------------------------------------------------------------------------- /deploy/prometheus/server/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s 3 | external_labels: 4 | monitor: 'codelab-monitor' 5 | scrape_configs: 6 | - job_name: 'prometheus' 7 | scrape_interval: 5s 8 | static_configs: 9 | - targets: ['127.0.0.1:9090'] 10 | 11 | -------------------------------------------------------------------------------- /docker-compose-env.yaml: -------------------------------------------------------------------------------- 1 | # collect the log files created by the servers and the nginx gateway 2 | ## elasticsearch version: 7.17.8 3 | ## kibana version: 7.17.8 4 | ## filebeat version: 7.17.8 5 | ## zookeeper version: 3.8.0 6 | ## kafka version: 3.3.1 7 | ## go-stash version: 1.0.8 8 | ## Prometheus version: 2.41.0 9 | ## grafana version: 9.3.2 10 | ## jaeger version: 1.41.0 11 | # log collection containers 12 | version: "3" 13 | services: 14 | filebeat: 15 | image: elastic/filebeat:7.17.8 16 | container_name: filebeat 17 | environment: 18 | TZ: Asia/Shanghai 19 | restart: always 20 | user: root 21 | entrypoint: "filebeat -e -strict.perms=false" # work around config file permission issues 22 | volumes: 23 | - ./deploy/filebeat/conf/filebeat.yml:/usr/share/filebeat/filebeat.yml 24 | - /var/lib/docker/containers:/var/lib/docker/containers 25 | networks: 26 | - cloudStorageSystem 27 | depends_on: 28 | - kafka 29 | go-stash: 30 | image: kevinwan/go-stash:1.0.8 31 | container_name: go-stash 32 | environment: 33 | - TZ=Asia/Shanghai 34 | restart: always 35 | volumes: 36 | - ./deploy/go-stash/etc:/app/etc 37 | networks: 38 | - cloudStorageSystem 39 | depends_on: 40 | - elasticsearch 41 | - kafka 42 | 43 | elasticsearch: 44 | image: elasticsearch:7.17.8 45 | container_name: elasticsearch 46 | environment: 47 | - discovery.type=single-node 48 | - "ES_JAVA_OPTS=-Xms512m -Xmx512m" 49 | - TZ=Asia/Shanghai 50 | volumes: 51 | - ./data/elasticsearch/data:/usr/share/elasticsearch/data 52 | restart: always 53 | ports: 54 | - 9200:9200 55 | - 9300:9300 56 | networks: 57 | - cloudStorageSystem 58 | kibana: 59 | image: kibana:7.17.8 60 | container_name: kibana 61 | environment: 62 | - ELASTICSEARCH_HOSTS=http://elasticsearch:9200 63 | - TZ=Asia/Shanghai 64 | restart: always 65 | networks: 66 | - cloudStorageSystem 67 | ports: 68 | - 5601:5601 69 | depends_on: 70 | - elasticsearch 71 | zookeeper: 72
image: zookeeper:3.8.0 73 | container_name: zookeeper 74 | environment: 75 | TZ: Asia/Shanghai 76 | restart: always 77 | ports: 78 | - 2181:2181 79 | networks: 80 | - cloudStorageSystem 81 | kafka: 82 | image: bitnami/kafka:3.3.1 83 | container_name: kafka 84 | ports: 85 | - 9092:9092 86 | environment: 87 | - ALLOW_PLAINTEXT_LISTENER=yes 88 | - KAFKA_ADVERTISED_HOST_NAME=kafka 89 | - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 90 | - KAFKA_AUTO_CREATE_TOPICS_ENABLE=false 91 | - TZ=Asia/Shanghai 92 | restart: always 93 | volumes: 94 | - /var/run/docker.sock:/var/run/docker.sock 95 | networks: 96 | - cloudStorageSystem 97 | depends_on: 98 | - zookeeper 99 | # Prometheus 100 | Prometheus: 101 | image: bitnami/prometheus:2.41.0 102 | container_name: Prometheus 103 | environment: 104 | TZ: Asia/Shanghai 105 | user: root 106 | volumes: 107 | - ./deploy/prometheus/server/prometheus.yml:/etc/prometheus/prometheus.yml 108 | - ./data/prometheus/data:/prometheus 109 | command: 110 | - '--config.file=/etc/prometheus/prometheus.yml' 111 | - '--storage.tsdb.path=/prometheus' 112 | restart: always 113 | ports: 114 | - 9090:9090 115 | networks: 116 | - cloudStorageSystem 117 | grafana: 118 | image: grafana/grafana:9.3.2 119 | container_name: grafana 120 | hostname: grafana 121 | environment: 122 | TZ: Asia/Shanghai 123 | user: root 124 | restart: always 125 | volumes: 126 | - ./data/grafana/data:/var/lib/grafana 127 | ports: 128 | - 3001:3001 129 | networks: 130 | - cloudStorageSystem 131 | # link tracing 132 | jaeger: 133 | image: jaegertracing/all-in-one:1.41.0 134 | container_name: jaeger 135 | restart: always 136 | ports: 137 | - "5775:5775/udp" 138 | - "6831:6831/udp" 139 | - "6832:6832/udp" 140 | - "5778:5778" 141 | - "16686:16686" 142 | - "14268:14268" 143 | - "9411:9411" 144 | environment: 145 | - SPAN_STORAGE_TYPE=elasticsearch 146 | - ES_SERVER_URLS=http://elasticsearch:9200 147 | - LOG_LEVEL=debug 148 | networks: 149 | - cloudStorageSystem 150 | etcd: 151 | image: bitnami/etcd:3.5 152 | container_name: etcd 153 | hostname: etcd 154 | restart: always 155 | environment: 156 | - ALLOW_NONE_AUTHENTICATION=yes 157 | ports: 158 | - "2379:2379" 159 | networks: 160 | - cloudStorageSystem 161 | mysql: 162 | image: mysql/mysql-server:8.0.28 163 | restart: always 164 | container_name: mysql 165 | ports: 166 | - "3306:3306" 167 | environment: 168 | MYSQL_ROOT_PASSWORD: root 169 | TZ: Asia/Shanghai 170 | volumes: 171 | - ./deploy/DB/mysql/:/docker-entrypoint-initdb.d/ 172 | - ./data/mysql/data:/var/lib/mysql 173 | command: 174 | --default-authentication-plugin=mysql_native_password 175 | --character-set-server=utf8mb4 176 | --collation-server=utf8mb4_general_ci 177 | --explicit_defaults_for_timestamp=true 178 | --lower_case_table_names=1 179 | privileged: true 180 | networks: 181 | - cloudStorageSystem 182 | redis: 183 | image: redis:6.2.5 184 | container_name: redis 185 | ports: 186 | - "6379:6379" 187 | restart: always 188 | volumes: 189 | - ./data/redis/data:/data:rw 190 | command: "redis-server --requirepass root --appendonly yes" 191 | privileged: true 192 | networks: 193 | - cloudStorageSystem 194 | 195 | networks: 196 | cloudStorageSystem: 197 | driver: bridge 198 | ipam: 199 | config: 200 | - subnet: 172.20.0.0/16 201 | 202 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | nginx-gateway: 4 | image: nginx:1.21.5 5 | container_name:
nginx_gateway 6 | restart: always 7 | privileged: true 8 | environment: 9 | - TZ=Asia/Shanghai 10 | ports: 11 | - 8888:8081 12 | volumes: 13 | - ./deploy/nginx/conf.d:/etc/nginx/conf.d 14 | - ./data/nginx/log:/var/log/nginx 15 | networks: 16 | - cloudStorageSystem 17 | depends_on: 18 | - fileserverapi 19 | fileserverrpc: 20 | image: jiangxiyang/fileserverrpc:1.0 21 | container_name: fileserverrpc 22 | restart: always 23 | hostname: fileserverrpc 24 | ports: 25 | - "8001:8001" 26 | networks: 27 | - cloudStorageSystem 28 | deploy: 29 | mode: replicated 30 | replicas: 3 31 | 32 | fileserverapi: 33 | image: jiangxiyang/fileserverapi:1.0 34 | container_name: fileserverapi 35 | restart: always 36 | hostname: fileserverapi 37 | ports: 38 | - "8889:8889" 39 | depends_on: 40 | - fileserverrpc 41 | networks: 42 | - cloudStorageSystem 43 | deploy: 44 | mode: replicated 45 | replicas: 3 46 | 47 | 48 | networks: 49 | cloudStorageSystem: 50 | driver: bridge 51 | ipam: 52 | config: 53 | - subnet: 172.20.0.0/16 54 | --------------------------------------------------------------------------------
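The kompose-generated manifests under deploy/kubernetes/kompose/ are standalone files, and nothing in the repository applies them as a group. Below is a minimal sketch of a kustomization that could sit in that directory; the kustomization.yaml file, the cloud-storage namespace, and the abbreviated resource list are illustrative assumptions, not part of the repository.

```yaml
# deploy/kubernetes/kompose/kustomization.yaml (hypothetical; not in the repository)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: cloud-storage   # assumed namespace; the generated manifests do not set one
resources:
  # middleware needed by the file service path
  - etcd-deployment.yaml
  - etcd-service.yaml
  - mysql-claim0-persistentvolumeclaim.yaml
  - mysql-claim1-persistentvolumeclaim.yaml
  - mysql-deployment.yaml
  - mysql-service.yaml
  - redis-claim0-persistentvolumeclaim.yaml
  - redis-deployment.yaml
  - redis-service.yaml
  # application services and gateway
  - fileserverrpc-deployment.yaml
  - fileserverrpc-service.yaml
  - fileserverapi-deployment.yaml
  - fileserverapi-service.yaml
  - nginx-gateway-claim0-persistentvolumeclaim.yaml
  - nginx-gateway-claim1-persistentvolumeclaim.yaml
  - nginx-gateway-deployment.yaml
  - nginx-gateway-service.yaml
  # the observability manifests (elasticsearch, filebeat, go-stash, grafana,
  # jaeger, kafka, kibana, prometheus, zookeeper) are added the same way
```

With such a file in place, the stack can be applied and inspected as one unit, for example with `kubectl apply -k deploy/kubernetes/kompose/` followed by `kubectl get pods -l io.kompose.service=fileserverapi`.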
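The generated fileserverapi and fileserverrpc Deployments run three replicas each but declare no probes and empty resources, so traffic can reach a pod before the service inside it is ready. The snippet below is a sketch of a container entry that could be merged into fileserverapi-deployment.yaml; the TCP probe avoids guessing at an HTTP health route, and the probe timings and resource figures are illustrative values, not numbers taken from the repository.

```yaml
# Sketch: container entry for fileserverapi-deployment.yaml; probe timings and
# resource values are illustrative assumptions, not taken from this repository.
containers:
  - image: jiangxiyang/fileserverapi:1.0
    name: fileserverapi
    ports:
      - containerPort: 8889
    readinessProbe:
      tcpSocket:
        port: 8889
      initialDelaySeconds: 5
      periodSeconds: 10
    resources:
      requests:
        cpu: 250m
        memory: 128Mi
      limits:
        cpu: "1"
        memory: 512Mi
```

The same shape applies to fileserverrpc with port 8001.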
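deploy/prometheus/server/prometheus.yml only scrapes Prometheus itself, so nothing in this configuration collects metrics from the file server services. The job below is a hedged sketch of an additional entry under the existing scrape_configs list; it assumes the services expose a Prometheus metrics listener, and the port 4001 and the /metrics path are placeholders rather than values found in the service configs in this repository.

```yaml
# Additional scrape job for deploy/prometheus/server/prometheus.yml (sketch).
# Assumes fileserverapi/fileserverrpc expose a metrics endpoint; port 4001 and
# the /metrics path are placeholders, not values taken from this repository.
scrape_configs:
  - job_name: 'fileserver'
    scrape_interval: 5s
    metrics_path: /metrics
    static_configs:
      - targets: ['fileserverapi:4001', 'fileserverrpc:4001']
        labels:
          app: cloudStorage
```

On the docker-compose network these hostnames resolve directly; under Kubernetes the targets would instead be the Service DNS names, or a kubernetes_sd_configs block.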