├── .dockerignore ├── .gitignore ├── .travis.yml ├── Makefile ├── README.md ├── Untitled Diagram.xml ├── api ├── dashboard.yaml ├── restserver_v1.md ├── restserver_v1.yaml └── restserver_v2.md ├── docker-compose.yml ├── docs ├── arch.md ├── dashboard.md ├── db.md ├── deployment.md ├── imgs │ ├── architecture.png │ ├── dashboard_add_cluster.png │ ├── dashboard_add_host.png │ ├── dashboard_clusters.png │ ├── dashboard_hosts.png │ ├── dashboard_main.png │ ├── dashboard_status.png │ ├── deployment.graffle │ ├── deployment_topo.png │ ├── logo.png │ └── scenario.png ├── production_config.md └── scenario.md ├── nginx └── nginx.conf ├── scripts ├── header.sh ├── redeploy.sh ├── setup.sh ├── start.sh ├── start_cadvisor.sh ├── start_mongoexpress.sh ├── start_netdata.sh ├── stop.sh └── update_docker_images.sh ├── src ├── Dockerfile-dashboard ├── Dockerfile-restserver ├── Dockerfile-watchdog ├── __init__.py ├── _compose_files │ ├── local │ │ ├── cluster-4.yml │ │ ├── cluster-6.yml │ │ └── peer-pbft.yml │ └── syslog │ │ ├── cluster-4.yml │ │ ├── cluster-6.yml │ │ └── peer-pbft.yml ├── agent │ ├── __init__.py │ └── docker_swarm.py ├── common │ ├── __init__.py │ ├── db.py │ ├── log.py │ ├── response.py │ └── utils.py ├── config.py ├── dashboard.py ├── modules │ ├── __init__.py │ ├── cluster.py │ ├── host.py │ ├── scheduler.py │ └── stat.py ├── requirements.txt ├── resources │ ├── __init__.py │ ├── cluster_api.py │ ├── cluster_view.py │ ├── host_api.py │ ├── host_view.py │ ├── index.py │ └── stat.py ├── restserver.py ├── static │ ├── css │ │ ├── bootstrap-table.min.css │ │ ├── bootstrap.min.css │ │ ├── dashboard.css │ │ ├── dataTables.bootstrap.min.css │ │ ├── jquery.dataTables.min.css │ │ └── paginate.css │ ├── fonts │ │ ├── glyphicons-halflings-regular.eot │ │ ├── glyphicons-halflings-regular.svg │ │ ├── glyphicons-halflings-regular.ttf │ │ ├── glyphicons-halflings-regular.woff │ │ └── glyphicons-halflings-regular.woff2 │ ├── img │ │ └── favicon.ico │ └── js │ │ ├── 
bootbox.min.js │ │ ├── bootstrap-notify.min.js │ │ ├── bootstrap-table-zh-CN.min.js │ │ ├── bootstrap-table.min.js │ │ ├── bootstrap.min.js │ │ ├── dataTables.bootstrap.min.js │ │ ├── highcharts-more.js │ │ ├── highcharts.js │ │ ├── ie10-viewport-bug-workaround.js │ │ ├── jquery-2.2.3.min.js │ │ ├── jquery.dataTables.min.js │ │ ├── script.js │ │ ├── solid-gauge.js │ │ ├── tether.min.js │ │ └── validator.js ├── templates │ ├── 404.html │ ├── 500.html │ ├── about.html │ ├── cluster_info.html │ ├── clusters.html │ ├── host_info.html │ ├── hosts.html │ ├── index.html │ ├── layout.html │ ├── macros.html │ ├── stat.html │ └── test.html ├── version.py └── watchdog.py ├── test ├── function_verify.py └── user_operations.sh └── tox.ini /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .tox 3 | test* 4 | Dockerfile* 5 | *~ 6 | screenshots 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### Node template 2 | # Logs 3 | logs 4 | *.log 5 | npm-debug.log* 6 | 7 | # Runtime data 8 | pids 9 | *.pid 10 | *.seed 11 | 12 | # Directory for instrumented libs generated by jscoverage/JSCover 13 | lib-cov 14 | 15 | # Coverage directory used by tools like istanbul 16 | coverage 17 | 18 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 19 | .grunt 20 | 21 | # node-waf configuration 22 | .lock-wscript 23 | 24 | # Compiled binary addons (http://nodejs.org/api/addons.html) 25 | build/Release 26 | 27 | # Dependency directories 28 | node_modules 29 | jspm_packages 30 | 31 | # Optional npm cache directory 32 | .npm 33 | 34 | # Optional REPL history 35 | .node_repl_history 36 | ### JetBrains template 37 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 38 | # Reference: 
https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 39 | 40 | # User-specific stuff: 41 | .idea/workspace.xml 42 | .idea/tasks.xml 43 | .idea/dictionaries 44 | .idea/vcs.xml 45 | .idea/jsLibraryMappings.xml 46 | .idea/* 47 | 48 | # Sensitive or high-churn files: 49 | .idea/dataSources.ids 50 | .idea/dataSources.xml 51 | .idea/dataSources.local.xml 52 | .idea/sqlDataSources.xml 53 | .idea/dynamic.xml 54 | .idea/uiDesigner.xml 55 | 56 | # Gradle: 57 | .idea/gradle.xml 58 | .idea/libraries 59 | 60 | # Mongo Explorer plugin: 61 | .idea/mongoSettings.xml 62 | 63 | ## File-based project format: 64 | *.iws 65 | 66 | ## Plugin-specific files: 67 | 68 | # IntelliJ 69 | /out/ 70 | 71 | # mpeltonen/sbt-idea plugin 72 | .idea_modules/ 73 | 74 | # JIRA plugin 75 | atlassian-ide-plugin.xml 76 | 77 | # Crashlytics plugin (for Android Studio and IntelliJ) 78 | com_crashlytics_export_strings.xml 79 | crashlytics.properties 80 | crashlytics-build.properties 81 | fabric.properties 82 | ### Go template 83 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 84 | *.o 85 | *.a 86 | *.so 87 | 88 | # Folders 89 | _obj 90 | _test 91 | 92 | # Architecture specific extensions/prefixes 93 | *.[568vq] 94 | [568vq].out 95 | 96 | *.cgo1.go 97 | *.cgo2.c 98 | _cgo_defun.c 99 | _cgo_gotypes.go 100 | _cgo_export.* 101 | 102 | _testmain.go 103 | 104 | *.exe 105 | *.test 106 | *.prof 107 | ### Python template 108 | # Byte-compiled / optimized / DLL files 109 | __pycache__/ 110 | *.py[cod] 111 | *$py.class 112 | 113 | # C extensions 114 | *.so 115 | 116 | # Distribution / packaging 117 | .Python 118 | env/ 119 | build/ 120 | develop-eggs/ 121 | dist/ 122 | downloads/ 123 | eggs/ 124 | .eggs/ 125 | lib/ 126 | lib64/ 127 | parts/ 128 | sdist/ 129 | var/ 130 | *.egg-info/ 131 | .installed.cfg 132 | *.egg 133 | 134 | # PyInstaller 135 | # Usually these files are written by a python script from a template 136 | # before PyInstaller builds the exe, so as to inject date/other 
infos into it. 137 | *.manifest 138 | *.spec 139 | 140 | # Installer logs 141 | pip-log.txt 142 | pip-delete-this-directory.txt 143 | 144 | # Unit test / coverage reports 145 | htmlcov/ 146 | .tox/ 147 | .coverage 148 | .coverage.* 149 | .cache 150 | nosetests.xml 151 | coverage.xml 152 | *,cover 153 | .hypothesis/ 154 | 155 | # Translations 156 | *.mo 157 | *.pot 158 | 159 | # Django stuff: 160 | *.log 161 | local_settings.py 162 | 163 | # Flask instance folder 164 | instance/ 165 | 166 | # Scrapy stuff: 167 | .scrapy 168 | 169 | # Sphinx documentation 170 | docs/_build/ 171 | 172 | # PyBuilder 173 | target/ 174 | 175 | # IPython Notebook 176 | .ipynb_checkpoints 177 | 178 | # pyenv 179 | .python-version 180 | 181 | # celery beat schedule file 182 | celerybeat-schedule 183 | 184 | # dotenv 185 | .env 186 | 187 | # virtualenv 188 | venv/ 189 | ENV/ 190 | 191 | # Spyder project settings 192 | .spyderproject 193 | 194 | # Rope project settings 195 | .ropeproject 196 | 197 | # Created by .ignore support plugin (hsz.mobi) 198 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | language: python 3 | python: 4 | - "3.5" 5 | env: 6 | #- TOX_ENV=py35 7 | - TOX_ENV=flake8 8 | install: 9 | - pip install tox 10 | script: 11 | - tox -e $TOX_ENV 12 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: \ 2 | all \ 3 | check \ 4 | clean \ 5 | log \ 6 | logs \ 7 | redeploy \ 8 | restart \ 9 | setup \ 10 | 11 | all: check 12 | 13 | check: 14 | tox 15 | 16 | clean: 17 | rm -rf .tox 18 | 19 | # Use like "make log service=dashboard" 20 | log: 21 | docker-compose logs -f ${service} --tail=100 22 | 23 | logs: 24 | docker-compose logs -f --tail=100 25 | 26 | # Use like "make redeploy service=dashboard" 27 | 
redeploy: 28 | bash scripts/redeploy.sh ${service} 29 | 30 | start: 31 | bash scripts/start.sh 32 | 33 | stop: 34 | bash scripts/stop.sh 35 | 36 | restart: stop start 37 | 38 | setup: 39 | bash scripts/setup.sh 40 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Cello](docs/imgs/logo.png) 2 | 3 | [![Build Status](https://travis-ci.org/yeasy/cello.svg?branch=dev)](https://travis-ci.org/yeasy/cello) 4 | 5 | **The project has been accepted by Hyperledger Community as [Cello](https://github.com/hyperledger/cello).** 6 | 7 | Platform to provide Blockchain as a Service! 8 | 9 | Using Cello, we can 10 | 11 | * Provision customizable Blockchains instantly, e.g., a 6-node chain using PBFT consensus. 12 | * Maintain a pool of running blockchains healthy with no manual operations. 13 | * Check the system status, scale the chain numbers, change resources... through a dashboard. 14 | 15 | ![Typical Scenario](docs/imgs/scenario.png) 16 | 17 | You can also find more [scenarios](docs/scenario.md). 18 | 19 | ## Features 20 | 21 | * Manage the lifecycle of blockchains, e.g., create/delete/keep health automatically. 22 | * Response nearly instantly, even with hundreds of chains, or nodes. 23 | * Support customized (e.g., size, consensus) blockchains request, currently we support [hyperledger fabric](https://github.com/hyperledger/fabric). 24 | * Support native Docker host or swarm host as the compute nodes, more supports on the way. 25 | * Support heterogeneous architecture, e.g., Z, Power and X86, from bare-metal servers to virtual machines. 26 | * Extend with monitor/log/health features by employing additional components. 
27 | 28 | ## Docs 29 | 30 | ### User Docs 31 | * [Dashboard](docs/dashboard.md) 32 | 33 | ### Operator Docs 34 | * [Installation & Deployment](docs/deployment.md) 35 | * [Scenarios](docs/scenario.md) 36 | * [Production Configuration](docs/production_config.md) 37 | 38 | ### Development Docs 39 | * [Architecture Design](docs/arch.md) 40 | * [Database Model](docs/db.md) 41 | * [API](api/restserver_v2.md) 42 | 43 | ## TODO 44 | * restserver: update api definitions yml files. 45 | * dashboard: support auto state fresh based on websocket. 46 | * dashboard: support return code checking in response. 47 | * dashboard: support user page. 48 | * engine: support advanced scheduling. 49 | * engine: support more-efficient fill-up. 50 | * engine: enhance the robustness for chain operations. 51 | * engine: support membersrvc option. 52 | 53 | ## Why named Cello? 54 | Can u find anyone better at playing chains? :) 55 | 56 | ## Author 57 | Designed and maintained by [Baohua Yang](https://yeasy.github.com). 
58 | -------------------------------------------------------------------------------- /api/dashboard.yaml: -------------------------------------------------------------------------------- 1 | # this is an example of the Cello API 2 | # as a demonstration of an API spec in YAML 3 | swagger: '2.0' 4 | info: 5 | title: Cello API 6 | description: Cello API to manage cluster 7 | contact: 8 | name: Baohua Yang 9 | url: https://github.com/yeasy/cello 10 | email: yangbaohua@gmail.com 11 | license: 12 | name: Apache 2.0 13 | url: http://www.apache.org/licenses/LICENSE-2.0.html 14 | version: "1.0.0" 15 | # the domain of the service 16 | host: 9.186.100.88:80 17 | # array of all schemes that your API supports 18 | schemes: 19 | - http 20 | # will be prefixed to all paths 21 | basePath: /admin 22 | produces: 23 | - application/json 24 | paths: 25 | /clusters: 26 | get: 27 | summary: A list of all clusters 28 | description: | 29 | The Clusters endpoint returns information about all existing clusters. 30 | The response includes the display name 31 | and other details about each cluster, and lists the clusters in the 32 | proper display order. 33 | parameters: 34 | - name: daemon_url 35 | in: query 36 | description: Filter clusters with specific daemon_url. 37 | required: false 38 | type: string 39 | - name: user_id 40 | in: query 41 | description: Filter clusters with specific user_id. 42 | required: false 43 | type: string 44 | tags: 45 | - Clusters 46 | responses: 47 | 200: 48 | description: An array of clusters 49 | schema: 50 | type: array 51 | items: 52 | $ref: '#/definitions/Cluster' 53 | default: 54 | description: Unexpected error 55 | schema: 56 | $ref: '#/definitions/Error' 57 | /cluster: 58 | get: 59 | summary: Retrieve a special cluster 60 | description: | 61 | The endpoint returns information about specific cluster. 62 | The response includes necessary info. 
63 | parameters: 64 | - name: cluster_id 65 | in: query 66 | description: Filter clusters with specific cluster_id. 67 | required: true 68 | type: string 69 | tags: 70 | - Cluster 71 | responses: 72 | 200: 73 | description: A cluster instance 74 | schema: 75 | $ref: '#/definitions/Cluster' 76 | default: 77 | description: Unexpected error 78 | schema: 79 | $ref: '#/definitions/Error' 80 | post: 81 | summary: Create a special cluster 82 | description: | 83 | The endpoint create a specific cluster 84 | The response includes necessary info. 85 | parameters: 86 | - name: daemon_url 87 | in: query 88 | description: Given the daemon_url to create at 89 | required: true 90 | type: string 91 | - name: cluster_name 92 | in: query 93 | description: Given the name 94 | required: false 95 | type: string 96 | tags: 97 | - Cluster 98 | responses: 99 | 200: 100 | description: A cluster instance created 101 | schema: 102 | $ref: '#/definitions/Cluster' 103 | default: 104 | description: Unexpected error 105 | schema: 106 | $ref: '#/definitions/Error' 107 | delete: 108 | summary: Delete a special cluster 109 | description: | 110 | The endpoint deletes a specific cluster 111 | The response includes necessary info. 112 | parameters: 113 | - name: cluster_id 114 | in: query 115 | description: Filter clusters with specific cluster_id. 116 | required: true 117 | type: string 118 | tags: 119 | - Cluster 120 | responses: 121 | 200: 122 | description: A cluster instance delete info 123 | type: string (TODO) 124 | default: 125 | description: Unexpected error 126 | schema: 127 | $ref: '#/definitions/Error' 128 | definitions: 129 | Cluster: 130 | type: object 131 | required: [id, user_id, api_url] 132 | properties: 133 | id: 134 | type: string 135 | description: Unique identifier representing a specific cluster. 
136 | user_id: 137 | type: string 138 | description: User id who owns this cluster, empty by default 139 | api_url: 140 | type: string 141 | description: Cluster REST URL representing the cluster. 142 | daemon_url: 143 | type: string 144 | description: Docker host daemon url 145 | name: 146 | type: string 147 | description: Display name of cluster. 148 | Error: 149 | type: object 150 | required: [code] 151 | properties: 152 | code: 153 | type: integer 154 | format: int32 155 | message: 156 | type: string 157 | fields: 158 | type: string 159 | -------------------------------------------------------------------------------- /api/restserver_v1.md: -------------------------------------------------------------------------------- 1 | # API V1 2 | 3 | **Deprecated.** 4 | 5 | ## Front 6 | These APIs will be called by front web services. 7 | 8 | Latest version please see [restserver.yaml](restserver.yaml). 9 | 10 | ### cluster_apply 11 | 12 | Find an available cluster in the pool for a user. 13 | 14 | ``` 15 | GET /v1/cluster_apply?user_id=xxx&consensus_plugin=pbft&consensus_mode 16 | =classic&size=4&new=0 17 | ``` 18 | 19 | if add `new=1`, then ignore matched clusters that user already occupy. 20 | 21 | When `cluster_apply` request arrives, the server will try checking available cluster in the pool. 22 | 23 | Accordingly, the server will return a json response (succeed or fail). 24 | 25 | ### cluster_release 26 | 27 | Declare the id to release a cluster. 28 | 29 | ``` 30 | GET /v1/cluster_release?cluster_id=xxxxxxxx 31 | ``` 32 | 33 | Rlease all clusters under a user account. 34 | ``` 35 | GET /v1/cluster_release?user_id=xxxxxxxx 36 | ``` 37 | The server will drop the corresponding cluster, recreate it and put into available pool for future requests. 38 | 39 | 40 | ## Admin 41 | Those APIs should not be called by outside applications. 
Just for 42 | information, please see [api-admin.yaml](api-admin.yaml) 43 | -------------------------------------------------------------------------------- /api/restserver_v1.yaml: -------------------------------------------------------------------------------- 1 | # this is an example of the Cello API 2 | # as a demonstration of an API spec in YAML 3 | swagger: '2.0' 4 | info: 5 | title: Cello API 6 | description: Cello API for the rest server calling 7 | contact: 8 | name: Baohua Yang 9 | url: https://github.com/yeasy/cello 10 | email: yangbaohua@gmail.com 11 | license: 12 | name: Apache 2.0 13 | url: http://www.apache.org/licenses/LICENSE-2.0.html 14 | version: "1.1.0" 15 | # the domain of the service 16 | host: 9.186.100.88:80 17 | # array of all schemes that your API supports 18 | schemes: 19 | - http 20 | # will be prefixed to all paths 21 | basePath: /v2 22 | produces: 23 | - application/json 24 | paths: 25 | /cluster_apply: 26 | get: 27 | summary: Apply a new cluster for use. 28 | description: | 29 | The endpoint returns information about the new cluster 30 | The response includes the uuid, display name and other details . 31 | parameters: 32 | - name: user_id 33 | in: query 34 | description: The id to specify the user. 35 | required: true 36 | type: string 37 | tags: 38 | - Cluster_apply 39 | responses: 40 | 200: 41 | description: An instance of clusters. 42 | schema: 43 | $ref: '#/definitions/Cluster' 44 | 404: 45 | description: Entity not found. 46 | default: 47 | description: Unexpected error 48 | schema: 49 | $ref: '#/definitions/Error' 50 | /cluster_release: 51 | get: 52 | summary: Release a cluster, no use it more. 53 | description: | 54 | The endpoint returns information about the action. 55 | The response includes message about result. 56 | parameters: 57 | - name: user_id 58 | in: query 59 | description: The id to specify the user. 
60 | required: true 61 | type: string 62 | tags: 63 | - Cluster_release 64 | responses: 65 | 200: 66 | description: message tell success. 67 | schema: 68 | type: string 69 | 404: 70 | description: Entity not found. 71 | default: 72 | description: Unexpected error 73 | schema: 74 | $ref: '#/definitions/Error' 75 | 76 | definitions: 77 | Cluster: 78 | type: object 79 | required: [id, user_id, api_url] 80 | properties: 81 | id: 82 | type: string 83 | description: Unique identifier representing a specific cluster. 84 | user_id: 85 | type: string 86 | description: User id who owns this cluster, empty by default 87 | api_url: 88 | type: string 89 | description: Cluster REST URL representing the cluster. 90 | host_id: 91 | type: string 92 | description: Which host the cluster is at. 93 | name: 94 | type: string 95 | description: Display name of cluster. 96 | Error: 97 | type: object 98 | required: [code] 99 | properties: 100 | code: 101 | type: integer 102 | format: int32 103 | message: 104 | type: string 105 | fields: 106 | type: string -------------------------------------------------------------------------------- /api/restserver_v2.md: -------------------------------------------------------------------------------- 1 | # API V2 2 | 3 | Each url should have the `/v2` prefix, e.g., `/cluster_op` should be `/v2/cluster_op`. 4 | 5 | ## Rest Server 6 | These APIs will be called by front web services. 7 | 8 | Latest version please see [restserver.yaml](restserver.yaml). 
9 | 10 | ### Cluster 11 | 12 | Basic request may looks like: 13 | 14 | ``` 15 | POST /cluster_op 16 | { 17 | action:xxx, 18 | key:value 19 | } 20 | ``` 21 | 22 | Or 23 | 24 | ``` 25 | GET /cluster_op?action=xxx&key=value 26 | ``` 27 | 28 | The supported actions can be 29 | * `apply`: apply a chain 30 | * `release`: release a chain, possibly only one peer 31 | * `start`: start a chain, possibly only one peer 32 | * `stop`: stop a chain, possibly only one peer 33 | * `restart`: restart a chain, possibly only one peer 34 | 35 | We may show only one of the GET or POST request in the following sections. 36 | 37 | #### Cluster apply 38 | 39 | Apply an available cluster for a user, support multiple filters like consensus_plugin, size. 40 | 41 | ``` 42 | POST /cluster_op 43 | { 44 | action:apply, 45 | user_id:xxx, 46 | allow_multiple:False, 47 | consensus_plugin:pbft, 48 | consensus_mode:batch, 49 | size:4 50 | } 51 | ``` 52 | 53 | if `allow_multiple:True`, then ignore matched clusters that user already occupied. 54 | 55 | When `apply` request arrives, the server will try checking available cluster in the pool. 56 | 57 | Accordingly, the server will return a json response (succeed or fail). 58 | 59 | ```json 60 | { 61 | "code": 200, 62 | "data": { 63 | "api_url": "http://192.168.7.62:5004", 64 | "consensus_mode": "batch", 65 | "consensus_plugin": "pbft", 66 | "daemon_url": "tcp://192.168.7.62:2375", 67 | "id": "576ba021414b0502864d0306", 68 | "name": "compute2_4", 69 | "size": 4, 70 | "user_id": "xxx" 71 | }, 72 | "error": "", 73 | "status": "OK" 74 | } 75 | ``` 76 | 77 | #### Cluster release 78 | 79 | Release a specific cluster. 80 | 81 | ``` 82 | POST /cluster_op 83 | { 84 | action:release, 85 | cluster_id:xxxxxxxx 86 | } 87 | ``` 88 | 89 | Return json object may look like 90 | 91 | ```json 92 | { 93 | "code": 200, 94 | "data": "", 95 | "error": "", 96 | "status": "OK" 97 | } 98 | ``` 99 | 100 | Release all clusters under a user account. 
101 | 102 | ``` 103 | POST /cluster_op 104 | { 105 | action:release, 106 | user_id:xxxxxxxx 107 | } 108 | ``` 109 | 110 | The server will drop the corresponding cluster, recreate it and put into available pool for future requests. 111 | 112 | 113 | #### Cluster Start, Stop or Restart 114 | 115 | Take `start` for example, you can specify the node_id if to operate one node. 116 | 117 | ``` 118 | POST /cluster_op 119 | { 120 | action:start, 121 | cluster_id:xxx, 122 | node_id:vp0 123 | } 124 | ``` 125 | 126 | ### Clusters List 127 | 128 | Return the json object whose data may contain list of cluster ids. 129 | 130 | List all available cluster of given type. 131 | 132 | ``` 133 | POST /clusters 134 | { 135 | consensus_plugin:pbft, 136 | consensus_mode:classic, 137 | size:4, 138 | user_id:"" 139 | } 140 | ``` 141 | 142 | Query all cluster of given type 143 | 144 | ``` 145 | POST /clusters 146 | { 147 | consensus_plugin:pbft, 148 | consensus_mode:classic, 149 | size:4, 150 | } 151 | ``` 152 | 153 | Query the clusters for a user. 154 | 155 | 156 | ``` 157 | POST /clusters 158 | { 159 | user_id:xxx 160 | } 161 | ``` 162 | 163 | ### Get object of a cluster 164 | 165 | ``` 166 | GET /cluster/xxxxxxx 167 | ``` 168 | 169 | Will return the json object whose data may contain detailed information of cluster. 170 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # This compose file will deploy the services, and bootup a mongo server. 2 | # Local `/opt/cello/mongo` will be used for the db storage. 
3 | # dashbard: dashbard service of cello, listen on 8080 4 | # app: app service of cello, listen on 80 5 | # nginx: front end 6 | # mongo: mongo db 7 | 8 | version: '2' 9 | services: 10 | # cello dashbard service 11 | dashboard: 12 | build: 13 | context: src 14 | dockerfile: Dockerfile-dashboard 15 | image: cello-dashbard 16 | container_name: dashbard 17 | hostname: cello-dashbard 18 | restart: unless-stopped 19 | environment: 20 | - MONGO_URL=mongodb://mongo:27017 21 | - MONGO_DB=dev 22 | - DEBUG=True # in debug mode, service will auto-restart 23 | - LOG_LEVEL=DEBUG # what level log will be output 24 | expose: 25 | - "8080" 26 | volumes: # This should be removed in product env 27 | - ./src:/app 28 | 29 | # cello restserver service 30 | restserver: 31 | build: 32 | context: src 33 | dockerfile: Dockerfile-restserver 34 | image: cello-restserver 35 | container_name: restserver 36 | hostname: cello-restserver 37 | restart: unless-stopped 38 | environment: 39 | - MONGO_URL=mongodb://mongo:27017 40 | - MONGO_DB=dev 41 | - DEBUG=True # in debug mode, service will auto-restart 42 | - LOG_LEVEL=DEBUG # what level log will be output 43 | expose: 44 | - "80" 45 | volumes: # This should be removed in product env 46 | - ./src:/app 47 | 48 | # cello watchdog service 49 | watchdog: 50 | build: 51 | context: src 52 | dockerfile: Dockerfile-watchdog 53 | image: cello-watchdog 54 | container_name: watchdog 55 | hostname: cello-watchdog 56 | restart: unless-stopped 57 | environment: 58 | - MONGO_URL=mongodb://mongo:27017 59 | - MONGO_DB=dev 60 | - DEBUG=True # in debug mode, service will auto-restart 61 | - LOG_LEVEL=DEBUG # what level log will be output 62 | volumes: # This should be removed in product env 63 | - ./src:/app 64 | 65 | # mongo database, may use others in future 66 | mongo: 67 | image: mongo:3.2 68 | hostname: mongo 69 | container_name: mongo 70 | restart: unless-stopped 71 | mem_limit: 2048m 72 | ports: 73 | #- "27017:27017" # use follow line instead in production 
env 74 | - "127.0.0.1:27017:27017" 75 | - "127.0.0.1:27018:27018" 76 | environment: 77 | - NO_USED=0 78 | volumes: 79 | - /opt/cello/mongo:/data/db 80 | 81 | # nginx to forward front request, may split it out in future 82 | nginx: 83 | image: yeasy/nginx 84 | hostname: nginx 85 | container_name: nginx 86 | restart: always 87 | mem_limit: 2048m 88 | volumes: 89 | - ./nginx/nginx.conf:/etc/nginx/nginx.default.conf 90 | #- /opt/cello/nginx/log/:/var/log/nginx/ 91 | ports: 92 | - "80:80" 93 | - "8080:8080" 94 | environment: 95 | - BACKEND=dashbard 96 | - PORT=8080 97 | - USERNAME=admin 98 | - PASSWORD=pass -------------------------------------------------------------------------------- /docs/arch.md: -------------------------------------------------------------------------------- 1 | # Architecture Design 2 | 3 | Here we discuss the architecture design for the management services on the Master node. 4 | 5 | ## Terminology 6 | * Cluster | Chain: A blockchain with unique access API address, including several peer nodes. May support Hyperledger Fabric, Sawtooth Lake and Iroha. 7 | * Host: A resource server, usually it can be a native Docker host or a Swarm cluster. 8 | * Master Node: Running the cello platform, to manage the compute nodes. 9 | * Compute | Worker Node: The servers to have blockchains running inside. 10 | 11 | ## Philosophy and principles 12 | The architecture will follow the following principles: 13 | 14 | * Micro-service: Means we decouple various functions to individual micro services. No service will crash others whatever it does. 15 | * Fault-resilience: Means the service should be tolerant for fault, such as database crash. 16 | * Scalability: Try best to distribute the services, to mitigate centralized bottleneck. 17 | 18 | 19 | ## Components 20 | 21 | ![Architecture Overview](imgs/architecture.png) 22 | 23 | * `dashboard`: Provide the dashboard for the pool administrator, also the core engine to automatically maintain everything. 
24 | * `restserver`: Provide the restful api for other system to apply/release/list chains. 25 | * `watchdog`: Timely checking system status, keep everything healthy and clean. 26 | 27 | ## Implementation 28 | 29 | The restful related implementation is based on [Flask](flask.pocoo.org), a Werkzeug based micro-framework for web service. 30 | 31 | I choose it for: 32 | 33 | * Lightweight 34 | * Good enough in performance 35 | * Flexible for extending 36 | * Stable in code 37 | -------------------------------------------------------------------------------- /docs/dashboard.md: -------------------------------------------------------------------------------- 1 | # Dashboard 2 | 3 | System operators can utilize dashboard service to check system status or change configurations. 4 | 5 | The dashboard service will listen on port `8080`. 6 | 7 | ## Overview 8 | 9 | URL: `/index`. 10 | 11 | See a high-level overview on system status. 12 | 13 | ## System Status 14 | 15 | URL: `/stat`. 16 | 17 | See statistics on the system. 18 | 19 | ## Hosts 20 | 21 | URL: `/hosts`. 22 | 23 | Operate on the hosts managed by the system. 24 | 25 | ## Clusters_active 26 | 27 | URL: `/clusters?type=active`. 28 | 29 | Operate on existing running chains in the pool. 30 | 31 | ## Clusters_inused 32 | 33 | URL: `/clusters?type=inused`. 34 | 35 | Operate on user occupied chains in the system. 36 | 37 | ## Clusters_released 38 | 39 | URL: `/clusters?type=released`. 40 | 41 | See cluster releasing history data. 
42 | 43 | ## Screenshots 44 | 45 | ![dashboard-main](imgs/dashboard_main.png) 46 | ![dashboard-status](imgs/dashboard_status.png) 47 | ![dashboard-hosts](imgs/dashboard_hosts.png) 48 | ![dashboard-clusters](imgs/dashboard_clusters.png) 49 | ![dashboard-add-host](imgs/dashboard_add_host.png) 50 | ![dashboard-add-cluster](imgs/dashboard_add_cluster.png) 51 | -------------------------------------------------------------------------------- /docs/db.md: -------------------------------------------------------------------------------- 1 | # Database Design 2 | 3 | We have several collections, as follows. 4 | 5 | ## Host 6 | Track the information of a Host. 7 | 8 | A typical host may look like: 9 | 10 | id | name | daemon_url | create_ts | capacity | status | clusters | type | log_level | log_type | log_server | autofill | schedulable 11 | ---| ------ | ------------------- | -------------- | -------- | -------- | ------- | ------- | --------- | -------- | ----------- | -------- | ----------- 12 | xxx | host_0 | tcp://10.0.0.1:2375 | 20160430101010 | 20 | active | [c1,c2,c3] | single | debug | syslog | udp://10.0.0.2:5000 | true | true 13 | 14 | * id (str): uuid of the host instance 15 | * name (str): human-readable name 16 | * daemon_url (str): Through which url to access the Docker/Swarm Daemon 17 | * create_ts (datetime): When to add the host 18 | * capacity (int): Maximum number of chains on that host 19 | * status (str): 'active' (Can access daemon service) or 'inactive' (disconnected from daemon service) 20 | * clusters (list): List of the ids of those chains on that host 21 | * type (str): 'singe' (single Docker host) or 'swarm' (Docker Swarm cluster) 22 | * log_level (str): logging level for chains on the host, e.g., 'debug', 'info', 'warn', 'error' 23 | * log_type (str): logging type for chains on the host, 'local' or 'syslog' 24 | * log_server (str): log server address, only valid when `log_type` is 'syslog' 25 | * autofill (str): whether to autofill the server to 
its capacity with chains, 'true' or 'false' 26 | * schedulable (str): whether to schedule a chain request to that host, 'true' or 'false', useful when maintain the host 27 | 28 | ## Cluster 29 | Track information of one blockchain. 30 | 31 | A typical cluster may look like: 32 | 33 | id | service_url | name | user_id | host_id | daemon_url | consensus_plugin | consensus_mode | create_ts | apply_ts | release_ts | duration | size | containers | health 34 | --- | --------------- | --------- | -------- | ------- | ------------------- | ---------------- | -------------- | ------------- | -------- | ---------- | ------- | ------- | ------- | ------ 35 | xxx | {} | cluster_A | "" | host_xx | tcp://10.0.0.1:2375 | pbft | batch | 20160430101010 | 20160430101010 | | | 4 | [vp0,vp1,vp2,vp3] | OK 36 | 37 | * id (str): uuid of the host instance 38 | * service_url (dict): urls to access the services on the chain, e.g., {'rest':10.0.0.1:7050, 'grpc':10.0.0.1:7051} 39 | * name (str): human-readable name 40 | * user_id (str): Which user occupies this chain, empty for no occupation 41 | * host_id (str): Where the chain exists 42 | * daemon_url (str): Through which url to access the Docker/Swarm Daemon 43 | * consensus_plugin (str): Consensus plugin name 44 | * consensus_mode (str): Consensus plugin mode name 45 | * create_ts (datetime): When to create the chain 46 | * apply_ts (datetime): When the chain is applied 47 | * release_ts (datetime): When to release the chain 48 | * duration (str): How long the chain lives 49 | * size (int): Peer nodes number of the chain 50 | * containers (list): List of the ids of those containers for the chain 51 | * health (str): 'OK' (healthy status) or 'Fail' (Not healthy) 52 | -------------------------------------------------------------------------------- /docs/deployment.md: -------------------------------------------------------------------------------- 1 | # Deployment 2 | 3 | *Here we describe the deployment setups for development usage. 
If you want to deploy Cello for production, please also refer to the [Production Configuration](production_config.md).* 4 | 5 | Cello follows a typical Master-Worker architecture. Hence there will be two types of Nodes. 6 | 7 | * Master Node: Manages (e.g., create/delete) the chains inside Worker Nodes, with the Web dashboard on port `8080` and the RESTful api on port `80`; 8 | * Worker Node: Chain providers, now support Docker Host or Swarm Cluster. The Docker service should be accessible from port `2375` from the Master Node. 9 | 10 | ![Deployment topology](imgs/deployment_topo.png) 11 | 12 | Each Node is suggested to be a Linux-based (e.g., Ubuntu 14.04+) server/vm: 13 | 14 | 15 | ## Worker Node 16 | Currently we support Docker Host or Swarm Cluster as Worker Node. More types will be added soon. 17 | 18 | For a Worker Node meeting the [system requirements](#system-requirements), three steps are required: 19 | 20 | * [Docker daemon setup](#docker-daemon-setup) 21 | * [Docker images pulling](#docker-images-pulling) 22 | * [Firewall Setup](#firewall-setup) 23 | 24 | ### System Requirements 25 | * Hardware: 8c16g100g 26 | * Docker engine: 27 | - 1.12.0+ 28 | * aufs-tools (optional): Only required on ubuntu 14.04. 29 | 30 | ### Docker Daemon Setup 31 | 32 | Let the Docker daemon listen on port 2375, and make sure the Master can reach the Worker Node through this port. 33 | 34 | #### Ubuntu 14.04 35 | Simply add this line into your Docker config file `/etc/default/docker`.
36 | 37 | ```sh 38 | DOCKER_OPTS="$DOCKER_OPTS -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --api-cors-header='*' --default-ulimit=nofile=8192:16384 --default-ulimit=nproc=8192:16384" 39 | ``` 40 | 41 | Then restart the docker daemon with: 42 | 43 | ```sh 44 | $ sudo service docker restart 45 | ``` 46 | 47 | #### Ubuntu 16.04 48 | Update `/etc/systemd/system/docker.service.d/override.conf` like 49 | 50 | ``` 51 | [Service] 52 | DOCKER_OPTS="$DOCKER_OPTS -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --api-cors-header='*' --default-ulimit=nofile=8192:16384 --default-ulimit=nproc=8192:16384" 53 | EnvironmentFile=-/etc/default/docker 54 | ExecStart= 55 | ExecStart=/usr/bin/dockerd -H fd:// $DOCKER_OPTS 56 | ``` 57 | 58 | Regenerate the docker service script and restart the docker engine: 59 | 60 | ```sh 61 | $ sudo systemctl daemon-reload 62 | $ sudo systemctl restart docker.service 63 | ``` 64 | 65 | At last, run the follow test at Master node and get OK response, to make sure it can access Worker node successfully. 66 | 67 | ```sh 68 | [Master] $ docker -H Worker_Node_IP:2375 version 69 | ``` 70 | 71 | ### Docker Images Pulling 72 | Pulling the following images. 73 | 74 | ```bash 75 | $ docker pull hyperledger/fabric-peer:x86_64-0.6.1-preview \ 76 | && docker pull hyperledger/fabric-membersrvc:x86_64-0.6.1-preview \ 77 | && docker pull yeasy/blockchain-explorer:latest \ 78 | && docker tag hyperledger/fabric-peer:x86_64-0.6.1-preview hyperledger/fabric-peer \ 79 | && docker tag hyperledger/fabric-peer:x86_64-0.6.1-preview hyperledger/fabric-baseimage \ 80 | && docker tag hyperledger/fabric-membersrvc:x86_64-0.6.1-preview hyperledger/fabric-membersrvc 81 | ``` 82 | 83 | ### Firewall Setup 84 | Make sure ip forward is enabled, you can simply run the follow command. 
85 | 86 | ```sh 87 | $ sysctl -w net.ipv4.ip_forward=1 88 | ``` 89 | And check the os iptables config, to make sure host ports are open (e.g., 2375, 7050~10000) 90 | 91 | ## Master Node 92 | The Master Node includes several services: 93 | 94 | * dashboard: Provide Web UI for operators. 95 | * restserver: Provide RESTful APIs for chain consumers. 96 | * watchdog: Watch for health checking. 97 | 98 | More details can be found at the [architecture doc](docs/arch.md). 99 | 100 | It can be deployed by in 3 steps. 101 | 102 | * Clone code 103 | * Pull Docker images 104 | * Run setup script 105 | 106 | ### System Requirement 107 | * Hardware: 8c16g100g 108 | * Docker engine: 1.12.0+ 109 | * docker-compose: 1.7.0+ 110 | 111 | ### Clone Code 112 | 113 | You may check `git` and `make` are installed to clone the code. 114 | 115 | ```sh 116 | $ sudo aptitude install git make -y 117 | $ git clone https://github.com/yeasy/cello && cd cello 118 | ``` 119 | 120 | ### Docker images pulling 121 | 122 | Pull the following images 123 | 124 | ```bash 125 | $ docker pull python:3.5 \ 126 | && docker pull mongo:3.2 \ 127 | && docker pull yeasy/nginx:latest \ 128 | && docker pull mongo-express:0.30 129 | ``` 130 | 131 | *Note: mongo-express:0.30 is for debugging the db, which is optional for basic setup.* 132 | 133 | ### Run Setup 134 | 135 | For the first time running, please setup the master node with 136 | 137 | ```sh 138 | $ make setup 139 | ``` 140 | 141 | Make sure there is no error during the setup. Otherwise, please check the log msgs. 
142 | 143 | ### Usage 144 | 145 | #### Start/Restart 146 | To (re)start the whole services, please run 147 | 148 | ```sh 149 | $ make restart 150 | ``` 151 | 152 | #### Deploy/Redploy 153 | To (re)deploy one specific service, e.g., dashboard, please run 154 | 155 | ```sh 156 | $ make redeploy service=dashboard 157 | ``` 158 | 159 | #### Check Logs 160 | To check the logs for all the services, please run 161 | 162 | ```sh 163 | $ make logs 164 | ``` 165 | 166 | To check the logs for one specific service, please run 167 | ```sh 168 | $ make log service=watchdog 169 | ``` 170 | 171 | Now you can access the `MASTER_NODE_IP:8080` to open the Web-based [operational dashboard](docs/dashboard.md). 172 | 173 | ### Configuration 174 | The application configuration can be imported from file named `CELLO_CONFIG_FILE`. 175 | 176 | By default, it also loads the `config.py` file as the configurations. 177 | 178 | ### Data Storage 179 | The mongo container will use local `/opt/cello/mongo` directory for persistent storage. 180 | 181 | Please keep it safe by backups or using more high-available solutions. 
182 | -------------------------------------------------------------------------------- /docs/imgs/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/architecture.png -------------------------------------------------------------------------------- /docs/imgs/dashboard_add_cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/dashboard_add_cluster.png -------------------------------------------------------------------------------- /docs/imgs/dashboard_add_host.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/dashboard_add_host.png -------------------------------------------------------------------------------- /docs/imgs/dashboard_clusters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/dashboard_clusters.png -------------------------------------------------------------------------------- /docs/imgs/dashboard_hosts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/dashboard_hosts.png -------------------------------------------------------------------------------- /docs/imgs/dashboard_main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/dashboard_main.png 
-------------------------------------------------------------------------------- /docs/imgs/dashboard_status.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/dashboard_status.png -------------------------------------------------------------------------------- /docs/imgs/deployment.graffle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/deployment.graffle -------------------------------------------------------------------------------- /docs/imgs/deployment_topo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/deployment_topo.png -------------------------------------------------------------------------------- /docs/imgs/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/logo.png -------------------------------------------------------------------------------- /docs/imgs/scenario.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/docs/imgs/scenario.png -------------------------------------------------------------------------------- /docs/production_config.md: -------------------------------------------------------------------------------- 1 | # Production Configurations 2 | Reference system configuration in production environment. 3 | 4 | ## `/etc/sysctl.conf` 5 | 6 | ```sh 7 | # Don't ask why, this is a solid answer. 
8 | vm.swappiness=10 9 | fs.file-max = 2000000 10 | kernel.threads-max = 2091845 11 | kernel.pty.max = 210000 12 | kernel.keys.root_maxkeys = 20000 13 | kernel.keys.maxkeys = 20000 14 | net.ipv4.ip_local_port_range = 30000 65535 15 | net.ipv4.tcp_tw_reuse = 0 16 | net.ipv4.tcp_tw_recycle = 0 17 | net.ipv4.tcp_max_tw_buckets = 5000 18 | net.ipv4.tcp_fin_timeout = 30 19 | net.ipv4.tcp_max_syn_backlog = 8192 20 | ``` 21 | 22 | Then run `sysctl -p` to enable the settings. 23 | 24 | ## `/etc/security/limits.conf` 25 | 26 | ```sh 27 | * hard nofile 1048576 28 | * soft nofile 1048576 29 | * soft nproc 10485760 30 | * hard nproc 10485760 31 | * soft stack 32768 32 | * hard stack 32768 33 | ``` 34 | Logout and login, then check with `ulimit -n`. 35 | 36 | ## Other Considerations 37 | 38 | * Use the code from the `release` branch. 39 | * Configuration: Set all parameters to production, including image, compose, and application. 40 | * Security: Use a firewall to filter traffic, enable TLS and authentication. 41 | * Backup: Enable automatic data backup. 42 | * Monitoring: Enable monitoring services. -------------------------------------------------------------------------------- /docs/scenario.md: -------------------------------------------------------------------------------- 1 | # Scenarios 2 | 3 | ## Admin 4 | 5 | ### Add/Delete a host 6 | 7 | Admin can add a host (a single Docker host or a Swarm cluster) into the resource pool. 8 | 9 | Then Cello will check and set it up with the given configurations, e.g., if autofill is enabled, it will fill the host with chains to its capacity. 10 | 11 | Admin can also delete a host from the resource pool if it has no running chains. 12 | 13 | ### Config a host 14 | Admin can manually update the host configuration, including: 15 | 16 | * name: Human readable name alias. 17 | * capacity: Maximum chain number on that host. 18 | * schedulable: Whether to distribute chains on that host to users.
19 | * autofill: Whether to keep host with running chains to its capacity. 20 | * log_type: local or syslog. 21 | 22 | ### Operate a host 23 | 24 | Admin can run several operations on a host, including: 25 | 26 | * fill: Fill the host with chains to its capacity. 27 | * clean: Clean up the free chains on that host. 28 | * reset: Re-setup a host, e.g., cleaning useless docker containers. 29 | 30 | ### Add/Delete chains 31 | Admin can also manually add some specific chain to a host, or delete one. 32 | 33 | ### Automatic way 34 | 35 | When the autofill box is checked on a host, then watchdog will automatically keep there are `capacity` number of healthy chains on that host. 36 | 37 | e.g., if the capacity of one host is set to 10, then the host will be filled with 10 chains quickly. When 2 chains are broken, they will be replaced by healthy ones soon. 38 | 39 | ## Chain users 40 | 41 | ### apply a cluster 42 | 43 | User sends request to apply a cluster, Cello will try to find available chains in the pool, to see if it can match the request. 44 | 45 | If found one, construct the response, otherwise, construct an error response. 46 | 47 | 48 | ### release a cluster 49 | 50 | User sends request to release a cluster, Cello will check if the request is valid. 51 | 52 | If found applied chain, then release and recreate it with the same name, at the same host, and potentially move it to released db collections. 53 | 54 | If not found, then just ignore or response. 
55 | -------------------------------------------------------------------------------- /nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | # This file should be put under /etc/nginx/conf.d/ 2 | # Or place as /etc/nginx/nginx.conf 3 | 4 | user nginx; 5 | worker_processes auto; 6 | daemon off; 7 | 8 | error_log /var/log/nginx/error.log warn; 9 | pid /var/run/nginx.pid; 10 | 11 | events { 12 | worker_connections 1024; 13 | } 14 | 15 | http { 16 | include /etc/nginx/mime.types; 17 | default_type application/octet-stream; 18 | 19 | #log_format logstash_json '{ "@timestamp": "$time_iso8601", ' 20 | # '"@fields": { ' 21 | # '"remote_addr": "$remote_addr", ' 22 | # '"remote_user": "$remote_user", ' 23 | # '"time_local": "$time_local", ' 24 | # '"body_bytes_sent": "$body_bytes_sent", ' 25 | # '"request_time": "$request_time", ' 26 | # '"status": "$status", ' 27 | # '"request": "$request", ' 28 | # '"request_method": "$request_method", ' 29 | # '"http_referrer": "$http_referer", ' 30 | # '"http_user_agent": "$http_user_agent" } }'; 31 | 32 | #log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 33 | # '$status $body_bytes_sent "$http_referer" ' 34 | # '"$http_user_agent" "$http_x_forwarded_for"'; 35 | 36 | #access_log /var/log/nginx/access.log logstash_json; 37 | 38 | server_tokens off; 39 | 40 | sendfile on; 41 | tcp_nopush on; 42 | 43 | keepalive_timeout 60; 44 | tcp_nodelay on; 45 | client_body_timeout 15; 46 | 47 | gzip on; 48 | gzip_vary on; 49 | gzip_min_length 1k; 50 | 51 | upstream backend { 52 | server BACKEND:PORT; 53 | } 54 | 55 | upstream restserver { 56 | server restserver:80; 57 | } 58 | 59 | server { 60 | listen 8080; 61 | access_log off; 62 | 63 | location ~ ^/host_monitor/(.*)$ { 64 | proxy_pass http://$1:8080/containers/; 65 | proxy_set_header Host $host; 66 | proxy_set_header X-Real-IP $remote_addr; 67 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 68 | 
proxy_connect_timeout 150; 69 | proxy_send_timeout 100; 70 | proxy_read_timeout 100; 71 | proxy_buffers 16 64k; 72 | proxy_busy_buffers_size 64k; 73 | client_max_body_size 256k; 74 | client_body_buffer_size 128k; 75 | } 76 | 77 | location / { 78 | if ($request_method !~ ^(GET|DELETE|POST|PUT)$ ) { 79 | return 444; 80 | } 81 | 82 | auth_basic "Login"; 83 | auth_basic_user_file /etc/nginx/.htpasswd; 84 | proxy_pass http://backend; 85 | proxy_set_header Host $host; 86 | proxy_set_header X-Forwarded-For $remote_addr; 87 | proxy_set_header X-Real-IP $remote_addr; 88 | } 89 | 90 | error_page 500 502 503 504 /50x.html; 91 | location = /50x.html { 92 | root html; 93 | } 94 | } 95 | 96 | server { 97 | listen 80; 98 | 99 | location / { 100 | if ($request_method !~ ^(GET|DELETE|POST|PUT)$ ) { 101 | return 444; 102 | } 103 | proxy_pass http://restserver; 104 | proxy_set_header Host $host; 105 | proxy_set_header X-Forwarded-For $remote_addr; 106 | proxy_set_header X-Real-IP $remote_addr; 107 | } 108 | 109 | error_page 500 502 503 504 /50x.html; 110 | location = /50x.html { 111 | root html; 112 | } 113 | } 114 | 115 | include /etc/nginx/conf.d/*.conf; 116 | } 117 | -------------------------------------------------------------------------------- /scripts/header.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PROJECT=cello 4 | 5 | ## DO NOT MODIFY THE FOLLOWING PART, UNLESS YOU KNOW WHAT IT MEANS ## 6 | echo_r () { 7 | [ $# -ne 1 ] && return 0 8 | echo -e "\033[31m$1\033[0m" 9 | } 10 | echo_g () { 11 | [ $# -ne 1 ] && return 0 12 | echo -e "\033[32m$1\033[0m" 13 | } 14 | echo_y () { 15 | [ $# -ne 1 ] && return 0 16 | echo -e "\033[33m$1\033[0m" 17 | } 18 | echo_b () { 19 | [ $# -ne 1 ] && return 0 20 | echo -e "\033[34m$1\033[0m" 21 | } 22 | 23 | pull_image() { 24 | [ $# -ne 1 ] && return 0 25 | name=$1 26 | [[ "$(sudo docker images -q ${name} 2> /dev/null)" == "" ]] && echo_r "Not found ${name}, may need some 
time to pull it down..." && sudo docker pull ${name} 27 | } 28 | -------------------------------------------------------------------------------- /scripts/redeploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script will only build and redeploy the specific service. 3 | # It should be triggered at the upper directory 4 | 5 | source scripts/header.sh 6 | 7 | if [ "$#" -ne 1 ]; then 8 | echo "Redeploy the service, e.g., engine, api, watchdog, mongo, nginx" 9 | exit 10 | fi 11 | 12 | SERVICE=$1 13 | 14 | echo "Remove the old image" 15 | docker rmi ${PROJECT}-${SERVICE} 16 | 17 | echo "Rebuilding the ${PROJECT}-${SERVICE} image" 18 | docker-compose build ${SERVICE} 19 | 20 | echo "Remove the old containers" 21 | docker-compose stop ${SERVICE} 22 | docker-compose rm -f --all ${SERVICE} 23 | 24 | echo "Redeploy the ${PROJECT}-${SERVICE} container" 25 | docker-compose up --no-deps -d ${SERVICE} -------------------------------------------------------------------------------- /scripts/setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script will try setup a valid environment for the docker-compose running. 3 | # It should be triggered at the upper directory, and safe to repeat. 4 | 5 | source scripts/header.sh 6 | 7 | USER=`whoami` 8 | 9 | DB_DIR=/opt/${PROJECT}/mongo 10 | 11 | sudo apt-get update && sudo apt-get install -y -m curl docker-engine python-pip 12 | 13 | sudo pip install --upgrade pip 14 | 15 | sudo pip install --upgrade tox 16 | 17 | echo_b "Checking Docker-engine..." 18 | command -v docker >/dev/null 2>&1 || { echo_r >&2 "No docker-engine found, try installing"; curl -sSL https://get.docker.com/ | sh; sudo service docker restart; } 19 | 20 | echo_b "Add existing user to docker group" 21 | sudo usermod -aG docker ${USER} 22 | 23 | echo_b "Checking Docker-compose..." 
24 | command -v docker-compose >/dev/null 2>&1 || { echo_r >&2 "No docker-compose found, try installing"; sudo pip install -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com docker-compose; } 25 | 26 | echo_b "Checking local mounted database path..." 27 | [ ! -d ${DB_DIR} ] && echo_r "Local database path ${DB_DIR} not existed, creating one" && sudo mkdir -p ${DB_DIR} && sudo chown -R ${USER}:${USER} ${DB_DIR} 28 | 29 | echo_b "Checking local Docker image..." 30 | pull_image "mongo:3.2" 31 | pull_image "python:3.5" 32 | pull_image "yeasy/nginx:latest" 33 | 34 | [ `sudo docker ps -qa|wc -l` -gt 0 ] && echo_r "Warn: existing containers may cause unpredictable failure, suggest to clean them using docker rm" 35 | 36 | echo_g "Setup done, please logout and login again." 37 | echo_g "It's safe to run this script repeatedly. Just re-run if it fails." 38 | -------------------------------------------------------------------------------- /scripts/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script will (re)start all services. 4 | # It should be triggered at the upper directory, and safe to repeat. 5 | 6 | source scripts/header.sh 7 | 8 | echo_b "Start all services..." 9 | docker-compose up -d --no-recreate 10 | 11 | #echo "Restarting mongo_express" 12 | #[[ "$(docker ps -q --filter='name=mongo_express')" != "" ]] && docker restart mongo_express -------------------------------------------------------------------------------- /scripts/start_cadvisor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Starting cadvisor..." 
4 | 5 | docker run -d \ 6 | --volume=/:/rootfs:ro \ 7 | --volume=/var/run:/var/run:rw \ 8 | --volume=/sys:/sys:ro \ 9 | --volume=/var/lib/docker/:/var/lib/docker:ro \ 10 | --publish=8080:8080 \ 11 | --detach=true \ 12 | --name=cadvisor \ 13 | google/cadvisor:latest 14 | 15 | -------------------------------------------------------------------------------- /scripts/start_mongoexpress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # NOT run this in production environment. 3 | # This script will start a mongo-express node for debugging. 4 | # It should be triggered at the upper directory 5 | 6 | source scripts/header.sh 7 | 8 | NET=${PROJECT}_default 9 | BIND_ADDR=0.0.0.0 10 | #BIND_ADDR=127.0.0.1 11 | 12 | echo "Access port 8081 for the mongo-express UI" 13 | 14 | docker run -d \ 15 | --name mongo-express \ 16 | --link mongo:mongo \ 17 | --net ${NET} \ 18 | -p ${BIND_ADDR}:8081:8081 \ 19 | -e ME_CONFIG_BASICAUTH_USERNAME=admin \ 20 | -e ME_CONFIG_BASICAUTH_PASSWORD=pass \ 21 | mongo-express:0.30 22 | -------------------------------------------------------------------------------- /scripts/start_netdata.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker run -d \ 4 | --net=host \ 5 | --cap-add SYS_PTRACE \ 6 | -v /proc:/host/proc:ro \ 7 | -v /sys:/host/sys:ro \ 8 | -v /var/run/docker.sock:/var/run/docker.sock \ 9 | -p 19999:19999 titpetric/netdata -------------------------------------------------------------------------------- /scripts/stop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script will (re)start all services. 4 | # It should be triggered at the upper directory, and safe to repeat. 5 | 6 | source scripts/header.sh 7 | 8 | echo_b "Stop all services..." 9 | docker-compose stop 10 | 11 | echo_b "Remove all services..." 
12 | docker-compose rm -f -a 13 | -------------------------------------------------------------------------------- /scripts/update_docker_images.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Pull yeasy/hyperledger:latest and retagging" 4 | docker pull yeasy/hyperledger:latest && \ 5 | docker rmi hyperledger/fabric-baseimage:latest && \ 6 | docker tag yeasy/hyperledger:latest hyperledger/fabric-baseimage:latest 7 | 8 | echo "Pull yeasy/hyperledger-peer:latest" 9 | docker pull yeasy/hyperledger-peer:latest && \ 10 | docker tag yeasy/hyperledger-peer:latest yeasy/hyperledger-peer:latest 11 | -------------------------------------------------------------------------------- /src/Dockerfile-dashboard: -------------------------------------------------------------------------------- 1 | FROM python:3.5 2 | MAINTAINER Baohua Yang <"baohyang@cn.ibm.com"> 3 | ENV TZ Asia/Shanghai 4 | 5 | WORKDIR /app 6 | COPY ./requirements.txt /app 7 | RUN pip install --no-cache-dir -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com -r requirements.txt 8 | 9 | COPY . /app 10 | 11 | # use this in development 12 | CMD ["python", "dashboard.py"] 13 | 14 | # use this in product 15 | #CMD ["gunicorn", "-w", "128", "-b", "0.0.0.0:8080", "dashboard:app"] 16 | -------------------------------------------------------------------------------- /src/Dockerfile-restserver: -------------------------------------------------------------------------------- 1 | FROM python:3.5 2 | MAINTAINER Baohua Yang <"baohyang@cn.ibm.com"> 3 | ENV TZ Asia/Shanghai 4 | 5 | WORKDIR /app 6 | COPY ./requirements.txt /app 7 | RUN pip install --no-cache-dir -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com -r requirements.txt 8 | 9 | COPY . 
/app 10 | 11 | # use this in development 12 | CMD ["python", "restserver.py"] 13 | 14 | # use this in product 15 | #CMD ["gunicorn", "-w", "128", "-b", "0.0.0.0:80", "restserver:app"] 16 | -------------------------------------------------------------------------------- /src/Dockerfile-watchdog: -------------------------------------------------------------------------------- 1 | FROM python:3.5 2 | MAINTAINER Baohua Yang <"baohyang@cn.ibm.com"> 3 | ENV TZ Asia/Shanghai 4 | 5 | WORKDIR /app 6 | COPY ./requirements.txt /app 7 | RUN pip install --no-cache-dir -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com -r requirements.txt 8 | 9 | COPY . /app 10 | 11 | # use this in development 12 | CMD ["python", "watchdog.py"] -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import version, author 2 | 3 | __title__ = 'Cello' 4 | __version__ = version 5 | __author__ = author 6 | -------------------------------------------------------------------------------- /src/_compose_files/local/cluster-4.yml: -------------------------------------------------------------------------------- 1 | # This compose file will start 4 hyperledger peer nodes, and make a cluster 2 | # vp0: validating node as root 3 | # vp1 - vp3: validating node as peer 4 | # https://github.com/yeasy/docker-compose-files#hyperledger 5 | 6 | version: '2' 7 | 8 | services: 9 | # validating node as the root 10 | # vp0 will also be used for client interactive operations 11 | # If you want to run fabric command on the host, then map 7051:7051 to host 12 | # port, or use like `CORE_PEER_ADDRESS=172.17.0.2:7051` to specify peer addr. 
13 | vp0: 14 | extends: 15 | file: peer-pbft.yml 16 | service: vp 17 | hostname: vp0 18 | container_name: ${COMPOSE_PROJECT_NAME}_vp0 19 | environment: 20 | - CORE_PEER_ID=vp0 21 | ports: 22 | - "${REST_PORT}:7050" 23 | - "${GRPC_PORT}:7051" 24 | 25 | # validating node 26 | vp1: 27 | extends: 28 | file: peer-pbft.yml 29 | service: vp 30 | hostname: vp1 31 | container_name: ${COMPOSE_PROJECT_NAME}_vp1 32 | environment: 33 | - CORE_PEER_ID=vp1 34 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 35 | #links: 36 | # - vp0 37 | 38 | # validating node 39 | vp2: 40 | extends: 41 | file: peer-pbft.yml 42 | service: vp 43 | hostname: vp2 44 | container_name: ${COMPOSE_PROJECT_NAME}_vp2 45 | environment: 46 | - CORE_PEER_ID=vp2 47 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 48 | 49 | 50 | # validating node 51 | vp3: 52 | extends: 53 | file: peer-pbft.yml 54 | service: vp 55 | hostname: vp3 56 | container_name: ${COMPOSE_PROJECT_NAME}_vp3 57 | environment: 58 | - CORE_PEER_ID=vp3 59 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 60 | 61 | networks: 62 | default: 63 | external: 64 | name: ${CLUSTER_NETWORK} -------------------------------------------------------------------------------- /src/_compose_files/local/cluster-6.yml: -------------------------------------------------------------------------------- 1 | # This compose file will start 6 hyperledger peer nodes, and make a cluster 2 | # vp0: validating node as root 3 | # vp1 - vp5: validating node as peer 4 | # https://github.com/yeasy/docker-compose-files#hyperledger 5 | 6 | version: '2' 7 | 8 | services: 9 | # validating node as the root 10 | # vp0 will also be used for client interactive operations 11 | # If run fabric command on the host, then map peer listen port to host port, 12 | # or use `CORE_PEER_ADDRESS=172.17.0.2:7051` to specify peer addr. 
13 | vp0: 14 | extends: 15 | file: peer-pbft.yml 16 | service: vp 17 | hostname: vp0 18 | container_name: ${COMPOSE_PROJECT_NAME}_vp0 19 | environment: 20 | - CORE_PEER_ID=vp0 21 | ports: 22 | - "${REST_PORT}:7050" 23 | - "${GRPC_PORT}:7051" 24 | 25 | # validating node 26 | vp1: 27 | extends: 28 | file: peer-pbft.yml 29 | service: vp 30 | hostname: vp1 31 | container_name: ${COMPOSE_PROJECT_NAME}_vp1 32 | environment: 33 | - CORE_PEER_ID=vp1 34 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 35 | #links: 36 | # - vp0 37 | 38 | # validating node 39 | vp2: 40 | extends: 41 | file: peer-pbft.yml 42 | service: vp 43 | hostname: vp2 44 | container_name: ${COMPOSE_PROJECT_NAME}_vp2 45 | environment: 46 | - CORE_PEER_ID=vp2 47 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 48 | 49 | # validating node 50 | vp3: 51 | extends: 52 | file: peer-pbft.yml 53 | service: vp 54 | hostname: vp3 55 | container_name: ${COMPOSE_PROJECT_NAME}_vp3 56 | environment: 57 | - CORE_PEER_ID=vp3 58 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 59 | 60 | # validating node 61 | vp4: 62 | extends: 63 | file: peer-pbft.yml 64 | service: vp 65 | hostname: vp4 66 | container_name: ${COMPOSE_PROJECT_NAME}_vp4 67 | environment: 68 | - CORE_PEER_ID=vp4 69 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 70 | 71 | # validating node 72 | vp5: 73 | extends: 74 | file: peer-pbft.yml 75 | service: vp 76 | hostname: vp5 77 | container_name: ${COMPOSE_PROJECT_NAME}_vp5 78 | environment: 79 | - CORE_PEER_ID=vp5 80 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 81 | 82 | networks: 83 | default: 84 | external: 85 | name: ${CLUSTER_NETWORK} -------------------------------------------------------------------------------- /src/_compose_files/local/peer-pbft.yml: -------------------------------------------------------------------------------- 1 | # This is the default base file to config env and command 2 | # Notice that 
chaincode is executed inside docker in default net mode 3 | # https://github.com/yeasy/docker-compose-files 4 | 5 | # Depends on the yeasy/hyperledger-peer:latest image 6 | 7 | # If you want to enable consensus, just uncomment the 8 | # CORE_PEER_VALIDATOR_CONSENSUE=obcpbft line 9 | # See https://github.com/hyperledger/fabric/blob/master/docs/dev-setup/devnet-setup.md#using-consensus-plugin for more details. 10 | 11 | version: '2' 12 | 13 | services: 14 | vp: 15 | image: hyperledger/fabric-peer:latest 16 | restart: unless-stopped 17 | labels: 18 | - monitor=true 19 | - hyperledger=true 20 | - com.docker.swarm.reschedule-policy=["on-node-failure"] 21 | environment: 22 | - CORE_PEER_ADDRESSAUTODETECT=true 23 | - CORE_PEER_NETWORKID=${PEER_NETWORKID} 24 | - CORE_LOGGING_LEVEL=${CLUSTER_LOG_LEVEL} #critical, error, warning, notice, info, debug 25 | - CORE_VM_ENDPOINT=${VM_ENDPOINT} 26 | - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=${VM_DOCKER_HOSTCONFIG_NETWORKMODE} # host, bridge, ipvlan, none 27 | - CORE_PEER_VALIDATOR_CONSENSUS_PLUGIN=${PEER_VALIDATOR_CONSENSUS_PLUGIN} # noops, pbft 28 | # The following section enables noops consensus 29 | - CORE_NOOPS_BLOCK_TIMEOUT=2 # only useful when in noops 30 | - CORE_NOOPS_BLOCK_WAIT=2 # only useful when in noops 31 | # The following section enables pbft consensus 32 | - CORE_PBFT_GENERAL_MODE=${PBFT_GENERAL_MODE} # batch, classic, sieve 33 | - CORE_PBFT_GENERAL_N=${PBFT_GENERAL_N} 34 | - CORE_PBFT_GENERAL_BATCHSIZE=2 # only useful when in batch mode 35 | - CORE_PBFT_GENERAL_TIMEOUT_REQUEST=5s 36 | expose: 37 | - "7050" # Rest 38 | - "7051" # Grpc 39 | - "7052" # Peer CLI 40 | - "7053" # Peer Event 41 | - "7054" # eCAP 42 | - "7055" # eCAA 43 | - "7056" # tCAP 44 | - "7057" # eCAA 45 | - "7058" # tlsCAP 46 | - "7059" # tlsCAA 47 | #volumes: # docker.sock is mapped as the default CORE_VM_ENDPOINT 48 | # - /var/run/docker.sock:/var/run/docker.sock 49 | mem_limit: 512000000 50 | memswap_limit: 1000000000 51 | cpu_quota: 50000 52 | 
logging: 53 | driver: json-file 54 | options: 55 | max-size: 100m 56 | command: peer node start 57 | -------------------------------------------------------------------------------- /src/_compose_files/syslog/cluster-4.yml: -------------------------------------------------------------------------------- 1 | # This compose file will start 4 hyperledger peer nodes, and make a cluster 2 | # vp0: validating node as root 3 | # vp1: validating node as peer 4 | # vp2: validating node as peer 5 | # vp3: validating node as peer 6 | # https://github.com/yeasy/docker-compose-files 7 | 8 | version: '2' 9 | 10 | services: 11 | # validating node as the root 12 | # vp0 will also be used for client interactive operations 13 | # If you want to run fabric command on the host, then map 7051:7051 to host 14 | # port, or use like `CORE_PEER_ADDRESS=172.17.0.2:7051` to specify peer addr. 15 | vp0: 16 | extends: 17 | file: peer-pbft.yml 18 | service: vp 19 | hostname: vp0 20 | container_name: ${COMPOSE_PROJECT_NAME}_vp0 21 | environment: 22 | - CORE_PEER_ID=vp0 23 | ports: 24 | - "${REST_PORT}:7050" 25 | - "${GRPC_PORT}:7051" 26 | 27 | # validating node 28 | vp1: 29 | extends: 30 | file: peer-pbft.yml 31 | service: vp 32 | hostname: vp1 33 | container_name: ${COMPOSE_PROJECT_NAME}_vp1 34 | environment: 35 | - CORE_PEER_ID=vp1 36 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 37 | #links: 38 | # - vp0 39 | 40 | # validating node 41 | vp2: 42 | extends: 43 | file: peer-pbft.yml 44 | service: vp 45 | hostname: vp2 46 | container_name: ${COMPOSE_PROJECT_NAME}_vp2 47 | environment: 48 | - CORE_PEER_ID=vp2 49 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 50 | 51 | 52 | # validating node 53 | vp3: 54 | extends: 55 | file: peer-pbft.yml 56 | service: vp 57 | hostname: vp3 58 | container_name: ${COMPOSE_PROJECT_NAME}_vp3 59 | environment: 60 | - CORE_PEER_ID=vp3 61 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 62 | 63 | networks: 64 | 
default: 65 | external: 66 | name: ${CLUSTER_NETWORK} -------------------------------------------------------------------------------- /src/_compose_files/syslog/cluster-6.yml: -------------------------------------------------------------------------------- 1 | # This compose file will start 4 hyperledger peer nodes, and make a cluster 2 | # vp0: validating node as root 3 | # vp1: validating node as peer 4 | # vp2: validating node as peer 5 | # vp3: validating node as peer 6 | # https://github.com/yeasy/docker-compose-files 7 | 8 | version: '2' 9 | 10 | services: 11 | # validating node as the root 12 | # vp0 will also be used for client interactive operations 13 | # If you want to run fabric command on the host, then map 7051:7051 to host 14 | # port, or use like `CORE_PEER_ADDRESS=172.17.0.2:7051` to specify peer addr. 15 | vp0: 16 | extends: 17 | file: peer-pbft.yml 18 | service: vp 19 | hostname: vp0 20 | container_name: ${COMPOSE_PROJECT_NAME}_vp0 21 | environment: 22 | - CORE_PEER_ID=vp0 23 | ports: 24 | - "${REST_PORT}:7050" 25 | - "${GRPC_PORT}:7051" 26 | 27 | # validating node 28 | vp1: 29 | extends: 30 | file: peer-pbft.yml 31 | service: vp 32 | hostname: vp1 33 | container_name: ${COMPOSE_PROJECT_NAME}_vp1 34 | environment: 35 | - CORE_PEER_ID=vp1 36 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 37 | #links: 38 | # - vp0 39 | 40 | # validating node 41 | vp2: 42 | extends: 43 | file: peer-pbft.yml 44 | service: vp 45 | hostname: vp2 46 | container_name: ${COMPOSE_PROJECT_NAME}_vp2 47 | environment: 48 | - CORE_PEER_ID=vp2 49 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 50 | 51 | # validating node 52 | vp3: 53 | extends: 54 | file: peer-pbft.yml 55 | service: vp 56 | hostname: vp3 57 | container_name: ${COMPOSE_PROJECT_NAME}_vp3 58 | environment: 59 | - CORE_PEER_ID=vp3 60 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 61 | 62 | # validating node 63 | vp4: 64 | extends: 65 | file: peer-pbft.yml 66 
| service: vp 67 | hostname: vp4 68 | container_name: ${COMPOSE_PROJECT_NAME}_vp4 69 | environment: 70 | - CORE_PEER_ID=vp4 71 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 72 | 73 | # validating node 74 | vp5: 75 | extends: 76 | file: peer-pbft.yml 77 | service: vp 78 | hostname: vp5 79 | container_name: ${COMPOSE_PROJECT_NAME}_vp5 80 | environment: 81 | - CORE_PEER_ID=vp5 82 | - CORE_PEER_DISCOVERY_ROOTNODE=${COMPOSE_PROJECT_NAME}_vp0:7051 83 | 84 | networks: 85 | default: 86 | external: 87 | name: ${CLUSTER_NETWORK} -------------------------------------------------------------------------------- /src/_compose_files/syslog/peer-pbft.yml: -------------------------------------------------------------------------------- 1 | # This is the default base file to config env and command 2 | # Notice that chaincode is executed inside docker in default net mode 3 | # https://github.com/yeasy/docker-compose-files 4 | 5 | # Depends on the yeasy/hyperledger-peer:latest image 6 | 7 | # If you want enable consensus, just uncomment the 8 | # CORE_PEER_VALIDATOR_CONSENSUE=obcpbft line 9 | # See https://github.com/hyperledger/fabric/blob/master/docs/dev-setup/devnet-setup.md#using-consensus-plugin for more details. 
10 | 11 | version: '2' 12 | 13 | services: 14 | vp: 15 | image: hyperledger/fabric-peer:latest 16 | restart: unless-stopped 17 | labels: 18 | - monitor=true 19 | - hyperledger=true 20 | - com.docker.swarm.reschedule-policy=["on-node-failure"] 21 | environment: 22 | - CORE_PEER_ADDRESSAUTODETECT=true 23 | - CORE_PEER_NETWORKID=${PEER_NETWORKID} 24 | - CORE_LOGGING_LEVEL=${LOGGING_LEVEL_CLUSTERS} #critical, error, warning, notice, info, debug 25 | - CORE_VM_ENDPOINT=${VM_ENDPOINT} 26 | - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=${VM_DOCKER_HOSTCONFIG_NETWORKMODE} # host, bridge, ipvlan, none 27 | - CORE_PEER_VALIDATOR_CONSENSUS_PLUGIN=${PEER_VALIDATOR_CONSENSUS_PLUGIN} # noops, pbft 28 | # The following section enables noops consensus 29 | - CORE_NOOPS_BLOCK_TIMEOUT=2 # only useful when in noops 30 | - CORE_NOOPS_BLOCK_WAIT=2 # only useful when in noops 31 | # The following section enables pbft consensus 32 | - CORE_PBFT_GENERAL_MODE=${PBFT_GENERAL_MODE} # batch, classic, sieve 33 | - CORE_PBFT_GENERAL_N=${PBFT_GENERAL_N} 34 | - CORE_PBFT_GENERAL_BATCHSIZE=2 # only useful when in batch mode 35 | - CORE_PBFT_GENERAL_TIMEOUT_REQUEST=5s 36 | expose: 37 | - "7050" # Rest 38 | - "7051" # Grpc 39 | - "7052" # Peer CLI 40 | - "7053" # Peer Event 41 | - "7054" # eCAP 42 | - "7055" # eCAA 43 | - "7056" # tCAP 44 | - "7057" # eCAA 45 | - "7058" # tlsCAP 46 | - "7059" # tlsCAA 47 | #volumes: # docker.sock is mapped as the default CORE_VM_ENDPOINT 48 | # - /var/run/docker.sock:/var/run/docker.sock 49 | mem_limit: 512000000 50 | memswap_limit: 1000000000 51 | cpu_quota: 50000 52 | command: peer node start 53 | logging: 54 | driver: syslog 55 | options: 56 | syslog-address: ${SYSLOG_SERVER} 57 | tag: "{{.ImageName}}/{{.Name}}/{{.ID}}" -------------------------------------------------------------------------------- /src/agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .docker_swarm import get_project, \ 2 | check_daemon, 
detect_daemon_type, \ 3 | get_swarm_node_ip, \ 4 | compose_up, compose_clean, compose_start, compose_stop, compose_restart, \ 5 | setup_container_host, cleanup_host, reset_container_host 6 | -------------------------------------------------------------------------------- /src/common/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .db import db, col_host 3 | from .response import make_ok_response, make_fail_response, CODE_NOT_FOUND,\ 4 | CODE_BAD_REQUEST, CODE_CONFLICT, CODE_CREATED, CODE_FORBIDDEN, \ 5 | CODE_METHOD_NOT_ALLOWED, CODE_NO_CONTENT, CODE_NOT_ACCEPTABLE, CODE_OK 6 | 7 | from .log import log_handler, LOG_LEVEL 8 | from .utils import \ 9 | PEER_SERVICE_PORTS, CA_SERVICE_PORTS, SERVICE_PORTS, \ 10 | COMPOSE_FILE_PATH, \ 11 | CONSENSUS_PLUGINS, CONSENSUS_MODES, CONSENSUS_TYPES, \ 12 | HOST_TYPES, \ 13 | CLUSTER_PORT_START, CLUSTER_PORT_STEP, CLUSTER_SIZES, \ 14 | CLUSTER_NETWORK, \ 15 | CLUSTER_LOG_TYPES, CLUSTER_LOG_LEVEL, \ 16 | SYS_CREATOR, SYS_DELETER, SYS_RESETTING, SYS_USER, \ 17 | request_debug, request_get, request_json_body 18 | -------------------------------------------------------------------------------- /src/common/db.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from pymongo import MongoClient 4 | 5 | MONGO_URL = os.environ.get('MONGO_URL', None) or 'mongodb://mongo:27017' 6 | MONGO_DB = os.environ.get('MONGO_DB', None) or 'dev' 7 | 8 | mongo_client = MongoClient(MONGO_URL) 9 | db = mongo_client[MONGO_DB] 10 | 11 | col_host = db["host"] 12 | # col_cluster_active = db["cluster_active"] 13 | # col_cluster_released = db["cluster_released"] 14 | -------------------------------------------------------------------------------- /src/common/log.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | log_handler = logging.StreamHandler() 5 | 6 | formatter = 
logging.Formatter("[%(asctime)s] %(levelname)s [%(name)s]" 7 | " [%(filename)s:%(lineno)s %(funcName)20s()]" 8 | " - %(message)s") 9 | log_handler.setFormatter(formatter) 10 | 11 | LOG_LEVEL = eval("logging." + os.environ.get("LOG_LEVEL", "INFO")) 12 | -------------------------------------------------------------------------------- /src/common/response.py: -------------------------------------------------------------------------------- 1 | from flask import jsonify 2 | 3 | CODE_OK = 200 4 | CODE_CREATED = 201 5 | CODE_NO_CONTENT = 204 6 | CODE_BAD_REQUEST = 400 7 | CODE_FORBIDDEN = 403 8 | CODE_NOT_FOUND = 404 9 | CODE_METHOD_NOT_ALLOWED = 405 10 | CODE_NOT_ACCEPTABLE = 406 11 | CODE_CONFLICT = 409 12 | 13 | response_ok = { 14 | "status": "OK", 15 | "code": CODE_OK, 16 | "error": "", 17 | "data": {} 18 | } 19 | 20 | response_fail = { 21 | "status": "FAIL", 22 | "code": CODE_BAD_REQUEST, 23 | "error": "", 24 | "data": {} 25 | } 26 | 27 | 28 | def make_ok_response(error="", data={}, code=CODE_OK): 29 | response_ok['code'] = code 30 | response_ok["error"] = error 31 | response_ok["data"] = data 32 | return jsonify(response_ok), CODE_OK 33 | 34 | 35 | def make_fail_response(error="Invalid request", data={}, 36 | code=CODE_BAD_REQUEST): 37 | response_fail['code'] = code 38 | response_fail["error"] = error 39 | response_fail["data"] = data 40 | return jsonify(response_fail), CODE_BAD_REQUEST 41 | -------------------------------------------------------------------------------- /src/common/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | 5 | COMPOSE_FILE_PATH = os.getenv("COMPOSE_FILE_PATH", "./_compose_files") 6 | 7 | CLUSTER_NETWORK = "cello_net" 8 | CLUSTER_SIZES = [4, 6] 9 | 10 | # first port that can be assigned as cluster API 11 | CLUSTER_PORT_START = int(os.getenv("CLUSTER_PORT_START", 7050)) 12 | 13 | # number of port allocated to each cluster in case collision 14 | CLUSTER_PORT_STEP = 
100 15 | 16 | PEER_SERVICE_PORTS = { 17 | 'rest': 7050, # this is the reference starter for cluster port step 18 | 'grpc': 7051, 19 | 'cli': 7052, 20 | 'event': 7053, 21 | } 22 | 23 | CA_SERVICE_PORTS = { 24 | 'ecap': 7054, 25 | 'ecaa': 7055, 26 | 'tcap': 7056, 27 | 'tcaa': 7057, 28 | 'tlscap': 7058, 29 | 'tlscaa': 7059, 30 | } 31 | 32 | SERVICE_PORTS = dict(list(PEER_SERVICE_PORTS.items()) + 33 | list(CA_SERVICE_PORTS.items())) 34 | 35 | 36 | CONSENSUS_PLUGINS = ['noops', 'pbft'] # first one is the default one 37 | # CONSENSUS_MODES = ['classic', 'batch', 'sieve'] # pbft has various modes 38 | CONSENSUS_MODES = ['batch'] # pbft has various modes 39 | 40 | CONSENSUS_TYPES = [ 41 | ('noops', ''), 42 | ('pbft', 'batch'), 43 | # ('pbft', 'classic'), 44 | # ('pbft', 'sieve'), 45 | ] 46 | 47 | 48 | HOST_TYPES = ['single', 'swarm'] 49 | 50 | CLUSTER_LOG_TYPES = ['local', 'syslog'] 51 | 52 | CLUSTER_LOG_LEVEL = ['DEBUG', 'INFO', 'NOTICE', 'WARNING', 'ERROR', 53 | 'CRITICAL'] 54 | 55 | SYS_USER = "__SYSTEM__" 56 | SYS_CREATOR = SYS_USER + "CREATING" 57 | SYS_DELETER = SYS_USER + "DELETING" 58 | SYS_RESETTING = SYS_USER + "RESETTING" 59 | 60 | 61 | def json_decode(jsonstr): 62 | try: 63 | json_object = json.loads(jsonstr) 64 | except json.decoder.JSONDecodeError as e: 65 | print(e) 66 | return jsonstr 67 | return json_object 68 | 69 | 70 | def request_debug(request, logger): 71 | logger.debug("path={}, method={}".format(request.path, request.method)) 72 | logger.debug("request args:") 73 | for k in request.args: 74 | logger.debug("Arg: {0}:{1}".format(k, request.args[k])) 75 | logger.debug("request form:") 76 | for k in request.form: 77 | logger.debug("Form: {0}:{1}".format(k, request.form[k])) 78 | logger.debug("request raw body data:") 79 | logger.debug(request.data) 80 | logger.debug(request.get_json(force=True, silent=True)) 81 | 82 | 83 | def request_get(request, key, default_value=None): 84 | if key in request.args: 85 | return request.args.get(key) 86 | elif key in 
request.form: 87 | return request.form.get(key) 88 | try: 89 | json_body = request.get_json(force=True, silent=True) 90 | if key in json_body: 91 | return json_body[key] 92 | else: 93 | return default_value 94 | except Exception: 95 | return default_value 96 | 97 | 98 | def request_json_body(request, default_value={}): 99 | try: 100 | json_body = request.get_json(force=True, silent=True) 101 | return json_body 102 | except Exception: 103 | return default_value 104 | -------------------------------------------------------------------------------- /src/config.py: -------------------------------------------------------------------------------- 1 | class Config(object): 2 | DEBUG = False 3 | SECRET_KEY = '?\xbf,\xb4\x8d\xa3"<\x9c\xb0@\x0f5\xab,w\xee\x8d$0\x13\x8b83' 4 | 5 | 6 | class ProductionConfig(Config): 7 | DEBUG = False 8 | 9 | 10 | class DevelopmentConfig(Config): 11 | DEBUG = True 12 | -------------------------------------------------------------------------------- /src/dashboard.py: -------------------------------------------------------------------------------- 1 | import os 2 | from common import log_handler, LOG_LEVEL 3 | from flask import Flask, render_template 4 | from resources import bp_index, \ 5 | bp_stat_view, bp_stat_api, \ 6 | bp_cluster_view, bp_cluster_api, \ 7 | bp_host_view, bp_host_api 8 | 9 | app = Flask(__name__, static_folder='static', template_folder='templates') 10 | 11 | app.config.from_object('config.DevelopmentConfig') 12 | app.config.from_envvar('CELLO_CONFIG_FILE', silent=True) 13 | 14 | app.logger.setLevel(LOG_LEVEL) 15 | app.logger.addHandler(log_handler) 16 | 17 | app.register_blueprint(bp_index) 18 | app.register_blueprint(bp_host_view) 19 | app.register_blueprint(bp_host_api) 20 | app.register_blueprint(bp_cluster_view) 21 | app.register_blueprint(bp_cluster_api) 22 | app.register_blueprint(bp_stat_view) 23 | app.register_blueprint(bp_stat_api) 24 | 25 | 26 | @app.errorhandler(404) 27 | def page_not_found(error): 28 | return 
render_template('404.html'), 404 29 | 30 | 31 | @app.errorhandler(500) 32 | def internal_error(error): 33 | return render_template('500.html'), 500 34 | 35 | 36 | if __name__ == '__main__': 37 | app.run( 38 | host='0.0.0.0', 39 | port=8080, 40 | debug=os.environ.get('DEBUG', app.config.get("DEBUG", True)), 41 | threaded=True) 42 | -------------------------------------------------------------------------------- /src/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .cluster import cluster_handler 2 | from .host import host_handler 3 | from .stat import stat_handler 4 | -------------------------------------------------------------------------------- /src/modules/host.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | import os 4 | import random 5 | import sys 6 | import time 7 | 8 | from threading import Thread 9 | from pymongo.collection import ReturnDocument 10 | 11 | sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) 12 | from common import \ 13 | db, log_handler, \ 14 | LOG_LEVEL, CLUSTER_LOG_TYPES, CLUSTER_LOG_LEVEL, \ 15 | CLUSTER_SIZES, CLUSTER_PORT_START, CLUSTER_PORT_STEP, \ 16 | CONSENSUS_TYPES 17 | 18 | from agent import cleanup_host, check_daemon, detect_daemon_type, \ 19 | reset_container_host, setup_container_host 20 | 21 | from modules import cluster 22 | 23 | logger = logging.getLogger(__name__) 24 | logger.setLevel(LOG_LEVEL) 25 | logger.addHandler(log_handler) 26 | 27 | 28 | def check_status(func): 29 | def wrapper(self, *arg): 30 | if not self.is_active(*arg): 31 | logger.warning("Host inactive") 32 | return False 33 | else: 34 | return func(self, *arg) 35 | return wrapper 36 | 37 | 38 | class HostHandler(object): 39 | """ Main handler to operate the Docker hosts 40 | """ 41 | def __init__(self): 42 | self.col = db["host"] 43 | 44 | def create(self, name, daemon_url, capacity=1, 45 | 
log_level=CLUSTER_LOG_LEVEL[0], 46 | log_type=CLUSTER_LOG_TYPES[0], log_server="", autofill="false", 47 | schedulable="false", serialization=True): 48 | """ Create a new docker host node 49 | 50 | A docker host is potentially a single node or a swarm. 51 | Will full fill with clusters of given capacity. 52 | 53 | :param name: name of the node 54 | :param daemon_url: daemon_url of the host 55 | :param capacity: The number of clusters to hold 56 | :param log_type: type of the log 57 | :param log_level: level of the log 58 | :param log_server: server addr of the syslog 59 | :param autofill: Whether automatically fillup with chains 60 | :param schedulable: Whether can schedule cluster request to it 61 | :param serialization: whether to get serialized result or object 62 | :return: True or False 63 | """ 64 | logger.debug("Create host: name={}, daemon_url={}, capacity={}, " 65 | "log={}/{}, autofill={}, schedulable={}" 66 | .format(name, daemon_url, capacity, log_type, 67 | log_server, autofill, schedulable)) 68 | if not daemon_url.startswith("tcp://"): 69 | daemon_url = "tcp://" + daemon_url 70 | 71 | if self.col.find_one({"daemon_url": daemon_url}): 72 | logger.warning("{} already existed in db".format(daemon_url)) 73 | return {} 74 | 75 | if "://" not in log_server: 76 | log_server = "udp://" + log_server 77 | if log_type == CLUSTER_LOG_TYPES[0]: 78 | log_server = "" 79 | if check_daemon(daemon_url): 80 | logger.warning("The daemon_url is active:" + daemon_url) 81 | status = "active" 82 | else: 83 | logger.warning("The daemon_url is inactive:" + daemon_url) 84 | status = "inactive" 85 | 86 | detected_type = detect_daemon_type(daemon_url) 87 | 88 | if not setup_container_host(detected_type, daemon_url): 89 | logger.warning("{} cannot be setup".format(name)) 90 | return {} 91 | 92 | h = { 93 | 'id': '', 94 | 'name': name, 95 | 'daemon_url': daemon_url, 96 | 'create_ts': datetime.datetime.now(), 97 | 'capacity': capacity, 98 | 'status': status, 99 | 'clusters': [], 100 
| 'type': detected_type, 101 | 'log_level': log_level, 102 | 'log_type': log_type, 103 | 'log_server': log_server, 104 | 'autofill': autofill, 105 | 'schedulable': schedulable 106 | } 107 | hid = self.col.insert_one(h).inserted_id # object type 108 | host = self.db_update_one( 109 | {"_id": hid}, 110 | {"$set": {"id": str(hid)}}) 111 | 112 | if capacity > 0 and autofill == "true": # should autofill it 113 | self.fillup(str(hid)) 114 | 115 | if serialization: 116 | return self._serialize(host) 117 | else: 118 | return host 119 | 120 | def get_by_id(self, id): 121 | """ Get a host 122 | 123 | :param id: id of the doc 124 | :return: serialized result or obj 125 | """ 126 | # logger.debug("Get a host with id=" + id) 127 | ins = self.col.find_one({"id": id}) 128 | if not ins: 129 | logger.warning("No host found with id=" + id) 130 | return {} 131 | return self._serialize(ins) 132 | 133 | def update(self, id, d): 134 | """ Update a host 135 | 136 | TODO: may check when changing host type 137 | 138 | :param id: id of the host 139 | :param d: dict to use as updated values 140 | :return: serialized result or obj 141 | """ 142 | logger.debug("Get a host with id=" + id) 143 | h_old = self.get_by_id(id) 144 | if not h_old: 145 | logger.warning("No host found with id=" + id) 146 | return {} 147 | 148 | if "daemon_url" in d and not d["daemon_url"].startswith("tcp://"): 149 | d["daemon_url"] = "tcp://" + d["daemon_url"] 150 | 151 | if "capacity" in d: 152 | d["capacity"] = int(d["capacity"]) 153 | if d["capacity"] < len(h_old.get("clusters")): 154 | logger.warning("Cannot set cap smaller than running clusters") 155 | return {} 156 | if "log_server" in d and "://" not in d["log_server"]: 157 | d["log_server"] = "udp://" + d["log_server"] 158 | if "log_type" in d and d["log_type"] == CLUSTER_LOG_TYPES[0]: 159 | d["log_server"] = "" 160 | h_new = self.db_set_by_id(id, **d) 161 | return self._serialize(h_new) 162 | 163 | def list(self, filter_data={}): 164 | """ List hosts with given 
criteria 165 | 166 | :param filter_data: Image with the filter properties 167 | :return: iteration of serialized doc 168 | """ 169 | hosts = self.col.find(filter_data) 170 | return list(map(self._serialize, hosts)) 171 | 172 | def delete(self, id): 173 | """ Delete a host instance 174 | 175 | :param id: id of the host to delete 176 | :return: 177 | """ 178 | logger.debug("Delete a host with id={0}".format(id)) 179 | 180 | h = self.get_by_id(id) 181 | if not h: 182 | logger.warning("Cannot delete non-existed host") 183 | return False 184 | if h.get("clusters", ""): 185 | logger.warning("There are clusters on that host, cannot delete.") 186 | return False 187 | cleanup_host(h.get("daemon_url")) 188 | self.col.delete_one({"id": id}) 189 | return True 190 | 191 | @check_status 192 | def fillup(self, id): 193 | """ 194 | Fullfil a host with clusters to its capacity limit 195 | 196 | :param id: host id 197 | :return: True or False 198 | """ 199 | logger.debug("Try fillup host {}".format(id)) 200 | host = self.get_by_id(id) 201 | if not host: 202 | return False 203 | num_new = host.get("capacity") - len(host.get("clusters")) 204 | if num_new <= 0: 205 | logger.warning("host {} already full".format(id)) 206 | return True 207 | 208 | free_ports = cluster.cluster_handler.find_free_start_ports(id, num_new) 209 | logger.debug("Free_ports = {}".format(free_ports)) 210 | 211 | def create_cluster_work(start_port): 212 | cluster_name = "{}_{}".format( 213 | host.get("name"), 214 | int((start_port - CLUSTER_PORT_START) / CLUSTER_PORT_STEP)) 215 | consensus_plugin, consensus_mode = random.choice(CONSENSUS_TYPES) 216 | cluster_size = random.choice(CLUSTER_SIZES) 217 | cid = cluster.cluster_handler.create( 218 | name=cluster_name, host_id=id, start_port=start_port, 219 | consensus_plugin=consensus_plugin, 220 | consensus_mode=consensus_mode, size=cluster_size) 221 | if cid: 222 | logger.debug("Create cluster {} with id={}".format( 223 | cluster_name, cid)) 224 | else: 225 | 
logger.warning("Create cluster failed") 226 | for p in free_ports: 227 | t = Thread(target=create_cluster_work, args=(p,)) 228 | t.start() 229 | time.sleep(0.2) 230 | 231 | return True 232 | 233 | @check_status 234 | def clean(self, id): 235 | """ 236 | Clean a host's free clusters. 237 | 238 | :param id: host id 239 | :return: True or False 240 | """ 241 | logger.debug("clean host with id = {}".format(id)) 242 | host = self.get_by_id(id) 243 | if not host: 244 | return False 245 | if len(host.get("clusters")) <= 0: 246 | return True 247 | 248 | host = self.db_set_by_id(id, autofill="false") 249 | schedulable_status = host.get("schedulable") 250 | if schedulable_status == "true": 251 | host = self.db_set_by_id(id, schedulable="false") 252 | 253 | for cid in host.get("clusters"): 254 | t = Thread(target=cluster.cluster_handler.delete, args=(cid,)) 255 | t.start() 256 | time.sleep(0.2) 257 | 258 | if schedulable_status == "true": 259 | self.db_set_by_id(id, schedulable=schedulable_status) 260 | 261 | return True 262 | 263 | @check_status 264 | def reset(self, id): 265 | """ 266 | Clean a host's free clusters. 
267 | 268 | :param id: host id 269 | :return: True or False 270 | """ 271 | logger.debug("clean host with id = {}".format(id)) 272 | host = self.get_by_id(id) 273 | if not host or len(host.get("clusters")) > 0: 274 | logger.warning("No find resettable host with id ={}".format(id)) 275 | return False 276 | return reset_container_host(host_type=host.get("type"), 277 | daemon_url=host.get("daemon_url")) 278 | 279 | def refresh_status(self, id): 280 | """ 281 | Refresh the status of the host by detection 282 | 283 | :param host: the host to update status 284 | :return: Updated host 285 | """ 286 | host = self.get_by_id(id) 287 | if not host: 288 | logger.warning("No host found with id=" + id) 289 | return False 290 | if not check_daemon(host.get("daemon_url")): 291 | logger.warning("Host {} is inactive".format(id)) 292 | self.db_set_by_id(id, status="inactive") 293 | return False 294 | else: 295 | self.db_set_by_id(id, status="active") 296 | return True 297 | 298 | def is_active(self, host_id): 299 | """ 300 | Update status of the host 301 | 302 | :param host_id: the id of the host to update status 303 | :return: Updated host 304 | """ 305 | host = self.get_by_id(host_id) 306 | if not host: 307 | logger.warning("invalid host is given") 308 | return False 309 | return host.get("status") == "active" 310 | 311 | def get_active_host_by_id(self, id): 312 | """ 313 | Check if id exists, and status is active. Otherwise update to inactive. 
314 | 315 | :param id: host id 316 | :return: host or None 317 | """ 318 | logger.debug("check host with id = {}".format(id)) 319 | host = self.col.find_one({"id": id, "status": "active"}) 320 | if not host: 321 | logger.warning("No active host found with id=" + id) 322 | return {} 323 | return self._serialize(host) 324 | 325 | def _serialize(self, doc, keys=['id', 'name', 'daemon_url', 'capacity', 326 | 'type', 'create_ts', 'status', 'autofill', 327 | 'schedulable', 'clusters', 'log_level', 328 | 'log_type', 'log_server']): 329 | """ Serialize an obj 330 | 331 | :param doc: doc to serialize 332 | :param keys: filter which key in the results 333 | :return: serialized obj 334 | """ 335 | result = {} 336 | if doc: 337 | for k in keys: 338 | result[k] = doc.get(k, '') 339 | return result 340 | 341 | def db_set_by_id(self, id, **kwargs): 342 | """ 343 | Set the key:value pairs to the data 344 | :param id: Which host to update 345 | :param kwargs: kv pairs 346 | :return: The updated host json dict 347 | """ 348 | return self.db_update_one({"id": id}, {"$set": kwargs}) 349 | 350 | def db_update_one(self, filter, operations, after=True): 351 | """ 352 | Update the data into the active db 353 | 354 | :param filter: Which instance to update, e.g., {"id": "xxx"} 355 | :param operations: data to update to db, e.g., {"$set": {}} 356 | :param after: return AFTER or BEFORE 357 | :return: The updated host json dict 358 | """ 359 | if after: 360 | return_type = ReturnDocument.AFTER 361 | else: 362 | return_type = ReturnDocument.BEFORE 363 | doc = self.col.find_one_and_update( 364 | filter, operations, return_document=return_type) 365 | return self._serialize(doc) 366 | 367 | 368 | host_handler = HostHandler() 369 | -------------------------------------------------------------------------------- /src/modules/scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | class Scheduler(object): 3 | def __init__(self): 4 | pass 5 | 6 | def 
get_one(self, prefer): 7 | return {} 8 | 9 | 10 | class HostScheduler(Scheduler): 11 | 12 | def __init__(self): 13 | pass 14 | 15 | def get_host(self): 16 | return {} 17 | -------------------------------------------------------------------------------- /src/modules/stat.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | from threading import Thread 4 | from common import LOG_LEVEL, HOST_TYPES, CONSENSUS_PLUGINS, log_handler, \ 5 | CONSENSUS_MODES 6 | 7 | from modules import host_handler, cluster_handler 8 | 9 | logger = logging.getLogger(__name__) 10 | logger.setLevel(LOG_LEVEL) 11 | logger.addHandler(log_handler) 12 | 13 | 14 | class StatHandler(object): 15 | """ Main handler to get the Statistics data 16 | """ 17 | 18 | def __init__(self): 19 | pass 20 | 21 | def hosts(self): 22 | """ 23 | Get hosts related statistic result 24 | 25 | :return: The stat result 26 | """ 27 | result = {'status': [], 'type': []} 28 | actives = list(host_handler.list(filter_data={'status': 'active'})) 29 | inactive = list(host_handler.list(filter_data={'status': 'inactive'})) 30 | result['status'] = [ 31 | {'name': 'active', 'y': len(actives)}, 32 | {'name': 'inactive', 'y': len(inactive)} 33 | ] 34 | for host_type in HOST_TYPES: 35 | hosts = list(host_handler.list(filter_data={'type': host_type})) 36 | result['type'].append({ 37 | 'name': host_type, 38 | 'y': len(hosts) 39 | }) 40 | 41 | return result 42 | 43 | def clusters(self): 44 | """ 45 | Get clusters related statistic result 46 | 47 | :return: The stat result 48 | """ 49 | result = {'status': [], 'type': []} 50 | total_clusters = list(cluster_handler.list()) 51 | free_clusters = list(cluster_handler.list(filter_data={ 52 | 'user_id': ''})) 53 | total_number = len(total_clusters) 54 | free_clusters_number = len(free_clusters) 55 | result['status'] = [ 56 | {'name': 'free', 'y': free_clusters_number}, 57 | {'name': 'used', 'y': total_number - 
free_clusters_number} 58 | ] 59 | for consensus_plugin in CONSENSUS_PLUGINS: 60 | if consensus_plugin == CONSENSUS_PLUGINS[0]: 61 | consensus_type = consensus_plugin 62 | clusters = list(cluster_handler.list(filter_data={ 63 | 'consensus_plugin': consensus_plugin})) 64 | result['type'].append({ 65 | 'name': consensus_type, 66 | 'y': len(clusters) 67 | }) 68 | else: 69 | for consensus_mode in CONSENSUS_MODES: 70 | consensus_type = consensus_plugin + "/" + consensus_mode 71 | clusters = list(cluster_handler.list(filter_data={ 72 | 'consensus_plugin': consensus_plugin, 73 | 'consensus_mode': consensus_mode 74 | })) 75 | result['type'].append({ 76 | 'name': consensus_type, 77 | 'y': len(clusters) 78 | }) 79 | return result 80 | 81 | 82 | stat_handler = StatHandler() 83 | -------------------------------------------------------------------------------- /src/requirements.txt: -------------------------------------------------------------------------------- 1 | docker-compose>=1.7.0 2 | Flask>=0.11.0 3 | greenlet>=0.4.5 4 | gunicorn>=19.0.0 5 | pymongo>=3.2.0 6 | requests>=2.0.0 -------------------------------------------------------------------------------- /src/resources/__init__.py: -------------------------------------------------------------------------------- 1 | from .index import bp_index 2 | 3 | from .host_api import bp_host_api 4 | from .cluster_api import bp_cluster_api, front_rest_v2 5 | 6 | from .cluster_view import bp_cluster_view 7 | from .host_view import bp_host_view 8 | 9 | from .stat import bp_stat_api, bp_stat_view 10 | -------------------------------------------------------------------------------- /src/resources/cluster_api.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | from flask import Blueprint, render_template 6 | from flask import request as r 7 | 8 | sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) 9 | from common import log_handler, 
LOG_LEVEL, \
    request_get, make_ok_response, make_fail_response, \
    request_debug, request_json_body, \
    CODE_CREATED, CODE_NOT_FOUND, \
    CONSENSUS_PLUGINS, CONSENSUS_MODES, CLUSTER_SIZES
from modules import cluster_handler, host_handler

logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
logger.addHandler(log_handler)


bp_cluster_api = Blueprint('bp_cluster_api', __name__,
                           url_prefix='/{}'.format("api"))

front_rest_v2 = Blueprint('front_rest_v2', __name__,
                          url_prefix='/{}'.format("v2"))


def cluster_start(r):
    """Start a cluster which should be in stopped status currently.

    :param r: the flask request carrying cluster_id
    :return: ok response on success, fail response otherwise
    """
    cluster_id = request_get(r, "cluster_id")
    if not cluster_id:
        logger.warning("No cluster_id is given")
        return make_fail_response("No cluster_id is given")
    if cluster_handler.start(cluster_id):
        return make_ok_response()

    return make_fail_response("cluster start failed")


def cluster_restart(r):
    """Restart a cluster which should be in running status currently.

    :param r: the flask request carrying cluster_id
    :return: ok response on success, fail response otherwise
    """
    # Docstring fixed: it was copy-pasted from cluster_start and wrongly
    # claimed to "Start a cluster ... in stopped status".
    cluster_id = request_get(r, "cluster_id")
    if not cluster_id:
        logger.warning("No cluster_id is given")
        return make_fail_response("No cluster_id is given")
    if cluster_handler.restart(cluster_id):
        return make_ok_response()

    return make_fail_response("cluster restart failed")


def cluster_stop(r):
    """Stop a cluster which should be in running status currently.

    :param r: the flask request carrying cluster_id
    :return: ok response on success, fail response otherwise
    """
    cluster_id = request_get(r, "cluster_id")
    if not cluster_id:
        logger.warning("No cluster_id is given")
        return make_fail_response("No cluster_id is given")
    if cluster_handler.stop(cluster_id):
        return make_ok_response()

    return make_fail_response("cluster stop failed")


def cluster_apply(r):
    """Apply a cluster for a user, optionally filtered by consensus/size.

    :param r: the flask request carrying user_id and optional filters
    :return: ok response with the Cluster json body, or fail response
    """
    request_debug(r, logger)

    user_id = request_get(r, "user_id")
    if not user_id:
        logger.warning("cluster_apply without user_id")
        return make_fail_response("cluster_apply without user_id")

    allow_multiple, condition = request_get(r, "allow_multiple"), {}

    consensus_plugin = request_get(r, "consensus_plugin")
    consensus_mode = request_get(r, "consensus_mode")
    # Fix: a non-numeric "size" used to raise an unhandled ValueError
    # (HTTP 500); reject it with a proper fail response instead.
    try:
        cluster_size = int(request_get(r, "size") or -1)
    except (TypeError, ValueError):
        logger.warning("Invalid cluster_size")
        return make_fail_response("Invalid cluster_size")
    if consensus_plugin:
        if consensus_plugin not in CONSENSUS_PLUGINS:
            logger.warning("Invalid consensus_plugin")
            return make_fail_response("Invalid consensus_plugin")
        else:
            condition["consensus_plugin"] = consensus_plugin

    if consensus_mode:
        if consensus_mode not in CONSENSUS_MODES:
            logger.warning("Invalid consensus_mode")
            return make_fail_response("Invalid consensus_mode")
        else:
            condition["consensus_mode"] = consensus_mode

    if cluster_size >= 0:
        if cluster_size not in CLUSTER_SIZES:
            logger.warning("Invalid cluster_size")
            return make_fail_response("Invalid cluster_size")
        else:
            condition["size"] = cluster_size

    logger.debug("condition={}".format(condition))
    c = cluster_handler.apply_cluster(user_id=user_id, condition=condition,
                                      allow_multiple=allow_multiple)
    if not c:
        logger.warning("cluster_apply failed")
        return make_fail_response("No available res for {}".format(user_id))
    else:
        return
make_ok_response(data=c) 122 | 123 | 124 | def cluster_release(r): 125 | """Release a cluster which should be in used status currently. 126 | 127 | :param r: 128 | :return: 129 | """ 130 | cluster_id = request_get(r, "cluster_id") 131 | if not cluster_id: 132 | logger.warning("No cluster_id is given") 133 | return make_fail_response("No cluster_id is given") 134 | if cluster_handler.release_cluster(cluster_id): 135 | return make_ok_response() 136 | 137 | return make_fail_response("cluster release failed") 138 | 139 | 140 | @front_rest_v2.route('/cluster_op', methods=['GET', 'POST']) 141 | @bp_cluster_api.route('/cluster_op', methods=['GET', 'POST']) 142 | def cluster_actions(): 143 | """Issue some operations on the cluster. 144 | Valid operations include: apply, release, start, stop, restart 145 | e.g., 146 | apply a cluster for user: GET /cluster_op?action=apply&user_id=xxx 147 | release a cluster: GET /cluster_op?action=release&cluster_id=xxx 148 | start a cluster: GET /cluster_op?action=start&cluster_id=xxx 149 | stop a cluster: GET /cluster_op?action=stop&cluster_id=xxx 150 | restart a cluster: GET /cluster_op?action=restart&cluster_id=xxx 151 | 152 | Return a json obj. 153 | """ 154 | request_debug(r, logger) 155 | action = request_get(r, "action") 156 | logger.info("cluster_op with action={}".format(action)) 157 | if action == "apply": 158 | return cluster_apply(r) 159 | elif action == "release": 160 | return cluster_release(r) 161 | elif action == "start": 162 | return cluster_start(r) 163 | elif action == "stop": 164 | return cluster_stop(r) 165 | elif action == "restart": 166 | return cluster_restart(r) 167 | else: 168 | return make_fail_response(error="Unknown action type") 169 | 170 | 171 | @bp_cluster_api.route('/cluster/', methods=['GET']) 172 | @front_rest_v2.route('/cluster/', methods=['GET']) 173 | def cluster_query(cluster_id): 174 | """Query a json obj of a cluster 175 | 176 | GET /cluster/xxxx 177 | 178 | Return a json obj of the cluster. 
179 | """ 180 | request_debug(r, logger) 181 | result = cluster_handler.get_by_id(cluster_id) 182 | logger.info(result) 183 | if result: 184 | return make_ok_response(data=result) 185 | else: 186 | error_msg = "cluster not found with id=" + cluster_id 187 | logger.warning(error_msg) 188 | return make_fail_response(error=error_msg, data=r.form, 189 | code=CODE_NOT_FOUND) 190 | 191 | 192 | @bp_cluster_api.route('/cluster', methods=['POST']) 193 | def cluster_create(): 194 | """Create a cluster on a host 195 | 196 | POST /cluster 197 | { 198 | name: xxx, 199 | host_id: xxx, 200 | consensus_plugin: pbft, 201 | consensus_mode: batch, 202 | size: 4, 203 | } 204 | 205 | :return: response object 206 | """ 207 | logger.info("/cluster action=" + r.method) 208 | request_debug(r, logger) 209 | if not r.form["name"] or not r.form["host_id"] or not \ 210 | r.form["consensus_plugin"] or not r.form["size"]: 211 | error_msg = "cluster post without enough data" 212 | logger.warning(error_msg) 213 | return make_fail_response(error=error_msg, data=r.form) 214 | else: 215 | name, host_id, consensus_plugin, consensus_mode, size = \ 216 | r.form['name'], r.form['host_id'], r.form['consensus_plugin'],\ 217 | r.form['consensus_mode'] or '', int(r.form[ 218 | "size"]) 219 | if consensus_plugin not in CONSENSUS_PLUGINS: 220 | logger.debug("Unknown consensus_plugin={}".format( 221 | consensus_plugin)) 222 | return make_fail_response() 223 | if consensus_plugin != CONSENSUS_PLUGINS[0] and consensus_mode \ 224 | not in CONSENSUS_MODES: 225 | logger.debug("Invalid consensus, plugin={}, mode={}".format( 226 | consensus_plugin, consensus_mode)) 227 | return make_fail_response() 228 | 229 | if size not in CLUSTER_SIZES: 230 | logger.debug("Unknown cluster size={}".format(size)) 231 | return make_fail_response() 232 | if cluster_handler.create(name=name, host_id=host_id, 233 | consensus_plugin=consensus_plugin, 234 | consensus_mode=consensus_mode, 235 | size=size): 236 | logger.debug("cluster POST 
successfully") 237 | return make_ok_response(code=CODE_CREATED) 238 | else: 239 | logger.debug("cluster creation failed") 240 | return make_fail_response(error="Failed to create cluster {}". 241 | format(name)) 242 | 243 | 244 | @bp_cluster_api.route('/cluster', methods=['DELETE']) 245 | def cluster_delete(): 246 | """Delete a cluster 247 | 248 | DELETE /cluster 249 | { 250 | id: xxx 251 | col_name: active 252 | } 253 | 254 | :return: response obj 255 | """ 256 | logger.info("/cluster action=" + r.method) 257 | request_debug(r, logger) 258 | if not r.form["id"] or not r.form["col_name"]: 259 | error_msg = "cluster operation post without enough data" 260 | logger.warning(error_msg) 261 | return make_fail_response(error=error_msg, data=r.form) 262 | else: 263 | logger.debug("cluster delete with id={0}, col_name={1}".format( 264 | r.form["id"], r.form["col_name"])) 265 | if r.form["col_name"] == "active": 266 | result = cluster_handler.delete(id=r.form["id"]) 267 | else: 268 | result = cluster_handler.delete_released(id=r.form["id"]) 269 | if result: 270 | return make_ok_response() 271 | else: 272 | error_msg = "Failed to delete cluster {}".format(r.form["id"]) 273 | logger.warning(error_msg) 274 | return make_fail_response(error=error_msg) 275 | 276 | 277 | @bp_cluster_api.route('/clusters', methods=['GET', 'POST']) 278 | @front_rest_v2.route('/clusters', methods=['GET', 'POST']) 279 | def cluster_list(): 280 | """List clusters with the filter 281 | e.g., 282 | 283 | GET /clusters?consensus_plugin=pbft 284 | 285 | Return objs of the clusters. 
286 | """ 287 | request_debug(r, logger) 288 | f = {} 289 | if r.method == 'GET': 290 | f.update(r.args.to_dict()) 291 | elif r.method == 'POST': 292 | f.update(request_json_body(r)) 293 | logger.info(f) 294 | result = cluster_handler.list(filter_data=f) 295 | logger.error(result) 296 | return make_ok_response(data=result) 297 | 298 | 299 | # will deprecate 300 | @front_rest_v2.route('/cluster_apply', methods=['GET', 'POST']) 301 | def cluster_apply_dep(): 302 | """ 303 | Return a Cluster json body. 304 | """ 305 | request_debug(r, logger) 306 | 307 | user_id = request_get(r, "user_id") 308 | if not user_id: 309 | error_msg = "cluster_apply without user_id" 310 | logger.warning(error_msg) 311 | return make_fail_response(error=error_msg) 312 | 313 | allow_multiple, condition = request_get(r, "allow_multiple"), {} 314 | 315 | consensus_plugin = request_get(r, "consensus_plugin") 316 | consensus_mode = request_get(r, "consensus_mode") 317 | cluster_size = int(request_get(r, "size") or -1) 318 | if consensus_plugin: 319 | if consensus_plugin not in CONSENSUS_PLUGINS: 320 | error_msg = "Invalid consensus_plugin" 321 | logger.warning(error_msg) 322 | return make_fail_response(error=error_msg) 323 | else: 324 | condition["consensus_plugin"] = consensus_plugin 325 | 326 | if consensus_mode: 327 | if consensus_mode not in CONSENSUS_MODES: 328 | error_msg = "Invalid consensus_mode" 329 | logger.warning(error_msg) 330 | return make_fail_response(error=error_msg) 331 | else: 332 | condition["consensus_mode"] = consensus_mode 333 | 334 | if cluster_size >= 0: 335 | if cluster_size not in CLUSTER_SIZES: 336 | error_msg = "Invalid cluster_size" 337 | logger.warning(error_msg) 338 | return make_fail_response(error=error_msg) 339 | else: 340 | condition["size"] = cluster_size 341 | 342 | logger.debug("condition={}".format(condition)) 343 | c = cluster_handler.apply_cluster(user_id=user_id, condition=condition, 344 | allow_multiple=allow_multiple) 345 | if not c: 346 | error_msg = 
"No available res for {}".format(user_id) 347 | logger.warning(error_msg) 348 | return make_fail_response(error=error_msg) 349 | else: 350 | return make_ok_response(data=c) 351 | 352 | 353 | # will deprecate 354 | @front_rest_v2.route('/cluster_release', methods=['GET', 'POST']) 355 | def cluster_release_dep(): 356 | """ 357 | Return status. 358 | """ 359 | request_debug(r, logger) 360 | user_id = request_get(r, "user_id") 361 | cluster_id = request_get(r, "cluster_id") 362 | if not user_id and not cluster_id: 363 | error_msg = "cluster_release without id" 364 | logger.warning(error_msg) 365 | return make_fail_response(error=error_msg, data=r.args) 366 | else: 367 | result = None 368 | if cluster_id: 369 | result = cluster_handler.release_cluster(cluster_id=cluster_id) 370 | elif user_id: 371 | result = cluster_handler.release_cluster_for_user(user_id=user_id) 372 | if not result: 373 | error_msg = "cluster_release failed user_id={} cluster_id={}". \ 374 | format(user_id, cluster_id) 375 | logger.warning(error_msg) 376 | data = { 377 | "user_id": user_id, 378 | "cluster_id": cluster_id, 379 | } 380 | return make_fail_response(error=error_msg, data=data) 381 | else: 382 | return make_ok_response() 383 | -------------------------------------------------------------------------------- /src/resources/cluster_view.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | from flask import Blueprint, render_template 6 | from flask import request as r 7 | 8 | sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) 9 | from common import log_handler, LOG_LEVEL, \ 10 | request_debug, \ 11 | CONSENSUS_PLUGINS, CONSENSUS_MODES, CLUSTER_SIZES 12 | from modules import cluster_handler, host_handler 13 | 14 | logger = logging.getLogger(__name__) 15 | logger.setLevel(LOG_LEVEL) 16 | logger.addHandler(log_handler) 17 | 18 | 19 | bp_cluster_view = Blueprint('bp_cluster_view', __name__, 20 
| url_prefix='/{}'.format("view")) 21 | 22 | 23 | # Return a web page with cluster info 24 | @bp_cluster_view.route('/cluster/', methods=['GET']) 25 | def cluster_info_show(cluster_id): 26 | logger.debug("/ cluster_info/{}?released={} action={}".format( 27 | cluster_id, r.args.get('released', '0'), r.method)) 28 | released = (r.args.get('released', '0') != '0') 29 | if not released: 30 | return render_template("cluster_info.html", 31 | item=cluster_handler.get_by_id(cluster_id), 32 | consensus_plugins=CONSENSUS_PLUGINS) 33 | else: 34 | return render_template("cluster_info.html", 35 | item=cluster_handler.get_by_id( 36 | cluster_id, col_name="released"), 37 | consensus_plugins=CONSENSUS_PLUGINS) 38 | 39 | 40 | # Return a web page with clusters 41 | @bp_cluster_view.route('/clusters', methods=['GET']) 42 | def clusters_show(): 43 | request_debug(r, logger) 44 | show_type = r.args.get("type", "active") 45 | col_filter = dict((key, r.args.get(key)) for key in r.args if 46 | key != "col_name" and key != "page" and key != "type") 47 | if show_type != "released": 48 | col_name = r.args.get("col_name", "active") 49 | else: 50 | col_name = r.args.get("col_name", "released") 51 | 52 | if show_type == "inused": 53 | col_filter["user_id"] = {"$ne": ""} 54 | 55 | clusters = list(cluster_handler.list(filter_data=col_filter, 56 | col_name=col_name)) 57 | if show_type == "active": 58 | clusters.sort(key=lambda x: str(x["create_ts"]), reverse=True) 59 | elif show_type == "inused": 60 | clusters.sort(key=lambda x: str(x["apply_ts"]), reverse=True) 61 | else: 62 | clusters.sort(key=lambda x: str(x["release_ts"]), reverse=True) 63 | total_items = len(clusters) 64 | 65 | hosts = list(host_handler.list()) 66 | hosts_avail = list(filter(lambda e: e["status"] == "active" and len( 67 | e["clusters"]) < e["capacity"], hosts)) 68 | return render_template("clusters.html", type=show_type, col_name=col_name, 69 | items_count=total_items, items=clusters, 70 | hosts_available=hosts_avail, 71 | 
consensus_plugins=CONSENSUS_PLUGINS, 72 | consensus_modes=CONSENSUS_MODES, 73 | cluster_sizes=CLUSTER_SIZES) 74 | -------------------------------------------------------------------------------- /src/resources/host_api.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | from flask import jsonify, Blueprint, render_template 6 | from flask import request as r 7 | 8 | sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) 9 | from common import log_handler, LOG_LEVEL, \ 10 | make_ok_response, make_fail_response, \ 11 | CODE_CREATED, \ 12 | request_debug 13 | 14 | from modules import host_handler 15 | 16 | logger = logging.getLogger(__name__) 17 | logger.setLevel(LOG_LEVEL) 18 | logger.addHandler(log_handler) 19 | 20 | 21 | bp_host_api = Blueprint('bp_host_api', __name__, 22 | url_prefix='/{}'.format("api")) 23 | 24 | 25 | @bp_host_api.route('/host/', methods=['GET']) 26 | def host_query(host_id): 27 | request_debug(r, logger) 28 | result = host_handler.get_by_id(host_id) 29 | logger.debug(result) 30 | if result: 31 | return make_ok_response(data=result) 32 | else: 33 | error_msg = "host not found with id=" + host_id 34 | logger.warning(error_msg) 35 | return make_fail_response(error=error_msg, data=r.form) 36 | 37 | 38 | @bp_host_api.route('/host', methods=['POST']) 39 | def host_create(): 40 | request_debug(r, logger) 41 | name, daemon_url, capacity, log_type, log_server, log_level = \ 42 | r.form['name'], r.form['daemon_url'], r.form['capacity'], \ 43 | r.form['log_type'], r.form['log_server'], r.form['log_level'] 44 | 45 | if "autofill" in r.form and r.form["autofill"] == "on": 46 | autofill = "true" 47 | else: 48 | autofill = "false" 49 | 50 | if "schedulable" in r.form and r.form["schedulable"] == "on": 51 | schedulable = "true" 52 | else: 53 | schedulable = "false" 54 | 55 | logger.debug("name={}, daemon_url={}, capacity={}" 56 | "fillup={}, schedulable={}, 
log={}/{}". 57 | format(name, daemon_url, capacity, autofill, schedulable, 58 | log_type, log_server)) 59 | if not name or not daemon_url or not capacity or not log_type: 60 | error_msg = "host POST without enough data" 61 | logger.warning(error_msg) 62 | return make_fail_response(error=error_msg, data=r.form) 63 | else: 64 | result = host_handler.create(name=name, daemon_url=daemon_url, 65 | capacity=int(capacity), 66 | autofill=autofill, 67 | schedulable=schedulable, 68 | log_level=log_level, 69 | log_type=log_type, 70 | log_server=log_server) 71 | if result: 72 | logger.debug("host creation successfully") 73 | return make_ok_response(code=CODE_CREATED) 74 | else: 75 | error_msg = "Failed to create host {}".format(r.form["name"]) 76 | logger.warning(error_msg) 77 | return make_fail_response(error=error_msg) 78 | 79 | 80 | @bp_host_api.route('/host', methods=['PUT']) 81 | def host_update(): 82 | request_debug(r, logger) 83 | if "id" not in r.form: 84 | error_msg = "host PUT without enough data" 85 | logger.warning(error_msg) 86 | return make_fail_response(error=error_msg, 87 | data=r.form) 88 | else: 89 | id, d = r.form["id"], {} 90 | for k in r.form: 91 | if k != "id": 92 | d[k] = r.form.get(k) 93 | result = host_handler.update(id, d) 94 | if result: 95 | logger.debug("host PUT successfully") 96 | return make_ok_response() 97 | else: 98 | error_msg = "Failed to update host {}".format(result.get("name")) 99 | logger.warning(error_msg) 100 | return make_fail_response(error=error_msg) 101 | 102 | 103 | @bp_host_api.route('/host', methods=['PUT', 'DELETE']) 104 | def host_delete(): 105 | request_debug(r, logger) 106 | if "id" not in r.form or not r.form["id"]: 107 | error_msg = "host delete without enough data" 108 | logger.warning(error_msg) 109 | return make_fail_response(error=error_msg, data=r.form) 110 | else: 111 | logger.debug("host delete with id={0}".format(r.form["id"])) 112 | if host_handler.delete(id=r.form["id"]): 113 | return make_ok_response() 114 | 
else: 115 | error_msg = "Failed to delete host {}".format(r.form["id"]) 116 | logger.warning(error_msg) 117 | return make_fail_response(error=error_msg) 118 | 119 | 120 | @bp_host_api.route('/host_op', methods=['POST']) 121 | def host_actions(): 122 | logger.info("/host_op, method=" + r.method) 123 | request_debug(r, logger) 124 | 125 | host_id, action = r.form['id'], r.form['action'] 126 | if not host_id or not action: 127 | error_msg = "host POST without enough data" 128 | logger.warning(error_msg) 129 | return make_fail_response(error=error_msg, 130 | data=r.form) 131 | else: 132 | if action == "fillup": 133 | if host_handler.fillup(host_id): 134 | logger.debug("fillup successfully") 135 | return make_ok_response() 136 | else: 137 | error_msg = "Failed to fillup the host." 138 | logger.warning(error_msg) 139 | return make_fail_response(error=error_msg, data=r.form) 140 | elif action == "clean": 141 | if host_handler.clean(host_id): 142 | logger.debug("clean successfully") 143 | return make_ok_response() 144 | else: 145 | error_msg = "Failed to clean the host." 146 | logger.warning(error_msg) 147 | return make_fail_response(error=error_msg, data=r.form) 148 | elif action == "reset": 149 | if host_handler.reset(host_id): 150 | logger.debug("reset successfully") 151 | return make_ok_response() 152 | else: 153 | error_msg = "Failed to reset the host." 
154 | logger.warning(error_msg) 155 | return make_fail_response(error=error_msg, data=r.form) 156 | 157 | error_msg = "unknown host action={}".format(action) 158 | logger.warning(error_msg) 159 | return make_fail_response(error=error_msg, data=r.form) 160 | -------------------------------------------------------------------------------- /src/resources/host_view.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | from flask import Blueprint, render_template 6 | from flask import request as r 7 | 8 | sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) 9 | from common import log_handler, LOG_LEVEL, \ 10 | HOST_TYPES, request_debug, \ 11 | CLUSTER_LOG_TYPES, CLUSTER_LOG_LEVEL 12 | from modules import host_handler 13 | 14 | logger = logging.getLogger(__name__) 15 | logger.setLevel(LOG_LEVEL) 16 | logger.addHandler(log_handler) 17 | 18 | 19 | bp_host_view = Blueprint('bp_host_view', __name__, 20 | url_prefix='/{}'.format("view")) 21 | 22 | 23 | @bp_host_view.route('/hosts', methods=['GET']) 24 | def hosts_show(): 25 | logger.info("/hosts method=" + r.method) 26 | request_debug(r, logger) 27 | col_filter = dict((key, r.args.get(key)) for key in r.args) 28 | items = list(host_handler.list(filter_data=col_filter)) 29 | items.sort(key=lambda x: str(x["name"]), reverse=True) 30 | logger.debug(items) 31 | 32 | return render_template("hosts.html", 33 | items_count=len(items), 34 | items=items, 35 | host_types=HOST_TYPES, 36 | log_types=CLUSTER_LOG_TYPES, 37 | log_levels=CLUSTER_LOG_LEVEL, 38 | ) 39 | 40 | 41 | @bp_host_view.route('/host/', methods=['GET']) 42 | def host_info(host_id): 43 | logger.debug("/ host_info/{0} method={1}".format(host_id, r.method)) 44 | return render_template("host_info.html", item=host_handler.get_by_id( 45 | host_id)) 46 | -------------------------------------------------------------------------------- /src/resources/index.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from flask import Blueprint, render_template 5 | from flask import request as r 6 | 7 | sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) 8 | from common import log_handler, LOG_LEVEL, CONSENSUS_PLUGINS, \ 9 | CONSENSUS_MODES, HOST_TYPES, CLUSTER_SIZES, request_debug, \ 10 | CLUSTER_LOG_TYPES, CLUSTER_LOG_LEVEL 11 | from version import version, homepage, author 12 | 13 | logger = logging.getLogger(__name__) 14 | logger.setLevel(LOG_LEVEL) 15 | logger.addHandler(log_handler) 16 | 17 | from modules import cluster_handler, host_handler 18 | 19 | bp_index = Blueprint('bp_index', __name__) 20 | 21 | 22 | @bp_index.route('/', methods=['GET']) 23 | @bp_index.route('/index', methods=['GET']) 24 | def show(): 25 | request_debug(r, logger) 26 | hosts = list(host_handler.list(filter_data={})) 27 | hosts.sort(key=lambda x: x["name"], reverse=False) 28 | hosts_active = list(filter(lambda e: e["status"] == "active", hosts)) 29 | hosts_inactive = list(filter(lambda e: e["status"] != "active", hosts)) 30 | hosts_free = list(filter( 31 | lambda e: len(e["clusters"]) < e["capacity"], hosts_active)) 32 | hosts_available = hosts_free 33 | clusters_active = len(list(cluster_handler.list(col_name="active"))) 34 | clusters_released = len(list(cluster_handler.list(col_name="released"))) 35 | clusters_free = len(list(cluster_handler.list(filter_data={"user_id": ""}, 36 | col_name="active"))) 37 | clusters_inuse = clusters_active - clusters_free 38 | 39 | clusters_temp = len(list(cluster_handler.list(filter_data={ 40 | "user_id": "/^__/"}, col_name="active"))) 41 | 42 | return render_template("index.html", hosts=hosts, 43 | hosts_free=hosts_free, 44 | hosts_active=hosts_active, 45 | hosts_inactive=hosts_inactive, 46 | hosts_available=hosts_available, 47 | clusters_active=clusters_active, 48 | clusters_released=clusters_released, 49 | 
clusters_free=clusters_free, 50 | clusters_inuse=clusters_inuse, 51 | clusters_temp=clusters_temp, 52 | cluster_sizes=CLUSTER_SIZES, 53 | consensus_plugins=CONSENSUS_PLUGINS, 54 | consensus_modes=CONSENSUS_MODES, 55 | host_types=HOST_TYPES, 56 | log_types=CLUSTER_LOG_TYPES, 57 | log_levels=CLUSTER_LOG_LEVEL, 58 | ) 59 | 60 | 61 | @bp_index.route('/about', methods=['GET']) 62 | def about(): 63 | logger.info("path={}, method={}".format(r.path, r.method)) 64 | return render_template("about.html", author=author, version=version, 65 | homepage=homepage) 66 | -------------------------------------------------------------------------------- /src/resources/stat.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) 5 | 6 | from flask import Blueprint, jsonify, render_template 7 | from flask import request as r 8 | from common import log_handler, LOG_LEVEL, CODE_OK, request_debug 9 | from version import version 10 | from modules import host_handler, stat_handler 11 | 12 | logger = logging.getLogger(__name__) 13 | logger.setLevel(LOG_LEVEL) 14 | logger.addHandler(log_handler) 15 | 16 | 17 | bp_stat_api = Blueprint('bp_stat_api', __name__, url_prefix='/api') 18 | 19 | 20 | @bp_stat_api.route('/health', methods=['GET']) 21 | def health(): 22 | request_debug(r, logger) 23 | result = { 24 | 'health': 'OK', 25 | 'version': version 26 | } 27 | 28 | return jsonify(result), CODE_OK 29 | 30 | 31 | @bp_stat_api.route('/stat', methods=['GET']) 32 | def get(): 33 | request_debug(r, logger) 34 | res = r.args.get('res') 35 | if res == 'host': 36 | result = stat_handler.hosts() 37 | elif res == 'cluster': 38 | result = stat_handler.clusters() 39 | else: 40 | result = { 41 | 'example': '/api/stat?res=host' 42 | } 43 | 44 | logger.debug(result) 45 | return jsonify(result), CODE_OK 46 | 47 | 48 | bp_stat_view = Blueprint('bp_stat_view', 
__name__, url_prefix='/view') 49 | 50 | 51 | @bp_stat_view.route('/stat', methods=['GET']) 52 | def show(): 53 | logger.info("path={}, method={}".format(r.path, r.method)) 54 | hosts = list(host_handler.list()) 55 | 56 | return render_template("stat.html", hosts=hosts) 57 | -------------------------------------------------------------------------------- /src/restserver.py: -------------------------------------------------------------------------------- 1 | import os 2 | from flask import Flask 3 | 4 | from common import log_handler, LOG_LEVEL 5 | from resources import front_rest_v2 6 | 7 | app = Flask(__name__, static_folder='static', template_folder='templates') 8 | 9 | app.config.from_object('config.DevelopmentConfig') 10 | app.config.from_envvar('CELLO_CONFIG_FILE', silent=True) 11 | 12 | app.logger.addHandler(log_handler) 13 | app.logger.setLevel(LOG_LEVEL) 14 | 15 | 16 | # app.register_blueprint(front_rest_v1) 17 | app.register_blueprint(front_rest_v2) 18 | 19 | if __name__ == '__main__': 20 | app.run( 21 | host='0.0.0.0', 22 | port=80, 23 | debug=os.environ.get('DEBUG', app.config.get("DEBUG", True)), 24 | threaded=True 25 | ) 26 | -------------------------------------------------------------------------------- /src/static/css/bootstrap-table.min.css: -------------------------------------------------------------------------------- 1 | .fixed-table-container .bs-checkbox,.fixed-table-container .no-records-found{text-align:center}.fixed-table-body thead th .th-inner,.table td,.table th{box-sizing:border-box}.bootstrap-table .table{margin-bottom:0!important;border-bottom:1px solid #ddd;border-collapse:collapse!important;border-radius:1px}.bootstrap-table .table:not(.table-condensed),.bootstrap-table .table:not(.table-condensed)>tbody>tr>td,.bootstrap-table .table:not(.table-condensed)>tbody>tr>th,.bootstrap-table .table:not(.table-condensed)>tfoot>tr>td,.bootstrap-table .table:not(.table-condensed)>tfoot>tr>th,.bootstrap-table 
.table:not(.table-condensed)>thead>tr>td{padding:8px}.bootstrap-table .table.table-no-bordered>tbody>tr>td,.bootstrap-table .table.table-no-bordered>thead>tr>th{border-right:2px solid transparent}.fixed-table-container{position:relative;clear:both;border:1px solid #ddd;border-radius:4px;-webkit-border-radius:4px;-moz-border-radius:4px}.fixed-table-container.table-no-bordered{border:1px solid transparent}.fixed-table-footer,.fixed-table-header{overflow:hidden}.fixed-table-footer{border-top:1px solid #ddd}.fixed-table-body{overflow-x:auto;overflow-y:auto;height:100%}.fixed-table-container table{width:100%}.fixed-table-container thead th{height:0;padding:0;margin:0;border-left:1px solid #ddd}.fixed-table-container thead th:focus{outline:transparent solid 0}.fixed-table-container thead th:first-child{border-left:none;border-top-left-radius:4px;-webkit-border-top-left-radius:4px;-moz-border-radius-topleft:4px}.fixed-table-container tbody td .th-inner,.fixed-table-container thead th .th-inner{padding:8px;line-height:24px;vertical-align:top;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.fixed-table-container thead th .sortable{cursor:pointer;background-position:right;background-repeat:no-repeat;padding-right:30px}.fixed-table-container thead th .both{background-image:url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABMAAAATCAQAAADYWf5HAAAAkElEQVQoz7X QMQ5AQBCF4dWQSJxC5wwax1Cq1e7BAdxD5SL+Tq/QCM1oNiJidwox0355mXnG/DrEtIQ6azioNZQxI0ykPhTQIwhCR+BmBYtlK7kLJYwWCcJA9M4qdrZrd8pPjZWPtOqdRQy320YSV17OatFC4euts6z39GYMKRPCTKY9UnPQ6P+GtMRfGtPnBCiqhAeJPmkqAAAAAElFTkSuQmCC')}.fixed-table-container thead th .asc{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABMAAAATCAYAAAByUDbMAAAAZ0lEQVQ4y2NgGLKgquEuFxBPAGI2ahhWCsS/gDibUoO0gPgxEP8H4ttArEyuQYxAPBdqEAxPBImTY5gjEL9DM+wTENuQahAvEO9DMwiGdwAxOymGJQLxTyD+jgWDxCMZRsEoGAVoAADeemwtPcZI2wAAAABJRU5ErkJggg==)}.fixed-table-container thead th 
.desc{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABMAAAATCAYAAAByUDbMAAAAZUlEQVQ4y2NgGAWjYBSggaqGu5FA/BOIv2PBIPFEUgxjB+IdQPwfC94HxLykus4GiD+hGfQOiB3J8SojEE9EM2wuSJzcsFMG4ttQgx4DsRalkZENxL+AuJQaMcsGxBOAmGvopk8AVz1sLZgg0bsAAAAASUVORK5CYII=)}.fixed-table-container th.detail{width:30px}.fixed-table-container tbody td{border-left:1px solid #ddd}.fixed-table-container tbody tr:first-child td{border-top:none}.fixed-table-container tbody td:first-child{border-left:none}.fixed-table-container tbody .selected td{background-color:#f5f5f5}.fixed-table-container .bs-checkbox .th-inner{padding:8px 0}.fixed-table-container input[type=radio],.fixed-table-container input[type=checkbox]{margin:0 auto!important}.fixed-table-pagination .pagination-detail,.fixed-table-pagination div.pagination{margin-top:10px;margin-bottom:10px}.fixed-table-pagination div.pagination .pagination{margin:0}.fixed-table-pagination .pagination a{padding:6px 12px;line-height:1.428571429}.fixed-table-pagination .pagination-info{line-height:34px;margin-right:5px}.fixed-table-pagination .btn-group{position:relative;display:inline-block;vertical-align:middle}.fixed-table-pagination .dropup .dropdown-menu{margin-bottom:0}.fixed-table-pagination .page-list{display:inline-block}.fixed-table-toolbar .columns-left{margin-right:5px}.fixed-table-toolbar .columns-right{margin-left:5px}.fixed-table-toolbar .columns label{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.428571429}.fixed-table-toolbar .bars,.fixed-table-toolbar .columns,.fixed-table-toolbar .search{position:relative;margin-top:10px;margin-bottom:10px;line-height:34px}.fixed-table-pagination li.disabled a{pointer-events:none;cursor:default}.fixed-table-loading{display:none;position:absolute;top:42px;right:0;bottom:0;left:0;z-index:99;background-color:#fff;text-align:center}.fixed-table-body .card-view .title{font-weight:700;display:inline-block;min-width:30%;text-align:left!important}.table td,.table 
th{vertical-align:middle}.fixed-table-toolbar .dropdown-menu{text-align:left;max-height:300px;overflow:auto}.fixed-table-toolbar .btn-group>.btn-group{display:inline-block;margin-left:-1px!important}.fixed-table-toolbar .btn-group>.btn-group>.btn{border-radius:0}.fixed-table-toolbar .btn-group>.btn-group:first-child>.btn{border-top-left-radius:4px;border-bottom-left-radius:4px}.fixed-table-toolbar .btn-group>.btn-group:last-child>.btn{border-top-right-radius:4px;border-bottom-right-radius:4px}.bootstrap-table .table>thead>tr>th{vertical-align:bottom;border-bottom:1px solid #ddd}.bootstrap-table .table thead>tr>th{padding:0;margin:0}.bootstrap-table .fixed-table-footer tbody>tr>td{padding:0!important}.bootstrap-table .fixed-table-footer .table{border-bottom:none;border-radius:0;padding:0!important}.pull-right .dropdown-menu{right:0;left:auto}p.fixed-table-scroll-inner{width:100%;height:200px}div.fixed-table-scroll-outer{top:0;left:0;visibility:hidden;width:200px;height:150px;overflow:hidden} -------------------------------------------------------------------------------- /src/static/css/dashboard.css: -------------------------------------------------------------------------------- 1 | /* 2 | * Base structure 3 | */ 4 | 5 | /* Move down content because we have a fixed navbar that is 50px tall */ 6 | body { 7 | padding-top: 50px; 8 | } 9 | 10 | 11 | /* 12 | * Global add-ons 13 | */ 14 | 15 | .sub-header { 16 | padding-bottom: 10px; 17 | border-bottom: 1px solid #eee; 18 | } 19 | 20 | /* 21 | * Top navigation 22 | * Hide default border to remove 1px line. 
23 | */ 24 | .navbar-fixed-top { 25 | border: 0; 26 | } 27 | 28 | /* 29 | * Sidebar 30 | */ 31 | 32 | /* Hide for mobile, show later */ 33 | .sidebar { 34 | display: none; 35 | } 36 | @media (min-width: 768px) { 37 | .sidebar { 38 | position: fixed; 39 | top: 51px; 40 | bottom: 0; 41 | left: 0; 42 | z-index: 1000; 43 | display: block; 44 | padding: 20px; 45 | overflow-x: hidden; 46 | overflow-y: auto; /* Scrollable contents if viewport is shorter than content. */ 47 | background-color: #f5f5f5; 48 | border-right: 1px solid #eee; 49 | } 50 | } 51 | 52 | /* Sidebar navigation */ 53 | .nav-sidebar { 54 | margin-right: -21px; /* 20px padding + 1px border */ 55 | margin-bottom: 20px; 56 | margin-left: -20px; 57 | } 58 | .nav-sidebar > li > a { 59 | padding-right: 20px; 60 | padding-left: 20px; 61 | } 62 | .nav-sidebar > .active > a, 63 | .nav-sidebar > .active > a:hover, 64 | .nav-sidebar > .active > a:focus { 65 | color: #fff; 66 | background-color: #428bca; 67 | } 68 | 69 | 70 | /* 71 | * Main content 72 | */ 73 | 74 | .main { 75 | padding: 20px; 76 | } 77 | @media (min-width: 768px) { 78 | .main { 79 | padding-right: 40px; 80 | padding-left: 40px; 81 | } 82 | } 83 | .main .page-header { 84 | margin-top: 0; 85 | } 86 | 87 | 88 | /* 89 | * Placeholder dashboard ideas 90 | */ 91 | 92 | .placeholders { 93 | margin-bottom: 30px; 94 | text-align: center; 95 | } 96 | .placeholders h4 { 97 | margin-bottom: 0; 98 | } 99 | .placeholder { 100 | margin-bottom: 20px; 101 | } 102 | .placeholder img { 103 | display: inline-block; 104 | border-radius: 50%; 105 | } 106 | -------------------------------------------------------------------------------- /src/static/css/dataTables.bootstrap.min.css: -------------------------------------------------------------------------------- 1 | table.dataTable{clear:both;margin-top:6px !important;margin-bottom:6px !important;max-width:none !important;border-collapse:separate !important}table.dataTable td,table.dataTable 
th{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}table.dataTable td.dataTables_empty,table.dataTable th.dataTables_empty{text-align:center}table.dataTable.nowrap th,table.dataTable.nowrap td{white-space:nowrap}div.dataTables_wrapper div.dataTables_length label{font-weight:normal;text-align:left;white-space:nowrap}div.dataTables_wrapper div.dataTables_length select{width:75px;display:inline-block}div.dataTables_wrapper div.dataTables_filter{text-align:right}div.dataTables_wrapper div.dataTables_filter label{font-weight:normal;white-space:nowrap;text-align:left}div.dataTables_wrapper div.dataTables_filter input{margin-left:0.5em;display:inline-block;width:auto}div.dataTables_wrapper div.dataTables_info{padding-top:8px;white-space:nowrap}div.dataTables_wrapper div.dataTables_paginate{margin:0;white-space:nowrap;text-align:right}div.dataTables_wrapper div.dataTables_paginate ul.pagination{margin:2px 0;white-space:nowrap}div.dataTables_wrapper div.dataTables_processing{position:absolute;top:50%;left:50%;width:200px;margin-left:-100px;margin-top:-26px;text-align:center;padding:1em 0}table.dataTable thead>tr>th.sorting_asc,table.dataTable thead>tr>th.sorting_desc,table.dataTable thead>tr>th.sorting,table.dataTable thead>tr>td.sorting_asc,table.dataTable thead>tr>td.sorting_desc,table.dataTable thead>tr>td.sorting{padding-right:30px}table.dataTable thead>tr>th:active,table.dataTable thead>tr>td:active{outline:none}table.dataTable thead .sorting,table.dataTable thead .sorting_asc,table.dataTable thead .sorting_desc,table.dataTable thead .sorting_asc_disabled,table.dataTable thead .sorting_desc_disabled{cursor:pointer;position:relative}table.dataTable thead .sorting:after,table.dataTable thead .sorting_asc:after,table.dataTable thead .sorting_desc:after,table.dataTable thead .sorting_asc_disabled:after,table.dataTable thead .sorting_desc_disabled:after{position:absolute;bottom:8px;right:8px;display:block;font-family:'Glyphicons 
Halflings';opacity:0.5}table.dataTable thead .sorting:after{opacity:0.2;content:"\e150"}table.dataTable thead .sorting_asc:after{content:"\e155"}table.dataTable thead .sorting_desc:after{content:"\e156"}table.dataTable thead .sorting_asc_disabled:after,table.dataTable thead .sorting_desc_disabled:after{color:#eee}div.dataTables_scrollHead table.dataTable{margin-bottom:0 !important}div.dataTables_scrollBody table{border-top:none;margin-top:0 !important;margin-bottom:0 !important}div.dataTables_scrollBody table thead .sorting:after,div.dataTables_scrollBody table thead .sorting_asc:after,div.dataTables_scrollBody table thead .sorting_desc:after{display:none}div.dataTables_scrollBody table tbody tr:first-child th,div.dataTables_scrollBody table tbody tr:first-child td{border-top:none}div.dataTables_scrollFoot table{margin-top:0 !important;border-top:none}@media screen and (max-width: 767px){div.dataTables_wrapper div.dataTables_length,div.dataTables_wrapper div.dataTables_filter,div.dataTables_wrapper div.dataTables_info,div.dataTables_wrapper div.dataTables_paginate{text-align:center}}table.dataTable.table-condensed>thead>tr>th{padding-right:20px}table.dataTable.table-condensed .sorting:after,table.dataTable.table-condensed .sorting_asc:after,table.dataTable.table-condensed .sorting_desc:after{top:6px;right:6px}table.table-bordered.dataTable th,table.table-bordered.dataTable td{border-left-width:0}table.table-bordered.dataTable th:last-child,table.table-bordered.dataTable th:last-child,table.table-bordered.dataTable td:last-child,table.table-bordered.dataTable td:last-child{border-right-width:0}table.table-bordered.dataTable tbody th,table.table-bordered.dataTable tbody td{border-bottom-width:0}div.dataTables_scrollHead 
table.table-bordered{border-bottom-width:0}div.table-responsive>div.dataTables_wrapper>div.row{margin:0}div.table-responsive>div.dataTables_wrapper>div.row>div[class^="col-"]:first-child{padding-left:0}div.table-responsive>div.dataTables_wrapper>div.row>div[class^="col-"]:last-child{padding-right:0} 2 | -------------------------------------------------------------------------------- /src/static/css/jquery.dataTables.min.css: -------------------------------------------------------------------------------- 1 | table.dataTable{width:100%;margin:0 auto;clear:both;border-collapse:separate;border-spacing:0}table.dataTable thead th,table.dataTable tfoot th{font-weight:bold}table.dataTable thead th,table.dataTable thead td{padding:10px 18px;border-bottom:1px solid #111}table.dataTable thead th:active,table.dataTable thead td:active{outline:none}table.dataTable tfoot th,table.dataTable tfoot td{padding:10px 18px 6px 18px;border-top:1px solid #111}table.dataTable thead .sorting,table.dataTable thead .sorting_asc,table.dataTable thead .sorting_desc{cursor:pointer;*cursor:hand}table.dataTable thead .sorting,table.dataTable thead .sorting_asc,table.dataTable thead .sorting_desc,table.dataTable thead .sorting_asc_disabled,table.dataTable thead .sorting_desc_disabled{background-repeat:no-repeat;background-position:center right}table.dataTable thead .sorting{background-image:url("../images/sort_both.png")}table.dataTable thead .sorting_asc{background-image:url("../images/sort_asc.png")}table.dataTable thead .sorting_desc{background-image:url("../images/sort_desc.png")}table.dataTable thead .sorting_asc_disabled{background-image:url("../images/sort_asc_disabled.png")}table.dataTable thead .sorting_desc_disabled{background-image:url("../images/sort_desc_disabled.png")}table.dataTable tbody tr{background-color:#ffffff}table.dataTable tbody tr.selected{background-color:#B0BED9}table.dataTable tbody th,table.dataTable tbody td{padding:8px 10px}table.dataTable.row-border tbody 
th,table.dataTable.row-border tbody td,table.dataTable.display tbody th,table.dataTable.display tbody td{border-top:1px solid #ddd}table.dataTable.row-border tbody tr:first-child th,table.dataTable.row-border tbody tr:first-child td,table.dataTable.display tbody tr:first-child th,table.dataTable.display tbody tr:first-child td{border-top:none}table.dataTable.cell-border tbody th,table.dataTable.cell-border tbody td{border-top:1px solid #ddd;border-right:1px solid #ddd}table.dataTable.cell-border tbody tr th:first-child,table.dataTable.cell-border tbody tr td:first-child{border-left:1px solid #ddd}table.dataTable.cell-border tbody tr:first-child th,table.dataTable.cell-border tbody tr:first-child td{border-top:none}table.dataTable.stripe tbody tr.odd,table.dataTable.display tbody tr.odd{background-color:#f9f9f9}table.dataTable.stripe tbody tr.odd.selected,table.dataTable.display tbody tr.odd.selected{background-color:#acbad4}table.dataTable.hover tbody tr:hover,table.dataTable.display tbody tr:hover{background-color:#f6f6f6}table.dataTable.hover tbody tr:hover.selected,table.dataTable.display tbody tr:hover.selected{background-color:#aab7d1}table.dataTable.order-column tbody tr>.sorting_1,table.dataTable.order-column tbody tr>.sorting_2,table.dataTable.order-column tbody tr>.sorting_3,table.dataTable.display tbody tr>.sorting_1,table.dataTable.display tbody tr>.sorting_2,table.dataTable.display tbody tr>.sorting_3{background-color:#fafafa}table.dataTable.order-column tbody tr.selected>.sorting_1,table.dataTable.order-column tbody tr.selected>.sorting_2,table.dataTable.order-column tbody tr.selected>.sorting_3,table.dataTable.display tbody tr.selected>.sorting_1,table.dataTable.display tbody tr.selected>.sorting_2,table.dataTable.display tbody tr.selected>.sorting_3{background-color:#acbad5}table.dataTable.display tbody tr.odd>.sorting_1,table.dataTable.order-column.stripe tbody tr.odd>.sorting_1{background-color:#f1f1f1}table.dataTable.display tbody 
tr.odd>.sorting_2,table.dataTable.order-column.stripe tbody tr.odd>.sorting_2{background-color:#f3f3f3}table.dataTable.display tbody tr.odd>.sorting_3,table.dataTable.order-column.stripe tbody tr.odd>.sorting_3{background-color:whitesmoke}table.dataTable.display tbody tr.odd.selected>.sorting_1,table.dataTable.order-column.stripe tbody tr.odd.selected>.sorting_1{background-color:#a6b4cd}table.dataTable.display tbody tr.odd.selected>.sorting_2,table.dataTable.order-column.stripe tbody tr.odd.selected>.sorting_2{background-color:#a8b5cf}table.dataTable.display tbody tr.odd.selected>.sorting_3,table.dataTable.order-column.stripe tbody tr.odd.selected>.sorting_3{background-color:#a9b7d1}table.dataTable.display tbody tr.even>.sorting_1,table.dataTable.order-column.stripe tbody tr.even>.sorting_1{background-color:#fafafa}table.dataTable.display tbody tr.even>.sorting_2,table.dataTable.order-column.stripe tbody tr.even>.sorting_2{background-color:#fcfcfc}table.dataTable.display tbody tr.even>.sorting_3,table.dataTable.order-column.stripe tbody tr.even>.sorting_3{background-color:#fefefe}table.dataTable.display tbody tr.even.selected>.sorting_1,table.dataTable.order-column.stripe tbody tr.even.selected>.sorting_1{background-color:#acbad5}table.dataTable.display tbody tr.even.selected>.sorting_2,table.dataTable.order-column.stripe tbody tr.even.selected>.sorting_2{background-color:#aebcd6}table.dataTable.display tbody tr.even.selected>.sorting_3,table.dataTable.order-column.stripe tbody tr.even.selected>.sorting_3{background-color:#afbdd8}table.dataTable.display tbody tr:hover>.sorting_1,table.dataTable.order-column.hover tbody tr:hover>.sorting_1{background-color:#eaeaea}table.dataTable.display tbody tr:hover>.sorting_2,table.dataTable.order-column.hover tbody tr:hover>.sorting_2{background-color:#ececec}table.dataTable.display tbody tr:hover>.sorting_3,table.dataTable.order-column.hover tbody tr:hover>.sorting_3{background-color:#efefef}table.dataTable.display tbody 
tr:hover.selected>.sorting_1,table.dataTable.order-column.hover tbody tr:hover.selected>.sorting_1{background-color:#a2aec7}table.dataTable.display tbody tr:hover.selected>.sorting_2,table.dataTable.order-column.hover tbody tr:hover.selected>.sorting_2{background-color:#a3b0c9}table.dataTable.display tbody tr:hover.selected>.sorting_3,table.dataTable.order-column.hover tbody tr:hover.selected>.sorting_3{background-color:#a5b2cb}table.dataTable.no-footer{border-bottom:1px solid #111}table.dataTable.nowrap th,table.dataTable.nowrap td{white-space:nowrap}table.dataTable.compact thead th,table.dataTable.compact thead td{padding:4px 17px 4px 4px}table.dataTable.compact tfoot th,table.dataTable.compact tfoot td{padding:4px}table.dataTable.compact tbody th,table.dataTable.compact tbody td{padding:4px}table.dataTable th.dt-left,table.dataTable td.dt-left{text-align:left}table.dataTable th.dt-center,table.dataTable td.dt-center,table.dataTable td.dataTables_empty{text-align:center}table.dataTable th.dt-right,table.dataTable td.dt-right{text-align:right}table.dataTable th.dt-justify,table.dataTable td.dt-justify{text-align:justify}table.dataTable th.dt-nowrap,table.dataTable td.dt-nowrap{white-space:nowrap}table.dataTable thead th.dt-head-left,table.dataTable thead td.dt-head-left,table.dataTable tfoot th.dt-head-left,table.dataTable tfoot td.dt-head-left{text-align:left}table.dataTable thead th.dt-head-center,table.dataTable thead td.dt-head-center,table.dataTable tfoot th.dt-head-center,table.dataTable tfoot td.dt-head-center{text-align:center}table.dataTable thead th.dt-head-right,table.dataTable thead td.dt-head-right,table.dataTable tfoot th.dt-head-right,table.dataTable tfoot td.dt-head-right{text-align:right}table.dataTable thead th.dt-head-justify,table.dataTable thead td.dt-head-justify,table.dataTable tfoot th.dt-head-justify,table.dataTable tfoot td.dt-head-justify{text-align:justify}table.dataTable thead th.dt-head-nowrap,table.dataTable thead 
td.dt-head-nowrap,table.dataTable tfoot th.dt-head-nowrap,table.dataTable tfoot td.dt-head-nowrap{white-space:nowrap}table.dataTable tbody th.dt-body-left,table.dataTable tbody td.dt-body-left{text-align:left}table.dataTable tbody th.dt-body-center,table.dataTable tbody td.dt-body-center{text-align:center}table.dataTable tbody th.dt-body-right,table.dataTable tbody td.dt-body-right{text-align:right}table.dataTable tbody th.dt-body-justify,table.dataTable tbody td.dt-body-justify{text-align:justify}table.dataTable tbody th.dt-body-nowrap,table.dataTable tbody td.dt-body-nowrap{white-space:nowrap}table.dataTable,table.dataTable th,table.dataTable td{-webkit-box-sizing:content-box;box-sizing:content-box}.dataTables_wrapper{position:relative;clear:both;*zoom:1;zoom:1}.dataTables_wrapper .dataTables_length{float:left}.dataTables_wrapper .dataTables_filter{float:right;text-align:right}.dataTables_wrapper .dataTables_filter input{margin-left:0.5em}.dataTables_wrapper .dataTables_info{clear:both;float:left;padding-top:0.755em}.dataTables_wrapper .dataTables_paginate{float:right;text-align:right;padding-top:0.25em}.dataTables_wrapper .dataTables_paginate .paginate_button{box-sizing:border-box;display:inline-block;min-width:1.5em;padding:0.5em 1em;margin-left:2px;text-align:center;text-decoration:none !important;cursor:pointer;*cursor:hand;color:#333 !important;border:1px solid transparent;border-radius:2px}.dataTables_wrapper .dataTables_paginate .paginate_button.current,.dataTables_wrapper .dataTables_paginate .paginate_button.current:hover{color:#333 !important;border:1px solid #979797;background-color:white;background:-webkit-gradient(linear, left top, left bottom, color-stop(0%, #fff), color-stop(100%, #dcdcdc));background:-webkit-linear-gradient(top, #fff 0%, #dcdcdc 100%);background:-moz-linear-gradient(top, #fff 0%, #dcdcdc 100%);background:-ms-linear-gradient(top, #fff 0%, #dcdcdc 100%);background:-o-linear-gradient(top, #fff 0%, #dcdcdc 
100%);background:linear-gradient(to bottom, #fff 0%, #dcdcdc 100%)}.dataTables_wrapper .dataTables_paginate .paginate_button.disabled,.dataTables_wrapper .dataTables_paginate .paginate_button.disabled:hover,.dataTables_wrapper .dataTables_paginate .paginate_button.disabled:active{cursor:default;color:#666 !important;border:1px solid transparent;background:transparent;box-shadow:none}.dataTables_wrapper .dataTables_paginate .paginate_button:hover{color:white !important;border:1px solid #111;background-color:#585858;background:-webkit-gradient(linear, left top, left bottom, color-stop(0%, #585858), color-stop(100%, #111));background:-webkit-linear-gradient(top, #585858 0%, #111 100%);background:-moz-linear-gradient(top, #585858 0%, #111 100%);background:-ms-linear-gradient(top, #585858 0%, #111 100%);background:-o-linear-gradient(top, #585858 0%, #111 100%);background:linear-gradient(to bottom, #585858 0%, #111 100%)}.dataTables_wrapper .dataTables_paginate .paginate_button:active{outline:none;background-color:#2b2b2b;background:-webkit-gradient(linear, left top, left bottom, color-stop(0%, #2b2b2b), color-stop(100%, #0c0c0c));background:-webkit-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);background:-moz-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);background:-ms-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);background:-o-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);background:linear-gradient(to bottom, #2b2b2b 0%, #0c0c0c 100%);box-shadow:inset 0 0 3px #111}.dataTables_wrapper .dataTables_paginate .ellipsis{padding:0 1em}.dataTables_wrapper .dataTables_processing{position:absolute;top:50%;left:50%;width:100%;height:40px;margin-left:-50%;margin-top:-25px;padding-top:20px;text-align:center;font-size:1.2em;background-color:white;background:-webkit-gradient(linear, left top, right top, color-stop(0%, rgba(255,255,255,0)), color-stop(25%, rgba(255,255,255,0.9)), color-stop(75%, rgba(255,255,255,0.9)), color-stop(100%, 
rgba(255,255,255,0)));background:-webkit-linear-gradient(left, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%);background:-moz-linear-gradient(left, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%);background:-ms-linear-gradient(left, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%);background:-o-linear-gradient(left, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%);background:linear-gradient(to right, rgba(255,255,255,0) 0%, rgba(255,255,255,0.9) 25%, rgba(255,255,255,0.9) 75%, rgba(255,255,255,0) 100%)}.dataTables_wrapper .dataTables_length,.dataTables_wrapper .dataTables_filter,.dataTables_wrapper .dataTables_info,.dataTables_wrapper .dataTables_processing,.dataTables_wrapper .dataTables_paginate{color:#333}.dataTables_wrapper .dataTables_scroll{clear:both}.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody{*margin-top:-1px;-webkit-overflow-scrolling:touch}.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody th,.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody td{vertical-align:middle}.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody th>div.dataTables_sizing,.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody td>div.dataTables_sizing{height:0;overflow:hidden;margin:0 !important;padding:0 !important}.dataTables_wrapper.no-footer .dataTables_scrollBody{border-bottom:1px solid #111}.dataTables_wrapper.no-footer div.dataTables_scrollHead table,.dataTables_wrapper.no-footer div.dataTables_scrollBody table{border-bottom:none}.dataTables_wrapper:after{visibility:hidden;display:block;content:"";clear:both;height:0}@media screen and (max-width: 767px){.dataTables_wrapper .dataTables_info,.dataTables_wrapper .dataTables_paginate{float:none;text-align:center}.dataTables_wrapper 
.dataTables_paginate{margin-top:0.5em}}@media screen and (max-width: 640px){.dataTables_wrapper .dataTables_length,.dataTables_wrapper .dataTables_filter{float:none;text-align:center}.dataTables_wrapper .dataTables_filter{margin-top:0.5em}} 2 | -------------------------------------------------------------------------------- /src/static/css/paginate.css: -------------------------------------------------------------------------------- 1 | .pagination-page-info { 2 | padding: .6em; 3 | padding-left: 0; 4 | width: 40em; 5 | margin: .5em; 6 | margin-left: 0; 7 | font-size: 12px; 8 | } 9 | .pagination-page-info b { 10 | color: black; 11 | background: #6aa6ed; 12 | padding-left: 2px; 13 | padding: .1em .25em; 14 | font-size: 150%; 15 | } -------------------------------------------------------------------------------- /src/static/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/src/static/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /src/static/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/src/static/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /src/static/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/src/static/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /src/static/fonts/glyphicons-halflings-regular.woff2: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/src/static/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /src/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yeasy/cello-archived/aa86854b88973004476d0f87efec6c6ba3f60fb1/src/static/img/favicon.ico -------------------------------------------------------------------------------- /src/static/js/bootbox.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * bootbox.js v4.2.0 3 | * 4 | * http://bootboxjs.com/license.txt 5 | */ 6 | !function(a,b){"use strict";"function"==typeof define&&define.amd?define(["jquery"],b):"object"==typeof exports?module.exports=b(require("jquery")):a.bootbox=b(a.jQuery)}(this,function a(b,c){"use strict";function d(a){var b=q[o.locale];return b?b[a]:q.en[a]}function e(a,c,d){a.stopPropagation(),a.preventDefault();var e=b.isFunction(d)&&d(a)===!1;e||c.modal("hide")}function f(a){var b,c=0;for(b in a)c++;return c}function g(a,c){var d=0;b.each(a,function(a,b){c(a,b,d++)})}function h(a){var c,d;if("object"!=typeof a)throw new Error("Please supply an object of options");if(!a.message)throw new Error("Please specify a message");return a=b.extend({},o,a),a.buttons||(a.buttons={}),a.backdrop=a.backdrop?"static":!1,c=a.buttons,d=f(c),g(c,function(a,e,f){if(b.isFunction(e)&&(e=c[a]={callback:e}),"object"!==b.type(e))throw new Error("button with key "+a+" must be an object");e.label||(e.label=a),e.className||(e.className=2>=d&&f===d-1?"btn-primary":"btn-default")}),a}function i(a,b){var c=a.length,d={};if(1>c||c>2)throw new Error("Invalid argument length");return 2===c||"string"==typeof a[0]?(d[b[0]]=a[0],d[b[1]]=a[1]):d=a[0],d}function j(a,c,d){return 
b.extend(!0,{},a,i(c,d))}function k(a,b,c,d){var e={className:"bootbox-"+a,buttons:l.apply(null,b)};return m(j(e,d,c),b)}function l(){for(var a={},b=0,c=arguments.length;c>b;b++){var e=arguments[b],f=e.toLowerCase(),g=e.toUpperCase();a[f]={label:d(g)}}return a}function m(a,b){var d={};return g(b,function(a,b){d[b]=!0}),g(a.buttons,function(a){if(d[a]===c)throw new Error("button key "+a+" is not allowed (options are "+b.join("\n")+")")}),a}var n={dialog:"",header:"",footer:"",closeButton:"",form:"
",inputs:{text:"",textarea:"",email:"",select:"",checkbox:"
",date:"",time:"",number:"",password:""}},o={locale:"en",backdrop:!0,animate:!0,className:null,closeButton:!0,show:!0,container:"body"},p={};p.alert=function(){var a;if(a=k("alert",["ok"],["message","callback"],arguments),a.callback&&!b.isFunction(a.callback))throw new Error("alert requires callback property to be a function when provided");return a.buttons.ok.callback=a.onEscape=function(){return b.isFunction(a.callback)?a.callback():!0},p.dialog(a)},p.confirm=function(){var a;if(a=k("confirm",["cancel","confirm"],["message","callback"],arguments),a.buttons.cancel.callback=a.onEscape=function(){return a.callback(!1)},a.buttons.confirm.callback=function(){return a.callback(!0)},!b.isFunction(a.callback))throw new Error("confirm requires a callback");return p.dialog(a)},p.prompt=function(){var a,d,e,f,h,i,k;f=b(n.form),d={className:"bootbox-prompt",buttons:l("cancel","confirm"),value:"",inputType:"text"},a=m(j(d,arguments,["title","callback"]),["cancel","confirm"]),i=a.show===c?!0:a.show;var o=["date","time","number"],q=document.createElement("input");if(q.setAttribute("type",a.inputType),o[a.inputType]&&(a.inputType=q.type),a.message=f,a.buttons.cancel.callback=a.onEscape=function(){return a.callback(null)},a.buttons.confirm.callback=function(){var c;switch(a.inputType){case"text":case"textarea":case"email":case"select":case"date":case"time":case"number":case"password":c=h.val();break;case"checkbox":var d=h.find("input:checked");c=[],g(d,function(a,d){c.push(b(d).val())})}return a.callback(c)},a.show=!1,!a.title)throw new Error("prompt requires a title");if(!b.isFunction(a.callback))throw new Error("prompt requires a callback");if(!n.inputs[a.inputType])throw new Error("invalid prompt type");switch(h=b(n.inputs[a.inputType]),a.inputType){case"text":case"textarea":case"email":case"date":case"time":case"number":case"password":h.val(a.value);break;case"select":var r={};if(k=a.inputOptions||[],!k.length)throw new Error("prompt with select requires 
options");g(k,function(a,d){var e=h;if(d.value===c||d.text===c)throw new Error("given options in wrong format");d.group&&(r[d.group]||(r[d.group]=b("").attr("label",d.group)),e=r[d.group]),e.append("")}),g(r,function(a,b){h.append(b)}),h.val(a.value);break;case"checkbox":var s=b.isArray(a.value)?a.value:[a.value];if(k=a.inputOptions||[],!k.length)throw new Error("prompt with checkbox requires options");if(!k[0].value||!k[0].text)throw new Error("given options in wrong format");h=b("
"),g(k,function(c,d){var e=b(n.inputs[a.inputType]);e.find("input").attr("value",d.value),e.find("label").append(d.text),g(s,function(a,b){b===d.value&&e.find("input").prop("checked",!0)}),h.append(e)})}return a.placeholder&&h.attr("placeholder",a.placeholder),a.pattern&&h.attr("pattern",a.pattern),f.append(h),f.on("submit",function(a){a.preventDefault(),e.find(".btn-primary").click()}),e=p.dialog(a),e.off("shown.bs.modal"),e.on("shown.bs.modal",function(){h.focus()}),i===!0&&e.modal("show"),e},p.dialog=function(a){a=h(a);var c=b(n.dialog),d=c.find(".modal-body"),f=a.buttons,i="",j={onEscape:a.onEscape};if(g(f,function(a,b){i+="",j[a]=b.callback}),d.find(".bootbox-body").html(a.message),a.animate===!0&&c.addClass("fade"),a.className&&c.addClass(a.className),a.title&&d.before(n.header),a.closeButton){var k=b(n.closeButton);a.title?c.find(".modal-header").prepend(k):k.css("margin-top","-10px").prependTo(d)}return a.title&&c.find(".modal-title").html(a.title),i.length&&(d.after(n.footer),c.find(".modal-footer").html(i)),c.on("hidden.bs.modal",function(a){a.target===this&&c.remove()}),c.on("shown.bs.modal",function(){c.find(".btn-primary:first").focus()}),c.on("escape.close.bb",function(a){j.onEscape&&e(a,c,j.onEscape)}),c.on("click",".modal-footer button",function(a){var d=b(this).data("bb-handler");e(a,c,j[d])}),c.on("click",".bootbox-close-button",function(a){e(a,c,j.onEscape)}),c.on("keyup",function(a){27===a.which&&c.trigger("escape.close.bb")}),b(a.container).append(c),c.modal({backdrop:a.backdrop,keyboard:!1,show:!1}),a.show&&c.modal("show"),c},p.setDefaults=function(){var a={};2===arguments.length?a[arguments[0]]=arguments[1]:a=arguments[0],b.extend(o,a)},p.hideAll=function(){b(".bootbox").modal("hide")};var 
q={br:{OK:"OK",CANCEL:"Cancelar",CONFIRM:"Sim"},da:{OK:"OK",CANCEL:"Annuller",CONFIRM:"Accepter"},de:{OK:"OK",CANCEL:"Abbrechen",CONFIRM:"Akzeptieren"},en:{OK:"OK",CANCEL:"Cancel",CONFIRM:"OK"},es:{OK:"OK",CANCEL:"Cancelar",CONFIRM:"Aceptar"},fi:{OK:"OK",CANCEL:"Peruuta",CONFIRM:"OK"},fr:{OK:"OK",CANCEL:"Annuler",CONFIRM:"D'accord"},he:{OK:"אישור",CANCEL:"ביטול",CONFIRM:"אישור"},it:{OK:"OK",CANCEL:"Annulla",CONFIRM:"Conferma"},lt:{OK:"Gerai",CANCEL:"Atšaukti",CONFIRM:"Patvirtinti"},lv:{OK:"Labi",CANCEL:"Atcelt",CONFIRM:"Apstiprināt"},nl:{OK:"OK",CANCEL:"Annuleren",CONFIRM:"Accepteren"},no:{OK:"OK",CANCEL:"Avbryt",CONFIRM:"OK"},pl:{OK:"OK",CANCEL:"Anuluj",CONFIRM:"Potwierdź"},ru:{OK:"OK",CANCEL:"Отмена",CONFIRM:"Применить"},sv:{OK:"OK",CANCEL:"Avbryt",CONFIRM:"OK"},tr:{OK:"Tamam",CANCEL:"İptal",CONFIRM:"Onayla"},zh_CN:{OK:"OK",CANCEL:"取消",CONFIRM:"确认"},zh_TW:{OK:"OK",CANCEL:"取消",CONFIRM:"確認"}};return p.init=function(c){return a(c||b)},p}); -------------------------------------------------------------------------------- /src/static/js/bootstrap-notify.min.js: -------------------------------------------------------------------------------- 1 | /* Project: Bootstrap Growl = v3.1.3 | Description: Turns standard Bootstrap alerts into "Growl-like" notifications. 
| Author: Mouse0270 aka Robert McIntosh | License: MIT License | Website: https://github.com/mouse0270/bootstrap-growl */ 2 | !function(t){"function"==typeof define&&define.amd?define(["jquery"],t):t("object"==typeof exports?require("jquery"):jQuery)}(function(t){function e(e,i,n){var i={content:{message:"object"==typeof i?i.message:i,title:i.title?i.title:"",icon:i.icon?i.icon:"",url:i.url?i.url:"#",target:i.target?i.target:"-"}};n=t.extend(!0,{},i,n),this.settings=t.extend(!0,{},s,n),this._defaults=s,"-"==this.settings.content.target&&(this.settings.content.target=this.settings.url_target),this.animations={start:"webkitAnimationStart oanimationstart MSAnimationStart animationstart",end:"webkitAnimationEnd oanimationend MSAnimationEnd animationend"},"number"==typeof this.settings.offset&&(this.settings.offset={x:this.settings.offset,y:this.settings.offset}),this.init()}var s={element:"body",position:null,type:"info",allow_dismiss:!0,newest_on_top:!1,showProgressbar:!1,placement:{from:"top",align:"right"},offset:20,spacing:10,z_index:1031,delay:5e3,timer:1e3,url_target:"_blank",mouse_over:null,animate:{enter:"animated fadeInDown",exit:"animated fadeOutUp"},onShow:null,onShown:null,onClose:null,onClosed:null,icon_type:"class",template:''};String.format=function(){for(var t=arguments[0],e=1;e .progress-bar').removeClass("progress-bar-"+t.settings.type),t.settings.type=i[e],this.$ele.addClass("alert-"+i[e]).find('[data-notify="progressbar"] > .progress-bar').addClass("progress-bar-"+i[e]);break;case"icon":var n=this.$ele.find('[data-notify="icon"]');"class"==t.settings.icon_type.toLowerCase()?n.removeClass(t.settings.content.icon).addClass(i[e]):(n.is("img")||n.find("img"),n.attr("src",i[e]));break;case"progress":var a=t.settings.delay-t.settings.delay*(i[e]/100);this.$ele.data("notify-delay",a),this.$ele.find('[data-notify="progressbar"] > 
div').attr("aria-valuenow",i[e]).css("width",i[e]+"%");break;case"url":this.$ele.find('[data-notify="url"]').attr("href",i[e]);break;case"target":this.$ele.find('[data-notify="url"]').attr("target",i[e]);break;default:this.$ele.find('[data-notify="'+e+'"]').html(i[e])}var o=this.$ele.outerHeight()+parseInt(t.settings.spacing)+parseInt(t.settings.offset.y);t.reposition(o)},close:function(){t.close()}}},buildNotify:function(){var e=this.settings.content;this.$ele=t(String.format(this.settings.template,this.settings.type,e.title,e.message,e.url,e.target)),this.$ele.attr("data-notify-position",this.settings.placement.from+"-"+this.settings.placement.align),this.settings.allow_dismiss||this.$ele.find('[data-notify="dismiss"]').css("display","none"),(this.settings.delay<=0&&!this.settings.showProgressbar||!this.settings.showProgressbar)&&this.$ele.find('[data-notify="progressbar"]').remove()},setIcon:function(){"class"==this.settings.icon_type.toLowerCase()?this.$ele.find('[data-notify="icon"]').addClass(this.settings.content.icon):this.$ele.find('[data-notify="icon"]').is("img")?this.$ele.find('[data-notify="icon"]').attr("src",this.settings.content.icon):this.$ele.find('[data-notify="icon"]').append('Notify Icon')},styleURL:function(){this.$ele.find('[data-notify="url"]').css({backgroundImage:"url(data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7)",height:"100%",left:"0px",position:"absolute",top:"0px",width:"100%",zIndex:this.settings.z_index+1}),this.$ele.find('[data-notify="dismiss"]').css({position:"absolute",right:"10px",top:"5px",zIndex:this.settings.z_index+2})},placement:function(){var e=this,s=this.settings.offset.y,i={display:"inline-block",margin:"0px auto",position:this.settings.position?this.settings.position:"body"===this.settings.element?"fixed":"absolute",transition:"all .5s 
ease-in-out",zIndex:this.settings.z_index},n=!1,a=this.settings;switch(t('[data-notify-position="'+this.settings.placement.from+"-"+this.settings.placement.align+'"]:not([data-closing="true"])').each(function(){return s=Math.max(s,parseInt(t(this).css(a.placement.from))+parseInt(t(this).outerHeight())+parseInt(a.spacing))}),1==this.settings.newest_on_top&&(s=this.settings.offset.y),i[this.settings.placement.from]=s+"px",this.settings.placement.align){case"left":case"right":i[this.settings.placement.align]=this.settings.offset.x+"px";break;case"center":i.left=0,i.right=0}this.$ele.css(i).addClass(this.settings.animate.enter),t.each(Array("webkit","moz","o","ms",""),function(t,s){e.$ele[0].style[s+"AnimationIterationCount"]=1}),t(this.settings.element).append(this.$ele),1==this.settings.newest_on_top&&(s=parseInt(s)+parseInt(this.settings.spacing)+this.$ele.outerHeight(),this.reposition(s)),t.isFunction(e.settings.onShow)&&e.settings.onShow.call(this.$ele),this.$ele.one(this.animations.start,function(){n=!0}).one(this.animations.end,function(){t.isFunction(e.settings.onShown)&&e.settings.onShown.call(this)}),setTimeout(function(){n||t.isFunction(e.settings.onShown)&&e.settings.onShown.call(this)},600)},bind:function(){var e=this;if(this.$ele.find('[data-notify="dismiss"]').on("click",function(){e.close()}),this.$ele.mouseover(function(){t(this).data("data-hover","true")}).mouseout(function(){t(this).data("data-hover","false")}),this.$ele.data("data-hover","false"),this.settings.delay>0){e.$ele.data("notify-delay",e.settings.delay);var s=setInterval(function(){var t=parseInt(e.$ele.data("notify-delay"))-e.settings.timer;if("false"===e.$ele.data("data-hover")&&"pause"==e.settings.mouse_over||"pause"!=e.settings.mouse_over){var i=(e.settings.delay-t)/e.settings.delay*100;e.$ele.data("notify-delay",t),e.$ele.find('[data-notify="progressbar"] > 
div').attr("aria-valuenow",i).css("width",i+"%")}t<=-e.settings.timer&&(clearInterval(s),e.close())},e.settings.timer)}},close:function(){var e=this,s=parseInt(this.$ele.css(this.settings.placement.from)),i=!1;this.$ele.data("closing","true").addClass(this.settings.animate.exit),e.reposition(s),t.isFunction(e.settings.onClose)&&e.settings.onClose.call(this.$ele),this.$ele.one(this.animations.start,function(){i=!0}).one(this.animations.end,function(){t(this).remove(),t.isFunction(e.settings.onClosed)&&e.settings.onClosed.call(this)}),setTimeout(function(){i||(e.$ele.remove(),e.settings.onClosed&&e.settings.onClosed(e.$ele))},600)},reposition:function(e){var s=this,i='[data-notify-position="'+this.settings.placement.from+"-"+this.settings.placement.align+'"]:not([data-closing="true"])',n=this.$ele.nextAll(i);1==this.settings.newest_on_top&&(n=this.$ele.prevAll(i)),n.each(function(){t(this).css(s.settings.placement.from,e),e=parseInt(e)+parseInt(s.settings.spacing)+t(this).outerHeight()})}}),t.notify=function(t,s){var i=new e(this,t,s);return i.notify},t.notifyDefaults=function(e){return s=t.extend(!0,{},s,e)},t.notifyClose=function(e){"undefined"==typeof e||"all"==e?t("[data-notify]").find('[data-notify="dismiss"]').trigger("click"):t('[data-notify-position="'+e+'"]').find('[data-notify="dismiss"]').trigger("click")}}); -------------------------------------------------------------------------------- /src/static/js/bootstrap-table-zh-CN.min.js: -------------------------------------------------------------------------------- 1 | /* 2 | * bootstrap-table - v1.10.1 - 2016-02-17 3 | * https://github.com/wenzhixin/bootstrap-table 4 | * Copyright (c) 2016 zhixin wen 5 | * Licensed MIT License 6 | */ 7 | !function(a){"use strict";a.fn.bootstrapTable.locales["zh-CN"]={formatLoadingMessage:function(){return"正在努力地加载数据中,请稍候……"},formatRecordsPerPage:function(a){return"每页显示 "+a+" 条记录"},formatShowingRows:function(a,b,c){return"显示第 "+a+" 到第 "+b+" 条记录,总共 "+c+" 
条记录"},formatSearch:function(){return"搜索"},formatNoMatches:function(){return"没有找到匹配的记录"},formatPaginationSwitch:function(){return"隐藏/显示分页"},formatRefresh:function(){return"刷新"},formatToggle:function(){return"切换"},formatColumns:function(){return"列"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["zh-CN"])}(jQuery); -------------------------------------------------------------------------------- /src/static/js/dataTables.bootstrap.min.js: -------------------------------------------------------------------------------- 1 | /*! 2 | DataTables Bootstrap 3 integration 3 | ©2011-2015 SpryMedia Ltd - datatables.net/license 4 | */ 5 | (function(b){"function"===typeof define&&define.amd?define(["jquery","datatables.net"],function(a){return b(a,window,document)}):"object"===typeof exports?module.exports=function(a,d){a||(a=window);if(!d||!d.fn.dataTable)d=require("datatables.net")(a,d).$;return b(d,a,a.document)}:b(jQuery,window,document)})(function(b,a,d){var f=b.fn.dataTable;b.extend(!0,f.defaults,{dom:"<'row'<'col-sm-6'l><'col-sm-6'f>><'row'<'col-sm-12'tr>><'row'<'col-sm-5'i><'col-sm-7'p>>",renderer:"bootstrap"});b.extend(f.ext.classes, 6 | {sWrapper:"dataTables_wrapper form-inline dt-bootstrap",sFilterInput:"form-control input-sm",sLengthSelect:"form-control input-sm",sProcessing:"dataTables_processing panel panel-default"});f.ext.renderer.pageButton.bootstrap=function(a,h,r,m,j,n){var o=new f.Api(a),s=a.oClasses,k=a.oLanguage.oPaginate,t=a.oLanguage.oAria.paginate||{},e,g,p=0,q=function(d,f){var l,h,i,c,m=function(a){a.preventDefault();!b(a.currentTarget).hasClass("disabled")&&o.page()!=a.data.action&&o.page(a.data.action).draw("page")}; 7 | l=0;for(h=f.length;l",{"class":s.sPageButton+" "+g,id:0===r&&"string"===typeof c?a.sTableId+"_"+c:null}).append(b("",{href:"#", 8 | 
"aria-controls":a.sTableId,"aria-label":t[c],"data-dt-idx":p,tabindex:a.iTabIndex}).html(e)).appendTo(d),a.oApi._fnBindAction(i,{action:c},m),p++)}},i;try{i=b(h).find(d.activeElement).data("dt-idx")}catch(u){}q(b(h).empty().html('