├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── cell_service.go ├── collector_module.go ├── daemon.go ├── go.mod ├── go.sum ├── src ├── service │ ├── dhcp_service.go │ ├── go.mod │ ├── go.sum │ ├── guest_initiator.go │ ├── instance_manager.go │ ├── instance_utility.go │ ├── io_scheduler.go │ ├── network_manager.go │ ├── network_manager_test.go │ ├── network_utility.go │ ├── service_interface.go │ ├── storage_manager.go │ ├── storage_manager_test.go │ └── storage_utility.go └── task │ ├── add_security_rule.go │ ├── attach_instance.go │ ├── change_security_policy_action.go │ ├── change_security_rule_order.go │ ├── change_storage_path.go │ ├── create_disk_image.go │ ├── create_instance.go │ ├── create_snapshot.go │ ├── delete_instance.go │ ├── delete_snapshot.go │ ├── detach_instance.go │ ├── eject_media.go │ ├── get_auth.go │ ├── get_cell_info.go │ ├── get_instance_config.go │ ├── get_instance_status.go │ ├── get_security_policy.go │ ├── get_snapshot.go │ ├── go.mod │ ├── go.sum │ ├── handle_address_pool_changed.go │ ├── handle_compute_cell_removed.go │ ├── handle_compute_pool_ready.go │ ├── insert_media.go │ ├── modify_auth.go │ ├── modify_auto_start.go │ ├── modify_core.go │ ├── modify_cpu_priority.go │ ├── modify_guest_name.go │ ├── modify_memory.go │ ├── modify_security_rule.go │ ├── query_snapshot.go │ ├── query_storage_paths.go │ ├── remove_security_rule.go │ ├── reset_guest_system.go │ ├── reset_monitor_secret.go │ ├── resize_volume.go │ ├── restore_snapshot.go │ ├── set_disk_threshold.go │ ├── set_network_threshold.go │ ├── shrink_volume.go │ ├── start_instance.go │ └── stop_instance.go └── transaction_manager.go /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | config 15 | log 16 | pkg 17 | cell 18 | test 19 | .idea 20 | .vscode -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | ## [1.4.1] - 2024-07-16 4 | 5 | ### 修正 6 | 7 | - qemu-img参数错误导致新建云主机时构建磁盘卷失败 8 | 9 | ### Fixed 10 | 11 | - Resize volume fail with wrong qemu-img parameters when creating new instance 12 | 13 | ## [1.4.0] - 2023-10-13 14 | 15 | ### 新增 16 | 17 | - StorageManager增加测试用例 18 | - 配置文件"data/instance.data"新增选项max_guest设置当前节点允许承载的最大云主机数量,默认值100 19 | - 配置文件"domain.cfg"新增选项timeout,设置操作超时时间,单位为秒,默认值10 20 | 21 | ### 变更 22 | 23 | - 云主机存在快照时,拒绝创建磁盘镜像 24 | - 允许删除根快照和活动快照 25 | - 启动时同步网络资源 26 | - 启动时校验存储卷 27 | - 磁盘卷锁定时拒绝启动云主机 28 | 29 | ### 修正 30 | 31 | - 删除快照失败导致模块崩溃 32 | - 缺少参数导致缩小卷失败 33 | 34 | ### Added 35 | 36 | - Add test cases for storage manager 37 | - Add option 'max_guest' to configure file "data/instance.data", default value is 100 38 | - Add option 'timeout' to configure file "domain.cfg", default value is 10 seconds 39 | 40 | ### Changed 41 | 42 | - Refuse to create disk image when snapshots available 43 | - Allow to delete root and active snapshot 44 | - Sync network resources when startup 45 | - Validate storage volumes when startup 46 | - Add custom cpu model for Windows 2016 compatible and better performance 47 | - Refuse to start instance when volumes locked 48 | 49 | ### Fixed 50 | 51 | - crash when delete snapshot failed 52 | - shrink volume fail when 
parameter omitted 53 | 54 | ## [1.3.1] - 2021-02-19 55 | 56 | ### Added 57 | 58 | - Set auto start 59 | 60 | ## [1.3.0] - 2020-10-29 61 | 62 | ### Added 63 | 64 | - Allocate instance address using Cloud-Init 65 | - Manage security policy of instance 66 | - Create instances with security policy 67 | 68 | ### Changed 69 | 70 | - Return an explicit error when resize disk fail after creating a new image 71 | - Return an explicit error when resize volume fail 72 | - Optimize output of image size in IO Scheduler 73 | 74 | ## [1.2.0] - 2020-04-11 75 | 76 | ### Added 77 | 78 | - Query/Change storage path 79 | - Create guest using template, use vga as the default video driver for better resolution 80 | - Reset monitor secret 81 | 82 | ### Changed 83 | 84 | - Collect disk space via storage paths 85 | 86 | ### Fixed 87 | 88 | - Huge/Wrong available memory/disk number when no qga available in guest 89 | 90 | ## [1.1.1] - 2020-01-01 91 | 92 | ### Changed 93 | 94 | - Reset system before initialization change from error to a warning 95 | - Reduce log for DHCP warning 96 | - Network detect interval change to two minutes after established some IP 97 | - Add CreateTime/MAC address to instance 98 | 99 | ## [1.1.0] - 2019-11-07 100 | 101 | ### Added 102 | 103 | - Add go mod 104 | 105 | ### Changed 106 | 107 | - Call core API via prefix '/api/v1/' 108 | - Change "/media_image_files/:id" to "/media_images/:id/file/" 109 | - Change "/disk_image_files/:id" to "/disk_images/:id/file/" 110 | 111 | ## [1.0.0] - 2019-7-14 112 | 113 | ### Added 114 | 115 | - Set threshold of CPU/Disk IO/Network 116 | 117 | ### Changed 118 | 119 | - Generate module name base on br0 120 | 121 | - Move to "github.com/project-nano" 122 | 123 | ## [0.8.3] - 2019-4-22 124 | 125 | ### Fixed 126 | 127 | - Read interface fail due to script code in ifcfg 128 | 129 | ## [0.8.2] - 2019-04-04 130 | 131 | ### Fixed 132 | 133 | - Enable Cloud-Init after resetting system image 134 | 135 | ## [0.8.1] - 2019-02-15 136 | 137 | ### Added 138 | 139 | - Rename guest 140 | 141 | ### Changed 142 | 143 | - Migrate bridge configure from interface 144 | 145 | - Adapt to new runnable implement 146 | 147 | ## [0.7.1] - 2018-11-27 148 | 149 | ### Added 150 | 151 | - "legacy" option of system version 152 | 153 | - Reset guest system 154 | 155 | - Sync storage option when compute pool available 156 | 157 | ## [0.6.1] - 2018-11-27 158 | 159 | ### Added 160 | 161 | - Support assigned network address of instance 162 | 163 | - Enable distributed DHCP service for MAC bound 164 | 165 | - Configure template for different OS version 166 | 167 | - Optimize mouse position using tablet input 168 | 169 | - Disable DHCP service in default network when startup 170 | 171 | ## [0.5.1] - 2018-10-31 172 | 173 | ### Added 174 | 175 | - Attach/Detach instances 176 | 177 | - Add 'qcow2' suffix to volume/snapshot files 178 | 179 | - Set hostname when using CloudInit module 180 | 181 | - Check the default route when startup 182 | 183 | ### Changed 184 | 185 | - Randomize allocation of the monitor port 186 | 187 | - Determine system/admin name of the guest by version when creating an instance 188 | 189 | ## [0.4.2] - 2018-10-11 190 | 191 | ### Added 192 | 193 | - Synchronize allocated network ports to instance configures 194 | 195 | ### Fixed 196 | 197 | - Compatible with local storage config file of the previous version 198 | 199 | ## [0.4.1] - 2018-9-30 200 | 201 | ### Added 202 | 203 | - Support NFS storage pool 204 | 205 | - Report share storage(NFS) mount status 206 | 207 | - Support 
volume/snapshot save on shared storage 208 | 209 | - Create instance metadata when using shared storage 210 | 211 | - Automount shared storage when cell start or added to compute pool 212 | 213 | ### Fixed 214 | 215 | - Snapshot files left when delete volumes 216 | 217 | - Try recover stub service when stop module 218 | 219 | ## [0.3.1] - 2018-8-29 220 | 221 | ### Added 222 | 223 | - Snapshot management: create/delete/restore/get/query 224 | 225 | - Storage create time of guest 226 | 227 | - Inject/eject media in running instance 228 | 229 | - Lock volumes when running disk operates 230 | 231 | ### Fixed 232 | 233 | - Get instance status sync status without notify core 234 | 235 | ## [0.2.3] - 2018-8-14 236 | 237 | ### Added 238 | 239 | - Support initialize guest after created using Cloud-Init in NoCloudMode 240 | 241 | - Enable guest system version/modules configure 242 | 243 | - Enable change admin password/create new admin/auto resize&mount disk when ci module enabled(cloud-init cloud-utils required in guest) 244 | 245 | ### Modified 246 | 247 | - Add listen port TCP: {cellIP}:25469 for Cloud-Init initiator 248 | 249 | ## [0.2.2] - 2018-8-6 250 | 251 | ### Modified 252 | 253 | - Enable KVM instead of TCG of QEMU, boost performance when VT-x/AMD-v enabled 254 | 255 | - Using the IDE system disk if the system of an instance is "windows". 256 | 257 | - Don't save NetworkAddress of Instance 258 | 259 | - Avoid response channel block after timeout event invoked 260 | 261 | - Fixed: panic when try to notify the Resize/Shrink tasks 262 | 263 | ## [0.2.1] - 2018-7-29 264 | 265 | ### Added 266 | 267 | - Modify Cores/Memory/Disk Size 268 | 269 | - Shrink guest volume 270 | 271 | - Set/Get user password 272 | 273 | - Add "system" property in guest 274 | 275 | ## [0.1.4] - 2018-7-24 276 | 277 | ### Modified 278 | 279 | - Resize guest disk when clone finished 280 | 281 | - Compute instance CPU usage properly 282 | 283 | - Get IP address for started instance 284 | 285 | - Notify Core module when instance ip detected 286 | 287 | - Fixed: Internal instance address not send to Core module 288 | 289 | ## [0.1.3] - 2018-7-19 290 | 291 | ### modified 292 | 293 | - fix instance MAC not properly generated 294 | 295 | 296 | ## [0.1.2] - 2018-7-18 297 | 298 | ### modified 299 | 300 | - add version output on the console 301 | 302 | - add qemu-agent channel in guest 303 | 304 | - fix instance memory usage monitor 305 | 306 | - try to reconnect when core disconnected 307 | 308 | - gracefully disconnect when module stop 309 | 310 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 project-nano 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nano Cell 2 | 3 | [[版本历史/ChangeLog](CHANGELOG.md)] 4 | 5 | [English Version](#introduce) 6 | 7 | ### 简介 8 | 9 | Cell模块是Nano集群的资源节点,用于组成虚拟化资源池,在本地创建、管理和释放虚拟机实例。 10 | 11 | 由于涉及网络配置,建议使用专用Installer进行部署,项目最新版本请访问[此地址](https://github.com/project-nano/releases) 12 | 13 | [项目官网](https://nanos.cloud/) 14 | 15 | [项目全部源代码](https://github.com/project-nano) 16 | 17 | ### 编译 18 | 19 | 环境要求 20 | 21 | - CentOS 7 x86 22 | - Golang 1.20 23 | 24 | ``` 25 | 准备依赖的framework 26 | $git clone https://github.com/project-nano/framework.git 27 | 28 | 准备编译源代码 29 | $git clone https://github.com/project-nano/cell.git 30 | 31 | 编译 32 | $cd cell 33 | $go build 34 | ``` 35 | 36 | 编译成功在当前目录生成二进制文件cell 37 | 38 | ### 使用 39 | 40 | 环境要求 41 | 42 | - CentOS 7 x86 43 | 44 | ``` 45 | 执行以下指令,启动Cell模块 46 | $./cell start 47 | 48 | 也可以使用绝对地址调用或者写入开机启动脚本,比如 49 | $/opt/nano/cell/cell start 50 | 51 | ``` 52 | 53 | 模块运行日志输出在log/cell.log文件中,用于查错和调试 54 | 55 | **由于Cell节点依赖Core模块进行自动网络识别,所以集群工作时必须最先启动Core模块,再启动Cell节点。** 56 | 57 | 此外,除了模块启动功能,Cell还支持以下命令参数启动 58 | 59 | | 命令名 | 说明 | 60 | | ------ | ---------------------------------- | 61 | | start | 启动服务 | 62 | | stop | 停止服务 | 63 | | status | 检查当前服务状态 | 64 | | halt | 强行中止服务(用于服务异常时重启) | 65 | 66 | 67 | 68 | ### 配置 69 | 70 | Cell模块配置信息存放在config路径文件中,修改后需要重启模块生效 71 | 72 | #### 域通讯配置 73 | 74 | 文件config/domain.cfg管理Cell模块的域通讯信息,域参数必须与Core模块一致才能成功识别 75 | 76 | | 参数 | 值类型 | 默认值 | 必填 | 说明 | 77 | | ----------------- | ------ | ----------- | ---- | ---------------------------- | 78 | | **domain** | 字符串 | nano | 是 | 通讯域名称,用于节点间识别 | 79 | | **group_address** | 字符串 | 224.0.0.226 | 是 | 通讯域组播地址,用于服务发现 | 80 | | **group_port** | 整数 | 5599 | 是 | 通讯域组播端口,用于服务发现 | 81 | | **timeout** | 整数 | 10 | | 交易处理超时时间,单位:秒 | 82 | 83 | 示例配置文件如下 84 | 85 | ```json 86 | { 87 | "domain": "nano", 88 | "group_address": "224.0.0.226", 89 | "group_port": 5599 90 | } 91 | ``` 92 | 93 | 94 | 95 | ### 目录结构 96 | 97 | 模块主要目录和文件如下 98 | 99 | | 目录/文件 | 说明 | 100 | | --------- | -------------------- | 101 | | cell | 模块二进制执行文件 | 102 | | config/ | 配置文件存储目录 | 103 | | data/ | 模块运行数据存储目录 | 104 | | log/ | 运行日志存储目录 | 105 | 106 | # README 107 | 108 | ### Introduce 109 | 110 | Cell is the resource node of Nano cluster, forming a virtualized resource pool. It creates, manages, and releases local virtual machine instances. 111 | 112 | It is recommended to use a dedicated Installer for deployment. For the latest project version, please visit [this address](https://github.com/project-nano/releases). 
113 | 114 | [Official Project Website](https://us.nanos.cloud/en/) 115 | 116 | [Full Source Code of the Project](https://github.com/project-nano) 117 | 118 | ### Compilation 119 | 120 | Environment requirements 121 | 122 | - CentOS 7 x86 123 | - Golang 1.20 124 | 125 | ```bash 126 | # Prepare the framework dependencies 127 | $git clone https://github.com/project-nano/framework.git 128 | 129 | # Prepare the source code for compilation 130 | $git clone https://github.com/project-nano/cell.git 131 | 132 | # Compile 133 | $cd cell 134 | $go build 135 | 136 | ``` 137 | 138 | The binary file "cell" will be generated in the current directory on success 139 | 140 | ### Usage 141 | 142 | Environment 143 | 144 | - CentOS 7 x86 145 | 146 | ```bash 147 | # Start the module 148 | $./cell start 149 | 150 | # Alternatively, invoke it with an absolute path or add it to a startup script, for example: 151 | $/opt/nano/cell/cell start 152 | 153 | ``` 154 | 155 | The running log is written to the file log/cell.log, which can be used for troubleshooting and debugging 156 | 157 | **Since the Cell nodes depend on the Core module for automatic network recognition, you must start the Core module before Cell nodes.** 158 | 159 | Besides starting the module, Cell also supports the following command parameters 160 | 161 | | Command name | Explanation | 162 | | ------------ | ----------------------------------------- | 163 | | start | Start service | 164 | | stop | Stop service | 165 | | status | Check current service status | 166 | | halt | Force abort service when exception occurs | 167 | 168 | 169 | 170 | ### Configuration 171 | 172 | Cell module configuration information is stored in files under the config path, and modifications require a restart of the module to take effect. 173 | 174 | #### Domain Communication 175 | 176 | The file `config/domain.cfg` manages the domain communication information for the Cell module. The domain parameters must match those of the Core module so that the nodes can recognize each other. 
177 | 178 | | Parameter | Value Type | Default Value | Required | Explanation | 179 | | ----------------- | ---------- | ------------- | -------- | ------------------------------------------------------------ | 180 | | **domain** | String | nano | Yes | The name of the communication domain, used for cluster identification | 181 | | **group_address** | String | 224.0.0.226 | Yes | Multicast address of the communication domain, used for service discovery | 182 | | **group_port** | Integer | 5599 | Yes | Multicast port of the communication domain, used for service discovery | 183 | | **timeout** | Integer | 10 | | Transaction timeout in seconds | 184 | 185 | An example configuration file is as follows: 186 | 187 | ```json 188 | { 189 | "domain": "nano", 190 | "group_address": "224.0.0.226", 191 | "group_port": 5599 192 | } 193 | ``` 194 | 195 | 196 | 197 | ### Directory Structure 198 | 199 | | Directory/File | Explanation | 200 | | -------------- | ---------------------------------------- | 201 | | cell | The binary execution file of the module | 202 | | config/ | The storage directory for configurations | 203 | | data/ | The storage directory for operation data | 204 | | log/ | The storage directory for logs | 205 | -------------------------------------------------------------------------------- /cell_service.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "path/filepath" 8 | "time" 9 | 10 | "github.com/libvirt/libvirt-go" 11 | "github.com/project-nano/cell/service" 12 | "github.com/project-nano/framework" 13 | ) 14 | 15 | const ( 16 | CurrentVersion = "1.4.1" 17 | ) 18 | 19 | type CellService struct { 20 | framework.EndpointService 21 | DataPath string 22 | collector *CollectorModule 23 | insManager *service.InstanceManager 24 | storageManager *service.StorageManager 25 | networkManager *service.NetworkManager 26 | transManager *TransactionManager 27 | virConnect *libvirt.Connect 28 | initiator *service.GuestInitiator 29 | dhcpService *service.DHCPService 30 | } 31 | 32 | func CreateCellService(config DomainConfig, workingPath string) (service *CellService, err error) { 33 | var dataPath = filepath.Join(workingPath, DataPathName) 34 | if _, err = os.Stat(dataPath); os.IsNotExist(err) { 35 | if err = os.Mkdir(dataPath, DefaultPathPerm); err != nil { 36 | err = fmt.Errorf("create data path '%s' fail: %s", dataPath, err.Error()) 37 | return 38 | } else { 39 | log.Printf("data path '%s' created", dataPath) 40 | } 41 | } 42 | service = &CellService{} 43 | service.DataPath = dataPath 44 | if service.EndpointService, err = framework.CreatePeerEndpoint(config.GroupAddress, config.GroupPort, config.Domain); err != nil { 45 | err = fmt.Errorf("create new endpoint fail: %s", err.Error()) 46 | return 47 | } 48 | return service, nil 49 | } 50 | 51 | func (cell *CellService) OnMessageReceived(msg framework.Message) { 52 | if targetSession := msg.GetToSession(); targetSession != 0 { 53 | if err := cell.transManager.PushMessage(msg); err != nil { 54 | log.Printf(" push message [%08X] from %s to session [%08X] fail: %s", msg.GetID(), msg.GetSender(), targetSession, err.Error()) 55 | } 56 | return 57 | } 58 | switch msg.GetID() { 59 | case framework.CreateGuestRequest: 60 | case framework.DeleteGuestRequest: 61 | case framework.GetGuestRequest: 62 | case framework.QueryGuestRequest: 63 | case framework.GetInstanceStatusRequest: 64 | case framework.StartInstanceRequest: 65 | case 
framework.StopInstanceRequest: 66 | case framework.ComputePoolReadyEvent: 67 | case framework.CreateDiskImageRequest: 68 | case framework.ModifyCoreRequest: 69 | case framework.ModifyMemoryRequest: 70 | case framework.ModifyPriorityRequest: 71 | case framework.ModifyDiskThresholdRequest: 72 | case framework.ModifyNetworkThresholdRequest: 73 | case framework.ModifyAuthRequest: 74 | case framework.ModifyGuestNameRequest: 75 | case framework.ModifyAutoStartRequest: 76 | case framework.GetAuthRequest: 77 | case framework.ResizeDiskRequest: 78 | case framework.ShrinkDiskRequest: 79 | case framework.ResetSystemRequest: 80 | case framework.InsertMediaRequest: 81 | case framework.EjectMediaRequest: 82 | case framework.QuerySnapshotRequest: 83 | case framework.GetSnapshotRequest: 84 | case framework.CreateSnapshotRequest: 85 | case framework.DeleteSnapshotRequest: 86 | case framework.RestoreSnapshotRequest: 87 | case framework.GetComputePoolCellRequest: 88 | case framework.ComputeCellRemovedEvent: 89 | case framework.AttachInstanceRequest: 90 | case framework.DetachInstanceRequest: 91 | case framework.ResetSecretRequest: 92 | case framework.QueryCellStorageRequest: 93 | case framework.ModifyCellStorageRequest: 94 | case framework.AddressPoolChangedEvent: 95 | 96 | //security policy 97 | case framework.GetGuestRuleRequest: 98 | case framework.AddGuestRuleRequest: 99 | case framework.ModifyGuestRuleRequest: 100 | case framework.ChangeGuestRuleDefaultActionRequest: 101 | case framework.ChangeGuestRuleOrderRequest: 102 | case framework.RemoveGuestRuleRequest: 103 | 104 | default: 105 | cell.handleIncomingMessage(msg) 106 | return 107 | } 108 | var err = cell.transManager.InvokeTask(msg) 109 | if err != nil { 110 | log.Printf(" invoke transaction with message [%08X] fail: %s", msg.GetID(), err.Error()) 111 | } 112 | } 113 | 114 | func (cell *CellService) GetVersion() string { 115 | return CurrentVersion 116 | } 117 | 118 | func (cell *CellService) handleIncomingMessage(msg framework.Message) { 119 | switch msg.GetID() { 120 | default: 121 | log.Printf(" message [%08X] from %s.[%08X] ignored", msg.GetID(), msg.GetSender(), msg.GetFromSession()) 122 | } 123 | } 124 | 125 | func (cell *CellService) OnServiceConnected(name string, t framework.ServiceType, remoteAddress string) { 126 | log.Printf(" service %s connected, type %d", name, t) 127 | if t == framework.ServiceTypeCore { 128 | cell.collector.AddObserver(name) 129 | } 130 | } 131 | 132 | func (cell *CellService) OnServiceDisconnected(name string, t framework.ServiceType, gracefullyClose bool) { 133 | if gracefullyClose { 134 | log.Printf(" service %s closed by remote, type %d", name, t) 135 | } else { 136 | log.Printf(" service %s lost, type %d", name, t) 137 | } 138 | if t == framework.ServiceTypeCore { 139 | cell.collector.RemoveObserver(name) 140 | } 141 | } 142 | 143 | func (cell *CellService) OnDependencyReady() { 144 | cell.SetServiceReady() 145 | } 146 | 147 | func (cell *CellService) InitialEndpoint() (err error) { 148 | log.Printf(" initial cell service, v %s", CurrentVersion) 149 | log.Printf(" domain %s, group address %s:%d", cell.GetDomain(), cell.GetGroupAddress(), cell.GetGroupPort()) 150 | log.Printf(" default operate timeout %d seconds", service.GetConfigurator().GetOperateTimeout()/time.Second) 151 | 152 | const ( 153 | DefaultLibvirtURL = "qemu:///system" 154 | ) 155 | if cell.virConnect, err = libvirt.NewConnect(DefaultLibvirtURL); err != nil { 156 | return err 157 | } 158 | if cell.storageManager, err = 
service.CreateStorageManager(cell.DataPath, cell.virConnect); err != nil { 159 | err = fmt.Errorf("initial storage manager fail: %s", err.Error()) 160 | return 161 | } 162 | 163 | if cell.insManager, err = service.CreateInstanceManager(cell.DataPath, cell.virConnect); err != nil { 164 | err = fmt.Errorf("initial instance manager fail: %s", err.Error()) 165 | return 166 | } 167 | if cell.collector, err = CreateCollectorModule(cell, 168 | cell.insManager.GetEventChannel(), cell.storageManager.GetOutputEventChannel()); err != nil { 169 | return err 170 | } 171 | 172 | if cell.networkManager, err = service.CreateNetworkManager( 173 | cell.DataPath, cell.virConnect, cell.insManager.GetMaxGuest()); err != nil { 174 | return err 175 | } 176 | var volumeResources = cell.insManager.GetInstanceVolumeResources() 177 | if err = cell.storageManager.ValidateResources(volumeResources); err != nil { 178 | return err 179 | } 180 | var networkResources = cell.insManager.GetInstanceNetworkResources() 181 | if err = cell.networkManager.SyncInstanceResources(networkResources); err != nil { 182 | return err 183 | } 184 | if cell.initiator, err = service.CreateInitiator(cell.networkManager, cell.insManager); err != nil { 185 | return err 186 | } 187 | if cell.dhcpService, err = service.CreateDHCPService(cell.networkManager); err != nil { 188 | return err 189 | } 190 | 191 | cell.transManager, err = CreateTransactionManager(cell, cell.insManager, cell.storageManager, cell.networkManager) 192 | if err != nil { 193 | return err 194 | } 195 | log.Println(" all module ready") 196 | return nil 197 | } 198 | func (cell *CellService) OnEndpointStarted() (err error) { 199 | if err = cell.collector.Start(); err != nil { 200 | return err 201 | } 202 | if err = cell.insManager.Start(); err != nil { 203 | return err 204 | } 205 | if err = cell.storageManager.Start(); err != nil { 206 | return err 207 | } 208 | if err = cell.networkManager.Start(); err != nil { 209 | return err 210 | } 211 | if err = cell.initiator.Start(); err != nil { 212 | return 213 | } 214 | if err = cell.dhcpService.Start(); err != nil { 215 | return 216 | } 217 | if err = cell.transManager.Start(); err != nil { 218 | return err 219 | } 220 | log.Println(" started") 221 | return nil 222 | } 223 | func (cell *CellService) OnEndpointStopped() { 224 | if err := cell.transManager.Stop(); err != nil { 225 | log.Printf(" stop transaction manger fail: %s", err.Error()) 226 | } 227 | if err := cell.dhcpService.Stop(); err != nil { 228 | log.Printf(" stop dhcp service fail: %s", err.Error()) 229 | } 230 | if err := cell.initiator.Stop(); err != nil { 231 | log.Printf(" stop guest initiator fail: %s", err.Error()) 232 | } 233 | if err := cell.networkManager.Stop(); err != nil { 234 | log.Printf(" stop network manager fail: %s", err.Error()) 235 | } 236 | if err := cell.storageManager.Stop(); err != nil { 237 | log.Printf(" stop storage manager fail: %s", err.Error()) 238 | } 239 | if err := cell.insManager.Stop(); err != nil { 240 | log.Printf(" stop instance manager fail: %s", err.Error()) 241 | } 242 | if err := cell.collector.Stop(); err != nil { 243 | log.Printf(" stop collector fail: %s", err.Error()) 244 | } 245 | log.Println(" all module stopped") 246 | } 247 | -------------------------------------------------------------------------------- /collector_module.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | 
"github.com/project-nano/framework" 7 | "github.com/shirou/gopsutil/cpu" 8 | "github.com/shirou/gopsutil/disk" 9 | "github.com/shirou/gopsutil/mem" 10 | "github.com/shirou/gopsutil/net" 11 | "log" 12 | "time" 13 | ) 14 | 15 | type hostStatus struct { 16 | Cores uint 17 | CpuUsage float64 18 | Memory uint64 19 | MemoryAvailable uint64 20 | Disk uint64 21 | DiskAvailable uint64 22 | } 23 | 24 | type ioSnapshot struct { 25 | Timestamp time.Time 26 | DiskWrite uint64 27 | DiskRead uint64 28 | NetworkSend uint64 29 | NetworkReceive uint64 30 | } 31 | 32 | type ioCounter struct { 33 | Timestamp time.Time 34 | Duration time.Duration 35 | BytesWritten uint64 36 | BytesRead uint64 37 | BytesSent uint64 38 | BytesReceived uint64 39 | WriteSpeed uint64 40 | ReadSpeed uint64 41 | SendSpeed uint64 42 | ReceiveSpeed uint64 43 | } 44 | 45 | type collectorCmd struct { 46 | Command collectorCommandType 47 | Name string 48 | } 49 | 50 | type collectorCommandType int 51 | 52 | const ( 53 | collectCommandAdd = iota 54 | collectCommandRemove 55 | ) 56 | 57 | type CollectorModule struct { 58 | sender framework.MessageSender 59 | commands chan collectorCmd 60 | instanceEvents chan service.InstanceStatusChangedEvent 61 | onStoragePathsChanged chan []string 62 | localStoragePaths []string 63 | runner *framework.SimpleRunner 64 | } 65 | 66 | func CreateCollectorModule(sender framework.MessageSender, 67 | eventChan chan service.InstanceStatusChangedEvent, storageChan chan []string) (*CollectorModule, error) { 68 | const ( 69 | DefaultQueueSize = 1 << 10 70 | ) 71 | var module = CollectorModule{} 72 | module.sender = sender 73 | module.commands = make(chan collectorCmd, DefaultQueueSize) 74 | module.instanceEvents = eventChan 75 | module.onStoragePathsChanged = storageChan 76 | module.runner = framework.CreateSimpleRunner(module.Routine) 77 | return &module, nil 78 | } 79 | 80 | func (collector *CollectorModule) AddObserver(name string) error { 81 | collector.commands <- collectorCmd{collectCommandAdd, name} 82 | return nil 83 | } 84 | 85 | func (collector *CollectorModule) RemoveObserver(name string) error { 86 | collector.commands <- collectorCmd{collectCommandRemove, name} 87 | return nil 88 | } 89 | 90 | func (collector *CollectorModule) Start() error { 91 | return collector.runner.Start() 92 | } 93 | 94 | func (collector *CollectorModule) Stop() error { 95 | return collector.runner.Stop() 96 | } 97 | 98 | func (collector *CollectorModule) Routine(c framework.RoutineController) { 99 | const ( 100 | reportInterval = 2 * time.Second 101 | collectInterval = reportInterval 102 | ) 103 | log.Println(" module started") 104 | var observerMap = map[string]bool{} 105 | var latestIOSnapshot ioSnapshot 106 | var latestSnapshotAvailable = false 107 | var reportAvailable = false 108 | var reportMessage framework.Message 109 | var reportTicker = time.NewTicker(reportInterval) 110 | var collectTicker = time.NewTicker(collectInterval) 111 | 112 | //prepare cpu percentage 113 | cpu.Percent(0, false) 114 | 115 | for !c.IsStopping() { 116 | select { 117 | case <-reportTicker.C: 118 | //on report 119 | if !reportAvailable { 120 | break 121 | } 122 | if 0 == len(observerMap) { 123 | //no observer available 124 | break 125 | } 126 | for target, _ := range observerMap { 127 | if err := collector.sender.SendMessage(reportMessage, target); err != nil { 128 | log.Printf(" warning: send report to %s fail: %s", target, err.Error()) 129 | } 130 | } 131 | 132 | case <-collectTicker.C: 133 | //on collect 134 | status, err := 
collector.collectHostStatus() 135 | if err != nil { 136 | log.Printf(" collect host status fail: %s", err.Error()) 137 | break 138 | } 139 | if !latestSnapshotAvailable { 140 | //collect latest counter 141 | latestIOSnapshot, err = captureIOSnapshot() 142 | if err != nil { 143 | log.Printf(" capture first io snapshot fail: %s", err.Error()) 144 | break 145 | } 146 | latestSnapshotAvailable = true 147 | break 148 | } 149 | currentSnapshot, err := captureIOSnapshot() 150 | if err != nil { 151 | log.Printf(" capture io snapshot fail: %s", err.Error()) 152 | break 153 | } 154 | counter := computeIOCounter(latestIOSnapshot, currentSnapshot) 155 | latestIOSnapshot = currentSnapshot 156 | reportMessage, err = buildObserverNotifyMessage(status, counter) 157 | if err != nil { 158 | log.Printf(" marshal report message fail: %s", err.Error()) 159 | break 160 | } 161 | reportAvailable = true 162 | case <-c.GetNotifyChannel(): 163 | //exit 164 | c.SetStopping() 165 | case cmd := <-collector.commands: 166 | switch cmd.Command { 167 | case collectCommandAdd: 168 | var coreName = cmd.Name 169 | if _, exists := observerMap[coreName]; exists { 170 | log.Printf(" observer %s already exists", coreName) 171 | break 172 | } 173 | observerMap[coreName] = true 174 | log.Printf(" new observer %s added", coreName) 175 | case collectCommandRemove: 176 | if _, exists := observerMap[cmd.Name]; !exists { 177 | log.Printf(" invalid observer %s", cmd.Name) 178 | } else { 179 | delete(observerMap, cmd.Name) 180 | log.Printf(" observer %s removed", cmd.Name) 181 | } 182 | default: 183 | log.Printf(" invalid collector command %d", cmd.Command) 184 | } 185 | case paths := <-collector.onStoragePathsChanged: 186 | collector.localStoragePaths = paths 187 | log.Printf(" local storage paths changed to %s", paths) 188 | case event := <-collector.instanceEvents: 189 | var msg framework.Message 190 | switch event.Event { 191 | case service.InstanceStarted: 192 | msg, _ = framework.CreateJsonMessage(framework.GuestStartedEvent) 193 | msg.SetFromSession(0) 194 | msg.SetString(framework.ParamKeyInstance, event.ID) 195 | case service.InstanceStopped: 196 | msg, _ = framework.CreateJsonMessage(framework.GuestStoppedEvent) 197 | msg.SetFromSession(0) 198 | msg.SetString(framework.ParamKeyInstance, event.ID) 199 | case service.AddressChanged: 200 | msg, _ = framework.CreateJsonMessage(framework.AddressChangedEvent) 201 | msg.SetFromSession(0) 202 | msg.SetString(framework.ParamKeyInstance, event.ID) 203 | msg.SetString(framework.ParamKeyAddress, event.Address) 204 | default: 205 | log.Printf(" ignore invalid instance event type %d", event.Event) 206 | } 207 | collector.broadCastMessage(msg, observerMap) 208 | } 209 | } 210 | 211 | log.Println(" module stopped") 212 | c.NotifyExit() 213 | } 214 | 215 | func (collector *CollectorModule) broadCastMessage(message framework.Message, observers map[string]bool) { 216 | if 0 == len(observers) { 217 | log.Println(" ignore broadcast because no observer available") 218 | return 219 | } 220 | for receiver, _ := range observers { 221 | if err := collector.sender.SendMessage(message, receiver); err != nil { 222 | log.Printf(" warning: notify message %08X to %s fail: %s", message.GetID(), receiver, err.Error()) 223 | } 224 | } 225 | } 226 | 227 | func (collector *CollectorModule) collectHostStatus() (hostStatus, error) { 228 | var status hostStatus 229 | count, err := cpu.Counts(true) 230 | if err != nil { 231 | return status, err 232 | } 233 | status.Cores = uint(count) 234 | usages, err := 
cpu.Percent(0, false) 235 | if err != nil { 236 | return status, err 237 | } 238 | if 1 != len(usages) { 239 | return status, fmt.Errorf("unexpected cpu usage count %d", len(usages)) 240 | } 241 | status.CpuUsage = usages[0] 242 | vm, err := mem.VirtualMemory() 243 | if err != nil { 244 | return status, err 245 | } 246 | status.Memory = vm.Total 247 | status.MemoryAvailable = vm.Available 248 | for _, path := range collector.localStoragePaths { 249 | usage, err := disk.Usage(path) 250 | if err != nil { 251 | return status, err 252 | } 253 | status.Disk += usage.Total 254 | status.DiskAvailable += usage.Free 255 | } 256 | return status, nil 257 | } 258 | 259 | func captureIOSnapshot() (ioSnapshot, error) { 260 | var snapshot = ioSnapshot{Timestamp: time.Now()} //record the capture time so computeIOCounter can measure the interval between snapshots 261 | partitions, err := disk.Partitions(false) 262 | if err != nil { 263 | return snapshot, err 264 | } 265 | for _, partitionStat := range partitions { 266 | counters, err := disk.IOCounters(partitionStat.Device) 267 | if err != nil { 268 | return snapshot, err 269 | } 270 | for _, counter := range counters { 271 | //for devName, counter := range counters{ 272 | snapshot.DiskWrite += counter.WriteBytes 273 | snapshot.DiskRead += counter.ReadBytes 274 | //log.Printf("disk io %s > %s: %d / %d", partitionStat.Device, devName, counter.WriteBytes, counter.ReadBytes) 275 | } 276 | } 277 | 278 | netStats, err := net.IOCounters(false) 279 | if err != nil { 280 | return snapshot, err 281 | } 282 | for _, stat := range netStats { 283 | snapshot.NetworkReceive += stat.BytesRecv 284 | snapshot.NetworkSend += stat.BytesSent 285 | //log.Printf("interface %s: %d / %d", stat.Name, stat.BytesSent, stat.BytesRecv) 286 | } 287 | //log.Printf("debug: snapshot disk %d / %d, network %d / %d", snapshot.DiskWrite, snapshot.DiskRead, snapshot.NetworkSend, snapshot.NetworkReceive) 288 | return snapshot, nil 289 | } 290 | 291 | func computeIOCounter(previous, current ioSnapshot) ioCounter { 292 | elapsed := current.Timestamp.Sub(previous.Timestamp) 293 | if elapsed < time.Second*1 { 294 | return ioCounter{current.Timestamp, elapsed, 0, 0, 0, 0, 295 | 0, 0, 0, 0} 296 | } 297 | elapsedMilliSeconds := uint64(elapsed / time.Millisecond) 298 | var result = ioCounter{Timestamp: current.Timestamp, Duration: elapsed} 299 | result.BytesRead = current.DiskRead - previous.DiskRead 300 | result.BytesWritten = current.DiskWrite - previous.DiskWrite 301 | result.BytesSent = current.NetworkSend - previous.NetworkSend 302 | result.BytesReceived = current.NetworkReceive - previous.NetworkReceive 303 | result.WriteSpeed = result.BytesWritten * 1000 / elapsedMilliSeconds 304 | result.ReadSpeed = result.BytesRead * 1000 / elapsedMilliSeconds 305 | result.SendSpeed = result.BytesSent * 1000 / elapsedMilliSeconds 306 | result.ReceiveSpeed = result.BytesReceived * 1000 / elapsedMilliSeconds 307 | return result 308 | } 309 | 310 | func buildObserverNotifyMessage(status hostStatus, io ioCounter) (msg framework.Message, err error) { 311 | msg, err = framework.CreateJsonMessage(framework.CellStatusReportEvent) 312 | if err != nil { 313 | return msg, err 314 | } 315 | msg.SetUInt(framework.ParamKeyCore, status.Cores) 316 | msg.SetFloat(framework.ParamKeyUsage, status.CpuUsage) 317 | msg.SetUIntArray(framework.ParamKeyMemory, []uint64{status.MemoryAvailable, status.Memory}) 318 | msg.SetUIntArray(framework.ParamKeyDisk, []uint64{status.DiskAvailable, status.Disk}) 319 | msg.SetUIntArray(framework.ParamKeyIO, []uint64{io.BytesRead, io.BytesWritten, io.BytesReceived, io.BytesSent}) 320 | 
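//both arrays use the same fixed order: read, write, receive, send; ParamKeyIO carries bytes transferred between the last two snapshots, ParamKeySpeed carries bytes per second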
msg.SetUIntArray(framework.ParamKeySpeed, []uint64{io.ReadSpeed, io.WriteSpeed, io.ReceiveSpeed, io.SendSpeed}) 321 | return msg, nil 322 | } 323 | -------------------------------------------------------------------------------- /daemon.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "github.com/project-nano/cell/service" 9 | "github.com/project-nano/framework" 10 | "github.com/project-nano/sonar" 11 | "github.com/vishvananda/netlink" 12 | "log" 13 | "net" 14 | "os" 15 | "os/exec" 16 | "path/filepath" 17 | "strings" 18 | ) 19 | 20 | type DomainConfig struct { 21 | Domain string `json:"domain"` 22 | GroupAddress string `json:"group_address"` 23 | GroupPort int `json:"group_port"` 24 | Timeout int `json:"timeout,omitempty"` 25 | } 26 | 27 | type MainService struct { 28 | cell *CellService 29 | } 30 | 31 | const ( 32 | ExecuteName = "cell" 33 | DomainConfigFileName = "domain.cfg" 34 | ConfigPathName = "config" 35 | DataPathName = "data" 36 | DefaultPathPerm = 0740 37 | DefaultConfigPerm = 0640 38 | defaultOperateTimeout = 10 //10 seconds 39 | ) 40 | 41 | func (service *MainService) Start() (output string, err error) { 42 | if nil == service.cell { 43 | err = errors.New("invalid service") 44 | return 45 | } 46 | if err = service.cell.Start(); err != nil { 47 | return 48 | } 49 | output = fmt.Sprintf("\nCell Module %s\nservice %s listen at '%s:%d'\ngroup '%s:%d', domain '%s'", 50 | service.cell.GetVersion(), 51 | service.cell.GetName(), service.cell.GetListenAddress(), service.cell.GetListenPort(), 52 | service.cell.GetGroupAddress(), service.cell.GetGroupPort(), service.cell.GetDomain()) 53 | return 54 | } 55 | 56 | func (service *MainService) Stop() (output string, err error) { 57 | if nil == service.cell { 58 | err = errors.New("invalid service") 59 | return 60 | } 61 | err = service.cell.Stop() 62 | return 63 | } 64 | 65 | func (service *MainService) Snapshot() (output string, err error) { 66 | output = "hello, this is stub for snapshot" 67 | return 68 | } 69 | 70 | func generateConfigure(workingPath string) (err error) { 71 | if err = configureNetworkForCell(); err != nil { 72 | fmt.Printf("configure cell network fail: %s\n", err.Error()) 73 | return 74 | } 75 | if err = checkDefaultRoute(); err != nil { 76 | fmt.Printf("check default route fail: %s\n", err.Error()) 77 | return 78 | } 79 | var configPath = filepath.Join(workingPath, ConfigPathName) 80 | if _, err = os.Stat(configPath); os.IsNotExist(err) { 81 | //create path 82 | err = os.Mkdir(configPath, DefaultPathPerm) 83 | if err != nil { 84 | return 85 | } 86 | fmt.Printf("config path %s created\n", configPath) 87 | } 88 | 89 | var configFile = filepath.Join(configPath, DomainConfigFileName) 90 | if _, err = os.Stat(configFile); os.IsNotExist(err) { 91 | fmt.Println("No configuration available, follow the instructions to generate a new one.") 92 | 93 | var config = DomainConfig{ 94 | Timeout: defaultOperateTimeout, 95 | } 96 | if config.Domain, err = framework.InputString("Group Domain Name", sonar.DefaultDomain); err != nil { 97 | return 98 | } 99 | if config.GroupAddress, err = framework.InputString("Group MultiCast Address", sonar.DefaultMulticastAddress); err != nil { 100 | return 101 | } 102 | if config.GroupPort, err = framework.InputInteger("Group MultiCast Port", sonar.DefaultMulticastPort); err != nil { 103 | return 104 | } 105 | //write 106 | var data []byte 107 | data, err = 
json.MarshalIndent(config, "", " ") 108 | if err != nil { 109 | return 110 | } 111 | if err = os.WriteFile(configFile, data, DefaultConfigPerm); err != nil { 112 | return 113 | } 114 | fmt.Printf("default configure '%s' generated\n", configFile) 115 | } 116 | return 117 | } 118 | 119 | func createDaemon(workingPath string) (daemon framework.DaemonizedService, err error) { 120 | var configPath = filepath.Join(workingPath, ConfigPathName) 121 | var configFile = filepath.Join(configPath, DomainConfigFileName) 122 | var data []byte 123 | if data, err = os.ReadFile(configFile); err != nil { 124 | err = fmt.Errorf("read config fail: %s", err.Error()) 125 | return 126 | } 127 | var config DomainConfig 128 | if err = json.Unmarshal(data, &config); err != nil { 129 | err = fmt.Errorf("load config fail: %s", err.Error()) 130 | return 131 | } 132 | var inf *net.Interface 133 | if inf, err = net.InterfaceByName(service.DefaultBridgeName); err != nil { 134 | err = fmt.Errorf("get default bridge fail: %s", err.Error()) 135 | return 136 | } 137 | //set timeout 138 | if config.Timeout > 0 { 139 | service.GetConfigurator().SetOperateTimeout(config.Timeout) 140 | } 141 | var s = MainService{} 142 | if s.cell, err = CreateCellService(config, workingPath); err != nil { 143 | err = fmt.Errorf("create service fail: %s", err.Error()) 144 | return 145 | } 146 | 147 | s.cell.RegisterHandler(s.cell) 148 | err = s.cell.GenerateName(framework.ServiceTypeCell, inf) 149 | return &s, err 150 | } 151 | 152 | func checkDefaultRoute() (err error) { 153 | var routes []netlink.Route 154 | routes, err = netlink.RouteList(nil, netlink.FAMILY_V4) 155 | if err != nil { 156 | return 157 | } 158 | if 0 == len(routes) { 159 | err = errors.New("no route available") 160 | return 161 | } 162 | var defaultRouteAvailable = false 163 | for _, route := range routes { 164 | if route.Dst == nil { 165 | defaultRouteAvailable = true 166 | } 167 | } 168 | if !defaultRouteAvailable { 169 | err = errors.New("no default route available") 170 | return 171 | } 172 | fmt.Printf("default route ready\n") 173 | return nil 174 | } 175 | 176 | func configureNetworkForCell() (err error) { 177 | if hasDefaultBridge() { 178 | fmt.Printf("bridge %s is ready\n", service.DefaultBridgeName) 179 | return nil 180 | } 181 | var interfaceName string 182 | interfaceName, err = framework.SelectEthernetInterface("interface to bridge", true) 183 | if err != nil { 184 | return 185 | } 186 | fmt.Printf("try link interface '%s' to bridge '%s', input 'yes' to confirm:", interfaceName, service.DefaultBridgeName) 187 | var input string 188 | _, err = fmt.Scanln(&input) 189 | if err != nil { 190 | return 191 | } 192 | if "yes" != input { 193 | return errors.New("user interrupted") 194 | } 195 | if err = linkBridge(interfaceName, service.DefaultBridgeName); err != nil { 196 | return 197 | } 198 | var errorMessage []byte 199 | { 200 | //disable & stop network manager 201 | var cmd = exec.Command("systemctl", "stop", "NetworkManager") 202 | if errorMessage, err = cmd.CombinedOutput(); err != nil { 203 | fmt.Printf("warning: stop networkmanager fail: %s", errorMessage) 204 | } else { 205 | fmt.Println("network manager stopped") 206 | } 207 | cmd = exec.Command("systemctl", "disable", "NetworkManager") 208 | if errorMessage, err = cmd.CombinedOutput(); err != nil { 209 | fmt.Printf("warning: disable networkmanager fail: %s", errorMessage) 210 | } else { 211 | fmt.Println("network manager disabled") 212 | } 213 | } 214 | { 215 | //restart network 216 | var cmd = 
exec.Command("systemctl", "stop", "network") 217 | if errorMessage, err = cmd.CombinedOutput(); err != nil { 218 | fmt.Printf("warning: stop network service fail: %s", errorMessage) 219 | } else { 220 | fmt.Println("network service stopped") 221 | } 222 | cmd = exec.Command("systemctl", "start", "network") 223 | if errorMessage, err = cmd.CombinedOutput(); err != nil { 224 | fmt.Printf("warning: start network service fail: %s", errorMessage) 225 | return 226 | } else { 227 | fmt.Println("network service restarted") 228 | } 229 | } 230 | return 231 | } 232 | 233 | func hasDefaultBridge() bool { 234 | list, err := net.Interfaces() 235 | if err != nil { 236 | fmt.Printf("fetch interface fail: %s", err.Error()) 237 | return false 238 | } 239 | for _, i := range list { 240 | if service.DefaultBridgeName == i.Name { 241 | return true 242 | } 243 | } 244 | return false 245 | } 246 | 247 | func linkBridge(interfaceName, bridgeName string) (err error) { 248 | const ( 249 | ScriptsPath = "/etc/sysconfig/network-scripts" 250 | ScriptPrefix = "ifcfg" 251 | ) 252 | var interfaceScript = filepath.Join(ScriptsPath, fmt.Sprintf("%s-%s", ScriptPrefix, interfaceName)) 253 | var bridgeScript = filepath.Join(ScriptsPath, fmt.Sprintf("%s-%s", ScriptPrefix, bridgeName)) 254 | interfaceConfig, err := readInterfaceConfig(interfaceScript) 255 | if err != nil { 256 | return 257 | } 258 | bridgeConfig, err := generateBridgeConfig(bridgeName) 259 | if err != nil { 260 | return 261 | } 262 | err = migrateInterfaceConfig(bridgeName, &interfaceConfig, &bridgeConfig) 263 | if err != nil { 264 | return 265 | } 266 | err = writeInterfaceConfig(interfaceConfig, interfaceScript) 267 | if err != nil { 268 | return 269 | } 270 | fmt.Printf("interface script %s updated\n", interfaceScript) 271 | err = writeInterfaceConfig(bridgeConfig, bridgeScript) 272 | if err != nil { 273 | return 274 | } 275 | fmt.Printf("bridge script %s generated\n", bridgeScript) 276 | link, err := netlink.LinkByName(interfaceName) 277 | if err != nil { 278 | return 279 | } 280 | if err = netlink.LinkSetDown(link); err != nil { 281 | fmt.Printf("warning:set down link fail: %s\n", err.Error()) 282 | } 283 | var bridgeAttrs = netlink.NewLinkAttrs() 284 | bridgeAttrs.Name = bridgeName 285 | var bridge = &netlink.Bridge{LinkAttrs: bridgeAttrs} 286 | if err = netlink.LinkAdd(bridge); err != nil { 287 | return 288 | } 289 | fmt.Printf("new bridge %s created\n", bridgeName) 290 | if err = netlink.LinkSetMaster(link, bridge); err != nil { 291 | return 292 | } 293 | fmt.Printf("link %s added to bridge %s\n", interfaceName, bridgeName) 294 | if err = netlink.LinkSetUp(bridge); err != nil { 295 | return 296 | } 297 | fmt.Printf("bridge %s up\n", bridgeName) 298 | if err = netlink.LinkSetUp(link); err != nil { 299 | return 300 | } 301 | fmt.Printf("link %s up\n", interfaceName) 302 | return nil 303 | } 304 | 305 | type InterfaceConfig struct { 306 | Params map[string]string 307 | } 308 | 309 | func generateBridgeConfig(bridgeName string) (config InterfaceConfig, err error) { 310 | config.Params = map[string]string{ 311 | "NM_CONTROLLED": "no", 312 | "DELAY": "0", 313 | "TYPE": "Bridge", 314 | "ONBOOT": "yes", 315 | "ZONE": "public", 316 | } 317 | config.Params["NAME"] = bridgeName 318 | config.Params["DEVICE"] = bridgeName 319 | return config, nil 320 | } 321 | func readInterfaceConfig(filepath string) (config InterfaceConfig, err error) { 322 | const ( 323 | ValidDataCount = 2 324 | DataName = 0 325 | DataValue = 1 326 | ) 327 | file, err := os.Open(filepath) 328 | 
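//release the script file descriptor when parsing is done; Close on a nil *os.File only returns an error instead of panicking, so deferring before the error check is safe
defer file.Close()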
if err != nil { 329 | return 330 | } 331 | config.Params = map[string]string{} 332 | var scanner = bufio.NewScanner(file) 333 | var lineIndex = 0 334 | for scanner.Scan() { 335 | var line = scanner.Text() 336 | var data = strings.Split(line, "=") 337 | lineIndex++ 338 | if ValidDataCount != len(data) { 339 | fmt.Printf("ignore line %d of '%s': %s\n", lineIndex, filepath, line) 340 | continue 341 | } 342 | config.Params[data[DataName]] = data[DataValue] 343 | } 344 | fmt.Printf("%d params loaded from '%s'\n", len(config.Params), filepath) 345 | return config, nil 346 | } 347 | 348 | func writeInterfaceConfig(config InterfaceConfig, filepath string) (err error) { 349 | file, err := os.Create(filepath) 350 | if err != nil { 351 | return err 352 | } 353 | for name, value := range config.Params { 354 | fmt.Fprintf(file, "%s=%s\n", name, value) 355 | } 356 | return file.Close() 357 | } 358 | 359 | func migrateInterfaceConfig(bridgeName string, ifcfg, brcfg *InterfaceConfig) (err error) { 360 | const ( 361 | NMControl = "NM_CONTROLLED" 362 | BRIDGE = "BRIDGE" 363 | ONBOOT = "ONBOOT" 364 | ) 365 | var migrateList = []string{ 366 | "BOOTPROTO", "PREFIX", "IPADDR", "GATEWAY", "NETMASK", "DNS1", "DNS2", "DOMAIN", 367 | "DEFROUTE", "PEERDNS", "PEERROUTES", "IPV4_FAILURE_FATAL", "IPV6_FAILURE_FATAL", "PROXY_METHOD", 368 | "IPV6ADDR", "IPV6_DEFAULTGW", "IPV6_AUTOCONF", "IPV6_DEFROUTE", "IPV6INIT", "IPV6_ADDR_GEN_MODE", 369 | } 370 | 371 | for _, name := range migrateList { 372 | if value, exists := ifcfg.Params[name]; exists { 373 | brcfg.Params[name] = value 374 | delete(ifcfg.Params, name) 375 | } 376 | } 377 | ifcfg.Params[NMControl] = "no" 378 | ifcfg.Params[BRIDGE] = bridgeName 379 | ifcfg.Params[ONBOOT] = "yes" 380 | return nil 381 | } 382 | 383 | func main() { 384 | log.SetFlags(log.Ldate | log.Lmicroseconds) 385 | framework.ProcessDaemon(ExecuteName, generateConfigure, createDaemon) 386 | } 387 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/project-nano/cell 2 | 3 | go 1.19 4 | 5 | replace ( 6 | github.com/project-nano/cell/service => ./src/service 7 | github.com/project-nano/cell/task => ./src/task 8 | github.com/project-nano/framework => ../framework 9 | ) 10 | 11 | require ( 12 | github.com/libvirt/libvirt-go v7.4.0+incompatible 13 | github.com/project-nano/cell/service v0.0.0-00010101000000-000000000000 14 | github.com/project-nano/cell/task v0.0.0-00010101000000-000000000000 15 | github.com/project-nano/framework v1.0.9 16 | github.com/project-nano/sonar v0.0.0-20190628085230-df7942628d6f 17 | github.com/shirou/gopsutil v3.21.11+incompatible 18 | github.com/vishvananda/netlink v1.1.0 19 | ) 20 | 21 | require ( 22 | github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 // indirect 23 | github.com/go-ole/go-ole v1.3.0 // indirect 24 | github.com/julienschmidt/httprouter v1.3.0 // indirect 25 | github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect 26 | github.com/klauspost/cpuid/v2 v2.2.5 // indirect 27 | github.com/klauspost/reedsolomon v1.11.8 // indirect 28 | github.com/krolaw/dhcp4 v0.0.0-20190909130307-a50d88189771 // indirect 29 | github.com/pkg/errors v0.9.1 // indirect 30 | github.com/sevlyar/go-daemon v0.1.6 // indirect 31 | github.com/stretchr/testify v1.6.1 // indirect 32 | github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161 // indirect 33 | github.com/templexxx/xor 
v0.0.0-20191217153810-f85b25db303b // indirect 34 | github.com/tjfoc/gmsm v1.4.1 // indirect 35 | github.com/tklauser/go-sysconf v0.3.12 // indirect 36 | github.com/tklauser/numcpus v0.6.1 // indirect 37 | github.com/vishvananda/netns v0.0.4 // indirect 38 | github.com/xtaci/kcp-go v5.4.20+incompatible // indirect 39 | github.com/yusufpapurcu/wmi v1.2.3 // indirect 40 | golang.org/x/crypto v0.14.0 // indirect 41 | golang.org/x/net v0.17.0 // indirect 42 | golang.org/x/sys v0.13.0 // indirect 43 | ) 44 | -------------------------------------------------------------------------------- /src/service/dhcp_service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "net" 5 | "github.com/project-nano/framework" 6 | "github.com/krolaw/dhcp4" 7 | "log" 8 | "time" 9 | "fmt" 10 | "strings" 11 | ) 12 | 13 | type clientLease struct { 14 | IP string 15 | Netmask string 16 | Gateway string 17 | DNS []string 18 | Expire time.Time 19 | Options dhcp4.Options 20 | } 21 | 22 | type DHCPHandler struct { 23 | operates chan dhcpOperate 24 | serverIP net.IP 25 | } 26 | 27 | type operateType int 28 | 29 | const ( 30 | opAllocate = iota 31 | opUpdate 32 | opDeallocate 33 | opChange 34 | ) 35 | 36 | type dhcpOperate struct { 37 | Type operateType 38 | MAC string 39 | IP string 40 | Gateway string 41 | DNS []string 42 | RespChan chan operateResult 43 | } 44 | 45 | type operateResult struct { 46 | Error error 47 | IP net.IP 48 | Options dhcp4.Options 49 | } 50 | 51 | type DHCPService struct { 52 | dhcpConn *net.UDPConn 53 | handler *DHCPHandler 54 | operates chan dhcpOperate 55 | leases map[string]clientLease //key = HW address 56 | networkModule NetworkModule 57 | runner *framework.SimpleRunner 58 | } 59 | 60 | const ( 61 | leaseTimeout = 12 * time.Hour 62 | confirmDuration = 3 * time.Minute 63 | checkInterval = 1 * time.Minute 64 | ) 65 | 66 | func CreateDHCPService(netModule *NetworkManager) (service* DHCPService, err error) { 67 | const ( 68 | DHCPAddress = ":67" 69 | ) 70 | listenAddress, err := net.ResolveUDPAddr("udp", DHCPAddress) 71 | if err != nil{ 72 | return 73 | } 74 | serverAddress, err := GetCurrentIPOfDefaultBridge() 75 | if err != nil{ 76 | return 77 | } 78 | serverIP, err := stringToIPv4(serverAddress) 79 | if err != nil{ 80 | return 81 | } 82 | service = &DHCPService{} 83 | service.operates = make(chan dhcpOperate, 1 << 10) 84 | service.networkModule = netModule 85 | service.leases = map[string]clientLease{} 86 | netModule.OnAddressUpdated = service.updateServer 87 | service.handler = &DHCPHandler{service.operates, serverIP} 88 | service.dhcpConn, err = net.ListenUDP("udp", listenAddress) 89 | if err != nil{ 90 | err = fmt.Errorf("listen on DHCP port fail, please disable dnsmasq or other DHCP service.\nmessage: %s", err.Error()) 91 | return 92 | } 93 | service.runner = framework.CreateSimpleRunner(service.Routine) 94 | return 95 | } 96 | 97 | func (service *DHCPService) updateServer(gateway string, dns []string){ 98 | service.operates <- dhcpOperate{Type:opChange, Gateway:gateway, DNS:dns} 99 | } 100 | 101 | func (service *DHCPService) Start() error{ 102 | return service.runner.Start() 103 | } 104 | 105 | func (service *DHCPService) Stop() error{ 106 | return service.runner.Stop() 107 | } 108 | 109 | func (service *DHCPService) Routine(c framework.RoutineController) { 110 | log.Println(" service started") 111 | go func() { 112 | var err = dhcp4.Serve(service.dhcpConn, service.handler) 113 | log.Printf(" handler stopped: %s", 
err.Error()) 114 | }() 115 | var checkTicker = time.NewTicker(checkInterval) 116 | for !c.IsStopping() { 117 | select { 118 | case <-c.GetNotifyChannel(): 119 | c.SetStopping() 120 | service.dhcpConn.Close() 121 | case op := <-service.operates: 122 | service.handleOperate(op) 123 | case <-checkTicker.C: 124 | service.clearExpiredLeases() 125 | } 126 | } 127 | c.NotifyExit() 128 | log.Println(" service stopped") 129 | } 130 | 131 | func (service *DHCPService) clearExpiredLeases(){ 132 | var now = time.Now() 133 | var expired []string 134 | for macAddress, lease := range service.leases{ 135 | if lease.Expire.Before(now){ 136 | expired = append(expired, macAddress) 137 | } 138 | } 139 | for _, mac := range expired{ 140 | log.Printf(" release expired lease for MAC '%s'", mac) 141 | delete(service.leases, mac) 142 | } 143 | } 144 | 145 | func (service *DHCPService) handleOperate(op dhcpOperate){ 146 | var err error 147 | switch op.Type { 148 | case opAllocate: 149 | var macAddress = op.MAC 150 | if _, exists := service.leases[macAddress]; exists{ 151 | delete(service.leases, macAddress) 152 | log.Printf(" warning: previous lease for MAC '%s' released", macAddress) 153 | } 154 | var respChan = make(chan NetworkResult, 1) 155 | service.networkModule.GetAddressByHWAddress(macAddress, respChan) 156 | var result = <- respChan 157 | if result.Error != nil{ 158 | err = result.Error 159 | op.RespChan <- operateResult{Error:err} 160 | return 161 | } 162 | var newLease = clientLease{} 163 | clientIP, clientNet, err := net.ParseCIDR(result.Internal) 164 | if err != nil{ 165 | err = fmt.Errorf("parse internal address fail: %s", err.Error()) 166 | op.RespChan <- operateResult{Error:err} 167 | return 168 | } 169 | newLease.IP = clientIP.String() 170 | newLease.Gateway = result.Gateway 171 | newLease.DNS = result.DNS 172 | //must confirm in duration 173 | newLease.Expire = time.Now().Add(confirmDuration) 174 | var netMask = clientNet.Mask 175 | newLease.Netmask = net.IPv4(netMask[0], netMask[1], netMask[2], netMask[3]).String() 176 | gatewayIP, err := stringToIPv4(result.Gateway) 177 | if err != nil{ 178 | err = fmt.Errorf("parse gateway fail: %s", err.Error()) 179 | op.RespChan <- operateResult{Error:err} 180 | return 181 | } 182 | var dnsBytes []byte 183 | for _, dns := range result.DNS{ 184 | ip, err := stringToIPv4(dns) 185 | if err != nil{ 186 | err = fmt.Errorf("parse DNS fail: %s", err.Error()) 187 | op.RespChan <- operateResult{Error:err} 188 | return 189 | } 190 | dnsBytes = append(dnsBytes, ip...) 
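//dhcp4.OptionDomainNameServer expects all server addresses concatenated as raw 4-byte IPv4 values, so each parsed address is appended to one byte slice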
191 | } 192 | newLease.Options = dhcp4.Options{ 193 | dhcp4.OptionSubnetMask: netMask, 194 | dhcp4.OptionRouter: gatewayIP, 195 | dhcp4.OptionDomainNameServer: dnsBytes, 196 | } 197 | service.leases[macAddress] = newLease 198 | log.Printf(" allocate new lease for MAC '%s', address %s/%s, gateway %s, dns %s, expire '%s'", 199 | macAddress, newLease.IP, newLease.Netmask, newLease.Gateway, strings.Join(newLease.DNS, "/"), 200 | newLease.Expire.Format(TimeFormatLayout)) 201 | op.RespChan <- operateResult{IP: clientIP, Options: newLease.Options} 202 | return 203 | 204 | case opUpdate: 205 | var now = time.Now() 206 | var macAddress = op.MAC 207 | var requestIP = op.IP 208 | if lease, exists := service.leases[macAddress]; exists{ 209 | if requestIP != lease.IP{ 210 | err = fmt.Errorf("request IP '%s' different from allocated '%s' for MAC '%s'", 211 | requestIP, lease.IP, macAddress) 212 | op.RespChan <- operateResult{Error:err} 213 | return 214 | } 215 | if now.After(lease.Expire){ 216 | err = fmt.Errorf("lease of MAC '%s' expired ('%s')", macAddress, lease.Expire.Format(TimeFormatLayout)) 217 | delete(service.leases, macAddress) 218 | op.RespChan <- operateResult{Error:err} 219 | return 220 | } 221 | //update 222 | lease.Expire = now.Add(leaseTimeout) 223 | service.leases[macAddress] = lease 224 | log.Printf(" lease of MAC '%s' updated for request", macAddress) 225 | op.RespChan <- operateResult{Options: lease.Options} 226 | return 227 | }else{ 228 | err = fmt.Errorf("no lease for MAC '%s'", macAddress) 229 | op.RespChan <- operateResult{Error:err} 230 | return 231 | } 232 | case opDeallocate: 233 | var macAddress = op.MAC 234 | if _, exists := service.leases[macAddress]; exists{ 235 | log.Printf(" lease of MAC '%s' released", macAddress) 236 | delete(service.leases, macAddress) 237 | }else{ 238 | log.Printf(" warning: no lease for MAC '%s' could be released", macAddress) 239 | } 240 | return 241 | case opChange: 242 | gatewayIP, err := stringToIPv4(op.Gateway) 243 | if err != nil{ 244 | log.Printf(" parse gateway fail when updating servers: %s", err.Error()) 245 | return 246 | } 247 | var dnsBytes []byte 248 | for _, dns := range op.DNS{ 249 | ip, err := stringToIPv4(dns) 250 | if err != nil{ 251 | log.Printf(" parse DNS fail when updating servers: %s", err.Error()) 252 | return 253 | } 254 | dnsBytes = append(dnsBytes, ip...) 
255 | } 256 | for mac, lease := range service.leases{ 257 | lease.Options[dhcp4.OptionRouter] = gatewayIP 258 | lease.Options[dhcp4.OptionDomainNameServer] = dnsBytes 259 | service.leases[mac] = lease 260 | log.Printf(" servers of lease for '%s' changed", mac) 261 | } 262 | default: 263 | log.Printf(" ignore invalid op type %d", op.Type) 264 | } 265 | } 266 | 267 | func (handler *DHCPHandler) ServeDHCP(req dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) (packet dhcp4.Packet){ 268 | 269 | var err error 270 | switch msgType { 271 | case dhcp4.Discover: 272 | var macAddress = req.CHAddr().String() 273 | var respChan = make(chan operateResult, 1) 274 | handler.operates <- dhcpOperate{Type:opAllocate, MAC:macAddress, RespChan:respChan} 275 | var result = <- respChan 276 | if result.Error != nil{ 277 | err = result.Error 278 | log.Printf(" allocate lease for MAC '%s' fail: %s", macAddress, err.Error()) 279 | return 280 | } 281 | return dhcp4.ReplyPacket(req, dhcp4.Offer, handler.serverIP, result.IP, leaseTimeout, 282 | result.Options.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])) 283 | 284 | case dhcp4.Request: 285 | if requestServer, exists := options[dhcp4.OptionServerIdentifier]; exists{ 286 | //check request server 287 | var serverIP = net.IP(requestServer) 288 | if !serverIP.Equal(handler.serverIP){ 289 | //log.Printf(" ignore request for different server '%s'", serverIP.String()) 290 | return 291 | } 292 | } 293 | var requestIP = net.IP(options[dhcp4.OptionRequestedIPAddress]) 294 | if requestIP == nil { 295 | requestIP = net.IP(req.CIAddr()) 296 | } 297 | var macAddress = req.CHAddr().String() 298 | var respChan = make(chan operateResult, 1) 299 | handler.operates <- dhcpOperate{Type:opUpdate, MAC:macAddress, IP: requestIP.String(), RespChan:respChan} 300 | var result = <- respChan 301 | if result.Error != nil{ 302 | err = result.Error 303 | log.Printf(" update lease for MAC '%s' fail: %s", macAddress, err.Error()) 304 | return dhcp4.ReplyPacket(req, dhcp4.NAK, handler.serverIP, nil, 0, nil) 305 | } 306 | return dhcp4.ReplyPacket(req, dhcp4.ACK, handler.serverIP, requestIP, leaseTimeout, 307 | result.Options.SelectOrderOrAll(options[dhcp4.OptionParameterRequestList])) 308 | case dhcp4.Release, dhcp4.Decline: 309 | var macAddress = req.CHAddr().String() 310 | handler.operates <- dhcpOperate{Type:opDeallocate, MAC:macAddress} 311 | return 312 | default: 313 | //log.Printf(" ignore message type %d from %s", msgType, req.CHAddr().String()) 314 | } 315 | return 316 | } 317 | 318 | 319 | func stringToIPv4(value string) (ip net.IP, err error){ 320 | ip = net.ParseIP(value) 321 | if ip == nil{ 322 | err = fmt.Errorf("invalid address '%s'", value) 323 | return 324 | } 325 | ip = ip.To4() 326 | if ip == nil{ 327 | err = fmt.Errorf("invalid IPv4 address '%s'", value) 328 | return 329 | } 330 | return ip, nil 331 | } 332 | 333 | 334 | -------------------------------------------------------------------------------- /src/service/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/project-nano/cell/service 2 | 3 | go 1.13 4 | 5 | replace github.com/project-nano/framework => ../../../framework 6 | 7 | require ( 8 | github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 9 | github.com/julienschmidt/httprouter v1.3.0 10 | github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect 11 | github.com/klauspost/cpuid v1.2.3 // indirect 12 | github.com/krolaw/dhcp4 v0.0.0-20190909130307-a50d88189771 13 | 
github.com/libvirt/libvirt-go v7.4.0+incompatible 14 | github.com/pkg/errors v0.9.1 15 | github.com/project-nano/framework v1.0.9 16 | github.com/project-nano/sonar v0.0.0-20190628085230-df7942628d6f // indirect 17 | github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161 // indirect 18 | github.com/templexxx/xor v0.0.0-20191217153810-f85b25db303b // indirect 19 | github.com/xtaci/kcp-go v5.4.20+incompatible // indirect 20 | github.com/xtaci/lossyconn v0.0.0-20200209145036-adba10fffc37 // indirect 21 | golang.org/x/net v0.17.0 // indirect 22 | ) 23 | -------------------------------------------------------------------------------- /src/service/guest_initiator.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "github.com/amoghe/go-crypt" 8 | "github.com/julienschmidt/httprouter" 9 | "github.com/project-nano/framework" 10 | "log" 11 | "math/rand" 12 | "mime/multipart" 13 | "net" 14 | "net/http" 15 | "net/textproto" 16 | "strings" 17 | "time" 18 | ) 19 | 20 | type GuestInitiator struct { 21 | listener net.Listener 22 | listenAddress string 23 | listenDevice string 24 | server http.Server 25 | eventChan chan InstanceStatusChangedEvent 26 | insManager *InstanceManager 27 | networkModule NetworkModule 28 | supportedInterfaces []string 29 | generator *rand.Rand 30 | runner *framework.SimpleRunner 31 | } 32 | 33 | const ( 34 | InitiatorMagicPort = 25469 35 | ListenerName = "initiator" 36 | ) 37 | 38 | func CreateInitiator(networkModule NetworkModule, instanceManager *InstanceManager) (initiator *GuestInitiator, err error) { 39 | const ( 40 | DefaultQueueSize = 1 << 10 41 | ) 42 | magicIP, err := GetCurrentIPOfDefaultBridge() 43 | if err != nil{ 44 | log.Printf(" get default ip fail: %s", err.Error()) 45 | return 46 | } 47 | initiator = &GuestInitiator{} 48 | initiator.listenDevice = networkModule.GetBridgeName() 49 | initiator.listenAddress = fmt.Sprintf("%s:%d", magicIP, InitiatorMagicPort) 50 | initiator.generator = rand.New(rand.NewSource(time.Now().UnixNano())) 51 | if err = initiator.listenMagicAddress(); err != nil { 52 | return 53 | } 54 | if err = initiator.prepareServer();err != nil{ 55 | return 56 | } 57 | initiator.eventChan = make(chan InstanceStatusChangedEvent, DefaultQueueSize) 58 | initiator.insManager = instanceManager 59 | initiator.networkModule = networkModule 60 | initiator.runner = framework.CreateSimpleRunner(initiator.Routine) 61 | return initiator, nil 62 | } 63 | 64 | func (initiator *GuestInitiator) listenMagicAddress() (err error) { 65 | const( 66 | Protocol = "tcp" 67 | ) 68 | initiator.listener, err = net.Listen(Protocol, initiator.listenAddress) 69 | if err != nil{ 70 | return err 71 | } 72 | log.Printf(" listen at %s success", initiator.listenAddress) 73 | return nil 74 | } 75 | 76 | func (initiator *GuestInitiator) prepareServer() (err error) { 77 | var router = httprouter.New() 78 | var noHandler = NotFoundHandler{} 79 | router.NotFound = &noHandler 80 | 81 | router.GET("/:version/:id/meta-data", initiator.getMetaData) 82 | router.GET("/:version/:id/user-data", initiator.getUserData) 83 | 84 | initiator.server.Addr = initiator.listenAddress 85 | initiator.server.Handler = router 86 | 87 | initiator.supportedInterfaces = []string{"hostname", "instance-id", "local-hostname", "local-ipv4", "public-ipv4"} 88 | return nil 89 | } 90 | 91 | func (initiator *GuestInitiator) Start() error{ 92 | return initiator.runner.Start() 93 | } 94 | 95 | func 
(initiator *GuestInitiator) Stop() error{ 96 | return initiator.runner.Stop() 97 | } 98 | 99 | func (initiator *GuestInitiator) Routine(c framework.RoutineController) { 100 | initiator.insManager.AddEventListener(ListenerName, initiator.eventChan) 101 | go initiator.serveCloudInit() 102 | for !c.IsStopping(){ 103 | select { 104 | case <- c.GetNotifyChannel(): 105 | c.SetStopping() 106 | var ctx = context.TODO() 107 | var err = initiator.server.Shutdown(ctx) 108 | if err != nil{ 109 | log.Printf(" shutdown http server: %s", err.Error()) 110 | } 111 | case event := <- initiator.eventChan: 112 | initiator.handleGuestEvent(event) 113 | } 114 | } 115 | initiator.insManager.RemoveEventListener(ListenerName) 116 | //initiator.removeMagicAddress(initiator.listenDevice, initiator.magicNetwork) 117 | c.NotifyExit() 118 | } 119 | 120 | func (initiator *GuestInitiator) serveCloudInit(){ 121 | log.Println(" http server started") 122 | var err = initiator.server.Serve(initiator.listener) 123 | if err != nil{ 124 | log.Printf(" http server finished: %s", err.Error()) 125 | } 126 | } 127 | 128 | func (initiator *GuestInitiator) getMetaData(w http.ResponseWriter, r *http.Request, params httprouter.Params){ 129 | var err error 130 | var version = params.ByName("version") 131 | var guestID = params.ByName("id") 132 | 133 | defer func() { 134 | if nil != err{ 135 | w.WriteHeader(http.StatusInternalServerError) 136 | w.Write([]byte(err.Error())) 137 | } 138 | }() 139 | 140 | log.Printf(" query metadata from %s, version %s", r.RemoteAddr, version) 141 | var respChan = make(chan InstanceResult, 1) 142 | initiator.insManager.GetInstanceConfig(guestID, respChan) 143 | var result = <- respChan 144 | if result.Error != nil{ 145 | log.Printf(" get meta data for guest '%s' fail: %s", guestID, result.Error.Error()) 146 | err = result.Error 147 | return 148 | } 149 | 150 | w.WriteHeader(http.StatusOK) 151 | var ins = result.Instance 152 | fmt.Fprintf(w, "instance-id: %s\n", ins.ID) 153 | var hostname = strings.TrimPrefix(ins.Name, fmt.Sprintf("%s.", ins.Group)) 154 | fmt.Fprintf(w, "hostname: %s\n", hostname) 155 | if AddressAllocationCloudInit == ins.AddressAllocation{ 156 | //allocate using Cloud-Init 157 | var respChan = make(chan NetworkResult, 1) 158 | initiator.networkModule.GetCurrentConfig(respChan) 159 | var result = <- respChan 160 | if nil != result.Error{ 161 | log.Printf(" get current network config fail: %s", result.Error.Error()) 162 | err = result.Error 163 | return 164 | } 165 | 166 | //for internal interface only 167 | if "" == ins.InternalAddress{ 168 | log.Printf(" no internal address allocated for guest '%s'", ins.Name) 169 | err = fmt.Errorf(" no internal address allocated for guest '%s'", ins.Name) 170 | return 171 | } 172 | var gatewayIP = result.Gateway 173 | //var internalIP net.IP 174 | //var internalMask *net.IPNet 175 | //if internalIP, internalMask, err = net.ParseCIDR(ins.InternalAddress); err != nil{ 176 | // log.Printf(" invalid internal address '%s' allocated for guest '%s'", ins.InternalAddress, ins.Name) 177 | // err = fmt.Errorf(" invalid internal address '%s' allocated for guest '%s'", ins.InternalAddress, ins.Name) 178 | // return 179 | //} 180 | //var netmask string 181 | //if netmask, err = ipv4MaskToString(internalMask.Mask); err != nil{ 182 | // err = fmt.Errorf("convert netmask fail: %s", err.Error()) 183 | // log.Printf(" generate meta for guest '%s' fail: %s", ins.Name, err.Error()) 184 | // return 185 | //} 186 | //network-interfaces: | 187 | //iface eth0 inet static 188 | 
//address 192.168.1.10
189 |         //network 192.168.1.0
190 |         //netmask 255.255.255.0
191 |         //broadcast 192.168.1.255
192 |         //gateway 192.168.1.254
193 |         //dns-nameservers xxx.xxx.xxx
194 |         //fmt.Fprint(w, "network-interfaces: |\niface eth0 inet static\n")
195 |         //fmt.Fprintf(w, "address %s\n", internalIP.String())
196 |         //fmt.Fprintf(w, "network %s\n", internalMask.IP.String())
197 |         //fmt.Fprintf(w, "netmask %s\n", netmask)
198 |         //fmt.Fprintf(w, "gateway %s\n", gatewayIP)
199 |         //fmt.Fprintf(w, "dns-nameservers %s\n", strings.Join(result.DNS, " "))
200 | 
201 |         //
202 |         //network:
203 |         // version: 1
204 |         // config:
205 |         // - type: physical
206 |         // name: eth0
207 |         // mac_address: '00:11:22:33:44:55'
208 |         // subnets:
209 |         // - type: static
210 |         // address: 192.168.23.14/24
211 |         // gateway: 192.168.23.1
212 |         // - type: nameserver
213 |         // address:
214 |         // - 192.168.23.2
215 |         // - 8.8.8.8
216 |         fmt.Fprint(w, "network:\n version: 1\n config:\n")
217 |         fmt.Fprint(w, " - type: physical\n")
218 |         fmt.Fprint(w, " name: eth0\n")
219 |         fmt.Fprintf(w, " mac_address: '%s'\n", ins.HardwareAddress)
220 |         fmt.Fprint(w, " subnets:\n")
221 |         fmt.Fprint(w, " - type: static\n")
222 |         fmt.Fprintf(w, " address: %s\n", ins.InternalAddress)
223 |         fmt.Fprintf(w, " gateway: %s\n", gatewayIP)
224 |         fmt.Fprint(w, " - type: nameserver\n")
225 |         fmt.Fprint(w, " address:\n")
226 |         for _, dns := range result.DNS{
227 |             fmt.Fprintf(w, " - %s\n", dns)
228 |         }
229 |     }
230 | }
231 | 
232 | func ipv4MaskToString(mask net.IPMask) (s string, err error){
233 |     if net.IPv4len != len(mask){
234 |         err = fmt.Errorf("invalid mask length %d", len(mask))
235 |         return
236 |     }
237 |     s = fmt.Sprintf("%d.%d.%d.%d", mask[0], mask[1], mask[2], mask[3])
238 |     return
239 | }
240 | 
241 | func (initiator *GuestInitiator) getUserData(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
242 |     var version = params.ByName("version")
243 |     var guestID = params.ByName("id")
244 | 
245 |     log.Printf(" query user data from %s, version %s", r.RemoteAddr, version)
246 |     var respChan = make(chan InstanceResult, 1)
247 |     initiator.insManager.GetInstanceConfig(guestID, respChan)
248 |     var result = <- respChan
249 |     if result.Error != nil{
250 |         log.Printf(" get user data for guest '%s' fail: %s", guestID, result.Error.Error())
251 |         w.WriteHeader(http.StatusInternalServerError)
252 |         w.Write([]byte(result.Error.Error()))
253 |         return
254 |     }
255 |     //todo: modified flag (password/disks)
256 |     var guest = result.Instance
257 |     if !guest.Initialized{
258 |         data, err := initiator.buildInitialConfig(guest.GuestConfig)
259 |         if err != nil{
260 |             log.Printf(" build config for guest '%s' fail: %s", guestID, err.Error())
261 |             w.WriteHeader(http.StatusInternalServerError)
262 |             w.Write([]byte(err.Error()))
263 |             return
264 |         }
265 | 
266 |         var partHeader = make(textproto.MIMEHeader)
267 |         partHeader.Add("Content-Type", "text/cloud-config")
268 |         partHeader.Add("Content-Disposition", "attachment; filename=\"cloud-config.txt\"")
269 |         partHeader.Add("MIME-Version", "1.0")
270 |         partHeader.Add("Content-Transfer-Encoding", "7bit")
271 | 
272 |         var multiWriter = multipart.NewWriter(w)
273 |         w.Write([]byte(fmt.Sprintf("Content-Type: multipart/mixed; boundary=\"%s\"\n", multiWriter.Boundary())))
274 |         w.Write([]byte("MIME-Version: 1.0\n\n"))
275 | 
276 |         defer multiWriter.Close()
277 |         partWriter, err := multiWriter.CreatePart(partHeader)
278 |         if err != nil{
279 |             log.Printf(" build config part for guest '%s' fail: %s", guestID, err.Error())
280 |             w.WriteHeader(http.StatusInternalServerError)
281 |             w.Write([]byte(err.Error()))
282 |             return
283 |         }
284 | 
285 |         partWriter.Write([]byte(data))
286 |         var respChan = make(chan error, 1)
287 |         initiator.insManager.FinishGuestInitialize(guestID, respChan)
288 |         if err = <- respChan; err != nil{
289 |             log.Printf(" warning: finish guest '%s' initialize fail: %s", guestID, err.Error())
290 |         }else{
291 |             log.Printf(" guest '%s' initialized", guestID)
292 |         }
293 |     }
294 | 
295 | 
296 | }
297 | 
298 | func (initiator *GuestInitiator) handleGuestEvent(event InstanceStatusChangedEvent){
299 | 
300 | }
301 | 
302 | type NotFoundHandler struct {
303 | 
304 | }
305 | 
306 | func (handler *NotFoundHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
307 |     log.Printf(" ignore %s %s from %s", r.Method, r.URL.RawQuery, r.RemoteAddr)
308 | }
309 | 
310 | func (initiator *GuestInitiator)buildInitialConfig(config GuestConfig) (data string, err error) {
311 |     var os = config.Template.OperatingSystem
312 |     switch os {
313 |     case SystemNameLinux:
314 |         return initiator.buildLinuxInitialization(config)
315 |     default:
316 |         err = fmt.Errorf("unsupported operating system '%s'", os)
317 |         return "", err
318 |     }
319 | }
320 | 
321 | func (initiator *GuestInitiator) buildLinuxInitialization(config GuestConfig) (data string, err error) {
322 |     const (
323 |         AdminGroup            = "wheel"
324 |         StartDeviceCharacter  = 0x61 //'a'
325 |         DevicePrefixSCSI      = "sd"
326 |         VolumeGroupName       = "nano"
327 |         DataLogicalVolumeName = "data"
328 |         SystemPartitionIndex  = 2
329 |         RootVolume            = "/dev/centos/root"
330 |         SaltLength            = 8
331 |     )
332 |     var builder strings.Builder
333 |     builder.WriteString("#cloud-config\n")
334 |     if config.RootLoginEnabled{
335 |         builder.WriteString("disable_root: false\n")
336 |     }else{
337 |         builder.WriteString("disable_root: true\n")
338 |     }
339 | 
340 | 
341 |     builder.WriteString("ssh_pwauth: yes\n")
342 |     if config.AuthUser == AdminLinux{
343 |         //change default password
344 |         fmt.Fprintf(&builder, "chpasswd:\n expire: false\n list: |\n %s:%s\n\n", config.AuthUser, config.AuthSecret)
345 |     }else{
346 |         //new admin
347 |         var salt = initiator.generateSalt(SaltLength)
348 | 
349 |         hashed, err := crypt.Crypt(config.AuthSecret, fmt.Sprintf("$6$%s$", salt))
350 |         if err != nil{
351 |             return data, err
352 |         }
353 |         fmt.Fprintf(&builder, "users:\n - name: %s\n passwd: %s\n lock_passwd: false\n groups: [ %s ]\n\n", config.AuthUser, hashed, AdminGroup)
354 |     }
355 | 
356 |     builder.WriteString("bootcmd:\n")
357 |     var hostname = strings.TrimPrefix(config.Name, fmt.Sprintf("%s.", config.Group))
358 |     fmt.Fprintf(&builder, " - [ hostnamectl, set-hostname, %s ]\n", hostname)
359 | 
360 |     var mountMap = map[string]string{}
361 |     var groupDevices []string
362 |     if len(config.Disks) > 1{
363 |         //data disk available
364 |         for i := range config.Disks[1:]{
365 |             var devName = fmt.Sprintf("/dev/%s%c", DevicePrefixSCSI, StartDeviceCharacter + i + 1)//from /dev/sdb
366 |             groupDevices = append(groupDevices, devName)
367 |             fmt.Fprintf(&builder, " - [ pvcreate, %s ]\n", devName)
368 |         }
369 |         fmt.Fprintf(&builder, " - [ vgcreate, %s , %s ]\n", VolumeGroupName, strings.Join(groupDevices, ","))
370 |         //lvcreate --name data -l 100%FREE data
371 |         fmt.Fprintf(&builder, " - [ lvcreate, --name, %s, -l, 100%%FREE, %s ]\n", DataLogicalVolumeName, VolumeGroupName)
372 |         var dataVolume = fmt.Sprintf("/dev/%s/%s", VolumeGroupName, DataLogicalVolumeName)
373 |         fmt.Fprintf(&builder, " - [ mkfs.ext4, %s ]\n\n", dataVolume)
374 |         if "" ==
config.DataPath{ 375 | err = errors.New("must specify mount data path in guest") 376 | return 377 | } 378 | mountMap[dataVolume] = config.DataPath 379 | } 380 | if 0 != len(mountMap){ 381 | builder.WriteString("mounts:\n") 382 | for dev, path := range mountMap{ 383 | fmt.Fprintf(&builder, " - [ %s, %s ]\n", dev, path) 384 | } 385 | builder.WriteString("\n") 386 | } 387 | var systemDev = fmt.Sprintf("/dev/%s%c%d", DevicePrefixSCSI, StartDeviceCharacter, SystemPartitionIndex) // /dev/sda2 388 | fmt.Fprintf(&builder, "growpart:\n mode: auto\n devices: ['%s']\n ignore_growroot_disabled: false\n", systemDev) 389 | fmt.Fprintf(&builder, "runcmd:\n - [ pvresize, '%s']\n - [ lvextend, '-l', '+100%%FREE', '%s']\n - [ xfs_growfs, '%s' ]\n\n", 390 | systemDev, RootVolume, RootVolume) 391 | return builder.String(), nil 392 | } 393 | 394 | func (initiator *GuestInitiator) generateSalt(length int) (salt string){ 395 | const ( 396 | CharSet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 397 | ) 398 | var result = make([]byte, length) 399 | var n = len(CharSet) 400 | for i := 0 ; i < length; i++{ 401 | result[i] = CharSet[initiator.generator.Intn(n)] 402 | } 403 | return string(result) 404 | } -------------------------------------------------------------------------------- /src/service/network_manager_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "github.com/libvirt/libvirt-go" 5 | "os" 6 | "testing" 7 | ) 8 | 9 | const ( 10 | testPathForNetworkManager = "../../test/network" 11 | ) 12 | 13 | func clearTestEnvironmentForNetworkManager() (err error) { 14 | if _, err = os.Stat(testPathForNetworkManager); !os.IsNotExist(err) { 15 | //exists 16 | err = os.RemoveAll(testPathForNetworkManager) 17 | return 18 | } 19 | return nil 20 | } 21 | 22 | func getNetworkManagerForTest() (manager *NetworkManager, err error) { 23 | const ( 24 | libvirtURL = "qemu:///system" 25 | maxMonitorPort = 50 26 | ) 27 | if err = clearTestEnvironmentForNetworkManager(); err != nil { 28 | return 29 | } 30 | if err = os.MkdirAll(testPathForNetworkManager, 0740); err != nil { 31 | return 32 | } 33 | // initial libvirt connection 34 | var virConnect *libvirt.Connect 35 | if virConnect, err = libvirt.NewConnect(libvirtURL); err != nil { 36 | return 37 | } 38 | if manager, err = CreateNetworkManager(testPathForNetworkManager, virConnect, maxMonitorPort); err != nil { 39 | return 40 | } 41 | err = manager.Start() 42 | return 43 | } 44 | 45 | func TestNetworkManager_AllocateResource(t *testing.T) { 46 | const ( 47 | instanceID = "sample" 48 | macAddress = "00:11:22:33:44:55" 49 | internalAddress = "" 50 | externalAddress = "" 51 | ) 52 | var manager *NetworkManager 53 | var err error 54 | if manager, err = getNetworkManagerForTest(); err != nil { 55 | t.Fatalf("get network manager fail: %s", err.Error()) 56 | return 57 | } 58 | defer func() { 59 | if err = manager.Stop(); err != nil { 60 | t.Error(err) 61 | } 62 | }() 63 | //allocate 64 | respChan := make(chan NetworkResult, 1) 65 | manager.AllocateInstanceResource(instanceID, macAddress, internalAddress, externalAddress, respChan) 66 | var result = <-respChan 67 | if result.Error != nil { 68 | t.Fatalf("allocate resource fail: %s", result.Error.Error()) 69 | } 70 | var monitorPort = result.MonitorPort 71 | t.Logf("monitor port %d allocated", monitorPort) 72 | t.Log("test network manager allocate resource success") 73 | } 74 | 
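The allocation test above has no matching release path. Since the NetworkModule interface in service_interface.go declares `DeallocateAllResource(instance string, resp chan error)`, a companion teardown test could reuse the same fixture. The sketch below is illustrative only and is not part of the repository; it assumes `NetworkManager` exposes `DeallocateAllResource` with exactly that signature.

```go
// Hypothetical companion test (not in the repository): allocate a resource
// for an instance, then release everything bound to it again.
func TestNetworkManager_DeallocateResource(t *testing.T) {
	const (
		instanceID = "sample"
		macAddress = "00:11:22:33:44:55"
	)
	manager, err := getNetworkManagerForTest()
	if err != nil {
		t.Fatalf("get network manager fail: %s", err.Error())
	}
	defer func() {
		if err = manager.Stop(); err != nil {
			t.Error(err)
		}
	}()
	// allocate first, with empty internal/external addresses as in the test above
	allocChan := make(chan NetworkResult, 1)
	manager.AllocateInstanceResource(instanceID, macAddress, "", "", allocChan)
	if result := <-allocChan; result.Error != nil {
		t.Fatalf("allocate resource fail: %s", result.Error.Error())
	}
	// release all network resources bound to the instance
	releaseChan := make(chan error, 1)
	manager.DeallocateAllResource(instanceID, releaseChan)
	if err = <-releaseChan; err != nil {
		t.Fatalf("deallocate resource fail: %s", err.Error())
	}
	t.Log("test network manager deallocate resource success")
}
```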
-------------------------------------------------------------------------------- /src/service/network_utility.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/binary" 5 | "net" 6 | "fmt" 7 | "github.com/libvirt/libvirt-go" 8 | "encoding/xml" 9 | ) 10 | 11 | const ( 12 | DefaultBridgeName = "br0" 13 | DefaultNetworkName = "default" 14 | ) 15 | 16 | type NetworkUtility struct { 17 | virConnect *libvirt.Connect 18 | } 19 | 20 | type virNetworkRange struct { 21 | Start string `xml:"start,attr"` 22 | End string `xml:"end,attr"` 23 | } 24 | 25 | type virNetworkNAT struct { 26 | Port *virNetworkRange `xml:"port,omitempty"` 27 | Address *virNetworkRange `xml:"address,omitempty"` 28 | } 29 | 30 | type virNetworkForward struct { 31 | NAT *virNetworkNAT `xml:"nat,omitempty"` 32 | Mode string `xml:"mode,attr"` 33 | } 34 | 35 | type virNetworkBridge struct { 36 | Name string `xml:"name,attr"` 37 | STP string `xml:"stp,attr,omitempty"` 38 | Delay string `xml:"delay,attr,omitempty"` 39 | } 40 | 41 | type virNetworkMAC struct { 42 | Address string `xml:"address,attr,omitempty"` 43 | } 44 | 45 | type virNetworkDHCP struct { 46 | Range *virNetworkRange `xml:"range,omitempty"` 47 | } 48 | 49 | type virNetworkIP struct { 50 | Address string `xml:"address,attr"` 51 | Netmask string `xml:"netmask,attr"` 52 | DHCP *virNetworkDHCP `xml:"dhcp,omitempty"` 53 | } 54 | 55 | type virNetworkDefine struct { 56 | XMLName xml.Name `xml:"network"` 57 | Name string `xml:"name"` 58 | UUID string `xml:"uuid,omitempty"` 59 | Forward *virNetworkForward `xml:"forward,omitempty"` 60 | Bridge *virNetworkBridge `xml:"bridge,omitempty"` 61 | MAC *virNetworkMAC `xml:"mac,omitempty"` 62 | IP *virNetworkIP `xml:"ip,omitempty"` 63 | } 64 | 65 | func (util *NetworkUtility) DisableDHCPonDefaultNetwork() (changed bool, err error) { 66 | changed = false 67 | virNetwork, err := util.virConnect.LookupNetworkByName(DefaultNetworkName) 68 | if err != nil{ 69 | return changed, err 70 | } 71 | desc, err := virNetwork.GetXMLDesc(0) 72 | if err != nil{ 73 | return changed, err 74 | } 75 | var define virNetworkDefine 76 | if err = xml.Unmarshal([]byte(desc), &define); err != nil{ 77 | return changed, err 78 | } 79 | if nil == define.IP{ 80 | return changed, nil 81 | } 82 | if nil == define.IP.DHCP{ 83 | return changed, nil 84 | } 85 | 86 | activated, err := virNetwork.IsActive() 87 | if err != nil{ 88 | return changed, err 89 | } 90 | if activated { 91 | if err = virNetwork.Destroy(); err != nil{ 92 | return 93 | } 94 | } 95 | //disable dhcp 96 | define.IP.DHCP = nil 97 | newConfig, err := xml.MarshalIndent(define, "", "") 98 | if err != nil{ 99 | return 100 | } 101 | virNetwork, err = util.virConnect.NetworkDefineXML(string(newConfig)) 102 | if err != nil{ 103 | return 104 | } 105 | changed = true 106 | if activated{ 107 | if err = virNetwork.Create();err != nil{ 108 | return 109 | } 110 | } 111 | return changed,nil 112 | } 113 | 114 | var ipOfDefaultBridge = "" 115 | 116 | func GetCurrentIPOfDefaultBridge() (ip string, err error){ 117 | if "" != ipOfDefaultBridge{ 118 | return ipOfDefaultBridge, nil 119 | } 120 | dev, err := net.InterfaceByName(DefaultBridgeName) 121 | if err != nil{ 122 | return "", err 123 | } 124 | addrs, err := dev.Addrs() 125 | if err != nil{ 126 | return "", err 127 | } 128 | for _, addr := range addrs{ 129 | var CIDRString = addr.String() 130 | ip, _, err := net.ParseCIDR(CIDRString) 131 | if err != nil { 132 | return "",err 133 | } 134 | 
var ipv4 = ip.To4() 135 | if ipv4 != nil { 136 | //v4 ip only 137 | ipOfDefaultBridge = ip.String() 138 | return ipOfDefaultBridge, nil 139 | } 140 | } 141 | return "", fmt.Errorf("no ipv4 address available in %s", DefaultBridgeName) 142 | } 143 | 144 | func UInt32ToIPv4(input uint32) string{ 145 | if 0 == input{ 146 | return "" 147 | } 148 | var bytes = make([]byte, net.IPv4len) 149 | binary.BigEndian.PutUint32(bytes, input) 150 | return net.IP(bytes).String() 151 | } 152 | 153 | func IPv4ToUInt32(input string) uint32 { 154 | if "" == input{ 155 | return 0 156 | } 157 | var ip = net.ParseIP(input) 158 | return binary.BigEndian.Uint32(ip.To4()) 159 | } 160 | -------------------------------------------------------------------------------- /src/service/service_interface.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "time" 6 | ) 7 | 8 | const ( 9 | ConfigFilePerm = 0600 10 | APIRoot = "/api" 11 | APIVersion = 1 12 | ) 13 | 14 | type InstanceResult struct { 15 | Error error 16 | Instance InstanceStatus 17 | Password string 18 | User string 19 | Policy SecurityPolicy 20 | NetworkResources map[string]InstanceNetworkResource 21 | } 22 | 23 | type InstanceMediaConfig struct { 24 | Mode InstanceMediaMode 25 | ID string 26 | URI string 27 | Host string 28 | Port uint 29 | } 30 | 31 | type InstanceMediaMode int 32 | 33 | const ( 34 | MediaModeHTTPS = InstanceMediaMode(iota) 35 | ) 36 | 37 | type InstanceModule interface { 38 | UsingStorage(name, url string, respChan chan error) 39 | DetachStorage(respChan chan error) 40 | CreateInstance(require GuestConfig, resp chan error) 41 | DeleteInstance(id string, resp chan error) 42 | GetInstanceConfig(id string, resp chan InstanceResult) 43 | GetInstanceStatus(id string, resp chan InstanceResult) 44 | GetAllInstance(resp chan []GuestConfig) 45 | ModifyGuestName(id, name string, resp chan error) 46 | ModifyGuestCore(id string, core uint, resp chan error) 47 | ModifyGuestMemory(id string, core uint, resp chan error) 48 | ModifyCPUPriority(guestID string, priority PriorityEnum, resp chan error) 49 | ModifyDiskThreshold(guestID string, readSpeed, readIOPS, writeSpeed, writeIOPS uint64, resp chan error) 50 | ModifyNetworkThreshold(guestID string, receive, send uint64, resp chan error) 51 | ModifyAutoStart(guestID string, enable bool, respChan chan error) 52 | 53 | ModifyGuestAuth(id, password, usr string, resp chan InstanceResult) 54 | GetGuestAuth(id string, resp chan InstanceResult) 55 | FinishGuestInitialize(id string, resp chan error) 56 | ResetGuestSystem(id string, resp chan error) 57 | UpdateDiskSize(guest string, index int, size uint64, resp chan error) 58 | 59 | StartInstance(id string, resp chan error) 60 | StartInstanceWithMedia(id string, media InstanceMediaConfig, resp chan error) 61 | //StartWithNetwork(id, network string, resp chan error) 62 | StopInstance(id string, reboot, force bool, resp chan error) 63 | AttachMedia(id string, media InstanceMediaConfig, resp chan error) 64 | DetachMedia(id string, resp chan error) 65 | //AttachDisk(id, target string, resp chan error) 66 | //DetachDisk(id, target string, resp chan error) 67 | IsInstanceRunning(id string, resp chan bool) 68 | GetNetworkResources(instances []string, respChan chan InstanceResult) 69 | AttachInstances(resources map[string]InstanceNetworkResource, respChan chan error) 70 | DetachInstances(instances []string, respChan chan error) 71 | MigrateInstances(instances 
[]string, respChan chan error) 72 | ResetMonitorPassword(id string, respChan chan InstanceResult) 73 | SyncAddressAllocation(allocationMode string) 74 | //Security Policy 75 | GetSecurityPolicy(instanceID string, respChan chan InstanceResult) 76 | AddSecurityPolicyRule(instanceID string, rule SecurityPolicyRule, respChan chan error) 77 | ModifySecurityPolicyRule(instanceID string, index int, rule SecurityPolicyRule, respChan chan error) 78 | RemoveSecurityPolicyRule(instanceID string, index int, respChan chan error) 79 | ChangeDefaultSecurityPolicyAction(instanceID string, accept bool, respChan chan error) 80 | PullUpSecurityPolicyRule(instanceID string, index int, respChan chan error) 81 | PushDownSecurityPolicyRule(instanceID string, index int, respChan chan error) 82 | } 83 | 84 | type SnapshotConfig struct { 85 | Name string `json:"name"` 86 | Description string `json:"description,omitempty"` 87 | CreateTime string `json:"create_time"` 88 | IsRoot bool `json:"is_root"` 89 | IsCurrent bool `json:"is_current"` 90 | Backing string `json:"backing,omitempty"` 91 | Running bool `json:"running"` 92 | } 93 | 94 | type AttachDeviceInfo struct { 95 | Name string 96 | Protocol string 97 | Path string 98 | Attached bool 99 | Error string 100 | } 101 | 102 | type StorageResult struct { 103 | Error error 104 | Volumes []string 105 | Pool string 106 | Image string 107 | Size uint 108 | Path string 109 | Snapshot SnapshotConfig 110 | SnapshotList []SnapshotConfig 111 | Devices []AttachDeviceInfo 112 | StorageMode StoragePoolMode 113 | SystemPaths []string 114 | DataPaths []string 115 | } 116 | 117 | type BootType int 118 | 119 | const ( 120 | BootTypeNone = BootType(iota) 121 | BootTypeCloudInit 122 | ) 123 | 124 | type StorageModule interface { 125 | UsingStorage(name, protocol, host, target string, respChan chan StorageResult) 126 | DetachStorage(respChan chan error) 127 | GetAttachDevices(respChan chan StorageResult) 128 | CreateVolumes(groupName string, systemSize uint64, dataSize []uint64, bootType BootType, resp chan StorageResult) 129 | DeleteVolumes(groupName string, resp chan error) 130 | ReadDiskImage(id framework.SessionID, groupName, targetVol, sourceImage string, targetSize, imageSize uint64, 131 | mediaHost string, mediaPort uint, startChan chan error, progress chan uint, resultChan chan StorageResult) 132 | WriteDiskImage(id framework.SessionID, groupName, targetVol, sourceImage, mediaHost string, mediaPort uint, 133 | startChan chan error, progress chan uint, resultChan chan StorageResult) 134 | ResizeVolume(id framework.SessionID, groupName, targetVol string, targetSize uint64, respChan chan StorageResult) 135 | ShrinkVolume(id framework.SessionID, groupName, targetVol string, respChan chan StorageResult) 136 | //snapshot 137 | QuerySnapshot(groupName string, respChan chan StorageResult) 138 | GetSnapshot(groupName, snapshot string, respChan chan StorageResult) 139 | CreateSnapshot(groupName, snapshot, description string, respChan chan error) 140 | DeleteSnapshot(groupName, snapshot string, respChan chan error) 141 | RestoreSnapshot(groupName, snapshot string, respChan chan error) 142 | AttachVolumeGroup(groups []string, respChan chan error) 143 | DetachVolumeGroup(groups []string, respChan chan error) 144 | QueryStoragePaths(respChan chan StorageResult) 145 | ChangeDefaultStoragePath(target string, respChan chan error) 146 | ValidateVolumesForStart(groupName string, respChan chan error) 147 | } 148 | 149 | type NetworkResult struct { 150 | Error error 151 | Name string 152 | 
MonitorPort int 153 | External string 154 | Internal string 155 | Gateway string 156 | DNS []string 157 | Allocation string 158 | Resources map[string]InstanceNetworkResource 159 | } 160 | 161 | type NetworkModule interface { 162 | GetBridgeName() string 163 | GetCurrentConfig(resp chan NetworkResult) 164 | AllocateInstanceResource(instance, hwaddress, internal, external string, resp chan NetworkResult) 165 | DeallocateAllResource(instance string, resp chan error) 166 | AttachInstances(resources map[string]InstanceNetworkResource, resp chan NetworkResult) 167 | DetachInstances(instances []string, resp chan error) 168 | UpdateAddressAllocation(gateway string, dns []string, allocationMode string, resp chan error) 169 | GetAddressByHWAddress(hwaddress string, resp chan NetworkResult) 170 | } 171 | 172 | type Configurator struct { 173 | operateTimeout time.Duration 174 | } 175 | 176 | func (c *Configurator) SetOperateTimeout(timeoutInSeconds int) { 177 | c.operateTimeout = time.Duration(timeoutInSeconds) * time.Second 178 | } 179 | 180 | // GetOperateTimeout : get operate timeout 181 | func (c *Configurator) GetOperateTimeout() time.Duration { 182 | return c.operateTimeout 183 | } 184 | 185 | const ( 186 | defaultOperateTimeout = 10 //10 seconds 187 | ) 188 | 189 | var globalConfigurator = Configurator{ 190 | operateTimeout: defaultOperateTimeout * time.Second, 191 | } 192 | 193 | func GetConfigurator() *Configurator { 194 | return &globalConfigurator 195 | } 196 | -------------------------------------------------------------------------------- /src/task/add_security_rule.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | ) 9 | 10 | type AddSecurityRuleExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | } 14 | 15 | func (executor *AddSecurityRuleExecutor) Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) (err error) { 17 | var instanceID string 18 | var accept bool 19 | var fromIP, toIP, toPort, protocol uint 20 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil{ 21 | err = fmt.Errorf("get instance id fail: %s", err.Error()) 22 | return 23 | } 24 | if accept, err = request.GetBoolean(framework.ParamKeyAction); err != nil{ 25 | err = fmt.Errorf("get action fail: %s", err.Error()) 26 | return 27 | } 28 | if fromIP, err = request.GetUInt(framework.ParamKeyFrom); err != nil{ 29 | err = fmt.Errorf("get source address fail: %s", err.Error()) 30 | return 31 | } 32 | if toIP, err = request.GetUInt(framework.ParamKeyTo); err != nil{ 33 | err = fmt.Errorf("get target address fail: %s", err.Error()) 34 | return 35 | } 36 | if toPort, err = request.GetUInt(framework.ParamKeyPort); err != nil{ 37 | err = fmt.Errorf("get target port fail: %s", err.Error()) 38 | return 39 | }else if 0 == toPort || toPort > 0xFFFF{ 40 | err = fmt.Errorf("invalid target port %d", toPort) 41 | return 42 | } 43 | if protocol, err = request.GetUInt(framework.ParamKeyProtocol); err != nil{ 44 | err = fmt.Errorf("get protocol fail: %s", err.Error()) 45 | return 46 | } 47 | resp, _ := framework.CreateJsonMessage(framework.AddGuestRuleResponse) 48 | resp.SetFromSession(id) 49 | resp.SetToSession(request.GetFromSession()) 50 | resp.SetSuccess(false) 51 | 52 | var rule = service.SecurityPolicyRule{ 53 | Accept: accept, 
54 | TargetPort: toPort, 55 | } 56 | 57 | switch protocol { 58 | case service.PolicyRuleProtocolIndexTCP: 59 | rule.Protocol = service.PolicyRuleProtocolTCP 60 | case service.PolicyRuleProtocolIndexUDP: 61 | rule.Protocol = service.PolicyRuleProtocolUDP 62 | case service.PolicyRuleProtocolIndexICMP: 63 | rule.Protocol = service.PolicyRuleProtocolICMP 64 | default: 65 | err = fmt.Errorf("invalid protocol %d for security rule", protocol) 66 | return 67 | } 68 | rule.SourceAddress = service.UInt32ToIPv4(uint32(fromIP)) 69 | rule.TargetAddress = service.UInt32ToIPv4(uint32(toIP)) 70 | 71 | var respChan = make(chan error, 1) 72 | executor.InstanceModule.AddSecurityPolicyRule(instanceID, rule, respChan) 73 | err = <- respChan 74 | if nil != err{ 75 | log.Printf("[%08X] add security rule to instance '%s' fail: %s", 76 | id, instanceID, err.Error()) 77 | resp.SetError(err.Error()) 78 | }else{ 79 | if accept{ 80 | log.Printf("[%08X] add security rule to instance '%s': accept protocol '%s' from '%s' to '%s:%d'", 81 | id, instanceID, rule.Protocol, rule.SourceAddress, rule.TargetAddress, rule.TargetPort) 82 | }else{ 83 | log.Printf("[%08X] add security rule to instance '%s': reject protocol '%s' from '%s' to '%s:%d'", 84 | id, instanceID, rule.Protocol, rule.SourceAddress, rule.TargetAddress, rule.TargetPort) 85 | } 86 | resp.SetSuccess(true) 87 | } 88 | return executor.Sender.SendMessage(resp, request.GetSender()) 89 | } 90 | -------------------------------------------------------------------------------- /src/task/attach_instance.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/cell/service" 5 | "github.com/project-nano/framework" 6 | "log" 7 | "time" 8 | ) 9 | 10 | type AttachInstanceExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | StorageModule service.StorageModule 14 | NetworkModule service.NetworkModule 15 | } 16 | 17 | func (executor *AttachInstanceExecutor) Execute(id framework.SessionID, request framework.Message, 18 | incoming chan framework.Message, terminate chan bool) (err error) { 19 | resp, _ := framework.CreateJsonMessage(framework.AttachInstanceResponse) 20 | resp.SetSuccess(false) 21 | resp.SetFromSession(id) 22 | resp.SetToSession(request.GetFromSession()) 23 | 24 | isFailover, err := request.GetBoolean(framework.ParamKeyImmediate) 25 | if err != nil { 26 | log.Printf("[%08X] recv attach instance request from %s.[%08X] but get failover flag fail: %s", 27 | id, request.GetSender(), request.GetFromSession(), err.Error()) 28 | resp.SetError(err.Error()) 29 | return executor.Sender.SendMessage(resp, request.GetSender()) 30 | } 31 | var sourceCell string 32 | if isFailover { 33 | sourceCell, err = request.GetString(framework.ParamKeyCell) 34 | if err != nil { 35 | log.Printf("[%08X] recv failover attach request from %s.[%08X] but get source cell fail: %s", 36 | id, request.GetSender(), request.GetFromSession(), err.Error()) 37 | return err 38 | } 39 | } 40 | idList, err := request.GetStringArray(framework.ParamKeyInstance) 41 | if err != nil { 42 | log.Printf("[%08X] recv attach instance request from %s.[%08X] but get target intance fail: %s", 43 | id, request.GetSender(), request.GetFromSession(), err.Error()) 44 | resp.SetError(err.Error()) 45 | return executor.Sender.SendMessage(resp, request.GetSender()) 46 | } 47 | log.Printf("[%08X] recv attach %d instance(s) request from %s.[%08X]", id, len(idList), request.GetSender(), 
request.GetFromSession()) 48 | var networkResource map[string]service.InstanceNetworkResource 49 | { 50 | var respChan = make(chan service.InstanceResult, 1) 51 | executor.InstanceModule.GetNetworkResources(idList, respChan) 52 | var result = <-respChan 53 | if result.Error != nil { 54 | err = result.Error 55 | resp.SetError(err.Error()) 56 | log.Printf("[%08X] get network resource fail: %s", id, err.Error()) 57 | return executor.Sender.SendMessage(resp, request.GetSender()) 58 | } 59 | networkResource = result.NetworkResources 60 | } 61 | { 62 | var respChan = make(chan service.NetworkResult, 1) 63 | executor.NetworkModule.AttachInstances(networkResource, respChan) 64 | var result = <-respChan 65 | if result.Error != nil { 66 | err = result.Error 67 | resp.SetError(err.Error()) 68 | log.Printf("[%08X] attach network resource fail: %s", id, err.Error()) 69 | return executor.Sender.SendMessage(resp, request.GetSender()) 70 | } 71 | networkResource = result.Resources 72 | } 73 | { 74 | var respChan = make(chan error, 1) 75 | executor.StorageModule.AttachVolumeGroup(idList, respChan) 76 | err = <-respChan 77 | if err != nil { 78 | resp.SetError(err.Error()) 79 | log.Printf("[%08X] attach storage resource fail: %s", id, err.Error()) 80 | executor.detachResource(id, idList, true, false, false) 81 | return executor.Sender.SendMessage(resp, request.GetSender()) 82 | } 83 | } 84 | { 85 | var respChan = make(chan error, 1) 86 | executor.InstanceModule.AttachInstances(networkResource, respChan) 87 | err = <-respChan 88 | if err != nil { 89 | resp.SetError(err.Error()) 90 | log.Printf("[%08X] attach instance resource fail: %s", id, err.Error()) 91 | executor.detachResource(id, idList, true, true, false) 92 | return executor.Sender.SendMessage(resp, request.GetSender()) 93 | } 94 | } 95 | log.Printf("[%08X] instance(s) attached", id) 96 | 97 | idList = idList[:0] 98 | var monitorPorts []uint64 99 | for instanceID, resource := range networkResource { 100 | idList = append(idList, instanceID) 101 | monitorPorts = append(monitorPorts, uint64(resource.MonitorPort)) 102 | } 103 | 104 | if isFailover { 105 | 106 | //notify migrate finish 107 | var respChan = make(chan error, 1) 108 | executor.InstanceModule.MigrateInstances(idList, respChan) 109 | err = <-respChan 110 | if err != nil { 111 | log.Printf("[%08X] migrate instaince fail: %s", id, err.Error()) 112 | executor.detachResource(id, idList, true, true, true) 113 | return nil 114 | } 115 | 116 | notify, _ := framework.CreateJsonMessage(framework.InstanceMigratedEvent) 117 | notify.SetSuccess(true) 118 | notify.SetFromSession(id) 119 | notify.SetStringArray(framework.ParamKeyInstance, idList) 120 | notify.SetUIntArray(framework.ParamKeyMonitor, monitorPorts) 121 | notify.SetBoolean(framework.ParamKeyImmediate, true) 122 | notify.SetString(framework.ParamKeyCell, sourceCell) 123 | if err = executor.Sender.SendMessage(notify, request.GetSender()); err != nil { 124 | log.Printf("[%08X] warning: notify migrate finish fail: %s", id, err.Error()) 125 | } 126 | log.Printf("[%08X] %d instance(s) migrated success when failover", id, len(idList)) 127 | return nil 128 | 129 | } else { 130 | resp.SetSuccess(true) 131 | log.Printf("[%08X] instance(s) attached", id) 132 | if err = executor.Sender.SendMessage(resp, request.GetSender()); err != nil { 133 | log.Printf("[%08X] warning: send attach response fail: %s", id, err.Error()) 134 | } 135 | //wait migrate 136 | timer := time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 137 | select { 138 | case 
migrateRequest := <-incoming:
139 |             if migrateRequest.GetID() != framework.MigrateInstanceRequest {
140 |                 //detach fail
141 |                 log.Printf("[%08X] unexpected message received from %s when waiting for migrate: %d", id, migrateRequest.GetSender(), migrateRequest.GetID())
142 |                 executor.detachResource(id, idList, true, true, true)
143 |                 return nil
144 |             }
145 |             var migrationID string
146 |             if migrationID, err = migrateRequest.GetString(framework.ParamKeyMigration); err != nil {
147 |                 log.Printf("[%08X] parse migration ID from %s fail: %s", id, migrateRequest.GetSender(), err.Error())
148 |                 executor.detachResource(id, idList, true, true, true)
149 |                 return nil
150 |             }
151 |             //invoke migrate
152 |             var respChan = make(chan error, 1)
153 |             executor.InstanceModule.MigrateInstances(idList, respChan)
154 |             err = <-respChan
155 |             if err != nil {
156 |                 log.Printf("[%08X] migrate instance fail: %s", id, err.Error())
157 |                 executor.detachResource(id, idList, true, true, true)
158 |                 return nil
159 |             }
160 | 
161 |             notify, _ := framework.CreateJsonMessage(framework.InstanceMigratedEvent)
162 |             notify.SetSuccess(true)
163 |             notify.SetFromSession(id)
164 |             notify.SetStringArray(framework.ParamKeyInstance, idList)
165 |             notify.SetUIntArray(framework.ParamKeyMonitor, monitorPorts)
166 |             notify.SetString(framework.ParamKeyMigration, migrationID)
167 |             notify.SetBoolean(framework.ParamKeyImmediate, false)
168 |             if err = executor.Sender.SendMessage(notify, request.GetSender()); err != nil {
169 |                 log.Printf("[%08X] warning: notify migrate finish fail: %s", id, err.Error())
170 |             }
171 |             log.Printf("[%08X] %d instance(s) migrated success", id, len(idList))
172 |             return nil
173 | 
174 |         case <-timer.C:
175 |             //timeout
176 |             log.Printf("[%08X] wait migrate request timeout", id)
177 |             executor.detachResource(id, idList, true, true, true)
178 |             return nil
179 |         }
180 |     }
181 | }
182 | 
183 | func (executor *AttachInstanceExecutor) detachResource(id framework.SessionID, instances []string, detachNetwork, detachVolume, detachInstance bool) {
184 |     var respChan = make(chan error, 1)
185 |     var err error
186 |     if detachInstance {
187 |         executor.InstanceModule.DetachInstances(instances, respChan)
188 |         err = <-respChan
189 |         if err != nil {
190 |             log.Printf("[%08X] detach instance fail: %s", id, err.Error())
191 |         }
192 |     }
193 |     if detachVolume {
194 |         executor.StorageModule.DetachVolumeGroup(instances, respChan)
195 |         err = <-respChan
196 |         if err != nil {
197 |             log.Printf("[%08X] detach volume fail: %s", id, err.Error())
198 |         }
199 |     }
200 |     if detachNetwork {
201 |         executor.NetworkModule.DetachInstances(instances, respChan)
202 |         err = <-respChan
203 |         if err != nil {
204 |             log.Printf("[%08X] detach network fail: %s", id, err.Error())
205 |         }
206 |     }
207 | }
208 | 
--------------------------------------------------------------------------------
/src/task/change_security_policy_action.go:
--------------------------------------------------------------------------------
1 | package task
2 | 
3 | import (
4 |     "fmt"
5 |     "github.com/project-nano/cell/service"
6 |     "github.com/project-nano/framework"
7 |     "log"
8 | )
9 | 
10 | type ChangeDefaultSecurityActionExecutor struct {
11 |     Sender         framework.MessageSender
12 |     InstanceModule service.InstanceModule
13 | }
14 | 
15 | func (executor *ChangeDefaultSecurityActionExecutor) Execute(id framework.SessionID, request framework.Message,
16 |     incoming chan framework.Message, terminate chan bool) (err error) {
17 |     var instanceID string
18 |     var accept bool
19 |     if instanceID, err =
request.GetString(framework.ParamKeyInstance); err != nil{ 20 | err = fmt.Errorf("get instance id fail: %s", err.Error()) 21 | return 22 | } 23 | if accept, err = request.GetBoolean(framework.ParamKeyAction); err != nil{ 24 | err = fmt.Errorf("get action fail: %s", err.Error()) 25 | return 26 | } 27 | resp, _ := framework.CreateJsonMessage(framework.ChangeGuestRuleDefaultActionResponse) 28 | resp.SetFromSession(id) 29 | resp.SetToSession(request.GetFromSession()) 30 | resp.SetSuccess(false) 31 | var respChan = make(chan error, 1) 32 | executor.InstanceModule.ChangeDefaultSecurityPolicyAction(instanceID, accept, respChan) 33 | err = <- respChan 34 | if nil != err{ 35 | log.Printf("[%08X] change default security policy action of instance '%s' fail: %s", 36 | id, instanceID, err.Error()) 37 | resp.SetError(err.Error()) 38 | }else{ 39 | if accept{ 40 | log.Printf("[%08X] default security policy action of instance '%s' changed to accept", 41 | id, instanceID) 42 | }else{ 43 | log.Printf("[%08X] default security policy action of instance '%s' changed to drop", 44 | id, instanceID) 45 | } 46 | resp.SetSuccess(true) 47 | } 48 | return executor.Sender.SendMessage(resp, request.GetSender()) 49 | } 50 | -------------------------------------------------------------------------------- /src/task/change_security_rule_order.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | ) 9 | 10 | type ChangeSecurityRuleOrderExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | } 14 | 15 | func (executor *ChangeSecurityRuleOrderExecutor) Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) (err error) { 17 | var instanceID string 18 | var direction, index int 19 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil{ 20 | err = fmt.Errorf("get instance id fail: %s", err.Error()) 21 | return 22 | } 23 | if index, err = request.GetInt(framework.ParamKeyIndex); err != nil{ 24 | err = fmt.Errorf("get index fail: %s", err.Error()) 25 | return 26 | } 27 | if direction, err = request.GetInt(framework.ParamKeyMode); err != nil{ 28 | err = fmt.Errorf("get direction fail: %s", err.Error()) 29 | return 30 | } 31 | resp, _ := framework.CreateJsonMessage(framework.ChangeGuestRuleOrderResponse) 32 | resp.SetFromSession(id) 33 | resp.SetToSession(request.GetFromSession()) 34 | resp.SetSuccess(false) 35 | var respChan = make(chan error, 1) 36 | var moveUp = false 37 | if direction >= 0 { 38 | moveUp = true 39 | executor.InstanceModule.PullUpSecurityPolicyRule(instanceID, index, respChan) 40 | }else{ 41 | executor.InstanceModule.PushDownSecurityPolicyRule(instanceID, index, respChan) 42 | } 43 | 44 | err = <- respChan 45 | if nil != err{ 46 | log.Printf("[%08X] change order of %dth security rule of instance '%s' fail: %s", 47 | id, index, instanceID, err.Error()) 48 | resp.SetError(err.Error()) 49 | }else{ 50 | if moveUp{ 51 | log.Printf("[%08X] %dth security rule of instance '%s' moved up", 52 | id, index, instanceID) 53 | }else{ 54 | log.Printf("[%08X] %dth security rule of instance '%s' moved down", 55 | id, index, instanceID) 56 | } 57 | resp.SetSuccess(true) 58 | } 59 | return executor.Sender.SendMessage(resp, request.GetSender()) 60 | } 61 | -------------------------------------------------------------------------------- 
/src/task/change_storage_path.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | ) 9 | 10 | type ChangeStoragePathExecutor struct { 11 | Sender framework.MessageSender 12 | Storage service.StorageModule 13 | } 14 | 15 | func (executor *ChangeStoragePathExecutor) Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) (err error) { 17 | var newPath string 18 | if newPath, err = request.GetString(framework.ParamKeyPath); err != nil{ 19 | err = fmt.Errorf("get new path fail: %s", err.Error()) 20 | return 21 | } 22 | var respChan = make(chan error, 1) 23 | executor.Storage.ChangeDefaultStoragePath(newPath, respChan) 24 | 25 | resp, _ := framework.CreateJsonMessage(framework.ModifyCellStorageResponse) 26 | resp.SetSuccess(false) 27 | resp.SetFromSession(id) 28 | resp.SetToSession(request.GetFromSession()) 29 | 30 | err = <- respChan 31 | if err != nil{ 32 | resp.SetError(err.Error()) 33 | log.Printf("[%08X] change storage path fail: %s", id, err.Error()) 34 | return executor.Sender.SendMessage(resp, request.GetSender()) 35 | }else{ 36 | resp.SetSuccess(true) 37 | log.Printf("[%08X] default storage path changed to: %s", id, newPath) 38 | } 39 | return executor.Sender.SendMessage(resp, request.GetSender()) 40 | } -------------------------------------------------------------------------------- /src/task/create_disk_image.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/project-nano/cell/service" 7 | "github.com/project-nano/framework" 8 | "log" 9 | "net/http" 10 | "time" 11 | ) 12 | 13 | type CreateDiskImageExecutor struct { 14 | Sender framework.MessageSender 15 | InstanceModule service.InstanceModule 16 | StorageModule service.StorageModule 17 | Client *http.Client 18 | } 19 | 20 | func (executor *CreateDiskImageExecutor) Execute(id framework.SessionID, request framework.Message, 21 | incoming chan framework.Message, terminate chan bool) (err error) { 22 | var imageID, guestID, mediaHost string 23 | var mediaPort uint 24 | if imageID, err = request.GetString(framework.ParamKeyImage); err != nil { 25 | return err 26 | } 27 | if guestID, err = request.GetString(framework.ParamKeyGuest); err != nil { 28 | return err 29 | } 30 | if mediaHost, err = request.GetString(framework.ParamKeyHost); err != nil { 31 | return err 32 | } 33 | if mediaPort, err = request.GetUInt(framework.ParamKeyPort); err != nil { 34 | return err 35 | } 36 | log.Printf("[%08X] recv create disk image from %s.[%08X], from guest '%s' to image %s@%s:%d", 37 | id, request.GetSender(), request.GetFromSession(), guestID, imageID, mediaHost, mediaPort) 38 | resp, _ := framework.CreateJsonMessage(framework.CreateDiskImageResponse) 39 | resp.SetSuccess(false) 40 | resp.SetFromSession(id) 41 | resp.SetToSession(request.GetFromSession()) 42 | var targetVolume string 43 | { 44 | var respChan = make(chan service.InstanceResult) 45 | executor.InstanceModule.GetInstanceStatus(guestID, respChan) 46 | var result = <-respChan 47 | if result.Error != nil { 48 | err = result.Error 49 | log.Printf("[%08X] get instance fail: %s", id, err.Error()) 50 | resp.SetError(err.Error()) 51 | return executor.Sender.SendMessage(resp, request.GetSender()) 52 | } 53 | if !result.Instance.Created { 54 | err = 
fmt.Errorf("instance '%s' not created", guestID) 55 | log.Printf("[%08X] check guest status fail: %s", id, err.Error()) 56 | resp.SetError(err.Error()) 57 | return executor.Sender.SendMessage(resp, request.GetSender()) 58 | } 59 | if result.Instance.Running { 60 | err = fmt.Errorf("instance '%s' not stopped", guestID) 61 | log.Printf("[%08X] check guest status fail: %s", id, err.Error()) 62 | resp.SetError(err.Error()) 63 | return executor.Sender.SendMessage(resp, request.GetSender()) 64 | } 65 | if 0 == len(result.Instance.StorageVolumes) { 66 | err = errors.New("no volume available") 67 | log.Printf("[%08X] check guest status fail: %s", id, err.Error()) 68 | resp.SetError(err.Error()) 69 | return executor.Sender.SendMessage(resp, request.GetSender()) 70 | } 71 | targetVolume = result.Instance.StorageVolumes[0] 72 | } 73 | var startChan = make(chan error, 1) 74 | var progressChan = make(chan uint, 1) 75 | var resultChan = make(chan service.StorageResult, 1) 76 | const ( 77 | CheckInterval = 2 * time.Second 78 | ) 79 | { 80 | //start write 81 | executor.StorageModule.WriteDiskImage(id, guestID, targetVolume, imageID, mediaHost, mediaPort, startChan, progressChan, resultChan) 82 | var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 83 | select { 84 | case <-timer.C: 85 | err = errors.New("start write timeout") 86 | log.Printf("[%08X] write disk image fail: %s", id, err.Error()) 87 | resp.SetError(err.Error()) 88 | return executor.Sender.SendMessage(resp, request.GetSender()) 89 | case err = <-startChan: 90 | if err != nil { 91 | log.Printf("[%08X] write disk image fail: %s", id, err.Error()) 92 | resp.SetError(err.Error()) 93 | return executor.Sender.SendMessage(resp, request.GetSender()) 94 | } 95 | //start success 96 | log.Printf("[%08X] write disk image started", id) 97 | resp.SetSuccess(true) 98 | if err = executor.Sender.SendMessage(resp, request.GetSender()); err != nil { 99 | log.Printf("[%08X] warning: notify create start to '%s' fail: %s", id, request.GetSender(), err.Error()) 100 | } 101 | 102 | } 103 | } 104 | event, _ := framework.CreateJsonMessage(framework.DiskImageUpdatedEvent) 105 | event.SetSuccess(true) 106 | event.SetFromSession(id) 107 | event.SetToSession(request.GetFromSession()) 108 | 109 | { 110 | //wait progress & result 111 | var latestUpdate = time.Now() 112 | var ticker = time.NewTicker(CheckInterval) 113 | for { 114 | select { 115 | case <-ticker.C: 116 | //check 117 | if time.Now().After(latestUpdate.Add(service.GetConfigurator().GetOperateTimeout())) { 118 | //timeout 119 | err = errors.New("timeout") 120 | log.Printf("[%08X] create disk image fail: %s", id, err.Error()) 121 | event.SetSuccess(false) 122 | event.SetError(err.Error()) 123 | return executor.Sender.SendMessage(event, request.GetSender()) 124 | } 125 | case progress := <-progressChan: 126 | latestUpdate = time.Now() 127 | event.SetUInt(framework.ParamKeyProgress, progress) 128 | event.SetBoolean(framework.ParamKeyEnable, false) 129 | log.Printf("[%08X] progress => %d %%", id, progress) 130 | if err = executor.Sender.SendMessage(event, request.GetSender()); err != nil { 131 | log.Printf("[%08X] warning: notify progress fail: %s", id, err.Error()) 132 | } 133 | case result := <-resultChan: 134 | err = result.Error 135 | if err != nil { 136 | log.Printf("[%08X] create disk image fail: %s", id, err.Error()) 137 | event.SetSuccess(false) 138 | event.SetError(err.Error()) 139 | return executor.Sender.SendMessage(event, request.GetSender()) 140 | } 141 | log.Printf("[%08X] disk image 
written successfully, %d MB in size", id, result.Size>>20)
142 |                 event.SetBoolean(framework.ParamKeyEnable, true)
143 |                 event.SetUInt(framework.ParamKeySize, result.Size)
144 |                 event.SetUInt(framework.ParamKeyProgress, 0)
145 |                 return executor.Sender.SendMessage(event, request.GetSender())
146 |             }
147 |         }
148 |     }
149 | }
150 | 
--------------------------------------------------------------------------------
/src/task/create_snapshot.go:
--------------------------------------------------------------------------------
1 | package task
2 | 
3 | import (
4 |     "errors"
5 |     "fmt"
6 |     "github.com/project-nano/cell/service"
7 |     "github.com/project-nano/framework"
8 |     "log"
9 |     "time"
10 | )
11 | 
12 | type CreateSnapshotExecutor struct {
13 |     Sender         framework.MessageSender
14 |     InstanceModule service.InstanceModule
15 |     StorageModule  service.StorageModule
16 | }
17 | 
18 | func (executor *CreateSnapshotExecutor) Execute(id framework.SessionID, request framework.Message,
19 |     incoming chan framework.Message, terminate chan bool) (err error) {
20 |     var instanceID, snapshot, description string
21 |     if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil {
22 |         return err
23 |     }
24 |     if snapshot, err = request.GetString(framework.ParamKeyName); err != nil {
25 |         return err
26 |     }
27 |     description, _ = request.GetString(framework.ParamKeyDescription)
28 | 
29 |     log.Printf("[%08X] recv create snapshot '%s' for guest '%s' from %s.[%08X]",
30 |         id, snapshot, instanceID, request.GetSender(), request.GetFromSession())
31 |     resp, _ := framework.CreateJsonMessage(framework.CreateSnapshotResponse)
32 |     resp.SetSuccess(false)
33 |     resp.SetFromSession(id)
34 |     resp.SetToSession(request.GetFromSession())
35 |     {
36 |         var respChan = make(chan service.InstanceResult, 1)
37 |         executor.InstanceModule.GetInstanceStatus(instanceID, respChan)
38 |         var result = <-respChan
39 |         if result.Error != nil {
40 |             err = result.Error
41 |             log.Printf("[%08X] get instance fail: %s", id, err.Error())
42 |             resp.SetError(err.Error())
43 |             return executor.Sender.SendMessage(resp, request.GetSender())
44 |         }
45 | 
46 |         err = func(instance service.InstanceStatus) (err error) {
47 |             if !instance.Created {
48 |                 err = fmt.Errorf("instance '%s' not created", instanceID)
49 |                 return
50 |             }
51 |             //todo: allow operating on branch snapshots
52 |             if instance.Running {
53 |                 err = errors.New("live snapshot not supported yet, shut down the instance first")
54 |                 return
55 |             }
56 |             return nil
57 |         }(result.Instance)
58 |         if err != nil {
59 |             log.Printf("[%08X] check instance fail: %s", id, err.Error())
60 |             resp.SetError(err.Error())
61 |             return executor.Sender.SendMessage(resp, request.GetSender())
62 |         }
63 |     }
64 |     {
65 |         var respChan = make(chan error, 1)
66 |         executor.StorageModule.CreateSnapshot(instanceID, snapshot, description, respChan)
67 |         var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout())
68 |         select {
69 |         case <-timer.C:
70 |             err = errors.New("request timeout")
71 |             log.Printf("[%08X] create snapshot timeout", id)
72 |             resp.SetError(err.Error())
73 |             return executor.Sender.SendMessage(resp, request.GetSender())
74 |         case err = <-respChan:
75 |             if err != nil {
76 |                 log.Printf("[%08X] create snapshot fail: %s", id, err.Error())
77 |                 resp.SetError(err.Error())
78 |             } else {
79 |                 log.Printf("[%08X] snapshot '%s' created for guest '%s'", id, snapshot, instanceID)
80 |                 resp.SetSuccess(true)
81 |             }
82 |             return executor.Sender.SendMessage(resp, request.GetSender())
83 |         }
84 |     }
85 | }
86 | 
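CreateSnapshotExecutor, DeleteSnapshotExecutor and the other storage-backed executors repeat the same wait pattern: fire the module call with a buffered channel, then select between the reply and a timer built from `service.GetConfigurator().GetOperateTimeout()`. A small helper such as the sketch below could factor that out; it is only an illustration (the name `waitWithOperateTimeout` is hypothetical and does not exist in the repository), while the timeout accessor it uses is the real one declared in service_interface.go.

```go
// Hypothetical helper (not in the repository) showing the shared timeout
// pattern; only service.GetConfigurator().GetOperateTimeout() is real.
package task

import (
	"errors"
	"time"

	"github.com/project-nano/cell/service"
)

// waitWithOperateTimeout waits for an asynchronous module call to answer on
// respChan, or gives up after the configured operate timeout (10s by default).
func waitWithOperateTimeout(respChan chan error) error {
	timer := time.NewTimer(service.GetConfigurator().GetOperateTimeout())
	defer timer.Stop()
	select {
	case <-timer.C:
		return errors.New("request timeout")
	case err := <-respChan:
		return err
	}
}
```

With such a helper, the snapshot branch above would reduce to calling `executor.StorageModule.CreateSnapshot(instanceID, snapshot, description, respChan)` followed by `err = waitWithOperateTimeout(respChan)`, with the response handling unchanged.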
-------------------------------------------------------------------------------- /src/task/delete_instance.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | "fmt" 8 | ) 9 | 10 | type DeleteInstanceExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | StorageModule service.StorageModule 14 | NetworkModule service.NetworkModule 15 | } 16 | 17 | func (executor *DeleteInstanceExecutor) Execute(id framework.SessionID, request framework.Message, 18 | incoming chan framework.Message, terminate chan bool) (err error) { 19 | var instanceID string 20 | instanceID, err = request.GetString(framework.ParamKeyInstance) 21 | if err != nil{ 22 | return err 23 | } 24 | log.Printf("[%08X] request delete instance '%s' from %s.[%08X]", id, 25 | instanceID, request.GetSender(), request.GetFromSession()) 26 | 27 | resp, _ := framework.CreateJsonMessage(framework.DeleteGuestResponse) 28 | resp.SetSuccess(false) 29 | resp.SetFromSession(id) 30 | resp.SetToSession(request.GetFromSession()) 31 | 32 | var config service.GuestConfig 33 | { 34 | var respChan = make(chan service.InstanceResult) 35 | executor.InstanceModule.GetInstanceStatus(instanceID, respChan) 36 | result := <- respChan 37 | if result.Error != nil{ 38 | log.Printf("[%08X] get config fail: %s", id, result.Error.Error()) 39 | return executor.ResponseToFail(request.GetSender(), resp, result.Error) 40 | } 41 | if result.Instance.Running{ 42 | err := fmt.Errorf("instance '%s' still running", instanceID) 43 | log.Printf("[%08X] delete instance fail: %s", id, err.Error()) 44 | return executor.ResponseToFail(request.GetSender(), resp, err) 45 | } 46 | config = result.Instance.GuestConfig 47 | } 48 | { 49 | //todo: detach network 50 | switch config.NetworkMode { 51 | case service.NetworkModePlain: 52 | var respChan = make(chan error) 53 | executor.NetworkModule.DeallocateAllResource(instanceID, respChan) 54 | err := <- respChan 55 | if err != nil{ 56 | log.Printf("[%08X] release network resource fail: %s", id, err.Error()) 57 | return executor.ResponseToFail(request.GetSender(), resp, err) 58 | } 59 | log.Printf("[%08X] network resource released", id) 60 | break 61 | default: 62 | return fmt.Errorf("unsupported network mode %d", config.NetworkMode) 63 | } 64 | } 65 | { 66 | //delete guest config 67 | var respChan = make(chan error) 68 | executor.InstanceModule.DeleteInstance(instanceID, respChan) 69 | err := <- respChan 70 | if err != nil{ 71 | log.Printf("[%08X] delete instance fail: %s", id, err) 72 | return executor.ResponseToFail(request.GetSender(), resp, err) 73 | } 74 | log.Printf("[%08X] instance deleted", id) 75 | } 76 | { 77 | //delete volumes 78 | switch config.StorageMode { 79 | case service.StorageModeLocal: 80 | { 81 | var respChan = make(chan error) 82 | executor.StorageModule.DeleteVolumes(instanceID, respChan) 83 | err := <- respChan 84 | if err != nil{ 85 | log.Printf("[%08X] delete volumes fail: %s", id, err) 86 | return executor.ResponseToFail(request.GetSender(), resp, err) 87 | } 88 | log.Printf("[%08X] disk volumes deleted", id) 89 | } 90 | default: 91 | return fmt.Errorf("unsupported storage mode %d", config.StorageMode) 92 | } 93 | } 94 | resp.SetSuccess(true) 95 | log.Printf("[%08X] delete finish, all resource released", id) 96 | if err = executor.Sender.SendMessage(resp, request.GetSender());err != nil{ 97 | 
log.Printf("[%08X] warning: send response fail: %s", id, err.Error()) 98 | return err 99 | } 100 | event, _ := framework.CreateJsonMessage(framework.GuestDeletedEvent) 101 | event.SetFromSession(id) 102 | event.SetString(framework.ParamKeyInstance, instanceID) 103 | if err = executor.Sender.SendMessage(event, request.GetSender()); err != nil{ 104 | log.Printf("[%08X] warning: notify instance deleted fail: %s", id, err.Error()) 105 | return err 106 | } 107 | return nil 108 | } 109 | 110 | func (executor *DeleteInstanceExecutor)ResponseToFail(target string, resp framework.Message, err error) error{ 111 | resp.SetSuccess(false) 112 | resp.SetError(err.Error()) 113 | return executor.Sender.SendMessage(resp, target) 114 | } 115 | -------------------------------------------------------------------------------- /src/task/delete_snapshot.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/pkg/errors" 6 | "github.com/project-nano/cell/service" 7 | "github.com/project-nano/framework" 8 | "log" 9 | "time" 10 | ) 11 | 12 | type DeleteSnapshotExecutor struct { 13 | Sender framework.MessageSender 14 | InstanceModule service.InstanceModule 15 | StorageModule service.StorageModule 16 | } 17 | 18 | func (executor *DeleteSnapshotExecutor) Execute(id framework.SessionID, request framework.Message, 19 | incoming chan framework.Message, terminate chan bool) (err error) { 20 | var instanceID string 21 | var snapshot string 22 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil { 23 | return err 24 | } 25 | if snapshot, err = request.GetString(framework.ParamKeyName); err != nil { 26 | return err 27 | } 28 | 29 | log.Printf("[%08X] recv delete snapshot '%s' of guest '%s' from %s.[%08X]", 30 | id, snapshot, instanceID, request.GetSender(), request.GetFromSession()) 31 | resp, _ := framework.CreateJsonMessage(framework.DeleteSnapshotResponse) 32 | resp.SetSuccess(false) 33 | resp.SetFromSession(id) 34 | resp.SetToSession(request.GetFromSession()) 35 | { 36 | var respChan = make(chan service.InstanceResult, 1) 37 | executor.InstanceModule.GetInstanceStatus(instanceID, respChan) 38 | var result = <-respChan 39 | if result.Error != nil { 40 | err = result.Error 41 | log.Printf("[%08X] get instance fail: %s", id, err.Error()) 42 | resp.SetError(err.Error()) 43 | return executor.Sender.SendMessage(resp, request.GetSender()) 44 | } 45 | 46 | err = func(instance service.InstanceStatus) (err error) { 47 | if !instance.Created { 48 | err = fmt.Errorf("instance '%s' not created", instanceID) 49 | return 50 | } 51 | //todo: allow operating on branch snapshots 52 | if instance.Running { 53 | err = errors.New("live snapshot not supported yes, shutdown instance first") 54 | return 55 | } 56 | return nil 57 | }(result.Instance) 58 | if err != nil { 59 | log.Printf("[%08X] check instance fail: %s", id, err.Error()) 60 | resp.SetError(err.Error()) 61 | return executor.Sender.SendMessage(resp, request.GetSender()) 62 | } 63 | } 64 | { 65 | var respChan = make(chan error, 1) 66 | executor.StorageModule.DeleteSnapshot(instanceID, snapshot, respChan) 67 | var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 68 | select { 69 | case <-timer.C: 70 | err = errors.New("request timeout") 71 | log.Printf("[%08X] delete snapshot timeout", id) 72 | resp.SetError(err.Error()) 73 | return executor.Sender.SendMessage(resp, request.GetSender()) 74 | case err = <-respChan: 75 | if err != nil { 76 | 
log.Printf("[%08X] delete snapshot fail: %s", id, err.Error()) 77 | resp.SetError(err.Error()) 78 | } else { 79 | log.Printf("[%08X] snapshot '%s' deleted from guest '%s'", id, snapshot, instanceID) 80 | resp.SetSuccess(true) 81 | } 82 | return executor.Sender.SendMessage(resp, request.GetSender()) 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/task/detach_instance.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type DetachInstanceExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | StorageModule service.StorageModule 13 | NetworkModule service.NetworkModule 14 | } 15 | 16 | func (executor *DetachInstanceExecutor) Execute(id framework.SessionID, request framework.Message, 17 | incoming chan framework.Message, terminate chan bool) (err error) { 18 | resp, _ := framework.CreateJsonMessage(framework.DetachInstanceResponse) 19 | resp.SetSuccess(false) 20 | resp.SetFromSession(id) 21 | resp.SetToSession(request.GetFromSession()) 22 | 23 | idList, err := request.GetStringArray(framework.ParamKeyInstance) 24 | if err != nil { 25 | log.Printf("[%08X] recv detach instance request from %s.[%08X] but get target intance fail: %s", 26 | id, request.GetSender(), request.GetFromSession(), err.Error()) 27 | resp.SetError(err.Error()) 28 | return executor.Sender.SendMessage(resp, request.GetSender()) 29 | } 30 | var count = len(idList) 31 | if 0 == count{ 32 | log.Printf("[%08X] recv purge all instances request from %s.[%08X]", id, request.GetSender(), request.GetFromSession()) 33 | }else{ 34 | log.Printf("[%08X] recv detach %d instance(s) request from %s.[%08X]", id, count, request.GetSender(), request.GetFromSession()) 35 | } 36 | 37 | var respChan = make(chan error, 1) 38 | executor.NetworkModule.DetachInstances(idList, respChan) 39 | err = <-respChan 40 | if err != nil { 41 | resp.SetError(err.Error()) 42 | log.Printf("[%08X] detach network resource fail: %s", id, err.Error()) 43 | return executor.Sender.SendMessage(resp, request.GetSender()) 44 | } 45 | executor.StorageModule.DetachVolumeGroup(idList, respChan) 46 | err = <-respChan 47 | if err != nil { 48 | resp.SetError(err.Error()) 49 | log.Printf("[%08X] detach storage volumes fail: %s", id, err.Error()) 50 | return executor.Sender.SendMessage(resp, request.GetSender()) 51 | } 52 | executor.InstanceModule.DetachInstances(idList, respChan) 53 | err = <-respChan 54 | if err != nil { 55 | resp.SetError(err.Error()) 56 | log.Printf("[%08X] detach instances fail: %s", id, err.Error()) 57 | return executor.Sender.SendMessage(resp, request.GetSender()) 58 | } 59 | resp.SetSuccess(true) 60 | if 0 == count{ 61 | log.Printf("[%08X] all instance(s) purgeed", id) 62 | }else{ 63 | log.Printf("[%08X] %d instance(s) detached", id, count) 64 | } 65 | return executor.Sender.SendMessage(resp, request.GetSender()) 66 | } -------------------------------------------------------------------------------- /src/task/eject_media.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type EjectMediaCoreExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | } 13 | 14 | func 
(executor *EjectMediaCoreExecutor)Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) error { 16 | instanceID, err := request.GetString(framework.ParamKeyInstance) 17 | if err != nil { 18 | return err 19 | } 20 | log.Printf("[%08X] request eject media from '%s' from %s.[%08X]", id, instanceID, 21 | request.GetSender(), request.GetFromSession()) 22 | 23 | resp, _ := framework.CreateJsonMessage(framework.EjectMediaResponse) 24 | resp.SetToSession(request.GetFromSession()) 25 | resp.SetFromSession(id) 26 | resp.SetSuccess(false) 27 | 28 | var respChan = make(chan error, 1) 29 | executor.InstanceModule.DetachMedia(instanceID, respChan) 30 | err = <- respChan 31 | if err != nil{ 32 | log.Printf("[%08X] eject media fail: %s", id, err.Error()) 33 | resp.SetError(err.Error()) 34 | }else{ 35 | log.Printf("[%08X] instance media ejected", id) 36 | resp.SetSuccess(true) 37 | { 38 | //notify event 39 | event, _ := framework.CreateJsonMessage(framework.MediaDetachedEvent) 40 | event.SetFromSession(id) 41 | event.SetString(framework.ParamKeyInstance, instanceID) 42 | executor.Sender.SendMessage(event, request.GetSender()) 43 | } 44 | } 45 | return executor.Sender.SendMessage(resp, request.GetSender()) 46 | } 47 | -------------------------------------------------------------------------------- /src/task/get_auth.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "log" 6 | "github.com/project-nano/cell/service" 7 | ) 8 | 9 | type GetGuestPasswordExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | } 13 | 14 | func (executor *GetGuestPasswordExecutor) Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) (err error) { 16 | guestID, err := request.GetString(framework.ParamKeyGuest) 17 | if err != nil{ 18 | return err 19 | } 20 | 21 | //log.Printf("[%08X] request get password of '%s' from %s.[%08X]", id, guestID, 22 | // request.GetSender(), request.GetFromSession()) 23 | 24 | var respChan = make(chan service.InstanceResult) 25 | executor.InstanceModule.GetGuestAuth(guestID, respChan) 26 | 27 | resp, _ := framework.CreateJsonMessage(framework.GetAuthResponse) 28 | resp.SetFromSession(id) 29 | resp.SetToSession(request.GetFromSession()) 30 | resp.SetSuccess(false) 31 | 32 | result := <- respChan 33 | if result.Error != nil{ 34 | resp.SetError(result.Error.Error()) 35 | log.Printf("[%08X] get password fail: %s", id, result.Error.Error()) 36 | }else{ 37 | resp.SetSuccess(true) 38 | resp.SetString(framework.ParamKeyUser, result.User) 39 | resp.SetString(framework.ParamKeySecret, result.Password) 40 | 41 | } 42 | return executor.Sender.SendMessage(resp, request.GetSender()) 43 | } -------------------------------------------------------------------------------- /src/task/get_cell_info.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type GetCellInfoExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | StorageModule service.StorageModule 13 | NetworkModule service.NetworkModule 14 | 15 | } 16 | 17 | func (executor *GetCellInfoExecutor) Execute(id framework.SessionID, request framework.Message, 18 | incoming 
chan framework.Message, terminate chan bool) (err error) { 19 | 20 | //todo: add instance/network info 21 | resp, _ := framework.CreateJsonMessage(framework.GetComputePoolCellResponse) 22 | resp.SetToSession(request.GetFromSession()) 23 | resp.SetFromSession(id) 24 | resp.SetSuccess(false) 25 | 26 | { 27 | //storage 28 | var respChan = make(chan service.StorageResult, 1) 29 | executor.StorageModule.GetAttachDevices(respChan) 30 | var result = <- respChan 31 | if result.Error != nil{ 32 | err = result.Error 33 | log.Printf("[%08X] fetch attach device fail: %s", id, err.Error()) 34 | resp.SetError(err.Error()) 35 | return executor.Sender.SendMessage(resp, request.GetSender()) 36 | } 37 | var names, errMessages []string 38 | var attached []uint64 39 | for _, device := range result.Devices{ 40 | names = append(names, device.Name) 41 | errMessages = append(errMessages, device.Error) 42 | if device.Attached{ 43 | attached = append(attached, 1) 44 | }else{ 45 | attached = append(attached, 0) 46 | } 47 | } 48 | resp.SetStringArray(framework.ParamKeyStorage, names) 49 | resp.SetStringArray(framework.ParamKeyError, errMessages) 50 | resp.SetUIntArray(framework.ParamKeyAttach, attached) 51 | log.Printf("[%08X] %d device(s) available", id, len(names)) 52 | } 53 | resp.SetSuccess(true) 54 | return executor.Sender.SendMessage(resp, request.GetSender()) 55 | } -------------------------------------------------------------------------------- /src/task/get_instance_config.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type GetInstanceConfigExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | } 13 | 14 | func (executor *GetInstanceConfigExecutor) Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) (err error) { 16 | var instanceID string 17 | instanceID, err = request.GetString(framework.ParamKeyInstance) 18 | if err != nil{ 19 | return err 20 | } 21 | log.Printf("[%08X] request get config of instance '%s' from %s.[%08X]", 22 | id, instanceID, request.GetSender(), request.GetFromSession()) 23 | var respChan = make(chan service.InstanceResult) 24 | executor.InstanceModule.GetInstanceConfig(instanceID, respChan) 25 | 26 | resp, _ := framework.CreateJsonMessage(framework.GetGuestResponse) 27 | resp.SetFromSession(id) 28 | resp.SetToSession(request.GetFromSession()) 29 | 30 | result := <- respChan 31 | if result.Error != nil{ 32 | resp.SetSuccess(false) 33 | resp.SetError(result.Error.Error()) 34 | log.Printf("[%08X] get instance status fail: %s", id, result.Error.Error()) 35 | return executor.Sender.SendMessage(resp, request.GetSender()) 36 | } 37 | var c = result.Instance.GuestConfig 38 | resp.SetSuccess(true) 39 | c.Marshal(resp) 40 | 41 | log.Printf("[%08X] query instance config success", id) 42 | return executor.Sender.SendMessage(resp, request.GetSender()) 43 | } 44 | -------------------------------------------------------------------------------- /src/task/get_instance_status.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type GetInstanceStatusExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 
| } 13 | 14 | func (executor *GetInstanceStatusExecutor) Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) (err error) { 16 | var instanceID string 17 | instanceID, err = request.GetString(framework.ParamKeyInstance) 18 | if err != nil{ 19 | return err 20 | } 21 | //log.Printf("[%08X] request get status of instance '%s' from %s.[%08X]", 22 | // id, instanceID, request.GetSender(), request.GetFromSession()) 23 | var respChan = make(chan service.InstanceResult) 24 | executor.InstanceModule.GetInstanceStatus(instanceID, respChan) 25 | 26 | resp, _ := framework.CreateJsonMessage(framework.GetInstanceStatusResponse) 27 | resp.SetFromSession(id) 28 | resp.SetToSession(request.GetFromSession()) 29 | 30 | result := <- respChan 31 | if result.Error != nil{ 32 | resp.SetSuccess(false) 33 | resp.SetError(result.Error.Error()) 34 | log.Printf("[%08X] get instance status fail: %s", id, result.Error.Error()) 35 | return executor.Sender.SendMessage(resp, request.GetSender()) 36 | } 37 | var s = result.Instance 38 | resp.SetSuccess(true) 39 | s.Marshal(resp) 40 | //log.Printf("[%08X] query instance status success", id) 41 | return executor.Sender.SendMessage(resp, request.GetSender()) 42 | } -------------------------------------------------------------------------------- /src/task/get_security_policy.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | ) 9 | 10 | type GetSecurityPolicyExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | } 14 | 15 | func (executor *GetSecurityPolicyExecutor) Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) (err error) { 17 | var instanceID string 18 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil{ 19 | err = fmt.Errorf("get instance id fail: %s", err.Error()) 20 | return 21 | } 22 | 23 | resp, _ := framework.CreateJsonMessage(framework.GetGuestRuleResponse) 24 | resp.SetFromSession(id) 25 | resp.SetToSession(request.GetFromSession()) 26 | resp.SetSuccess(false) 27 | 28 | var respChan = make(chan service.InstanceResult, 1) 29 | executor.InstanceModule.GetSecurityPolicy(instanceID, respChan) 30 | var result = <- respChan 31 | if nil != result.Error{ 32 | err = result.Error 33 | log.Printf("[%08X] get security policy of instance '%s' fail: %s", 34 | id, instanceID, err.Error()) 35 | resp.SetError(err.Error()) 36 | }else{ 37 | var policy = result.Policy 38 | var fromIP, toIP, toPort, protocols, actions []uint64 39 | for index, rule := range policy.Rules{ 40 | fromIP = append(fromIP, uint64(service.IPv4ToUInt32(rule.SourceAddress))) 41 | toIP = append(toIP, uint64(service.IPv4ToUInt32(rule.TargetAddress))) 42 | toPort = append(toPort, uint64(rule.TargetPort)) 43 | switch rule.Protocol { 44 | case service.PolicyRuleProtocolTCP: 45 | protocols = append(protocols, uint64(service.PolicyRuleProtocolIndexTCP)) 46 | case service.PolicyRuleProtocolUDP: 47 | protocols = append(protocols, uint64(service.PolicyRuleProtocolIndexUDP)) 48 | case service.PolicyRuleProtocolICMP: 49 | protocols = append(protocols, uint64(service.PolicyRuleProtocolIndexICMP)) 50 | default: 51 | log.Printf("[%08X] warning: invalid protocol %s on %dth security rule of instance '%s'", 52 | id, rule.Protocol, index, instanceID) 53 
| continue 54 | } 55 | if rule.Accept{ 56 | actions = append(actions, service.PolicyRuleActionAccept) 57 | }else{ 58 | actions = append(actions, service.PolicyRuleActionReject) 59 | } 60 | } 61 | if policy.Accept{ 62 | actions = append(actions, service.PolicyRuleActionAccept) 63 | log.Printf("[%08X] %d security rule(s) available for instance '%s', accept by default", 64 | id, len(toPort), instanceID) 65 | }else{ 66 | actions = append(actions, service.PolicyRuleActionReject) 67 | log.Printf("[%08X] %d security rule(s) available for instance '%s', reject by default", 68 | id, len(toPort), instanceID) 69 | } 70 | resp.SetUIntArray(framework.ParamKeyFrom, fromIP) 71 | resp.SetUIntArray(framework.ParamKeyTo, toIP) 72 | resp.SetUIntArray(framework.ParamKeyPort, toPort) 73 | resp.SetUIntArray(framework.ParamKeyProtocol, protocols) 74 | resp.SetUIntArray(framework.ParamKeyAction, actions) 75 | resp.SetSuccess(true) 76 | } 77 | return executor.Sender.SendMessage(resp, request.GetSender()) 78 | } 79 | -------------------------------------------------------------------------------- /src/task/get_snapshot.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "errors" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | "time" 9 | ) 10 | 11 | type GetSnapshotExecutor struct { 12 | Sender framework.MessageSender 13 | StorageModule service.StorageModule 14 | } 15 | 16 | func (executor *GetSnapshotExecutor) Execute(id framework.SessionID, request framework.Message, 17 | incoming chan framework.Message, terminate chan bool) (err error) { 18 | var instanceID, snapshotName string 19 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil { 20 | return err 21 | } 22 | if snapshotName, err = request.GetString(framework.ParamKeyName); err != nil { 23 | return err 24 | } 25 | 26 | log.Printf("[%08X] recv get snapshot '%s' for guest '%s' from %s.[%08X]", 27 | id, snapshotName, instanceID, request.GetSender(), request.GetFromSession()) 28 | resp, _ := framework.CreateJsonMessage(framework.GetSnapshotResponse) 29 | resp.SetSuccess(false) 30 | resp.SetFromSession(id) 31 | resp.SetToSession(request.GetFromSession()) 32 | { 33 | var respChan = make(chan service.StorageResult, 1) 34 | executor.StorageModule.GetSnapshot(instanceID, snapshotName, respChan) 35 | var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 36 | select { 37 | case <-timer.C: 38 | err = errors.New("request timeout") 39 | log.Printf("[%08X] get snapshot timeout", id) 40 | resp.SetError(err.Error()) 41 | return executor.Sender.SendMessage(resp, request.GetSender()) 42 | case result := <-respChan: 43 | if result.Error != nil { 44 | err = result.Error 45 | log.Printf("[%08X] get snapshot fail: %s", id, err.Error()) 46 | resp.SetError(err.Error()) 47 | } else { 48 | var snapshot = result.Snapshot 49 | resp.SetBoolean(framework.ParamKeyStatus, snapshot.Running) 50 | resp.SetString(framework.ParamKeyDescription, snapshot.Description) 51 | resp.SetString(framework.ParamKeyCreate, snapshot.CreateTime) 52 | resp.SetSuccess(true) 53 | } 54 | return executor.Sender.SendMessage(resp, request.GetSender()) 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/task/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/project-nano/cell/task 2 | 3 | go 1.13 4 | 5 | replace ( 6 | 
github.com/project-nano/cell/service => ../service 7 | github.com/project-nano/framework => ../../../framework 8 | ) 9 | 10 | require ( 11 | github.com/pkg/errors v0.9.1 12 | github.com/project-nano/cell/service v0.0.0-00010101000000-000000000000 13 | github.com/project-nano/framework v1.0.9 14 | ) 15 | -------------------------------------------------------------------------------- /src/task/handle_address_pool_changed.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/framework" 6 | "github.com/project-nano/cell/service" 7 | "log" 8 | "strings" 9 | ) 10 | 11 | type HandleAddressPoolChangedExecutor struct { 12 | InstanceModule service.InstanceModule 13 | NetworkModule service.NetworkModule 14 | } 15 | 16 | func (executor *HandleAddressPoolChangedExecutor) Execute(id framework.SessionID, request framework.Message, 17 | incoming chan framework.Message, terminate chan bool) (err error) { 18 | var allocationMode, gateway string 19 | var dns []string 20 | if gateway, err = request.GetString(framework.ParamKeyGateway); err != nil{ 21 | return err 22 | } 23 | if dns, err = request.GetStringArray(framework.ParamKeyServer); err != nil{ 24 | return 25 | } 26 | if allocationMode, err = request.GetString(framework.ParamKeyMode); err != nil{ 27 | err = fmt.Errorf("get allocation mode fail: %s", err.Error()) 28 | return 29 | } 30 | switch allocationMode { 31 | case service.AddressAllocationNone: 32 | case service.AddressAllocationDHCP: 33 | case service.AddressAllocationCloudInit: 34 | break 35 | default: 36 | err = fmt.Errorf("invalid allocation mode :%s", allocationMode) 37 | return 38 | } 39 | var respChan = make(chan error, 1) 40 | executor.NetworkModule.UpdateAddressAllocation(gateway, dns, allocationMode, respChan) 41 | err = <- respChan 42 | if err != nil{ 43 | log.Printf("[%08X] update address allocation fail when address pool changed from %s.[%08X]: %s", 44 | id, request.GetSender(), request.GetFromSession(), err.Error()) 45 | }else{ 46 | log.Printf("[%08X] address allocation updated to mode %s, gateway: %s, DNS: %s", 47 | id, allocationMode, gateway, strings.Join(dns, "/")) 48 | if service.AddressAllocationNone != allocationMode{ 49 | executor.InstanceModule.SyncAddressAllocation(allocationMode) 50 | } 51 | } 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /src/task/handle_compute_cell_removed.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type HandleComputeCellRemovedExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | StorageModule service.StorageModule 13 | } 14 | 15 | func (executor *HandleComputeCellRemovedExecutor) Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) (err error) { 17 | log.Printf("[%08X] recv cell removed from %s", id, request.GetSender()) 18 | var respChan = make(chan error, 1) 19 | { 20 | //detach instance module 21 | executor.InstanceModule.DetachStorage(respChan) 22 | err = <- respChan 23 | if err != nil{ 24 | log.Printf("[%08X] detach instance module fail: %s", id, err.Error()) 25 | return nil 26 | } 27 | log.Printf("[%08X] instance module detached", id) 28 | } 29 | { 30 | //detach storage module 31 | 
executor.StorageModule.DetachStorage(respChan) 32 | err = <- respChan 33 | if err != nil{ 34 | log.Printf("[%08X] detach storage module fail: %s", id, err.Error()) 35 | return nil 36 | } 37 | log.Printf("[%08X] storage module detached", id) 38 | } 39 | return nil 40 | } -------------------------------------------------------------------------------- /src/task/handle_compute_pool_ready.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/framework" 6 | "github.com/project-nano/cell/service" 7 | "log" 8 | ) 9 | 10 | type HandleComputePoolReadyExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | StorageModule service.StorageModule 14 | NetworkModule service.NetworkModule 15 | } 16 | 17 | func (executor *HandleComputePoolReadyExecutor) Execute(id framework.SessionID, request framework.Message, 18 | incoming chan framework.Message, terminate chan bool) (err error) { 19 | poolName, err := request.GetString(framework.ParamKeyPool) 20 | if err != nil{ 21 | return err 22 | } 23 | storageName, err := request.GetString(framework.ParamKeyStorage) 24 | if err != nil{ 25 | return 26 | } 27 | networkName, err := request.GetString(framework.ParamKeyNetwork) 28 | if err != nil{ 29 | return 30 | } 31 | resp, _ := framework.CreateJsonMessage(framework.ComputeCellReadyEvent) 32 | resp.SetFromSession(id) 33 | resp.SetToSession(request.GetFromSession()) 34 | resp.SetSuccess(false) 35 | 36 | if "" == storageName{ 37 | log.Printf("[%08X] recv compute pool '%s' ready from %s", id, poolName, request.GetSender()) 38 | //try detach 39 | var respChan = make(chan error, 1) 40 | executor.StorageModule.DetachStorage(respChan) 41 | err = <- respChan 42 | if err != nil{ 43 | resp.SetError(err.Error()) 44 | log.Printf("[%08X] detach storage fail: %s", id, err.Error()) 45 | return executor.Sender.SendMessage(resp, request.GetSender()) 46 | } 47 | }else{ 48 | var protocol, host, target string 49 | if protocol, err = request.GetString(framework.ParamKeyType); err != nil{ 50 | return 51 | } 52 | if host, err = request.GetString(framework.ParamKeyHost); err != nil{ 53 | return 54 | } 55 | if target, err = request.GetString(framework.ParamKeyTarget); err != nil{ 56 | return 57 | } 58 | log.Printf("[%08X] recv compute pool '%s' ready using storage '%s' from %s", id, poolName, storageName, request.GetSender()) 59 | var storageURL string 60 | { 61 | var respChan = make(chan service.StorageResult, 1) 62 | executor.StorageModule.UsingStorage(storageName, protocol, host, target, respChan) 63 | var result = <- respChan 64 | if result.Error != nil{ 65 | err = result.Error 66 | resp.SetError(err.Error()) 67 | log.Printf("[%08X] using storage fail: %s", id, err.Error()) 68 | return executor.Sender.SendMessage(resp, request.GetSender()) 69 | } 70 | //storage ready 71 | storageURL = result.Path 72 | } 73 | { 74 | var respChan = make(chan error, 1) 75 | executor.InstanceModule.UsingStorage(storageName, storageURL, respChan) 76 | err = <- respChan 77 | if err != nil{ 78 | resp.SetError(err.Error()) 79 | log.Printf("[%08X] update storage URL of instance to '%s' fail: %s", id, storageURL, err.Error()) 80 | return executor.Sender.SendMessage(resp, request.GetSender()) 81 | } 82 | } 83 | 84 | } 85 | var allocationMode string 86 | if "" != networkName{ 87 | var gateway string 88 | var dns []string 89 | if gateway, err = request.GetString(framework.ParamKeyGateway); err != nil{ 90 | return 91 | } 
92 | 93 | if dns, err = request.GetStringArray(framework.ParamKeyServer); err != nil{ 94 | return 95 | } 96 | if allocationMode, err = request.GetString(framework.ParamKeyMode); err != nil{ 97 | err = fmt.Errorf("get allocation mode fail: %s", err.Error()) 98 | return 99 | } 100 | switch allocationMode { 101 | case service.AddressAllocationNone: 102 | case service.AddressAllocationDHCP: 103 | case service.AddressAllocationCloudInit: 104 | break 105 | default: 106 | err = fmt.Errorf("invalid allocation mode :%s", allocationMode) 107 | return 108 | } 109 | var respChan = make(chan error, 1) 110 | executor.NetworkModule.UpdateAddressAllocation(gateway, dns, allocationMode, respChan) 111 | err = <- respChan 112 | if err != nil{ 113 | resp.SetError(err.Error()) 114 | log.Printf("[%08X] update address allocation fail: %s", id, err.Error()) 115 | return executor.Sender.SendMessage(resp, request.GetSender()) 116 | } 117 | } 118 | if service.AddressAllocationNone != allocationMode{ 119 | executor.InstanceModule.SyncAddressAllocation(allocationMode) 120 | } 121 | var respChan = make(chan []service.GuestConfig) 122 | executor.InstanceModule.GetAllInstance(respChan) 123 | allConfig := <- respChan 124 | var count = uint(len(allConfig)) 125 | 126 | 127 | resp.SetSuccess(true) 128 | resp.SetUInt(framework.ParamKeyCount, count) 129 | if 0 == count{ 130 | log.Printf("[%08X] no instance configured", id) 131 | return executor.Sender.SendMessage(resp, request.GetSender()) 132 | } 133 | 134 | var names, ids, users, groups, secrets, addresses, systems, createTime, internal, external, hardware []string 135 | var cores, options, enables, progress, status, monitors, memories, disks, diskCounts, cpuPriorities, ioLimits []uint64 136 | for _, config := range allConfig { 137 | names = append(names, config.Name) 138 | ids = append(ids, config.ID) 139 | users = append(users, config.User) 140 | groups = append(groups, config.Group) 141 | cores = append(cores, uint64(config.Cores)) 142 | if config.AutoStart{ 143 | options = append(options, 1) 144 | }else{ 145 | options = append(options, 0) 146 | } 147 | if config.Created{ 148 | enables = append(enables, 1) 149 | progress = append(progress, 0) 150 | }else{ 151 | enables = append(enables, 0) 152 | progress = append(progress, uint64(config.Progress)) 153 | } 154 | if config.Running{ 155 | status = append(status, service.InstanceStatusRunning) 156 | }else{ 157 | status = append(status, service.InstanceStatusStopped) 158 | } 159 | monitors = append(monitors, uint64(config.MonitorPort)) 160 | secrets = append(secrets, config.MonitorSecret) 161 | memories = append(memories, uint64(config.Memory)) 162 | var diskCount = len(config.Disks) 163 | diskCounts = append(diskCounts, uint64(diskCount)) 164 | for _, diskSize := range config.Disks{ 165 | disks = append(disks, diskSize) 166 | } 167 | addresses = append(addresses, config.NetworkAddress) 168 | var operatingSystem string 169 | if nil != config.Template{ 170 | operatingSystem = config.Template.OperatingSystem 171 | }else{ 172 | operatingSystem = config.System 173 | } 174 | systems = append(systems, operatingSystem) 175 | createTime = append(createTime, config.CreateTime) 176 | internal = append(internal, config.InternalAddress) 177 | external = append(external, config.ExternalAddress) 178 | hardware = append(hardware, config.HardwareAddress) 179 | cpuPriorities = append(cpuPriorities, uint64(config.CPUPriority)) 180 | ioLimits = append(ioLimits, []uint64{config.ReadSpeed, config.WriteSpeed, 181 | config.ReadIOPS, 
config.WriteIOPS, config.ReceiveSpeed, config.SendSpeed}...) 182 | } 183 | resp.SetStringArray(framework.ParamKeyName, names) 184 | resp.SetStringArray(framework.ParamKeyInstance, ids) 185 | resp.SetStringArray(framework.ParamKeyUser, users) 186 | resp.SetStringArray(framework.ParamKeyGroup, groups) 187 | resp.SetStringArray(framework.ParamKeySecret, secrets) 188 | resp.SetStringArray(framework.ParamKeyAddress, addresses) 189 | resp.SetStringArray(framework.ParamKeySystem, systems) 190 | resp.SetStringArray(framework.ParamKeyCreate, createTime) 191 | resp.SetStringArray(framework.ParamKeyInternal, internal) 192 | resp.SetStringArray(framework.ParamKeyExternal, external) 193 | resp.SetStringArray(framework.ParamKeyHardware, hardware) 194 | resp.SetUIntArray(framework.ParamKeyCore, cores) 195 | resp.SetUIntArray(framework.ParamKeyOption, options) 196 | resp.SetUIntArray(framework.ParamKeyEnable, enables) 197 | resp.SetUIntArray(framework.ParamKeyProgress, progress) 198 | resp.SetUIntArray(framework.ParamKeyStatus, status) 199 | resp.SetUIntArray(framework.ParamKeyMonitor, monitors) 200 | resp.SetUIntArray(framework.ParamKeyMemory, memories) 201 | resp.SetUIntArray(framework.ParamKeyCount, diskCounts) 202 | resp.SetUIntArray(framework.ParamKeyDisk, disks) 203 | resp.SetUIntArray(framework.ParamKeyPriority, cpuPriorities) 204 | resp.SetUIntArray(framework.ParamKeyLimit, ioLimits) 205 | log.Printf("[%08X] %d instance config(s) reported", id, count) 206 | return executor.Sender.SendMessage(resp, request.GetSender()) 207 | } 208 | -------------------------------------------------------------------------------- /src/task/insert_media.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type InsertMediaCoreExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | } 13 | 14 | func (executor *InsertMediaCoreExecutor) Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) (err error) { 16 | var instanceID, mediaSource, host string 17 | var port uint 18 | instanceID, err = request.GetString(framework.ParamKeyInstance) 19 | if err != nil { 20 | return err 21 | } 22 | if mediaSource, err = request.GetString(framework.ParamKeyMedia); err != nil{ 23 | return err 24 | } 25 | if host, err = request.GetString(framework.ParamKeyHost); err != nil{ 26 | return err 27 | } 28 | if port, err = request.GetUInt(framework.ParamKeyPort); err != nil{ 29 | return err 30 | } 31 | 32 | log.Printf("[%08X] request insert media '%s' into '%s' from %s.[%08X]", id, mediaSource, instanceID, 33 | request.GetSender(), request.GetFromSession()) 34 | 35 | resp, _ := framework.CreateJsonMessage(framework.InsertMediaResponse) 36 | resp.SetToSession(request.GetFromSession()) 37 | resp.SetFromSession(id) 38 | resp.SetSuccess(false) 39 | 40 | var respChan = make(chan error, 1) 41 | var media = service.InstanceMediaConfig{Mode: service.MediaModeHTTPS, ID:mediaSource, Host:host, Port:port} 42 | executor.InstanceModule.AttachMedia(instanceID, media, respChan) 43 | err = <- respChan 44 | if err != nil{ 45 | log.Printf("[%08X] insert media fail: %s", id, err.Error()) 46 | resp.SetError(err.Error()) 47 | }else{ 48 | log.Printf("[%08X] instance media inserted", id) 49 | resp.SetSuccess(true) 50 | { 51 | //notify event 52 | event, _ := 
framework.CreateJsonMessage(framework.MediaAttachedEvent) 53 | event.SetFromSession(id) 54 | event.SetString(framework.ParamKeyInstance, instanceID) 55 | event.SetString(framework.ParamKeyMedia, mediaSource) 56 | executor.Sender.SendMessage(event, request.GetSender()) 57 | } 58 | } 59 | return executor.Sender.SendMessage(resp, request.GetSender()) 60 | } 61 | -------------------------------------------------------------------------------- /src/task/modify_auth.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/cell/service" 5 | "github.com/project-nano/framework" 6 | "log" 7 | "math/rand" 8 | ) 9 | 10 | type ModifyGuestPasswordExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | RandomGenerator *rand.Rand 14 | } 15 | 16 | func (executor *ModifyGuestPasswordExecutor) Execute(id framework.SessionID, request framework.Message, 17 | incoming chan framework.Message, terminate chan bool) (err error) { 18 | const ( 19 | PasswordLength = 10 20 | ) 21 | var guestID, user, password string 22 | 23 | if guestID, err = request.GetString(framework.ParamKeyGuest);err != nil{ 24 | return err 25 | } 26 | if user, err = request.GetString(framework.ParamKeyUser);err != nil{ 27 | return err 28 | } 29 | if password, err = request.GetString(framework.ParamKeySecret);err != nil{ 30 | return err 31 | } 32 | 33 | if "" == password{ 34 | password = executor.generatePassword(PasswordLength) 35 | log.Printf("[%08X] new password '%s' generated for modify auth", id, password) 36 | } 37 | 38 | var respChan = make(chan service.InstanceResult) 39 | executor.InstanceModule.ModifyGuestAuth(guestID, password, user, respChan) 40 | 41 | resp, _ := framework.CreateJsonMessage(framework.ModifyAuthResponse) 42 | resp.SetFromSession(id) 43 | resp.SetToSession(request.GetFromSession()) 44 | resp.SetSuccess(false) 45 | 46 | result := <- respChan 47 | if result.Error != nil{ 48 | resp.SetError(result.Error.Error()) 49 | log.Printf("[%08X] modify password fail: %s", id, result.Error.Error()) 50 | }else{ 51 | resp.SetSuccess(true) 52 | resp.SetString(framework.ParamKeyUser, result.User) 53 | resp.SetString(framework.ParamKeySecret, result.Password) 54 | } 55 | return executor.Sender.SendMessage(resp, request.GetSender()) 56 | } 57 | 58 | func (executor *ModifyGuestPasswordExecutor)generatePassword(length int) (string){ 59 | const ( 60 | Letters = "~!@#$%^&*()_[]-=+0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 61 | ) 62 | var result = make([]byte, length) 63 | var n = len(Letters) 64 | for i := 0 ; i < length; i++{ 65 | result[i] = Letters[executor.RandomGenerator.Intn(n)] 66 | } 67 | return string(result) 68 | } 69 | -------------------------------------------------------------------------------- /src/task/modify_auto_start.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | ) 9 | 10 | type ModifyAutoStartExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | } 14 | 15 | func (executor *ModifyAutoStartExecutor)Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) (err error) { 17 | var guestID string 18 | var enable bool 19 | if guestID, err = request.GetString(framework.ParamKeyGuest); err != 
nil { 20 | err = fmt.Errorf("get guest id fail: %s", err.Error()) 21 | return 22 | } 23 | if enable, err = request.GetBoolean(framework.ParamKeyEnable); err != nil{ 24 | err = fmt.Errorf("get enable flag fail: %s", err.Error()) 25 | return 26 | } 27 | resp, _ := framework.CreateJsonMessage(framework.ModifyAutoStartResponse) 28 | resp.SetToSession(request.GetFromSession()) 29 | resp.SetFromSession(id) 30 | resp.SetSuccess(false) 31 | var respChan = make(chan error, 1) 32 | executor.InstanceModule.ModifyAutoStart(guestID, enable, respChan) 33 | if err = <- respChan; err != nil{ 34 | log.Printf("[%08X] modify auto start fail: %s", id, err.Error()) 35 | resp.SetError(err.Error()) 36 | }else{ 37 | if enable{ 38 | log.Printf("[%08X] auto start of guest '%s' enabled", id, guestID) 39 | }else{ 40 | log.Printf("[%08X] auto start of guest '%s' disabled", id, guestID) 41 | } 42 | resp.SetSuccess(true) 43 | } 44 | return executor.Sender.SendMessage(resp, request.GetSender()) 45 | } 46 | -------------------------------------------------------------------------------- /src/task/modify_core.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "log" 6 | "github.com/project-nano/cell/service" 7 | ) 8 | 9 | type ModifyGuestCoreExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | } 13 | 14 | func (executor *ModifyGuestCoreExecutor)Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) error { 16 | guestID, err := request.GetString(framework.ParamKeyGuest) 17 | if err != nil { 18 | return err 19 | } 20 | cores, err := request.GetUInt(framework.ParamKeyCore) 21 | if err != nil { 22 | return err 23 | } 24 | log.Printf("[%08X] request modifying cores of '%s' from %s.[%08X]", id, guestID, 25 | request.GetSender(), request.GetFromSession()) 26 | 27 | resp, _ := framework.CreateJsonMessage(framework.ModifyCoreResponse) 28 | resp.SetToSession(request.GetFromSession()) 29 | resp.SetFromSession(id) 30 | resp.SetSuccess(false) 31 | var respChan = make(chan error) 32 | executor.InstanceModule.ModifyGuestCore(guestID, cores, respChan) 33 | err = <- respChan 34 | if err != nil{ 35 | log.Printf("[%08X] modify core fail: %s", id, err.Error()) 36 | resp.SetError(err.Error()) 37 | }else{ 38 | log.Printf("[%08X] cores of guest '%s' changed to %d", id, guestID, cores) 39 | resp.SetSuccess(true) 40 | } 41 | return executor.Sender.SendMessage(resp, request.GetSender()) 42 | } 43 | -------------------------------------------------------------------------------- /src/task/modify_cpu_priority.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type ModifyCPUPriorityExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | } 13 | 14 | func (executor *ModifyCPUPriorityExecutor)Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) error { 16 | guestID, err := request.GetString(framework.ParamKeyGuest) 17 | if err != nil { 18 | return err 19 | } 20 | priorityValue, err := request.GetUInt(framework.ParamKeyPriority) 21 | if err != nil { 22 | return err 23 | } 24 | log.Printf("[%08X] request changing CPU priority of guest '%s' to %d from 
%s.[%08X]", id, guestID, 25 | priorityValue, request.GetSender(), request.GetFromSession()) 26 | 27 | resp, _ := framework.CreateJsonMessage(framework.ModifyPriorityResponse) 28 | resp.SetToSession(request.GetFromSession()) 29 | resp.SetFromSession(id) 30 | resp.SetSuccess(false) 31 | var respChan = make(chan error, 1) 32 | executor.InstanceModule.ModifyCPUPriority(guestID, service.PriorityEnum(priorityValue), respChan) 33 | err = <- respChan 34 | if err != nil{ 35 | log.Printf("[%08X] modify CPU priority fail: %s", id, err.Error()) 36 | resp.SetError(err.Error()) 37 | }else{ 38 | log.Printf("[%08X] CPU priority of guest '%s' changed to %d", id, guestID, priorityValue) 39 | resp.SetSuccess(true) 40 | } 41 | return executor.Sender.SendMessage(resp, request.GetSender()) 42 | } 43 | -------------------------------------------------------------------------------- /src/task/modify_guest_name.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type ModifyGuestNameExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | } 13 | 14 | func (executor *ModifyGuestNameExecutor)Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) error { 16 | guestID, err := request.GetString(framework.ParamKeyGuest) 17 | if err != nil { 18 | return err 19 | } 20 | name, err := request.GetString(framework.ParamKeyName) 21 | if err != nil { 22 | return err 23 | } 24 | log.Printf("[%08X] request rename guest '%s' from %s.[%08X]", id, guestID, 25 | request.GetSender(), request.GetFromSession()) 26 | 27 | resp, _ := framework.CreateJsonMessage(framework.ModifyGuestNameResponse) 28 | resp.SetToSession(request.GetFromSession()) 29 | resp.SetFromSession(id) 30 | resp.SetSuccess(false) 31 | var respChan = make(chan error) 32 | executor.InstanceModule.ModifyGuestName(guestID, name, respChan) 33 | err = <- respChan 34 | if err != nil{ 35 | log.Printf("[%08X] rename guest fail: %s", id, err.Error()) 36 | resp.SetError(err.Error()) 37 | }else{ 38 | log.Printf("[%08X] guest '%s' renamed to %s", id, guestID, name) 39 | resp.SetSuccess(true) 40 | } 41 | return executor.Sender.SendMessage(resp, request.GetSender()) 42 | } 43 | -------------------------------------------------------------------------------- /src/task/modify_memory.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | ) 8 | 9 | type ModifyGuestMemoryExecutor struct { 10 | Sender framework.MessageSender 11 | InstanceModule service.InstanceModule 12 | } 13 | 14 | func (executor *ModifyGuestMemoryExecutor)Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) error { 16 | guestID, err := request.GetString(framework.ParamKeyGuest) 17 | if err != nil { 18 | return err 19 | } 20 | memory, err := request.GetUInt(framework.ParamKeyMemory) 21 | if err != nil { 22 | return err 23 | } 24 | log.Printf("[%08X] request modifying memory of '%s' from %s.[%08X]", id, guestID, 25 | request.GetSender(), request.GetFromSession()) 26 | 27 | resp, _ := framework.CreateJsonMessage(framework.ModifyMemoryResponse) 28 | resp.SetToSession(request.GetFromSession()) 29 | resp.SetFromSession(id) 30 | 
resp.SetSuccess(false) 31 | var respChan = make(chan error) 32 | executor.InstanceModule.ModifyGuestMemory(guestID, memory, respChan) 33 | err = <- respChan 34 | if err != nil{ 35 | log.Printf("[%08X] modify memory fail: %s", id, err.Error()) 36 | resp.SetError(err.Error()) 37 | }else{ 38 | log.Printf("[%08X] memory of guest '%s' changed to %d MB", id, guestID, memory / (1 << 20)) 39 | resp.SetSuccess(true) 40 | } 41 | return executor.Sender.SendMessage(resp, request.GetSender()) 42 | } 43 | 44 | -------------------------------------------------------------------------------- /src/task/modify_security_rule.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | ) 9 | 10 | type ModifySecurityRuleExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | } 14 | 15 | func (executor *ModifySecurityRuleExecutor) Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) (err error) { 17 | var instanceID string 18 | var index int 19 | var accept bool 20 | var fromIP, toIP, toPort, protocol uint 21 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil{ 22 | err = fmt.Errorf("get instance id fail: %s", err.Error()) 23 | return 24 | } 25 | if index, err = request.GetInt(framework.ParamKeyIndex); err != nil{ 26 | err = fmt.Errorf("get rule index fail: %s", err.Error()) 27 | return 28 | } 29 | if accept, err = request.GetBoolean(framework.ParamKeyAction); err != nil{ 30 | err = fmt.Errorf("get action fail: %s", err.Error()) 31 | return 32 | } 33 | if fromIP, err = request.GetUInt(framework.ParamKeyFrom); err != nil{ 34 | err = fmt.Errorf("get source address fail: %s", err.Error()) 35 | return 36 | } 37 | if toIP, err = request.GetUInt(framework.ParamKeyTo); err != nil{ 38 | err = fmt.Errorf("get target address fail: %s", err.Error()) 39 | return 40 | } 41 | if toPort, err = request.GetUInt(framework.ParamKeyPort); err != nil{ 42 | err = fmt.Errorf("get target port fail: %s", err.Error()) 43 | return 44 | }else if 0 == toPort || toPort > 0xFFFF{ 45 | err = fmt.Errorf("invalid target port %d", toPort) 46 | return 47 | } 48 | if protocol, err = request.GetUInt(framework.ParamKeyProtocol); err != nil{ 49 | err = fmt.Errorf("get protocol fail: %s", err.Error()) 50 | return 51 | } 52 | resp, _ := framework.CreateJsonMessage(framework.ModifyGuestRuleResponse) 53 | resp.SetFromSession(id) 54 | resp.SetToSession(request.GetFromSession()) 55 | resp.SetSuccess(false) 56 | 57 | var rule = service.SecurityPolicyRule{ 58 | Accept: accept, 59 | TargetPort: toPort, 60 | } 61 | 62 | switch protocol { 63 | case service.PolicyRuleProtocolIndexTCP: 64 | rule.Protocol = service.PolicyRuleProtocolTCP 65 | case service.PolicyRuleProtocolIndexUDP: 66 | rule.Protocol = service.PolicyRuleProtocolUDP 67 | case service.PolicyRuleProtocolIndexICMP: 68 | rule.Protocol = service.PolicyRuleProtocolICMP 69 | default: 70 | err = fmt.Errorf("invalid protocol %d for security rule", protocol) 71 | return 72 | } 73 | rule.SourceAddress = service.UInt32ToIPv4(uint32(fromIP)) 74 | rule.TargetAddress = service.UInt32ToIPv4(uint32(toIP)) 75 | 76 | var respChan = make(chan error, 1) 77 | executor.InstanceModule.ModifySecurityPolicyRule(instanceID, index, rule, respChan) 78 | err = <- respChan 79 | if nil != err{ 80 | log.Printf("[%08X] modify %dth 
security rule of instance '%s' fail: %s", 81 | id, index, instanceID, err.Error()) 82 | resp.SetError(err.Error()) 83 | }else{ 84 | if accept{ 85 | log.Printf("[%08X] %dth security rule of instance '%s' changed to accept protocol '%s' from '%s' to '%s:%d'", 86 | id, index, instanceID, rule.Protocol, rule.SourceAddress, rule.TargetAddress, rule.TargetPort) 87 | }else{ 88 | log.Printf("[%08X] %dth security rule of instance '%s' changed to reject protocol '%s' from '%s' to '%s:%d'", 89 | id, index, instanceID, rule.Protocol, rule.SourceAddress, rule.TargetAddress, rule.TargetPort) 90 | } 91 | resp.SetSuccess(true) 92 | } 93 | return executor.Sender.SendMessage(resp, request.GetSender()) 94 | } 95 | -------------------------------------------------------------------------------- /src/task/query_snapshot.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "errors" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | "time" 9 | ) 10 | 11 | type QuerySnapshotExecutor struct { 12 | Sender framework.MessageSender 13 | StorageModule service.StorageModule 14 | } 15 | 16 | func (executor *QuerySnapshotExecutor) Execute(id framework.SessionID, request framework.Message, 17 | incoming chan framework.Message, terminate chan bool) (err error) { 18 | var instanceID string 19 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil { 20 | return err 21 | } 22 | 23 | log.Printf("[%08X] recv query snapshots for guest '%s' from %s.[%08X]", 24 | id, instanceID, request.GetSender(), request.GetFromSession()) 25 | resp, _ := framework.CreateJsonMessage(framework.QuerySnapshotResponse) 26 | resp.SetSuccess(false) 27 | resp.SetFromSession(id) 28 | resp.SetToSession(request.GetFromSession()) 29 | { 30 | var respChan = make(chan service.StorageResult, 1) 31 | executor.StorageModule.QuerySnapshot(instanceID, respChan) 32 | var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 33 | select { 34 | case <-timer.C: 35 | err = errors.New("request timeout") 36 | log.Printf("[%08X] query snapshot timeout", id) 37 | resp.SetError(err.Error()) 38 | return executor.Sender.SendMessage(resp, request.GetSender()) 39 | case result := <-respChan: 40 | if result.Error != nil { 41 | err = result.Error 42 | log.Printf("[%08X] query snapshot fail: %s", id, err.Error()) 43 | resp.SetError(err.Error()) 44 | } else { 45 | var snapshotList = result.SnapshotList 46 | var names, backings []string 47 | var rootFlags, currentFlags []uint64 48 | for _, snapshot := range snapshotList { 49 | names = append(names, snapshot.Name) 50 | backings = append(backings, snapshot.Backing) 51 | if snapshot.IsRoot { 52 | rootFlags = append(rootFlags, 1) 53 | } else { 54 | rootFlags = append(rootFlags, 0) 55 | } 56 | if snapshot.IsCurrent { 57 | currentFlags = append(currentFlags, 1) 58 | } else { 59 | currentFlags = append(currentFlags, 0) 60 | } 61 | } 62 | resp.SetStringArray(framework.ParamKeyName, names) 63 | resp.SetStringArray(framework.ParamKeyPrevious, backings) 64 | resp.SetUIntArray(framework.ParamKeySource, rootFlags) 65 | resp.SetUIntArray(framework.ParamKeyCurrent, currentFlags) 66 | log.Printf("[%08X] %d snapshot(s) available for guest '%s'", id, len(snapshotList), instanceID) 67 | resp.SetSuccess(true) 68 | } 69 | return executor.Sender.SendMessage(resp, request.GetSender()) 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- 
/src/task/query_storage_paths.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/cell/service" 5 | "github.com/project-nano/framework" 6 | "log" 7 | ) 8 | 9 | type QueryStoragePathExecutor struct { 10 | Sender framework.MessageSender 11 | Storage service.StorageModule 12 | } 13 | 14 | func (executor *QueryStoragePathExecutor) Execute(id framework.SessionID, request framework.Message, 15 | incoming chan framework.Message, terminate chan bool) (err error) { 16 | var respChan = make(chan service.StorageResult, 1) 17 | executor.Storage.QueryStoragePaths(respChan) 18 | 19 | resp, _ := framework.CreateJsonMessage(framework.QueryCellStorageResponse) 20 | resp.SetSuccess(false) 21 | resp.SetFromSession(id) 22 | resp.SetToSession(request.GetFromSession()) 23 | 24 | var result = <- respChan 25 | if result.Error != nil{ 26 | err = result.Error 27 | resp.SetError(err.Error()) 28 | log.Printf("[%08X] query storage paths fail: %s", id, err.Error()) 29 | }else{ 30 | //parse result 31 | resp.SetSuccess(true) 32 | resp.SetUInt(framework.ParamKeyMode, uint(result.StorageMode)) 33 | resp.SetStringArray(framework.ParamKeySystem, result.SystemPaths) 34 | resp.SetStringArray(framework.ParamKeyData, result.DataPaths) 35 | } 36 | return executor.Sender.SendMessage(resp, request.GetSender()) 37 | } -------------------------------------------------------------------------------- /src/task/remove_security_rule.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | ) 9 | 10 | type RemoveSecurityRuleExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | } 14 | 15 | func (executor *RemoveSecurityRuleExecutor) Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) (err error) { 17 | var instanceID string 18 | var index int 19 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil{ 20 | err = fmt.Errorf("get instance id fail: %s", err.Error()) 21 | return 22 | } 23 | if index, err = request.GetInt(framework.ParamKeyIndex); err != nil{ 24 | err = fmt.Errorf("get rule index fail: %s", err.Error()) 25 | return 26 | } 27 | resp, _ := framework.CreateJsonMessage(framework.RemoveGuestRuleResponse) 28 | resp.SetFromSession(id) 29 | resp.SetToSession(request.GetFromSession()) 30 | resp.SetSuccess(false) 31 | var respChan = make(chan error, 1) 32 | executor.InstanceModule.RemoveSecurityPolicyRule(instanceID, index, respChan) 33 | err = <- respChan 34 | if nil != err{ 35 | log.Printf("[%08X] remove %dth security rule of instance '%s' fail: %s", 36 | id, index, instanceID, err.Error()) 37 | resp.SetError(err.Error()) 38 | }else{ 39 | log.Printf("[%08X] %dth security rule of instance '%s' removed", 40 | id, index, instanceID) 41 | resp.SetSuccess(true) 42 | } 43 | return executor.Sender.SendMessage(resp, request.GetSender()) 44 | } -------------------------------------------------------------------------------- /src/task/reset_guest_system.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/project-nano/cell/service" 7 | "github.com/project-nano/framework" 8 | "log" 9 | "time" 10 | ) 11 | 12 | type ResetGuestSystemExecutor struct { 
13 | Sender framework.MessageSender 14 | InstanceModule service.InstanceModule 15 | StorageModule service.StorageModule 16 | } 17 | 18 | func (executor *ResetGuestSystemExecutor) Execute(id framework.SessionID, request framework.Message, 19 | incoming chan framework.Message, terminate chan bool) (err error) { 20 | var guestID, imageID, mediaHost string 21 | var mediaPort, imageSize uint 22 | if guestID, err = request.GetString(framework.ParamKeyGuest); err != nil { 23 | return 24 | } 25 | if imageID, err = request.GetString(framework.ParamKeyImage); err != nil { 26 | return 27 | } 28 | if mediaHost, err = request.GetString(framework.ParamKeyHost); err != nil { 29 | return err 30 | } 31 | if mediaPort, err = request.GetUInt(framework.ParamKeyPort); err != nil { 32 | return err 33 | } 34 | if imageSize, err = request.GetUInt(framework.ParamKeySize); err != nil { 35 | return err 36 | } 37 | log.Printf("[%08X] recv reset system of guest '%s' to image '%s' from %s.[%08X]", 38 | id, guestID, imageID, request.GetSender(), request.GetFromSession()) 39 | resp, _ := framework.CreateJsonMessage(framework.ResetSystemResponse) 40 | resp.SetFromSession(id) 41 | resp.SetToSession(request.GetFromSession()) 42 | resp.SetSuccess(false) 43 | 44 | var systemVolume string 45 | var systemSize uint64 46 | { 47 | var respChan = make(chan service.InstanceResult, 1) 48 | //check instance 49 | executor.InstanceModule.GetInstanceStatus(guestID, respChan) 50 | var result = <-respChan 51 | if result.Error != nil { 52 | err = result.Error 53 | log.Printf("[%08X] get instance fail: %s", id, err.Error()) 54 | resp.SetError(err.Error()) 55 | return executor.Sender.SendMessage(resp, request.GetSender()) 56 | } 57 | var ins = result.Instance 58 | if ins.Running { 59 | err = fmt.Errorf("guest '%s' is still running", ins.Name) 60 | log.Printf("[%08X] check instance fail: %s", id, err.Error()) 61 | resp.SetError(err.Error()) 62 | return executor.Sender.SendMessage(resp, request.GetSender()) 63 | } 64 | if 0 == len(ins.StorageVolumes) { 65 | err = fmt.Errorf("no volumes available for guest '%s'", ins.Name) 66 | log.Printf("[%08X] check instance fail: %s", id, err.Error()) 67 | resp.SetError(err.Error()) 68 | return executor.Sender.SendMessage(resp, request.GetSender()) 69 | } 70 | systemVolume = ins.StorageVolumes[0] 71 | systemSize = ins.Disks[0] 72 | } 73 | { 74 | //write system volume 75 | var startChan = make(chan error, 1) 76 | var progressChan = make(chan uint, 1) 77 | var resultChan = make(chan service.StorageResult, 1) 78 | executor.StorageModule.ReadDiskImage(id, guestID, systemVolume, imageID, systemSize, uint64(imageSize), mediaHost, mediaPort, 79 | startChan, progressChan, resultChan) 80 | //wait start 81 | { 82 | var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 83 | select { 84 | case err = <-startChan: 85 | if err != nil { 86 | log.Printf("[%08X] start reset system image fail: %s", id, err.Error()) 87 | resp.SetError(err.Error()) 88 | return executor.Sender.SendMessage(resp, request.GetSender()) 89 | } else { 90 | //started 91 | log.Printf("[%08X] reset system image started...", id) 92 | resp.SetSuccess(true) 93 | executor.Sender.SendMessage(resp, request.GetSender()) 94 | } 95 | 96 | case <-timer.C: 97 | //wait start timeout 98 | err = errors.New("start reset system image timeout") 99 | resp.SetError(err.Error()) 100 | return executor.Sender.SendMessage(resp, request.GetSender()) 101 | } 102 | } 103 | //update progress&wait finish 104 | const ( 105 | CheckInterval = 2 * time.Second 106 | ) 
107 | 108 | resetEvent, _ := framework.CreateJsonMessage(framework.SystemResetEvent) 109 | resetEvent.SetFromSession(id) 110 | resetEvent.SetSuccess(false) 111 | resetEvent.SetString(framework.ParamKeyGuest, guestID) 112 | 113 | updateEvent, _ := framework.CreateJsonMessage(framework.GuestUpdatedEvent) 114 | updateEvent.SetFromSession(id) 115 | updateEvent.SetSuccess(true) 116 | updateEvent.SetString(framework.ParamKeyInstance, guestID) 117 | 118 | var latestUpdate = time.Now() 119 | var ticker = time.NewTicker(CheckInterval) 120 | for { 121 | select { 122 | case <-ticker.C: 123 | //check 124 | if time.Now().After(latestUpdate.Add(service.GetConfigurator().GetOperateTimeout())) { 125 | //timeout 126 | err = errors.New("wait reset progress timeout") 127 | log.Printf("[%08X] reset system image fail: %s", id, err.Error()) 128 | resetEvent.SetError(err.Error()) 129 | return executor.Sender.SendMessage(resetEvent, request.GetSender()) 130 | } 131 | case progress := <-progressChan: 132 | latestUpdate = time.Now() 133 | updateEvent.SetUInt(framework.ParamKeyProgress, progress) 134 | log.Printf("[%08X] progress => %d %%", id, progress) 135 | if err = executor.Sender.SendMessage(updateEvent, request.GetSender()); err != nil { 136 | log.Printf("[%08X] warning: notify progress fail: %s", id, err.Error()) 137 | } 138 | case result := <-resultChan: 139 | err = result.Error 140 | if err != nil { 141 | log.Printf("[%08X] reset system image fail: %s", id, err.Error()) 142 | resetEvent.SetSuccess(false) 143 | resetEvent.SetError(err.Error()) 144 | return executor.Sender.SendMessage(resetEvent, request.GetSender()) 145 | } 146 | log.Printf("[%08X] reset system image success, %d MB in size", id, result.Size>>20) 147 | { 148 | var errChan = make(chan error, 1) 149 | executor.InstanceModule.ResetGuestSystem(guestID, errChan) 150 | if err = <-errChan; err != nil { 151 | log.Printf("[%08X] reset guest system fail: %s", id, err.Error()) 152 | resetEvent.SetSuccess(false) 153 | resetEvent.SetError(err.Error()) 154 | return executor.Sender.SendMessage(resetEvent, request.GetSender()) 155 | } 156 | 157 | } 158 | //notify system reset completed 159 | resetEvent.SetSuccess(true) 160 | 161 | if err = executor.Sender.SendMessage(resetEvent, request.GetSender()); err != nil { 162 | log.Printf("[%08X] warning: notify system reset fail: %s", id, err.Error()) 163 | } 164 | return nil 165 | } 166 | } 167 | 168 | } 169 | return nil 170 | } 171 | -------------------------------------------------------------------------------- /src/task/reset_monitor_secret.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/project-nano/cell/service" 7 | "github.com/project-nano/framework" 8 | "log" 9 | ) 10 | 11 | type ResetMonitorSecretExecutor struct { 12 | Sender framework.MessageSender 13 | InstanceModule service.InstanceModule 14 | } 15 | 16 | func (executor *ResetMonitorSecretExecutor) Execute(id framework.SessionID, request framework.Message, 17 | incoming chan framework.Message, terminate chan bool) (err error) { 18 | var guestID string 19 | if guestID, err = request.GetString(framework.ParamKeyGuest);err != nil{ 20 | err = fmt.Errorf("get guest id fail: %s", err.Error()) 21 | return err 22 | } 23 | var respChan = make(chan service.InstanceResult) 24 | executor.InstanceModule.ResetMonitorPassword(guestID, respChan) 25 | 26 | resp, _ := framework.CreateJsonMessage(framework.ResetSecretResponse) 27 | resp.SetFromSession(id) 28 |
resp.SetToSession(request.GetFromSession()) 29 | resp.SetSuccess(false) 30 | 31 | var password string 32 | result := <- respChan 33 | if result.Error != nil{ 34 | err = result.Error 35 | 36 | }else{ 37 | password = result.Password 38 | if "" == password{ 39 | err = errors.New("new password is empty") 40 | } 41 | } 42 | if err != nil{ 43 | resp.SetError(err.Error()) 44 | log.Printf("[%08X] reset monitor secret fail: %s", id, err.Error()) 45 | }else{ 46 | resp.SetSuccess(true) 47 | resp.SetString(framework.ParamKeySecret, password) 48 | } 49 | return executor.Sender.SendMessage(resp, request.GetSender()) 50 | } 51 | -------------------------------------------------------------------------------- /src/task/resize_volume.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/project-nano/cell/service" 7 | "github.com/project-nano/framework" 8 | "log" 9 | "time" 10 | ) 11 | 12 | type ResizeGuestVolumeExecutor struct { 13 | Sender framework.MessageSender 14 | InstanceModule service.InstanceModule 15 | StorageModule service.StorageModule 16 | } 17 | 18 | func (executor *ResizeGuestVolumeExecutor) Execute(id framework.SessionID, request framework.Message, 19 | incoming chan framework.Message, terminate chan bool) (err error) { 20 | var guestID string 21 | var index, size uint 22 | if guestID, err = request.GetString(framework.ParamKeyGuest); err != nil { 23 | return err 24 | } 25 | if index, err = request.GetUInt(framework.ParamKeyDisk); err != nil { 26 | return err 27 | } 28 | if size, err = request.GetUInt(framework.ParamKeySize); err != nil { 29 | return err 30 | } 31 | log.Printf("[%08X] recv resize disk of guest '%s' from %s.[%08X]", 32 | id, guestID, request.GetSender(), request.GetFromSession()) 33 | resp, _ := framework.CreateJsonMessage(framework.ResizeDiskResponse) 34 | resp.SetSuccess(false) 35 | resp.SetFromSession(id) 36 | resp.SetToSession(request.GetFromSession()) 37 | var targetVolume string 38 | var targetSize = uint64(size) 39 | var targetIndex = int(index) 40 | { 41 | var respChan = make(chan service.InstanceResult) 42 | executor.InstanceModule.GetInstanceStatus(guestID, respChan) 43 | var result = <-respChan 44 | if result.Error != nil { 45 | err = result.Error 46 | log.Printf("[%08X] get instance fail: %s", id, err.Error()) 47 | resp.SetError(err.Error()) 48 | return executor.Sender.SendMessage(resp, request.GetSender()) 49 | } 50 | 51 | err = func(instance service.InstanceStatus, index int, size uint64) (err error) { 52 | if !instance.Created { 53 | err = fmt.Errorf("instance '%s' not created", guestID) 54 | return 55 | } 56 | if instance.Running { 57 | err = fmt.Errorf("instance '%s' not stopped", guestID) 58 | return 59 | } 60 | var volumeCount = len(instance.StorageVolumes) 61 | if 0 == volumeCount { 62 | err = errors.New("no volume available") 63 | return 64 | } 65 | if index >= volumeCount { 66 | err = fmt.Errorf("invalid disk index %d", index) 67 | return 68 | } 69 | if instance.Disks[index] >= size { 70 | err = fmt.Errorf("must be larger than current volume size %d GB", instance.Disks[index]>>30) 71 | return 72 | } 73 | return nil 74 | }(result.Instance, targetIndex, targetSize) 75 | if err != nil { 76 | log.Printf("[%08X] check instance fail: %s", id, err.Error()) 77 | resp.SetError(err.Error()) 78 | return executor.Sender.SendMessage(resp, request.GetSender()) 79 | } 80 | targetVolume = result.Instance.StorageVolumes[targetIndex] 81 | } 82 | var resultChan = make(chan service.StorageResult, 1) 83 | { 84 | executor.StorageModule.ResizeVolume(id, guestID, targetVolume, targetSize, resultChan) 85 | var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 86 | select { 87 | case <-timer.C: 88 | err = errors.New("request timeout") 89 | log.Printf("[%08X] resize disk timeout", id) 90 | resp.SetError(err.Error()) 91 | return executor.Sender.SendMessage(resp, request.GetSender()) 92 | case result := <-resultChan: 93 | if result.Error != nil { 94 | err = result.Error 95 | log.Printf("[%08X] resize disk fail: %s", id, err.Error()) 96 | resp.SetError(err.Error()) 97 | } else { 98 | { 99 | var respChan = make(chan error) 100 | executor.InstanceModule.UpdateDiskSize(guestID, targetIndex, targetSize, respChan) 101 | err = <-respChan 102 | if err != nil { 103 | log.Printf("[%08X] update disk size fail: %s", id, err.Error()) 104 | resp.SetError(err.Error()) 105 | return executor.Sender.SendMessage(resp, request.GetSender()) 106 | } 107 | } 108 | log.Printf("[%08X] volume %s changed to %d GiB", id, targetVolume, targetSize>>30) 109 | resp.SetSuccess(true) 110 | } 111 | return executor.Sender.SendMessage(resp, request.GetSender()) 112 | } 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/task/restore_snapshot.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/project-nano/cell/service" 7 | "github.com/project-nano/framework" 8 | "log" 9 | "time" 10 | ) 11 | 12 | type RestoreSnapshotExecutor struct { 13 | Sender framework.MessageSender 14 | InstanceModule service.InstanceModule 15 | StorageModule service.StorageModule 16 | } 17 | 18 | func (executor *RestoreSnapshotExecutor) Execute(id framework.SessionID, request framework.Message, 19 | incoming chan framework.Message, terminate chan bool) (err error) { 20 | var instanceID string 21 | var snapshot string 22 | if instanceID, err = request.GetString(framework.ParamKeyInstance); err != nil { 23 | return err 24 | } 25 | if snapshot, err = request.GetString(framework.ParamKeyName); err != nil { 26 | return err 27 | } 28 | 29 | log.Printf("[%08X] recv restore guest '%s' to snapshot '%s' from %s.[%08X]", 30 | id, instanceID, snapshot, request.GetSender(), request.GetFromSession()) 31 | resp, _ := framework.CreateJsonMessage(framework.RestoreSnapshotResponse) 32 | resp.SetSuccess(false) 33 | resp.SetFromSession(id) 34 | resp.SetToSession(request.GetFromSession()) 35 | { 36 | var respChan = make(chan service.InstanceResult, 1) 37 | executor.InstanceModule.GetInstanceStatus(instanceID, respChan) 38 | var result = <-respChan 39 | if result.Error != nil { 40 | err = result.Error 41 | log.Printf("[%08X] get instance fail: %s", id, err.Error()) 42 | resp.SetError(err.Error()) 43 | return executor.Sender.SendMessage(resp, request.GetSender()) 44 | } 45 | 46 | err = func(instance service.InstanceStatus) (err error) { 47 | if !instance.Created { 48 | err = fmt.Errorf("instance '%s' not created", instanceID) 49 | return 50 | } 51 | //todo: allow operating on branch snapshots 52 | if instance.Running { 53 | err = errors.New("live snapshot not supported yet, shut down the instance first") 54 | return 55 | } 56 | return nil 57 | }(result.Instance) 58 | if err != nil { 59 | log.Printf("[%08X] check instance fail: %s", id, err.Error()) 60 | resp.SetError(err.Error()) 61 | return executor.Sender.SendMessage(resp, request.GetSender()) 62 | } 63 | } 64 | { 65 | var respChan
= make(chan error, 1) 66 | executor.StorageModule.RestoreSnapshot(instanceID, snapshot, respChan) 67 | var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 68 | select { 69 | case <-timer.C: 70 | err = errors.New("request timeout") 71 | log.Printf("[%08X] restore snapshot timeout", id) 72 | resp.SetError(err.Error()) 73 | return executor.Sender.SendMessage(resp, request.GetSender()) 74 | case err = <-respChan: 75 | if err != nil { 76 | log.Printf("[%08X] restore snapshot fail: %s", id, err.Error()) 77 | resp.SetError(err.Error()) 78 | } else { 79 | log.Printf("[%08X] guest '%s' restored to snapshot '%s'", id, instanceID, snapshot) 80 | resp.SetSuccess(true) 81 | } 82 | return executor.Sender.SendMessage(resp, request.GetSender()) 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/task/set_disk_threshold.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "log" 7 | "fmt" 8 | ) 9 | 10 | type ModifyDiskThresholdExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | } 14 | 15 | func (executor *ModifyDiskThresholdExecutor)Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) error { 17 | guestID, err := request.GetString(framework.ParamKeyGuest) 18 | if err != nil { 19 | return err 20 | } 21 | limitParameters, err := request.GetUIntArray(framework.ParamKeyLimit) 22 | if err != nil { 23 | return err 24 | } 25 | const ( 26 | ReadSpeedOffset = iota 27 | WriteSpeedOffset 28 | ReadIOPSOffset 29 | WriteIOPSOffset 30 | ValidLimitParametersCount = 4 31 | ) 32 | 33 | if ValidLimitParametersCount != len(limitParameters){ 34 | var err = fmt.Errorf("invalid QoS parameters count %d", len(limitParameters)) 35 | return err 36 | } 37 | var readSpeed = limitParameters[ReadSpeedOffset] 38 | var writeSpeed = limitParameters[WriteSpeedOffset] 39 | var readIOPS = limitParameters[ReadIOPSOffset] 40 | var writeIOPS = limitParameters[WriteIOPSOffset] 41 | 42 | log.Printf("[%08X] request modifying disk threshold of guest '%s' from %s.[%08X]", id, guestID, 43 | request.GetSender(), request.GetFromSession()) 44 | 45 | resp, _ := framework.CreateJsonMessage(framework.ModifyDiskThresholdResponse) 46 | resp.SetToSession(request.GetFromSession()) 47 | resp.SetFromSession(id) 48 | resp.SetSuccess(false) 49 | var respChan = make(chan error, 1) 50 | executor.InstanceModule.ModifyDiskThreshold(guestID, readSpeed, readIOPS, writeSpeed, writeIOPS, respChan) 51 | err = <- respChan 52 | if err != nil{ 53 | log.Printf("[%08X] modify disk threshold fail: %s", id, err.Error()) 54 | resp.SetError(err.Error()) 55 | }else{ 56 | //log.Printf("[%08X] disk threshold of guest '%s' changed to read (%d MB/s, %d ops), write (%d MB/s, %d ops)", id, guestID, 57 | // readSpeed >> 20, readIOPS, writeSpeed >> 20, writeIOPS) 58 | log.Printf("[%08X] disk threshold of guest '%s' changed to read %d, write %d per second", id, guestID, 59 | readIOPS, writeIOPS) 60 | resp.SetSuccess(true) 61 | } 62 | return executor.Sender.SendMessage(resp, request.GetSender()) 63 | } 64 | -------------------------------------------------------------------------------- /src/task/set_network_threshold.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "log" 5 | 
"github.com/project-nano/framework" 6 | "github.com/project-nano/cell/service" 7 | "fmt" 8 | ) 9 | 10 | type ModifyNetworkThresholdExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | } 14 | 15 | func (executor *ModifyNetworkThresholdExecutor)Execute(id framework.SessionID, request framework.Message, 16 | incoming chan framework.Message, terminate chan bool) error { 17 | guestID, err := request.GetString(framework.ParamKeyGuest) 18 | if err != nil { 19 | return err 20 | } 21 | limitParameters, err := request.GetUIntArray(framework.ParamKeyLimit) 22 | if err != nil { 23 | return err 24 | } 25 | const ( 26 | ReceiveOffset = iota 27 | SendOffset 28 | ValidLimitParametersCount = 2 29 | ) 30 | 31 | if ValidLimitParametersCount != len(limitParameters){ 32 | var err = fmt.Errorf("invalid QoS parameters count %d", len(limitParameters)) 33 | return err 34 | } 35 | var receiveSpeed = limitParameters[ReceiveOffset] 36 | var sendSpeed = limitParameters[SendOffset] 37 | 38 | log.Printf("[%08X] request modifying network threshold of guest '%s' from %s.[%08X]", id, guestID, 39 | request.GetSender(), request.GetFromSession()) 40 | 41 | resp, _ := framework.CreateJsonMessage(framework.ModifyNetworkThresholdResponse) 42 | resp.SetToSession(request.GetFromSession()) 43 | resp.SetFromSession(id) 44 | resp.SetSuccess(false) 45 | var respChan = make(chan error, 1) 46 | executor.InstanceModule.ModifyNetworkThreshold(guestID, receiveSpeed, sendSpeed, respChan) 47 | err = <- respChan 48 | if err != nil{ 49 | log.Printf("[%08X] modify network threshold fail: %s", id, err.Error()) 50 | resp.SetError(err.Error()) 51 | }else{ 52 | log.Printf("[%08X] network threshold of guest '%s' changed to receive %d Kps, send %d Kps", id, guestID, 53 | receiveSpeed >> 10, sendSpeed >> 10) 54 | resp.SetSuccess(true) 55 | } 56 | return executor.Sender.SendMessage(resp, request.GetSender()) 57 | } 58 | -------------------------------------------------------------------------------- /src/task/shrink_volume.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "github.com/project-nano/cell/service" 7 | "github.com/project-nano/framework" 8 | "log" 9 | "time" 10 | ) 11 | 12 | type ShrinkGuestVolumeExecutor struct { 13 | Sender framework.MessageSender 14 | InstanceModule service.InstanceModule 15 | StorageModule service.StorageModule 16 | } 17 | 18 | func (executor *ShrinkGuestVolumeExecutor) Execute(id framework.SessionID, request framework.Message, 19 | incoming chan framework.Message, terminate chan bool) (err error) { 20 | var guestID string 21 | var index uint 22 | if guestID, err = request.GetString(framework.ParamKeyGuest); err != nil { 23 | return err 24 | } 25 | if index, err = request.GetUInt(framework.ParamKeyDisk); err != nil { 26 | return err 27 | } 28 | log.Printf("[%08X] recv shrink disk of guest '%s' from %s.[%08X]", 29 | id, guestID, request.GetSender(), request.GetFromSession()) 30 | 31 | resp, _ := framework.CreateJsonMessage(framework.ResizeDiskResponse) 32 | resp.SetSuccess(false) 33 | resp.SetFromSession(id) 34 | resp.SetToSession(request.GetFromSession()) 35 | var targetVolume string 36 | { 37 | var respChan = make(chan service.InstanceResult) 38 | executor.InstanceModule.GetInstanceStatus(guestID, respChan) 39 | var result = <-respChan 40 | if result.Error != nil { 41 | err = result.Error 42 | log.Printf("[%08X] get instance fail: %s", id, err.Error()) 43 | 
resp.SetError(err.Error()) 44 | return executor.Sender.SendMessage(resp, request.GetSender()) 45 | } 46 | 47 | err = func(instance service.InstanceStatus, index int) (err error) { 48 | if !instance.Created { 49 | err = fmt.Errorf("instance '%s' not created", guestID) 50 | return 51 | } 52 | if instance.Running { 53 | err = fmt.Errorf("instance '%s' not stopped", guestID) 54 | return 55 | } 56 | var volumeCount = len(instance.StorageVolumes) 57 | if 0 == volumeCount { 58 | err = errors.New("no volume available") 59 | return 60 | } 61 | if index >= volumeCount { 62 | err = fmt.Errorf("invalid disk index %d", index) 63 | return 64 | } 65 | return nil 66 | }(result.Instance, int(index)) 67 | if err != nil { 68 | log.Printf("[%08X] check instance fail: %s", id, err.Error()) 69 | resp.SetError(err.Error()) 70 | return executor.Sender.SendMessage(resp, request.GetSender()) 71 | } 72 | targetVolume = result.Instance.StorageVolumes[int(index)] 73 | } 74 | var resultChan = make(chan service.StorageResult, 1) 75 | { 76 | executor.StorageModule.ShrinkVolume(id, guestID, targetVolume, resultChan) 77 | var timer = time.NewTimer(service.GetConfigurator().GetOperateTimeout()) 78 | select { 79 | case <-timer.C: 80 | err = errors.New("request timeout") 81 | log.Printf("[%08X] shrink disk timeout", id) 82 | resp.SetError(err.Error()) 83 | return executor.Sender.SendMessage(resp, request.GetSender()) 84 | case result := <-resultChan: 85 | if result.Error != nil { 86 | err = result.Error 87 | log.Printf("[%08X] shrink disk fail: %s", id, err.Error()) 88 | resp.SetError(err.Error()) 89 | } else { 90 | log.Printf("[%08X] volume %s shrank successfully", id, targetVolume) 91 | resp.SetSuccess(true) 92 | } 93 | return executor.Sender.SendMessage(resp, request.GetSender()) 94 | } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/task/start_instance.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "fmt" 5 | "github.com/project-nano/cell/service" 6 | "github.com/project-nano/framework" 7 | "log" 8 | ) 9 | 10 | type StartInstanceExecutor struct { 11 | Sender framework.MessageSender 12 | InstanceModule service.InstanceModule 13 | StorageModule service.StorageModule 14 | } 15 | 16 | func (executor *StartInstanceExecutor) Execute(id framework.SessionID, request framework.Message, 17 | incoming chan framework.Message, terminate chan bool) (err error) { 18 | const ( 19 | InstanceMediaOptionNone uint = iota 20 | InstanceMediaOptionImage 21 | InstanceMediaOptionNetwork 22 | ) 23 | 24 | resp, _ := framework.CreateJsonMessage(framework.StartInstanceResponse) 25 | resp.SetFromSession(id) 26 | resp.SetToSession(request.GetFromSession()) 27 | var notified = false 28 | defer func() { 29 | if !notified && err != nil { 30 | resp.SetError(err.Error()) 31 | _ = executor.Sender.SendMessage(resp, request.GetSender()) 32 | } 33 | }() 34 | 35 | var instanceID string 36 | instanceID, err = request.GetString(framework.ParamKeyInstance) 37 | if err != nil { 38 | return 39 | } 40 | var mediaOption uint 41 | mediaOption, err = request.GetUInt(framework.ParamKeyOption) 42 | if err != nil { 43 | return 44 | } 45 | var respChan = make(chan error, 1) 46 | executor.StorageModule.ValidateVolumesForStart(instanceID, respChan) 47 | err = <-respChan 48 | if err != nil { 49 | log.Printf("[%08X] recv start instance '%s' from %s.[%08X] but validate volumes fail: %s", 50 | id, instanceID, request.GetSender(), 
request.GetFromSession(), err.Error()) 51 | return 52 | } 53 | 54 | var mediaSource string 55 | switch mediaOption { 56 | case InstanceMediaOptionNone: 57 | //no media attached 58 | log.Printf("[%08X] request start instance '%s' from %s.[%08X]", 59 | id, instanceID, request.GetSender(), request.GetFromSession()) 60 | executor.InstanceModule.StartInstance(instanceID, respChan) 61 | case InstanceMediaOptionImage: 62 | var host string 63 | var port uint 64 | if host, err = request.GetString(framework.ParamKeyHost); err != nil { 65 | return 66 | } 67 | if mediaSource, err = request.GetString(framework.ParamKeySource); err != nil { 68 | return 69 | } 70 | if port, err = request.GetUInt(framework.ParamKeyPort); err != nil { 71 | return 72 | } 73 | var media = service.InstanceMediaConfig{Mode: service.MediaModeHTTPS, ID: mediaSource, Host: host, Port: port} 74 | log.Printf("[%08X] request start instance '%s' with media '%s' (host %s:%d) from %s.[%08X]", 75 | id, instanceID, mediaSource, host, port, request.GetSender(), request.GetFromSession()) 76 | executor.InstanceModule.StartInstanceWithMedia(instanceID, media, respChan) 77 | default: 78 | return fmt.Errorf("unsupported media option %d", mediaOption) 79 | } 80 | 81 | err = <-respChan 82 | if err != nil { 83 | log.Printf("[%08X] start instance fail: %s", id, err.Error()) 84 | return 85 | } 86 | resp.SetSuccess(true) 87 | log.Printf("[%08X] start instance success", id) 88 | notified = true 89 | if err = executor.Sender.SendMessage(resp, request.GetSender()); err != nil { 90 | log.Printf("[%08X] warning: send response fail: %s", id, err.Error()) 91 | return err 92 | } 93 | //notify 94 | event, _ := framework.CreateJsonMessage(framework.GuestStartedEvent) 95 | event.SetFromSession(id) 96 | event.SetString(framework.ParamKeyInstance, instanceID) 97 | if err = executor.Sender.SendMessage(event, request.GetSender()); err != nil { 98 | log.Printf("[%08X] warning: notify instance started fail: %s", id, err.Error()) 99 | return err 100 | } 101 | if InstanceMediaOptionImage == mediaOption { 102 | //notify media attached 103 | attached, _ := framework.CreateJsonMessage(framework.MediaAttachedEvent) 104 | attached.SetFromSession(id) 105 | attached.SetString(framework.ParamKeyInstance, instanceID) 106 | attached.SetString(framework.ParamKeyMedia, mediaSource) 107 | if err = executor.Sender.SendMessage(attached, request.GetSender()); err != nil { 108 | log.Printf("[%08X] warning: notify media attached fail: %s", id, err.Error()) 109 | return err 110 | } 111 | } 112 | return nil 113 | } 114 | -------------------------------------------------------------------------------- /src/task/stop_instance.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "github.com/project-nano/framework" 5 | "github.com/project-nano/cell/service" 6 | "fmt" 7 | "log" 8 | "time" 9 | "errors" 10 | ) 11 | 12 | type StopInstanceExecutor struct { 13 | Sender framework.MessageSender 14 | InstanceModule service.InstanceModule 15 | } 16 | 17 | func (executor *StopInstanceExecutor) Execute(id framework.SessionID, request framework.Message, 18 | incoming chan framework.Message, terminate chan bool) (err error) { 19 | var instanceID string 20 | instanceID, err = request.GetString(framework.ParamKeyInstance) 21 | if err != nil{ 22 | return err 23 | } 24 | options, err := request.GetUIntArray(framework.ParamKeyOption) 25 | if err != nil{ 26 | return err 27 | } 28 | const ( 29 | ValidOptionCount = 2 30 | ) 31 | if len(options) 
!= ValidOptionCount{ 32 | return fmt.Errorf("unexpected option count %d / %d", len(options), ValidOptionCount) 33 | } 34 | var reboot = 1 == options[0] 35 | var force = 1 == options[1] 36 | if reboot{ 37 | if force{ 38 | log.Printf("[%08X] request force reboot instance '%s' from %s.[%08X]", 39 | id, instanceID, request.GetSender(), request.GetFromSession()) 40 | }else{ 41 | log.Printf("[%08X] request reboot instance '%s' from %s.[%08X]", 42 | id, instanceID, request.GetSender(), request.GetFromSession()) 43 | } 44 | }else if force{ 45 | log.Printf("[%08X] request force stop instance '%s' from %s.[%08X]", 46 | id, instanceID, request.GetSender(), request.GetFromSession()) 47 | }else{ 48 | log.Printf("[%08X] request stop instance '%s' from %s.[%08X]", 49 | id, instanceID, request.GetSender(), request.GetFromSession()) 50 | } 51 | 52 | var respChan = make(chan error) 53 | executor.InstanceModule.StopInstance(instanceID, reboot, force, respChan) 54 | err = <- respChan 55 | 56 | resp, _ := framework.CreateJsonMessage(framework.StopInstanceResponse) 57 | resp.SetFromSession(id) 58 | resp.SetToSession(request.GetFromSession()) 59 | if err != nil{ 60 | resp.SetSuccess(false) 61 | resp.SetError(err.Error()) 62 | log.Printf("[%08X] stop instance fail: %s", id, err.Error()) 63 | return executor.Sender.SendMessage(resp, request.GetSender()) 64 | } 65 | resp.SetSuccess(true) 66 | log.Printf("[%08X] stop instance success", id) 67 | if err = executor.Sender.SendMessage(resp, request.GetSender());err != nil{ 68 | log.Printf("[%08X] warning: send response fail: %s", id, err.Error()) 69 | return err 70 | } 71 | if reboot{ 72 | return nil 73 | } 74 | 75 | { 76 | //wait for instance stopped 77 | const ( 78 | CheckInterval = 1*time.Second 79 | WaitTimeout = 1*time.Minute 80 | ) 81 | 82 | ticker := time.NewTicker(CheckInterval) 83 | timer := time.NewTimer(WaitTimeout) 84 | for{ 85 | select{ 86 | case <- ticker.C: 87 | var respChan = make(chan bool) 88 | executor.InstanceModule.IsInstanceRunning(instanceID, respChan) 89 | running := <- respChan 90 | if !running{ 91 | log.Printf("[%08X] instance '%s' stopped", id, instanceID) 92 | event, _ := framework.CreateJsonMessage(framework.GuestStoppedEvent) 93 | event.SetFromSession(id) 94 | event.SetString(framework.ParamKeyInstance, instanceID) 95 | if err = executor.Sender.SendMessage(event, request.GetSender()); err != nil{ 96 | log.Printf("[%08X] warning: notify instance stopped fail: %s", id, err.Error()) 97 | } 98 | return err 99 | } 100 | case <- timer.C: 101 | //timeout 102 | log.Printf("[%08X] warning: instance not stopped in expected duration", id) 103 | return errors.New("stop not finished") 104 | } 105 | } 106 | } 107 | } 108 | 109 | -------------------------------------------------------------------------------- /transaction_manager.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/tls" 5 | "fmt" 6 | "github.com/project-nano/cell/service" 7 | "github.com/project-nano/cell/task" 8 | "github.com/project-nano/framework" 9 | "math/rand" 10 | "net/http" 11 | "time" 12 | ) 13 | 14 | type TransactionManager struct { 15 | *framework.TransactionEngine 16 | } 17 | 18 | func CreateTransactionManager(sender framework.MessageSender, instanceModule *service.InstanceManager, 19 | storageModule *service.StorageManager, networkModule *service.NetworkManager) (manager *TransactionManager, err error) { 20 | var engine *framework.TransactionEngine 21 | if engine, err = 
framework.CreateTransactionEngine(); err != nil { 22 | return nil, err 23 | } 24 | client := &http.Client{ 25 | Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}, 26 | } 27 | generator := rand.New(rand.NewSource(time.Now().UnixNano())) 28 | 29 | manager = &TransactionManager{engine} 30 | if err = manager.RegisterExecutor(framework.GetComputePoolCellRequest, 31 | &task.GetCellInfoExecutor{sender, instanceModule, storageModule, networkModule}); err != nil { 32 | return nil, err 33 | } 34 | 35 | if err = manager.RegisterExecutor(framework.CreateGuestRequest, 36 | &task.CreateInstanceExecutor{sender, instanceModule, storageModule, networkModule, generator}); err != nil { 37 | return nil, err 38 | } 39 | if err = manager.RegisterExecutor(framework.DeleteGuestRequest, 40 | &task.DeleteInstanceExecutor{sender, instanceModule, storageModule, networkModule}); err != nil { 41 | return nil, err 42 | } 43 | if err = manager.RegisterExecutor(framework.GetGuestRequest, 44 | &task.GetInstanceConfigExecutor{sender, instanceModule}); err != nil { 45 | return nil, err 46 | } 47 | if err = manager.RegisterExecutor(framework.GetInstanceStatusRequest, 48 | &task.GetInstanceStatusExecutor{sender, instanceModule}); err != nil { 49 | return nil, err 50 | } 51 | if err = manager.RegisterExecutor(framework.StartInstanceRequest, 52 | &task.StartInstanceExecutor{sender, instanceModule, storageModule}); err != nil { 53 | return nil, err 54 | } 55 | if err = manager.RegisterExecutor(framework.StopInstanceRequest, 56 | &task.StopInstanceExecutor{sender, instanceModule}); err != nil { 57 | return nil, err 58 | } 59 | if err = manager.RegisterExecutor(framework.AttachInstanceRequest, 60 | &task.AttachInstanceExecutor{sender, instanceModule, storageModule, networkModule}); err != nil { 61 | return nil, err 62 | } 63 | if err = manager.RegisterExecutor(framework.DetachInstanceRequest, 64 | &task.DetachInstanceExecutor{sender, instanceModule, storageModule, networkModule}); err != nil { 65 | return nil, err 66 | } 67 | if err = manager.RegisterExecutor(framework.ModifyGuestNameRequest, 68 | &task.ModifyGuestNameExecutor{sender, instanceModule}); err != nil { 69 | return nil, err 70 | } 71 | if err = manager.RegisterExecutor(framework.ModifyCoreRequest, 72 | &task.ModifyGuestCoreExecutor{sender, instanceModule}); err != nil { 73 | return nil, err 74 | } 75 | if err = manager.RegisterExecutor(framework.ModifyMemoryRequest, 76 | &task.ModifyGuestMemoryExecutor{sender, instanceModule}); err != nil { 77 | return nil, err 78 | } 79 | 80 | if err = manager.RegisterExecutor(framework.ModifyPriorityRequest, 81 | &task.ModifyCPUPriorityExecutor{sender, instanceModule}); err != nil { 82 | return nil, err 83 | } 84 | if err = manager.RegisterExecutor(framework.ModifyDiskThresholdRequest, 85 | &task.ModifyDiskThresholdExecutor{sender, instanceModule}); err != nil { 86 | return nil, err 87 | } 88 | if err = manager.RegisterExecutor(framework.ModifyNetworkThresholdRequest, 89 | &task.ModifyNetworkThresholdExecutor{sender, instanceModule}); err != nil { 90 | return nil, err 91 | } 92 | 93 | if err = manager.RegisterExecutor(framework.ModifyAuthRequest, 94 | &task.ModifyGuestPasswordExecutor{sender, instanceModule, generator}); err != nil { 95 | return nil, err 96 | } 97 | if err = manager.RegisterExecutor(framework.GetAuthRequest, 98 | &task.GetGuestPasswordExecutor{sender, instanceModule}); err != nil { 99 | return nil, err 100 | } 101 | if err = manager.RegisterExecutor(framework.ResetSystemRequest, 102 | 
&task.ResetGuestSystemExecutor{sender, instanceModule, storageModule}); err != nil { 103 | return nil, err 104 | } 105 | if err = manager.RegisterExecutor(framework.InsertMediaRequest, 106 | &task.InsertMediaCoreExecutor{sender, instanceModule}); err != nil { 107 | return nil, err 108 | } 109 | if err = manager.RegisterExecutor(framework.EjectMediaRequest, 110 | &task.EjectMediaCoreExecutor{sender, instanceModule}); err != nil { 111 | return nil, err 112 | } 113 | 114 | if err = manager.RegisterExecutor(framework.ComputePoolReadyEvent, 115 | &task.HandleComputePoolReadyExecutor{sender, instanceModule, storageModule, networkModule}); err != nil { 116 | return nil, err 117 | } 118 | if err = manager.RegisterExecutor(framework.ComputeCellRemovedEvent, 119 | &task.HandleComputeCellRemovedExecutor{sender, instanceModule, storageModule}); err != nil { 120 | return nil, err 121 | } 122 | if err = manager.RegisterExecutor(framework.CreateDiskImageRequest, 123 | &task.CreateDiskImageExecutor{sender, instanceModule, storageModule, client}); err != nil { 124 | return nil, err 125 | } 126 | if err = manager.RegisterExecutor(framework.ResizeDiskRequest, 127 | &task.ResizeGuestVolumeExecutor{sender, instanceModule, storageModule}); err != nil { 128 | return nil, err 129 | } 130 | if err = manager.RegisterExecutor(framework.ShrinkDiskRequest, 131 | &task.ShrinkGuestVolumeExecutor{sender, instanceModule, storageModule}); err != nil { 132 | return nil, err 133 | } 134 | if err = manager.RegisterExecutor(framework.QuerySnapshotRequest, 135 | &task.QuerySnapshotExecutor{sender, storageModule}); err != nil { 136 | return nil, err 137 | } 138 | if err = manager.RegisterExecutor(framework.GetSnapshotRequest, 139 | &task.GetSnapshotExecutor{sender, storageModule}); err != nil { 140 | return nil, err 141 | } 142 | if err = manager.RegisterExecutor(framework.AddressPoolChangedEvent, 143 | &task.HandleAddressPoolChangedExecutor{instanceModule, networkModule}); err != nil { 144 | return nil, err 145 | } 146 | if err = manager.RegisterExecutor(framework.CreateSnapshotRequest, 147 | &task.CreateSnapshotExecutor{sender, instanceModule, storageModule}); err != nil { 148 | return nil, err 149 | } 150 | if err = manager.RegisterExecutor(framework.DeleteSnapshotRequest, 151 | &task.DeleteSnapshotExecutor{sender, instanceModule, storageModule}); err != nil { 152 | return nil, err 153 | } 154 | if err = manager.RegisterExecutor(framework.RestoreSnapshotRequest, 155 | &task.RestoreSnapshotExecutor{sender, instanceModule, storageModule}); err != nil { 156 | return nil, err 157 | } 158 | if err = manager.RegisterExecutor(framework.ResetSecretRequest, 159 | &task.ResetMonitorSecretExecutor{ 160 | Sender: sender, 161 | InstanceModule: instanceModule, 162 | }); err != nil { 163 | err = fmt.Errorf("register reset monitor secret fail: %s", err.Error()) 164 | return 165 | } 166 | if err = manager.RegisterExecutor(framework.QueryCellStorageRequest, 167 | &task.QueryStoragePathExecutor{ 168 | Sender: sender, 169 | Storage: storageModule, 170 | }); err != nil { 171 | err = fmt.Errorf("register query storage paths fail: %s", err.Error()) 172 | return 173 | } 174 | if err = manager.RegisterExecutor(framework.ModifyCellStorageRequest, 175 | &task.ChangeStoragePathExecutor{ 176 | Sender: sender, 177 | Storage: storageModule, 178 | }); err != nil { 179 | err = fmt.Errorf("register change storage path fail: %s", err.Error()) 180 | return 181 | } 182 | //security policy 183 | if err = manager.RegisterExecutor(framework.GetGuestRuleRequest, 184 
| &task.GetSecurityPolicyExecutor{ 185 | Sender: sender, 186 | InstanceModule: instanceModule, 187 | }); err != nil { 188 | err = fmt.Errorf("register get security policy fail: %s", err.Error()) 189 | return 190 | } 191 | if err = manager.RegisterExecutor(framework.AddGuestRuleRequest, 192 | &task.AddSecurityRuleExecutor{ 193 | Sender: sender, 194 | InstanceModule: instanceModule, 195 | }); err != nil { 196 | err = fmt.Errorf("register add security rule fail: %s", err.Error()) 197 | return 198 | } 199 | if err = manager.RegisterExecutor(framework.ModifyGuestRuleRequest, 200 | &task.ModifySecurityRuleExecutor{ 201 | Sender: sender, 202 | InstanceModule: instanceModule, 203 | }); err != nil { 204 | err = fmt.Errorf("register modify security rule fail: %s", err.Error()) 205 | return 206 | } 207 | if err = manager.RegisterExecutor(framework.ChangeGuestRuleDefaultActionRequest, 208 | &task.ChangeDefaultSecurityActionExecutor{ 209 | Sender: sender, 210 | InstanceModule: instanceModule, 211 | }); err != nil { 212 | err = fmt.Errorf("register change default security action fail: %s", err.Error()) 213 | return 214 | } 215 | if err = manager.RegisterExecutor(framework.ChangeGuestRuleOrderRequest, 216 | &task.ChangeSecurityRuleOrderExecutor{ 217 | Sender: sender, 218 | InstanceModule: instanceModule, 219 | }); err != nil { 220 | err = fmt.Errorf("register change security rule order fail: %s", err.Error()) 221 | return 222 | } 223 | if err = manager.RegisterExecutor(framework.RemoveGuestRuleRequest, 224 | &task.RemoveSecurityRuleExecutor{ 225 | Sender: sender, 226 | InstanceModule: instanceModule, 227 | }); err != nil { 228 | err = fmt.Errorf("register remove security rule fail: %s", err.Error()) 229 | return 230 | } 231 | if err = manager.RegisterExecutor(framework.ModifyAutoStartRequest, 232 | &task.ModifyAutoStartExecutor{ 233 | Sender: sender, 234 | InstanceModule: instanceModule, 235 | }); err != nil { 236 | err = fmt.Errorf("register modify auto start fail: %s", err.Error()) 237 | return 238 | } 239 | return manager, nil 240 | } 241 | --------------------------------------------------------------------------------