├── .gitignore ├── CHANGES.rst ├── Cargo.lock ├── Cargo.toml ├── LICENSE.txt ├── NOTICE ├── README.rst ├── build.rs ├── client ├── client.rs ├── config.rs └── main.rs ├── fectl ├── __init__.py ├── apps │ ├── __init__.py │ ├── aiohttp.py │ └── aiohttp_config.py ├── config.py ├── errors.py ├── path.py ├── run.py ├── utils.py └── workers │ ├── __init__.py │ ├── asyncio.py │ ├── base.py │ ├── gevent.py │ └── socket.py ├── fectld.toml ├── setup.py ├── src ├── addrinfo.rs ├── client.rs ├── cmd.rs ├── config.rs ├── config_helpers.rs ├── event.rs ├── exec.rs ├── io.rs ├── logging.rs ├── main.rs ├── master.rs ├── master_types.rs ├── process.rs ├── service.rs ├── socket.rs ├── utils.rs └── worker.rs └── tests └── asyncio_tests.py /.gitignore: -------------------------------------------------------------------------------- 1 | /doc 2 | /gh-pages 3 | __pycache__ 4 | 5 | *.so 6 | *.out 7 | *.pyc 8 | *.pid 9 | *.sock 10 | *~ 11 | fectl/fectl 12 | fectl/fectld 13 | fectl/fectl.* 14 | fectl/_gen_* 15 | build/ 16 | dist/ 17 | target/ 18 | *.egg-info/ 19 | -------------------------------------------------------------------------------- /CHANGES.rst: -------------------------------------------------------------------------------- 1 | CHANGES 2 | ^^^^^^^ 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fectl" 3 | version = "0.2.8" 4 | authors = ["Nikolay Kim "] 5 | description = "Process management utility" 6 | readme = "README.md" 7 | keywords = [] 8 | homepage = "https://github.com/fafhrd91/fectl" 9 | repository = "https://github.com/fafhrd91/fectl.git" 10 | documentation = "https://fafhrd91.github.io/fafhrd91/fectl/" 11 | categories = ["development-tools::ffi"] 12 | license = "Apache-2.0" 13 | exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"] 14 | build = "build.rs" 15 | 16 | [[bin]] 17 | name = "fectld" 18 | path = "src/main.rs" 19 | 20 | [[bin]] 21 | name = "fectl" 22 | path = "client/main.rs" 23 | 24 | [dependencies] 25 | actix = "0.5" 26 | 27 | libc = "0.2" 28 | nix = "0.9" 29 | net2 = "0.2" 30 | byteorder = "1.1" 31 | chrono = "0.4" 32 | boxfnonce = "*" 33 | 34 | # tokio 35 | bytes = "0.4" 36 | mio = "0.6" 37 | futures = "0.1" 38 | tokio-core = "=0.1.12" 39 | tokio-io = "=0.1.5" 40 | tokio-signal = "=0.1.5" 41 | tokio-uds = "=0.1.7" 42 | 43 | # logging 44 | time = "*" 45 | log = "0.4" 46 | env_logger = "0.5" 47 | 48 | # cli 49 | structopt = "0.2" 50 | structopt-derive = "0.2" 51 | 52 | # config 53 | toml = "*" 54 | serde = "1.0" 55 | serde_json = "1.0" 56 | serde_derive = "1.0" 57 | 58 | [profile.release] 59 | lto = true 60 | opt-level = 3 61 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2017-NOW Nikolay Kim 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | fectl 2 | 3 | 2017-NOW (c) Nikolay Kim 4 | 5 | fectl is released under the Apache-2.0 license. See the LICENSE.txt 6 | file for the complete license. 7 | 8 | 9 | fectl.config 10 | ------------ 11 | 2009-2017 (c) Benoît Chesneau 12 | 2009-2015 (c) Paul J. Davis 13 | 14 | Permission is hereby granted, free of charge, to any person 15 | obtaining a copy of this software and associated documentation 16 | files (the "Software"), to deal in the Software without 17 | restriction, including without limitation the rights to use, 18 | copy, modify, merge, publish, distribute, sublicense, and/or sell 19 | copies of the Software, and to permit persons to whom the 20 | Software is furnished to do so, subject to the following 21 | conditions: 22 | 23 | The above copyright notice and this permission notice shall be 24 | included in all copies or substantial portions of the Software. 25 | 26 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 28 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 30 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 31 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 32 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 33 | OTHER DEALINGS IN THE SOFTWARE. 34 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | A Process Control System 2 | ======================== 3 | 4 | fectl is a client/server system that allows its users to monitor and control a number of processes on UNIX-like operating systems. 5 | 6 | It is similar to supervisord. Unlike supervisord, a controlled process has to support fectl and communicate with the master process over a specific protocol. 7 | This makes it possible to use custom loading capabilities, worker heartbeats, custom worker communication, etc. The downside of this decision is that it is not 8 | possible to run an arbitrary process; each application has to support the communication protocol. 9 | 10 | 11 | Configuration 12 | ------------- 13 | 14 | By default `fectld` uses the `fectld.toml` file from the current directory. It is possible to override 15 | this with the `-c` option. The configuration file uses the `toml <https://github.com/toml-lang/toml>`_ format. 16 | 17 | 18 | ``[master]`` Section Settings 19 | ----------------------------- 20 | 21 | The :file:`fectld.toml` file contains a section named 22 | ``[master]`` under which configuration parameters for the master process should be inserted. 23 | If the configuration file has no ``[master]`` section, default values will be used. The 24 | allowable configuration values are as follows. 25 | 26 | 27 | ``[master]`` Section Values 28 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 29 | 30 | ``sock`` 31 | 32 | A path to a UNIX domain socket (e.g.
:file:`/tmp/fectld.sock`) 33 | on which fectl will listen for client requests. 34 | :program:`fectld` uses a custom JSON protocol to communicate with the master process 35 | over this socket. 36 | 37 | *Default*: fectld.sock 38 | 39 | *Required*: No. 40 | 41 | ``directory`` 42 | 43 | When :program:`fectld` daemonizes, switch to this directory. 44 | 45 | *Default*: do not cd 46 | 47 | *Required*: No. 48 | 49 | 50 | ``pid`` 51 | 52 | A path to a file where the pid of the master process should be 53 | stored (e.g. :file:`/var/run/fectld.pid`) 54 | 55 | *Default*: Do not store pid 56 | 57 | *Required*: No. 58 | 59 | 60 | ``gid`` 61 | 62 | Instruct :program:`fectld` to switch groups to this UNIX group 63 | account before doing any meaningful processing. The value of this 64 | field can be an actual group id or a group name. 65 | 66 | *Default*: do not switch groups 67 | 68 | *Required*: No. 69 | 70 | ``uid`` 71 | 72 | Instruct :program:`fectld` to switch users to this UNIX user 73 | account before doing any meaningful processing. The value of this 74 | field can be an actual user id or a user name. 75 | 76 | *Default*: do not switch users 77 | 78 | *Required*: No. 79 | 80 | ``stdout`` 81 | 82 | A path to a file where `fectld` should redirect stdout. 83 | 84 | *Default*: do not redirect stdout 85 | 86 | *Required*: No. 87 | 88 | 89 | ``stderr`` 90 | 91 | A path to a file where `fectld` should redirect stderr. 92 | 93 | *Default*: do not redirect stderr 94 | 95 | *Required*: No. 96 | 97 | 98 | ``[[socket]]`` Section Settings 99 | ------------------------------- 100 | 101 | :program:`fectld` can manage inet sockets for worker processes, i.e. it can open a listening socket 102 | and pass its file descriptor to workers via an environment variable. The 103 | allowable configuration values are as follows. 104 | 105 | ``name`` 106 | 107 | The name of the socket. The file descriptor is available in the worker process as the `FECTL_FD_%(name)` 108 | environment variable. 109 | 110 | *Required*: Yes. 111 | 112 | ``port`` 113 | 114 | A port number. 115 | 116 | *Required*: Yes. 117 | 118 | ``host`` 119 | 120 | A host name. 121 | 122 | *Required*: No. 123 | 124 | 125 | ``backlog`` 126 | 127 | The maximum number of pending connections. 128 | 129 | This refers to the number of clients that can be waiting to be served. 130 | Exceeding this number results in the client getting an error when 131 | attempting to connect. It should only affect servers under significant 132 | load. 133 | 134 | Must be a positive integer. Generally set in the 64-2048 range. 135 | 136 | *Default*: 256 137 | 138 | *Required*: No. 139 | 140 | 141 | ``proto`` 142 | 143 | Socket protocol to use. Three options are available: *tcp4* - IPv4, 144 | *tcp6* - IPv6, *unix* - UNIX domain socket path. 145 | 146 | *Default*: tcp4 147 | 148 | *Required*: No. 149 | 150 | 151 | ``service`` 152 | 153 | List of services that can access this socket. 154 | 155 | *Default*: all services can access the socket. 156 | 157 | *Required*: No. 158 | 159 | 160 | ``app`` 161 | 162 | Worker-specific setting. The value of the ``app`` field is available as the ``FECTL_APP_%(name)`` 163 | environment variable. 164 | 165 | *Required*: No. 166 | 167 | ``arguments`` 168 | 169 | List of worker-specific settings. The value of the ``arguments`` field is available as the ``FECTL_ARGS_%(name)`` 170 | environment variable. 171 | 172 | *Required*: No. 173 | 174 | .. note:: 175 | 176 | ``app`` and ``arguments`` are used by a specific worker, e.g. Python's `asyncio` worker can load an `aiohttp` application 177 | with a specific set of arguments.
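For illustration only, a ``[master]`` section and a ``[[socket]]`` section might look like the sketch below; the socket name, port, and file paths are placeholder values, not defaults shipped with ``fectld``::

    # master process settings (illustrative values)
    [master]
    sock = "/tmp/fectld.sock"          # control socket the fectl client connects to
    pid = "/var/run/fectld.pid"        # where to store the master pid
    stdout = "/var/log/fectld.out"     # redirect master stdout

    # listening socket passed to workers as FECTL_FD_http
    [[socket]]
    name = "http"
    port = 8080
    host = "127.0.0.1"
    backlog = 256
    proto = "tcp4"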
178 | 179 | 180 | ``[[service]]`` Section Settings 181 | -------------------------------- 182 | 183 | Each managed application can be configured with a ``[[service]]`` section. It is possible to 184 | specify the number of workers, various timeouts, and the command line. The 185 | allowable configuration values are as follows. 186 | 187 | 188 | ``name`` 189 | 190 | The name of the service. This name is used as the service identifier; all commands that can be sent 191 | to a service require this name. 192 | 193 | *Required*: Yes. 194 | 195 | 196 | ``num`` 197 | 198 | The number of workers to start. Must be a positive integer. 199 | 200 | *Required*: Yes. 201 | 202 | ``command`` 203 | 204 | The application start command. ``fectld`` passes configuration (like socket fds, app config, etc.) 205 | in environment variables. The application has to support the ``fectl`` communication protocol. ``fectl`` 206 | provides several worker implementations for Python, such as asyncio and gevent workers. 207 | 208 | *Required*: Yes. 209 | 210 | ``directory`` 211 | 212 | Before :program:`fectld` executes the command, switch to this directory. 213 | 214 | *Default*: do not cd 215 | 216 | *Required*: No. 217 | 218 | ``restarts`` 219 | 220 | Number of restarts before marking the worker as failed. 221 | 222 | *Default*: 3 223 | 224 | *Required*: No. 225 | 226 | ``gid`` 227 | 228 | Switch worker processes to run as this group. 229 | 230 | A valid group id (as an integer) or the name of a group that can be 231 | retrieved with a call to ``libc::getgrnam(value)``, or ``None`` to not 232 | change the worker process group. If :program:`fectld` can not change the group, the 233 | worker fails to start. 234 | 235 | *Required*: No. 236 | 237 | ``uid`` 238 | 239 | Switch worker processes to run as this user. 240 | A valid user id (as an integer) or the name of a user that can be 241 | retrieved with a call to ``libc::getpwnam(value)``, or ``None`` to not 242 | change the worker process user. If :program:`fectld` can not change the user, the 243 | worker fails to start. 244 | 245 | *Required*: No. 246 | 247 | ``timeout`` 248 | 249 | Workers have to send `heartbeat` messages to the master process. Workers silent for more than this many 250 | seconds are killed and restarted. 251 | 252 | *Default*: 10 253 | 254 | *Required*: No. 255 | 256 | ``startup_timeout`` 257 | 258 | Timeout for worker startup. After start, workers have this much time to report 259 | a readiness state. Workers that do not report the `loaded` state to the master are force killed and 260 | restarted. After three attempts the service is marked as failed. 261 | 262 | *Default*: 30 263 | 264 | *Required*: No. 265 | 266 | ``shutdown_timeout`` 267 | 268 | Timeout for graceful worker shutdown. After receiving a restart or stop signal, 269 | workers have this much time to finish serving requests or any other activity. Workers still alive after 270 | the timeout (starting from the receipt of the restart signal) are force killed. 271 | 272 | *Default*: 30 273 | 274 | *Required*: No. 
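Putting the pieces together, an illustrative ``[[service]]`` section that could receive the ``http`` socket from the sketch above might look as follows; the service name, command, and directory are placeholders rather than values required by ``fectld``::

    # a managed service (illustrative values)
    [[service]]
    name = "app"                        # identifier used by client commands, e.g. `fectl status app`
    num = 2                             # number of workers to start
    command = "python3 my_worker.py"    # placeholder: the command must speak the fectl worker protocol
    directory = "/srv/app"
    restarts = 3                        # restarts before the service is marked as failed
    timeout = 10                        # heartbeat timeout, in seconds
    startup_timeout = 30
    shutdown_timeout = 30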
275 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::io::Write; 3 | use std::path::PathBuf; 4 | 5 | fn main() { 6 | let out = env::var("OUT_DIR").expect("should not fail"); 7 | let dst = PathBuf::from(out).join("version.rs"); 8 | let mut f = std::fs::OpenOptions::new() 9 | .write(true).truncate(true).create(true).open(dst).expect(""); 10 | 11 | f.write_all(format!( 12 | "pub struct PkgInfo {{ 13 | pub name: &'static str, 14 | pub description: &'static str, 15 | pub authors: &'static str, 16 | pub version: &'static str, 17 | pub version_major: u8, 18 | pub version_minor: u8, 19 | pub version_patch: u8, 20 | pub version_pre: &'static str, 21 | }} 22 | 23 | pub const PKG_INFO: PkgInfo = PkgInfo {{ 24 | name: \"{}\", 25 | description: \"{}\", 26 | authors: \"{}\", 27 | version: \"{}\", 28 | version_major: {}, 29 | version_minor: {}, 30 | version_patch: {}, 31 | version_pre: \"{}\", 32 | }};", 33 | env::var("CARGO_PKG_NAME").unwrap_or("".to_owned()), 34 | env::var("CARGO_PKG_DESCRIPTION").unwrap_or("".to_owned()), 35 | env::var("CARGO_PKG_AUTHORS").unwrap_or("".to_owned()), 36 | env::var("CARGO_PKG_VERSION").unwrap_or("0.0.0".to_owned()), 37 | env::var("CARGO_PKG_VERSION_MAJOR").unwrap_or("0".to_owned()), 38 | env::var("CARGO_PKG_VERSION_MINOR").unwrap_or("0".to_owned()), 39 | env::var("CARGO_PKG_VERSION_PATCH").unwrap_or("0".to_owned()), 40 | env::var("CARGO_PKG_VERSION_PRE").unwrap_or("".to_owned()), 41 | ).as_bytes()).unwrap(); 42 | } 43 | -------------------------------------------------------------------------------- /client/client.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | use std::io::{self, Read, Write}; 3 | use std::time::Duration; 4 | use std::os::unix::net::UnixStream; 5 | 6 | use chrono::prelude::*; 7 | use serde_json as json; 8 | use byteorder::{BigEndian, ByteOrder}; 9 | use bytes::{BufMut, BytesMut}; 10 | use tokio_io::codec::{Encoder, Decoder}; 11 | 12 | use version::PKG_INFO; 13 | use event::Reason; 14 | use master_types::{MasterRequest, MasterResponse}; 15 | 16 | /// Console commands 17 | #[derive(Clone, Debug)] 18 | pub enum ClientCommand { 19 | Start(String), 20 | Pause(String), 21 | Resume(String), 22 | Reload(String), 23 | Restart(String), 24 | Stop(String), 25 | Status(String), 26 | SPid(String), 27 | Pid, 28 | Quit, 29 | Version, 30 | VersionCheck, 31 | } 32 | 33 | /// Send command to master 34 | pub fn send_command(stream: &mut UnixStream, req: MasterRequest) -> Result<(), io::Error> { 35 | let mut buf = BytesMut::new(); 36 | ClientTransportCodec.encode(req, &mut buf)?; 37 | 38 | stream.write_all(buf.as_ref()) 39 | } 40 | 41 | /// read master response 42 | pub fn read_response(stream: &mut UnixStream, buf: &mut BytesMut) 43 | -> Result 44 | { 45 | loop { 46 | buf.reserve(1024); 47 | 48 | unsafe { 49 | match stream.read(buf.bytes_mut()) { 50 | Ok(n) => { 51 | buf.advance_mut(n); 52 | 53 | if let Some(resp) = ClientTransportCodec.decode(buf)? 
{ 54 | return Ok(resp) 55 | } else { 56 | if n == 0 { 57 | return Err(io::Error::new(io::ErrorKind::Other, "closed")) 58 | } 59 | } 60 | }, 61 | Err(e) => return Err(e), 62 | } 63 | } 64 | } 65 | } 66 | 67 | fn try_read_response(stream: &mut UnixStream, buf: &mut BytesMut) 68 | -> Result 69 | { 70 | let mut retry = 5; 71 | loop { 72 | match read_response(stream, buf) { 73 | Ok(resp) => { 74 | debug!("Master response: {:?}", resp); 75 | return Ok(resp); 76 | } 77 | Err(err) => match err.kind() { 78 | io::ErrorKind::TimedOut => 79 | if retry > 0 { 80 | retry -= 1; 81 | continue 82 | } 83 | io::ErrorKind::WouldBlock => { 84 | thread::sleep(Duration::from_millis(100)); 85 | continue 86 | } 87 | _ => return Err(err) 88 | } 89 | } 90 | } 91 | } 92 | 93 | /// Run client command 94 | pub fn run(cmd: ClientCommand, sock: &str) -> bool { 95 | // create commands listener and also check if service process is running 96 | let mut buf = BytesMut::new(); 97 | let mut stream = match UnixStream::connect(&sock) { 98 | Ok(mut conn) => { 99 | conn.set_read_timeout(Some(Duration::new(1, 0))).expect("Couldn't set read timeout"); 100 | let _ = send_command(&mut conn, MasterRequest::Ping); 101 | 102 | if try_read_response(&mut conn, &mut buf).is_ok() { 103 | conn 104 | } else { 105 | error!("Master process is not responding."); 106 | return false 107 | } 108 | } 109 | Err(err) => { 110 | match err.kind() { 111 | io::ErrorKind::PermissionDenied => { 112 | error!("Can not connect to master. Permission denied. {}", sock); 113 | }, 114 | _ => { 115 | error!("Can not connect to master {}: {}", sock, err); 116 | } 117 | } 118 | return false 119 | } 120 | }; 121 | 122 | // Send command 123 | let res = match cmd.clone() { 124 | ClientCommand::Status(name) => 125 | send_command(&mut stream, MasterRequest::Status(name)), 126 | ClientCommand::SPid(name) => 127 | send_command(&mut stream, MasterRequest::SPid(name)), 128 | ClientCommand::Pause(name) => { 129 | println!("Pause `{}` service.", name); 130 | send_command(&mut stream, MasterRequest::Pause(name)) 131 | } 132 | ClientCommand::Resume(name) => { 133 | println!("Resume `{}` service.", name); 134 | send_command(&mut stream, MasterRequest::Resume(name)) 135 | } 136 | ClientCommand::Start(name) => { 137 | print!("Starting `{}` service.", name); 138 | send_command(&mut stream, MasterRequest::Start(name)) 139 | } 140 | ClientCommand::Reload(name) => { 141 | print!("Reloading `{}` service.", name); 142 | send_command(&mut stream, MasterRequest::Reload(name)) 143 | } 144 | ClientCommand::Restart(name) => { 145 | print!("Restarting `{}` service", name); 146 | send_command(&mut stream, MasterRequest::Restart(name)) 147 | } 148 | ClientCommand::Stop(name) => { 149 | print!("Stopping `{}` service.", name); 150 | send_command(&mut stream, MasterRequest::Stop(name)) 151 | } 152 | ClientCommand::Pid => { 153 | send_command(&mut stream, MasterRequest::Pid) 154 | } 155 | ClientCommand::Version | ClientCommand::VersionCheck => { 156 | send_command(&mut stream, MasterRequest::Version) 157 | } 158 | ClientCommand::Quit => { 159 | print!("Quiting."); 160 | send_command(&mut stream, MasterRequest::Quit) 161 | } 162 | }; 163 | let _ = io::stdout().flush(); 164 | 165 | if let Err(err) = res { 166 | error!("Can not send command {:?} error: {}", cmd, err); 167 | return false 168 | } 169 | 170 | // read response 171 | loop { 172 | match try_read_response(&mut stream, &mut buf) { 173 | Ok(MasterResponse::Pong) => { 174 | print!("."); 175 | let _ = io::stdout().flush(); 176 | } 177 | 
Ok(MasterResponse::Done) => { 178 | println!(); 179 | return true 180 | } 181 | Ok(MasterResponse::Pid(pid)) => { 182 | println!("{}", pid); 183 | return true 184 | } 185 | Ok(MasterResponse::Version(ver)) => { 186 | match cmd { 187 | ClientCommand::VersionCheck => 188 | return ver.ends_with(PKG_INFO.version), 189 | _ => { 190 | println!("{}", ver); 191 | return true 192 | } 193 | } 194 | } 195 | Ok(MasterResponse::ServiceStarted) | Ok(MasterResponse::ServiceStopped) => { 196 | println!("done"); 197 | return true 198 | } 199 | Ok(MasterResponse::ServiceStatus(status)) => { 200 | println!("Service status: {}", status.0); 201 | for worker in status.1 { 202 | for ev in worker.1 { 203 | let dt = Local.timestamp(ev.timestamp as i64, 0); 204 | print!("{} {}: ", worker.0, dt.format("%Y-%m-%d %H:%M:%S")); 205 | if let Some(ref pid) = ev.pid { 206 | print!("(pid:{}) ", pid) 207 | } 208 | print!("{:?}", ev.state); 209 | match ev.reason { 210 | Reason::None | Reason::Initial => (), 211 | _ => print!(", reason: {:?}", ev.reason), 212 | } 213 | println!(); 214 | } 215 | } 216 | return true 217 | } 218 | Ok(MasterResponse::ServiceWorkerPids(pids)) => { 219 | for pid in pids { 220 | println!("{}", pid); 221 | } 222 | return true 223 | } 224 | Ok(MasterResponse::ServiceFailed) => { 225 | println!("failed."); 226 | return false 227 | }, 228 | Ok(MasterResponse::ErrorNotReady) => { 229 | error!("Service is loading"); 230 | return false 231 | } 232 | Ok(MasterResponse::ErrorUnknownService) => { 233 | error!("Service is unknown"); 234 | return false 235 | } 236 | Ok(MasterResponse::ErrorServiceStarting) => { 237 | error!("Service is starting"); 238 | return false 239 | } 240 | Ok(MasterResponse::ErrorServiceReloading) => { 241 | error!("Service is restarting"); 242 | return false 243 | } 244 | Ok(MasterResponse::ErrorServiceStopping) => { 245 | error!("Service is stopping"); 246 | return false 247 | } 248 | Ok(resp) => println!("MSG: {:?}", resp), 249 | Err(err) => { 250 | println!("Error: {:?}", err); 251 | error!("Master process is not responding."); 252 | return false 253 | } 254 | } 255 | } 256 | } 257 | 258 | pub struct ClientTransportCodec; 259 | 260 | impl Encoder for ClientTransportCodec 261 | { 262 | type Item = MasterRequest; 263 | type Error = io::Error; 264 | 265 | fn encode(&mut self, msg: MasterRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { 266 | let msg = json::to_string(&msg).unwrap(); 267 | let msg_ref: &[u8] = msg.as_ref(); 268 | 269 | dst.reserve(msg_ref.len() + 2); 270 | dst.put_u16::(msg_ref.len() as u16); 271 | dst.put(msg_ref); 272 | 273 | Ok(()) 274 | } 275 | } 276 | 277 | impl Decoder for ClientTransportCodec 278 | { 279 | type Item = MasterResponse; 280 | type Error = io::Error; 281 | 282 | fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { 283 | let size = { 284 | if src.len() < 2 { 285 | return Ok(None) 286 | } 287 | BigEndian::read_u16(src.as_ref()) as usize 288 | }; 289 | 290 | if src.len() >= size + 2 { 291 | src.split_to(2); 292 | let buf = src.split_to(size); 293 | Ok(Some(json::from_slice::(&buf)?)) 294 | } else { 295 | Ok(None) 296 | } 297 | } 298 | } 299 | -------------------------------------------------------------------------------- /client/config.rs: -------------------------------------------------------------------------------- 1 | use structopt::StructOpt; 2 | use client::ClientCommand; 3 | 4 | 5 | #[derive(StructOpt, Debug)] 6 | struct Cli { 7 | /// Master process unix socket file path 8 | #[structopt(long="sock", short="m", 
default_value="fectld.sock")] 9 | sock: String, 10 | 11 | /// Run command (Supported commands: status, start, reload, restart, stop) 12 | command: String, 13 | 14 | /// Service name 15 | name: Option, 16 | } 17 | 18 | 19 | pub fn load_config() -> Option<(ClientCommand, String)> { 20 | // cmd arguments 21 | let args = Cli::from_args(); 22 | let cmd = args.command.to_lowercase().trim().to_owned(); 23 | let sock = args.sock.clone(); 24 | 25 | // check client args 26 | match cmd.as_str() { 27 | "pid" => 28 | return Some((ClientCommand::Pid, sock)), 29 | "quit" => 30 | return Some((ClientCommand::Quit, sock)), 31 | "version" => 32 | return Some((ClientCommand::Version, sock)), 33 | "version-check" => 34 | return Some((ClientCommand::VersionCheck, sock)), 35 | _ => () 36 | } 37 | 38 | let name = match args.name { 39 | None => { 40 | println!("Service name is required"); 41 | return None 42 | } 43 | Some(ref name) => 44 | name.clone() 45 | }; 46 | 47 | let cmd = match cmd.as_str() { 48 | "status" => ClientCommand::Status(name), 49 | "spid" => ClientCommand::SPid(name), 50 | "start" => ClientCommand::Start(name), 51 | "stop" => ClientCommand::Stop(name), 52 | "reload" => ClientCommand::Reload(name), 53 | "restart" => ClientCommand::Restart(name), 54 | "pause" => ClientCommand::Pause(name), 55 | "resume" => ClientCommand::Resume(name), 56 | _ => { 57 | println!("Unknown command: {}", cmd); 58 | return None 59 | } 60 | }; 61 | return Some((cmd, sock)) 62 | } 63 | -------------------------------------------------------------------------------- /client/main.rs: -------------------------------------------------------------------------------- 1 | extern crate env_logger; 2 | #[macro_use] extern crate log; 3 | 4 | extern crate serde_json; 5 | #[macro_use] extern crate serde_derive; 6 | 7 | extern crate structopt; 8 | #[macro_use] extern crate structopt_derive; 9 | 10 | extern crate chrono; 11 | extern crate byteorder; 12 | extern crate bytes; 13 | extern crate tokio_io; 14 | 15 | mod client; 16 | mod config; 17 | mod version { 18 | include!(concat!(env!("OUT_DIR"), "/version.rs")); 19 | } 20 | mod event { 21 | include!("../src/event.rs"); 22 | } 23 | mod master_types { 24 | include!("../src/master_types.rs"); 25 | } 26 | 27 | 28 | fn main() { 29 | let _ = env_logger::init(); 30 | 31 | let success = match config::load_config() { 32 | Some((cmd, sock)) => client::run(cmd, &sock), 33 | None => false, 34 | }; 35 | std::process::exit(if success {0} else {1}); 36 | } 37 | -------------------------------------------------------------------------------- /fectl/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.2.0' 2 | -------------------------------------------------------------------------------- /fectl/apps/__init__.py: -------------------------------------------------------------------------------- 1 | from .aiohttp_config import AiohttpSettings 2 | 3 | AIOHTTP_SETTINGS = AiohttpSettings() 4 | 5 | 6 | APPS = { 7 | "aiohttp": "fectl.apps.aiohttp.AiohttpRunner", 8 | } 9 | -------------------------------------------------------------------------------- /fectl/apps/aiohttp.py: -------------------------------------------------------------------------------- 1 | """Aiohttp runner for aiohttp.web""" 2 | 3 | import asyncio 4 | import logging 5 | import os 6 | import socket 7 | import ssl 8 | 9 | from .. import config, utils, errors 10 | from ..workers import WorkerType 11 | from . 
import AIOHTTP_SETTINGS 12 | 13 | 14 | class AiohttpRunner: 15 | 16 | LOG_FORMAT = '%a %l %u %t "%r" %s %b "%{Referrer}i" "%{User-Agent}i"' 17 | 18 | def __init__(self, worker, sock, arguments): # pragma: no cover 19 | if worker.TYPE != WorkerType.Asyncio: 20 | raise errors.UnsupportedWorker( 21 | 'Aiohttp application requires asyncio worker') 22 | 23 | self.loop = asyncio.get_event_loop() 24 | self.sock = sock 25 | self.server = None 26 | self.handler = None 27 | self.cfg = AIOHTTP_SETTINGS(arguments) 28 | 29 | try: 30 | self.ssl = self._ssl_context(self.cfg) if self.cfg.is_ssl else None 31 | except Exception as exc: 32 | raise utils.ConfigurationError( 33 | 'Can not create ssl context: %s' % exc) 34 | 35 | def make_handler(self, app): 36 | access_log = self.log.access_log if self.cfg.accesslog else None 37 | return app.make_handler( 38 | loop=self.loop, 39 | # logger=self.log, 40 | slow_request_timeout=self.cfg.slow_request_timeout, 41 | keepalive_timeout=self.cfg.keepalive, 42 | access_log=access_log, 43 | access_log_format=self.cfg.access_log_format) 44 | 45 | @asyncio.coroutine 46 | def init(self): 47 | import aiohttp 48 | 49 | if self.cfg.app is None: 50 | raise config.ConfigurationError( 51 | 'Aiohttp application is required. ' 52 | 'Please provide `app=...` config') 53 | 54 | if isinstance(self.cfg.app, aiohttp.web.Application): 55 | self.app = self.cfg.app 56 | else: 57 | try: 58 | self.app = self.cfg.app() 59 | if asyncio.iscoroutine(self.app): 60 | self.app = yield from self.app 61 | except: 62 | logging.exception('Can not load application') 63 | raise config.ConfigurationError( 64 | 'Can not load application: %s' % self.cfg.app) 65 | 66 | yield from self.app.startup() 67 | self.handler = self.make_handler(self.app) 68 | 69 | @asyncio.coroutine 70 | def start(self): 71 | if self.server is None: 72 | if (hasattr(socket, 'AF_UNIX') and 73 | self.sock.family == socket.AF_UNIX): 74 | self.server = yield from self.loop.create_unix_server( 75 | self.handler, sock=self.sock.dup(), ssl=self.ssl) 76 | else: 77 | self.server = yield from self.loop.create_server( 78 | self.handler, sock=self.sock.dup(), ssl=self.ssl) 79 | 80 | @asyncio.coroutine 81 | def stop(self): 82 | if self.server is not None: 83 | # stop accepting connections 84 | logging.info("Stopping aiohttp server: %s, connections: %s", 85 | os.getpid(), len(self.handler.connections)) 86 | self.server.close() 87 | yield from self.server.wait_closed() 88 | self.server = None 89 | 90 | if self.handler is not None: 91 | # stop alive connections 92 | yield from self.handler.shutdown( 93 | timeout=self.cfg.graceful_timeout / 100 * 95) 94 | self.handler = None 95 | 96 | # send on_shutdown event 97 | yield from self.app.shutdown() 98 | 99 | # cleanup application 100 | yield from self.app.cleanup() 101 | 102 | @asyncio.coroutine 103 | def pause(self): 104 | if self.server is not None: 105 | # stop accepting connections 106 | logging.info("Stop accepting conections: %s", os.getpid()) 107 | self.server.close() 108 | yield from self.server.wait_closed() 109 | self.server = None 110 | 111 | @asyncio.coroutine 112 | def resume(self): 113 | yield from self.start() 114 | 115 | @staticmethod 116 | def _ssl_context(cfg): 117 | """ Creates SSLContext instance for usage in asyncio.create_server. 118 | 119 | See ssl.SSLSocket.__init__ for more details. 
120 | """ 121 | ctx = ssl.SSLContext(cfg.ssl_version) 122 | ctx.load_cert_chain(cfg.certfile, cfg.keyfile) 123 | ctx.verify_mode = cfg.cert_reqs 124 | if cfg.ca_certs: 125 | ctx.load_verify_locations(cfg.ca_certs) 126 | if cfg.ciphers: 127 | ctx.set_ciphers(cfg.ciphers) 128 | return ctx 129 | -------------------------------------------------------------------------------- /fectl/apps/aiohttp_config.py: -------------------------------------------------------------------------------- 1 | from .. import config 2 | from ..path import DottedNameResolver 3 | 4 | 5 | class App(config.Setting): 6 | name = "app" 7 | section = "Application" 8 | cli = ["--app"] 9 | meta = "STRING" 10 | desc = """\ 11 | web.Application instance. 12 | 13 | Callable that returns web.Application instance or 14 | python path to web.Application instance. 15 | """ 16 | 17 | @staticmethod 18 | def validator(val): 19 | import aiohttp 20 | try: 21 | app = DottedNameResolver().resolve(val) 22 | if callable(app) or isinstance(app, aiohttp.web.Application): 23 | return app 24 | except: 25 | pass 26 | 27 | raise config.ConfigurationError( 28 | "Can not load aiohttp application: %s", val) 29 | 30 | 31 | class AccessLogFormat(config.Setting): 32 | name = "access_log_format" 33 | section = "Logging" 34 | cli = ["--access-logformat"] 35 | meta = "STRING" 36 | validator = config.validate_string 37 | default = '%a %l %u %t "%r" %s %b "%{Referrer}i" "%{User-Agent}i"' 38 | desc = """\ 39 | The access log format. 40 | 41 | =========== =========== 42 | Identifier Description 43 | =========== =========== 44 | %% The percent sign 45 | %a Remote IP-address (IP-address of proxy if using reverse proxy) 46 | %t Time when the request was started to process 47 | %P The process ID of the child that serviced the request 48 | %r First line of request 49 | %s Response status code 50 | %b Size of response in bytes, including HTTP headers 51 | %T Time taken to serve the request, in seconds 52 | %Tf Time taken to serve the request, in seconds with floating fraction 53 | in .06f format 54 | %D Time taken to serve the request, in microseconds 55 | %{FOO}i request.headers['FOO'] 56 | %{FOO}o response.headers['FOO'] 57 | %{FOO}e os.environ['FOO'] 58 | =========== =========== 59 | """ 60 | 61 | 62 | class AiohttpSettings(config.Settings): 63 | 64 | def __init__(self): 65 | super(AiohttpSettings, self).__init__() 66 | 67 | self.add(App) 68 | self.add(config.GracefulTimeout) 69 | self.add(config.Keepalive) 70 | self.add(config.SlowRequestTimeout) 71 | self.add(config.LimitRequestLine) 72 | self.add(config.LimitRequestFields) 73 | self.add(config.LimitRequestFieldSize) 74 | self.add(config.AccessLog) 75 | self.add(AccessLogFormat) 76 | self.add(config.ErrorLog) 77 | self.add(config.Loglevel) 78 | self.add(config.CaptureOutput) 79 | self.add(config.LogConfig) 80 | self.add(config.Procname) 81 | self.add(config.DefaultProcName) 82 | self.add(config.KeyFile) 83 | self.add(config.CertFile) 84 | self.add(config.SSLVersion) 85 | self.add(config.CertReqs) 86 | self.add(config.CACerts) 87 | self.add(config.Ciphers) 88 | 89 | def __call__(self, arguments): 90 | return AiohttpConfig(self.make_settings(arguments)) 91 | 92 | 93 | class AiohttpConfig(object): 94 | 95 | def __init__(self, settings, arguments=None): 96 | self.settings = settings 97 | self.arguments = arguments 98 | 99 | def __getattr__(self, name): 100 | if name not in self.settings: 101 | raise AttributeError("No configuration setting for: %s" % name) 102 | return self.settings[name].get() 103 | 104 | def 
__setattr__(self, name, value): 105 | if name != "settings" and name in self.settings: 106 | raise AttributeError("Invalid access!") 107 | super(AiohttpConfig, self).__setattr__(name, value) 108 | 109 | def set(self, name, value): 110 | if name not in self.settings: 111 | raise AttributeError("No configuration setting for: %s" % name) 112 | self.settings[name].set(value) 113 | 114 | @property 115 | def proc_name(self): 116 | pn = self.settings['proc_name'].get() 117 | if pn is not None: 118 | return pn 119 | else: 120 | return self.settings['default_proc_name'].get() 121 | 122 | # @property 123 | # def logger_class(self): 124 | # uri = self.settings['logger_class'].get() 125 | # if uri == "simple": 126 | # # support the default 127 | # uri = LoggerClass.default 128 | 129 | # logger_class = util.load_class( 130 | # uri, 131 | # default="gunicorn.glogging.Logger", 132 | # section="gunicorn.loggers") 133 | 134 | # if hasattr(logger_class, "install"): 135 | # logger_class.install() 136 | # return logger_class 137 | 138 | @property 139 | def is_ssl(self): 140 | return self.certfile or self.keyfile 141 | 142 | @property 143 | def ssl_options(self): 144 | opts = {} 145 | for name, value in self.settings.items(): 146 | if value.section == 'SSL': 147 | opts[name] = value.get() 148 | return opts 149 | -------------------------------------------------------------------------------- /fectl/errors.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class ConfigurationError(Exception): 4 | """ Exception raised on config error """ 5 | 6 | 7 | class UnsupportedWorker(ConfigurationError): 8 | """ Worker is not supported by application """ 9 | -------------------------------------------------------------------------------- /fectl/path.py: -------------------------------------------------------------------------------- 1 | """Copy of pyramid's path resolver, 2 | https://github.com/Pylons/pyramid/blob/master/pyramid/path.py """ 3 | 4 | import imp 5 | import os 6 | import pkg_resources 7 | import six 8 | import sys 9 | 10 | 11 | ignore_types = [imp.C_EXTENSION, imp.C_BUILTIN] 12 | init_names = ['__init__%s' % x[0] for x in imp.get_suffixes() if 13 | x[0] and x[2] not in ignore_types] 14 | 15 | 16 | def caller_path(path, level=2): 17 | if not os.path.isabs(path): 18 | module = caller_module(level + 1) 19 | prefix = package_path(module) 20 | path = os.path.join(prefix, path) 21 | return path 22 | 23 | 24 | def caller_module(level=2, sys=sys): 25 | module_globals = sys._getframe(level).f_globals 26 | module_name = module_globals.get('__name__') or '__main__' 27 | module = sys.modules[module_name] 28 | return module 29 | 30 | 31 | def package_name(pkg_or_module): 32 | """ If this function is passed a module, return the dotted Python 33 | package name of the package in which the module lives. 
If this 34 | function is passed a package, return the dotted Python package 35 | name of the package itself.""" 36 | if pkg_or_module is None or pkg_or_module.__name__ == '__main__': 37 | return '__main__' 38 | pkg_name = pkg_or_module.__name__ 39 | pkg_filename = getattr(pkg_or_module, '__file__', None) 40 | if pkg_filename is None: 41 | # Namespace packages do not have __init__.py* files, 42 | # and so have no __file__ attribute 43 | return pkg_name 44 | splitted = os.path.split(pkg_filename) 45 | if splitted[-1] in init_names: 46 | # it's a package 47 | return pkg_name 48 | return pkg_name.rsplit('.', 1)[0] 49 | 50 | 51 | def package_of(pkg_or_module): 52 | """ Return the package of a module or return the package itself """ 53 | pkg_name = package_name(pkg_or_module) 54 | __import__(pkg_name) 55 | return sys.modules[pkg_name] 56 | 57 | 58 | def caller_package(level=2, caller_module=caller_module): 59 | # caller_module in arglist for tests 60 | module = caller_module(level + 1) 61 | f = getattr(module, '__file__', '') 62 | if (('__init__.py' in f) or ('__init__$py' in f)): # empty at >>> 63 | # Module is a package 64 | return module 65 | # Go up one level to get package 66 | package_name = module.__name__.rsplit('.', 1)[0] 67 | return sys.modules[package_name] 68 | 69 | 70 | def package_path(package): 71 | # computing the abspath is actually kinda expensive so we memoize 72 | # the result 73 | prefix = getattr(package, '__abspath__', None) 74 | if prefix is None: 75 | prefix = pkg_resources.resource_filename(package.__name__, '') 76 | # pkg_resources doesn't care whether we feed it a package 77 | # name or a module name within the package, the result 78 | # will be the same: a directory name to the package itself 79 | try: 80 | package.__abspath__ = prefix 81 | except: 82 | # this is only an optimization, ignore any error 83 | pass 84 | return prefix 85 | 86 | 87 | class _CALLER_PACKAGE(object): 88 | def __repr__(self): # pragma: no cover (for docs) 89 | return 'fectl.path.CALLER_PACKAGE' 90 | 91 | 92 | CALLER_PACKAGE = _CALLER_PACKAGE() 93 | 94 | 95 | class Resolver(object): 96 | def __init__(self, package=CALLER_PACKAGE): 97 | if package in (None, CALLER_PACKAGE): 98 | self.package = package 99 | else: 100 | if isinstance(package, six.string_types): 101 | try: 102 | __import__(package) 103 | except ImportError: 104 | raise ValueError( 105 | 'The dotted name %r cannot be imported' % (package,) 106 | ) 107 | package = sys.modules[package] 108 | self.package = package_of(package) 109 | 110 | def get_package_name(self): 111 | if self.package is CALLER_PACKAGE: 112 | package_name = caller_package().__name__ 113 | else: 114 | package_name = self.package.__name__ 115 | return package_name 116 | 117 | def get_package(self): 118 | if self.package is CALLER_PACKAGE: 119 | package = caller_package() 120 | else: 121 | package = self.package 122 | return package 123 | 124 | 125 | class DottedNameResolver(Resolver): 126 | """ A class used to resolve a :term:`dotted Python name` to a package or 127 | module object. 128 | 129 | .. versionadded:: 1.3 130 | 131 | The constructor accepts a single argument named ``package`` which may be 132 | any of: 133 | 134 | - A fully qualified (not relative) dotted name to a module or package 135 | 136 | - a Python module or package object 137 | 138 | - The value ``None`` 139 | 140 | - The constant value :attr:`fectl.path.CALLER_PACKAGE`. 141 | 142 | The default value is :attr:`fectl.path.CALLER_PACKAGE`. 
143 | 144 | The ``package`` is used when a relative dotted name is supplied to the 145 | :meth:`~fectl.path.DottedNameResolver.resolve` method. A dotted name 146 | which has a ``.`` (dot) or ``:`` (colon) as its first character is 147 | treated as relative. 148 | 149 | If ``package`` is ``None``, the resolver will only be able to resolve 150 | fully qualified (not relative) names. Any attempt to resolve a 151 | relative name will result in an :exc:`ValueError` exception. 152 | 153 | If ``package`` is :attr:`fectl.path.CALLER_PACKAGE`, 154 | the resolver will treat relative dotted names as relative to 155 | the caller of the :meth:`~fectl.path.DottedNameResolver.resolve` 156 | method. 157 | 158 | If ``package`` is a *module* or *module name* (as opposed to a package or 159 | package name), its containing package is computed and this 160 | package used to derive the package name (all names are resolved relative 161 | to packages, never to modules). For example, if the ``package`` argument 162 | to this type was passed the string ``xml.dom.expatbuilder``, and 163 | ``.mindom`` is supplied to the 164 | :meth:`~fectl.path.DottedNameResolver.resolve` method, the resulting 165 | import would be for ``xml.minidom``, because ``xml.dom.expatbuilder`` is 166 | a module object, not a package object. 167 | 168 | If ``package`` is a *package* or *package name* (as opposed to a module or 169 | module name), this package will be used to relative compute 170 | dotted names. For example, if the ``package`` argument to this type was 171 | passed the string ``xml.dom``, and ``.minidom`` is supplied to the 172 | :meth:`~fectl.path.DottedNameResolver.resolve` method, the resulting 173 | import would be for ``xml.minidom``. 174 | """ 175 | def resolve(self, dotted): 176 | """ 177 | This method resolves a dotted name reference to a global Python 178 | object (an object which can be imported) to the object itself. 179 | 180 | Two dotted name styles are supported: 181 | 182 | - ``pkg_resources``-style dotted names where non-module attributes 183 | of a package are separated from the rest of the path using a ``:`` 184 | e.g. ``package.module:attr``. 185 | 186 | - ``zope.dottedname``-style dotted names where non-module 187 | attributes of a package are separated from the rest of the path 188 | using a ``.`` e.g. ``package.module.attr``. 189 | 190 | These styles can be used interchangeably. If the supplied name 191 | contains a ``:`` (colon), the ``pkg_resources`` resolution 192 | mechanism will be chosen, otherwise the ``zope.dottedname`` 193 | resolution mechanism will be chosen. 194 | 195 | If the ``dotted`` argument passed to this method is not a string, a 196 | :exc:`ValueError` will be raised. 197 | 198 | When a dotted name cannot be resolved, a :exc:`ValueError` error is 199 | raised. 200 | 201 | Example: 202 | 203 | .. code-block:: python 204 | 205 | r = DottedNameResolver() 206 | v = r.resolve('xml') # v is the xml module 207 | 208 | """ 209 | if not isinstance(dotted, six.string_types): 210 | raise ValueError('%r is not a string' % (dotted,)) 211 | package = self.package 212 | if package is CALLER_PACKAGE: 213 | package = caller_package() 214 | return self._resolve(dotted, package) 215 | 216 | def maybe_resolve(self, dotted): 217 | """ 218 | This method behaves just like 219 | :meth:`~fectl.path.DottedNameResolver.resolve`, except if the 220 | ``dotted`` value passed is not a string, it is simply returned. For 221 | example: 222 | 223 | .. 
code-block:: python 224 | 225 | import xml 226 | r = DottedNameResolver() 227 | v = r.maybe_resolve(xml) 228 | # v is the xml module; no exception raised 229 | """ 230 | if isinstance(dotted, six.string_types): 231 | package = self.package 232 | if package is CALLER_PACKAGE: 233 | package = caller_package() 234 | return self._resolve(dotted, package) 235 | return dotted 236 | 237 | def _resolve(self, dotted, package): 238 | if ':' in dotted: 239 | return self._pkg_resources_style(dotted, package) 240 | else: 241 | return self._zope_dottedname_style(dotted, package) 242 | 243 | def _pkg_resources_style(self, value, package): 244 | """ package.module:attr style """ 245 | if value.startswith(('.', ':')): 246 | if not package: 247 | raise ValueError( 248 | 'relative name %r irresolveable without package' % (value,) 249 | ) 250 | if value in ['.', ':']: 251 | value = package.__name__ 252 | else: 253 | value = package.__name__ + value 254 | # Calling EntryPoint.load with an argument is deprecated. 255 | # See https://pythonhosted.org/setuptools/history.html#id8 256 | ep = pkg_resources.EntryPoint.parse('x=%s' % value) 257 | if hasattr(ep, 'resolve'): 258 | # setuptools>=10.2 259 | return ep.resolve() # pragma: NO COVER 260 | else: 261 | return ep.load(False) # pragma: NO COVER 262 | 263 | def _zope_dottedname_style(self, value, package): 264 | """ package.module.attr style """ 265 | module = getattr(package, '__name__', None) # package may be None 266 | if not module: 267 | module = None 268 | if value == '.': 269 | if module is None: 270 | raise ValueError( 271 | 'relative name %r irresolveable without package' % (value,) 272 | ) 273 | name = module.split('.') 274 | else: 275 | name = value.split('.') 276 | if not name[0]: 277 | if module is None: 278 | raise ValueError( 279 | 'relative name %r irresolveable without ' 280 | 'package' % (value,) 281 | ) 282 | module = module.split('.') 283 | name.pop(0) 284 | while not name[0]: 285 | module.pop() 286 | name.pop(0) 287 | name = module + name 288 | 289 | used = name.pop(0) 290 | found = __import__(used) 291 | for n in name: 292 | used += '.' + n 293 | try: 294 | found = getattr(found, n) 295 | except AttributeError: 296 | __import__(used) 297 | found = getattr(found, n) # pragma: no cover 298 | 299 | return found 300 | 301 | 302 | class AssetResolver(Resolver): 303 | """ A class used to resolve an :term:`asset specification` to an 304 | :term:`asset descriptor`. 305 | 306 | .. versionadded:: 1.3 307 | 308 | The constructor accepts a single argument named ``package`` which may be 309 | any of: 310 | 311 | - A fully qualified (not relative) dotted name to a module or package 312 | 313 | - a Python module or package object 314 | 315 | - The value ``None`` 316 | 317 | - The constant value :attr:`fectl.path.CALLER_PACKAGE`. 318 | 319 | The default value is :attr:`fectl.path.CALLER_PACKAGE`. 320 | 321 | The ``package`` is used when a relative asset specification is supplied 322 | to the :meth:`~fectl.path.AssetResolver.resolve` method. An asset 323 | specification without a colon in it is treated as relative. 324 | 325 | If ``package`` is ``None``, the resolver will 326 | only be able to resolve fully qualified (not relative) asset 327 | specifications. Any attempt to resolve a relative asset specification 328 | will result in an :exc:`ValueError` exception. 
329 | 330 | If ``package`` is :attr:`fectl.path.CALLER_PACKAGE`, 331 | the resolver will treat relative asset specifications as 332 | relative to the caller of the :meth:`~fectl.path.AssetResolver.resolve` 333 | method. 334 | 335 | If ``package`` is a *module* or *module name* (as opposed to a package or 336 | package name), its containing package is computed and this 337 | package is used to derive the package name (all names are resolved relative 338 | to packages, never to modules). For example, if the ``package`` argument 339 | to this type was passed the string ``xml.dom.expatbuilder``, and 340 | ``template.pt`` is supplied to the 341 | :meth:`~fectl.path.AssetResolver.resolve` method, the resulting absolute 342 | asset spec would be ``xml.minidom:template.pt``, because 343 | ``xml.dom.expatbuilder`` is a module object, not a package object. 344 | 345 | If ``package`` is a *package* or *package name* (as opposed to a module or 346 | module name), this package will be used to compute relative 347 | asset specifications. For example, if the ``package`` argument to this 348 | type was passed the string ``xml.dom``, and ``template.pt`` is supplied 349 | to the :meth:`~fectl.path.AssetResolver.resolve` method, the resulting 350 | absolute asset spec would be ``xml.minidom:template.pt``. 351 | """ 352 | def resolve(self, spec): 353 | """ 354 | Resolve the asset spec named as ``spec`` to an object that has the 355 | attributes and methods described in 356 | :class:`fectl.interfaces.IAssetDescriptor`. 357 | 358 | If ``spec`` is an absolute filename 359 | (e.g. ``/path/to/myproject/templates/foo.pt``) or an absolute asset 360 | spec (e.g. ``myproject:templates.foo.pt``), an asset descriptor is 361 | returned without taking into account the ``package`` passed to this 362 | class' constructor. 363 | 364 | If ``spec`` is a *relative* asset specification (an asset 365 | specification without a ``:`` in it, e.g. ``templates/foo.pt``), the 366 | ``package`` argument of the constructor is used as the package 367 | portion of the asset spec. For example: 368 | 369 | .. code-block:: python 370 | 371 | a = AssetResolver('myproject') 372 | resolver = a.resolve('templates/foo.pt') 373 | print(resolver.abspath()) 374 | # -> /path/to/myproject/templates/foo.pt 375 | 376 | If the AssetResolver is constructed without a ``package`` argument of 377 | ``None``, and a relative asset specification is passed to 378 | ``resolve``, an :exc:`ValueError` exception is raised. 
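        A minimal sketch of the ``package=None`` case (the ``myproject``
        package name is illustrative, following the example above):

        .. code-block:: python

            a = AssetResolver(None)
            # absolute asset spec: resolves without a constructor package
            a.resolve('myproject:templates/foo.pt')
            # relative asset spec: raises ValueError
            a.resolve('templates/foo.pt')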
379 | """ 380 | if os.path.isabs(spec): 381 | return FSAssetDescriptor(spec) 382 | path = spec 383 | if ':' in path: 384 | package_name, path = spec.split(':', 1) 385 | else: 386 | if self.package is CALLER_PACKAGE: 387 | package_name = caller_package().__name__ 388 | else: 389 | package_name = getattr(self.package, '__name__', None) 390 | if package_name is None: 391 | raise ValueError( 392 | 'relative spec %r irresolveable without package' % (spec,) 393 | ) 394 | return PkgResourcesAssetDescriptor(package_name, path) 395 | 396 | 397 | class PkgResourcesAssetDescriptor(object): 398 | pkg_resources = pkg_resources 399 | 400 | def __init__(self, pkg_name, path): 401 | self.pkg_name = pkg_name 402 | self.path = path 403 | 404 | def absspec(self): 405 | return '%s:%s' % (self.pkg_name, self.path) 406 | 407 | def abspath(self): 408 | return os.path.abspath( 409 | self.pkg_resources.resource_filename(self.pkg_name, self.path)) 410 | 411 | def stream(self): 412 | return self.pkg_resources.resource_stream(self.pkg_name, self.path) 413 | 414 | def isdir(self): 415 | return self.pkg_resources.resource_isdir(self.pkg_name, self.path) 416 | 417 | def listdir(self): 418 | return self.pkg_resources.resource_listdir(self.pkg_name, self.path) 419 | 420 | def exists(self): 421 | return self.pkg_resources.resource_exists(self.pkg_name, self.path) 422 | 423 | 424 | class FSAssetDescriptor(object): 425 | 426 | def __init__(self, path): 427 | self.path = os.path.abspath(path) 428 | 429 | def absspec(self): 430 | raise NotImplementedError 431 | 432 | def abspath(self): 433 | return self.path 434 | 435 | def stream(self): 436 | return open(self.path, 'rb') 437 | 438 | def isdir(self): 439 | return os.path.isdir(self.path) 440 | 441 | def listdir(self): 442 | return os.listdir(self.path) 443 | 444 | def exists(self): 445 | return os.path.exists(self.path) 446 | -------------------------------------------------------------------------------- /fectl/run.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | 3 | import argparse 4 | import logging 5 | import os 6 | import sys 7 | 8 | from . import utils 9 | from .path import DottedNameResolver 10 | from .utils import ConfigurationError 11 | 12 | WORKER_INIT_FAILED = 99 13 | WORKER_BOOT_FAILED = 100 14 | 15 | ARGS = argparse.ArgumentParser(description='FECTL Worker') 16 | SUBPARSERS = ARGS.add_subparsers( 17 | title='Worker type', 18 | description='Worker type to use (i.e. 
asyncio, gevent, etc)', 19 | dest='worker') 20 | 21 | ARGS_GEVENT = SUBPARSERS.add_parser('gevent', help='Gevent based worker') 22 | ARGS_GEVENT.add_argument('--app', dest='app', action='store', required=False, 23 | help='Application to start') 24 | 25 | ARGS_ASYNCIO = SUBPARSERS.add_parser('asyncio', help='Asyncio based worker') 26 | ARGS_ASYNCIO.add_argument('--loop', dest='loop', action='store', 27 | default="default", required=False, 28 | choices=['default', 'uvloop', 'tokio'], 29 | help='Select asyncio event loop') 30 | ARGS_ASYNCIO.add_argument('--app', dest='app', action='store', required=False, 31 | help='Application to start') 32 | ARGS_ASYNCIO.add_argument('--debug', dest='debug', action='store_true', 33 | required=False, default=False, 34 | help='Enable event loop debug mode') 35 | 36 | 37 | def run(): 38 | try: 39 | args = ARGS.parse_args() 40 | except: 41 | sys.exit(WORKER_INIT_FAILED) 42 | 43 | sys.argv[1:] = [] 44 | try: 45 | worker_cls = utils.load_class(args.worker) 46 | except ConfigurationError as exc: 47 | logging.error( 48 | "Can not load worker class '%s': %s", args.worker, exc) 49 | sys.exit(WORKER_INIT_FAILED) 50 | except: 51 | logging.exception("Can not load worker class: %s", args.worker) 52 | sys.exit(WORKER_INIT_FAILED) 53 | 54 | if args.app: 55 | try: 56 | app_cls = DottedNameResolver().resolve(args.app) 57 | except ConfigurationError as exc: 58 | logging.error( 59 | "Can not load application class '%s': %s", args.app, exc) 60 | sys.exit(WORKER_INIT_FAILED) 61 | except: 62 | logging.exception("Can not load application class: %s", args.app) 63 | sys.exit(WORKER_INIT_FAILED) 64 | else: 65 | app_cls = None 66 | 67 | try: 68 | worker = worker_cls(app_cls, os.getppid(), args) 69 | worker._init_process() 70 | except SystemExit: 71 | raise 72 | except ConfigurationError: 73 | sys.exit(WORKER_INIT_FAILED) 74 | except BaseException as exc: 75 | logging.exception("Can not initialize worker %r: %s", worker_cls, exc) 76 | sys.exit(WORKER_BOOT_FAILED) 77 | 78 | try: 79 | worker._run() 80 | except SystemExit: 81 | raise 82 | except ConfigurationError: 83 | sys.exit(WORKER_INIT_FAILED) 84 | except BaseException as exc: 85 | logging.exception("Can not run worker: %s", exc) 86 | sys.exit(WORKER_BOOT_FAILED) 87 | else: 88 | sys.exit(0) 89 | 90 | 91 | if __name__ == '__main__': 92 | run() 93 | -------------------------------------------------------------------------------- /fectl/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | 3 | import fcntl 4 | import json 5 | import inspect 6 | import os 7 | import struct 8 | import traceback 9 | 10 | from . 
import apps, workers 11 | from .path import DottedNameResolver 12 | from .errors import ConfigurationError 13 | 14 | 15 | def set_non_blocking(fd): 16 | flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK 17 | fcntl.fcntl(fd, fcntl.F_SETFL, flags) 18 | 19 | 20 | def close_on_exec(fd): 21 | flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC 22 | fcntl.fcntl(fd, fcntl.F_SETFD, flags) 23 | 24 | 25 | def pack_message(cmd, data=None): 26 | msg = {'cmd': str(cmd)} 27 | if data is not None: 28 | msg['data'] = data 29 | 30 | msg = json.dumps(msg).encode('utf-8') 31 | return struct.pack('>h', len(msg)) + msg 32 | 33 | 34 | CMD_PREPARE = 'prepare' 35 | CMD_START = 'start' 36 | CMD_PAUSE = 'pause' 37 | CMD_RESUME = 'resume' 38 | CMD_STOP = 'stop' 39 | CMD_HEARTBEAT = 'hb' 40 | 41 | ALL_COMMANDS = (CMD_PREPARE, CMD_START, 42 | CMD_PAUSE, CMD_RESUME, CMD_STOP, CMD_HEARTBEAT) 43 | 44 | 45 | def unpack_message(data): 46 | msg = json.loads(data) 47 | cmd = msg['cmd'] 48 | if cmd not in ALL_COMMANDS: 49 | return None, None 50 | 51 | return cmd, None 52 | 53 | 54 | def load_class(uri): 55 | if inspect.isclass(uri): 56 | return uri 57 | 58 | if uri in workers.WORKERS: 59 | uri, check = workers.WORKERS[uri] 60 | if check is not None: 61 | check() 62 | 63 | try: 64 | return DottedNameResolver().resolve(uri) 65 | except: 66 | exc = traceback.format_exc() 67 | msg = "class uri %r invalid or not found: \n\n[%s]" 68 | raise ConfigurationError(msg % (uri, exc)) 69 | 70 | 71 | def load_app(uri): 72 | if uri in apps.APPS: 73 | uri = apps.APPS[uri] 74 | 75 | try: 76 | return DottedNameResolver().resolve(uri) 77 | except: 78 | exc = traceback.format_exc() 79 | msg = "loader app %r invalid or not found: \n\n[%s]" 80 | raise ConfigurationError(msg % (uri, exc)) 81 | -------------------------------------------------------------------------------- /fectl/workers/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from enum import Enum 3 | 4 | from ..errors import ConfigurationError 5 | 6 | 7 | class WorkerType(Enum): 8 | Gevent = 'gevent' 9 | Asyncio = 'asyncio' 10 | 11 | 12 | def check_gevent(): 13 | try: 14 | import gevent 15 | except: 16 | raise ConfigurationError("gevent package is not installed") 17 | 18 | 19 | def check_asyncio(): 20 | if sys.version_info < (3, 4): 21 | raise ConfigurationError("At least python 3.4 is required") 22 | 23 | 24 | WORKERS = { 25 | WorkerType.Gevent.value: ( 26 | "fectl.workers.gevent.GeventWorker", check_gevent), 27 | WorkerType.Asyncio.value: ( 28 | "fectl.workers.asyncio.AsyncioWorker", check_asyncio), 29 | } 30 | -------------------------------------------------------------------------------- /fectl/workers/asyncio.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import logging 4 | import signal 5 | import struct 6 | import sys 7 | 8 | from .. import utils 9 | from . 
import WorkerType 10 | from .base import Worker 11 | 12 | 13 | class AsyncioWorker(Worker): 14 | 15 | TYPE = WorkerType.Asyncio 16 | 17 | def __init__(self, app, ppid, args): 18 | super().__init__(app, ppid, args) 19 | 20 | self._stopping = None 21 | self._exit_code = 0 22 | self._loop_type = args.loop 23 | self._read_queue = None 24 | self._write_queue = None 25 | self._notify_waiter = None 26 | self._apps = [] 27 | 28 | def _init_process(self): 29 | # load event loop 30 | if self._loop_type == 'default': 31 | # use default event loop 32 | pass 33 | elif self._loop_type == 'uvloop': 34 | try: 35 | import uvloop 36 | except ImportError: 37 | raise utils.ConfigurationError('uvloop is not available') 38 | 39 | # Setup uvloop policy, so that every 40 | # asyncio.get_event_loop() will create an instance 41 | # of uvloop event loop. 42 | asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) 43 | elif self._loop_type == 'tokio': 44 | try: 45 | import tokio 46 | except ImportError: 47 | raise utils.ConfigurationError('tokio is not available') 48 | 49 | # Setup tokio policy, so that every 50 | # asyncio.get_event_loop() will create an instance 51 | # of uvloop event loop. 52 | asyncio.set_event_loop_policy(tokio.EventLoopPolicy()) 53 | else: 54 | raise utils.ConfigurationError( 55 | 'Unknown loop type: %s' % self._loop_type) 56 | 57 | # create new event_loop after fork 58 | asyncio.get_event_loop().close() 59 | 60 | loop = asyncio.new_event_loop() 61 | if self._args.debug: 62 | loop.set_debug(True) 63 | 64 | self._loop = loop 65 | 66 | # read/write queues to master 67 | self._read_queue = asyncio.Queue(loop=loop) 68 | self._write_queue = asyncio.Queue(loop=loop) 69 | 70 | # convert callbacks to coroutine 71 | self._on_msg = [asyncio.coroutine(cb) for cb in self._on_msg] 72 | self._on_shutdown = [asyncio.coroutine(cb) for cb in self._on_shutdown] 73 | 74 | for sock in self._sockets.values(): 75 | sock.set_nonblocking() 76 | 77 | asyncio.set_event_loop(loop) 78 | super()._init_process() 79 | 80 | def notify(self, cmd, data=None): 81 | self._write_queue.put_nowait(utils.pack_message(cmd, data)) 82 | 83 | def _run(self): 84 | self._read_task = self._loop.create_task(self._read_loop()) 85 | self._write_task = self._loop.create_task(self._write_loop()) 86 | 87 | self._runner = asyncio.ensure_future( 88 | self._try_run_loop(), loop=self._loop) 89 | try: 90 | self._loop.run_until_complete(self._runner) 91 | finally: 92 | self._loop.close() 93 | 94 | sys.exit(self._exit_code) 95 | 96 | @asyncio.coroutine 97 | def _try_run_loop(self): 98 | exc = None 99 | try: 100 | yield from self._run_loop() 101 | except utils.ConfigurationError as e: 102 | exc = e 103 | self.notify(self.MSG_CFG_ERROR, str(e)) 104 | except BaseException as e: 105 | exc = e 106 | 107 | if self._stopping is None: 108 | self._stopping = asyncio.ensure_future( 109 | self._stop(), loop=self._loop) 110 | 111 | yield from self._stopping 112 | 113 | if exc is not None: 114 | raise exc 115 | 116 | @asyncio.coroutine 117 | def _run_loop(self): 118 | # init main application 119 | if self._application is not None: 120 | try: 121 | self._application(self) 122 | except utils.ConfigurationError: 123 | raise 124 | except BaseException as exc: 125 | logging.exception("Application init exception: %s", exc) 126 | raise 127 | 128 | # load apps 129 | for sock in self._sockets.values(): 130 | app = sock.load_app(self) 131 | if app is not None: 132 | yield from app.init() 133 | self._apps.append(app) 134 | 135 | self.notify(self.MSG_LOADED) 136 | 137 | try: 
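            # Heartbeat loop: send a heartbeat to the master roughly once per
            # second and stop if the parent pid changes (master died/restarted).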
138 | while self._alive: 139 | self.heartbeat() 140 | 141 | # If our parent changed then we shutdown. 142 | if self._ppid != os.getppid(): 143 | self._alive = False 144 | logging.info("Parent changed, shutting down: %s", self) 145 | else: 146 | yield from self._wait_next_notify() 147 | except BaseException: 148 | logging.exception("Worker run loop exeception") 149 | pass 150 | 151 | def _wait_next_notify(self): 152 | self._notify_waiter_done() 153 | 154 | self._notify_waiter = waiter = self._loop.create_future() 155 | self._loop.call_later(1.0, self._notify_waiter_done) 156 | 157 | return waiter 158 | 159 | def _notify_waiter_done(self): 160 | waiter = self._notify_waiter 161 | if waiter is not None and not waiter.done(): 162 | waiter.set_result(True) 163 | 164 | self._notify_waiter = None 165 | 166 | def _init_signals(self): 167 | # Set up signals through the event loop API. 168 | self._loop.add_signal_handler( 169 | signal.SIGQUIT, self._handle_quit, signal.SIGQUIT, None) 170 | 171 | self._loop.add_signal_handler( 172 | signal.SIGTERM, self._handle_exit, signal.SIGTERM, None) 173 | 174 | self._loop.add_signal_handler( 175 | signal.SIGINT, self._handle_quit, signal.SIGINT, None) 176 | 177 | self._loop.add_signal_handler( 178 | signal.SIGWINCH, self._handle_winch, signal.SIGWINCH, None) 179 | 180 | self._loop.add_signal_handler( 181 | signal.SIGUSR1, self._handle_usr1, signal.SIGUSR1, None) 182 | 183 | self._loop.add_signal_handler( 184 | signal.SIGABRT, self._handle_abort, signal.SIGABRT, None) 185 | 186 | # Don't let SIGTERM and SIGUSR1 disturb active requests 187 | # by interrupting system calls 188 | signal.siginterrupt(signal.SIGTERM, False) 189 | signal.siginterrupt(signal.SIGUSR1, False) 190 | 191 | @asyncio.coroutine 192 | def _stop(self): 193 | # stop accepting connections 194 | try: 195 | tasks = [asyncio.ensure_future(app.pause(), loop=self._loop) 196 | for app in self._apps] 197 | yield from asyncio.gather(*tasks, loop=self.loop) 198 | except: 199 | pass 200 | 201 | # stop apps 202 | try: 203 | tasks = [asyncio.ensure_future(app.stop(), loop=self._loop) 204 | for app in self._apps] 205 | yield from asyncio.gather(*tasks, loop=self.loop) 206 | except: 207 | pass 208 | 209 | # on_stop callbacks 210 | try: 211 | tasks = [asyncio.ensure_future(cb(), loop=self._loop) 212 | for cb in self._on_shutdown] 213 | yield from asyncio.gather(*tasks, loop=self.loop) 214 | except: 215 | pass 216 | 217 | yield from asyncio.sleep(0.1, loop=self._loop) 218 | 219 | self._read_task.cancel() 220 | self._read_task = None 221 | self._write_task.cancel() 222 | self._write_task = None 223 | 224 | def _handle_quit(self, sig, frame): 225 | if self._stopping is not None: 226 | self._loop.call_later(0.1, self._notify_waiter_done) 227 | else: 228 | self._alive = False 229 | 230 | # init closing process 231 | self._stopping = asyncio.ensure_future( 232 | self._stop(), loop=self._loop) 233 | 234 | # close loop 235 | self._loop.call_later(0.1, self._notify_waiter_done) 236 | 237 | def _handle_abort(self, sig, frame): 238 | self._alive = False 239 | self._exit_code = 1 240 | sys.exit(1) 241 | 242 | def _write_loop(self): 243 | pipe = os.fdopen(self._master_pipe[1], "w") 244 | tr, proto = yield from self._loop.connect_write_pipe( 245 | WriteProtocol, pipe) 246 | 247 | while True: 248 | try: 249 | item = yield from self._write_queue.get() 250 | if not proto.write(item): 251 | break 252 | except (BaseException, RuntimeError, asyncio.CancelledError): 253 | break 254 | except Exception: 255 | 
logging.exception('Worker write loop exception') 256 | break 257 | 258 | self._alive = False 259 | 260 | def _read_loop(self): 261 | pipe = os.fdopen(self._master_pipe[0], "r") 262 | tr, proto = yield from self._loop.connect_read_pipe( 263 | ReadProtocol, pipe) 264 | proto._read_queue = self._read_queue 265 | 266 | while True: 267 | try: 268 | cmd, data = yield from self._read_queue.get() 269 | if cmd == self.CMD_HEARTBEAT: 270 | continue 271 | 272 | elif cmd == self.CMD_PAUSE: 273 | for app in self._apps: 274 | yield from app.pause() 275 | 276 | elif cmd == self.CMD_RESUME: 277 | for app in self._apps: 278 | yield from app.resume() 279 | 280 | elif cmd == self.CMD_START: 281 | for app in self._apps: 282 | yield from app.start() 283 | 284 | elif cmd == self.CMD_STOP: 285 | # init closing process 286 | self._stopping = asyncio.ensure_future( 287 | self._stop(), loop=self._loop) 288 | 289 | else: 290 | for cb in self._on_msg: 291 | try: 292 | yield from cb(cmd, data) 293 | except: 294 | logging.exception('Exception in message handler') 295 | except asyncio.CancelledError: 296 | break 297 | except (Exception, BaseException, RuntimeError): 298 | logging.exception('Worker write loop exception') 299 | break 300 | 301 | self._alive = False 302 | 303 | 304 | class WriteProtocol: 305 | 306 | def __init__(self, *args): 307 | self._transport = None 308 | 309 | def connection_made(self, transport): 310 | self._transport = transport 311 | 312 | def connection_lost(self, exc=None): 313 | self._transport = None 314 | 315 | def write(self, data): 316 | if self._transport is None: 317 | return False 318 | else: 319 | self._transport.write(data) 320 | return True 321 | 322 | 323 | class ReadProtocol: 324 | 325 | def __init__(self, *args): 326 | self._buf = bytearray() 327 | self._transport = None 328 | self._read_queue = None 329 | 330 | def connection_made(self, transport): 331 | self._transport = transport 332 | 333 | def connection_lost(self, exc=None): 334 | self._transport = None 335 | 336 | def eof_received(self): 337 | pass 338 | 339 | def data_received(self, data): 340 | self._buf += data 341 | 342 | if self._read_queue is not None and len(self._buf) >= 2: 343 | data = self._buf[:2] 344 | size = struct.unpack('>h', data)[0] 345 | 346 | if len(self._buf) >= size + 2: 347 | data = self._buf[2:size+2] 348 | cmd, data = utils.unpack_message(data) 349 | self._buf = self._buf[size+2:] 350 | self._read_queue.put_nowait((cmd, data)) 351 | -------------------------------------------------------------------------------- /fectl/workers/base.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | 3 | import itertools 4 | import os 5 | import random 6 | import signal 7 | import sys 8 | import time 9 | 10 | from .. 
import utils 11 | from .socket import Socket 12 | 13 | 14 | _sentinel = object() 15 | 16 | 17 | class Worker(object): 18 | 19 | TYPE = None 20 | 21 | MSG_LOADED = 'loaded' 22 | MSG_RELOAD = 'reload' 23 | MSG_RESTART = 'restart' 24 | MSG_HEARTBEAT = 'hb' 25 | MSG_CFG_ERROR = 'cfgerror' 26 | 27 | CMD_PREPARE = 'prepare' 28 | CMD_START = 'start' 29 | CMD_PAUSE = 'pause' 30 | CMD_RESUME = 'resume' 31 | CMD_STOP = 'stop' 32 | CMD_HEARTBEAT = 'hb' 33 | 34 | ALL_COMMANDS = (CMD_PREPARE, CMD_START, 35 | CMD_PAUSE, CMD_RESUME, CMD_STOP, CMD_HEARTBEAT) 36 | 37 | SIGNALS = [getattr(signal, "SIG%s" % x) 38 | for x in "ABRT HUP QUIT INT TERM USR1 WINCH CHLD".split()] 39 | 40 | def __init__(self, application, ppid, args): 41 | self._application = application 42 | self._ppid = ppid 43 | self._alive = True 44 | self._pipe = None 45 | self._sockets = {} 46 | self._args = args 47 | self._on_msg = [] 48 | self._on_shutdown = [] 49 | 50 | # service name 51 | self._name = os.environ.get('FECTL_SRV_NAME') 52 | 53 | # extract master communication pipe 54 | fd = os.environ.get('FECTL_FD') 55 | if fd is None: 56 | raise utils.ConfigurationError( 57 | "Can not get master process communication FD") 58 | 59 | try: 60 | self._master_pipe = tuple(int(v) for v in fd.split(':', 1)) 61 | except: 62 | raise utils.ConfigurationError( 63 | "Can not decode FECTL_FD_R: %s" % fd) 64 | 65 | self._sockets = Socket.load() 66 | 67 | def get_socket(self, name, default=_sentinel): 68 | sock = self._sockets.get(name, default) 69 | if sock is _sentinel: 70 | raise KeyError(name) 71 | 72 | return sock.socket 73 | 74 | def get_socket_fd(self, name, default=_sentinel): 75 | try: 76 | sock = self._sockets[name] 77 | return sock.socket.fileno() 78 | except KeyError: 79 | if default is not _sentinel: 80 | return default 81 | raise 82 | 83 | def notify(self, cmd, data=None): 84 | raise NotImplementedError() 85 | 86 | def heartbeat(self): 87 | self.notify(self.MSG_HEARTBEAT) 88 | 89 | def on_shutdown(self, cb): 90 | """ register callback for graceful shutdown process """ 91 | self._on_shutdown.append(cb) 92 | 93 | def _run(self): 94 | """This is the mainloop of a worker process.""" 95 | raise NotImplementedError() 96 | 97 | def _init_process(self): 98 | try: 99 | import setproctitle 100 | if self._name is not None: 101 | setproctitle.setproctitle('fectl %s worker' % self._name) 102 | except: 103 | pass 104 | 105 | try: 106 | random.seed(os.urandom(64)) 107 | except NotImplementedError: 108 | random.seed('%s.%s' % (time.time(), os.getpid())) 109 | 110 | self._pipe = os.pipe() 111 | for fd in itertools.chain(self._pipe, self._master_pipe): 112 | utils.close_on_exec(fd) 113 | utils.set_non_blocking(fd) 114 | 115 | self._f_read = os.fdopen(self._master_pipe[0], 'r') 116 | self._f_write = os.fdopen(self._master_pipe[1], 'w') 117 | 118 | self._init_signals() 119 | 120 | def _init_signals(self): 121 | # reset signaling 122 | [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS] 123 | 124 | # init new signaling 125 | signal.signal(signal.SIGQUIT, self._handle_quit) 126 | signal.signal(signal.SIGTERM, self._handle_exit) 127 | signal.signal(signal.SIGINT, self._handle_quit) 128 | signal.signal(signal.SIGWINCH, self._handle_winch) 129 | signal.signal(signal.SIGUSR1, self._handle_usr1) 130 | signal.signal(signal.SIGABRT, self._handle_abort) 131 | 132 | # Don't let SIGTERM and SIGUSR1 disturb active requests 133 | # by interrupting system calls 134 | signal.siginterrupt(signal.SIGTERM, False) 135 | signal.siginterrupt(signal.SIGUSR1, False) 136 | 137 | 
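        # Have Python write to the worker's self-pipe whenever a signal
        # arrives, so anything polling that pipe wakes up promptly.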
if hasattr(signal, 'set_wakeup_fd'): 138 | signal.set_wakeup_fd(self._pipe[1]) 139 | 140 | def _handle_usr1(self, sig, frame): 141 | pass 142 | 143 | def _handle_exit(self, sig, frame): 144 | self._alive = False 145 | 146 | def _handle_quit(self, sig, frame): 147 | self._alive = False 148 | time.sleep(0.1) 149 | sys.exit(0) 150 | 151 | def _handle_abort(self, sig, frame): 152 | self._alive = False 153 | sys.exit(1) 154 | 155 | def _handle_winch(self, sig, fname): 156 | # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD. 157 | pass 158 | -------------------------------------------------------------------------------- /fectl/workers/gevent.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | 3 | import os 4 | import struct 5 | import sys 6 | import logging 7 | 8 | import gevent 9 | from gevent.queue import Queue 10 | from gevent.fileobject import FileObjectPosix 11 | 12 | from .. import utils 13 | from . import WorkerType 14 | from .base import Worker 15 | 16 | 17 | class GeventWorker(Worker): 18 | 19 | TYPE = WorkerType.Gevent 20 | 21 | def _patch(self): 22 | from gevent import monkey 23 | from gevent.socket import socket as g_socket 24 | 25 | monkey.noisy = False 26 | monkey.patch_all(subprocess=True) 27 | 28 | # patch sockets 29 | for name, sock in self._sockets.items(): 30 | sock.orig_socket = sock.socket 31 | s = sock.socket 32 | if sys.version_info[0] == 3: 33 | sock.socket = g_socket( 34 | s.family, s.type, fileno=s.sock.fileno()) 35 | else: 36 | sock.socket = g_socket(s.family, s.type, _sock=s) 37 | 38 | def notify(self, cmd, data=None): 39 | self._write_queue.put(utils.pack_message(cmd, data)) 40 | 41 | def _init_process(self): 42 | # monkey patch here 43 | self._patch() 44 | 45 | # reinit the hub 46 | from gevent import hub 47 | hub.reinit() 48 | 49 | self._write_queue = Queue() 50 | self._read_queue = Queue() 51 | 52 | # then initialize the process 53 | super(GeventWorker, self)._init_process() 54 | 55 | def _write_loop(self): 56 | f = FileObjectPosix(self._master_pipe[1], 'w', 0) 57 | 58 | while True: 59 | try: 60 | f.write(self._write_queue.get()) 61 | except: 62 | self._alive = False 63 | break 64 | 65 | def _read_loop(self): 66 | f = FileObjectPosix(self._master_pipe[0], 'r', 0) 67 | 68 | while True: 69 | try: 70 | data = f.read(2) 71 | size = struct.unpack('>h', data)[0] 72 | data = f.read(size) 73 | cmd, data = utils.unpack_message(data) 74 | except: 75 | # master is dead probably 76 | self._alive = False 77 | break 78 | 79 | def _run(self): 80 | gevent.spawn(self._read_loop) 81 | gevent.spawn(self._write_loop) 82 | 83 | try: 84 | self._application(self) 85 | except BaseException as exc: 86 | logging.exception("Application init exception: %s", exc) 87 | raise 88 | 89 | self.notify(self.MSG_LOADED) 90 | 91 | while self._alive: 92 | self.heartbeat() 93 | gevent.sleep(1.0) 94 | 95 | if self._ppid != os.getppid(): 96 | logging.info("Parent changed, shutting down") 97 | self._alive = False 98 | break 99 | 100 | for cb in self._on_shutdown: 101 | try: 102 | cb() 103 | except BaseException as exc: 104 | logging.info("Shutdown callback exception: %s", exc) 105 | 106 | def _handle_quit(self, sig, frame): 107 | # Move this out of the signal handler so we can use blocking calls. 
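        # The base class handler sleeps briefly and then raises SystemExit,
        # so it runs in its own greenlet rather than in the signal context.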
108 | gevent.spawn(super(GeventWorker, self)._handle_quit, sig, frame) 109 | 110 | 111 | if __name__ == "__main__": 112 | worker = GeventWorker() 113 | worker.init_process() 114 | worker.run() 115 | -------------------------------------------------------------------------------- /fectl/workers/socket.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | 3 | import json 4 | import os 5 | import logging 6 | import socket 7 | 8 | from .. import utils 9 | 10 | 11 | class Socket: 12 | 13 | def __init__(self, name, socket, app, arguments): 14 | self.name = name 15 | self.socket = socket 16 | self.app = app 17 | self.arguments = arguments 18 | 19 | def set_nonblocking(self): 20 | self.socket.setblocking(False) 21 | 22 | def load_app(self, worker): 23 | if self.app is None: 24 | return None 25 | 26 | app = utils.load_app(self.app) 27 | try: 28 | return app(worker, self.socket, self.arguments) 29 | except utils.ConfigurationError: 30 | raise 31 | except: 32 | logging.exception('Can not initialize app: %s', self.app) 33 | raise utils.ConfigurationError 34 | 35 | return app 36 | 37 | @classmethod 38 | def load(cls): 39 | socks = {} 40 | apps = {} 41 | arguments = {} 42 | 43 | for key, value in os.environ.items(): 44 | if key.startswith("FECTL_FD_"): 45 | params = value.split(',') 46 | try: 47 | fd = int(params[0]) 48 | params = dict(map(lambda s: s.split(':', 1), params[1:])) 49 | family = int(params.get('FAMILY', 0)) 50 | socktype = int(params.get('SOCKETTYPE', 0)) 51 | proto = int(params.get('PROTO', 0)) 52 | sock = socket.fromfd(fd, family, socktype, proto) 53 | socks[key[9:]] = sock 54 | except OSError: 55 | raise 56 | except: 57 | raise RuntimeError("Can not decode %s: %s" % (key, value)) 58 | 59 | if key.startswith("FECTL_APP_"): 60 | apps[key[10:]] = value.strip() 61 | 62 | if key.startswith("FECTL_ARGS_"): 63 | args = {} 64 | for arg in json.loads(value.strip()): 65 | arg = [s.strip() for s in arg.split('=', 1)] 66 | if len(arg) == 1: 67 | args[arg[0]] = None 68 | else: 69 | args[arg[0]] = arg[1] 70 | 71 | arguments[key[11:]] = args 72 | 73 | sockets = {} 74 | for name, sock in socks.items(): 75 | sockets[name] = Socket( 76 | name, sock, apps.get(name), arguments.get(name, {})) 77 | 78 | return sockets 79 | -------------------------------------------------------------------------------- /fectld.toml: -------------------------------------------------------------------------------- 1 | [master] 2 | pid = "fectld.pid" 3 | sock = "fectld.sock" 4 | directory = "./" 5 | 6 | [[service]] 7 | name = "test" 8 | num = 1 9 | directory = "./tests/" 10 | timeout = 5 11 | startup_timeout = 10 12 | shutdown_timeout = 10 13 | command = "python -m fectl.run asyncio" 14 | environ = ["ENV_KEY1=value1"] 15 | 16 | [logging] 17 | name = "ctl" 18 | service = "console" 19 | level = "debug" 20 | 21 | [[socket]] 22 | name = "http" 23 | port = 9080 24 | ip = "0.0.0.0" 25 | service = ["test"] 26 | 27 | app = "aiohttp" 28 | arguments = ["app=asyncio_tests:init"] 29 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import codecs 3 | import re 4 | import sys 5 | import os.path 6 | import subprocess 7 | from setuptools import setup 8 | from setuptools_rust import RustExtension, Binding, Strip, build_ext 9 | from setuptools.command.test import test as TestCommand 10 
| from distutils.errors import ( 11 | DistutilsPlatformError, DistutilsExecError, CCompilerError) 12 | 13 | 14 | class BuildFailed(Exception): 15 | pass 16 | 17 | 18 | class ve_build_ext(build_ext): 19 | # This class allows C extension building to fail. 20 | 21 | def run(self): 22 | build_ext.run(self) 23 | 24 | def build_extension(self, ext): 25 | try: 26 | build_ext.build_extension(self, ext) 27 | except (CCompilerError, DistutilsExecError, 28 | DistutilsPlatformError, ValueError): 29 | raise BuildFailed() 30 | 31 | 32 | with codecs.open(os.path.join(os.path.abspath(os.path.dirname( 33 | __file__)), 'fectl', '__init__.py'), 'r', 'latin1') as fp: 34 | try: 35 | version = re.findall(r"^__version__ = '([^']+)'\r?$", 36 | fp.read(), re.M)[0] 37 | except IndexError: 38 | raise RuntimeError('Unable to determine version.') 39 | 40 | 41 | class PyTest(TestCommand): 42 | user_options = [] 43 | 44 | def run(self): 45 | import subprocess 46 | import sys 47 | errno = subprocess.call([sys.executable, '-m', 'pytest', 'tests']) 48 | raise SystemExit(errno) 49 | 50 | 51 | install_requires = ['six'] 52 | tests_require = install_requires + ['pytest', 'pytest-timeout'] 53 | 54 | if sys.version_info < (3, 4): 55 | install_requires.append('enum') 56 | 57 | 58 | def read(f): 59 | return open(os.path.join(os.path.dirname(__file__), f)).read().strip() 60 | 61 | 62 | setup_args = dict( 63 | name='fectl', 64 | version=version, 65 | description='Simple process manager', 66 | long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))), 67 | classifiers=[ 68 | 'License :: OSI Approved :: Apache Software License', 69 | 'Intended Audience :: Developers', 70 | 'Programming Language :: Python', 71 | 'Programming Language :: Python :: 2.7', 72 | 'Programming Language :: Python :: 3', 73 | 'Programming Language :: Python :: 3.4', 74 | 'Programming Language :: Python :: 3.5', 75 | 'Programming Language :: Python :: 3.6', 76 | 'Development Status :: 5 - Production/Stable', 77 | 'Operating System :: POSIX', 78 | 'Operating System :: MacOS :: MacOS X', 79 | ], 80 | author='Nikolay Kim', 81 | author_email='fafhrd91@gmail.com', 82 | url='https://github.com/fafhrd91/fectl/', 83 | license='Apache 2', 84 | packages=['fectl'], 85 | rust_extensions=[ 86 | RustExtension({'fectl': 'fectl.fectl', 87 | 'fectld': 'fectl.fectld'}, 'Cargo.toml', 88 | binding=Binding.Exec, script=True, strip=Strip.All)], 89 | install_requires=install_requires, 90 | tests_require=tests_require, 91 | include_package_data=True, 92 | zip_safe=False, 93 | cmdclass=dict(build_ext=ve_build_ext, test=PyTest), 94 | ) 95 | 96 | try: 97 | setup(**setup_args) 98 | except BuildFailed: 99 | raise 100 | print("************************************************************") 101 | print("Cannot compile C accelerator module, use pure python version") 102 | print("************************************************************") 103 | setup(**setup_args) 104 | -------------------------------------------------------------------------------- /src/addrinfo.rs: -------------------------------------------------------------------------------- 1 | // this is copy from https://github.com/keeperofdakeys/dns-lookup 2 | #![allow(dead_code)] 3 | 4 | use libc; 5 | use std::mem; 6 | use std::ffi::{CStr, CString, NulError}; 7 | use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr}; 8 | use std::ptr; 9 | use std::io; 10 | use std::fmt; 11 | use std::error::Error; 12 | use std::os::raw::c_int; 13 | 14 | pub const AI_PASSIVE: c_int = 0x0001; 15 | pub const 
AI_CANONNAME: c_int = 0x0002; 16 | pub const AI_NUMERICHOST: c_int = 0x0004; 17 | pub const AI_NUMERICSERV: c_int = 0x0400; 18 | 19 | 20 | #[derive(Copy, Clone, Debug)] 21 | /// Address family 22 | pub enum Family { 23 | /// Unspecified 24 | Unspec, 25 | /// Ipv4 26 | Inet, 27 | /// Ipv6 28 | Inet6, 29 | /// Unix domain soxket 30 | Unix, 31 | /// Some other 32 | Other(c_int), 33 | } 34 | 35 | 36 | impl Family { 37 | pub fn from_int(int: c_int) -> Self { 38 | match int { 39 | 0 => Family::Unspec, 40 | libc::AF_INET => Family::Inet, 41 | libc::AF_INET6 => Family::Inet6, 42 | libc::AF_UNIX => Family::Unix, 43 | v => Family::Other(v), 44 | } 45 | } 46 | 47 | pub fn to_int(&self) -> c_int { 48 | match *self { 49 | Family::Unspec => 0, 50 | Family::Inet => libc::AF_INET, 51 | Family::Inet6 => libc::AF_INET6, 52 | Family::Unix => libc::AF_UNIX, 53 | Family::Other(v) => v, 54 | } 55 | } 56 | } 57 | 58 | 59 | #[derive(Copy, Clone, Debug)] 60 | /// Types of Sockets 61 | pub enum SocketType { 62 | /// Sequenced, reliable, connection-based byte streams. 63 | Stream, 64 | /// Connectionless, unreliable datagrams of fixed max length. 65 | DGram, 66 | /// Raw protocol interface. 67 | Raw, 68 | /// Some other 69 | Other(c_int), 70 | } 71 | 72 | 73 | impl SocketType { 74 | pub fn from_int(int: c_int) -> Self { 75 | match int { 76 | libc::SOCK_STREAM => SocketType::Stream, 77 | libc::SOCK_DGRAM => SocketType::DGram, 78 | libc::SOCK_RAW => SocketType::Raw, 79 | v => SocketType::Other(v), 80 | } 81 | } 82 | 83 | pub fn to_int(&self) -> c_int { 84 | match *self { 85 | SocketType::Stream => libc::SOCK_STREAM, 86 | SocketType::DGram => libc::SOCK_DGRAM, 87 | SocketType::Raw => libc::SOCK_RAW, 88 | SocketType::Other(v) => v, 89 | } 90 | } 91 | } 92 | 93 | 94 | #[derive(Copy, Clone, Debug)] 95 | /// Socket Protocol 96 | pub enum Protocol { 97 | /// Unspecificed. 98 | Unspec, 99 | /// Local to host (pipes and file-domain). 100 | Local, 101 | /// POSIX name for PF_LOCAL. 102 | Unix, 103 | /// IP Protocol Family. 
104 | Inet, 105 | TCP, 106 | UDP, 107 | Other(c_int), 108 | } 109 | 110 | 111 | impl Protocol { 112 | pub fn from_int(int: c_int) -> Self { 113 | match int { 114 | 0 => Protocol::Unspec, 115 | 1 => Protocol::Local, 116 | 2 => Protocol::Inet, 117 | 6 => Protocol::TCP, 118 | 17 => Protocol::UDP, 119 | v => Protocol::Other(v), 120 | } 121 | } 122 | 123 | pub fn to_int(&self) -> c_int { 124 | match *self { 125 | Protocol::Unspec => 0, 126 | Protocol::Local => libc::PF_LOCAL, 127 | Protocol::Unix => libc::PF_UNIX, 128 | Protocol::Inet => libc::PF_INET, 129 | Protocol::TCP => 6, 130 | Protocol::UDP => 17, 131 | Protocol::Other(v) => v, 132 | } 133 | } 134 | } 135 | 136 | 137 | #[derive(Clone, Debug)] 138 | pub struct AddrInfo { 139 | pub flags: c_int, 140 | pub family: Family, 141 | pub socktype: SocketType, 142 | pub protocol: Protocol, 143 | pub sockaddr: SocketAddr, 144 | pub canonname: Option, 145 | } 146 | 147 | impl AddrInfo { 148 | pub fn new(flags: c_int, family: Family, 149 | socktype: SocketType, protocol: Protocol, 150 | addr: SocketAddr, canonname: Option) -> AddrInfo { 151 | AddrInfo { 152 | flags, 153 | family, 154 | socktype, 155 | protocol, 156 | sockaddr: addr, 157 | canonname } 158 | } 159 | 160 | unsafe fn from_ptr<'a>(a: *mut libc::addrinfo) -> Result { 161 | let addrinfo = *a; 162 | 163 | Ok(AddrInfo { 164 | flags: 0, 165 | family: Family::from_int(addrinfo.ai_family), 166 | socktype: SocketType::from_int(addrinfo.ai_socktype), 167 | protocol: Protocol::from_int(addrinfo.ai_protocol), 168 | sockaddr: 169 | sockaddr_to_addr( 170 | mem::transmute(addrinfo.ai_addr), addrinfo.ai_addrlen as usize)?, 171 | canonname: if addrinfo.ai_canonname.is_null() { None } else { 172 | Some(CStr::from_ptr( 173 | addrinfo.ai_canonname).to_str().unwrap_or("unset").to_owned()) }, 174 | }) 175 | } 176 | } 177 | 178 | 179 | fn sockaddr_to_addr(storage: &libc::sockaddr_storage, len: usize) -> io::Result { 180 | match storage.ss_family as c_int { 181 | libc::AF_INET => { 182 | assert!(len as usize >= mem::size_of::()); 183 | Ok( 184 | unsafe { 185 | let sock = *(storage as *const _ as *const libc::sockaddr_in); 186 | let ip = &*(&sock.sin_addr as *const libc::in_addr as *const Ipv4Addr); 187 | SocketAddr::V4(SocketAddrV4::new(ip.clone(), u16::from_be(sock.sin_port))) 188 | } 189 | ) 190 | } 191 | libc::AF_INET6 => { 192 | assert!(len as usize >= mem::size_of::()); 193 | Ok( 194 | unsafe { 195 | let sock = *(storage as *const _ as *const libc::sockaddr_in6); 196 | let ip = &*(&sock.sin6_addr as *const libc::in6_addr as *const Ipv6Addr); 197 | SocketAddr::V6(SocketAddrV6::new( 198 | ip.clone(), u16::from_be(sock.sin6_port), 199 | u32::from_be(sock.sin6_flowinfo), 0)) 200 | } 201 | ) 202 | } 203 | _ => { 204 | Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid argument")) 205 | } 206 | } 207 | } 208 | 209 | 210 | pub struct LookupParams { 211 | host: Option, 212 | port: Option, 213 | family: c_int, 214 | flags: c_int, 215 | socktype: SocketType, 216 | } 217 | 218 | impl LookupParams { 219 | pub fn new(host: Option, port: Option, 220 | family: c_int, flags: c_int, socktype: SocketType) -> LookupParams { 221 | LookupParams { 222 | host, 223 | port, 224 | family, 225 | flags, 226 | socktype, 227 | } 228 | } 229 | } 230 | 231 | 232 | pub struct LookupAddrInfo { 233 | orig: *mut libc::addrinfo, 234 | cur: *mut libc::addrinfo, 235 | } 236 | 237 | 238 | /// Lookup a addr info via dns, return an iterator of addr infos. 
239 | pub fn lookup_addrinfo( 240 | host: Option, port: Option, 241 | family: c_int, flags: c_int, socktype: SocketType) -> Result { 242 | let mut res = ptr::null_mut(); 243 | let hints = libc::addrinfo { 244 | ai_flags: flags, 245 | ai_family: family, 246 | ai_socktype: socktype.to_int(), 247 | ai_protocol: 0, 248 | ai_addrlen: 0, 249 | ai_canonname: ptr::null_mut(), 250 | ai_addr: ptr::null_mut(), 251 | ai_next: ptr::null_mut(), 252 | }; 253 | 254 | let tmp_h; 255 | let c_host = if let Some(host) = host { 256 | tmp_h = CString::new(host)?; 257 | tmp_h.as_ptr() 258 | } else { 259 | ptr::null() 260 | }; 261 | 262 | let tmp_p; 263 | let c_srv = if let Some(port) = port { 264 | tmp_p = CString::new(port)?; 265 | tmp_p.as_ptr() 266 | } else { 267 | ptr::null() 268 | }; 269 | 270 | unsafe { 271 | let lres = libc::getaddrinfo(c_host, c_srv, &hints, &mut res); 272 | match lres { 273 | 0 => Ok(LookupAddrInfo { orig: res, cur: res }), 274 | _ => Err(LookupError::Generic), 275 | } 276 | } 277 | } 278 | 279 | impl Iterator for LookupAddrInfo { 280 | type Item = AddrInfo; 281 | 282 | fn next(&mut self) -> Option { 283 | unsafe { 284 | loop { 285 | if self.cur.is_null() { 286 | return None 287 | } else { 288 | let ret = AddrInfo::from_ptr(self.cur); 289 | self.cur = (*self.cur).ai_next as *mut libc::addrinfo; 290 | if let Ok(ret) = ret { 291 | return Some(ret) 292 | } 293 | } 294 | } 295 | } 296 | } 297 | } 298 | 299 | unsafe impl Sync for LookupAddrInfo {} 300 | unsafe impl Send for LookupAddrInfo {} 301 | 302 | impl Drop for LookupAddrInfo { 303 | fn drop(&mut self) { 304 | unsafe { libc::freeaddrinfo(self.orig) } 305 | } 306 | } 307 | 308 | 309 | /// Errors that can occur looking up a hostname. 310 | pub enum LookupError { 311 | /// A generic IO error 312 | IOError(io::Error), 313 | /// A Null Error 314 | NulError(NulError), 315 | /// Other error 316 | Other(String), 317 | /// An unspecific error 318 | Generic 319 | } 320 | 321 | 322 | impl From for LookupError { 323 | fn from(err: io::Error) -> Self { 324 | LookupError::IOError(err) 325 | } 326 | } 327 | 328 | impl From for io::Error { 329 | fn from(err: LookupError) -> Self { 330 | match err { 331 | LookupError::IOError(err) => err, 332 | LookupError::Other(err_str) => io::Error::new(io::ErrorKind::Other, err_str), 333 | LookupError::NulError(_) => io::Error::new(io::ErrorKind::Other, "nil pointer"), 334 | LookupError::Generic => io::Error::new(io::ErrorKind::Other, "generic error"), 335 | } 336 | } 337 | } 338 | 339 | impl From for LookupError { 340 | fn from(err: NulError) -> Self { 341 | LookupError::NulError(err) 342 | } 343 | } 344 | 345 | impl<'a> From<&'a str> for LookupError { 346 | fn from(err: &'a str) -> Self { 347 | LookupError::Other(err.to_owned()) 348 | } 349 | } 350 | 351 | impl Error for LookupError { 352 | fn description(&self) -> &str { 353 | match *self { 354 | LookupError::IOError(_) => "IO Error", 355 | LookupError::Other(ref err_str) => &err_str, 356 | LookupError::NulError(_) => "nil pointer", 357 | LookupError::Generic => "generic error", 358 | } 359 | } 360 | 361 | fn cause(&self) -> Option<&Error> { 362 | match *self { 363 | LookupError::IOError(ref err) => Some(err), 364 | _ => None 365 | } 366 | } 367 | } 368 | 369 | impl fmt::Display for LookupError { 370 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 371 | write!(f, "{}", self.description()) 372 | } 373 | } 374 | 375 | impl fmt::Debug for LookupError { 376 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 377 | write!(f, "{}", self.description()) 
378 | } 379 | } 380 | -------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | use std::io::{self, Read, Write}; 3 | use std::time::Duration; 4 | use std::os::unix::net::UnixStream; 5 | 6 | use serde_json as json; 7 | use byteorder::{BigEndian, ByteOrder}; 8 | use bytes::{BufMut, BytesMut}; 9 | use tokio_io::codec::{Encoder, Decoder}; 10 | 11 | use config::MasterConfig; 12 | use master_types::{MasterRequest, MasterResponse}; 13 | 14 | /// Master alive status 15 | pub enum AliveStatus { 16 | /// Master process is alive 17 | Alive, 18 | /// Master process is not alive 19 | NotAlive, 20 | /// Unix socket is connected but master process does not respond 21 | NotResponding, 22 | } 23 | 24 | 25 | /// Send command to master 26 | pub fn send_command(stream: &mut UnixStream, req: MasterRequest) -> Result<(), io::Error> { 27 | let mut buf = BytesMut::new(); 28 | ClientTransportCodec.encode(req, &mut buf)?; 29 | 30 | stream.write_all(buf.as_ref()) 31 | } 32 | 33 | /// read master response 34 | pub fn read_response(stream: &mut UnixStream, buf: &mut BytesMut) 35 | -> Result 36 | { 37 | loop { 38 | buf.reserve(1024); 39 | 40 | unsafe { 41 | match stream.read(buf.bytes_mut()) { 42 | Ok(n) => { 43 | buf.advance_mut(n); 44 | 45 | if let Some(resp) = ClientTransportCodec.decode(buf)? { 46 | return Ok(resp) 47 | } else { 48 | if n == 0 { 49 | return Err(io::Error::new(io::ErrorKind::Other, "closed")) 50 | } 51 | } 52 | }, 53 | Err(e) => return Err(e), 54 | } 55 | } 56 | } 57 | } 58 | 59 | fn try_read_response(stream: &mut UnixStream, buf: &mut BytesMut) 60 | -> Result 61 | { 62 | let mut retry = 5; 63 | loop { 64 | match read_response(stream, buf) { 65 | Ok(resp) => { 66 | debug!("Master response: {:?}", resp); 67 | return Ok(resp); 68 | } 69 | Err(err) => match err.kind() { 70 | io::ErrorKind::TimedOut => 71 | if retry > 0 { 72 | retry -= 1; 73 | continue 74 | } 75 | io::ErrorKind::WouldBlock => { 76 | thread::sleep(Duration::from_millis(100)); 77 | continue 78 | } 79 | _ => return Err(err) 80 | } 81 | } 82 | } 83 | } 84 | 85 | /// Check if master process is alive. 
Try to connect over unix socket 86 | /// and send `Ping` command 87 | pub fn is_alive(cfg: &MasterConfig) -> AliveStatus { 88 | match UnixStream::connect(&cfg.sock) { 89 | Ok(mut conn) => { 90 | conn.set_read_timeout(Some(Duration::new(1, 0))).expect("Couldn't set read timeout"); 91 | let _ = send_command(&mut conn, MasterRequest::Ping); 92 | 93 | if try_read_response(&mut conn, &mut BytesMut::new()).is_ok() { 94 | AliveStatus::Alive 95 | } else { 96 | AliveStatus::NotResponding 97 | } 98 | } 99 | Err(_) => { 100 | AliveStatus::NotAlive 101 | } 102 | } 103 | } 104 | 105 | pub struct ClientTransportCodec; 106 | 107 | impl Encoder for ClientTransportCodec 108 | { 109 | type Item = MasterRequest; 110 | type Error = io::Error; 111 | 112 | fn encode(&mut self, msg: MasterRequest, dst: &mut BytesMut) -> Result<(), Self::Error> { 113 | let msg = json::to_string(&msg).unwrap(); 114 | let msg_ref: &[u8] = msg.as_ref(); 115 | 116 | dst.reserve(msg_ref.len() + 2); 117 | dst.put_u16::(msg_ref.len() as u16); 118 | dst.put(msg_ref); 119 | 120 | Ok(()) 121 | } 122 | } 123 | 124 | impl Decoder for ClientTransportCodec 125 | { 126 | type Item = MasterResponse; 127 | type Error = io::Error; 128 | 129 | fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { 130 | let size = { 131 | if src.len() < 2 { 132 | return Ok(None) 133 | } 134 | BigEndian::read_u16(src.as_ref()) as usize 135 | }; 136 | 137 | if src.len() >= size + 2 { 138 | src.split_to(2); 139 | let buf = src.split_to(size); 140 | Ok(Some(json::from_slice::(&buf)?)) 141 | } else { 142 | Ok(None) 143 | } 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/cmd.rs: -------------------------------------------------------------------------------- 1 | use std::rc::Rc; 2 | use std::collections::HashMap; 3 | 4 | use nix::unistd::getpid; 5 | use nix::sys::wait::{waitpid, WaitStatus, WNOHANG}; 6 | 7 | use actix::Response; 8 | use actix::prelude::*; 9 | use actix::actors::signal; 10 | use futures::Future; 11 | 12 | use config::Config; 13 | use event::{Reason, ServiceStatus}; 14 | use process::ProcessError; 15 | use service::{self, FeService, StartStatus, ReloadStatus, ServiceOperationError}; 16 | 17 | #[derive(Debug)] 18 | /// Command center errors 19 | pub enum CommandError { 20 | /// command center is not in Running state 21 | NotReady, 22 | /// service is not known 23 | UnknownService, 24 | /// service is stopped 25 | ServiceStopped, 26 | /// underlying service error 27 | Service(ServiceOperationError), 28 | } 29 | 30 | 31 | #[derive(PartialEq, Debug)] 32 | enum State { 33 | Starting, 34 | Running, 35 | Stopping, 36 | } 37 | 38 | pub struct CommandCenter { 39 | cfg: Rc, 40 | state: State, 41 | system: Addr, 42 | services: HashMap>, 43 | stop_waiter: Option>, 44 | stopping: usize, 45 | } 46 | 47 | impl CommandCenter { 48 | 49 | pub fn start(cfg: Rc) -> Addr { 50 | CommandCenter { 51 | cfg, 52 | state: State::Starting, 53 | system: Arbiter::system(), 54 | services: HashMap::new(), 55 | stop_waiter: None, 56 | stopping: 0, 57 | }.start() 58 | } 59 | 60 | fn exit(&mut self, success: bool) { 61 | if let Some(waiter) = self.stop_waiter.take() { 62 | waiter.set(true); 63 | } 64 | 65 | if success { 66 | self.system.do_send(actix::msgs::SystemExit(0)); 67 | } else { 68 | self.system.do_send(actix::msgs::SystemExit(1)); 69 | } 70 | } 71 | 72 | fn stop(&mut self, ctx: &mut Context, graceful: bool) 73 | { 74 | if self.state != State::Stopping { 75 | info!("Stopping service"); 76 | 77 | self.state = 
State::Stopping; 78 | for service in self.services.values() { 79 | self.stopping += 1; 80 | service.send(service::Stop(graceful, Reason::Exit)) 81 | .into_actor(self) 82 | .then(|res, srv, _| { 83 | srv.stopping -= 1; 84 | let exit = srv.stopping == 0; 85 | if exit { 86 | srv.exit(true); 87 | } 88 | match res { 89 | Ok(_) => actix::fut::ok(()), 90 | Err(_) => actix::fut::err(()), 91 | } 92 | }).spawn(ctx); 93 | }; 94 | } 95 | } 96 | } 97 | 98 | 99 | pub struct ServicePids(pub String); 100 | 101 | impl Message for ServicePids { 102 | type Result = Result, CommandError>; 103 | } 104 | 105 | impl Handler for CommandCenter { 106 | type Result = Response, CommandError>; 107 | 108 | fn handle(&mut self, msg: ServicePids, _: &mut Context) -> Self::Result { 109 | match self.state { 110 | State::Running => { 111 | match self.services.get(&msg.0) { 112 | Some(service) => 113 | Response::async( 114 | service.send(service::Pids).map_err(|_| CommandError::UnknownService) 115 | ), 116 | None => Response::reply(Err(CommandError::UnknownService)) 117 | } 118 | } 119 | _ => Response::reply(Err(CommandError::NotReady)) 120 | } 121 | } 122 | } 123 | 124 | #[derive(Message)] 125 | #[rtype(result="Result")] 126 | pub struct Stop; 127 | 128 | impl Handler for CommandCenter { 129 | type Result = Response; 130 | 131 | fn handle(&mut self, _: Stop, ctx: &mut Context) -> Self::Result { 132 | self.stop(ctx, true); 133 | 134 | if self.stop_waiter.is_none() { 135 | self.stop_waiter = Some(actix::Condition::default()); 136 | } 137 | 138 | if let Some(ref mut waiter) = self.stop_waiter { 139 | Response::async(waiter.wait().map_err(|_| ())) 140 | } else { 141 | unreachable!(); 142 | } 143 | } 144 | } 145 | 146 | 147 | /// Start Service by `name` 148 | pub struct StartService(pub String); 149 | 150 | impl Message for StartService { 151 | type Result = Result; 152 | } 153 | 154 | impl Handler for CommandCenter { 155 | type Result = Response; 156 | 157 | fn handle(&mut self, msg: StartService, _: &mut Context) -> Self::Result { 158 | match self.state { 159 | State::Running => { 160 | info!("Starting service {:?}", msg.0); 161 | match self.services.get(&msg.0) { 162 | Some(service) => 163 | Response::async( 164 | service.send(service::Start).then(|res| match res { 165 | Ok(Ok(status)) => Ok(status), 166 | Ok(Err(err)) => Err(CommandError::Service(err)), 167 | Err(_) => Err(CommandError::NotReady) 168 | })), 169 | None => 170 | Response::reply(Err(CommandError::UnknownService)) 171 | } 172 | } 173 | _ => { 174 | warn!("Can not reload in system in `{:?}` state", self.state); 175 | Response::reply(Err(CommandError::NotReady)) 176 | } 177 | } 178 | } 179 | } 180 | 181 | /// Stop Service by `name` 182 | pub struct StopService(pub String, pub bool); 183 | 184 | impl Message for StopService { 185 | type Result = Result<(), CommandError>; 186 | } 187 | 188 | impl Handler for CommandCenter { 189 | type Result = Response<(), CommandError>; 190 | 191 | fn handle(&mut self, msg: StopService, _: &mut Context) -> Self::Result { 192 | match self.state { 193 | State::Running => { 194 | info!("Stopping service {:?}", msg.0); 195 | match self.services.get(&msg.0) { 196 | Some(service) => 197 | Response::async( 198 | service.send(service::Stop(msg.1, Reason::ConsoleRequest)) 199 | .then(|res| match res { 200 | Ok(Ok(_)) => Ok(()), 201 | _ => Err(CommandError::ServiceStopped), 202 | })), 203 | None => 204 | Response::reply(Err(CommandError::UnknownService)) 205 | } 206 | } 207 | _ => { 208 | warn!("Can not reload in system in `{:?}` state", 
self.state); 209 | Response::reply(Err(CommandError::NotReady)) 210 | } 211 | } 212 | } 213 | } 214 | 215 | /// Service status message 216 | pub struct StatusService(pub String); 217 | 218 | impl Message for StatusService { 219 | type Result = Result; 220 | } 221 | 222 | impl Handler for CommandCenter { 223 | type Result = Response; 224 | 225 | fn handle(&mut self, msg: StatusService, _: &mut Context) -> Self::Result { 226 | match self.state { 227 | State::Running => { 228 | match self.services.get(&msg.0) { 229 | Some(service) => 230 | Response::async( 231 | service.send(service::Status).then(|res| match res { 232 | Ok(Ok(status)) => Ok(status), 233 | _ => Err(CommandError::UnknownService) 234 | })), 235 | None => 236 | Response::reply(Err(CommandError::UnknownService)), 237 | } 238 | } 239 | _ => Response::reply(Err(CommandError::NotReady)) 240 | } 241 | } 242 | } 243 | 244 | /// Pause service message 245 | pub struct PauseService(pub String); 246 | 247 | impl Message for PauseService { 248 | type Result = Result<(), CommandError>; 249 | } 250 | 251 | impl Handler for CommandCenter { 252 | type Result = Response<(), CommandError>; 253 | 254 | fn handle(&mut self, msg: PauseService, _: &mut Context) -> Self::Result { 255 | match self.state { 256 | State::Running => { 257 | info!("Pause service {:?}", msg.0); 258 | match self.services.get(&msg.0) { 259 | Some(service) => 260 | Response::async( 261 | service.send(service::Pause).then(|res| match res { 262 | Ok(Ok(_)) => Ok(()), 263 | Ok(Err(err)) => Err(CommandError::Service(err)), 264 | Err(_) => Err(CommandError::UnknownService) 265 | })), 266 | None => Response::reply(Err(CommandError::UnknownService)) 267 | } 268 | } 269 | _ => { 270 | warn!("Can not reload in system in `{:?}` state", self.state); 271 | Response::reply(Err(CommandError::NotReady)) 272 | } 273 | } 274 | } 275 | } 276 | 277 | /// Resume service message 278 | pub struct ResumeService(pub String); 279 | 280 | impl Message for ResumeService { 281 | type Result = Result<(), CommandError>; 282 | } 283 | 284 | impl Handler for CommandCenter { 285 | type Result = Response<(), CommandError>; 286 | 287 | fn handle(&mut self, msg: ResumeService, _: &mut Context) -> Self::Result { 288 | match self.state { 289 | State::Running => { 290 | info!("Resume service {:?}", msg.0); 291 | match self.services.get(&msg.0) { 292 | Some(service) => 293 | Response::async( 294 | service.send(service::Resume).then(|res| match res { 295 | Ok(Ok(_)) => Ok(()), 296 | Ok(Err(err)) => Err(CommandError::Service(err)), 297 | Err(_) => Err(CommandError::UnknownService) 298 | })), 299 | None => 300 | Response::reply(Err(CommandError::UnknownService)) 301 | } 302 | } 303 | _ => { 304 | warn!("Can not reload in system in `{:?}` state", self.state); 305 | Response::reply(Err(CommandError::NotReady)) 306 | } 307 | } 308 | } 309 | } 310 | 311 | /// Reload service 312 | pub struct ReloadService(pub String, pub bool); 313 | 314 | impl Message for ReloadService { 315 | type Result = Result; 316 | } 317 | 318 | impl Handler for CommandCenter { 319 | type Result = Response; 320 | 321 | fn handle(&mut self, msg: ReloadService, _: &mut Context) -> Self::Result { 322 | match self.state { 323 | State::Running => { 324 | info!("Reloading service {:?}", msg.0); 325 | let graceful = msg.1; 326 | match self.services.get(&msg.0) { 327 | Some(service) => 328 | Response::async( 329 | service.send(service::Reload(graceful)).then(|res| match res { 330 | Ok(Ok(status)) => Ok(status), 331 | Ok(Err(err)) => 
Err(CommandError::Service(err)), 332 | Err(_) => Err(CommandError::UnknownService) 333 | })), 334 | None => 335 | Response::reply(Err(CommandError::UnknownService)) 336 | } 337 | } 338 | _ => { 339 | warn!("Can not reload in system in `{:?}` state", self.state); 340 | Response::reply(Err(CommandError::NotReady)) 341 | } 342 | } 343 | } 344 | } 345 | 346 | /// reload all services 347 | pub struct ReloadAll; 348 | 349 | impl Message for ReloadAll { 350 | type Result = (); 351 | } 352 | 353 | impl Handler for CommandCenter { 354 | type Result = (); 355 | 356 | fn handle(&mut self, _: ReloadAll, _: &mut Context) -> Self::Result { 357 | match self.state { 358 | State::Running => { 359 | info!("reloading all services"); 360 | for srv in self.services.values() { 361 | srv.do_send(service::Reload(true)); 362 | } 363 | } 364 | _ => warn!("Can not reload in system in `{:?}` state", self.state) 365 | } 366 | } 367 | } 368 | 369 | /// Handle ProcessEvent (SIGHUP, SIGINT, etc) 370 | impl Handler for CommandCenter { 371 | type Result = (); 372 | 373 | fn handle(&mut self, msg: signal::Signal, ctx: &mut Context) { 374 | match msg.0 { 375 | signal::SignalType::Int => { 376 | info!("SIGINT received, exiting"); 377 | self.stop(ctx, false); 378 | } 379 | signal::SignalType::Hup => { 380 | info!("SIGHUP received, reloading"); 381 | // self.handle(ReloadAll, ctx); 382 | } 383 | signal::SignalType::Term => { 384 | info!("SIGTERM received, stopping"); 385 | self.stop(ctx, true); 386 | } 387 | signal::SignalType::Quit => { 388 | info!("SIGQUIT received, exiting"); 389 | self.stop(ctx, false); 390 | } 391 | signal::SignalType::Child => { 392 | info!("SIGCHLD received"); 393 | debug!("Reap workers"); 394 | loop { 395 | match waitpid(None, Some(WNOHANG)) { 396 | Ok(WaitStatus::Exited(pid, code)) => { 397 | info!("Worker {} exit code: {}", pid, code); 398 | let err = ProcessError::from(code); 399 | for srv in self.services.values_mut() { 400 | srv.do_send( 401 | service::ProcessExited(pid, err.clone()) 402 | ); 403 | } 404 | continue 405 | } 406 | Ok(WaitStatus::Signaled(pid, sig, _)) => { 407 | info!("Worker {} exit by signal {:?}", pid, sig); 408 | let err = ProcessError::Signal(sig as usize); 409 | for srv in self.services.values_mut() { 410 | srv.do_send( 411 | service::ProcessExited(pid, err.clone()) 412 | ); 413 | } 414 | continue 415 | }, 416 | Ok(_) => (), 417 | Err(_) => (), 418 | } 419 | break 420 | } 421 | } 422 | } 423 | } 424 | } 425 | 426 | 427 | impl Actor for CommandCenter { 428 | type Context = Context; 429 | 430 | fn started(&mut self, ctx: &mut Context) 431 | { 432 | info!("Starting ctl service: {}", getpid()); 433 | 434 | // listen for process signals 435 | let addr: Addr = ctx.address(); 436 | Arbiter::system_registry().get::() 437 | .do_send(signal::Subscribe(addr.recipient())); 438 | 439 | // start services 440 | for cfg in &self.cfg.services { 441 | let service = FeService::start(cfg.num, cfg.clone()); 442 | self.services.insert(cfg.name.clone(), service); 443 | } 444 | self.state = State::Running; 445 | } 446 | 447 | fn stopping(&mut self, _: &mut Context) -> Running { 448 | self.exit(true); 449 | Running::Stop 450 | } 451 | } 452 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use std; 2 | use std::path::Path; 3 | use std::error::Error; 4 | use std::io::prelude::*; 5 | use std::ffi::OsString; 6 | 7 | use nix; 8 | use nix::unistd::{Gid, Uid}; 9 | use toml; 10 
| use structopt::StructOpt; 11 | 12 | use socket; 13 | use config_helpers; 14 | 15 | pub struct Config { 16 | pub master: MasterConfig, 17 | pub sockets: Vec, 18 | pub logging: LoggingConfig, 19 | pub services: Vec, 20 | } 21 | 22 | /// Master process configuration 23 | /// 24 | /// ```toml 25 | /// [master] 26 | /// daemon = true 27 | /// pid = "fectl.pid" 28 | /// sock = "fectl.sock" 29 | /// directory = "/path/to/dir" 30 | /// ``` 31 | #[derive(Debug)] 32 | pub struct MasterConfig { 33 | /// Start master process in daemon mode 34 | pub daemon: bool, 35 | /// Path to file with process pid 36 | pub pid: Option, 37 | /// Path to controller unix domain socket 38 | pub sock: OsString, 39 | /// Change to specified directory before apps loading. 40 | pub directory: OsString, 41 | 42 | /// Set group id 43 | pub gid: Option, 44 | /// Set uid id 45 | pub uid: Option, 46 | 47 | /// Redirect stdout 48 | pub stdout: Option, 49 | /// Redirect stderr 50 | pub stderr: Option, 51 | } 52 | 53 | impl MasterConfig 54 | { 55 | /// remove pid and sock files 56 | pub fn remove_files(&self) { 57 | if let Some(ref pid) = self.pid { 58 | let _ = std::fs::remove_file(pid); 59 | } 60 | let _ = std::fs::remove_file(&self.sock); 61 | } 62 | 63 | /// load pid of the master process 64 | pub fn load_pid(&self) -> Option { 65 | if let Some(ref pid) = self.pid { 66 | if let Ok(mut file) = std::fs::File::open(pid) { 67 | let mut buf = Vec::new(); 68 | if file.read_to_end(&mut buf).is_ok() { 69 | let spid = String::from_utf8_lossy(buf.as_ref()); 70 | if let Ok(pid) = spid.parse::() { 71 | return Some(nix::unistd::Pid::from_raw(pid)) 72 | } 73 | } 74 | } 75 | } 76 | None 77 | } 78 | 79 | /// save pid to filesystem 80 | pub fn save_pid(&self) -> Result<(), std::io::Error> { 81 | if let Some(ref pid) = self.pid { 82 | let mut file = std::fs::File::create(pid)?; 83 | file.write_all(nix::unistd::getpid().to_string().as_ref())?; 84 | } 85 | Ok(()) 86 | } 87 | } 88 | 89 | #[derive(Deserialize, Debug)] 90 | struct TomlConfig { 91 | master: Option, 92 | logging: Option, 93 | #[serde(default = "config_helpers::default_vec")] 94 | socket: Vec, 95 | #[serde(default = "config_helpers::default_vec")] 96 | service: Vec, 97 | } 98 | 99 | #[derive(Deserialize, Debug)] 100 | struct TomlMasterConfig { 101 | #[serde(default = "config_helpers::default_sock")] 102 | pub sock: String, 103 | pub pid: Option, 104 | pub directory: Option, 105 | 106 | #[serde(default)] 107 | #[serde(deserialize_with="config_helpers::deserialize_gid_field")] 108 | pub gid: Option, 109 | 110 | #[serde(default)] 111 | #[serde(deserialize_with="config_helpers::deserialize_uid_field")] 112 | pub uid: Option, 113 | 114 | pub stdout: Option, 115 | pub stderr: Option, 116 | } 117 | 118 | 119 | #[derive(Deserialize, Debug, PartialEq)] 120 | #[allow(non_camel_case_types)] 121 | pub enum Proto { 122 | tcp4, 123 | tcp6, 124 | unix, 125 | } 126 | 127 | /// Socket configuration 128 | /// 129 | /// ```toml 130 | /// [[socket]] 131 | /// name = "http" 132 | /// port = 8080 133 | /// ip = "0.0.0.0" 134 | /// service = ["test"] 135 | /// loader = "aiohttp" 136 | /// arguments = ["arg1", "arg2", "arg3"] 137 | /// ``` 138 | #[derive(Deserialize, Debug)] 139 | pub struct SocketConfig { 140 | pub name: String, 141 | pub port: u32, 142 | pub host: Option, 143 | #[serde(default = "config_helpers::default_backlog")] 144 | pub backlog: u16, 145 | #[serde(default = "config_helpers::default_proto")] 146 | pub proto: Proto, 147 | #[serde(default = "config_helpers::default_vec")] 148 | pub 
service: Vec, 149 | pub app: Option, 150 | #[serde(default = "config_helpers::default_vec")] 151 | pub arguments: Vec, 152 | } 153 | 154 | #[derive(Deserialize, Clone, Debug)] 155 | pub struct ServiceConfig { 156 | /// Service name 157 | pub name: String, 158 | 159 | /// Number of workers to start 160 | pub num: u16, 161 | 162 | /// Worker start command 163 | pub command: String, 164 | 165 | /// Number of restarts before marking worker as failed, default 3 166 | #[serde(default="config_helpers::default_restarts")] 167 | pub restarts: u16, 168 | 169 | /// Change to specified directory before service worker loading. 170 | pub directory: Option, 171 | 172 | /// Switch worker process to run as this group. 173 | /// 174 | /// A valid group id (as an integer) or the name of a user that can be 175 | /// retrieved with a call to ``libc::getgrnam(value)`` or ``None`` to not 176 | /// change the worker processes group. 177 | #[serde(default)] 178 | #[serde(deserialize_with="config_helpers::deserialize_gid_field")] 179 | pub gid: Option, 180 | 181 | /// Switch worker processes to run as this user. 182 | /// 183 | /// A valid user id (as an integer) or the name of a user that can be 184 | /// retrieved with a call to ``libc::getpwnam(value)`` or ``None`` to not 185 | /// change the worker process user. 186 | #[serde(default)] 187 | #[serde(deserialize_with="config_helpers::deserialize_uid_field")] 188 | pub uid: Option, 189 | 190 | /// Workers silent for more than this many seconds are killed and restarted. 191 | /// 192 | /// Generally set to ten seconds. Only set this noticeably higher if 193 | /// you're sure of the repercussions for sync workers. For the non sync 194 | /// workers it just means that the worker process is still communicating and 195 | /// is not tied to the length of time required to handle a single request. 196 | #[serde(default="config_helpers::default_timeout")] 197 | pub timeout: u32, 198 | 199 | /// Timeout for worker startup. 200 | /// 201 | /// After start, workers have this much time to report radyness state. 202 | /// Workers that do not report `loaded` state to master are force killed and 203 | /// get restarted. 204 | #[serde(default="config_helpers::default_startup_timeout")] 205 | pub startup_timeout: u32, 206 | 207 | /// Timeout for graceful workers shutdown. 208 | /// 209 | /// After receiving a restart or stop signal, workers have this much time to finish 210 | /// serving requests. Workers still alive after the timeout (starting from 211 | /// the receipt of the restart signal) are force killed. 212 | #[serde(default="config_helpers::default_shutdown_timeout")] 213 | pub shutdown_timeout: u32, 214 | 215 | /// A path to a file where `fectld` should redirect `stdout` for this service. 216 | /// 217 | /// By default redirect for stdout is not enabled 218 | pub stdout: Option, 219 | 220 | /// A path to a file where `fectld` should redirect `stderr` for this service. 
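// Editor's note: unlike `MasterConfig` and `SocketConfig`, `ServiceConfig` has no
// TOML example in its docs. A minimal illustrative sketch of a `[[service]]`
// section, using only the field names documented in this struct; the name,
// command and log paths are made-up values, and the numeric values simply
// repeat the documented defaults:
//
//   [[service]]
//   name = "example"
//   num = 2
//   command = "python -m myapp"
//   restarts = 3
//   timeout = 10
//   startup_timeout = 30
//   shutdown_timeout = 30
//   stdout = "example.out.log"
//   stderr = "example.err.log"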
221 | /// 222 | /// By default redirect for stderr is not enabled 223 | pub stderr: Option, 224 | 225 | } 226 | 227 | /// Logging configuration 228 | /// 229 | /// ```toml 230 | /// [logging] 231 | /// level = "info" 232 | /// facility = "user" 233 | /// ``` 234 | #[derive(Deserialize, Debug)] 235 | pub struct LoggingConfig { 236 | pub name: String, 237 | pub service: String, 238 | pub level: Option, 239 | pub facility: Option, 240 | } 241 | 242 | impl Default for LoggingConfig { 243 | fn default() -> Self { 244 | LoggingConfig { 245 | name: "default".to_owned(), 246 | service: "console".to_owned(), 247 | level: Some("info".to_owned()), 248 | facility: None, 249 | } 250 | } 251 | } 252 | 253 | 254 | /// Command line arguments 255 | #[derive(StructOpt, Debug)] 256 | struct Cli { 257 | /// Sets a custom config file for fectld 258 | #[structopt(long="config", short="c", default_value="fectld.toml")] 259 | config: String, 260 | 261 | /// Run in background 262 | #[structopt(long="daemon", short="d")] 263 | daemon: bool, 264 | } 265 | 266 | 267 | pub fn load_config() -> Option { 268 | let args = Cli::from_args(); 269 | 270 | let mut cfg_str = String::new(); 271 | if let Err(err) = std::fs::File::open(args.config) 272 | .and_then(|mut f| f.read_to_string(&mut cfg_str)) 273 | { 274 | println!("Can not read configuration file due to: {}", err.description()); 275 | return None 276 | } 277 | 278 | let cfg: TomlConfig = match toml::from_str(&cfg_str) { 279 | Ok(cfg) => cfg, 280 | Err(err) => { 281 | println!("Can not parse config file: {}", err); 282 | return None 283 | } 284 | }; 285 | 286 | // master config 287 | let toml_master = cfg.master.unwrap_or(TomlMasterConfig { 288 | sock: config_helpers::default_sock(), 289 | directory: None, 290 | pid: None, 291 | gid: None, 292 | uid: None, 293 | stdout: None, 294 | stderr: None, 295 | }); 296 | 297 | // check if working directory exists 298 | let directory = if let Some(ref dir) = toml_master.directory { 299 | match std::fs::canonicalize(dir) { 300 | Ok(path) => path.into_os_string(), 301 | Err(err) => { 302 | println!("Error accessing working directory: {}", err); 303 | return None 304 | } 305 | } 306 | } else { 307 | match std::env::current_dir() { 308 | Ok(d) => d.into_os_string(), 309 | Err(_) => return None, 310 | } 311 | }; 312 | 313 | // canonicalize pid file path 314 | let pid = if let Some(pid) = toml_master.pid { 315 | Some(Path::new(&directory).join(&pid).into_os_string()) 316 | } else { 317 | None 318 | }; 319 | 320 | let master = MasterConfig { 321 | // set default value from command line 322 | daemon: args.daemon, 323 | 324 | // canonicalize socket path 325 | sock: Path::new(&directory).join(&toml_master.sock).into_os_string(), 326 | 327 | pid, 328 | gid: toml_master.gid, 329 | uid: toml_master.uid, 330 | 331 | // working directory (validated above) 332 | directory, 333 | 334 | // redirect stdout/stderr to the specified files 335 | stdout: toml_master.stdout, 336 | stderr: toml_master.stderr, 337 | }; 338 | 339 | // sockets config 340 | let sockets = match socket::Socket::load_config(&cfg.socket) { 341 | Ok(sockets) => sockets, 342 | Err(err) => { 343 | println!("{}", err); 344 | return None 345 | } 346 | }; 347 | 348 | Some(Config { 349 | master, 350 | sockets, 351 | services: cfg.service, 352 | logging: cfg.logging.unwrap_or(LoggingConfig::default()), 353 | }) 354 | } 355 | -------------------------------------------------------------------------------- /src/config_helpers.rs: 
-------------------------------------------------------------------------------- 1 | use std::ffi::CString; 2 | 3 | use libc; 4 | use nix::unistd::{Gid, Uid}; 5 | use serde; 6 | use serde_json as json; 7 | 8 | use config::Proto; 9 | 10 | 11 | pub fn default_vec() -> Vec { 12 | Vec::new() 13 | } 14 | 15 | pub fn default_sock() -> String { 16 | "fectld.sock".to_owned() 17 | } 18 | 19 | pub fn default_backlog() -> u16 { 20 | 256 21 | } 22 | 23 | pub fn default_proto() -> Proto { 24 | Proto::tcp4 25 | } 26 | 27 | pub fn default_restarts() -> u16 { 28 | 3 29 | } 30 | 31 | pub fn default_timeout() -> u32 { 32 | 10 33 | } 34 | 35 | pub fn default_startup_timeout() -> u32 { 36 | 30 37 | } 38 | 39 | pub fn default_shutdown_timeout() -> u32 { 40 | 30 41 | } 42 | 43 | /// Deserialize `gid` field into `Gid` 44 | pub(crate) fn deserialize_gid_field<'de, D>(de: D) -> Result, D::Error> 45 | where D: serde::Deserializer<'de> 46 | { 47 | let deser_result: json::Value = serde::Deserialize::deserialize(de)?; 48 | match deser_result { 49 | json::Value::String(ref s) => 50 | if let Ok(name) = CString::new(s.as_str()) { 51 | unsafe { 52 | let ptr = libc::getgrnam(name.as_ptr()); 53 | return if ptr.is_null() { 54 | Err(serde::de::Error::custom("Can not convert group name to group id")) 55 | } else { 56 | Ok(Some(Gid::from_raw((*ptr).gr_gid))) 57 | }; 58 | } 59 | } else { 60 | return Err(serde::de::Error::custom("Can not convert to plain string")) 61 | } 62 | json::Value::Number(num) => { 63 | if let Some(num) = num.as_u64() { 64 | if num <= u64::from(u32::max_value()) { 65 | return Ok(Some(Gid::from_raw(num as libc::gid_t))) 66 | } 67 | } 68 | } 69 | _ => (), 70 | } 71 | Err(serde::de::Error::custom("Unexpected value")) 72 | } 73 | 74 | /// Deserialize `uid` field into `Uid` 75 | pub fn deserialize_uid_field<'de, D>(de: D) -> Result, D::Error> 76 | where D: serde::Deserializer<'de> 77 | { 78 | let deser_result: json::Value = serde::Deserialize::deserialize(de)?; 79 | match deser_result { 80 | json::Value::String(ref s) => 81 | if let Ok(name) = CString::new(s.as_str()) { 82 | unsafe { 83 | let ptr = libc::getpwnam(name.as_ptr()); 84 | return if ptr.is_null() { 85 | Err(serde::de::Error::custom("Can not convert user name to user id")) 86 | } else { 87 | Ok(Some(Uid::from_raw((*ptr).pw_uid))) 88 | }; 89 | } 90 | } else { 91 | return Err(serde::de::Error::custom("Can not convert to plain string")) 92 | } 93 | json::Value::Number(num) => { 94 | if let Some(num) = num.as_u64() { 95 | if num <= u64::from(u32::max_value()) { 96 | return Ok(Some(Uid::from_raw(num as u32))); 97 | } 98 | } 99 | } 100 | _ => (), 101 | } 102 | Err(serde::de::Error::custom("Unexpected value")) 103 | } 104 | -------------------------------------------------------------------------------- /src/event.rs: -------------------------------------------------------------------------------- 1 | use std; 2 | use std::collections::VecDeque; 3 | use std::time::{UNIX_EPOCH, SystemTime}; 4 | 5 | pub type ServiceStatus = (String, Vec<(String, Vec)>); 6 | 7 | #[derive(Clone, Copy, Serialize, Deserialize, Debug)] 8 | pub enum State { 9 | Starting, 10 | Reloading, 11 | Restarting, 12 | Running, 13 | StoppingOld, 14 | Stopping, 15 | Failed, 16 | Stopped, 17 | Paused, 18 | RestartFailed, 19 | ReloadFailed, 20 | } 21 | 22 | #[derive(Clone, Serialize, Deserialize, Debug)] 23 | pub enum Reason { 24 | None, 25 | Initial, 26 | Exit, 27 | ConsoleRequest, 28 | WorkerRequest, 29 | SomeWorkersFailed, 30 | WorkerError(String), 31 | FailedToStart(Option), 32 | 
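// Editor's note: `Reason` values are recorded alongside a `State` in the bounded
// `Events` history below (see `Events::add`), which appears to be what service
// status queries ultimately report back to clients.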
HeartbeatFailed, 33 | StartupTimeout, 34 | StopTimeout, 35 | InitFailed, 36 | BootFailed, 37 | Signal(usize), 38 | ExitCode(i8), 39 | NewProcessDied, 40 | RestartFailedStartingWorker, 41 | RestartFailedRunningWorker, 42 | RestoreAftreFailed, 43 | ReloadAftreTimeout, 44 | } 45 | 46 | #[derive(Clone, Serialize, Deserialize, Debug)] 47 | pub struct Event { 48 | pub state: State, 49 | pub reason: Reason, 50 | pub timestamp: u64, 51 | pub pid: Option, 52 | } 53 | 54 | impl Event { 55 | 56 | pub fn new(state: State, reason: Reason, pid: Option) -> Event { 57 | Event { 58 | state, 59 | reason, 60 | pid, 61 | timestamp: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() 62 | } 63 | } 64 | } 65 | 66 | 67 | pub struct Events { 68 | max: usize, 69 | events: VecDeque, 70 | } 71 | 72 | impl Events { 73 | /// Create new `Events` 74 | pub fn new(max: usize) -> Events { 75 | Events { max, events: VecDeque::new() } 76 | } 77 | 78 | /// Add new event 79 | pub fn add(&mut self, state: State, reason: Reason, pid: Option) { 80 | if self.events.len() >= self.max { 81 | self.events.pop_front(); 82 | } 83 | self.events.push_back(Event::new(state, reason, pid)); 84 | } 85 | } 86 | 87 | 88 | impl<'a> std::convert::From<&'a Events> for Vec 89 | { 90 | fn from(ob: &'a Events) -> Self { 91 | ob.events.iter().cloned().collect() 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/exec.rs: -------------------------------------------------------------------------------- 1 | // Execute worker process in child process 2 | use std; 3 | use std::ffi::CString; 4 | use std::io::{Read, Write}; 5 | use std::os::unix::io::{RawFd, AsRawFd, FromRawFd}; 6 | 7 | use libc; 8 | use bytes::{BytesMut, Buf, BufMut, IntoBuf}; 9 | use byteorder::BigEndian; 10 | use serde_json as json; 11 | use nix::unistd::{chdir, dup2, execve, setuid, setgid}; 12 | 13 | use utils; 14 | use worker::{WorkerCommand, WorkerMessage}; 15 | use config::ServiceConfig; 16 | use process::{WORKER_INIT_FAILED, WORKER_BOOT_FAILED}; 17 | 18 | 19 | fn send_msg(file: &mut std::fs::File, msg: WorkerMessage) { 20 | let msg = json::to_string(&msg).unwrap(); 21 | let msg_ref: &[u8] = msg.as_ref(); 22 | 23 | let mut buf = BytesMut::with_capacity(msg_ref.len() + 2); 24 | buf.put_u16::(msg_ref.len() as u16); 25 | buf.put(msg_ref); 26 | if let Err(err) = file.write_all(buf.as_ref()) { 27 | error!("Failed to notify master: {}", err); 28 | std::process::exit(WORKER_INIT_FAILED as i32); 29 | } 30 | } 31 | 32 | pub fn exec_worker(idx: usize, cfg: &ServiceConfig, read: RawFd, write: RawFd) { 33 | // notify master 34 | let mut file = unsafe{ std::fs::File::from_raw_fd(write) }; 35 | send_msg(&mut file, WorkerMessage::forked); 36 | 37 | // read master response 38 | let mut buffer = [0; 2]; 39 | let mut file = unsafe{ std::fs::File::from_raw_fd(read) }; 40 | if let Err(err) = file.read_exact(&mut buffer) { 41 | error!("Failed to read master response: {}", err); 42 | std::process::exit(WORKER_INIT_FAILED as i32); 43 | } 44 | let size = buffer.into_buf().get_u16::(); 45 | let mut buffer = Vec::with_capacity(size as usize); 46 | unsafe {buffer.set_len(size as usize)}; 47 | if let Err(err) = file.read_exact(&mut buffer) { 48 | error!("Failed to read master response: {}", err); 49 | std::process::exit(WORKER_INIT_FAILED as i32); 50 | } 51 | match json::from_slice::(&buffer) { 52 | Ok(WorkerCommand::prepare) => (), 53 | Ok(_) | Err(_) => { 54 | error!("Can not decode master's message: {:?}", &buffer); 55 | 
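// Editor's note on the wire format: master <-> worker messages are framed as a
// 2-byte big-endian length prefix followed by a JSON body; `send_msg` above
// writes frames in that shape, and the two `read_exact` calls here consume one
// frame back. The same framing is implemented by `TransportCodec` in
// src/process.rs and `MasterTransportCodec` in src/master.rs.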
std::process::exit(WORKER_INIT_FAILED as i32); 56 | } 57 | } 58 | 59 | // change dir 60 | if let Some(ref dir) = cfg.directory { 61 | if let Err(err) = chdir::(dir.as_ref()) { 62 | error!("Can not change directory {:?} err: {:?}", dir, err); 63 | send_msg(&mut file, WorkerMessage::cfgerror( 64 | format!("Can not change directory to {}", dir))); 65 | std::process::exit(WORKER_INIT_FAILED as i32); 66 | } 67 | } 68 | 69 | // set uid 70 | if let Some(uid) = cfg.uid { 71 | if let Err(err) = setuid(uid) { 72 | send_msg(&mut file, WorkerMessage::cfgerror( 73 | format!("Can not set worker uid, err: {}", err))); 74 | std::process::exit(WORKER_INIT_FAILED as i32); 75 | } 76 | } 77 | 78 | // set gid 79 | if let Some(gid) = cfg.gid { 80 | if let Err(err) = setgid(gid) { 81 | send_msg(&mut file, WorkerMessage::cfgerror( 82 | format!("Can not set worker gid, err: {}", err))); 83 | std::process::exit(WORKER_INIT_FAILED as i32); 84 | } 85 | } 86 | 87 | // prepare command and arguments 88 | let mut iter = cfg.command.split_whitespace(); 89 | let path = if let Some(path) = iter.next() { 90 | if let Some(path) = utils::find_path(path) { 91 | path 92 | } else { 93 | error!("Can not find executable"); 94 | send_msg(&mut file, WorkerMessage::cfgerror( 95 | format!("Can not find executable: {}", path))); 96 | std::process::exit(WORKER_INIT_FAILED as i32); 97 | } 98 | } else { 99 | error!("Can not find executable"); 100 | send_msg(&mut file, WorkerMessage::cfgerror( 101 | "Can not find executable".to_owned())); 102 | std::process::exit(WORKER_INIT_FAILED as i32); 103 | }; 104 | let mut args: Vec<_> = vec![CString::new(path.as_str()).unwrap()]; 105 | args.extend(iter.map(|s| CString::new(s).unwrap()).collect::>()); 106 | 107 | // redirect stdout and stderr 108 | if let Some(ref stdout) = cfg.stdout { 109 | match std::fs::OpenOptions::new().append(true).create(true).open(stdout) 110 | { 111 | Ok(f) => { 112 | let _ = dup2(f.as_raw_fd(), libc::STDOUT_FILENO); 113 | } 114 | Err(err) => { 115 | send_msg(&mut file, WorkerMessage::cfgerror( 116 | format!("Can open stdout file {}: {}", stdout, err))); 117 | std::process::exit(WORKER_INIT_FAILED as i32); 118 | } 119 | } 120 | } 121 | 122 | if let Some(ref stderr) = cfg.stderr { 123 | match std::fs::OpenOptions::new().append(true).create(true).open(stderr) 124 | { 125 | Ok(f) => { 126 | let _ = dup2(f.as_raw_fd(), libc::STDERR_FILENO); 127 | 128 | }, 129 | Err(err) => { 130 | send_msg(&mut file, WorkerMessage::cfgerror( 131 | format!("Can open stderr file {}: {}", stderr, err))); 132 | std::process::exit(WORKER_INIT_FAILED as i32); 133 | } 134 | } 135 | } 136 | 137 | debug!("Starting worker: {:?}", cfg.command); 138 | 139 | let mut env = utils::get_env_vars(true); 140 | env.push(CString::new(format!("FECTL_FD={}:{}", read, write)).unwrap()); 141 | env.push(CString::new(format!("FECTL_SRV_NAME={}", cfg.name)).unwrap()); 142 | env.push(CString::new(format!("FECTL_PROC_IDX={}", idx)).unwrap()); 143 | match execve(&CString::new(path).unwrap(), &args, &env) { 144 | Ok(_) => unreachable!(), 145 | Err(err) => { 146 | error!("Can not execute command: \"{}\" with error: {:?}", cfg.command, err); 147 | std::process::exit(WORKER_BOOT_FAILED as i32); 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/io.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::fs::File; 3 | use std::io::{Read, Write}; 4 | use std::os::unix::io::{IntoRawFd, AsRawFd, FromRawFd, 
RawFd}; 5 | 6 | use mio; 7 | use mio::unix::EventedFd; 8 | use futures::{Poll, Async}; 9 | use tokio_io::{AsyncRead, AsyncWrite}; 10 | use tokio_core::reactor::{Handle, PollEvented}; 11 | use nix::fcntl::{fcntl, FcntlArg, OFlag, O_NONBLOCK}; 12 | 13 | 14 | pub struct PipeFile { 15 | read: Io, 16 | read_poll: PollEvented, 17 | write: Io, 18 | write_poll: PollEvented, 19 | } 20 | 21 | impl PipeFile { 22 | pub fn new(read: RawFd, write: RawFd, handle: &Handle) -> PipeFile { 23 | PipeFile { 24 | read: unsafe{ Io::from_raw_fd(read) }, 25 | read_poll: PollEvented::new(unsafe{ Io::from_raw_fd(read) }, handle).unwrap(), 26 | write: unsafe{ Io::from_raw_fd(write) }, 27 | write_poll: PollEvented::new(unsafe{ Io::from_raw_fd(write) }, handle).unwrap(), 28 | } 29 | } 30 | } 31 | 32 | impl Read for PipeFile { 33 | fn read(&mut self, dst: &mut [u8]) -> io::Result { 34 | match self.read_poll.poll_read() { 35 | Async::Ready(_) => match self.read.read(dst) { 36 | Ok(size) => { 37 | self.read_poll.need_read(); 38 | Ok(size) 39 | }, 40 | Err(err) => Err(err) 41 | } 42 | Async::NotReady => Err(io::Error::new(io::ErrorKind::WouldBlock, "")), 43 | } 44 | } 45 | } 46 | 47 | impl Write for PipeFile { 48 | fn write(&mut self, src: &[u8]) -> io::Result { 49 | match self.write_poll.poll_write() { 50 | Async::Ready(_) => match self.write.write(src) { 51 | Ok(size) => { 52 | self.read_poll.need_write(); 53 | Ok(size) 54 | }, 55 | Err(err) => Err(err) 56 | } 57 | Async::NotReady => Err(io::Error::new(io::ErrorKind::WouldBlock, "")), 58 | } 59 | } 60 | 61 | fn flush(&mut self) -> io::Result<()> { 62 | (&self.write).flush() 63 | } 64 | } 65 | 66 | impl AsyncRead for PipeFile {} 67 | 68 | impl AsyncWrite for PipeFile { 69 | fn shutdown(&mut self) -> Poll<(), io::Error> { 70 | Ok(().into()) 71 | } 72 | } 73 | 74 | /// Manages a FD 75 | #[derive(Debug)] 76 | pub struct Io { 77 | fd: File, 78 | } 79 | 80 | impl Io { 81 | /// Try to clone the FD 82 | pub fn try_clone(&self) -> io::Result { 83 | Ok(Io { fd: self.fd.try_clone()? 
}) 84 | } 85 | } 86 | 87 | impl FromRawFd for Io { 88 | unsafe fn from_raw_fd(fd: RawFd) -> Io { 89 | let flags = fcntl(fd, FcntlArg::F_GETFL).unwrap(); 90 | let _ = fcntl(fd, FcntlArg::F_SETFL( 91 | OFlag::from_bits_truncate(flags) | O_NONBLOCK)); 92 | 93 | Io { fd: File::from_raw_fd(fd) } 94 | } 95 | } 96 | 97 | impl IntoRawFd for Io { 98 | fn into_raw_fd(self) -> RawFd { 99 | self.fd.into_raw_fd() 100 | } 101 | } 102 | 103 | impl AsRawFd for Io { 104 | fn as_raw_fd(&self) -> RawFd { 105 | self.fd.as_raw_fd() 106 | } 107 | } 108 | 109 | impl mio::Evented for Io { 110 | fn register(&self, poll: &mio::Poll, 111 | token: mio::Token, interest: mio::Ready, 112 | opts: mio::PollOpt) -> io::Result<()> 113 | { 114 | EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts) 115 | } 116 | 117 | fn reregister(&self, poll: &mio::Poll, 118 | token: mio::Token, interest: mio::Ready, 119 | opts: mio::PollOpt) -> io::Result<()> 120 | { 121 | EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts) 122 | } 123 | 124 | fn deregister(&self, poll: &mio::Poll) -> io::Result<()> { 125 | EventedFd(&self.as_raw_fd()).deregister(poll) 126 | } 127 | } 128 | 129 | impl Read for Io { 130 | fn read(&mut self, dst: &mut [u8]) -> io::Result { 131 | (&self.fd).read(dst) 132 | } 133 | } 134 | 135 | impl<'a> Read for &'a Io { 136 | fn read(&mut self, dst: &mut [u8]) -> io::Result { 137 | (&self.fd).read(dst) 138 | } 139 | } 140 | 141 | impl Write for Io { 142 | fn write(&mut self, src: &[u8]) -> io::Result { 143 | (&self.fd).write(src) 144 | } 145 | 146 | fn flush(&mut self) -> io::Result<()> { 147 | (&self.fd).flush() 148 | } 149 | } 150 | 151 | impl<'a> Write for &'a Io { 152 | fn write(&mut self, src: &[u8]) -> io::Result { 153 | (&self.fd).write(src) 154 | } 155 | 156 | fn flush(&mut self) -> io::Result<()> { 157 | (&self.fd).flush() 158 | } 159 | } 160 | 161 | 162 | impl AsyncRead for Io {} 163 | 164 | impl<'a> AsyncRead for &'a Io {} 165 | 166 | impl AsyncWrite for Io { 167 | fn shutdown(&mut self) -> Poll<(), io::Error> { 168 | Ok(().into()) 169 | } 170 | } 171 | 172 | impl<'a> AsyncWrite for &'a Io { 173 | fn shutdown(&mut self) -> Poll<(), io::Error> { 174 | Ok(().into()) 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/logging.rs: -------------------------------------------------------------------------------- 1 | use std::io::Write; 2 | use std::str::FromStr; 3 | 4 | use time; 5 | use log::LevelFilter; 6 | use env_logger::Builder; 7 | 8 | use version::PKG_INFO; 9 | use config::LoggingConfig; 10 | 11 | pub fn init_logging(cfg: &LoggingConfig) { 12 | let level = cfg.level.as_ref().and_then( 13 | |s| match LevelFilter::from_str(&s) { 14 | Ok(lvl) => Some(lvl), 15 | Err(_) => { 16 | println!("Can not parse log level value, using `info` level"); 17 | Some(LevelFilter::Info) 18 | } 19 | }).unwrap_or(LevelFilter::Info); 20 | 21 | Builder::new() 22 | .format(|buf, record| { 23 | let t = time::now(); 24 | write!(buf, "{},{:03} - {} - {}\n", 25 | time::strftime("%Y-%m-%d %H:%M:%S", &t).unwrap(), 26 | t.tm_nsec / 1000_000, 27 | record.level(), 28 | record.args() 29 | )}) 30 | .filter(Some(PKG_INFO.name), level) 31 | .init(); 32 | } 33 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate time; 2 | extern crate env_logger; 3 | #[macro_use] extern crate log; 4 | 5 | extern crate structopt; 6 | #[macro_use] 
extern crate structopt_derive; 7 | 8 | extern crate serde; 9 | extern crate serde_json; 10 | #[macro_use] extern crate serde_derive; 11 | 12 | extern crate byteorder; 13 | extern crate toml; 14 | extern crate mio; 15 | extern crate nix; 16 | extern crate net2; 17 | extern crate libc; 18 | extern crate bytes; 19 | extern crate futures; 20 | extern crate tokio_core; 21 | extern crate tokio_uds; 22 | extern crate tokio_io; 23 | 24 | #[macro_use] 25 | extern crate actix; 26 | 27 | mod addrinfo; 28 | mod client; 29 | mod config; 30 | mod config_helpers; 31 | mod cmd; 32 | mod exec; 33 | mod event; 34 | mod logging; 35 | mod master; 36 | mod master_types; 37 | mod service; 38 | mod socket; 39 | mod worker; 40 | mod process; 41 | mod io; 42 | mod utils; 43 | 44 | mod version { 45 | include!(concat!(env!("OUT_DIR"), "/version.rs")); 46 | } 47 | 48 | fn main() { 49 | let sys = actix::System::new("fectl"); 50 | let loaded = match config::load_config() { 51 | Some(cfg) => master::start(cfg), 52 | None => false, 53 | }; 54 | let code = if loaded { 55 | sys.run() 56 | } else { 57 | 1 58 | }; 59 | std::process::exit(code); 60 | } 61 | -------------------------------------------------------------------------------- /src/master.rs: -------------------------------------------------------------------------------- 1 | use std; 2 | use std::io; 3 | use std::rc::Rc; 4 | use std::ffi::OsStr; 5 | use std::time::Duration; 6 | use std::thread; 7 | use std::os::unix::io::AsRawFd; 8 | use std::os::unix::net::UnixListener as StdUnixListener; 9 | 10 | use nix; 11 | use libc; 12 | use serde_json as json; 13 | use byteorder::{BigEndian , ByteOrder}; 14 | use bytes::{BytesMut, BufMut}; 15 | use futures::Stream; 16 | use tokio_core::reactor::Timeout; 17 | use tokio_uds::{UnixStream, UnixListener}; 18 | use tokio_io::AsyncRead; 19 | use tokio_io::io::WriteHalf; 20 | use tokio_io::codec::{FramedRead, Encoder, Decoder}; 21 | 22 | use actix::prelude::*; 23 | 24 | use client; 25 | use logging; 26 | use config::Config; 27 | use version::PKG_INFO; 28 | use cmd::{self, CommandCenter, CommandError}; 29 | use service::{StartStatus, ReloadStatus, ServiceOperationError}; 30 | use master_types::{MasterRequest, MasterResponse}; 31 | 32 | pub struct Master { 33 | cfg: Rc, 34 | cmd: Addr, 35 | } 36 | 37 | impl Actor for Master { 38 | type Context = Context; 39 | } 40 | 41 | #[derive(Message)] 42 | struct NetStream(UnixStream, std::os::unix::net::SocketAddr); 43 | 44 | impl StreamHandler for Master { 45 | 46 | fn handle(&mut self, msg: NetStream, _: &mut Context) { 47 | let cmd = self.cmd.clone(); 48 | 49 | MasterClient::create(|ctx| { 50 | let (r, w) = msg.0.split(); 51 | ctx.add_stream(FramedRead::new(r, MasterTransportCodec)); 52 | 53 | MasterClient{ 54 | cmd, 55 | framed: actix::io::FramedWrite::new(w, MasterTransportCodec, ctx)} 56 | }) 57 | } 58 | } 59 | 60 | impl Drop for Master { 61 | fn drop(&mut self) { 62 | self.cfg.master.remove_files(); 63 | } 64 | } 65 | 66 | struct MasterClient { 67 | cmd: Addr, 68 | framed: actix::io::FramedWrite, MasterTransportCodec>, 69 | } 70 | 71 | impl Actor for MasterClient { 72 | type Context = Context; 73 | 74 | fn started(&mut self, ctx: &mut Self::Context) { 75 | self.hb(ctx); 76 | } 77 | } 78 | 79 | impl actix::io::WriteHandler for MasterClient {} 80 | 81 | impl StreamHandler for MasterClient { 82 | 83 | fn handle(&mut self, msg: MasterRequest, ctx: &mut Self::Context) { 84 | ctx.notify(msg); 85 | } 86 | } 87 | 88 | impl MasterClient { 89 | 90 | fn hb(&self, ctx: &mut Context) { 91 | let fut = 
Timeout::new(Duration::new(1, 0), Arbiter::handle()) 92 | .unwrap() 93 | .actfuture() 94 | .then(|_, act: &mut MasterClient, ctx: &mut Context| { 95 | act.framed.write(MasterResponse::Pong); 96 | act.hb(ctx); 97 | actix::fut::ok(()) 98 | }); 99 | ctx.spawn(fut); 100 | } 101 | 102 | fn handle_error(&mut self, err: CommandError, _: &mut Context) { 103 | match err { 104 | CommandError::NotReady => 105 | self.framed.write(MasterResponse::ErrorNotReady), 106 | CommandError::UnknownService => 107 | self.framed.write(MasterResponse::ErrorUnknownService), 108 | CommandError::ServiceStopped => 109 | self.framed.write(MasterResponse::ErrorServiceStopped), 110 | CommandError::Service(err) => match err { 111 | ServiceOperationError::Starting => 112 | self.framed.write(MasterResponse::ErrorServiceStarting), 113 | ServiceOperationError::Reloading => 114 | self.framed.write(MasterResponse::ErrorServiceReloading), 115 | ServiceOperationError::Stopping => 116 | self.framed.write(MasterResponse::ErrorServiceStopping), 117 | ServiceOperationError::Running => 118 | self.framed.write(MasterResponse::ErrorServiceRunning), 119 | ServiceOperationError::Stopped => 120 | self.framed.write(MasterResponse::ErrorServiceStopped), 121 | ServiceOperationError::Failed => 122 | self.framed.write(MasterResponse::ErrorServiceFailed), 123 | } 124 | }; 125 | } 126 | 127 | fn stop(&mut self, name: String, ctx: &mut Context) { 128 | info!("Client command: Stop service '{}'", name); 129 | 130 | self.cmd.send(cmd::StopService(name, true)) 131 | .into_actor(self) 132 | .then(|res, srv, ctx| { 133 | match res { 134 | Err(_) => (), 135 | Ok(Err(err)) => match err { 136 | CommandError::ServiceStopped => 137 | srv.framed.write(MasterResponse::ServiceStarted), 138 | _ => srv.handle_error(err, ctx), 139 | } 140 | Ok(Ok(_)) => 141 | srv.framed.write(MasterResponse::ServiceStopped), 142 | }; 143 | actix::fut::ok(()) 144 | }).spawn(ctx); 145 | } 146 | 147 | fn reload(&mut self, name: String, ctx: &mut Context, graceful: bool) 148 | { 149 | info!("Client command: Reload service '{}'", name); 150 | 151 | self.cmd.send(cmd::ReloadService(name, graceful)) 152 | .into_actor(self) 153 | .then(|res, srv, ctx| { 154 | match res { 155 | Err(_) => (), 156 | Ok(Err(err)) => srv.handle_error(err, ctx), 157 | Ok(Ok(res)) => { 158 | match res { 159 | ReloadStatus::Success => 160 | srv.framed.write(MasterResponse::ServiceStarted), 161 | ReloadStatus::Failed => 162 | srv.framed.write(MasterResponse::ServiceFailed), 163 | ReloadStatus::Stopping => 164 | srv.framed.write(MasterResponse::ErrorServiceStopping), 165 | }; 166 | } 167 | } 168 | actix::fut::ok(()) 169 | }).spawn(ctx); 170 | } 171 | 172 | fn start_service(&mut self, name: String, ctx: &mut Context) { 173 | info!("Client command: Start service '{}'", name); 174 | 175 | self.cmd.send(cmd::StartService(name)) 176 | .into_actor(self) 177 | .then(|res, srv, ctx| { 178 | match res { 179 | Err(_) => (), 180 | Ok(Err(err)) => srv.handle_error(err, ctx), 181 | Ok(Ok(res)) => { 182 | match res { 183 | StartStatus::Success => 184 | srv.framed.write(MasterResponse::ServiceStarted), 185 | StartStatus::Failed => 186 | srv.framed.write(MasterResponse::ServiceFailed), 187 | StartStatus::Stopping => 188 | srv.framed.write(MasterResponse::ErrorServiceStopping), 189 | }; 190 | } 191 | } 192 | actix::fut::ok(()) 193 | }).spawn(ctx); 194 | } 195 | } 196 | 197 | impl Message for MasterRequest { 198 | type Result = (); 199 | } 200 | 201 | impl Handler for MasterClient { 202 | type Result = (); 203 | 204 | fn handle(&mut 
self, msg: MasterRequest, ctx: &mut Context) { 205 | match msg { 206 | MasterRequest::Ping => { 207 | self.framed.write(MasterResponse::Pong); 208 | }, 209 | MasterRequest::Start(name) => 210 | self.start_service(name, ctx), 211 | MasterRequest::Reload(name) => 212 | self.reload(name, ctx, true), 213 | MasterRequest::Restart(name) => 214 | self.reload(name, ctx, false), 215 | MasterRequest::Stop(name) => 216 | self.stop(name, ctx), 217 | MasterRequest::Pause(name) => { 218 | info!("Client command: Pause service '{}'", name); 219 | self.cmd.send(cmd::PauseService(name)) 220 | .into_actor(self) 221 | .then(|res, srv, ctx| { 222 | match res { 223 | Err(_) => (), 224 | Ok(Err(err)) => srv.handle_error(err, ctx), 225 | Ok(Ok(_)) => { 226 | srv.framed.write(MasterResponse::Done); 227 | }, 228 | }; 229 | actix::fut::ok(()) 230 | }).spawn(ctx); 231 | } 232 | MasterRequest::Resume(name) => { 233 | info!("Client command: Resume service '{}'", name); 234 | self.cmd.send(cmd::ResumeService(name)) 235 | .into_actor(self) 236 | .then(|res, srv, ctx| { 237 | match res { 238 | Err(_) => (), 239 | Ok(Err(err)) => srv.handle_error(err, ctx), 240 | Ok(Ok(_)) => { 241 | srv.framed.write(MasterResponse::Done); 242 | }, 243 | }; 244 | actix::fut::ok(()) 245 | }).spawn(ctx); 246 | } 247 | MasterRequest::Status(name) => { 248 | debug!("Client command: Service status '{}'", name); 249 | self.cmd.send(cmd::StatusService(name)) 250 | .into_actor(self) 251 | .then(|res, srv, ctx| { 252 | match res { 253 | Err(_) => (), 254 | Ok(Err(err)) => srv.handle_error(err, ctx), 255 | Ok(Ok(status)) => { 256 | srv.framed.write(MasterResponse::ServiceStatus(status)); 257 | }, 258 | }; 259 | actix::fut::ok(()) 260 | }).spawn(ctx); 261 | } 262 | MasterRequest::SPid(name) => { 263 | debug!("Client command: Service status '{}'", name); 264 | self.cmd.send(cmd::ServicePids(name)) 265 | .into_actor(self) 266 | .then(|res, srv, ctx| { 267 | match res { 268 | Err(_) => (), 269 | Ok(Err(err)) => srv.handle_error(err, ctx), 270 | Ok(Ok(pids)) => { 271 | srv.framed.write(MasterResponse::ServiceWorkerPids(pids)); 272 | }, 273 | }; 274 | actix::fut::ok(()) 275 | }).spawn(ctx); 276 | } 277 | MasterRequest::Pid => { 278 | self.framed.write(MasterResponse::Pid( 279 | format!("{}", nix::unistd::getpid()))); 280 | }, 281 | MasterRequest::Version => { 282 | self.framed.write(MasterResponse::Version( 283 | format!("{} {}", PKG_INFO.name, PKG_INFO.version))); 284 | }, 285 | MasterRequest::Quit => { 286 | self.cmd.send(cmd::Stop) 287 | .into_actor(self) 288 | .then(|_, act, _| { 289 | act.framed.write(MasterResponse::Done); 290 | actix::fut::ok(()) 291 | }).spawn(ctx); 292 | } 293 | }; 294 | } 295 | } 296 | 297 | 298 | /// Codec for Master transport 299 | struct MasterTransportCodec; 300 | 301 | impl Decoder for MasterTransportCodec 302 | { 303 | type Item = MasterRequest; 304 | type Error = io::Error; 305 | 306 | fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { 307 | let size = { 308 | if src.len() < 2 { 309 | return Ok(None) 310 | } 311 | BigEndian::read_u16(src.as_ref()) as usize 312 | }; 313 | 314 | if src.len() >= size + 2 { 315 | src.split_to(2); 316 | let buf = src.split_to(size); 317 | Ok(Some(json::from_slice::(&buf)?)) 318 | } else { 319 | Ok(None) 320 | } 321 | } 322 | } 323 | 324 | impl Encoder for MasterTransportCodec 325 | { 326 | type Item = MasterResponse; 327 | type Error = io::Error; 328 | 329 | fn encode(&mut self, msg: MasterResponse, dst: &mut BytesMut) -> Result<(), Self::Error> { 330 | let msg = 
json::to_string(&msg).unwrap(); 331 | let msg_ref: &[u8] = msg.as_ref(); 332 | 333 | dst.reserve(msg_ref.len() + 2); 334 | dst.put_u16::(msg_ref.len() as u16); 335 | dst.put(msg_ref); 336 | 337 | Ok(()) 338 | } 339 | } 340 | 341 | const HOST: &str = "127.0.0.1:57897"; 342 | 343 | /// Start master process 344 | pub fn start(cfg: Config) -> bool { 345 | // init logging 346 | logging::init_logging(&cfg.logging); 347 | 348 | info!("Starting fectl process"); 349 | 350 | // change working dir 351 | if let Err(err) = nix::unistd::chdir::(cfg.master.directory.as_ref()) { 352 | error!("Can not change directory {:?} err: {}", cfg.master.directory, err); 353 | return false 354 | } 355 | 356 | // check if other app is running 357 | for idx in 0..10 { 358 | match std::net::TcpListener::bind(HOST) { 359 | Ok(listener) => { 360 | std::mem::forget(listener); 361 | break 362 | } 363 | Err(_) => { 364 | if idx == 8 { 365 | error!("Can not start: Another process is running."); 366 | return false 367 | } 368 | info!("Trying to bind address, sleep for 5 seconds"); 369 | thread::sleep(Duration::new(5, 0)); 370 | } 371 | } 372 | } 373 | 374 | // create commands listener and also check if service process is running 375 | let lst = match StdUnixListener::bind(&cfg.master.sock) { 376 | Ok(lst) => lst, 377 | Err(err) => match err.kind() { 378 | io::ErrorKind::PermissionDenied => { 379 | error!("Can not create socket file {:?} err: Permission denied.", 380 | cfg.master.sock); 381 | return false 382 | }, 383 | io::ErrorKind::AddrInUse => { 384 | match client::is_alive(&cfg.master) { 385 | client::AliveStatus::Alive => { 386 | error!("Can not start: Another process is running."); 387 | return false 388 | }, 389 | client::AliveStatus::NotResponding => { 390 | error!("Master process is not responding."); 391 | if let Some(pid) = cfg.master.load_pid() { 392 | error!("Master process: (pid:{})", pid); 393 | } else { 394 | error!("Can not load pid of the master process."); 395 | } 396 | return false 397 | }, 398 | client::AliveStatus::NotAlive => { 399 | // remove socket and try again 400 | let _ = std::fs::remove_file(&cfg.master.sock); 401 | match StdUnixListener::bind(&cfg.master.sock) { 402 | Ok(lst) => lst, 403 | Err(err) => { 404 | error!("Can not create listener socket: {}", err); 405 | return false 406 | } 407 | } 408 | } 409 | } 410 | } 411 | _ => { 412 | error!("Can not create listener socket: {}", err); 413 | return false 414 | } 415 | } 416 | }; 417 | 418 | // try to save pid 419 | if let Err(err) = cfg.master.save_pid() { 420 | error!("Can not write pid file {:?} err: {}", cfg.master.pid, err); 421 | return false 422 | } 423 | 424 | // set uid 425 | if let Some(uid) = cfg.master.uid { 426 | if let Err(err) = nix::unistd::setuid(uid) { 427 | error!("Can not set process uid, err: {}", err); 428 | return false 429 | } 430 | } 431 | 432 | // set gid 433 | if let Some(gid) = cfg.master.gid { 434 | if let Err(err) = nix::unistd::setgid(gid) { 435 | error!("Can not set process gid, err: {}", err); 436 | return false 437 | } 438 | } 439 | 440 | let daemon = cfg.master.daemon; 441 | if daemon { 442 | if let Err(err) = nix::unistd::daemon(true, false) { 443 | error!("Can not daemonize process: {}", err); 444 | return false 445 | } 446 | 447 | // close stdin 448 | let _ = nix::unistd::close(libc::STDIN_FILENO); 449 | 450 | // redirect stdout and stderr 451 | if let Some(ref stdout) = cfg.master.stdout { 452 | match std::fs::OpenOptions::new().append(true).create(true).open(stdout) 453 | { 454 | Ok(f) => { 455 | let _ = 
nix::unistd::dup2(f.as_raw_fd(), libc::STDOUT_FILENO); 456 | } 457 | Err(err) => 458 | error!("Can open stdout file {}: {}", stdout, err), 459 | } 460 | } 461 | if let Some(ref stderr) = cfg.master.stderr { 462 | match std::fs::OpenOptions::new().append(true).create(true).open(stderr) 463 | { 464 | Ok(f) => { 465 | let _ = nix::unistd::dup2(f.as_raw_fd(), libc::STDERR_FILENO); 466 | 467 | }, 468 | Err(err) => error!("Can open stderr file {}: {}", stderr, err) 469 | } 470 | } 471 | 472 | // continue start process 473 | nix::sys::stat::umask(nix::sys::stat::Mode::from_bits(0o22).unwrap()); 474 | } 475 | 476 | let cfg = Rc::new(cfg); 477 | 478 | // create uds stream 479 | let lst = match UnixListener::from_listener(lst, Arbiter::handle()) { 480 | Ok(lst) => lst, 481 | Err(err) => { 482 | error!("Can not create unix socket listener {:?}", err); 483 | return false 484 | } 485 | }; 486 | 487 | // command center 488 | let cmd = CommandCenter::start(cfg.clone()); 489 | 490 | // start uds master server 491 | let _: () = Master::create(|ctx| { 492 | ctx.add_stream(lst.incoming().map(|(s, a)| NetStream(s, a))); 493 | Master{cfg, cmd}} 494 | ); 495 | 496 | if !daemon { 497 | println!(); 498 | } 499 | true 500 | } 501 | -------------------------------------------------------------------------------- /src/master_types.rs: -------------------------------------------------------------------------------- 1 | use event::ServiceStatus; 2 | 3 | /// Master command 4 | #[allow(non_camel_case_types)] 5 | #[derive(Serialize, Deserialize, Debug)] 6 | #[serde(tag="cmd", content="data")] 7 | pub enum MasterRequest { 8 | /// Ping master process 9 | Ping, 10 | /// Status 11 | Status(String), 12 | /// Service pids 13 | SPid(String), 14 | /// Start service 15 | Start(String), 16 | /// Pause service 17 | Pause(String), 18 | /// Resume service 19 | Resume(String), 20 | /// Gracefully reload service 21 | Reload(String), 22 | /// Restart service 23 | Restart(String), 24 | /// Gracefully stop service 25 | Stop(String), 26 | /// Pid of the master process 27 | Pid, 28 | /// Quit process 29 | Quit, 30 | /// Version if the master 31 | Version, 32 | } 33 | 34 | /// Master responses 35 | #[allow(non_camel_case_types)] 36 | #[derive(Serialize, Deserialize, Debug)] 37 | #[serde(tag="cmd", content="data")] 38 | pub enum MasterResponse { 39 | Pong, 40 | Done, 41 | /// Pid of the master process 42 | Pid(String), 43 | /// Version of the master process 44 | Version(String), 45 | 46 | /// Service started 47 | ServiceStarted, 48 | /// Service Stopped 49 | ServiceStopped, 50 | /// Service failed, service is not available 51 | ServiceFailed, 52 | /// Service status 53 | ServiceStatus(ServiceStatus), 54 | /// Service workers pids 55 | ServiceWorkerPids(Vec), 56 | 57 | /// System not ready 58 | ErrorNotReady, 59 | /// Service is unknown 60 | ErrorUnknownService, 61 | /// Service is starting 62 | ErrorServiceStarting, 63 | /// Service is running 64 | ErrorServiceRunning, 65 | /// Service is reloading 66 | ErrorServiceReloading, 67 | /// Service is stopping 68 | ErrorServiceStopping, 69 | /// Service is stopped 70 | ErrorServiceStopped, 71 | /// Service is failed 72 | ErrorServiceFailed, 73 | } 74 | -------------------------------------------------------------------------------- /src/process.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std; 4 | use std::io; 5 | use std::error::Error; 6 | use std::os::unix::io::RawFd; 7 | use std::time::{Duration, Instant}; 8 | 9 | use 
serde_json as json; 10 | use byteorder::{ByteOrder, BigEndian}; 11 | use bytes::{BytesMut, BufMut}; 12 | use tokio_io::AsyncRead; 13 | use tokio_io::io::WriteHalf; 14 | use tokio_io::codec::{FramedRead, Encoder, Decoder}; 15 | use nix::sys::signal::{kill, Signal}; 16 | use nix::unistd::{close, pipe, fork, ForkResult, Pid}; 17 | 18 | use actix::prelude::*; 19 | 20 | use config::ServiceConfig; 21 | use io::PipeFile; 22 | use worker::{WorkerMessage, WorkerCommand}; 23 | use event::Reason; 24 | use exec::exec_worker; 25 | use service::{self, FeService}; 26 | 27 | const HEARTBEAT: u64 = 2; 28 | const WORKER_TIMEOUT: i32 = 98; 29 | pub const WORKER_INIT_FAILED: i32 = 99; 30 | pub const WORKER_BOOT_FAILED: i32 = 100; 31 | 32 | pub struct Process { 33 | idx: usize, 34 | pid: Pid, 35 | state: ProcessState, 36 | hb: Instant, 37 | addr: Addr, 38 | timeout: Duration, 39 | startup_timeout: u64, 40 | shutdown_timeout: u64, 41 | framed: actix::io::FramedWrite, TransportCodec>, 42 | } 43 | 44 | impl Actor for Process { 45 | type Context = Context; 46 | 47 | fn stopping(&mut self, ctx: &mut Context) -> Running { 48 | self.kill(ctx, false); 49 | Running::Stop 50 | } 51 | } 52 | 53 | impl StreamHandler for Process { 54 | 55 | fn finished(&mut self, ctx: &mut Context) { 56 | self.kill(ctx, false); 57 | ctx.stop(); 58 | } 59 | 60 | fn handle(&mut self, msg: ProcessMessage, ctx: &mut Self::Context) { 61 | ctx.notify(msg); 62 | } 63 | } 64 | 65 | #[derive(Debug)] 66 | enum ProcessState { 67 | Starting, 68 | Failed, 69 | Running, 70 | Stopping, 71 | } 72 | 73 | #[derive(PartialEq, Debug, Message)] 74 | pub enum ProcessMessage { 75 | Message(WorkerMessage), 76 | StartupTimeout, 77 | StopTimeout, 78 | Heartbeat, 79 | Kill, 80 | } 81 | 82 | #[derive(Debug, Clone)] 83 | pub enum ProcessError { 84 | /// Heartbeat failed 85 | Heartbeat, 86 | /// Worker startup process failed, possibly application initialization failed 87 | FailedToStart(Option), 88 | /// Timeout during startup 89 | StartupTimeout, 90 | /// Timeout during graceful stop 91 | StopTimeout, 92 | /// Worker configuratin error 93 | ConfigError(String), 94 | /// Worker init failed 95 | InitFailed, 96 | /// Worker boot failed 97 | BootFailed, 98 | /// Worker received signal 99 | Signal(usize), 100 | /// Worker exited with code 101 | ExitCode(i8), 102 | } 103 | 104 | impl ProcessError { 105 | pub fn from(code: i8) -> ProcessError { 106 | match code as i32 { 107 | WORKER_TIMEOUT => ProcessError::StartupTimeout, 108 | WORKER_INIT_FAILED => ProcessError::InitFailed, 109 | WORKER_BOOT_FAILED => ProcessError::BootFailed, 110 | code => ProcessError::ExitCode(code as i8), 111 | } 112 | } 113 | } 114 | 115 | impl<'a> std::convert::From<&'a ProcessError> for Reason 116 | { 117 | fn from(ob: &'a ProcessError) -> Self { 118 | match *ob { 119 | ProcessError::Heartbeat => Reason::HeartbeatFailed, 120 | ProcessError::FailedToStart(ref err) => 121 | Reason::FailedToStart( 122 | if let &Some(ref e) = err { Some(format!("{}", e))} else {None}), 123 | ProcessError::StartupTimeout => Reason::StartupTimeout, 124 | ProcessError::StopTimeout => Reason::StopTimeout, 125 | ProcessError::ConfigError(ref err) => Reason::WorkerError(err.clone()), 126 | ProcessError::InitFailed => Reason::InitFailed, 127 | ProcessError::BootFailed => Reason::BootFailed, 128 | ProcessError::Signal(sig) => Reason::Signal(sig), 129 | ProcessError::ExitCode(code) => Reason::ExitCode(code), 130 | } 131 | } 132 | } 133 | 134 | 135 | impl Process { 136 | 137 | pub fn start(idx: usize, cfg: &ServiceConfig, addr: 
Addr) 138 | -> (Pid, Option>) 139 | { 140 | // fork process and esteblish communication 141 | let (pid, pipe) = match Process::fork(idx, cfg) { 142 | Ok(res) => res, 143 | Err(err) => { 144 | let pid = Pid::from_raw(-1); 145 | addr.do_send( 146 | service::ProcessFailed( 147 | idx, pid, 148 | ProcessError::FailedToStart(Some(format!("{}", err))))); 149 | 150 | return (pid, None) 151 | } 152 | }; 153 | 154 | let timeout = Duration::new(u64::from(cfg.timeout), 0); 155 | let startup_timeout = u64::from(cfg.startup_timeout); 156 | let shutdown_timeout = u64::from(cfg.shutdown_timeout); 157 | 158 | // start Process service 159 | let addr = Process::create(move |ctx| { 160 | let (r, w) = pipe.split(); 161 | ctx.add_stream(FramedRead::new(r, TransportCodec)); 162 | ctx.notify_later(ProcessMessage::StartupTimeout, 163 | Duration::new(startup_timeout as u64, 0)); 164 | Process { 165 | idx, pid, addr, timeout, startup_timeout, shutdown_timeout, 166 | state: ProcessState::Starting, 167 | hb: Instant::now(), 168 | framed: actix::io::FramedWrite::new(w, TransportCodec, ctx) 169 | }}); 170 | (pid, Some(addr)) 171 | } 172 | 173 | fn fork(idx: usize, cfg: &ServiceConfig) -> Result<(Pid, PipeFile), io::Error> 174 | { 175 | let (p_read, p_write, ch_read, ch_write) = Process::create_pipes()?; 176 | 177 | // fork 178 | let pid = match fork() { 179 | Ok(ForkResult::Parent{ child }) => child, 180 | Ok(ForkResult::Child) => { 181 | let _ = close(p_write); 182 | let _ = close(ch_read); 183 | exec_worker(idx, cfg, p_read, ch_write); 184 | unreachable!(); 185 | }, 186 | Err(err) => { 187 | error!("Fork failed: {}", err.description()); 188 | return Err(io::Error::new(io::ErrorKind::Other, err.description())) 189 | } 190 | }; 191 | 192 | // initialize worker communication channel 193 | let _ = close(p_read); 194 | let _ = close(ch_write); 195 | let pipe = PipeFile::new(ch_read, p_write, Arbiter::handle()); 196 | 197 | Ok((pid, pipe)) 198 | } 199 | 200 | fn create_pipes() -> Result<(RawFd, RawFd, RawFd, RawFd), io::Error> { 201 | // open communication pipes 202 | let (p_read, p_write) = match pipe() { 203 | Ok((r, w)) => (r, w), 204 | Err(err) => { 205 | error!("Can not create pipe: {}", err); 206 | return Err(io::Error::new( 207 | io::ErrorKind::Other, format!("Can not create pipe: {}", err))) 208 | } 209 | }; 210 | let (ch_read, ch_write) = match pipe() { 211 | Ok((r, w)) => (r, w), 212 | Err(err) => { 213 | error!("Can not create pipe: {}", err); 214 | return Err(io::Error::new( 215 | io::ErrorKind::Other, format!("Can not create pipe: {}", err))) 216 | } 217 | }; 218 | Ok((p_read, p_write, ch_read, ch_write)) 219 | } 220 | 221 | fn kill(&self, ctx: &mut Context, graceful: bool) { 222 | if graceful { 223 | ctx.notify_later(ProcessMessage::Kill, Duration::new(1, 0)); 224 | } else { 225 | let _ = kill(self.pid, Signal::SIGKILL); 226 | ctx.terminate(); 227 | } 228 | } 229 | } 230 | 231 | impl Drop for Process { 232 | fn drop(&mut self) { 233 | let _ = kill(self.pid, Signal::SIGKILL); 234 | } 235 | } 236 | 237 | impl actix::io::WriteHandler for Process {} 238 | 239 | impl Handler for Process { 240 | type Result = (); 241 | 242 | fn handle(&mut self, msg: ProcessMessage, ctx: &mut Context) { 243 | match msg { 244 | ProcessMessage::Message(msg) => match msg { 245 | WorkerMessage::forked => { 246 | debug!("Worker forked (pid:{})", self.pid); 247 | self.framed.write(WorkerCommand::prepare); 248 | } 249 | WorkerMessage::loaded => { 250 | match self.state { 251 | ProcessState::Starting => { 252 | debug!("Worker loaded 
(pid:{})", self.pid); 253 | self.addr.do_send( 254 | service::ProcessLoaded(self.idx, self.pid)); 255 | 256 | // start heartbeat timer 257 | self.state = ProcessState::Running; 258 | self.hb = Instant::now(); 259 | ctx.notify_later( 260 | ProcessMessage::Heartbeat, Duration::new(HEARTBEAT, 0)); 261 | }, 262 | _ => { 263 | warn!("Received `loaded` message from worker (pid:{})", self.pid); 264 | } 265 | } 266 | } 267 | WorkerMessage::hb => { 268 | self.hb = Instant::now(); 269 | } 270 | WorkerMessage::reload => { 271 | // worker requests reload 272 | info!("Worker requests reload (pid:{})", self.pid); 273 | self.addr.do_send( 274 | service::ProcessMessage( 275 | self.idx, self.pid, WorkerMessage::reload)); 276 | } 277 | WorkerMessage::restart => { 278 | // worker requests reload 279 | info!("Worker requests restart (pid:{})", self.pid); 280 | self.addr.do_send( 281 | service::ProcessMessage( 282 | self.idx, self.pid, WorkerMessage::restart)); 283 | } 284 | WorkerMessage::cfgerror(msg) => { 285 | error!("Worker config error: {} (pid:{})", msg, self.pid); 286 | self.addr.do_send( 287 | service::ProcessFailed( 288 | self.idx, self.pid, ProcessError::ConfigError(msg))); 289 | } 290 | } 291 | ProcessMessage::StartupTimeout => { 292 | if let ProcessState::Starting = self.state { 293 | error!("Worker startup timeout after {} secs", self.startup_timeout); 294 | self.addr.do_send( 295 | service::ProcessFailed( 296 | self.idx, self.pid, ProcessError::StartupTimeout)); 297 | 298 | self.state = ProcessState::Failed; 299 | let _ = kill(self.pid, Signal::SIGKILL); 300 | ctx.stop(); 301 | return 302 | } 303 | } 304 | ProcessMessage::StopTimeout => { 305 | if let ProcessState::Stopping = self.state { 306 | info!("Worker shutdown timeout aftre {} secs", self.shutdown_timeout); 307 | self.addr.do_send( 308 | service::ProcessFailed( 309 | self.idx, self.pid, ProcessError::StopTimeout)); 310 | 311 | self.state = ProcessState::Failed; 312 | let _ = kill(self.pid, Signal::SIGKILL); 313 | ctx.stop(); 314 | return 315 | } 316 | } 317 | ProcessMessage::Heartbeat => { 318 | // makes sense only in running state 319 | if let ProcessState::Running = self.state { 320 | if Instant::now().duration_since(self.hb) > self.timeout { 321 | // heartbeat timed out 322 | error!("Worker heartbeat failed (pid:{}) after {:?} secs", 323 | self.pid, self.timeout); 324 | self.addr.do_send( 325 | service::ProcessFailed( 326 | self.idx, self.pid, ProcessError::Heartbeat)); 327 | } else { 328 | // send heartbeat to worker process and reset hearbeat timer 329 | self.framed.write(WorkerCommand::hb); 330 | ctx.notify_later( 331 | ProcessMessage::Heartbeat, Duration::new(HEARTBEAT, 0)); 332 | } 333 | } 334 | } 335 | ProcessMessage::Kill => { 336 | let _ = kill(self.pid, Signal::SIGKILL); 337 | ctx.stop(); 338 | return 339 | } 340 | } 341 | } 342 | } 343 | 344 | #[derive(Message)] 345 | pub struct SendCommand(pub WorkerCommand); 346 | 347 | impl Handler for Process { 348 | type Result = (); 349 | 350 | fn handle(&mut self, msg: SendCommand, _: &mut Context) { 351 | self.framed.write(msg.0); 352 | } 353 | } 354 | 355 | #[derive(Message)] 356 | pub struct StartProcess; 357 | 358 | impl Handler for Process { 359 | type Result = (); 360 | 361 | fn handle(&mut self, _: StartProcess, _: &mut Context) { 362 | self.framed.write(WorkerCommand::start); 363 | } 364 | } 365 | 366 | #[derive(Message)] 367 | pub struct PauseProcess; 368 | 369 | impl Handler for Process { 370 | type Result = (); 371 | 372 | fn handle(&mut self, _: PauseProcess, _: &mut 
Context) { 373 | self.framed.write(WorkerCommand::pause); 374 | } 375 | } 376 | 377 | #[derive(Message)] 378 | pub struct ResumeProcess; 379 | 380 | impl Handler for Process { 381 | type Result = (); 382 | 383 | fn handle(&mut self, _: ResumeProcess, _: &mut Context) { 384 | self.framed.write(WorkerCommand::resume); 385 | } 386 | } 387 | 388 | #[derive(Message)] 389 | pub struct StopProcess; 390 | 391 | impl Handler for Process { 392 | type Result = (); 393 | 394 | fn handle(&mut self, _: StopProcess, ctx: &mut Context) 395 | { 396 | info!("Stopping worker: (pid:{})", self.pid); 397 | match self.state { 398 | ProcessState::Running => { 399 | self.state = ProcessState::Stopping; 400 | 401 | self.framed.write(WorkerCommand::stop); 402 | ctx.notify_later( 403 | ProcessMessage::StopTimeout, 404 | Duration::new(self.shutdown_timeout, 0)); 405 | let _ = kill(self.pid, Signal::SIGTERM); 406 | }, 407 | _ => { 408 | let _ = kill(self.pid, Signal::SIGQUIT); 409 | ctx.terminate(); 410 | } 411 | } 412 | } 413 | } 414 | 415 | #[derive(Message)] 416 | pub struct QuitProcess(pub bool); 417 | 418 | impl Handler for Process { 419 | type Result = (); 420 | 421 | fn handle(&mut self, msg: QuitProcess, ctx: &mut Context) { 422 | if msg.0 { 423 | let _ = kill(self.pid, Signal::SIGQUIT); 424 | self.kill(ctx, true); 425 | } else { 426 | self.kill(ctx, false); 427 | let _ = kill(self.pid, Signal::SIGKILL); 428 | ctx.terminate(); 429 | } 430 | } 431 | } 432 | 433 | pub struct TransportCodec; 434 | 435 | impl Decoder for TransportCodec { 436 | type Item = ProcessMessage; 437 | type Error = io::Error; 438 | 439 | fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { 440 | let size = { 441 | if src.len() < 2 { 442 | return Ok(None) 443 | } 444 | BigEndian::read_u16(src.as_ref()) as usize 445 | }; 446 | 447 | if src.len() >= size + 2 { 448 | src.split_to(2); 449 | let buf = src.split_to(size); 450 | Ok(Some(ProcessMessage::Message(json::from_slice::(&buf)?))) 451 | } else { 452 | Ok(None) 453 | } 454 | } 455 | } 456 | 457 | impl Encoder for TransportCodec { 458 | type Item = WorkerCommand; 459 | type Error = io::Error; 460 | 461 | fn encode(&mut self, msg: WorkerCommand, dst: &mut BytesMut) -> Result<(), Self::Error> { 462 | let msg = json::to_string(&msg).unwrap(); 463 | let msg_ref: &[u8] = msg.as_ref(); 464 | 465 | dst.reserve(msg_ref.len() + 2); 466 | dst.put_u16::(msg_ref.len() as u16); 467 | dst.put(msg_ref); 468 | 469 | Ok(()) 470 | } 471 | } 472 | -------------------------------------------------------------------------------- /src/service.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std; 4 | use std::time::Duration; 5 | use nix::unistd::Pid; 6 | 7 | use actix::prelude::*; 8 | use actix::Response; 9 | use futures::Future; 10 | 11 | use event::{Event, Reason}; 12 | use config::ServiceConfig; 13 | use worker::{Worker, WorkerMessage}; 14 | use process::ProcessError; 15 | 16 | /// Service state 17 | enum ServiceState { 18 | Running, 19 | Failed, 20 | Stopped, 21 | Starting(actix::Condition), 22 | Reloading(actix::Condition), 23 | Stopping(actix::Condition<()>), 24 | } 25 | 26 | impl ServiceState { 27 | 28 | fn description(&self) -> &'static str { 29 | match *self { 30 | ServiceState::Running => "running", 31 | ServiceState::Failed => "failed", 32 | ServiceState::Stopped => "stopped", 33 | ServiceState::Starting(_) => "starting", 34 | ServiceState::Reloading(_) => "reloading", 35 | ServiceState::Stopping(_) => "stopping", 36 | 
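// Editor's note: the `Starting`, `Reloading` and `Stopping` variants carry an
// `actix::Condition` that `FeService::update()` below completes via
// `task.set(..)`, which appears to be how pending start/reload/stop commands
// learn the outcome of the transition.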
        }
    }

    fn error(&self) -> ServiceOperationError {
        match *self {
            ServiceState::Running => ServiceOperationError::Running,
            ServiceState::Failed => ServiceOperationError::Failed,
            ServiceState::Stopped => ServiceOperationError::Stopped,
            ServiceState::Starting(_) => ServiceOperationError::Starting,
            ServiceState::Reloading(_) => ServiceOperationError::Reloading,
            ServiceState::Stopping(_) => ServiceOperationError::Stopping,
        }
    }
}

#[derive(Debug)]
/// Service errors
pub enum ServiceOperationError {
    Starting,
    Reloading,
    Stopping,
    Running,
    Stopped,
    Failed,
}

#[derive(Clone, Debug)]
pub enum StartStatus {
    Success,
    Failed,
    Stopping,
}

#[derive(Clone, Debug)]
pub enum ReloadStatus {
    Success,
    Failed,
    Stopping,
}

pub struct FeService {
    name: String,
    state: ServiceState,
    paused: bool,
    workers: Vec<Worker>,
}

impl FeService {

    pub fn start(num: u16, cfg: ServiceConfig) -> Addr<Unsync, FeService>
    {
        FeService::create(move |ctx| {
            // create workers
            let mut workers = Vec::new();
            for idx in 0..num as usize {
                workers.push(Worker::new(idx, cfg.clone(), ctx.address()));
            }

            FeService {
                name: cfg.name.clone(),
                state: ServiceState::Starting(actix::Condition::default()),
                paused: false,
                workers}
        })
    }

    fn check_loading_workers(&mut self, restart_stopped: bool) -> (bool, bool) {
        let mut in_process = false;
        let mut failed = false;

        for worker in &mut self.workers {
            if worker.is_failed() {
                failed = true;
            }
            else if worker.is_stopped() {
                if restart_stopped {
                    // strange
                    worker.reload(true, Reason::None);
                    in_process = true;
                }
            }
            else if !worker.is_running() {
                in_process = true;
            }
        }
        (failed, in_process)
    }

    // update internal state
    fn update(&mut self) {
        let state = std::mem::replace(&mut self.state, ServiceState::Failed);

        match state {
            ServiceState::Starting(task) => {
                let (failed, in_process) = self.check_loading_workers(true);

                // if we have failed workers, stop all and change service state to failed
                if failed {
                    if in_process {
                        for worker in &mut self.workers {
                            if !(worker.is_stopped() || worker.is_failed()) {
                                worker.stop(Reason::SomeWorkersFailed)
                            }
                        }
                        self.state = ServiceState::Starting(task);
                    } else {
                        task.set(StartStatus::Failed);
                        self.state = ServiceState::Failed;
                    }
                } else {
                    if !in_process {
                        task.set(StartStatus::Success);
                        self.state = ServiceState::Running;
                    } else {
                        self.state = ServiceState::Starting(task);
                    }
                }
            },
            ServiceState::Reloading(task) => {
                let (failed, in_process) = self.check_loading_workers(true);

                // if we have failed workers, stop all and change service state to failed
                if failed {
                    if in_process {
                        for worker in &mut self.workers {
                            if !(worker.is_stopped() || worker.is_failed()) {
                                worker.stop(Reason::SomeWorkersFailed)
                            }
                        }
                        self.state = ServiceState::Reloading(task);
                    } else {
                        task.set(ReloadStatus::Failed);
                        self.state = ServiceState::Failed;
                    }
                } else {
                    if !in_process {
                        task.set(ReloadStatus::Success);
                        self.state = ServiceState::Running;
                    } else {
                        self.state = ServiceState::Reloading(task);
                    }
                }
            },
            ServiceState::Stopping(task) => {
                let (_, in_process) = self.check_loading_workers(false);

                if !in_process {
                    task.set(());
                    self.state = ServiceState::Stopped;
                } else {
                    self.state = ServiceState::Stopping(task);
                }
            },
            state => self.state = state,
        }
    }

    fn message(&mut self, pid: Pid, message: WorkerMessage) {
        for worker in &mut self.workers {
            worker.message(pid, &message)
        }
    }

}


impl Actor for FeService {

    type Context = Context<Self>;

    fn started(&mut self, _: &mut Context<FeService>) {
        // start workers
        for worker in &mut self.workers {
            worker.start(Reason::Initial);
        }
    }
}

#[derive(Message)]
pub struct ProcessMessage(pub usize, pub Pid, pub WorkerMessage);

impl Handler<ProcessMessage> for FeService {
    type Result = ();

    fn handle(&mut self, msg: ProcessMessage, _: &mut Context<FeService>) {
        self.workers[msg.0].message(msg.1, &msg.2);
        self.update();
    }
}

#[derive(Message)]
pub struct ProcessFailed(pub usize, pub Pid, pub ProcessError);

impl Handler<ProcessFailed> for FeService {
    type Result = ();

    fn handle(&mut self, msg: ProcessFailed, ctx: &mut Context<FeService>) {
        // TODO: delay failure processing, needs better approach
        ctx.run_later(Duration::new(5, 0), move |act, _| {
            act.workers[msg.0].exited(msg.1, &msg.2);
            act.update();
        });
    }
}

#[derive(Message)]
pub struct ProcessLoaded(pub usize, pub Pid);

impl Handler<ProcessLoaded> for FeService {
    type Result = ();

    fn handle(&mut self, msg: ProcessLoaded, _: &mut Context<FeService>) {
        self.workers[msg.0].loaded(msg.1);
        self.update();
    }
}

#[derive(Message)]
pub struct ProcessExited(pub Pid, pub ProcessError);

impl Handler<ProcessExited> for FeService {
    type Result = ();

    fn handle(&mut self, msg: ProcessExited, _: &mut Context<FeService>) {
        for worker in &mut self.workers {
            worker.exited(msg.0, &msg.1);
        }
        self.update();
    }
}

/// Service status command
pub struct Pids;

impl Message for Pids {
    type Result = Vec<String>;
}

impl Handler<Pids> for FeService {
    type Result = MessageResult<Pids>;

    fn handle(&mut self, _: Pids, _: &mut Context<FeService>) -> Self::Result {
        let mut pids = Vec::new();
        for worker in &self.workers {
            if let Some(pid) = worker.pid() {
                pids.push(format!("{}", pid));
            }
        }
        MessageResult(pids)
    }
}

/// Service status command
pub struct Status;

impl Message for Status {
    type Result = Result<(String, Vec<(String, Vec<Event>)>), ()>;
}

impl Handler<Status> for FeService {
    type Result = Result<(String, Vec<(String, Vec<Event>)>), ()>;

    fn handle(&mut self, _: Status, _: &mut Context<FeService>) -> Self::Result {
        let mut events: Vec<(String, Vec<Event>)> = Vec::new();
        for worker in &self.workers {
            events.push(
                (format!("worker({})", worker.idx + 1), Vec::from(&worker.events)));
        }

        let status = match self.state {
            ServiceState::Running => if self.paused { "paused" } else { "running" }
            _ => self.state.description()
        };
        Ok((status.to_owned(), events))
    }
}

/// Start service command
pub struct Start;

impl Message for Start {
    type Result = Result<StartStatus, ServiceOperationError>;
}

impl Handler<Start> for FeService {
    type Result = Response<StartStatus, ServiceOperationError>;

    fn handle(&mut self, _: Start, _: &mut Context<FeService>) -> Self::Result
    {
        match self.state {
            ServiceState::Starting(ref mut task) => {
                Response::async(task.wait().map_err(|_| ServiceOperationError::Failed))
            }
            ServiceState::Failed | ServiceState::Stopped => {
                debug!("Starting service: {:?}", self.name);
                let mut task = actix::Condition::default();
                let rx = task.wait();
                self.paused = false;
                self.state = ServiceState::Starting(task);
                for worker in &mut self.workers {
                    worker.start(Reason::ConsoleRequest);
                }
                Response::async(rx.map_err(|_| ServiceOperationError::Failed))
            }
            _ => Response::reply(Err(self.state.error()))
        }
    }
}

/// Pause service command
pub struct Pause;

impl Message for Pause {
    type Result = Result<(), ServiceOperationError>;
}

impl Handler<Pause> for FeService {
    type Result = Result<(), ServiceOperationError>;

    fn handle(&mut self, _: Pause, _: &mut Context<FeService>) -> Self::Result
    {
        match self.state {
            ServiceState::Running => {
                debug!("Pause service: {:?}", self.name);
                for worker in &mut self.workers {
                    worker.pause(Reason::ConsoleRequest);
                }
                self.paused = true;
                Ok(())
            }
            _ => Err(self.state.error())
        }
    }
}

/// Resume service command
pub struct Resume;

impl Message for Resume {
    type Result = Result<(), ServiceOperationError>;
}

impl Handler<Resume> for FeService {
    type Result = Result<(), ServiceOperationError>;

    fn handle(&mut self, _: Resume, _: &mut Context<FeService>) -> Self::Result {
        match self.state {
            ServiceState::Running => {
                debug!("Resume service: {:?}", self.name);
                for worker in &mut self.workers {
                    worker.resume(Reason::ConsoleRequest);
                }
                self.paused = false;
                Ok(())
            }
            _ => Err(self.state.error())
        }
    }
}

/// Reload service
pub struct Reload(pub bool);

impl Message for Reload {
    type Result = Result<ReloadStatus, ServiceOperationError>;
}

impl Handler<Reload> for FeService {
    type Result = Response<ReloadStatus, ServiceOperationError>;

    fn handle(&mut self, msg: Reload, _: &mut Context<FeService>) -> Self::Result {
        match self.state {
            ServiceState::Reloading(ref mut task) => {
                Response::async(task.wait().map_err(|_| ServiceOperationError::Failed))
            }
            ServiceState::Running | ServiceState::Failed | ServiceState::Stopped => {
                debug!("Reloading service: {:?}", self.name);
                let mut task = actix::Condition::default();
                let rx = task.wait();
                self.paused = false;
                self.state = ServiceState::Reloading(task);
                for worker in &mut self.workers {
                    worker.reload(msg.0, Reason::ConsoleRequest);
                }
                Response::async(rx.map_err(|_| ServiceOperationError::Failed))
            }
            _ => Response::reply(Err(self.state.error()))
        }
    }
}

/// Stop service command
pub struct Stop(pub bool, pub Reason);

impl Message for Stop {
    type Result = Result<(), ()>;
}

impl Handler<Stop> for FeService {
    type Result = Response<(), ()>;

    fn handle(&mut self, msg: Stop, _: &mut Context<FeService>) -> Self::Result {
        let state = std::mem::replace(&mut self.state, ServiceState::Stopped);

        match state {
            ServiceState::Failed | ServiceState::Stopped => {
                self.state = state;
                return Response::reply(Err(()))
            },
            ServiceState::Stopping(mut task) => {
                let rx = task.wait();
                self.state = ServiceState::Stopping(task);
                return Response::async(rx.map(|_| ()).map_err(|_| ()));
            },
            ServiceState::Starting(task) => {
                task.set(StartStatus::Stopping);
            }
            ServiceState::Reloading(task) => {
                task.set(ReloadStatus::Stopping);
            }
            ServiceState::Running => ()
        }

        // stop workers
        let mut task = actix::Condition::default();
        let rx = task.wait();
        self.paused = false;
        self.state = ServiceState::Stopping(task);
        for worker in &mut self.workers {
            if msg.0 {
                worker.stop(msg.1.clone());
            } else {
                worker.quit(msg.1.clone());
            }
        }
        self.update();

        Response::async(rx.map(|_| ()).map_err(|_| ()))
    }
}
--------------------------------------------------------------------------------
/src/socket.rs:
--------------------------------------------------------------------------------
use std;
use std::io;
use std::net::TcpListener;
use std::error::Error;
use std::os::unix::io::AsRawFd;

use serde_json as json;
use net2::TcpBuilder;
use net2::unix::UnixTcpBuilderExt;
use nix::fcntl::{fcntl, FcntlArg, FdFlag, FD_CLOEXEC};

use addrinfo;
use config::{Proto, SocketConfig};


pub struct Socket {
    pub name: String,
    pub listener: TcpListener,
    pub info: addrinfo::AddrInfo,
}


impl Socket {

    fn new(name: String, listener: TcpListener,
           info: addrinfo::AddrInfo, cfg: &SocketConfig) -> Socket {
        let fd = listener.as_raw_fd();
        std::env::set_var(format!("FECTL_FD_{}", name),
                          format!("{},FAMILY:{},SOCKETTYPE:{},PROTO:{}",
                                  fd.to_string(),
                                  info.family.to_int(),
                                  info.socktype.to_int(),
                                  info.protocol.to_int()));
        // loader
        if let Some(ref app) = cfg.app {
            std::env::set_var(format!("FECTL_APP_{}", name), app);

            // encode arguments
            if !cfg.arguments.is_empty() {
                let args = json::to_string(&cfg.arguments).unwrap();
                std::env::set_var(format!("FECTL_ARGS_{}", name), args);
            }
        }

        let mut flags = FdFlag::from_bits_truncate(fcntl(fd, FcntlArg::F_GETFD).unwrap());
        flags.remove(FD_CLOEXEC);
        let _ = fcntl(fd, FcntlArg::F_SETFD(flags));

        Socket {
            name,
            listener,
            info,
        }
    }

    pub fn load_config(cfg: &[SocketConfig]) -> Result<Vec<Socket>, std::io::Error>
    {
        let mut services = Vec::new();

        for sock in cfg.iter() {
            // resolve addresses
            let lookup = addrinfo::lookup_addrinfo(
                sock.host.clone(), Some(sock.port.to_string()), 0,
                addrinfo::AI_PASSIVE, addrinfo::SocketType::Stream)?;
            let addrs: Vec<addrinfo::AddrInfo> = lookup.collect();
            if addrs.is_empty() {
                return Err(io::Error::new(
                    io::ErrorKind::Other, "getaddrinfo() returned empty list"))
            }

            // start listen
            let mut found = false;
            for addr in addrs {
                let builder = match addr.family {
                    addrinfo::Family::Inet => {
                        if sock.proto == Proto::tcp6 {
                            continue
                        }
                        if let Ok(b) = TcpBuilder::new_v4() {
                            b
                        } else {
                            continue
                        }
                    }
                    addrinfo::Family::Inet6 => {
                        if sock.proto == Proto::tcp4 {
                            continue
                        }
                        if let Ok(b) = TcpBuilder::new_v6() {
                            let _ = b.only_v6(true);
                            b
                        } else {
                            continue
                        }
                    },
                    _ => continue
                };

                let _ = builder.reuse_address(true);
                let _ = builder.reuse_port(true);

                match builder.bind(addr.sockaddr) {
                    Ok(_) => {
                        if let Ok(lst) = builder.listen(i32::from(sock.backlog)) {
                            info!("Init listener on {:?}", addr.sockaddr);
                            let mut addr = addr.clone();
                            addr.sockaddr = lst.local_addr().expect("should not fail");
                            services.push(Socket::new(sock.name.clone(), lst, addr, sock));
                            found = true;
                            break;
                        }
                    },
                    Err(err) => {
                        println!("Can not bind to address: \"{}\" {:?}",
                                 addr.sockaddr, err.description());
                    }
                }
            }
            if !found {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!("Can not start listener for `{}` service", sock.name)))
            }
        }
        Ok(services)
    }
}


impl Drop for Socket {

    fn drop(&mut self) {
        // remove the variable set in Socket::new
        std::env::remove_var(format!("FECTL_FD_{}", self.name));
    }
}
--------------------------------------------------------------------------------
/src/utils.rs:
--------------------------------------------------------------------------------
use std::env;
use std::path::Path;
use std::ffi::CString;

use nix::unistd::Pid;


/// find file in `PATH` environ
pub(crate) fn find_path(name: &str) -> Option<String>
{
    let path = Path::new(name);
    if path.is_file() {
        return Some(path.to_string_lossy().as_ref().to_owned())
    }

    env::var_os("PATH").and_then(|paths| {
        env::split_paths(&paths).filter_map(|dir| {
            let full_path = dir.join(&path);
            if full_path.is_file() {
                Some(full_path.to_string_lossy().as_ref().to_owned())
            } else {
                None
            }
        }).next()
    })
}

pub fn get_env_vars(all: bool) -> Vec<CString> {
    let mut env = Vec::new();
    for (k, v) in env::vars() {
        if (all && !k.starts_with('_')) || (
            k.starts_with("FECTL_FD") || k.starts_with("LANG") || k.starts_with("LC_"))
        {
            env.push(CString::new(format!("{}={}", k, v)).unwrap());
        }
    }
    env
}


pub fn str(pid: Pid) -> Option<String> {
    Some(format!("{}", pid))
}
--------------------------------------------------------------------------------
/tests/asyncio_tests.py:
--------------------------------------------------------------------------------
import asyncio
import textwrap

from aiohttp.web import Application, Response, StreamResponse, run_app


def app(worker):
    pass


async def intro(request):
    txt = textwrap.dedent("""\
        Type {url}/hello/John {url}/simple or {url}/change_body
        in browser url bar
    """).format(url='127.0.0.1:8080')
    binary = txt.encode('utf8')
    resp = StreamResponse()
    resp.content_length = len(binary)
    resp.content_type = 'text/plain'
    await resp.prepare(request)
    resp.write(binary)
    return resp


async def simple(request):
    return Response(text="Simple answer")


async def init():
    app = Application()
    app.router.add_get('/', intro)
    app.router.add_get('/simple', simple)
    return app
--------------------------------------------------------------------------------
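
Note on the master/worker wire format: the `TransportCodec` in src/process.rs frames every message as a 2-byte big-endian length prefix followed by a JSON payload. The standalone sketch below (not part of the repository) illustrates only that framing; it uses the standard library's `u16::to_be_bytes`/`from_be_bytes` instead of the `byteorder` crate used above, and the `{"cmd":"pause"}` payload is a hypothetical stand-in for an actual serialized `WorkerCommand`, whose exact JSON shape is defined elsewhere in the sources.

    // Minimal framing sketch, assuming only the 2-byte BE length prefix + JSON body.
    fn encode_frame(payload: &[u8]) -> Vec<u8> {
        // length prefix first, then the raw payload bytes
        let mut buf = Vec::with_capacity(payload.len() + 2);
        buf.extend_from_slice(&(payload.len() as u16).to_be_bytes());
        buf.extend_from_slice(payload);
        buf
    }

    fn decode_frame(src: &[u8]) -> Option<&[u8]> {
        // mirrors TransportCodec::decode: wait for the prefix, then for `size` bytes
        if src.len() < 2 {
            return None;
        }
        let size = u16::from_be_bytes([src[0], src[1]]) as usize;
        if src.len() >= size + 2 {
            Some(&src[2..2 + size])
        } else {
            None
        }
    }

    fn main() {
        let payload = br#"{"cmd":"pause"}"#; // hypothetical serialized command
        let frame = encode_frame(payload);
        assert_eq!(frame.len(), payload.len() + 2);
        assert_eq!(decode_frame(&frame), Some(&payload[..]));
        println!("round-trip ok: {} payload bytes", payload.len());
    }

Because the prefix is a u16, a single frame is limited to 65535 bytes of JSON, which is why the encoder reserves `len + 2` bytes and casts the length with `as u16`.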