├── .github ├── dependabot.yml └── workflows │ └── rust.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── client-core ├── Cargo.toml └── src │ ├── codec.rs │ ├── id_gen.rs │ ├── lib.rs │ ├── manager.rs │ ├── quic_conn.rs │ └── tcp_conn.rs ├── client-proto ├── Cargo.toml ├── build.rs └── src │ └── lib.rs ├── client-uniffi ├── Cargo.toml ├── build.rs ├── src │ ├── featureprobe_link.udl │ └── lib.rs ├── tests │ ├── bindings │ │ ├── test.kts │ │ └── test.swift │ └── test_generated_bindings.rs └── uniffi.toml ├── fplink.png ├── protos ├── client.proto └── service.proto ├── server-base ├── Cargo.toml └── src │ ├── config.rs │ ├── conn.rs │ ├── context.rs │ ├── id_gen.rs │ ├── lib.rs │ ├── node.rs │ └── utils.rs ├── server-core ├── Cargo.toml └── src │ ├── lib.rs │ ├── lifecycle.rs │ ├── operator.rs │ └── repository.rs ├── server-grpc ├── Cargo.toml └── src │ ├── cluster.rs │ ├── dispatcher.rs │ ├── lib.rs │ ├── operator.rs │ └── server.rs ├── server-hproxy ├── Cargo.toml └── src │ ├── http.rs │ ├── lib.rs │ └── proxy.rs ├── server-listener ├── Cargo.toml └── src │ ├── accepter.rs │ ├── codec.rs │ ├── lib.rs │ ├── listener.rs │ ├── peek.rs │ ├── quic.rs │ ├── tcp.rs │ ├── tls.rs │ └── ws.rs ├── server-proto ├── Cargo.toml ├── build.rs └── src │ └── lib.rs ├── server-state ├── Cargo.toml └── src │ ├── cluster.rs │ ├── lib.rs │ ├── service.rs │ └── state.rs ├── server ├── Cargo.toml ├── build.rs └── src │ └── lib.rs └── src ├── lib.rs └── main.rs /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # Rust/Cargo workspace; see documentation for other possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | 13 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: Build 20 | run: cargo build --verbose 21 | - name: Run tests 22 | run: cargo test --verbose 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | **/target/ 5 | 6 | # These are backup files generated by rustfmt 7 | **/*.rs.bk 8 | 9 | .env 10 | .DS_Store 11 | *.swp 12 | *.idea 13 | *.code-workspace 14 | *.vscode 15 | log 16 | logs 17 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "feature-probe-link" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [workspace] 9 | members = [ 10 | "client-core", 11 | "client-proto", 12 | "client-uniffi", 13 | "server-proto", 14 | "server-base", 15 | "server-state", 16 | "server-core", 17 | "server-listener", 18 | "server-hproxy", 19 | 
"server-grpc", 20 | "server", 21 | ] 22 | 23 | [features] 24 | default = ["server"] 25 | client = [] 26 | 27 | [dependencies] 28 | 29 | server = { optional = true, path = "./server" } 30 | 31 | # this profile is for client, should replace in Makefile when build for server 32 | [profile.release] 33 | lto = true 34 | strip = true # Automatically strip symbols from the binary. 35 | opt-level = "s" # Optimize for size. 36 | codegen-units = 1 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2022 FeatureProbe 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FeatureProbe Link (Alpha Version) 2 | 3 | This is an alpha version and should not be considered ready for production use while this message is visible. 4 | 5 | FeatureProbe Link is a **general-purpose** full-duplex network component that makes the information flow between `Client` and `Business Service` easier. 6 | 7 | For mobile devices, FeatureProbe Link supports TCP and QUIC protocols. For pure JavaScript environments (like Browser or Electron), the WebSocket protocol is supported. 8 | 9 | Apps can use the `Link Client` SDK to receive bytes from a `Business Service` (usually the developer's micro-service) or send bytes to a `Business Service` through the `Link Server`. The most common scenario is Instant-Messaging Apps. 10 | 11 | FeatureProbe uses this component to make toggle updates blazing fast. 12 | 13 | ![FeatureProbe Link Architecture](./fplink.png) 14 | ## Contributing 15 | We are working on continuously evolving FeatureProbe core, making it flexible and easier to use. 
16 | Development of FeatureProbe happens in the open on GitHub, and we are grateful to the 17 | community for contributing bugfixes and improvements. 18 | 19 | Please read [CONTRIBUTING](https://github.com/FeatureProbe/featureprobe/blob/master/CONTRIBUTING.md) 20 | for details on our code of conduct, and the process for taking part in improving FeatureProbe. 21 | 22 | ## License 23 | 24 | This project is licensed under the Apache 2.0 License - see the [LICENSE](LICENSE) file for details. 25 | -------------------------------------------------------------------------------- /client-core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "client_core" 3 | version = "0.1.0" 4 | edition = "2021" 5 | resolver = "2" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | async-trait = "0.1" 11 | bytes = "1.0" 12 | backtrace = "0.3" 13 | crossbeam-channel = "0.5" 14 | log = { version = "0.4", features = ["std", "release_max_level_info"] } 15 | serde = { version = "1.0", features = ["derive"] } 16 | serde_json = "1.0" 17 | stream-cancel = "0.8" 18 | time = "0.3" 19 | url = "2.2" 20 | webpki = "0.21" 21 | quinn = { version = "0.8", default-features = false, features = ["tls-rustls"] } 22 | tokio = { version = "1", features = ["full"] } 23 | tokio-util = { version = "0.7", features = ["codec"] } 24 | tokio-rustls = { version = "0.23", features = ["dangerous_configuration"] } 25 | rustls = { version = "0.20", features = ["dangerous_configuration"] } 26 | rustls-pemfile = "0.2.1" 27 | futures = "0.3" 28 | anyhow = "1.0.22" 29 | lazy_static = "1.4" 30 | parking_lot = "0.11" 31 | flate2 = "1.0.19" 32 | thiserror = "1.0" 33 | reqwest = { version = "0.11", default-features = false, features = [ 34 | "json", 35 | "rustls-tls", 36 | "gzip", 37 | ] } 38 | 39 | client-proto = { path = "../client-proto" } 40 | 41 | [dev-dependencies] 42 | 
directories-next = "1.0.1" 43 | tracing-appender = "0.2" 44 | tracing-core = "0.1" 45 | tracing-log = "0.1" 46 | tracing-subscriber = { version = "0.3", features = ["local-time", "env-filter"] } 47 | -------------------------------------------------------------------------------- /client-core/src/codec.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use bytes::{BufMut, BytesMut}; 3 | use client_proto as codec; 4 | use client_proto::proto::packet::Packet; 5 | use tokio_util::codec::{Decoder as TokioDecoder, Encoder as TokioEncoder}; 6 | 7 | #[derive(Default)] 8 | pub struct Codec { 9 | codec: codec::Codec, 10 | } 11 | 12 | impl TokioEncoder for Codec { 13 | type Error = Error; 14 | 15 | fn encode(&mut self, item: Packet, dst: &mut BytesMut) -> Result<(), Self::Error> { 16 | let bytes = self.codec.encode(item)?; 17 | if dst.remaining_mut() < bytes.len() { 18 | dst.reserve(bytes.len()); 19 | } 20 | dst.put(bytes); 21 | Ok(()) 22 | } 23 | } 24 | 25 | impl TokioDecoder for Codec { 26 | type Item = Packet; 27 | type Error = Error; 28 | 29 | fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { 30 | if buf.is_empty() { 31 | return Ok(None); 32 | } 33 | self.codec.decode(buf).map_err(|e| e.into()) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /client-core/src/id_gen.rs: -------------------------------------------------------------------------------- 1 | use lazy_static::lazy_static; 2 | use parking_lot::Mutex; 3 | 4 | lazy_static! 
{ 5 | pub static ref ID_GEN: IdGen = IdGen::new(); 6 | } 7 | 8 | #[derive(Default)] 9 | pub struct IdGen { 10 | inner: Mutex, 11 | } 12 | 13 | #[derive(Default)] 14 | struct IdGenInner { 15 | cur_seq: u64, 16 | cur_ts: u64, 17 | } 18 | 19 | impl IdGen { 20 | pub fn new() -> Self { 21 | Self { 22 | inner: Mutex::new(IdGenInner::default()), 23 | } 24 | } 25 | 26 | pub fn generate(&self, addr: &str, protocol: &str) -> String { 27 | let ts = crate::now_ts(); 28 | let mut inner = self.inner.lock(); 29 | inner.cur_seq += 1; 30 | inner.cur_ts = ts; 31 | 32 | format!("{}_{}_{}_{}", protocol, addr, ts, inner.cur_seq) 33 | } 34 | } 35 | 36 | #[cfg(test)] 37 | mod tests { 38 | use super::*; 39 | 40 | #[test] 41 | fn test_generate_extract() { 42 | let addr = "1.2.3.4"; 43 | let id_gen = IdGen::new(); 44 | let unique_id = id_gen.generate(addr, "tcp"); 45 | println!("gen id {}", unique_id); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /client-core/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod codec; 2 | mod id_gen; 3 | mod manager; 4 | mod quic_conn; 5 | mod tcp_conn; 6 | 7 | pub use client_proto::proto::Message; 8 | 9 | use async_trait::async_trait; 10 | use client_proto::proto::packet::Packet; 11 | use lazy_static::lazy_static; 12 | use rustls::{Certificate, ServerName}; 13 | use std::{collections::HashMap, sync::Arc, sync::Weak, time::SystemTime}; 14 | use thiserror::Error; 15 | use tokio::runtime::{Builder, Runtime}; 16 | 17 | lazy_static! 
{ 18 | pub static ref TOKIO_RUNTIME: Runtime = Builder::new_multi_thread() 19 | .enable_all() 20 | .worker_threads(4) 21 | .thread_name("featureprobe") 22 | .build() 23 | .expect("can not start tokio runtime"); 24 | } 25 | 26 | pub trait PlatformCallback: Send + Sync { 27 | fn auth(&self); 28 | fn recv(&self, message: Message); 29 | fn state_change(&self, old: u8, new: u8); 30 | } 31 | 32 | #[derive(Clone, Copy, PartialEq, Eq)] 33 | pub enum NetworkType { 34 | TypeUnknown, 35 | TypeNoNet, 36 | TypeWiFi, 37 | Type2G, 38 | Type3G, 39 | Type4G, 40 | Type5G, 41 | } 42 | 43 | impl Default for NetworkType { 44 | fn default() -> Self { 45 | NetworkType::TypeUnknown 46 | } 47 | } 48 | 49 | pub struct LinkClient {} 50 | 51 | impl LinkClient { 52 | #[allow(clippy::new_without_default)] 53 | pub fn new() -> Self { 54 | let _enter = TOKIO_RUNTIME.enter(); 55 | 56 | Self {} 57 | } 58 | 59 | pub fn open(&self) {} 60 | 61 | pub fn send(&self, _message: Message) {} 62 | 63 | pub fn close(&self) {} 64 | 65 | pub fn state(&self) -> u8 { 66 | 0 67 | } 68 | 69 | pub fn go_background(&self) {} 70 | 71 | pub fn go_foreground(&self) {} 72 | 73 | pub fn network_change(&self, _old: NetworkType, _new: NetworkType) {} 74 | 75 | pub fn set_attrs(&self, _attrs: HashMap) {} 76 | } 77 | 78 | #[derive(Debug, Error)] 79 | pub enum Error { 80 | #[error("SendError {msg}")] 81 | SendError { msg: String }, 82 | 83 | #[error("RecvError {msg}")] 84 | RecvError { msg: String }, 85 | } 86 | 87 | #[async_trait] 88 | pub trait Connection: Send + Sync { 89 | async fn open(&self) -> bool; 90 | 91 | async fn send(&self, packet: Packet) -> Result<(), Error>; 92 | 93 | async fn close(&self); 94 | 95 | async fn state(&self) -> u8; 96 | 97 | async fn is_same_conn(&self, unique_id: &str) -> bool; 98 | } 99 | 100 | #[derive(Clone)] 101 | struct WeakManager { 102 | manager: Option>, 103 | } 104 | 105 | impl WeakManager { 106 | fn new(manager: Option>) -> Self { 107 | Self { manager } 108 | } 109 | 110 | fn 
upgrade(&self) -> Option> { 111 | if let Some(ref manager) = self.manager { 112 | if let Some(manager) = manager.upgrade() { 113 | return Some(manager); 114 | } 115 | } 116 | None 117 | } 118 | } 119 | 120 | #[derive(Debug, PartialEq, Copy, Clone)] 121 | pub enum State { 122 | Init = 1, 123 | Connecting = 2, 124 | Connected = 3, 125 | DisConnected = 4, 126 | Closed = 5, 127 | } 128 | 129 | impl Default for State { 130 | fn default() -> Self { 131 | State::Init 132 | } 133 | } 134 | 135 | #[allow(clippy::from_over_into)] 136 | impl Into for State { 137 | fn into(self) -> usize { 138 | self as usize 139 | } 140 | } 141 | 142 | #[allow(clippy::from_over_into)] 143 | impl Into for State { 144 | fn into(self) -> u8 { 145 | self as u8 146 | } 147 | } 148 | 149 | pub fn now_ts() -> u64 { 150 | let start = std::time::SystemTime::now(); 151 | let since_the_epoch = start 152 | .duration_since(std::time::UNIX_EPOCH) 153 | .expect("time went backwards"); 154 | since_the_epoch.as_millis() as u64 155 | } 156 | 157 | // for test 158 | pub struct SkipServerVerification; 159 | 160 | impl SkipServerVerification { 161 | pub fn new() -> Arc { 162 | Arc::new(Self) 163 | } 164 | } 165 | 166 | impl rustls::client::ServerCertVerifier for SkipServerVerification { 167 | fn verify_server_cert( 168 | &self, 169 | _end_entity: &rustls::Certificate, 170 | _intermediates: &[Certificate], 171 | _server_name: &ServerName, 172 | _scts: &mut dyn Iterator, 173 | _ocsp_response: &[u8], 174 | _now: SystemTime, 175 | ) -> Result { 176 | Ok(rustls::client::ServerCertVerified::assertion()) 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /client-core/src/manager.rs: -------------------------------------------------------------------------------- 1 | use crate::{now_ts, Error, NetworkType}; 2 | use crate::{tcp_conn::TcpConnection, Connection, PlatformCallback, State}; 3 | use client_proto::proto::packet::Packet; 4 | use client_proto::proto::{Message, 
Ping}; 5 | use std::sync::Arc; 6 | use std::time::{Duration, Instant}; 7 | use tokio::sync::mpsc::{unbounded_channel as tchannel, UnboundedSender as TSender}; 8 | use tokio::sync::{Mutex, RwLock}; 9 | use tokio::time::{interval, timeout}; 10 | 11 | const CONNECTIVITY_CHECK_MS: u64 = 1000; 12 | const PING_INTERVAL_SEC: u64 = 3; 13 | const MAX_PENDING_PING_NUM: u8 = 3; 14 | 15 | #[derive(Clone, Default)] 16 | pub struct ConnManager { 17 | inner: Arc, 18 | } 19 | 20 | #[derive(Default)] 21 | pub struct ConnManagerInner { 22 | pub main_conn: RwLock>>, 23 | pub timeout_ms: u64, 24 | pub host: String, 25 | pub ssl: bool, 26 | 27 | platform_ck: Option>, 28 | open_lock: Arc>, 29 | conn_state: RwLock, 30 | ping_tx: RwLock>>, 31 | pending_ping: Mutex, 32 | network_type: Mutex, 33 | } 34 | 35 | impl ConnManager { 36 | #[allow(dead_code)] 37 | pub fn open(self: &Arc) { 38 | log::trace!("manager open"); 39 | let slf = self.clone(); 40 | tokio::spawn(async move { 41 | let _lock = slf.inner.open_lock.lock().await; 42 | if slf.is_opened().await { 43 | return; 44 | } 45 | slf.preemptive_open().await; 46 | slf.auto_reconnect(); 47 | slf.start_ping_pong(); 48 | }); 49 | } 50 | 51 | #[allow(dead_code)] 52 | pub fn send(&self, message: Message) { 53 | // TODO: send queue 54 | let slf = self.clone(); 55 | tokio::spawn(async move { 56 | let _ = slf.send_packet(Packet::Message(message)); 57 | }); 58 | } 59 | 60 | pub async fn set_main_conn_if_none(self: &Arc, conn: Box) { 61 | let slf = self.clone(); 62 | log::trace!("set_main_conn_if_none get lock"); 63 | let mut main_conn = self.inner.main_conn.write().await; 64 | log::trace!("set_main_conn_if_none got lock"); 65 | if main_conn.is_none() { 66 | *main_conn = Some(conn); 67 | tokio::task::spawn_blocking(move || match slf.inner.platform_ck { 68 | Some(ref cb) => cb.auth(), 69 | None => {} 70 | }); 71 | } 72 | } 73 | 74 | pub async fn recv(self: &Arc, packet: Result) { 75 | let slf = self.clone(); 76 | match packet { 77 | 
Ok(Packet::Message(message)) => { 78 | tokio::task::spawn_blocking(move || match slf.inner.platform_ck { 79 | Some(ref cb) => cb.recv(message), 80 | None => {} 81 | }); 82 | } 83 | Ok(Packet::Pong(pong)) => { 84 | log::debug!("recv pong, rtt is {}", now_ts() - pong.timestamp); 85 | slf.reset_pending_ping().await; 86 | } 87 | Err(e) => { 88 | log::error!("manager {:?}", e); 89 | self.cleanup().await 90 | } 91 | _ => log::info!("unsupport message type: {:?}", packet), 92 | } 93 | } 94 | 95 | pub async fn conn_state(&self) -> State { 96 | let guard = self.inner.conn_state.read().await; 97 | *guard 98 | } 99 | 100 | pub async fn set_conn_state(&self, new_state: State, unique_id: Option<&str>) { 101 | log::trace!("set_conn_state get lock"); 102 | let main_conn = self.inner.main_conn.read().await; 103 | log::trace!("set_conn_state got lock"); 104 | log::trace!( 105 | "manager set_conn_state {:?} by {:?} current main_conn {:?}", 106 | new_state, 107 | unique_id, 108 | main_conn.is_some() 109 | ); 110 | match unique_id { 111 | Some(unique_id) if main_conn.is_some() => { 112 | if let Some(ref conn) = *main_conn { 113 | if conn.is_same_conn(unique_id).await { 114 | self._set_conn_state(new_state).await 115 | } 116 | } 117 | } 118 | _ => self._set_conn_state(new_state).await, 119 | } 120 | } 121 | 122 | pub async fn is_connected(&self) -> bool { 123 | matches!(self.conn_state().await, State::Connected) 124 | } 125 | 126 | pub async fn is_closed(&self) -> bool { 127 | matches!(self.conn_state().await, State::Closed) 128 | } 129 | 130 | pub async fn drop_main_conn(&self) { 131 | log::trace!("manager drop main_conn get lock"); 132 | let mut main_conn = self.inner.main_conn.write().await; 133 | log::trace!("manager drop main_conn got lock"); 134 | main_conn.take(); // drop main_conn 135 | } 136 | 137 | async fn cleanup(&self) { 138 | if self.is_closed().await { 139 | return; 140 | } 141 | self.drop_main_conn().await; 142 | self.set_conn_state(State::DisConnected, None).await; 143 
| } 144 | 145 | fn start_ping_pong(self: &Arc) { 146 | let slf = self.clone(); 147 | tokio::spawn(async move { 148 | let (ping_tx, mut ping_rx) = tchannel(); 149 | let mut guard = slf.inner.ping_tx.write().await; 150 | *guard = Some(ping_tx); 151 | drop(guard); 152 | 153 | loop { 154 | let _ = timeout(Duration::from_secs(PING_INTERVAL_SEC), ping_rx.recv()).await; 155 | match slf.conn_state().await { 156 | State::Connected if slf.pending_ping_num().await < MAX_PENDING_PING_NUM => { 157 | slf.inc_pending_ping().await; 158 | slf.do_send(Packet::Ping(Ping { 159 | timestamp: now_ts(), 160 | })) 161 | .await; 162 | } 163 | State::Connected => { 164 | log::error!("max pending ping"); 165 | slf.reset_pending_ping().await; 166 | tokio::spawn(async move { slf.cleanup().await }); 167 | break; 168 | } 169 | _ => slf.reset_pending_ping().await, 170 | } 171 | } 172 | }); 173 | } 174 | 175 | // call multiple times create multiple connections, fastest wins as current connection 176 | async fn preemptive_open(self: &Arc) { 177 | self.set_conn_state(State::Connecting, None).await; 178 | log::debug!("preemptive opening"); 179 | let conn = TcpConnection::new( 180 | self.inner.host.clone(), 181 | self.inner.ssl, 182 | self.inner.timeout_ms, 183 | ) 184 | .with_manager(Arc::downgrade(self)); 185 | if conn.open().await { 186 | self.set_main_conn_if_none(Box::new(conn)).await; 187 | } 188 | } 189 | 190 | fn auto_reconnect(self: &Arc) { 191 | let slf = self.clone(); 192 | tokio::spawn(async move { 193 | let mut interval = interval(Duration::from_millis(CONNECTIVITY_CHECK_MS)); 194 | let mut last_run_time = Instant::now(); 195 | loop { 196 | if slf.is_closed().await { 197 | break; 198 | } 199 | let now = Instant::now(); 200 | if now.duration_since(last_run_time).as_millis() >= CONNECTIVITY_CHECK_MS as u128 { 201 | last_run_time = now; 202 | slf.reconnect_if_need().await; 203 | } 204 | interval.tick().await; 205 | } 206 | }); 207 | } 208 | 209 | async fn reconnect_if_need(self: &Arc) { 210 | 
// if state is closed, mean no need to reconnect 211 | // only reconnect when state is disconnected 212 | log::trace!("reconnect_if_need get read lock"); 213 | let main_conn = self.inner.main_conn.read().await; 214 | log::trace!("reconnect_if_need got read lock"); 215 | log::debug!( 216 | "main_conn {:?} current state {:?}", 217 | main_conn.is_some(), 218 | self.conn_state().await 219 | ); 220 | if (self.conn_state().await == State::DisConnected 221 | || self.conn_state().await == State::Connecting) 222 | && self.network_type().await != NetworkType::TypeNoNet 223 | { 224 | log::trace!("need reconnect"); 225 | let slf = self.clone(); 226 | tokio::spawn(async move { slf.preemptive_open().await }); 227 | } 228 | } 229 | 230 | #[allow(dead_code)] 231 | async fn set_network_type(&self, network_type: NetworkType) -> NetworkType { 232 | let mut guard = self.inner.network_type.lock().await; 233 | let old_network_type = *guard; 234 | *guard = network_type; 235 | old_network_type 236 | } 237 | 238 | async fn network_type(&self) -> NetworkType { 239 | let guard = self.inner.network_type.lock().await; 240 | *guard 241 | } 242 | 243 | async fn do_send(self: &Arc, packet: Packet) { 244 | let slf = self.clone(); 245 | tokio::spawn(async move { slf.send_packet(packet).await }); 246 | } 247 | 248 | async fn send_packet(&self, packet: Packet) { 249 | log::trace!("do_send get lock"); 250 | let main_conn = self.inner.main_conn.read().await; 251 | log::trace!("do_send got lock"); 252 | match *main_conn { 253 | Some(ref conn) => { 254 | if let Err(e) = conn.send(packet).await { 255 | log::error!("send error: {}", e.to_string()); 256 | drop(main_conn); // drop lock 257 | self.cleanup().await; 258 | } 259 | } 260 | None => log::info!("send failed, conn not ready"), 261 | }; 262 | } 263 | 264 | async fn is_opened(&self) -> bool { 265 | matches!( 266 | self.conn_state().await, 267 | State::Connecting | State::Connected 268 | ) 269 | } 270 | 271 | async fn _set_conn_state(&self, new_state: 
State) { 272 | let mut guard = self.inner.conn_state.write().await; 273 | *guard = new_state; 274 | } 275 | 276 | async fn pending_ping_num(&self) -> u8 { 277 | let pending_ping = self.inner.pending_ping.lock().await; 278 | *pending_ping 279 | } 280 | 281 | async fn reset_pending_ping(&self) { 282 | let mut pending_ping = self.inner.pending_ping.lock().await; 283 | *pending_ping = 0; 284 | } 285 | 286 | async fn inc_pending_ping(&self) { 287 | let mut pending_ping = self.inner.pending_ping.lock().await; 288 | *pending_ping += 1; 289 | } 290 | } 291 | 292 | #[cfg(test)] 293 | mod tests { 294 | 295 | use super::*; 296 | use std::time::Duration; 297 | 298 | #[tokio::test(flavor = "multi_thread", worker_threads = 4)] 299 | async fn test_manager() { 300 | // let _ = tracing_subscriber::fmt().with_env_filter("trace").init(); 301 | let manager = Arc::new(ConnManager { 302 | inner: Arc::new(ConnManagerInner { 303 | host: "127.0.0.1:8082".to_owned(), 304 | timeout_ms: 100, 305 | ..Default::default() 306 | }), 307 | }); 308 | 309 | manager.open(); 310 | 311 | let message = Message { 312 | namespace: "test".to_owned(), 313 | path: "/test".to_owned(), 314 | metadata: Default::default(), 315 | body: Default::default(), 316 | expire_at: None, 317 | }; 318 | 319 | manager.send(message); 320 | 321 | tokio::time::sleep(Duration::from_secs(2)).await; 322 | } 323 | } 324 | -------------------------------------------------------------------------------- /client-core/src/quic_conn.rs: -------------------------------------------------------------------------------- 1 | use crate::{Connection, Error}; 2 | use async_trait::async_trait; 3 | use client_proto::proto::packet::Packet; 4 | 5 | pub struct QuicConnection {} 6 | 7 | #[async_trait] 8 | impl Connection for QuicConnection { 9 | async fn open(&self) -> bool { 10 | return false; 11 | } 12 | 13 | async fn send(&self, _p: Packet) -> Result<(), Error> { 14 | Ok(()) 15 | } 16 | 17 | async fn close(&self) {} 18 | 19 | async fn state(&self) 
-> u8 { 20 | 0 21 | } 22 | 23 | async fn is_same_conn(&self, _unique_id: &str) -> bool { 24 | false 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /client-core/src/tcp_conn.rs: -------------------------------------------------------------------------------- 1 | use crate::codec::Codec; 2 | use crate::id_gen::ID_GEN; 3 | use crate::manager::ConnManager; 4 | use crate::{Connection, State, WeakManager}; 5 | use crate::{Error, SkipServerVerification}; 6 | use anyhow::{anyhow, Result as AnyResult}; 7 | use async_trait::async_trait; 8 | use client_proto::proto::packet::Packet; 9 | use futures::sink::SinkExt; 10 | use futures::stream::{SplitSink, SplitStream, StreamExt}; 11 | use rustls::{ClientConfig, RootCertStore, ServerName}; 12 | use std::fmt::Display; 13 | use std::net::ToSocketAddrs; 14 | use std::pin::Pin; 15 | use std::sync::{Arc, Weak}; 16 | use std::task::{Context, Poll}; 17 | use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; 18 | use tokio::net::TcpStream; 19 | use tokio::sync::Mutex; 20 | use tokio::time::Duration as TDuration; 21 | use tokio_rustls::client::TlsStream; 22 | use tokio_rustls::TlsConnector; 23 | use tokio_util::codec::Framed; 24 | 25 | type TcpFramed = Framed; 26 | type TcpSplitSink = SplitSink; 27 | type TcpSplitStream = SplitStream; 28 | 29 | pub struct TcpConnection { 30 | pub unique_id: String, 31 | pub connecting_ts: u64, 32 | host: String, 33 | is_ssl: bool, 34 | timeout_ms: u64, 35 | manager: WeakManager, 36 | open_lock: Arc>, 37 | conn_state: Mutex, 38 | sender: Mutex>, 39 | connected_ms: Mutex, 40 | } 41 | 42 | #[async_trait] 43 | impl Connection for TcpConnection { 44 | async fn open(&self) -> bool { 45 | let _lock = self.open_lock.lock().await; 46 | if self.is_opened().await { 47 | return false; 48 | } 49 | 50 | log::debug!("{} is opening", self); 51 | if let Some(ref manager) = &self.manager.upgrade() { 52 | if manager.is_connected().await { 53 | log::info!("some tcp is opened, 
skip"); 54 | return false; 55 | } 56 | } 57 | 58 | self.change_conn_state(State::Connecting).await; 59 | match self 60 | .open_tcp_stream(&self.host, self.is_ssl, self.timeout_ms) 61 | .await 62 | { 63 | Ok(conn) => { 64 | self.handle_tcp_stream(conn).await; 65 | log::debug!("{} open success", self); 66 | return true; 67 | } 68 | Err(e) => { 69 | log::debug!("{} open failed {}", self, e.to_string()); 70 | return false; 71 | } 72 | } 73 | } 74 | 75 | async fn send(&self, packet: Packet) -> Result<(), Error> { 76 | let mut sender_guard = self.sender.lock().await; 77 | match *sender_guard { 78 | Some(ref mut s) => { 79 | log::info!("{} send {:?}", self, packet); 80 | s.send(packet) 81 | .await 82 | .map_err(|e| Error::SendError { msg: e.to_string() })?; 83 | } 84 | None => log::warn!("{} packet drop, conn not ready", self), 85 | }; 86 | // false 87 | Ok(()) 88 | } 89 | 90 | async fn close(&self) {} 91 | 92 | async fn state(&self) -> u8 { 93 | self.conn_state().await.into() 94 | } 95 | 96 | async fn is_same_conn(&self, unique_id: &str) -> bool { 97 | self.unique_id.eq(unique_id) 98 | } 99 | } 100 | 101 | impl TcpConnection { 102 | pub fn new(host: String, is_ssl: bool, timeout_ms: u64) -> Self { 103 | Self { 104 | unique_id: ID_GEN.generate(&host, "tcp"), 105 | manager: WeakManager::new(None), 106 | host, 107 | is_ssl, 108 | timeout_ms, 109 | open_lock: Arc::new(Mutex::new(0)), 110 | conn_state: Mutex::new(State::Init), 111 | sender: Mutex::new(None), 112 | connecting_ts: crate::now_ts(), 113 | connected_ms: Mutex::new(0), 114 | } 115 | } 116 | 117 | pub fn with_manager(mut self, manager: Weak) -> Self { 118 | self.manager = WeakManager::new(Some(manager)); 119 | self 120 | } 121 | 122 | pub async fn is_opened(&self) -> bool { 123 | matches!( 124 | self.conn_state().await, 125 | State::Connecting | State::Connected 126 | ) 127 | } 128 | 129 | pub async fn open_tcp_stream( 130 | &self, 131 | host: &str, 132 | ssl: bool, 133 | timeout_ms: u64, 134 | ) -> AnyResult { 135 
| log::debug!("{} open_tcp_stream", self); 136 | 137 | let socket_addr = host 138 | .to_socket_addrs()? 139 | .next() 140 | .ok_or_else(|| anyhow!("couldn't resolve to an address"))?; 141 | 142 | log::info!("{} connecting addr: {:?}", self, socket_addr); 143 | match tokio::time::timeout( 144 | TDuration::from_millis(timeout_ms), 145 | TcpStream::connect(socket_addr), 146 | ) 147 | .await 148 | { 149 | Err(e) => Err(anyhow!("{} connect timeout {}", self, e)), 150 | Ok(stream_result) => match stream_result { 151 | Err(e) => Err(anyhow!("{} connect failed {}", self, e)), 152 | Ok(stream) if ssl => self.open_tcp_tls_stream(timeout_ms, stream, host).await, 153 | Ok(stream) => { 154 | log::debug!("{} stream opened", self); 155 | Ok(TStream::Tcp(Box::new(stream))) 156 | } 157 | }, 158 | } 159 | } 160 | 161 | pub async fn open_tcp_tls_stream( 162 | &self, 163 | timeout_ms: u64, 164 | stream: TcpStream, 165 | host: &str, 166 | ) -> AnyResult { 167 | log::debug!("{} stream opened, start tls connecting", self); 168 | let server_name = ServerName::try_from(host)?; 169 | match tokio::time::timeout( 170 | TDuration::from_millis(timeout_ms), 171 | TlsConnector::from(client_config()).connect(server_name, stream), 172 | ) 173 | .await 174 | { 175 | Err(e) => Err(anyhow!("{} connect tls timeout {}", self, e)), 176 | Ok(stream) => match stream { 177 | Err(e) => Err(anyhow!("{} tls connect failed: {}", self, e)), 178 | Ok(tls_stream) => { 179 | log::debug!("{} tls done", self); 180 | Ok(TStream::Tls(Box::new(tls_stream))) 181 | } 182 | }, 183 | } 184 | } 185 | 186 | pub async fn handle_tcp_stream(&self, stream: TStream) { 187 | log::info!("{} handle_tcp_stream", self); 188 | let framed_stream = Framed::new(stream, Codec::default()); 189 | let (sender, receiver) = framed_stream.split(); 190 | let mut send_guard = self.sender.lock().await; 191 | *send_guard = Some(sender); 192 | drop(send_guard); 193 | 194 | self.do_recv(receiver).await; 195 | 
self.change_conn_state(State::Connected).await; 196 | } 197 | 198 | pub async fn do_recv(&self, mut receiver: TcpSplitStream) { 199 | let manager = self.manager.clone(); 200 | tokio::spawn(async move { 201 | while let Some(p) = receiver.next().await { 202 | log::trace!("do_recv {:?}", p); 203 | let manager = manager.clone(); 204 | manager_recv( 205 | manager, 206 | p.map_err(|e| Error::RecvError { msg: e.to_string() }), 207 | ) 208 | .await 209 | } 210 | }); 211 | } 212 | 213 | pub async fn conn_state(&self) -> State { 214 | let guard = self.conn_state.lock().await; 215 | *guard 216 | } 217 | 218 | pub async fn change_conn_state(&self, new_state: State) { 219 | let current_state = self.conn_state().await; 220 | let old_state = current_state; 221 | 222 | match new_state { 223 | State::Connected => self.update_connected_ms().await, 224 | _ => self.clear_connected_ms().await, 225 | } 226 | 227 | if old_state != new_state { 228 | log::trace!("{} from {:?} to {:?}", self, old_state, new_state); 229 | self.set_conn_state(new_state).await; 230 | 231 | if let Some(ref manager) = &self.manager.upgrade() { 232 | manager 233 | .set_conn_state(new_state, Some(&self.unique_id)) 234 | .await; 235 | } 236 | } 237 | } 238 | 239 | pub async fn update_connected_ms(&self) { 240 | let ts = self.connecting_ts; 241 | let now = crate::now_ts(); 242 | let mut ms_guard = self.connected_ms.lock().await; 243 | let d = (now - ts) as u64; 244 | *ms_guard = d; 245 | } 246 | 247 | pub async fn clear_connected_ms(&self) { 248 | let mut ms_guard = self.connected_ms.lock().await; 249 | *ms_guard = 0; 250 | } 251 | 252 | pub async fn set_conn_state(&self, new_state: State) { 253 | let mut guard = self.conn_state.lock().await; 254 | *guard = new_state; 255 | } 256 | } 257 | 258 | async fn manager_recv(manager: WeakManager, packet: Result) { 259 | if let Some(ref manager) = manager.upgrade() { 260 | manager.recv(packet).await 261 | } 262 | } 263 | 264 | impl Display for TcpConnection { 265 | fn 
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 266 | write!(f, "{}", self.unique_id) 267 | } 268 | } 269 | 270 | impl Drop for TcpConnection { 271 | fn drop(&mut self) { 272 | log::info!("{} dropped", self); 273 | } 274 | } 275 | 276 | pub enum TStream { 277 | Tcp(Box), 278 | Tls(Box>), 279 | } 280 | 281 | impl AsyncWrite for TStream { 282 | fn poll_write( 283 | mut self: Pin<&mut Self>, 284 | cx: &mut Context<'_>, 285 | buf: &[u8], 286 | ) -> Poll> { 287 | match &mut *self { 288 | Self::Tcp(ref mut s) => AsyncWrite::poll_write(Pin::new(s), cx, buf), 289 | Self::Tls(ref mut s) => AsyncWrite::poll_write(Pin::new(s), cx, buf), 290 | } 291 | } 292 | 293 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 294 | match &mut *self { 295 | Self::Tcp(ref mut s) => AsyncWrite::poll_flush(Pin::new(s), cx), 296 | Self::Tls(ref mut s) => AsyncWrite::poll_flush(Pin::new(s), cx), 297 | } 298 | } 299 | 300 | fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 301 | match &mut *self { 302 | Self::Tcp(ref mut s) => AsyncWrite::poll_shutdown(Pin::new(s), cx), 303 | Self::Tls(ref mut s) => AsyncWrite::poll_shutdown(Pin::new(s), cx), 304 | } 305 | } 306 | } 307 | 308 | impl AsyncRead for TStream { 309 | fn poll_read( 310 | mut self: Pin<&mut Self>, 311 | cx: &mut Context<'_>, 312 | buf: &mut ReadBuf<'_>, 313 | ) -> Poll> { 314 | match &mut *self { 315 | Self::Tcp(ref mut s) => AsyncRead::poll_read(Pin::new(s), cx, buf), 316 | Self::Tls(ref mut s) => AsyncRead::poll_read(Pin::new(s), cx, buf), 317 | } 318 | } 319 | } 320 | 321 | fn client_config() -> Arc { 322 | let mut client_config = ClientConfig::builder() 323 | .with_safe_defaults() 324 | .with_root_certificates(RootCertStore::empty()) 325 | .with_no_client_auth(); 326 | client_config.alpn_protocols = vec![Vec::from("tcp")]; 327 | client_config 328 | .dangerous() 329 | .set_certificate_verifier(SkipServerVerification::new()); 330 | Arc::new(client_config) 331 | } 332 | 333 | 
#[cfg(test)] 334 | mod tests { 335 | 336 | use super::*; 337 | use client_proto::proto::Message; 338 | use std::time::Duration; 339 | 340 | #[tokio::test(flavor = "multi_thread", worker_threads = 4)] 341 | async fn test_conn() { 342 | let _ = tracing_subscriber::fmt().with_env_filter("trace").init(); 343 | let tcp_conn = TcpConnection::new("127.0.0.1:8082".to_owned(), false, 100); 344 | let conn: &dyn Connection = &tcp_conn; 345 | let c = &conn; 346 | 347 | c.open().await; 348 | 349 | let message = Message { 350 | namespace: "test".to_owned(), 351 | path: "/test".to_owned(), 352 | metadata: Default::default(), 353 | body: Default::default(), 354 | expire_at: None, 355 | }; 356 | 357 | let _ = conn.send(Packet::Message(message.clone())).await; 358 | let _ = conn.send(Packet::Message(message.clone())).await; 359 | let _ = conn.send(Packet::Message(message)).await; 360 | 361 | tokio::time::sleep(Duration::from_secs(2)).await; 362 | } 363 | } 364 | -------------------------------------------------------------------------------- /client-proto/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "client-proto" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | prost = "0.10" 10 | bytes = "1.0" 11 | 12 | [build-dependencies] 13 | prost-build = "0.10" 14 | -------------------------------------------------------------------------------- /client-proto/build.rs: -------------------------------------------------------------------------------- 1 | use std::io::Result; 2 | fn main() -> Result<()> { 3 | prost_build::compile_protos(&["../protos/client.proto"], &["../protos"])?; 4 | Ok(()) 5 | } 6 | -------------------------------------------------------------------------------- /client-proto/src/lib.rs: -------------------------------------------------------------------------------- 1 | use 
bytes::{Buf, Bytes, BytesMut}; 2 | pub use prost::{DecodeError, EncodeError, Message}; 3 | 4 | pub mod proto { 5 | include!(concat!(env!("OUT_DIR"), "/featureprobe.link.rs")); 6 | } 7 | 8 | const VARINT_MAX_LEN: usize = 10; 9 | 10 | #[derive(Default)] 11 | pub struct Codec { 12 | decode_len: Option, 13 | } 14 | 15 | impl Codec { 16 | pub fn encode(&self, packet: proto::packet::Packet) -> Result { 17 | let p = proto::Packet { 18 | packet: Some(packet), 19 | }; 20 | let mut buf = BytesMut::with_capacity(p.encoded_len()); 21 | p.encode_length_delimited(&mut buf)?; 22 | Ok(buf.freeze()) 23 | } 24 | 25 | pub fn decode( 26 | &mut self, 27 | buf: &mut BytesMut, 28 | ) -> Result, DecodeError> { 29 | if buf.len() < VARINT_MAX_LEN { 30 | return self.decode_length_delimiter(buf); 31 | } 32 | 33 | self._decode(buf) 34 | } 35 | 36 | fn decode_length_delimiter( 37 | &mut self, 38 | buf: &mut BytesMut, 39 | ) -> Result, DecodeError> { 40 | match self.decode_len { 41 | Some(_) => self._decode(buf), 42 | None => { 43 | let mut b = buf.clone(); 44 | let new_buf = &mut b; 45 | match prost::decode_length_delimiter(new_buf) { 46 | Ok(_) => self._decode(buf), 47 | Err(_) => Ok(None), 48 | } 49 | } 50 | } 51 | } 52 | 53 | fn _decode( 54 | &mut self, 55 | buf: &mut BytesMut, 56 | ) -> Result, DecodeError> { 57 | let len = if let Some(len) = self.decode_len.take() { 58 | len 59 | } else { 60 | prost::decode_length_delimiter(buf as &mut dyn bytes::Buf)? 
61 | }; 62 | 63 | if len > buf.len() { 64 | self.decode_len = Some(len); 65 | return Ok(None); 66 | } 67 | let b = &buf[0..len]; 68 | let p = proto::Packet::decode(b)?.packet; 69 | buf.advance(len); 70 | Ok(p) 71 | } 72 | } 73 | 74 | #[cfg(test)] 75 | mod tests { 76 | use bytes::BufMut; 77 | 78 | use super::*; 79 | pub fn build_packet(namespace: String, body_len: usize) -> proto::packet::Packet { 80 | let message: proto::Message = proto::Message { 81 | namespace, 82 | path: "path".to_owned(), 83 | metadata: Default::default(), 84 | body: vec![1; body_len], 85 | expire_at: None, 86 | }; 87 | proto::packet::Packet::Message(message) 88 | } 89 | 90 | #[test] 91 | fn test_decode_empty() -> Result<(), prost::DecodeError> { 92 | let mut codec = Codec::default(); 93 | let mut bm = BytesMut::new(); 94 | let result = codec.decode(&mut bm); 95 | 96 | assert!(result.is_ok()); 97 | assert!(result.unwrap().is_none()); 98 | Ok(()) 99 | } 100 | 101 | #[test] 102 | fn test_decode() -> Result<(), prost::DecodeError> { 103 | let mut codec = Codec::default(); 104 | let request = String::from("Hello, World!"); 105 | let request = build_packet(request, 4); 106 | let request_vector = codec.encode(request).unwrap(); 107 | let request_vector = [request_vector].concat(); 108 | let mut bm = BytesMut::from(request_vector.as_slice()); 109 | 110 | let result = codec.decode(&mut bm); 111 | assert!(result.is_ok()); 112 | 113 | Ok(()) 114 | } 115 | 116 | #[test] 117 | fn test_decode_multiple() -> Result<(), prost::DecodeError> { 118 | let mut codec = Codec::default(); 119 | let request = String::from("Hello, World!"); 120 | let request = build_packet(request, 4); 121 | let request_vector = codec.encode(request).unwrap(); 122 | let request_vector = [request_vector.clone(), request_vector].concat(); 123 | let mut bm = BytesMut::from(request_vector.as_slice()); 124 | 125 | let result = codec.decode(&mut bm); 126 | assert!(result.is_ok()); 127 | assert!(result.unwrap().is_some()); 128 | 129 | let 
result = codec.decode(&mut bm); 130 | assert!(result.is_ok()); 131 | assert!(result.unwrap().is_some()); 132 | Ok(()) 133 | } 134 | 135 | #[test] 136 | fn test_decode_partial() -> Result<(), prost::DecodeError> { 137 | let mut codec = Codec::default(); 138 | let request = String::from("Hello, World!"); 139 | let request = build_packet(request, 4); 140 | let request_vector = codec.encode(request).unwrap(); 141 | let request_vector = [request_vector].concat(); 142 | 143 | let len = request_vector.len(); 144 | let mut bm = BytesMut::from(&request_vector[0..len / 2]); 145 | let result = codec.decode(&mut bm)?; 146 | assert!(result.is_none()); 147 | 148 | bm.put(&request_vector[len / 2..]); 149 | let result = codec.decode(&mut bm)?; 150 | assert!(result.is_some()); 151 | Ok(()) 152 | } 153 | 154 | #[test] 155 | fn test_decode_partial_varint() -> Result<(), prost::DecodeError> { 156 | let mut codec = Codec::default(); 157 | let request = String::from("Hello, World!"); 158 | let request = build_packet(request, 1000); 159 | let request_vector = codec.encode(request).unwrap(); 160 | let request_vector = [request_vector].concat(); 161 | 162 | let mut bm = BytesMut::from(&request_vector[0..1]); 163 | let result = codec.decode(&mut bm)?; 164 | assert!(result.is_none()); 165 | 166 | bm.put(&request_vector[1..]); 167 | let result = codec.decode(&mut bm)?; 168 | assert!(result.is_some()); 169 | 170 | let mut bm = BytesMut::from(&request_vector[0..2]); 171 | let result = codec.decode(&mut bm)?; 172 | assert!(result.is_none()); 173 | 174 | bm.put(&request_vector[2..]); 175 | let result = codec.decode(&mut bm)?; 176 | assert!(result.is_some()); 177 | 178 | let mut bm = BytesMut::from(&request_vector[0..3]); 179 | let result = codec.decode(&mut bm)?; 180 | assert!(result.is_none()); 181 | 182 | bm.put(&request_vector[3..]); 183 | let result = codec.decode(&mut bm)?; 184 | assert!(result.is_some()); 185 | 186 | let mut bm = BytesMut::from(&request_vector[0..4]); 187 | let result = 
codec.decode(&mut bm)?; 188 | assert!(result.is_none()); 189 | 190 | bm.put(&request_vector[4..]); 191 | let result = codec.decode(&mut bm)?; 192 | assert!(result.is_some()); 193 | Ok(()) 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /client-uniffi/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "client-uniffi" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [lib] 7 | name = "featureprobe_link_ffi" 8 | crate-type = ["cdylib", "staticlib"] 9 | 10 | [dependencies] 11 | uniffi_macros = "0.18" 12 | uniffi = { version = "0.18", features = [ "builtin-bindgen" ] } 13 | 14 | client_core = { path = "../client-core" } 15 | 16 | [build-dependencies] 17 | uniffi_build = { version = "0.18", features = [ "builtin-bindgen" ] } -------------------------------------------------------------------------------- /client-uniffi/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | uniffi_build::generate_scaffolding("./src/featureprobe_link.udl").unwrap(); 3 | } 4 | -------------------------------------------------------------------------------- /client-uniffi/src/featureprobe_link.udl: -------------------------------------------------------------------------------- 1 | namespace FeatureProbeLink { }; 2 | 3 | interface FPLink { 4 | constructor(); 5 | void open(); 6 | void send(FPMessage message); 7 | void close(); 8 | u8 state(); 9 | void go_background(); 10 | void go_foreground(); 11 | void network_change(FPNetworkType old, FPNetworkType new); 12 | void set_attrs(record attrs); 13 | }; 14 | 15 | callback interface FPlatformCallback { 16 | void auth(); 17 | void recv(FPMessage message); 18 | void state_change(u8 old, u8 new); 19 | }; 20 | 21 | dictionary FPMessage { 22 | string namespace; 23 | string path; 24 | record metadata; 25 | sequence body; 26 | u64? 
expire_at; 27 | }; 28 | 29 | enum FPNetworkType { 30 | "TypeUnknown", 31 | "TypeNoNet", 32 | "TypeWiFi", 33 | "Type2G", 34 | "Type3G", 35 | "Type4G", 36 | "Type5G", 37 | }; 38 | -------------------------------------------------------------------------------- /client-uniffi/src/lib.rs: -------------------------------------------------------------------------------- 1 | use client_core::{ 2 | LinkClient as FPLink, Message as FPMessage, NetworkType as FPNetworkType, 3 | PlatformCallback as FPlatformCallback, 4 | }; 5 | 6 | uniffi_macros::include_scaffolding!("featureprobe_link"); 7 | -------------------------------------------------------------------------------- /client-uniffi/tests/bindings/test.kts: -------------------------------------------------------------------------------- 1 | import com.featureprobe.link.*; 2 | 3 | val link = FpLink() 4 | 5 | println("toggle value is $link") -------------------------------------------------------------------------------- /client-uniffi/tests/bindings/test.swift: -------------------------------------------------------------------------------- 1 | import FeatureProbeLink 2 | 3 | let link = FpLink() 4 | print("link is \(link)") 5 | -------------------------------------------------------------------------------- /client-uniffi/tests/test_generated_bindings.rs: -------------------------------------------------------------------------------- 1 | uniffi_macros::build_foreign_language_testcases!( 2 | ["src/featureprobe_link.udl",], 3 | ["tests/bindings/test.kts", "tests/bindings/test.swift",] 4 | ); 5 | -------------------------------------------------------------------------------- /client-uniffi/uniffi.toml: -------------------------------------------------------------------------------- 1 | [bindings.kotlin] 2 | package_name = "com.featureprobe.link" 3 | cdylib_name = "featureprobe_link_ffi" 4 | 5 | [bindings.swift] 6 | cdylib_name = "featureprobe_link_ffi" 7 | module_name = "FeatureProbeLink" 
-------------------------------------------------------------------------------- /fplink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FeatureProbe/feature-probe-link/336d39964ef1ebd24a2149b87349b5b879fb2e20/fplink.png -------------------------------------------------------------------------------- /protos/client.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package featureprobe.link; 3 | option java_package = "com.featureprobe.link"; 4 | 5 | message Packet { 6 | oneof packet { 7 | Ping ping = 1; 8 | Pong pong = 2; 9 | Message message = 3; 10 | } 11 | } 12 | 13 | message Ping { uint64 timestamp = 1; } 14 | 15 | message Pong { uint64 timestamp = 1; } 16 | 17 | message Message { 18 | string namespace = 1; 19 | string path = 2; 20 | map metadata = 3; 21 | bytes body = 4; 22 | optional uint64 expire_at = 5; 23 | } -------------------------------------------------------------------------------- /protos/service.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package featureprobe.link; 3 | option java_package = "com.featureprobe.link"; 4 | 5 | import "client.proto"; 6 | 7 | message Trace { 8 | string trace_id = 1; 9 | string span_id = 2; 10 | } 11 | 12 | message JoinReq { 13 | string sid = 1; 14 | string namespace = 2; 15 | optional string room_prefix = 3; 16 | string room = 4; 17 | Trace trace = 10; 18 | } 19 | 20 | message JoinResp { bool success = 1; } 21 | 22 | message BulkJoinReq { 23 | string sid = 1; 24 | string namespace = 2; 25 | map rooms = 3; 26 | Trace trace = 10; 27 | } 28 | 29 | message BulkJoinResp { bool success = 1; } 30 | 31 | message LeaveReq { 32 | string sid = 1; 33 | string namespace = 2; 34 | optional string room_prefix = 3; 35 | string room = 4; 36 | Trace trace = 10; 37 | } 38 | 39 | message LeaveResp { bool success = 1; } 40 | 41 | message 
EmitReq { 42 | Message message = 1; 43 | optional string room_prefix = 2; 44 | repeated string rooms = 3; 45 | bool success_channel = 4; 46 | Trace trace = 10; 47 | } 48 | 49 | message BulkEmitReq { repeated EmitReq requests = 1; } 50 | 51 | message EmitSidReq { 52 | string sid = 1; 53 | Message message = 2; 54 | Trace trace = 10; 55 | } 56 | 57 | message EmitSidResp { 58 | bool success = 1; 59 | bool sent = 2; 60 | } 61 | 62 | message EmitResp { 63 | bool success = 1; 64 | repeated string rooms = 2; 65 | repeated EmitStatus status = 3; 66 | } 67 | 68 | message BulkEmitResp { repeated EmitResp responses = 1; } 69 | 70 | message ConnRoomReq { 71 | string namespace = 1; 72 | optional string room_prefix = 2; 73 | string room = 3; 74 | Trace trace = 10; 75 | } 76 | 77 | message Rooms { repeated string rooms = 1; } 78 | 79 | message ConnRooms { 80 | string sid = 1; 81 | map rooms = 2; 82 | } 83 | 84 | message EmitStatus { 85 | ConnRooms rooms = 1; 86 | bool sent = 2; 87 | } 88 | 89 | message ConnRoomResp { 90 | bool success = 1; 91 | repeated ConnRooms rooms = 2; 92 | } 93 | 94 | message GetRoomsReq { 95 | string namespace = 1; 96 | optional string room_prefix = 2; 97 | Rooms Rooms = 3; 98 | Trace trace = 10; 99 | } 100 | 101 | message GetChannelsResp { 102 | string namespace = 1; 103 | optional string room_prefix = 2; 104 | Rooms Rooms = 3; 105 | } 106 | 107 | service LinkService { 108 | rpc Join(JoinReq) returns (JoinResp) {} 109 | rpc Leave(LeaveReq) returns (LeaveResp) {} 110 | rpc BulkJoin(BulkJoinReq) returns (BulkJoinResp) {} 111 | rpc EmitSid(EmitSidReq) returns (EmitSidResp) {} 112 | rpc Emit(EmitReq) returns (EmitResp) {} 113 | rpc BulkEmit(BulkEmitReq) returns (BulkEmitResp) {} 114 | rpc GetConnRooms(ConnRoomReq) returns (ConnRoomResp) {} 115 | rpc GetRooms(GetRoomsReq) returns (GetChannelsResp) {} 116 | } 117 | 118 | message MessageReq { 119 | string sid = 1; 120 | Message message = 2; 121 | map rooms = 3; 122 | Trace trace = 10; 123 | } 124 | 125 | message 
MessageResp { int64 success_num = 1; } 126 | 127 | service MessageService { 128 | rpc HandleMessage(MessageReq) returns (MessageResp) {} 129 | } -------------------------------------------------------------------------------- /server-base/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server-base" 3 | version = "0.1.0" 4 | edition = "2021" 5 | resolver = "2" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | num_cpus = "1.0" 11 | log = "0.4" 12 | config = "0.11" 13 | async-trait = "0.1" 14 | rand = "^0.8" 15 | parking_lot = "0.11" 16 | lazy_static = "1.4" 17 | tokio = { version = "1", features = ["full"] } 18 | serde = { version = "1.0", features = ["derive"] } 19 | serde_json = "1.0" 20 | minstant = "0.1" 21 | tonic = "0.7" 22 | 23 | server-proto = { path = "../server-proto" } 24 | 25 | -------------------------------------------------------------------------------- /server-base/src/config.rs: -------------------------------------------------------------------------------- 1 | use config::File; 2 | pub use config::{Config, ConfigError, Environment, Source, Value}; 3 | use std::collections::HashMap; 4 | use std::sync::Arc; 5 | 6 | #[derive(Clone, Debug)] 7 | pub struct Service { 8 | pub etcd_name: String, 9 | pub fallback_addr: Option, 10 | } 11 | 12 | #[derive(Clone, Debug)] 13 | pub struct Hproxy { 14 | pub origin_url: String, 15 | pub rewrite_url: String, 16 | pub timeout: u64, 17 | } 18 | 19 | #[derive(Clone)] 20 | pub struct FPConfig { 21 | config: Arc, 22 | } 23 | 24 | impl FPConfig { 25 | pub fn new() -> Self { 26 | FPConfig::default() 27 | } 28 | 29 | pub fn new_with_config(config: Config) -> Self { 30 | Self { 31 | config: Arc::new(config), 32 | } 33 | } 34 | 35 | pub fn config_content(&self) -> String { 36 | let mut config_context = String::new(); 37 | let kvs: HashMap = 
self.config.collect().unwrap_or_else(|_| HashMap::new()); 38 | for (k, v) in kvs { 39 | config_context.push_str(&format!("_fplink_conf||config={} => {}\r\n", k, v)); 40 | } 41 | config_context 42 | } 43 | 44 | pub fn show_config(&self) { 45 | let kvs: HashMap = self.config.collect().unwrap_or_else(|_| HashMap::new()); 46 | for (k, v) in kvs { 47 | log::info!("_fplink_conf||config={} => {}", k, v); 48 | } 49 | } 50 | 51 | pub fn cert_path(&self) -> Option { 52 | match self.config.get_str("cert_path") { 53 | Ok(path) if !path.trim().is_empty() => Some(path), 54 | _ => None, 55 | } 56 | } 57 | 58 | pub fn cert_path_quic(&self) -> Option { 59 | match self.config.get_str("cert_path_quic") { 60 | Ok(path) if !path.trim().is_empty() => Some(path), 61 | _ => self.cert_path(), 62 | } 63 | } 64 | 65 | pub fn etcd_addr(&self) -> String { 66 | self.config 67 | .get_str("etcd") 68 | .unwrap_or_else(|_| "localhost:2379".to_owned()) 69 | } 70 | 71 | pub fn service_id(&self) -> String { 72 | self.config 73 | .get_str("service_id") 74 | .unwrap_or_else(|_| "fplink--grpc".to_owned()) 75 | } 76 | 77 | pub fn etcd_prefix_base(&self) -> String { 78 | let mut prefix = self 79 | .config 80 | .get_str("etcd_prefix") 81 | .unwrap_or_else(|_| "/".to_owned()); 82 | if !prefix.ends_with('/') { 83 | prefix.push('/'); 84 | } 85 | prefix 86 | } 87 | 88 | pub fn etcd_prefix_broadcast(&self) -> String { 89 | let mut prefix = self 90 | .config 91 | .get_str("etcd_prefix_broadcast") 92 | .unwrap_or_else(|_| self.etcd_prefix_base()); 93 | if !prefix.ends_with('/') { 94 | prefix.push('/'); 95 | } 96 | prefix 97 | } 98 | 99 | pub fn conn_listen_addr(&self) -> String { 100 | self.config 101 | .get_str("listen_addr") 102 | .unwrap_or_else(|_| "0.0.0.0:8082".to_owned()) 103 | } 104 | 105 | pub fn conn_listen_addr_deprecated(&self) -> String { 106 | self.config 107 | .get_str("listen_addr2") 108 | .unwrap_or_else(|_| "0.0.0.0:8083".to_owned()) 109 | } 110 | 111 | pub fn peer_listen_addr(&self) -> String { 
112 | self.config 113 | .get_str("peer_listen_addr") 114 | .unwrap_or_else(|_| "0.0.0.0:6321".to_owned()) 115 | } 116 | 117 | pub fn service_listen_addr(&self) -> String { 118 | self.config 119 | .get_str("listen_service_addr") 120 | .unwrap_or_else(|_| "0.0.0.0:1215".to_owned()) 121 | } 122 | 123 | pub fn hostname(&self) -> String { 124 | self.config 125 | .get_str("hostname") 126 | .unwrap_or_else(|_| "127.0.0.1".to_owned()) 127 | } 128 | 129 | pub fn zone(&self) -> Option { 130 | if let Ok(zone) = self.config.get_str("zone") { 131 | return Some(zone); 132 | } 133 | None 134 | } 135 | 136 | pub fn hproxy_map(&self) -> HashMap { 137 | let mut hproxy_map: HashMap = HashMap::new(); 138 | let mut index = 1; 139 | while let Ok(hproxy_str) = self.config.get_str(&format!("hproxy_{}", index)) { 140 | index += 1; 141 | let tokens: Vec<&str> = hproxy_str.trim().split('#').collect(); 142 | let hproxy: Hproxy; 143 | 144 | let origin_url: String = if let Some(&o_url) = tokens.get(0) { 145 | o_url.trim().to_string() 146 | } else { 147 | log::warn!("invalid hproxy {}", hproxy_str); 148 | continue; 149 | }; 150 | 151 | if origin_url.is_empty() { 152 | log::warn!("invalid hproxy {}", hproxy_str); 153 | continue; 154 | } 155 | if let Some(&rewrite_url) = tokens.get(1) { 156 | let rewrite_url = rewrite_url.trim().to_string(); 157 | if rewrite_url.is_empty() { 158 | log::warn!("invalid hproxy {}", hproxy_str); 159 | continue; 160 | } 161 | let mut timeout = self.hproxy_timeout(); 162 | if let Some(&fallback) = tokens.get(2) { 163 | if let Ok(t) = fallback.trim().parse::() { 164 | timeout = t; 165 | } else { 166 | log::warn!("parse timeout failed {}", hproxy_str); 167 | } 168 | } else { 169 | log::warn!("hproxy {} not set timeout", hproxy_str); 170 | } 171 | hproxy = Hproxy { 172 | origin_url: origin_url.clone(), 173 | rewrite_url, 174 | timeout, 175 | }; 176 | hproxy_map.insert(origin_url, hproxy); 177 | } else { 178 | log::warn!("invalid hproxy {}", hproxy_str); 179 | continue; 180 
| } 181 | } 182 | hproxy_map 183 | } 184 | 185 | pub fn service_map(&self) -> HashMap { 186 | let mut ns_service_map = HashMap::new(); 187 | let mut index = 1; 188 | while let Ok(service_str) = self.config.get_str(&format!("service_{}", index)) { 189 | index += 1; 190 | let tokens: Vec<&str> = service_str.trim().split('#').collect(); 191 | let service: Service; 192 | 193 | let namespace: String = if let Some(&ns) = tokens.get(0) { 194 | ns.trim().to_string() 195 | } else { 196 | log::warn!("invalid service {}", service_str); 197 | continue; 198 | }; 199 | 200 | if namespace.is_empty() { 201 | log::warn!("invalid service {}", service_str); 202 | continue; 203 | } 204 | if let Some(&etcd_name) = tokens.get(1) { 205 | let etcd_name = etcd_name.trim().to_string(); 206 | if etcd_name.is_empty() { 207 | log::warn!("invalid service {}", service_str); 208 | continue; 209 | } 210 | let mut fallback_addr = None; 211 | if let Some(&fallback) = tokens.get(2) { 212 | fallback_addr = Some(fallback.trim().to_string()); 213 | } else { 214 | log::warn!("service {} not set fallback addr", service_str); 215 | } 216 | service = Service { 217 | etcd_name, 218 | fallback_addr, 219 | }; 220 | ns_service_map.insert(namespace, service); 221 | } else { 222 | log::warn!("invalid service {}", service_str); 223 | continue; 224 | } 225 | } 226 | ns_service_map 227 | } 228 | 229 | pub fn cluster_grpc_timeout_ms(&self) -> u64 { 230 | self.config 231 | .get_int("grpc_cluster_timeout_ms") 232 | .unwrap_or(500) as u64 233 | } 234 | 235 | pub fn service_grpc_timeout_ms(&self) -> u64 { 236 | if let Ok(timeout) = self.config.get_int("service_grpc_timeout_ms") { 237 | timeout as u64 238 | } else { 239 | self.config.get_int("biz_grpc_timeout_ms").unwrap_or(1000) as u64 240 | } 241 | } 242 | 243 | pub fn hproxy_timeout(&self) -> u64 { 244 | if let Ok(timeout) = self.config.get::("hproxy_http_timeout_ms") { 245 | timeout 246 | } else { 247 | 10_000 248 | } 249 | } 250 | 251 | pub fn quic_trace_log(&self) 
-> bool { 252 | self.config.get_bool("quic_trace_log").unwrap_or(false) 253 | } 254 | 255 | pub fn worker_num(&self) -> usize { 256 | let worker_num = match self.get_int("runtime_worker_num") { 257 | Ok(num) => num as usize, 258 | Err(_) => num_cpus::get(), 259 | }; 260 | log::info!("tokio runtime_worker_num is: {}", worker_num); 261 | worker_num 262 | } 263 | 264 | // default 8k 265 | pub fn tokio_codec_size(&self) -> usize { 266 | self.get_int("tokio_codec_size") 267 | .map(|size| size as usize) 268 | .unwrap_or(8 * 1024) 269 | } 270 | 271 | pub fn log_directory(&self) -> String { 272 | self.get_str("log_directory") 273 | .unwrap_or_else(|_| "/tmp/fplink".to_owned()) 274 | } 275 | } 276 | 277 | impl std::ops::Deref for FPConfig { 278 | type Target = Config; 279 | 280 | fn deref(&self) -> &Self::Target { 281 | &self.config 282 | } 283 | } 284 | 285 | impl Default for FPConfig { 286 | fn default() -> Self { 287 | let mut config = Config::default(); 288 | let config_path = "./resources/fplink_config.toml"; 289 | if std::path::Path::new(config_path).exists() { 290 | let file = File::with_name(config_path).required(false); 291 | log::info!("config load from file {:?}", file); 292 | let _ = config.merge(file).expect("merge config file error!"); 293 | } else { 294 | log::info!("{} not found!", config_path); 295 | } 296 | 297 | let _ = config 298 | .merge(Environment::with_prefix("fplink")) 299 | .expect("merge env config error!"); 300 | 301 | Self { 302 | config: Arc::new(config), 303 | } 304 | } 305 | } 306 | #[cfg(test)] 307 | mod tests { 308 | use super::*; 309 | 310 | #[test] 311 | fn test_service_map() { 312 | let mut config = Config::new(); 313 | let _ = config.set("service_1", " ns1#name1#fallback1 "); 314 | let _ = config.set("service_2", " ns2#name2#fallback2 "); 315 | let _ = config.set("service_3", " ns3#name3 "); 316 | let _ = config.set("service_4", " ## "); 317 | let _ = config.set("service_5", " #name5# "); 318 | let _ = config.set("service_6", " #### "); 
319 | let _ = config.set("service_7", ""); 320 | let _ = config.set("service_8", " ns8##"); 321 | 322 | let config = FPConfig::new_with_config(config); 323 | let service_map = config.service_map(); 324 | 325 | assert_eq!(service_map.len(), 3); 326 | assert_eq!(service_map.get("ns1").unwrap().etcd_name, "name1"); 327 | assert_eq!( 328 | service_map.get("ns1").unwrap().fallback_addr, 329 | Some("fallback1".to_string()) 330 | ); 331 | 332 | assert_eq!( 333 | service_map.get("ns3").unwrap().fallback_addr.is_none(), 334 | true 335 | ); 336 | } 337 | 338 | #[test] 339 | fn test_hproxy_map() { 340 | let mut config = Config::new(); 341 | let _ = config.set("hproxy_1", "http://www.baidu.com#http://10.172.19.11#2000"); 342 | let _ = config.set("hproxy_2", " http://www.souhu.com# http://10.172.20.11 # "); 343 | let _ = config.set("hproxy_3", "#http://10.172.19.18#1000"); 344 | let _ = config.set("hproxy_4", "http://www.bing.com##1000"); 345 | let _ = config.set("hproxy_5", " ##"); 346 | let _ = config.set("hproxy_6", " ##1000"); 347 | let _ = config.set("hproxy_7", " http://www.google.com# http://10.172.20.12 "); 348 | 349 | let config = FPConfig::new_with_config(config); 350 | let hproxy_map: HashMap = config.hproxy_map(); 351 | 352 | assert_eq!(hproxy_map.len(), 3); 353 | assert_eq!( 354 | "http://10.172.19.11", 355 | hproxy_map.get("http://www.baidu.com").unwrap().rewrite_url 356 | ); 357 | assert_eq!( 358 | 2000 as u64, 359 | hproxy_map.get("http://www.baidu.com").unwrap().timeout 360 | ); 361 | 362 | assert_eq!( 363 | "http://10.172.20.11", 364 | hproxy_map.get("http://www.souhu.com").unwrap().rewrite_url 365 | ); 366 | assert_eq!( 367 | 10000 as u64, 368 | hproxy_map.get("http://www.souhu.com").unwrap().timeout 369 | ); 370 | 371 | assert_eq!( 372 | "http://10.172.20.12", 373 | hproxy_map.get("http://www.google.com").unwrap().rewrite_url 374 | ); 375 | assert_eq!( 376 | 10000 as u64, 377 | hproxy_map.get("http://www.google.com").unwrap().timeout 378 | ); 379 | } 380 | } 
381 | -------------------------------------------------------------------------------- /server-base/src/conn.rs: -------------------------------------------------------------------------------- 1 | use crate::context::ConnContext; 2 | use crate::Protocol; 3 | use server_proto::proto::Message; 4 | use std::sync::Arc; 5 | 6 | #[derive(Clone)] 7 | pub struct Conn { 8 | pub inner: Arc, 9 | } 10 | 11 | impl Conn { 12 | pub fn id(&self) -> &String { 13 | &self.inner.conn_id 14 | } 15 | 16 | pub fn protocol(&self) -> &Protocol { 17 | &self.inner.proto 18 | } 19 | 20 | pub fn create_time(&self) -> u64 { 21 | self.inner.create_time 22 | } 23 | 24 | #[allow(clippy::result_unit_err)] 25 | pub fn push(&self, message: Message) -> Result<(), ()> { 26 | self.inner.sender.send(message) 27 | } 28 | } 29 | 30 | impl PartialEq for Conn { 31 | fn eq(&self, other: &Self) -> bool { 32 | self.inner.conn_id == other.inner.conn_id 33 | } 34 | } 35 | 36 | impl Eq for Conn {} 37 | 38 | impl std::fmt::Debug for Conn { 39 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 40 | write!( 41 | f, 42 | "Conn {{ id: {} ,proto: {} }}", 43 | self.inner.conn_id, self.inner.proto 44 | ) 45 | } 46 | } 47 | 48 | impl std::hash::Hash for Conn { 49 | fn hash(&self, state: &mut H) { 50 | self.inner.conn_id.hash(state); 51 | } 52 | } 53 | 54 | impl From> for Conn { 55 | fn from(context: Arc) -> Self { 56 | Self { inner: context } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /server-base/src/context.rs: -------------------------------------------------------------------------------- 1 | use crate::conn::Conn; 2 | use crate::Protocol; 3 | use crate::{LifeCycle, SendMessage}; 4 | use server_proto::proto::Message; 5 | use std::net::SocketAddr; 6 | use std::sync::Arc; 7 | 8 | pub struct ConnContext { 9 | pub proto: Protocol, 10 | pub timeout: u64, 11 | pub create_time: u64, 12 | pub conn_id: String, 13 | pub sender: Box, 14 | pub lifecycle: 
Arc, 15 | pub peer_addr: Option, 16 | } 17 | 18 | impl ConnContext { 19 | #[allow(clippy::result_unit_err)] 20 | pub fn send(&self, msg: Message) -> Result<(), ()> { 21 | self.sender.send(msg) 22 | } 23 | 24 | /// - accept incoming message from socket 25 | /// - route this message to upstream by server-core 26 | pub fn accept_message(&self, msg: Message) { 27 | self.lifecycle 28 | .on_message_incoming(&self.conn_id, &self.proto, msg); 29 | } 30 | 31 | pub fn on_conn_create(self: &Arc) { 32 | self.lifecycle.on_conn_create(Conn::from(self.clone())); 33 | } 34 | 35 | pub fn on_conn_destroy(self: &Arc) { 36 | self.lifecycle.on_conn_destroy(Conn::from(self.clone())); 37 | } 38 | 39 | pub fn should_timeout(&self) -> bool { 40 | self.lifecycle.should_timeout() 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /server-base/src/id_gen.rs: -------------------------------------------------------------------------------- 1 | use crate::Protocol; 2 | use parking_lot::Mutex; 3 | use std::sync::Arc; 4 | 5 | #[derive(Clone)] 6 | pub struct IdGen { 7 | inner: Arc, 8 | } 9 | struct Inner { 10 | pub node_id: String, 11 | pub cur_seq: Mutex, 12 | pub cur_ts: Mutex, 13 | } 14 | 15 | impl Inner { 16 | pub fn generate(&self, proto: Protocol) -> String { 17 | let ts = crate::now_ts_milli(); 18 | let mut cur_seq = self.cur_seq.lock(); 19 | let mut cur_ts = self.cur_ts.lock(); 20 | if *cur_ts == ts { 21 | *cur_seq += 1; 22 | } else { 23 | *cur_seq = 0; 24 | *cur_ts = ts; 25 | } 26 | 27 | format!("{}_{}_{}_{}", self.node_id, ts, *cur_seq, proto) 28 | } 29 | } 30 | 31 | impl IdGen { 32 | pub fn new(node_id: String) -> Self { 33 | Self { 34 | inner: Arc::new(Inner { 35 | node_id, 36 | cur_seq: Mutex::new(0), 37 | cur_ts: Mutex::new(0), 38 | }), 39 | } 40 | } 41 | 42 | pub fn node_id(conn_id: &str) -> Option { 43 | conn_id.split('_').next().map(|node_id| node_id.to_owned()) 44 | } 45 | 46 | pub fn conn_id(&self, proto: Protocol) -> String { 47 | 
self.inner.generate(proto) 48 | } 49 | } 50 | 51 | #[cfg(test)] 52 | mod tests { 53 | use super::*; 54 | 55 | #[test] 56 | fn test_generate_extract() { 57 | let node_id = String::from("this-is-fake-node-id"); 58 | let id_generator = IdGen::new(node_id.clone()); 59 | let generated_conn_id = id_generator.conn_id(Protocol::Quic); 60 | let extract_node_id = IdGen::node_id(&generated_conn_id); 61 | 62 | assert_eq!(Some(node_id), extract_node_id); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /server-base/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | mod config; 5 | mod conn; 6 | mod context; 7 | mod id_gen; 8 | mod node; 9 | mod utils; 10 | 11 | pub use crate::config::*; 12 | pub use crate::utils::*; 13 | pub use conn::Conn; 14 | pub use context::ConnContext; 15 | pub use id_gen::IdGen; 16 | pub use minstant; 17 | pub use node::{RegistryNode, ServerNode, ServiceNode}; 18 | pub use server_proto as codec; 19 | pub use server_proto::proto; 20 | pub use server_proto::tonic; 21 | pub use tokio; 22 | pub use tonic::transport::Channel; 23 | 24 | use async_trait::async_trait; 25 | use parking_lot::RwLock; 26 | use server_proto::proto::*; 27 | use std::net::SocketAddr; 28 | 29 | lazy_static! 
{ 30 | pub static ref USER_PORT_LISTEN: RwLock = RwLock::new(false); 31 | pub static ref USER_CONN_STOP: RwLock = RwLock::new(false); 32 | } 33 | 34 | #[async_trait] 35 | pub trait PushConn: Send + Sync { 36 | async fn push(&self, req: EmitSidReq); 37 | } 38 | 39 | #[async_trait] 40 | pub trait Dispatch: Send + Sync { 41 | async fn dispatch(&self, namespace: String, request: MessageReq) -> bool; 42 | } 43 | 44 | #[async_trait] 45 | pub trait BuiltinService: Send + Sync { 46 | async fn on_message(&self, conn_id: &str, peer_addr: Option, message: Message); 47 | } 48 | 49 | #[async_trait] 50 | pub trait CoreOperation: Send + Sync + Clone + 'static { 51 | async fn join(&self, request: JoinReq) -> bool; 52 | 53 | async fn leave(&self, request: LeaveReq) -> bool; 54 | 55 | async fn bulk_join(&self, request: BulkJoinReq) -> bool; 56 | 57 | async fn emit(&self, request: EmitReq) -> EmitResp; 58 | 59 | async fn emit_sid(&self, request: EmitSidReq) -> bool; 60 | 61 | async fn get_conn_rooms(&self, request: ConnRoomReq) -> Vec; 62 | 63 | async fn get_rooms(&self, request: GetRoomsReq) -> Vec; 64 | } 65 | 66 | pub trait LifeCycle: Send + Sync { 67 | fn new_conn_id(&self, protocol: Protocol) -> String; 68 | 69 | fn on_conn_create(&self, conn: Conn); 70 | 71 | fn on_message_incoming(&self, conn_id: &str, protocol: &Protocol, message: Message); 72 | 73 | fn on_conn_destroy(&self, conn: Conn); 74 | 75 | fn should_timeout(&self) -> bool; 76 | } 77 | 78 | pub trait SendMessage: Send + Sync { 79 | #[allow(clippy::result_unit_err)] 80 | fn send(&self, msg: Message) -> Result<(), ()>; 81 | } 82 | 83 | pub trait RecvMessage: Send + Sync { 84 | type Item; 85 | 86 | #[allow(clippy::result_unit_err)] 87 | fn recv(&self, item: Self::Item) -> Result, ()>; 88 | } 89 | 90 | #[derive(Clone, Debug, PartialEq, Copy)] 91 | pub enum Protocol { 92 | Tcp, 93 | Websocket, 94 | Quic, 95 | } 96 | 97 | // log online time will use this 98 | impl std::fmt::Display for Protocol { 99 | fn fmt(&self, f: &mut 
std::fmt::Formatter) -> std::fmt::Result {
        // Short lowercase names; these end up in online-time log lines.
        match *self {
            Protocol::Tcp => write!(f, "tcp"),
            Protocol::Websocket => write!(f, "ws"),
            Protocol::Quic => write!(f, "quic"),
        }
    }
}

// ---- server-base/src/node.rs ----
use serde::{Deserialize, Serialize};
use std::hash::{Hash, Hasher};

/// An fplink server node registered in the cluster.
#[derive(Clone, Eq, Debug, Default, Serialize, Deserialize)]
pub struct ServerNode {
    pub node_id: String,
    pub hostname: String,
    pub port: u16,            // cluster grpc port
    pub outer_grpc_port: u16, // service grpc port
    pub created_ts: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub zone: Option<String>,
}

/// Address entry published to the registry.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct RegistryNode {
    pub listen_addr: String,
    pub use_ssl: bool,
}

impl RegistryNode {
    pub fn new<T: Into<String>>(addr: T, use_ssl: bool) -> Self {
        let addr = addr.into();
        Self {
            listen_addr: addr,
            use_ssl,
        }
    }
}

/// A node as seen by the external service registry (camelCase JSON;
/// unknown fields are ignored on deserialization).
#[derive(Clone, Eq, Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ServiceNode {
    pub instance_id: String,
    pub service_id: String,
    pub host: String,
    pub port: u16,
    pub created_ts: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub zone: Option<String>,
}

impl ServiceNode {
    /// Builds a registry entry from a cluster node, exposing its outer
    /// (service) grpc port.
    pub fn from_fplink<T: Into<String>>(service_id: T, node: ServerNode) -> Self {
        Self::new(
            service_id.into(),
            node.node_id,
            node.zone,
            node.hostname,
            node.outer_grpc_port,
            Some(node.created_ts),
        )
    }

    pub fn new(
        service_id: String,
        instance_id: String,
        zone: Option<String>,
        host: String,
        port: u16,
        created_ts: Option<u64>,
    ) -> Self {
        Self {
            service_id,
            instance_id,
            zone,
            host,
            port,
            created_ts,
        }
    }
}

/// Server-node identity is the node id alone.
impl PartialEq for ServerNode {
    fn eq(&self, other: &ServerNode) -> bool {
        self.node_id == other.node_id
    }
}

impl Hash for ServerNode {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.node_id.hash(state);
    }
}

impl ServerNode {
    pub fn new<T: Into<String>>(
        node_id: T,
        listen_host: T,
        listen_port: u16,
        backend_listen_port: u16,
        zone: Option<String>,
    ) -> Self {
        Self {
            node_id: node_id.into(),
            hostname: listen_host.into(),
            port: listen_port,
            outer_grpc_port: backend_listen_port,
            created_ts: crate::now_ts_milli(),
            zone,
        }
    }
}

/// Service-node identity is the instance id alone.
impl PartialEq for ServiceNode {
    fn eq(&self, other: &ServiceNode) -> bool {
        self.instance_id == other.instance_id
    }
}

impl Hash for ServiceNode {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.instance_id.hash(state);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json;

    #[test]
    fn test_deserlize() {
        let string = "{\"serviceId\":\"service\",\"host\":\"10.96.102.109\",\"port\":8080,\"secure\":false,\"uri\":null,\"scheme\":\"http\",\"instanceId\":\"10.96.102.109\", \"unknown_field\": \"\"}";
        let node: ServiceNode = serde_json::from_str(string).unwrap();
        assert_eq!(node.service_id, "service".to_owned());
        assert_eq!(node.zone, None);
    }
}

// ---- server-base/src/utils.rs ----
use parking_lot::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
use std::time::Duration;

/// Current unix time in milliseconds.
pub fn now_ts_milli() -> u64 {
    now_ts_micro() / 1000 // milliseconds
}

/// Current unix time in microseconds (minstant anchor + instant).
pub fn now_ts_micro() -> u64 {
    let anchor =
minstant::Anchor::new(); 10 | let now = minstant::Instant::now(); 11 | let ts = now.as_unix_nanos(&anchor); // nanoseconds 12 | ts / 1000 // microseconds 13 | } 14 | 15 | lazy_static! { 16 | static ref MAX_LOCK_DURATION: Duration = Duration::from_secs(20); 17 | } 18 | 19 | /// shortcut wl and rl. 20 | pub trait HandyRwLock { 21 | fn wl(&self) -> RwLockWriteGuard<'_, T>; 22 | fn rl(&self) -> RwLockReadGuard<'_, T>; 23 | } 24 | 25 | impl HandyRwLock for RwLock { 26 | fn wl(&self) -> RwLockWriteGuard<'_, T> { 27 | match self.try_write_for(*MAX_LOCK_DURATION) { 28 | Some(guard) => guard, 29 | None => panic!("lock max duration exceed, maybe deadlocked!!"), 30 | } 31 | } 32 | 33 | fn rl(&self) -> RwLockReadGuard<'_, T> { 34 | match self.try_read_for(*MAX_LOCK_DURATION) { 35 | Some(guard) => guard, 36 | None => panic!("lock max duration exceed, maybe deadlocked!!"), 37 | } 38 | } 39 | } 40 | 41 | pub trait HandyMutex { 42 | fn l(&self) -> MutexGuard<'_, T>; 43 | } 44 | 45 | impl HandyMutex for Mutex { 46 | fn l(&self) -> MutexGuard<'_, T> { 47 | match self.try_lock_for(*MAX_LOCK_DURATION) { 48 | Some(guard) => guard, 49 | None => panic!("lock max duration exceed, maybe deadlocked!!"), 50 | } 51 | } 52 | } 53 | 54 | #[cfg(test)] 55 | mod tests { 56 | use super::*; 57 | 58 | #[test] 59 | fn test_now_ts() { 60 | assert!(now_ts_milli() > 0); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /server-core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server-core" 3 | version = "0.1.0" 4 | edition = "2021" 5 | resolver = "2" 6 | 7 | [dependencies] 8 | log = "0.4" 9 | rand = "^0.8" 10 | parking_lot = "0.11" 11 | bytes = "1.0" 12 | async-trait = "0.1" 13 | dashmap = { version = "4.0", features = ["raw-api"]} 14 | fxhash = "0.2" 15 | 16 | server-base = { path = "../server-base" } -------------------------------------------------------------------------------- 
/server-core/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod lifecycle; 2 | mod operator; 3 | mod repository; 4 | 5 | pub use crate::operator::CoreOperator; 6 | -------------------------------------------------------------------------------- /server-core/src/lifecycle.rs: -------------------------------------------------------------------------------- 1 | use crate::CoreOperator; 2 | use server_base::{proto::Message, tokio}; 3 | use server_base::{BuiltinService, HandyRwLock, IdGen, Protocol}; 4 | use server_base::{Conn, LifeCycle}; 5 | 6 | use std::collections::HashMap; 7 | use std::net::SocketAddr; 8 | use std::sync::Arc; 9 | 10 | struct Inner { 11 | operator: CoreOperator, 12 | sid_gen: IdGen, 13 | builtin_services: Arc>>, 14 | } 15 | 16 | impl Inner { 17 | pub fn new( 18 | operator: CoreOperator, 19 | sid_gen: IdGen, 20 | builtin_services: Arc>>, 21 | ) -> Self { 22 | Self { 23 | operator, 24 | sid_gen, 25 | builtin_services, 26 | } 27 | } 28 | } 29 | 30 | #[derive(Clone)] 31 | pub struct ConnLifeCycle { 32 | inner: Arc, 33 | } 34 | 35 | impl ConnLifeCycle { 36 | pub fn new( 37 | operator: CoreOperator, 38 | sid_gen: IdGen, 39 | builtin_services: Arc>>, 40 | ) -> Self { 41 | let inner = Arc::new(Inner::new(operator, sid_gen, builtin_services)); 42 | Self { inner } 43 | } 44 | 45 | fn peer_addr(&self, conn_id: &str) -> Option { 46 | self.inner 47 | .operator 48 | .conn(conn_id) 49 | .and_then(|conn| conn.inner.peer_addr) 50 | } 51 | } 52 | 53 | impl LifeCycle for ConnLifeCycle { 54 | fn new_conn_id(&self, protocol: Protocol) -> String { 55 | self.inner.sid_gen.conn_id(protocol) 56 | } 57 | 58 | /// connection create, connection can bind id from return value 59 | fn on_conn_create(&self, conn: Conn) { 60 | self.inner.operator.reg_conn(conn); 61 | } 62 | 63 | fn on_message_incoming(&self, conn_id: &str, protocol: &Protocol, message: Message) { 64 | let message = message; 65 | let namespace = 
message.namespace.clone(); 66 | 67 | // perf: do not create temp String every time 68 | match protocol { 69 | Protocol::Tcp => { 70 | // meter!("tcp_incoming_message_meter", namespace); 71 | } 72 | Protocol::Websocket => { 73 | // meter!("ws_incoming_message_meter", namespace); 74 | } 75 | Protocol::Quic => { 76 | // meter!("quic_incoming_message_meter", namespace); 77 | } 78 | } 79 | let conn_id_string = conn_id.to_owned(); 80 | 81 | if let Some(builtin_service) = self.inner.builtin_services.get(&namespace) { 82 | let service_clone = builtin_service.clone(); 83 | let peer_addr = self.peer_addr(conn_id); 84 | tokio::spawn(async move { 85 | service_clone 86 | .on_message(&conn_id_string, peer_addr, message) 87 | .await; 88 | }); 89 | } else { 90 | self.inner.operator.dispatch_message(conn_id, message); 91 | } 92 | } 93 | 94 | fn on_conn_destroy(&self, conn: Conn) { 95 | self.inner.operator.unreg_conn(&conn); 96 | } 97 | 98 | fn should_timeout(&self) -> bool { 99 | let conn_stop = server_base::USER_CONN_STOP.rl(); 100 | *conn_stop 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /server-core/src/operator.rs: -------------------------------------------------------------------------------- 1 | use super::repository::MemoryRepository; 2 | use async_trait::async_trait; 3 | use dashmap::DashMap; 4 | use fxhash::FxBuildHasher; 5 | use server_base::proto::{ 6 | BulkJoinReq, ConnRoomReq, ConnRooms, EmitReq, EmitResp, EmitSidReq, EmitStatus, GetRoomsReq, 7 | JoinReq, LeaveReq, Message, MessageReq, Rooms, 8 | }; 9 | use server_base::{tokio, Conn, CoreOperation, Dispatch, PushConn}; 10 | use std::borrow::ToOwned; 11 | use std::collections::{HashMap, HashSet}; 12 | use std::sync::Arc; 13 | 14 | type ConnectionId = String; 15 | 16 | #[derive(Clone)] 17 | pub struct CoreOperator { 18 | inner: Arc, 19 | } 20 | 21 | struct Inner { 22 | conns: DashMap, 23 | repository: MemoryRepository, 24 | dispatcher: Box, 25 | } 26 | 27 | impl 
CoreOperator { 28 | pub fn new(dispatcher: Box) -> Self { 29 | let conns = DashMap::default(); 30 | Self { 31 | inner: Arc::new(Inner { 32 | repository: MemoryRepository::new(), 33 | conns, 34 | dispatcher, 35 | }), 36 | } 37 | } 38 | } 39 | 40 | impl CoreOperator { 41 | pub fn conn(&self, conn_id: &str) -> Option { 42 | self.inner.conns.get(conn_id).map(|conn| (*conn).clone()) 43 | } 44 | 45 | fn emit_to( 46 | &self, 47 | namespace: &str, 48 | message: Message, 49 | conn: &Conn, 50 | hitting: &HashSet, 51 | ) -> Option<(bool, HashSet, EmitStatus)> { 52 | let mut sent_rooms: HashSet = HashSet::new(); 53 | if self.push_to_conn(conn.id(), message).is_ok() { 54 | sent_rooms = &sent_rooms | hitting; 55 | let mut status = EmitStatus { 56 | rooms: None, 57 | sent: true, 58 | }; 59 | if let Some(c) = self.conn_rooms(conn, namespace) { 60 | status.rooms = Some(c); 61 | } 62 | Some((true, sent_rooms, status)) 63 | } else { 64 | // sender use unbounded_channel, send will not return error unless closed 65 | // only when connection is gone, push failed is ok 66 | Option::None 67 | } 68 | } 69 | 70 | //---------------connection outgoing operation----------------------------- 71 | 72 | #[allow(clippy::result_unit_err)] 73 | pub fn push_to_conn(&self, conn_id: &str, msg: Message) -> Result<(), ()> { 74 | log::trace!("push_to_conn: conn_id: {}, {:?}", conn_id, msg); 75 | if let Some(conn) = self.conn(conn_id) { 76 | conn.push(msg) 77 | } else { 78 | log::warn!("Connection Id:{} not found", conn_id); 79 | Err(()) 80 | } 81 | } 82 | 83 | //---------------connection incoming operation----------------------------- 84 | 85 | pub fn reg_conn(&self, conn: Conn) { 86 | log::info!("reg_conn||conn_id={}", conn.id()); 87 | self.inner.conns.insert(conn.id().to_string(), conn); 88 | } 89 | 90 | pub fn unreg_conn(&self, conn: &Conn) { 91 | log::info!("unreg_conn||conn_id={}", conn.id()); 92 | self.inner.repository.remove_channels(conn); 93 | self.inner.conns.remove(conn.id()); 94 | } 95 | 96 | 
pub fn dispatch_message(&self, conn_id: &str, message: Message) { 97 | let namespace = message.namespace.clone(); 98 | let channel_map = self.list_channels(conn_id, &namespace); 99 | 100 | let rooms: HashMap = if let Some(rooms) = channel_map { 101 | rooms 102 | .iter() 103 | .map(|(k, vs)| { 104 | let cs = Rooms { 105 | rooms: vs.iter().cloned().collect::>(), 106 | }; 107 | (k.clone(), cs) 108 | }) 109 | .collect() 110 | } else { 111 | Default::default() 112 | }; 113 | let request = MessageReq { 114 | sid: conn_id.to_string(), 115 | message: Some(Message { 116 | namespace: namespace.clone(), 117 | metadata: message.metadata, 118 | path: message.path, 119 | body: message.body.to_vec(), 120 | expire_at: None, 121 | }), 122 | rooms, 123 | trace: None, 124 | }; 125 | let s = self.clone(); 126 | tokio::spawn(async move { 127 | s.inner 128 | .dispatcher 129 | .dispatch(namespace.clone(), request) 130 | .await; 131 | }); 132 | } 133 | 134 | //---------------channel operation----------------------------- 135 | 136 | fn join_operation( 137 | &self, 138 | conn_id: &str, 139 | namespace: &str, 140 | room_prefix: &str, 141 | channel: &str, 142 | ) -> bool { 143 | if let Some(conn) = self.conn(conn_id) { 144 | self.inner 145 | .repository 146 | .join_room(conn, namespace, room_prefix, channel); 147 | log::info!( 148 | "_fplink_join||conn_id={}||ns={}||cf={}|channel={}", 149 | conn_id, 150 | namespace, 151 | room_prefix, 152 | channel 153 | ); 154 | return true; 155 | } 156 | false 157 | } 158 | 159 | fn bulk_join_operation( 160 | &self, 161 | conn_id: &str, 162 | namespace: &str, 163 | rooms: &HashMap, 164 | ) -> bool { 165 | if let Some(conn) = self.conn(conn_id) { 166 | self.inner.repository.bulk_join_room(conn, namespace, rooms); 167 | return true; 168 | } 169 | false 170 | } 171 | 172 | pub fn leave_operation(&self, conn_id: &str, namespace: &str, k: &str, v: &str) -> bool { 173 | if let Some(conn) = self.conn(conn_id) { 174 | self.inner.repository.leave_room(&conn, 
namespace, k, v); 175 | return true; 176 | } 177 | false 178 | } 179 | 180 | pub fn search_by_room( 181 | &self, 182 | namespace: &str, 183 | room_prefix: &str, 184 | rooms: &[String], 185 | ) -> HashMap> { 186 | self.inner 187 | .repository 188 | .search_by_room(namespace, room_prefix, Some(rooms)) 189 | } 190 | 191 | pub fn list_channels( 192 | &self, 193 | sid: &str, 194 | namespace: &str, 195 | ) -> Option>> { 196 | self.inner.repository.list_channels(sid, namespace) 197 | } 198 | 199 | pub fn list_ns_channels( 200 | &self, 201 | conn_id: &str, 202 | ) -> Option>>> { 203 | self.inner.repository.list_ns_channels(conn_id) 204 | } 205 | 206 | fn conn_rooms(&self, conn: &Conn, namespace: &str) -> Option { 207 | self.list_channels(conn.id(), namespace) 208 | .map(|channel_map| ConnRooms { 209 | sid: conn.id().to_owned(), 210 | rooms: channel_map 211 | .iter() 212 | .map(|(k, vs)| { 213 | let ts = Rooms { 214 | rooms: vs.iter().cloned().collect::>(), 215 | }; 216 | (k.clone(), ts) 217 | }) 218 | .collect(), 219 | }) 220 | } 221 | } 222 | 223 | #[async_trait] 224 | impl CoreOperation for CoreOperator { 225 | async fn join(&self, request: JoinReq) -> bool { 226 | let room_prefix = room_prefix(&request.room_prefix); 227 | let (connection_id, namespace, channel) = (&request.sid, &request.namespace, &request.room); 228 | self.join_operation(connection_id, namespace, room_prefix, channel) 229 | } 230 | 231 | async fn leave(&self, request: LeaveReq) -> bool { 232 | let room_prefix = room_prefix(&request.room_prefix); 233 | let (connection_id, namespace, channel) = (&request.sid, &request.namespace, &request.room); 234 | self.leave_operation(connection_id, namespace, room_prefix, channel) 235 | } 236 | 237 | async fn bulk_join(&self, request: BulkJoinReq) -> bool { 238 | let (connection_id, namespace, rooms) = (&request.sid, &request.namespace, &request.rooms); 239 | self.bulk_join_operation(connection_id, namespace, rooms) 240 | } 241 | 242 | async fn emit_sid(&self, 
request: EmitSidReq) -> bool { 243 | let message = match request.message { 244 | Some(m) => m, 245 | None => return false, 246 | }; 247 | self.push_to_conn(&request.sid, message).is_ok() 248 | } 249 | 250 | async fn emit(&self, request: EmitReq) -> EmitResp { 251 | let message = match request.message { 252 | Some(message) => message, 253 | None => { 254 | return EmitResp { 255 | success: false, 256 | rooms: vec![], 257 | status: vec![], 258 | }; 259 | } 260 | }; 261 | let namespace = &message.namespace; 262 | let room_prefix = match request.room_prefix { 263 | Some(ref cf) => cf, 264 | None => "default", 265 | }; 266 | let conns_with_hitting = self.search_by_room(namespace, room_prefix, &request.rooms); 267 | let mut success = true; 268 | let mut status: Vec = vec![]; 269 | let mut sent_rooms = HashSet::new(); // TODO allocate before updating element 270 | 271 | for (conn, hitting) in conns_with_hitting { 272 | if let Some((is_success, sent, s)) = 273 | self.emit_to(namespace, message.clone(), &conn, &hitting) 274 | { 275 | success = success && is_success; 276 | status.push(s); 277 | sent_rooms = &sent_rooms | &sent; 278 | } 279 | } 280 | 281 | EmitResp { 282 | success, 283 | rooms: sent_rooms.into_iter().collect::>(), 284 | status, 285 | } 286 | } 287 | 288 | async fn get_conn_rooms(&self, request: ConnRoomReq) -> Vec { 289 | let namespace = &request.namespace; 290 | let channel = &request.room; 291 | let room_prefix = room_prefix(&request.room_prefix); 292 | 293 | let hit_conns = self.search_by_room(namespace, room_prefix, &[channel.to_owned()]); 294 | hit_conns 295 | .iter() 296 | .filter_map(|(conn, _)| self.conn_rooms(conn, namespace)) 297 | .collect::>() 298 | } 299 | 300 | async fn get_rooms(&self, request: GetRoomsReq) -> Vec { 301 | let ns = &request.namespace; 302 | let room_prefix = room_prefix(&request.room_prefix); 303 | let mut rooms_ref = None; 304 | if let Some(channels) = &request.rooms { 305 | rooms_ref = Some(channels.rooms.as_slice()) 306 | } 
307 | let map: HashMap> = 308 | self.inner 309 | .repository 310 | .search_by_room(ns, room_prefix, rooms_ref); 311 | 312 | let mut exist_channels = HashSet::new(); 313 | for vals in map.values() { 314 | for val in vals { 315 | exist_channels.insert(val.to_owned()); 316 | } 317 | } 318 | 319 | exist_channels.into_iter().collect() 320 | } 321 | } 322 | 323 | #[async_trait] 324 | impl PushConn for CoreOperator { 325 | async fn push(&self, req: EmitSidReq) { 326 | self.emit_sid(req).await; 327 | } 328 | } 329 | 330 | fn room_prefix(room_prefix: &Option) -> &str { 331 | match room_prefix { 332 | Some(ref cf) => cf, 333 | None => "default", 334 | } 335 | } 336 | -------------------------------------------------------------------------------- /server-core/src/repository.rs: -------------------------------------------------------------------------------- 1 | use dashmap::DashMap; 2 | use fxhash::FxBuildHasher; 3 | use server_base::Conn; 4 | use std::collections::{HashMap, HashSet}; 5 | use std::sync::Arc; 6 | 7 | fn gen_index_key(namespace: &str, k: &str, v: &str) -> String { 8 | format!("{}::{}::{}", namespace, k, v) 9 | } 10 | 11 | type ConnId = String; 12 | type Namespace = String; 13 | type RoomPrefix = String; 14 | type Rooms = HashSet; 15 | type IndexKey = String; 16 | type NsChannelMap = HashMap>; 17 | 18 | #[derive(Debug, Clone)] 19 | pub struct MemoryRepository { 20 | storage: Arc>, 21 | indexing: Arc, FxBuildHasher>>, 22 | } 23 | 24 | impl MemoryRepository { 25 | pub fn new() -> Self { 26 | MemoryRepository { 27 | storage: Arc::new(DashMap::default()), 28 | indexing: Arc::new(DashMap::default()), 29 | } 30 | } 31 | 32 | pub fn search_by_room( 33 | &self, 34 | ns: &str, 35 | room_prefix: &str, 36 | rooms: Option<&[String]>, 37 | ) -> HashMap { 38 | let mut rv: HashMap> = HashMap::new(); 39 | if rooms.is_none() { 40 | return rv; 41 | } 42 | for channel in rooms.unwrap() { 43 | if let Some(conns) = self.indexing.get(&gen_index_key(ns, room_prefix, channel)) { 44 | 
conns.iter().for_each(|conn| { 45 | rv.entry(conn.clone()).or_default().insert(channel.clone()); 46 | }); 47 | } 48 | } 49 | rv 50 | } 51 | 52 | #[allow(dead_code)] 53 | pub fn search_all_channels(&self, ns: &str, room_prefix: &str) -> Rooms { 54 | let mut rooms: HashSet = HashSet::new(); 55 | self.storage.iter().for_each(|ns_channels| { 56 | if let Some(ns_channels) = ns_channels.get(ns) { 57 | if let Some(c) = ns_channels.get(room_prefix) { 58 | rooms.extend(c.clone()) 59 | } 60 | } 61 | }); 62 | rooms 63 | } 64 | 65 | pub fn join_room(&self, conn: Conn, ns: &str, room_prefix: &str, channel: &str) { 66 | let mut channel_map = HashMap::new(); 67 | channel_map.insert(room_prefix.to_owned(), channel.to_owned()); 68 | self.store(&conn, ns, &channel_map); 69 | self.index(conn, ns, room_prefix, channel); 70 | } 71 | 72 | pub fn leave_room(&self, conn: &Conn, ns: &str, room_prefix: &str, channel: &str) { 73 | let mut removed = None; 74 | if let Some(mut ns_channels) = self.storage.get_mut(conn.id()) { 75 | if let Some(rooms) = ns_channels.get_mut(ns) { 76 | if let Some(rooms) = rooms.get_mut(room_prefix) { 77 | if rooms.remove(channel) { 78 | removed = Some(channel); 79 | } 80 | } 81 | } 82 | } 83 | if let Some(removed_channel) = removed { 84 | self.remove_index(conn, ns, room_prefix, removed_channel); 85 | } 86 | } 87 | 88 | pub fn bulk_join_room(&self, conn: Conn, ns: &str, rooms: &HashMap) { 89 | self.store(&conn, ns, rooms); 90 | rooms.iter().for_each(|(room_prefix, channel)| { 91 | self.index(conn.clone(), ns, room_prefix, channel); 92 | }); 93 | } 94 | 95 | pub fn remove_channels(&self, conn: &Conn) { 96 | if let Some((_conn_id, ns_channels)) = self.storage.remove(conn.id()) { 97 | for (ns, rooms) in ns_channels.iter() { 98 | for (room_prefix, rooms) in rooms.iter() { 99 | for channel in rooms.iter() { 100 | self.remove_index(conn, ns, room_prefix, channel); 101 | } 102 | } 103 | } 104 | } 105 | } 106 | 107 | pub fn list_ns_channels(&self, sid: &str) -> Option { 
108 | // too expansive to trace latency here 109 | self.storage 110 | .get(sid) 111 | .map(|ns_channels| (*ns_channels).clone()) 112 | } 113 | 114 | pub fn list_channels(&self, sid: &str, ns: &str) -> Option> { 115 | // too expansive to trace latency here 116 | self.storage.get(sid).and_then(|conn| conn.get(ns).cloned()) 117 | } 118 | 119 | fn store(&self, conn: &Conn, ns: &str, rooms: &HashMap) { 120 | let mut ns_channels = self.storage.entry(conn.id().to_owned()).or_default(); 121 | let ns_channel = ns_channels.entry(ns.to_owned()).or_default(); 122 | rooms.iter().for_each(|(room_prefix, channel)| { 123 | let rooms = ns_channel.entry(room_prefix.to_owned()).or_default(); 124 | rooms.insert(channel.to_owned()); 125 | }); 126 | } 127 | 128 | fn index(&self, conn: Conn, ns: &str, chanel_family: &str, channel: &str) { 129 | self.indexing 130 | .entry(gen_index_key(ns, chanel_family, channel)) 131 | .or_default() 132 | .insert(conn); 133 | } 134 | 135 | fn remove_index(&self, conn: &Conn, ns: &str, room_prefix: &str, channel: &str) { 136 | let index_key = gen_index_key(ns, room_prefix, channel); 137 | let mut hit = false; 138 | let mut empty = false; 139 | if let Some(mut conns) = self.indexing.get_mut(&index_key) { 140 | hit = conns.remove(conn); 141 | empty = conns.is_empty(); 142 | } 143 | if empty { 144 | self.indexing.remove(&index_key); 145 | } 146 | if !hit { 147 | // meter!("remove_index_miss") 148 | } 149 | } 150 | } 151 | 152 | #[cfg(test)] 153 | mod tests { 154 | use super::*; 155 | use server_base::{proto::Message, Protocol}; 156 | use server_base::{Conn, ConnContext, LifeCycle, SendMessage}; 157 | 158 | #[test] 159 | fn test_gen_index_key() { 160 | assert_eq!("ns::key::val", gen_index_key("ns", "key", "val")); 161 | } 162 | 163 | #[test] 164 | fn test_bind() { 165 | let m = bind_some(); 166 | assert!(m.storage.contains_key("conn_1")); 167 | assert!(m.storage.contains_key("conn_2")); 168 | assert!(m.indexing.contains_key("ns::uid::2001")); 169 | 
assert!(m.indexing.contains_key("ns::uid::2002")); 170 | assert!(m.indexing.contains_key("ns::os::ios")); 171 | } 172 | 173 | #[test] 174 | fn test_unbind() { 175 | let m = bind_some(); 176 | let conn_2 = mock_conn_sender("conn_2"); 177 | m.leave_room(&conn_2, "ns", "os", "ios"); 178 | m.leave_room(&conn_2, "ns", "uid", "2002"); 179 | assert!(m.storage.contains_key("conn_1")); 180 | assert!(m.storage.contains_key("conn_2")); 181 | assert!(m.indexing.contains_key("ns::uid::2001")); 182 | assert!(!m.indexing.contains_key("ns::uid::2002")); 183 | assert!(!m.indexing.contains_key("ns::os::ios")); 184 | } 185 | 186 | #[test] 187 | fn test_remove_conn_rooms() { 188 | let m = bind_some(); 189 | let conn_2 = mock_conn_sender("conn_2"); 190 | m.remove_channels(&conn_2); 191 | assert!(m.storage.contains_key("conn_1")); 192 | assert!(!m.storage.contains_key("conn_2")); 193 | assert!(m.indexing.contains_key("ns::uid::2001")); 194 | assert!(!m.indexing.contains_key("ns::uid::2002")); 195 | assert!(!m.indexing.contains_key("ns::os::ios")); 196 | } 197 | 198 | #[test] 199 | fn test_list_channels_by_id() { 200 | let m = bind_some(); 201 | let res = m.list_channels("conn_2", "ns").unwrap(); 202 | let mut expect: HashMap> = HashMap::new(); 203 | expect 204 | .entry("uid".to_owned()) 205 | .or_default() 206 | .insert("2002".to_owned()); 207 | expect 208 | .entry("os".to_owned()) 209 | .or_default() 210 | .insert("ios".to_owned()); 211 | assert_eq!(expect, res); 212 | } 213 | 214 | #[test] 215 | fn test_list_all_channels_by_id() { 216 | let m = bind_some(); 217 | let res = m.list_ns_channels("conn_2").unwrap(); 218 | let mut expect: HashMap>> = HashMap::new(); 219 | expect 220 | .entry("ns".to_owned()) 221 | .or_default() 222 | .entry("uid".to_owned()) 223 | .or_default() 224 | .insert("2002".to_owned()); 225 | expect 226 | .entry("ns".to_owned()) 227 | .or_default() 228 | .entry("os".to_owned()) 229 | .or_default() 230 | .insert("ios".to_owned()); 231 | assert_eq!(expect, res); 232 | 
} 233 | 234 | #[test] 235 | fn test_search_all_channelues() { 236 | let m = bind_some(); 237 | let res = m.search_all_channels("ns", "uid"); 238 | let mut expect = HashSet::new(); 239 | expect.insert("2001".to_owned()); 240 | expect.insert("2002".to_owned()); 241 | assert_eq!(expect, res); 242 | } 243 | 244 | #[test] 245 | fn test_search_by_channels() { 246 | let m = bind_some(); 247 | let res = m.search_by_room("ns", "uid", Some(&["2001".to_owned(), "2002".to_owned()])); 248 | let mut expect: HashMap> = HashMap::new(); 249 | expect 250 | .entry(mock_conn_sender("conn_1")) 251 | .or_default() 252 | .insert("2001".to_owned()); 253 | expect 254 | .entry(mock_conn_sender("conn_2")) 255 | .or_default() 256 | .insert("2002".to_owned()); 257 | assert_eq!(expect, res); 258 | } 259 | 260 | fn bind_some() -> MemoryRepository { 261 | let m = MemoryRepository::new(); 262 | let conn = mock_conn_sender("conn_1"); 263 | let ns = "ns"; 264 | m.join_room(conn, ns, "uid", "2001"); 265 | let mut rooms = HashMap::new(); 266 | rooms.insert("uid".to_owned(), "2002".to_owned()); 267 | rooms.insert("os".to_owned(), "ios".to_owned()); 268 | let conn = mock_conn_sender("conn_2"); 269 | m.bulk_join_room(conn, ns, &rooms); 270 | m 271 | } 272 | 273 | #[test] 274 | fn test_index() { 275 | let m = MemoryRepository::new(); 276 | let conn = mock_conn_sender("conn_1"); 277 | let ns = "ns"; 278 | let room_prefix = "key"; 279 | let channel_1 = "value1"; 280 | let channel_2 = "value2"; 281 | m.index(conn.clone(), ns, room_prefix, channel_1); 282 | m.index(conn.clone(), ns, room_prefix, channel_2); 283 | assert!(m 284 | .indexing 285 | .contains_key(&gen_index_key(ns, room_prefix, channel_1))); 286 | assert!(m 287 | .indexing 288 | .contains_key(&gen_index_key(ns, room_prefix, channel_2))); 289 | 290 | m.remove_index(&conn, ns, room_prefix, channel_1); 291 | assert!(!m 292 | .indexing 293 | .contains_key(&gen_index_key(ns, room_prefix, channel_1))); 294 | assert!(m 295 | .indexing 296 | 
.contains_key(&gen_index_key(ns, room_prefix, channel_2))); 297 | } 298 | 299 | #[derive(Default)] 300 | struct MockConnLifeCycle {} 301 | 302 | impl LifeCycle for MockConnLifeCycle { 303 | fn new_conn_id(&self, _protocol: Protocol) -> String { 304 | "some_conn_id".to_owned() 305 | } 306 | 307 | fn on_conn_create(&self, _conn: Conn) {} 308 | 309 | fn on_message_incoming(&self, _conn_id: &str, _protocol: &Protocol, _message: Message) {} 310 | 311 | fn on_conn_destroy(&self, _conn: Conn) {} 312 | 313 | fn should_timeout(&self) -> bool { 314 | false 315 | } 316 | } 317 | 318 | #[derive(Default)] 319 | struct Tcp {} 320 | 321 | impl SendMessage for Tcp { 322 | fn send(&self, _msg: Message) -> Result<(), ()> { 323 | Ok(()) 324 | } 325 | } 326 | 327 | fn mock_conn_sender(conn_id: &str) -> Conn { 328 | Conn { 329 | inner: Arc::new(ConnContext { 330 | proto: Protocol::Tcp, 331 | timeout: 60, 332 | create_time: 1234, 333 | conn_id: conn_id.to_string(), 334 | sender: Box::new(Tcp::default()), 335 | lifecycle: Arc::new(MockConnLifeCycle::default()), 336 | peer_addr: None, 337 | }), 338 | } 339 | } 340 | 341 | #[test] 342 | fn test_concurrent() { 343 | let m = MemoryRepository::new(); 344 | let mut joins = Vec::new(); 345 | let thread_num = 50; 346 | let cycle_num = 100; 347 | for i in 0..thread_num { 348 | let mc = m.clone(); 349 | let start = i * cycle_num; 350 | let end = start + cycle_num; 351 | let join = std::thread::spawn(move || { 352 | for i in start..end { 353 | let conn = mock_conn_sender(&format!("conn_{}", i)); 354 | let mut rooms = HashMap::new(); 355 | rooms.insert("uid".to_owned(), format!("uid_{}", i)); 356 | rooms.insert("os".to_owned(), "ios".to_owned()); 357 | mc.bulk_join_room(conn, "ns", &rooms); 358 | } 359 | }); 360 | joins.push(join); 361 | } 362 | for join in joins { 363 | join.join().unwrap(); 364 | } 365 | for i in 0..cycle_num * thread_num { 366 | assert!(m.storage.contains_key(&format!("conn_{}", i))); 367 | 
assert!(m.indexing.contains_key(&format!("ns::uid::uid_{}", i))); 368 | } 369 | 370 | let mut joins = Vec::new(); 371 | for i in 0..thread_num { 372 | let mc = m.clone(); 373 | let start = i * cycle_num; 374 | let end = start + cycle_num; 375 | let join = std::thread::spawn(move || { 376 | for i in start..end { 377 | let conn = mock_conn_sender(&format!("conn_{}", i)); 378 | mc.leave_room(&conn, "ns", "uid", &format!("uid_{}", i)); 379 | } 380 | }); 381 | joins.push(join); 382 | } 383 | for join in joins { 384 | join.join().unwrap(); 385 | } 386 | for i in 0..cycle_num * thread_num { 387 | assert!(m.storage.contains_key(&format!("conn_{}", i))); 388 | assert!(!m.indexing.contains_key(&format!("ns::uid::uid_{}", i))); 389 | } 390 | } 391 | } 392 | -------------------------------------------------------------------------------- /server-grpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server-grpc" 3 | version = "0.1.0" 4 | edition = "2021" 5 | resolver = "2" 6 | 7 | [dependencies] 8 | futures = "0.3" 9 | log = "0.4" 10 | async-trait = "0.1" 11 | cached = "0.23" 12 | parking_lot = "0.11" # move to base 13 | 14 | server-base = { path = "../server-base" } 15 | server-state = { path = "../server-state" } -------------------------------------------------------------------------------- /server-grpc/src/dispatcher.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use cached::proc_macro::cached; 3 | use server_base::proto::message_service_client::MessageServiceClient; 4 | use server_base::proto::MessageReq; 5 | use server_base::tonic::Request; 6 | use server_base::Dispatch; 7 | use server_state::Services; 8 | use std::sync::Arc; 9 | use std::time::Duration; 10 | 11 | #[cached(time = 60)] 12 | fn grpc_dispatch_retries() -> u8 { 13 | server_state::config() 14 | .get_int("grpc_dispatch_retries") 15 | .unwrap_or(3) as u8 16 | } 17 | 
18 | #[derive(Clone)] 19 | pub struct Dispatcher { 20 | clients: Arc, 21 | zone: Option, 22 | } 23 | 24 | #[cached(time = 60)] 25 | fn grpc_timeout() -> Duration { 26 | let timeout = server_state::config().service_grpc_timeout_ms(); 27 | Duration::from_millis(timeout) 28 | } 29 | 30 | #[async_trait] 31 | impl Dispatch for Dispatcher { 32 | async fn dispatch(&self, namespace: String, request: MessageReq) -> bool { 33 | for _ in 0..grpc_dispatch_retries() { 34 | let mut request = Request::new(request.clone()); 35 | request.set_timeout(grpc_timeout()); 36 | match self.clients.pick_client(&namespace, self.zone.clone()) { 37 | Some((addr, client)) => { 38 | let client = client.as_ref().to_owned(); 39 | match MessageServiceClient::new(client) 40 | .handle_message(request) 41 | .await 42 | { 43 | Ok(resp) => { 44 | let resp = resp.into_inner(); 45 | return resp.success_num > 0; 46 | } 47 | Err(e) => { 48 | log::warn!("on_message_async_opt err:{}, {}, {}", namespace, addr, e); 49 | } 50 | } 51 | } 52 | None => { 53 | log::warn!("service client unavailable for namespace: {}", namespace,); 54 | return false; 55 | } 56 | } 57 | } 58 | false 59 | } 60 | } 61 | 62 | impl Default for Dispatcher { 63 | fn default() -> Self { 64 | let clients = Arc::new(server_state::service_client_state()); 65 | let zone = server_state::get_zone(); 66 | Self { clients, zone } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /server-grpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod cluster; 2 | mod dispatcher; 3 | mod operator; 4 | mod server; 5 | 6 | pub use cluster::ClusterForwarder; 7 | pub use dispatcher::Dispatcher; 8 | pub use operator::ClusterOperator; 9 | pub use server::ServerStub; 10 | use server_base::{proto::link_service_server::LinkServiceServer, tonic, CoreOperation}; 11 | 12 | pub fn grpc_listen(addr: &str, operator: O) 13 | where 14 | O: CoreOperation + 'static, 15 | { 16 | 
log::info!("gRPC server is listening at: {}", addr); 17 | let stub = ServerStub::new(addr.to_owned(), operator); 18 | let addr = addr.parse().expect("failed to parse grpc addr"); 19 | let config = server_state::config(); 20 | let max_concurrent_streams = config.get::("grpc_max_concurrent_streams").ok(); 21 | let initial_stream_window_size = config 22 | .get::("grpc_initial_stream_window_size") 23 | .unwrap_or(65535 * 100); // 6.5M 24 | let initial_connection_window_size = config 25 | .get::("grpc_initial_connection_window_size") 26 | .unwrap_or(65535 * 200); // 13M 27 | let concurrency_limit_per_connection = config 28 | .get::("grpc_concurrency_limit_per_connection") 29 | .unwrap_or(1024); 30 | 31 | server_base::tokio::spawn(async move { 32 | if let Err(e) = tonic::transport::Server::builder() 33 | .max_concurrent_streams(max_concurrent_streams) 34 | .initial_stream_window_size(initial_stream_window_size) 35 | .initial_connection_window_size(initial_connection_window_size) 36 | .concurrency_limit_per_connection(concurrency_limit_per_connection) 37 | .add_service(LinkServiceServer::new(stub)) 38 | .serve(addr) 39 | .await 40 | { 41 | log::error!("failed to start grpc {} : {}", addr, e); 42 | } 43 | }); 44 | } 45 | -------------------------------------------------------------------------------- /server-grpc/src/operator.rs: -------------------------------------------------------------------------------- 1 | use super::{ClusterForwarder, CoreOperation}; 2 | use async_trait::async_trait; 3 | use server_base::proto::*; 4 | use server_base::IdGen; 5 | use server_state::NodeOperation; 6 | use std::sync::Arc; 7 | 8 | #[derive(Clone)] 9 | pub struct ClusterOperator 10 | where 11 | N: NodeOperation, 12 | C: CoreOperation, 13 | { 14 | inner: Arc>, 15 | } 16 | 17 | struct Inner 18 | where 19 | N: NodeOperation, 20 | C: CoreOperation, 21 | { 22 | cluster: ClusterForwarder, 23 | node: N, 24 | myself: C, 25 | } 26 | 27 | impl Inner 28 | where 29 | N: NodeOperation, 30 | C: 
CoreOperation, 31 | { 32 | pub fn new(node: N, cluster: ClusterForwarder, myself: C) -> Self { 33 | Self { 34 | node, 35 | cluster, 36 | myself, 37 | } 38 | } 39 | } 40 | 41 | impl ClusterOperator 42 | where 43 | N: NodeOperation, 44 | C: CoreOperation, 45 | { 46 | pub fn new(node_operator: N, core_operator: C, cluster: ClusterForwarder) -> Self { 47 | let inner = Arc::new(Inner::new(node_operator, cluster, core_operator)); 48 | Self { inner } 49 | } 50 | } 51 | 52 | #[async_trait] 53 | impl CoreOperation for ClusterOperator 54 | where 55 | N: NodeOperation, 56 | C: CoreOperation, 57 | { 58 | async fn join(&self, request: JoinReq) -> bool { 59 | if let Some(node_id) = IdGen::node_id(&request.sid) { 60 | if self.inner.node.is_myself(&node_id) { 61 | self.inner.myself.join(request).await 62 | } else { 63 | match self.inner.cluster.join(&node_id, request).await { 64 | Ok(is_success) => is_success, 65 | Err(e) => { 66 | log::error!("join failed! err = {:?}", e); 67 | false 68 | } 69 | } 70 | } 71 | } else { 72 | log::error!( 73 | "join failed: node_id CAN NOT be extracted from sid: {}", 74 | &request.sid 75 | ); 76 | false 77 | } 78 | } 79 | 80 | async fn leave(&self, request: LeaveReq) -> bool { 81 | if let Some(node_id) = IdGen::node_id(&request.sid) { 82 | if self.inner.node.is_myself(&node_id) { 83 | self.inner.myself.leave(request).await 84 | } else { 85 | match self.inner.cluster.leave(&node_id, request).await { 86 | Ok(is_success) => is_success, 87 | Err(e) => { 88 | log::error!("leave failed! 
err = {:?}", e); 89 | false 90 | } 91 | } 92 | } 93 | } else { 94 | log::error!( 95 | "leave failed: node_id CAN NOT be extracted from sid: {}", 96 | &request.sid 97 | ); 98 | false 99 | } 100 | } 101 | 102 | async fn bulk_join(&self, request: BulkJoinReq) -> bool { 103 | if let Some(node_id) = IdGen::node_id(&request.sid) { 104 | if self.inner.node.is_myself(&node_id) { 105 | self.inner.myself.bulk_join(request).await 106 | } else { 107 | match self.inner.cluster.bulk_join(&node_id, request).await { 108 | Ok(is_success) => is_success, 109 | Err(e) => { 110 | log::error!("bulk_join failed! err = {:?}", e); 111 | false 112 | } 113 | } 114 | } 115 | } else { 116 | log::error!( 117 | "bulk_join failed: not found client for sid: {}", 118 | &request.sid 119 | ); 120 | false 121 | } 122 | } 123 | 124 | async fn emit_sid(&self, request: EmitSidReq) -> bool { 125 | if let Some(node_id) = IdGen::node_id(&request.sid) { 126 | if self.inner.node.is_myself(&node_id) { 127 | self.inner.myself.emit_sid(request).await 128 | } else { 129 | match self.inner.cluster.emit_sid(&node_id, request).await { 130 | Ok(is_success) => is_success, 131 | Err(e) => { 132 | log::error!("emit_sid failed! 
err = {:?}", e); 133 | false 134 | } 135 | } 136 | } 137 | } else { 138 | log::error!( 139 | "emit_sid failed: not found client for sid: {}", 140 | &request.sid 141 | ); 142 | false 143 | } 144 | } 145 | 146 | async fn emit(&self, request: EmitReq) -> EmitResp { 147 | let (mut local_resp, cluster_resp) = futures::join!( 148 | self.inner.myself.emit(request.clone()), 149 | self.inner.cluster.emit(request) 150 | ); 151 | let success = local_resp.success && cluster_resp.success; 152 | let mut rooms: Vec = cluster_resp.rooms; 153 | let mut status = cluster_resp.status; 154 | status.append(&mut local_resp.status); 155 | rooms.append(&mut local_resp.rooms); 156 | // remove duplicated 157 | rooms.sort(); 158 | rooms.dedup(); 159 | 160 | EmitResp { 161 | success, 162 | rooms, 163 | status, 164 | } 165 | } 166 | 167 | async fn get_conn_rooms(&self, request: ConnRoomReq) -> Vec { 168 | let mut local_channels = self.inner.myself.get_conn_rooms(request.clone()).await; 169 | let mut rooms = self.inner.cluster.get_conn_rooms(request).await; 170 | rooms.append(&mut local_channels); 171 | rooms 172 | } 173 | 174 | async fn get_rooms(&self, request: GetRoomsReq) -> Vec { 175 | let mut local_channels = self.inner.myself.get_rooms(request.clone()).await; 176 | let mut rooms = self.inner.cluster.get_rooms(request).await; 177 | rooms.append(&mut local_channels); 178 | rooms 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /server-grpc/src/server.rs: -------------------------------------------------------------------------------- 1 | use server_base::proto::link_service_server::LinkService; 2 | use server_base::proto::*; 3 | use server_base::{tonic, CoreOperation}; 4 | 5 | #[derive(Clone, Default)] 6 | pub struct ServerStub { 7 | operator: O, 8 | _addr: String, 9 | } 10 | 11 | impl ServerStub { 12 | pub fn new(_addr: String, operator: O) -> Self { 13 | Self { _addr, operator } 14 | } 15 | } 16 | 17 | #[tonic::async_trait] 18 | impl 
LinkService for ServerStub { 19 | async fn join( 20 | &self, 21 | request: tonic::Request, 22 | ) -> Result, tonic::Status> { 23 | let request = request.into_inner(); 24 | let success = self.operator.join(request).await; 25 | Ok(tonic::Response::new(JoinResp { success })) 26 | } 27 | 28 | async fn leave( 29 | &self, 30 | request: tonic::Request, 31 | ) -> Result, tonic::Status> { 32 | let request = request.into_inner(); 33 | let success = self.operator.leave(request).await; 34 | Ok(tonic::Response::new(LeaveResp { success })) 35 | } 36 | 37 | async fn bulk_join( 38 | &self, 39 | request: tonic::Request, 40 | ) -> Result, tonic::Status> { 41 | let request = request.into_inner(); 42 | let success = self.operator.bulk_join(request).await; 43 | Ok(tonic::Response::new(BulkJoinResp { success })) 44 | } 45 | 46 | async fn emit_sid( 47 | &self, 48 | request: tonic::Request, 49 | ) -> Result, tonic::Status> { 50 | let request = request.into_inner(); 51 | let success = self.operator.emit_sid(request).await; 52 | Ok(tonic::Response::new(EmitSidResp { 53 | success, 54 | sent: success, 55 | })) 56 | } 57 | 58 | async fn emit( 59 | &self, 60 | request: tonic::Request, 61 | ) -> Result, tonic::Status> { 62 | let request = request.into_inner(); 63 | let response = self.operator.emit(request).await; 64 | Ok(tonic::Response::new(response)) 65 | } 66 | 67 | async fn bulk_emit( 68 | &self, 69 | request: tonic::Request, 70 | ) -> Result, tonic::Status> { 71 | let bulk_request = request.into_inner(); 72 | let futures = bulk_request.requests.into_iter().map(|req| async { 73 | let response = self.operator.emit(req).await; 74 | response 75 | }); 76 | let responses: Vec = futures::future::join_all(futures) 77 | .await 78 | .into_iter() 79 | .collect(); 80 | Ok(tonic::Response::new(BulkEmitResp { responses })) 81 | } 82 | 83 | async fn get_conn_rooms( 84 | &self, 85 | request: tonic::Request, 86 | ) -> Result, tonic::Status> { 87 | let request = request.into_inner(); 88 | let operator = 
self.operator.clone();
        let rooms = operator.get_conn_rooms(request).await;

        Ok(tonic::Response::new(ConnRoomResp {
            success: !rooms.is_empty(),
            rooms,
        }))
    }

    async fn get_rooms(
        &self,
        request: tonic::Request<GetRoomsReq>,
    ) -> Result<tonic::Response<GetChannelsResp>, tonic::Status> {
        let request = request.into_inner();
        let namespace = request.namespace.clone();
        let room_prefix = request.room_prefix.clone();
        let rooms = self.operator.get_rooms(request).await;
        Ok(tonic::Response::new(GetChannelsResp {
            namespace,
            room_prefix,
            rooms: Some(Rooms { rooms }),
        }))
    }
}

--------------------------------------------------------------------------------
/server-hproxy/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "server-hproxy"
version = "0.1.0"
edition = "2021"
resolver = "2"

[dependencies]
url = "2.2"
log = "0.4"
rand = "^0.8"
regex = "1.5.6"
futures = "0.3"
async-trait = "0.1"
reqwest = { version = "0.11", default-features = false, features = [
    "rustls-tls",
] }
parking_lot = "0.11"
http = "0.2"
cached = "0.23"
thiserror = "1.0"

server-base = { path = "../server-base" }
server-state = { path = "../server-state" }

[dev-dependencies]
config = { version = "0.11", default_features = false, features = ["json"] }
--------------------------------------------------------------------------------
/server-hproxy/src/http.rs:
--------------------------------------------------------------------------------
use crate::{Error, HpResult, CALL_ID, METHOD};
use http::HeaderMap;
use reqwest::header::{HeaderName, HeaderValue};
use reqwest::{Client as HttpClient, Method, RequestBuilder};
use server_base::proto::MessageReq;
use std::collections::HashMap;
use std::str::FromStr;

/// Intermediate form of an outbound proxied HTTP request, extracted from a
/// link `MessageReq`.
pub struct HpRequestBuilder {
    pub metadata: HashMap<String, String>,
    pub url: String,
    pub body: String,
    pub conn_id: String,
    pub method: Option<String>,
    pub call_id: Option<String>,
}

impl HpRequestBuilder {
    /// Turn this into a reqwest `RequestBuilder` targeting `url`.
    ///
    /// Errors when the call id is missing, the method is missing, or the
    /// method string is not a valid HTTP method.
    pub fn build(&self, url: &str, c: HttpClient) -> HpResult<RequestBuilder> {
        if self.call_id.is_none() {
            return Err(Error::NoCallId {
                url: url.to_owned(),
            });
        }
        if let Some(method) = &self.method {
            let method =
                Method::from_str(&method.to_uppercase()).map_err(|_| Error::InvalidMethod {
                    method: method.to_owned(),
                    url: url.to_owned(),
                })?;
            let b = c
                .request(method, url)
                .headers(get_header_map(&self.metadata))
                .body(self.body.clone());
            log::debug!("build_request {:?}", b);
            Ok(b)
        } else {
            Err(Error::NoMethod {
                url: url.to_owned(),
            })
        }
    }
}

impl TryFrom<MessageReq> for HpRequestBuilder {
    type Error = Error;

    /// Split the reserved `__method` / `__call_id` entries out of the
    /// message metadata; the rest becomes plain HTTP headers.
    fn try_from(item: MessageReq) -> Result<Self, Self::Error> {
        let message = match item.message {
            Some(m) => m,
            None => return Err(Error::NoMessage { sid: item.sid }),
        };
        let mut metadata = message.metadata.clone();
        let method = metadata.remove(METHOD);
        let call_id = metadata.remove(CALL_ID);
        let conn_id = item.sid.clone();
        let url = message.path;
        let body = message.body;

        Ok(HpRequestBuilder {
            metadata,
            // NOTE(review): non-UTF-8 bodies silently become "" here —
            // confirm lossy handling is intended.
            body: String::from_utf8(body).unwrap_or_default(),
            url,
            method,
            conn_id,
            call_id,
        })
    }
}

/// Convert string metadata into a `HeaderMap`, dropping (and logging)
/// entries whose key or value is not a valid HTTP header.
fn get_header_map(metadata: &HashMap<String, String>) -> HeaderMap {
    // Demoted from info: dumping every header on each request is noisy and
    // may leak sensitive values (auth tokens, cookies) into logs.
    log::debug!("get_header_map_in={:?}", metadata);
    let mut headers = HeaderMap::new();
    for (k, v) in metadata.iter() {
        match HeaderName::from_bytes(k.as_bytes()) {
            Ok(n) => match v.parse::<HeaderValue>() {
                Ok(v) => {
                    headers.insert(n, v);
                }
                Err(e) => log::warn!("invalid_header_value={:?}||header_key={}||err={}", v, k, e),
            },
            Err(e) => log::warn!("invalid_header_key={}||error={:?}", k, e),
        }
    }
    log::debug!("get_header_map={:?}", headers);
    headers
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_no_call_id() {
        let builder = HpRequestBuilder {
            conn_id: "123".to_owned(),
            metadata: HashMap::new(),
            body: "".to_owned(),
            call_id: None,
            method: Some("GET".to_owned()),
            url: "/".to_owned(),
        };
        let c = HttpClient::new();
        assert!(builder.build("url", c).is_err());
    }

    #[test]
    fn test_no_method() {
        let builder = HpRequestBuilder {
            conn_id: "123".to_owned(),
            metadata: HashMap::new(),
            body: "".to_owned(),
            call_id: Some("123".to_owned()),
            method: None,
            url: "/".to_owned(),
        };
        let c = HttpClient::new();
        assert!(builder.build("url", c).is_err());
    }

    #[test]
    fn test_build() {
        use http::method::Method;

        let mut metadata = HashMap::new();
        metadata.insert("trace".to_owned(), "aaa".to_owned());
        let builder = HpRequestBuilder {
            conn_id: "123".to_owned(),
            metadata,
            body: "".to_owned(),
            call_id: Some("123".to_owned()),
            method: Some("GET".to_owned()),
            url: "/".to_owned(),
        };
        let c = HttpClient::new();
        let url = "http://www.bing.com/";
        let client = builder.build(url, c);
        assert!(client.is_ok());
        let b = client.unwrap().build();
        println!("{:?}", b);
        assert!(b.is_ok());
        let r = b.unwrap();
        assert_eq!(r.method(), Method::GET);
        assert!(r.headers().get("trace").is_some());
        assert_eq!(format!("{}", r.url()), url);
    }

    #[test]
    fn test_header_map() {
        let mut metadata: HashMap<String, String> = HashMap::new();
        metadata.insert(METHOD.to_owned(), "GET".to_owned());
        let header_map = get_header_map(&metadata);
153 | assert_eq!(header_map.get(METHOD.to_owned()).is_some(), true); 154 | assert_eq!(header_map.get("trace".to_owned()).is_some(), false); 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /server-hproxy/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod http; 2 | mod proxy; 3 | 4 | use crate::http::HpRequestBuilder; 5 | use proxy::HttpProxy; 6 | use server_base::PushConn; 7 | use thiserror::Error; 8 | 9 | type HpResult = std::result::Result; 10 | 11 | pub const HPROXY_NAMESPACE: &str = "__http_proxy"; 12 | 13 | const HTTP_STATUS_CODE: &str = "__status_code"; 14 | const CALL_ID: &str = "__call_id"; 15 | const METHOD: &str = "__method"; 16 | 17 | #[derive(Debug, Error)] 18 | pub enum Error { 19 | #[error("No CallId {url}")] 20 | NoCallId { url: String }, 21 | 22 | #[error("No Method {url}")] 23 | NoMethod { url: String }, 24 | 25 | #[error("Invalid Method {method} of {url}")] 26 | InvalidMethod { method: String, url: String }, 27 | 28 | #[error("No Message in MessageReq of conn {sid}")] 29 | NoMessage { sid: String }, 30 | } 31 | 32 | pub fn build_http_proxy(pusher: G) -> Option> 33 | where 34 | G: PushConn + Clone, 35 | { 36 | if server_state::config().hproxy_map().is_empty() { 37 | log::info!("not set hproxy!"); 38 | return None; 39 | } 40 | Some(HttpProxy::new(pusher)) 41 | } 42 | -------------------------------------------------------------------------------- /server-listener/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server-listener" 3 | version = "0.1.0" 4 | edition = "2021" 5 | resolver = "2" 6 | 7 | [dependencies] 8 | anyhow = "1.0" 9 | bytes = "1.0" 10 | futures = "0.3" 11 | tokio-util = { version = "0.7", features = ["codec"] } 12 | tokio-stream = { version = "0.1", default-features = false, features = [ 13 | "net", 14 | "time", 15 | "fs", 16 | "io-util", 17 | ] } 18 | 
/// Drives one accepted connection: a read task that feeds inbound frames to
/// the `ConnContext`, a write task that flushes queued outbound frames to
/// the transport sink, and an inactivity watchdog that cancels both through
/// a shared `stream_cancel::Valve`.
pub struct Accepter {
    // Shared per-connection state/callbacks; `on_conn_create` is called in
    // `accept_stream` and `on_conn_destroy` fires in `Drop`.
    context: Arc<ConnContext>,
    // Instant of the last inbound frame; refreshed by the read loop and
    // polled by the watchdog against `context.timeout`.
    incoming_time: Arc<Mutex<Instant>>,
}

impl Accepter {
    pub fn new(context: ConnContext) -> Self {
        Self {
            context: Arc::new(context),
            incoming_time: Arc::new(Mutex::new(Instant::now())),
        }
    }

    /// Spawns the inactivity watchdog.
    ///
    /// Sleeps until `timeout` seconds have passed since the last observed
    /// `incoming_time`; if traffic refreshed the timestamp while sleeping,
    /// the loop sleeps again for the remainder. On expiry the `Trigger` is
    /// taken out of the mutex and dropped, which cancels every stream
    /// wrapped by the associated `Valve` (both pump tasks).
    fn start_timer(&self, trigger: Arc<Mutex<Option<Trigger>>>) {
        let incoming_time = self.incoming_time.clone();
        let timeout_secs = self.context.timeout;
        tokio::spawn(async move {
            let mut elapsed = 0;
            loop {
                if elapsed < timeout_secs {
                    // Sleep only for the remaining window, not the full timeout.
                    let delay = tokio::time::sleep(Duration::from_secs(timeout_secs - elapsed));
                    delay.await;
                } else {
                    break;
                }
                elapsed = incoming_time.l().elapsed().as_secs();
            }
            // Dropping the returned Trigger (if still present) cancels the
            // valve-wrapped streams; if do_read already took it, this is a no-op.
            trigger.l().take()
        });
    }

    /// Wires the transport halves to the connection context and starts the
    /// read task, the write task, and the timeout watchdog.
    ///
    /// - `reader`: inbound frame stream from the transport.
    /// - `writer`: outbound sink to the transport.
    /// - `recv_msg`: protocol adapter turning raw frames into messages
    ///   (may answer protocol pings internally and return `Ok(None)`).
    /// - `receiver`: queue of outbound frames produced elsewhere.
    pub fn accept_stream<R, W, E, T>(
        self,
        reader: R,
        mut writer: W,
        recv_msg: Box<dyn RecvMessage<Item = T> + Send>,
        receiver: UnboundedReceiver<T>,
    ) where
        R: Stream<Item = Result<T, E>> + Unpin + Send + 'static,
        W: Sink<T, Error = E> + Unpin + Send + 'static,
        E: std::fmt::Debug + Send,
        T: Send + 'static,
    {
        self.context.on_conn_create();
        let (trigger, valve) = Valve::new();
        let trigger = Arc::new(Mutex::new(Some(trigger)));
        self.start_timer(trigger.clone());
        let receiver_stream = UnboundedReceiverStream::new(receiver);
        // Valve-wrap both directions so the watchdog can cancel them together.
        let mut receiver = valve.wrap(receiver_stream);
        let reader = valve.wrap(reader);
        // Read task owns `self`; the Drop impl signals teardown when it ends.
        tokio::spawn(async move {
            self.do_read(reader, recv_msg, trigger).await;
        });

        // Write task: forward queued outbound frames until the queue closes,
        // the valve cancels the stream, or the sink errors.
        tokio::spawn(async move {
            while let Some(msg) = receiver.next().await {
                if let Err(e) = writer.send(msg).await {
                    log::warn!("send err: {:?}, write channel finished!", e);
                    return;
                }
            }
        });
    }

    /// Read loop: decodes inbound frames via `recv_msg` and hands complete
    /// messages to the context. Exits on stream end, transport error, or
    /// decode error, then cancels the trigger so the write side stops too.
    async fn do_read<R, E, T>(
        self,
        mut reader: Valved<R>,
        recv_msg: Box<dyn RecvMessage<Item = T> + Send>,
        trigger: Arc<Mutex<Option<Trigger>>>,
    ) where
        R: Stream<Item = Result<T, E>> + Unpin + Send + 'static,
        E: std::fmt::Debug + Send,
        T: Send + 'static,
    {
        while let Some(result) = reader.next().await {
            let item = match result {
                Ok(item) => item,
                Err(e) => {
                    log::warn!("read err: {:?}", e);
                    break;
                }
            };
            match recv_msg.recv(item) {
                Ok(Some(message)) => {
                    self.context.accept_message(message);
                }
                // Frame consumed by the adapter (e.g. a ping) — nothing to forward.
                Ok(None) => {}
                Err(e) => {
                    log::warn!("read error {:?}", e);
                    break;
                }
            }
            // update incoming time
            // NOTE(review): the timestamp is refreshed only while
            // `should_timeout()` is false, i.e. timeout-eligible connections
            // keep aging from their last refresh — confirm this inversion is
            // intentional.
            if !self.context.should_timeout() {
                let mut time = self.incoming_time.l();
                *time = Instant::now();
            }
        }
        // Cancel explicitly if the watchdog has not already fired.
        if let Some(trigger) = trigger.l().take() {
            trigger.cancel();
        }
    }
}

impl Drop for Accepter {
    /// Fires once, when the read task releases the Accepter, signalling
    /// connection teardown to the context.
    fn drop(&mut self) {
        self.context.on_conn_destroy();
    }
}
-------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use bytes::{BufMut, BytesMut}; 3 | use server_base::{codec, proto::packet::Packet}; 4 | use tokio_util::codec::{Decoder as TokioDecoder, Encoder as TokioEncoder}; 5 | 6 | #[derive(Default)] 7 | pub struct Codec { 8 | codec: codec::Codec, 9 | } 10 | 11 | impl TokioEncoder for Codec { 12 | type Error = Error; 13 | 14 | fn encode(&mut self, item: Packet, dst: &mut BytesMut) -> Result<(), Self::Error> { 15 | log::trace!("encocde {:?}", item); 16 | let bytes = self.codec.encode(item)?; 17 | log::trace!("encocded {:?}", bytes); 18 | if dst.remaining_mut() < bytes.len() { 19 | dst.reserve(bytes.len()); 20 | } 21 | dst.put(bytes); 22 | Ok(()) 23 | } 24 | } 25 | 26 | impl TokioDecoder for Codec { 27 | type Item = Packet; 28 | type Error = Error; 29 | 30 | fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { 31 | if src.is_empty() { 32 | return Ok(None); 33 | } 34 | let m = self.codec.decode(src).map_err(|e| e.into()); 35 | log::debug!("decode {:?}", m); 36 | m 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /server-listener/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod accepter; 2 | mod codec; 3 | mod listener; 4 | mod peek; 5 | mod quic; 6 | mod tcp; 7 | mod tls; 8 | mod ws; 9 | 10 | pub use quic::listen_quic; 11 | // use snafu::Snafu; 12 | pub use tls::listen_tls; 13 | 14 | use server_base::tokio; 15 | use stream_cancel::Valve; 16 | 17 | fn user_port_valve() -> Valve { 18 | let (trigger, valve) = Valve::new(); 19 | tokio::spawn(async move { 20 | loop { 21 | tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; 22 | let should_stop = server_base::USER_PORT_LISTEN.read(); 23 | if *should_stop { 24 | drop(trigger); 25 | break; 26 | } 27 | } 28 | }); 29 | valve 30 | } 31 | 
-------------------------------------------------------------------------------- /server-listener/src/listener.rs: -------------------------------------------------------------------------------- 1 | use crate::peek::PeekStream; 2 | use crate::tcp::tcp_accept_stream; 3 | use crate::tls::{TlsAcceptorBuilder, ALPN_TCP, ALPN_WS}; 4 | use crate::ws::ws_accept_stream; 5 | use server_base::tokio; 6 | use server_base::LifeCycle; 7 | use std::net::SocketAddr; 8 | use std::sync::Arc; 9 | use tokio::io::{AsyncRead, AsyncWrite}; 10 | use tokio::net::{TcpListener, TcpStream}; 11 | use tokio_rustls::{Accept, TlsAcceptor}; 12 | use tokio_stream::wrappers::TcpListenerStream; 13 | use tokio_stream::StreamExt; 14 | 15 | const DEFAULT_TIMEOUT: u64 = 30; 16 | 17 | pub struct Builder { 18 | address: Option, 19 | lifecycle: Option>, 20 | accept_builder: TlsAcceptorBuilder, 21 | timeout: u64, 22 | } 23 | 24 | impl Builder { 25 | pub fn new(addr: &str) -> Self { 26 | Builder { 27 | address: addr.parse().ok(), 28 | lifecycle: None, 29 | accept_builder: TlsAcceptorBuilder::new(), 30 | timeout: DEFAULT_TIMEOUT, 31 | } 32 | } 33 | 34 | pub fn with_timeout(mut self, timeout: u64) -> Self { 35 | self.timeout = timeout; 36 | self 37 | } 38 | 39 | pub fn with_cert(mut self, pem_file: Option) -> Self { 40 | if let Some(ref file) = pem_file { 41 | match self 42 | .accept_builder 43 | .with_cert_pem_file(std::path::Path::new(&file)) 44 | { 45 | Ok(_) => log::info!("load cert: {:?} success", file), 46 | Err(e) => log::warn!("add_cert_pem_file failed: {:?}", e), 47 | } 48 | } 49 | self 50 | } 51 | 52 | pub fn with_lifecycle(mut self, lifecycle: Arc) -> Self { 53 | self.lifecycle = Some(lifecycle); 54 | self 55 | } 56 | 57 | pub fn build(self) -> Result { 58 | if self.address.is_none() { 59 | return Err("address is none".to_owned()); 60 | } 61 | 62 | Ok(Listener { 63 | address: self.address.unwrap(), 64 | timeout: self.timeout, 65 | tls_acceptor: self.accept_builder.build(), 66 | lifecycle: 
self.lifecycle.unwrap(), 67 | }) 68 | } 69 | } 70 | 71 | pub struct Listener { 72 | address: SocketAddr, 73 | timeout: u64, 74 | tls_acceptor: Option, 75 | lifecycle: Arc, 76 | } 77 | 78 | impl Listener { 79 | async fn accept_tls_stream( 80 | tls_stream: tokio_rustls::server::TlsStream, 81 | timeout: u64, 82 | peer_addr: Option, 83 | lifecycle: Arc, 84 | ) { 85 | let (_, session) = tls_stream.get_ref(); 86 | if let Some(alpn) = session.alpn_protocol() { 87 | if let Ok(utf8_alpn) = std::str::from_utf8(alpn) { 88 | log::info!("tls alpn: {}", utf8_alpn); 89 | match utf8_alpn { 90 | ALPN_TCP => { 91 | tcp_accept_stream(timeout, tls_stream, lifecycle, peer_addr).await; 92 | } 93 | ALPN_WS => { 94 | ws_accept_stream(timeout, tls_stream, lifecycle, peer_addr).await; 95 | } 96 | _ => log::warn!("unknown ALPN {}", utf8_alpn), 97 | } 98 | return; 99 | } 100 | } 101 | 102 | detect_accept(tls_stream, timeout, lifecycle, peer_addr).await 103 | } 104 | 105 | pub async fn listen(self) { 106 | let timeout = self.timeout; 107 | let listener = TcpListener::bind(&self.address) 108 | .await 109 | .expect("bind tcp failed!"); 110 | let valve = crate::user_port_valve(); 111 | let mut incoming = valve.wrap(TcpListenerStream::new(listener)); 112 | 113 | while let Some(stream_result) = incoming.next().await { 114 | match stream_result { 115 | Ok(stream) => { 116 | let lifecycle = self.lifecycle.clone(); 117 | let tls_acceptor = self.tls_acceptor.clone(); 118 | tokio::spawn(async move { 119 | let peer_addr = stream.peer_addr().ok(); 120 | if let Some(ref acceptor) = tls_acceptor { 121 | accept_tls_stream( 122 | acceptor.accept(stream), 123 | timeout, 124 | peer_addr, 125 | lifecycle, 126 | ) 127 | .await; 128 | } else { 129 | detect_accept(stream, timeout, lifecycle, peer_addr).await; 130 | } 131 | }); 132 | } 133 | Err(e) => log::error!("incoming err: {:?}", e), 134 | } 135 | } 136 | log::error!("server_stop_listen: {:?}", self.address); 137 | } 138 | } 139 | 140 | async fn detect_accept( 
141 | stream: S, 142 | timeout: u64, 143 | lifecycle: Arc, 144 | peer_addr: Option, 145 | ) { 146 | let mut peek_stream = PeekStream::new(stream); 147 | if let Some(buf) = peek_stream.peek(1).await { 148 | if let Ok(s) = std::str::from_utf8(&buf) { 149 | if s.starts_with('G') { 150 | log::info!("detect protocol ws"); 151 | ws_accept_stream(timeout, peek_stream, lifecycle, peer_addr).await; 152 | return; 153 | } 154 | } 155 | } 156 | tcp_accept_stream(timeout, peek_stream, lifecycle, peer_addr).await; 157 | } 158 | 159 | async fn accept_tls_stream( 160 | accept_steam: Accept, 161 | timeout: u64, 162 | peer_addr: Option, 163 | lifecycle: Arc, 164 | ) { 165 | match accept_steam.await { 166 | Ok(tls_stream) => { 167 | Listener::accept_tls_stream(tls_stream, timeout, peer_addr, lifecycle).await; 168 | } 169 | Err(e) => { 170 | log::warn!("tls_stream accept failed: {:?}", e) 171 | } 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /server-listener/src/peek.rs: -------------------------------------------------------------------------------- 1 | use server_base::tokio; 2 | use std::io; 3 | use std::pin::Pin; 4 | use std::task::{Context, Poll}; 5 | use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, ReadBuf}; 6 | 7 | pub(crate) struct PeekStream { 8 | stream: S, 9 | peek_buf: Option>, 10 | } 11 | 12 | impl PeekStream { 13 | pub fn new(stream: S) -> Self { 14 | PeekStream { 15 | stream, 16 | peek_buf: None, 17 | } 18 | } 19 | 20 | pub async fn peek(&mut self, length: u8) -> Option> { 21 | let mut buf = [0, length]; 22 | let n = match self.stream.read(&mut buf).await { 23 | Ok(n) => n, 24 | Err(_) => return None, 25 | }; 26 | let buf = buf[0..n].to_vec(); 27 | self.peek_buf = Some(buf.clone()); 28 | Some(buf) 29 | } 30 | } 31 | 32 | impl AsyncRead for PeekStream { 33 | fn poll_read( 34 | self: Pin<&mut Self>, 35 | cx: &mut Context<'_>, 36 | buf: &mut ReadBuf<'_>, 37 | ) -> Poll> { 38 | let me = self.get_mut(); 39 | if let 
Some(peek_buf) = me.peek_buf.take() { 40 | buf.put_slice(&peek_buf); 41 | } 42 | let stream = &mut me.stream; 43 | Pin::new(stream).poll_read(cx, buf) 44 | } 45 | } 46 | 47 | impl AsyncWrite for PeekStream { 48 | fn poll_write( 49 | self: Pin<&mut Self>, 50 | cx: &mut Context<'_>, 51 | buf: &[u8], 52 | ) -> Poll> { 53 | let me = self.get_mut(); 54 | let stream = &mut me.stream; 55 | Pin::new(stream).poll_write(cx, buf) 56 | } 57 | 58 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 59 | let me = self.get_mut(); 60 | let stream = &mut me.stream; 61 | Pin::new(stream).poll_flush(cx) 62 | } 63 | 64 | fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 65 | let me = self.get_mut(); 66 | let stream = &mut me.stream; 67 | Pin::new(stream).poll_shutdown(cx) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /server-listener/src/quic.rs: -------------------------------------------------------------------------------- 1 | use crate::accepter::Accepter; 2 | use crate::codec::Codec; 3 | use anyhow::Result as AnyResult; 4 | use cached::proc_macro::cached; 5 | use futures::StreamExt; 6 | use quinn::congestion::BbrConfig; 7 | use quinn::ServerConfig; 8 | use server_base::proto::{packet::Packet, Message, Pong}; 9 | use server_base::tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; 10 | use server_base::{tokio, ConnContext, LifeCycle, Protocol, RecvMessage, SendMessage}; 11 | use std::sync::Arc; 12 | use std::time::Duration; 13 | use tokio_util::codec::{FramedRead, FramedWrite}; 14 | 15 | const ALPN_QUIC: &[&[u8]] = &[b"hq-29"]; 16 | const QUIC_KEEP_ALIVE: u64 = 15; 17 | const QUIC_MAX_IDLE: u64 = 30; 18 | 19 | pub async fn listen_quic( 20 | addr: String, 21 | timeout_secs: u64, 22 | cert_path: String, 23 | lifecycle: Arc, 24 | ) { 25 | let mut err = None; 26 | // retry for integration test UDP bind error 27 | for _ in 0..3 { 28 | match do_listen_quic( 29 | addr.clone(), 30 | 
/// Binds a QUIC endpoint on `addr` and accepts connections until the
/// user-port valve (see `crate::user_port_valve`) closes the incoming
/// stream. One task is spawned per inbound connection attempt.
async fn do_listen_quic(
    addr: String,
    timeout_secs: u64,
    cert_path: String,
    lifecycle: Arc<dyn LifeCycle>,
) -> AnyResult<()> {
    let valve = crate::user_port_valve();
    let mut incoming = {
        let addr = addr.parse()?;
        let (endpoint, incoming) = quinn::Endpoint::server(server_config(cert_path)?, addr)?;
        log::info!("listening quic on {:?}", endpoint.local_addr());
        // Wrapping in the valve lets the config watcher stop the listener.
        valve.wrap(incoming)
    };

    while let Some(conn) = incoming.next().await {
        log::info!("quic connection incoming");
        tokio::spawn(accept_quic(conn, timeout_secs, lifecycle.clone()));
    }
    log::info!("quic server stop {}", addr);
    Ok(())
}

/// Builds the quinn server configuration: single cert/key loaded from
/// `cert_path`, QUIC ALPN, BBR congestion control, and keep-alive / idle
/// timers (QUIC_KEEP_ALIVE / QUIC_MAX_IDLE seconds).
fn server_config(cert_path: String) -> AnyResult<ServerConfig> {
    let (certs, key) = crate::tls::cert_key(std::path::Path::new(&cert_path))?;
    let mut server_crypto = rustls::ServerConfig::builder()
        .with_safe_defaults()
        .with_no_client_auth()
        .with_single_cert(certs, key)?;
    server_crypto.alpn_protocols = ALPN_QUIC.iter().map(|&x| x.into()).collect();
    // Honors SSLKEYLOGFILE so captures can be decrypted during debugging.
    server_crypto.key_log = Arc::new(rustls::KeyLogFile::new());
    let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(server_crypto));
    // get_mut().unwrap() is safe: the config was just created, the Arc is
    // not shared yet.
    Arc::get_mut(&mut server_config.transport)
        .unwrap()
        .congestion_controller_factory(Arc::new(BbrConfig::default()))
        .max_concurrent_uni_streams(0_u8.into())
        .keep_alive_interval(Some(Duration::from_secs(QUIC_KEEP_ALIVE)))
        .max_idle_timeout(Some(Duration::from_secs(QUIC_MAX_IDLE).try_into().unwrap())); // safe to unwrap

    Ok(server_config)
}

/// Completes the QUIC handshake and then serves every client-initiated
/// bidirectional stream as an independent logical connection.
async fn accept_quic(conn: quinn::Connecting, timeout_secs: u64, lifecycle: Arc<dyn LifeCycle>) {
    let quinn::NewConnection { mut bi_streams, .. } = match conn.await {
        Ok(c) => c,
        Err(e) => return log::error!("quinn new connection error: {:?}", e),
    };

    // Immediately-awaited block; kept as-is from the original structure.
    async {
        log::info!("quic connection established");

        // Each stream initiated by the client constitutes a new request.
        while let Some(stream) = bi_streams.next().await {
            log::info!("quic new stream incoming");
            let stream = match stream {
                // Graceful close by the peer: stop serving this connection.
                Err(quinn::ConnectionError::ApplicationClosed { .. }) => {
                    log::info!("quic connection closed");
                    return;
                }
                Err(e) => {
                    log::warn!("quic connection error {}", e);
                    return;
                }
                Ok(s) => s,
            };
            tokio::spawn(accept_quic_stream(stream, timeout_secs, lifecycle.clone()));
        }
    }
    .await;
}

/// Sets up length-delimited protobuf framing and hands one bidirectional
/// stream to the shared `Accepter` pump.
async fn accept_quic_stream(
    (send, recv): (quinn::SendStream, quinn::RecvStream),
    timeout: u64,
    lifecycle: Arc<dyn LifeCycle>,
) {
    // Cached for 60s so the config is not re-read for every new stream.
    #[cached(time = 60)]
    fn tokio_codec_size() -> usize {
        server_state::config().tokio_codec_size()
    }

    log::trace!("quic handle new stream");
    let (sender, receiver) = unbounded_channel();
    let context = ConnContext {
        proto: Protocol::Quic,
        timeout,
        create_time: server_base::now_ts_milli(),
        conn_id: lifecycle.new_conn_id(Protocol::Quic),
        sender: Box::new(Quic {
            sender: sender.clone(),
        }),
        lifecycle,
        peer_addr: None,
    };
    let reader = FramedRead::with_capacity(recv, Codec::default(), tokio_codec_size());
    let writer = FramedWrite::new(send, Codec::default());
    Accepter::new(context).accept_stream(reader, writer, Box::new(Quic { sender }), receiver);
    log::trace!("quic stream accept");
}

/// Per-stream adapter between QUIC packets and the connection context.
struct Quic {
    sender: UnboundedSender<Packet>,
}

impl RecvMessage for Quic {
    type Item = Packet;

    /// Answers Pings directly on the outbound queue (echoing the peer's
    /// timestamp), surfaces Messages to the accepter, logs anything else.
    fn recv(&self, item: Self::Item) -> Result<Option<Message>, ()> {
        match item {
            Packet::Ping(ping) => {
                // Send failure means the write task is gone; ignore.
                let _ = self.sender.send(Packet::Pong(Pong {
                    timestamp: ping.timestamp,
                }));
            }
            Packet::Message(msg) => return Ok(Some(msg)),
            _ => log::warn!("quic unknown packet: {:?}", item),
        }
        Ok(None)
    }
}

impl SendMessage for Quic {
    /// Queues an outbound Message; Err(()) when the write task has stopped.
    fn send(&self, msg: Message) -> Result<(), ()> {
        self.sender.send(Packet::Message(msg)).map_err(|_| ())
    }
}
Accepter::new(context).accept_stream(reader, writer, Box::new(Tcp { sender }), receiver) 40 | } 41 | 42 | struct Tcp { 43 | sender: UnboundedSender, 44 | } 45 | 46 | impl RecvMessage for Tcp { 47 | type Item = Packet; 48 | 49 | fn recv(&self, item: Self::Item) -> Result, ()> { 50 | match item { 51 | Packet::Ping(ping) => { 52 | let _ = self.sender.send(Packet::Pong(Pong { 53 | timestamp: ping.timestamp, 54 | })); 55 | } 56 | Packet::Message(msg) => return Ok(Some(msg)), 57 | _ => log::warn!("tcp unknown packet: {:?}", item), 58 | } 59 | Ok(None) 60 | } 61 | } 62 | 63 | impl SendMessage for Tcp { 64 | fn send(&self, msg: Message) -> Result<(), ()> { 65 | self.sender.send(Packet::Message(msg)).map_err(|_| ()) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /server-listener/src/tls.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use rustls::{Certificate, PrivateKey, ServerConfig}; 3 | use server_base::LifeCycle; 4 | use std::path::Path; 5 | use std::sync::Arc; 6 | use tokio_rustls::TlsAcceptor; 7 | 8 | pub const ALPN_TCP: &str = "tcp"; 9 | pub const ALPN_WS: &str = "http/1.1"; 10 | 11 | pub struct TlsAcceptorBuilder { 12 | cert_chain: Vec, 13 | private_key: Option, 14 | } 15 | 16 | impl TlsAcceptorBuilder { 17 | pub fn new() -> Self { 18 | TlsAcceptorBuilder { 19 | cert_chain: vec![], 20 | private_key: None, 21 | } 22 | } 23 | 24 | pub fn with_cert_pem_file(&mut self, path: &Path) -> Result<()> { 25 | let (certs, key) = crate::tls::cert_key(path)?; 26 | self.cert_chain = certs; 27 | self.private_key = Some(key); 28 | Ok(()) 29 | } 30 | 31 | pub fn build(self) -> Option { 32 | let private_key = self.private_key?; 33 | if let Ok(mut server_config) = ServerConfig::builder() 34 | .with_safe_defaults() 35 | .with_no_client_auth() 36 | .with_single_cert(self.cert_chain, private_key) 37 | { 38 | server_config.alpn_protocols = vec![Vec::from(ALPN_TCP), 
Vec::from(ALPN_WS)]; 39 | server_config.key_log = Arc::new(rustls::KeyLogFile::new()); 40 | Some(TlsAcceptor::from(Arc::new(server_config))) 41 | } else { 42 | log::warn!("tls acceptor build failed"); 43 | None 44 | } 45 | } 46 | } 47 | 48 | pub fn cert_key(path: &std::path::Path) -> Result<(Vec, PrivateKey)> { 49 | let key = std::fs::read(path)?; 50 | let key = if path.extension().map_or(false, |x| x == "der") { 51 | rustls::PrivateKey(key) 52 | } else { 53 | let pkcs8 = rustls_pemfile::pkcs8_private_keys(&mut &*key)?; 54 | match pkcs8.into_iter().next() { 55 | Some(x) => rustls::PrivateKey(x), 56 | None => { 57 | let rsa = rustls_pemfile::rsa_private_keys(&mut &*key)?; 58 | match rsa.into_iter().next() { 59 | Some(x) => rustls::PrivateKey(x), 60 | None => bail!("No Private Key Found"), 61 | } 62 | } 63 | } 64 | }; 65 | let cert_chain = std::fs::read(path)?; 66 | let cert_chain = if path.extension().map_or(false, |x| x == "der") { 67 | vec![rustls::Certificate(cert_chain)] 68 | } else { 69 | rustls_pemfile::certs(&mut &*cert_chain)? 
70 | .into_iter() 71 | .map(rustls::Certificate) 72 | .collect() 73 | }; 74 | 75 | Ok((cert_chain, key)) 76 | } 77 | 78 | pub async fn listen_tls( 79 | addr: String, 80 | timeout_secs: u64, 81 | cert: Option, 82 | lifecycle: Arc, 83 | ) { 84 | use crate::listener::Builder; 85 | let builder = Builder::new(addr.as_str()) 86 | .with_cert(cert) 87 | .with_timeout(timeout_secs) 88 | .with_lifecycle(lifecycle); 89 | match builder.build() { 90 | Ok(listener) => listener.listen().await, 91 | Err(e) => panic!("start share tls server failed: {}", e), 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /server-listener/src/ws.rs: -------------------------------------------------------------------------------- 1 | use crate::accepter::Accepter; 2 | use bytes::{Bytes, BytesMut}; 3 | use futures::stream::StreamExt; 4 | use server_base::codec::{DecodeError, EncodeError, Message}; 5 | use server_base::proto; 6 | use server_base::tokio; 7 | use server_base::{ConnContext, LifeCycle, Protocol, RecvMessage, SendMessage}; 8 | use std::net::SocketAddr; 9 | use std::sync::Arc; 10 | use tokio::io::{AsyncRead, AsyncWrite}; 11 | use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; 12 | use tokio_tungstenite::accept_async; 13 | use tungstenite::Message as WsMessage; 14 | 15 | pub struct WsCodec {} 16 | 17 | impl WsCodec { 18 | pub fn new() -> Self { 19 | Self {} 20 | } 21 | 22 | pub fn encode(&self, item: proto::Message) -> Result { 23 | let packet = proto::packet::Packet::Message(item); 24 | let p = proto::Packet { 25 | packet: Some(packet), 26 | }; 27 | let mut buf = BytesMut::with_capacity(p.encoded_len()); 28 | p.encode(&mut buf)?; 29 | Ok(buf.freeze()) 30 | } 31 | 32 | pub fn decode(&self, src: &mut BytesMut) -> Result, DecodeError> { 33 | let packet = proto::Packet::decode(src)?.packet; 34 | match packet { 35 | Some(proto::packet::Packet::Message(m)) => Ok(Some(m)), 36 | _ => Ok(None), 37 | } 38 | } 39 | } 40 | 41 | pub async fn 
/// WebSocket-side adapter: protobuf messages travel inside binary frames;
/// control frames are handled here.
struct Ws {
    codec: WsCodec,
    sender: UnboundedSender<WsMessage>,
}

impl SendMessage for Ws {
    /// Protobuf-encodes `msg` and queues it as a binary WebSocket frame.
    /// Err(()) on encode failure (logged) or when the write task is gone.
    fn send(&self, msg: proto::Message) -> Result<(), ()> {
        let bytes = self
            .codec
            .encode(msg)
            .map_err(|e| log::warn!("ws encode err: {:?}", e))?;
        self.sender
            .send(WsMessage::Binary(bytes.to_vec()))
            .map_err(|_| ())
    }
}

impl RecvMessage for Ws {
    type Item = WsMessage;

    /// Dispatches one WebSocket frame.
    ///
    /// Binary frames are protobuf-decoded and surfaced as messages; Pings
    /// are answered with Pongs; Text/Pong frames are ignored; a Close frame
    /// (or a decode error) returns Err(()) to stop the accepter's read loop.
    fn recv(&self, item: Self::Item) -> Result<Option<proto::Message>, ()> {
        match item {
            // Text frames carry no protocol payload in this transport.
            WsMessage::Text(_) => {}
            WsMessage::Binary(vec) => match self
                .codec
                .decode(&mut bytes::BytesMut::from(vec.as_slice()))
            {
                Ok(Some(message)) => return Ok(Some(message)),
                // Frame decoded to a non-Message packet; nothing to forward.
                Ok(None) => {}
                Err(e) => {
                    log::warn!("ws decode err: {:?}", e);
                    return Err(());
                }
            },
            // Does Ws need ping pong?
            WsMessage::Ping(vec) => {
                let _ = self.sender.send(WsMessage::Pong(vec));
            }
            WsMessage::Pong(_) => {}
            WsMessage::Close(close_frame) => {
                log::warn!("websocket will closed: {:?}", close_frame);
                return Err(());
            }
        }
        Ok(None)
    }
}
    /// Attempts to decode one length-delimited `Packet` from `buf`.
    ///
    /// Returns `Ok(None)` when the buffer does not yet hold a complete
    /// frame; a fully parsed frame length is cached in `self.decode_len`
    /// so the prefix is not re-parsed on the next call.
    pub fn decode(
        &mut self,
        buf: &mut BytesMut,
    ) -> Result<Option<proto::packet::Packet>, DecodeError> {
        // With fewer than 10 bytes (the maximum varint width) the length
        // prefix itself may still be incomplete, so probe it carefully.
        if buf.len() < VARINT_MAX_LEN {
            return self.decode_length_delimiter(buf);
        }

        self._decode(buf)
    }

    /// Short-buffer path: parse the length prefix on a clone first so an
    /// incomplete varint leaves `buf` untouched and simply reports
    /// "need more data" instead of erroring.
    fn decode_length_delimiter(
        &mut self,
        buf: &mut BytesMut,
    ) -> Result<Option<proto::packet::Packet>, DecodeError> {
        match self.decode_len {
            // Length already known from a previous call; go straight to the body.
            Some(_) => self._decode(buf),
            None => {
                let mut b = buf.clone();
                let new_buf = &mut b;
                match prost::decode_length_delimiter(new_buf) {
                    Ok(_) => self._decode(buf),
                    // Incomplete varint: wait for more bytes.
                    Err(_) => Ok(None),
                }
            }
        }
    }

    /// Consumes the length prefix (unless cached) and, if the full body has
    /// arrived, decodes one packet and strips it from `buf`.
    fn _decode(
        &mut self,
        buf: &mut BytesMut,
    ) -> Result<Option<proto::packet::Packet>, DecodeError> {
        let len = if let Some(len) = self.decode_len.take() {
            len
        } else {
            // Advances `buf` past the varint prefix.
            prost::decode_length_delimiter(buf as &mut dyn bytes::Buf)?
        };

        if len > buf.len() {
            // Body not fully buffered yet; remember the length for next time.
            self.decode_len = Some(len);
            return Ok(None);
        }
        let b = &buf[0..len];
        let p = proto::Packet::decode(b)?.packet;
        buf.advance(len);
        Ok(p)
    }
String::from("Hello, World!"); 106 | let request = build_packet(request, 4); 107 | let request_vector = codec.encode(request).unwrap(); 108 | let request_vector = [request_vector].concat(); 109 | let mut bm = BytesMut::from(request_vector.as_slice()); 110 | 111 | let result = codec.decode(&mut bm); 112 | assert!(result.is_ok()); 113 | 114 | Ok(()) 115 | } 116 | 117 | #[test] 118 | fn test_decode_multiple() -> Result<(), prost::DecodeError> { 119 | let mut codec = Codec::default(); 120 | let request = String::from("Hello, World!"); 121 | let request = build_packet(request, 4); 122 | let request_vector = codec.encode(request).unwrap(); 123 | let request_vector = [request_vector.clone(), request_vector].concat(); 124 | let mut bm = BytesMut::from(request_vector.as_slice()); 125 | 126 | let result = codec.decode(&mut bm); 127 | assert!(result.is_ok()); 128 | assert!(result.unwrap().is_some()); 129 | 130 | let result = codec.decode(&mut bm); 131 | assert!(result.is_ok()); 132 | assert!(result.unwrap().is_some()); 133 | Ok(()) 134 | } 135 | 136 | #[test] 137 | fn test_decode_partial() -> Result<(), prost::DecodeError> { 138 | let mut codec = Codec::default(); 139 | let request = String::from("Hello, World!"); 140 | let request = build_packet(request, 4); 141 | let request_vector = codec.encode(request).unwrap(); 142 | let request_vector = [request_vector].concat(); 143 | 144 | let len = request_vector.len(); 145 | let mut bm = BytesMut::from(&request_vector[0..len / 2]); 146 | let result = codec.decode(&mut bm)?; 147 | assert!(result.is_none()); 148 | 149 | bm.put(&request_vector[len / 2..]); 150 | let result = codec.decode(&mut bm)?; 151 | assert!(result.is_some()); 152 | Ok(()) 153 | } 154 | 155 | #[test] 156 | fn test_decode_partial_varint() -> Result<(), prost::DecodeError> { 157 | let mut codec = Codec::default(); 158 | let request = String::from("Hello, World!"); 159 | let request = build_packet(request, 1000); 160 | let request_vector = 
codec.encode(request).unwrap(); 161 | let request_vector = [request_vector].concat(); 162 | 163 | let mut bm = BytesMut::from(&request_vector[0..1]); 164 | let result = codec.decode(&mut bm)?; 165 | assert!(result.is_none()); 166 | 167 | bm.put(&request_vector[1..]); 168 | let result = codec.decode(&mut bm)?; 169 | assert!(result.is_some()); 170 | 171 | let mut bm = BytesMut::from(&request_vector[0..2]); 172 | let result = codec.decode(&mut bm)?; 173 | assert!(result.is_none()); 174 | 175 | bm.put(&request_vector[2..]); 176 | let result = codec.decode(&mut bm)?; 177 | assert!(result.is_some()); 178 | 179 | let mut bm = BytesMut::from(&request_vector[0..3]); 180 | let result = codec.decode(&mut bm)?; 181 | assert!(result.is_none()); 182 | 183 | bm.put(&request_vector[3..]); 184 | let result = codec.decode(&mut bm)?; 185 | assert!(result.is_some()); 186 | 187 | let mut bm = BytesMut::from(&request_vector[0..4]); 188 | let result = codec.decode(&mut bm)?; 189 | assert!(result.is_none()); 190 | 191 | bm.put(&request_vector[4..]); 192 | let result = codec.decode(&mut bm)?; 193 | assert!(result.is_some()); 194 | Ok(()) 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /server-state/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server-state" 3 | version = "0.1.0" 4 | edition = "2021" 5 | resolver = "2" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | log = "0.4" 11 | rand = "^0.8" 12 | lazy_static = "1.4" 13 | parking_lot = "0.11" 14 | serde = { version = "1.0", features = ["derive"] } 15 | serde_json = "1.0" 16 | 17 | server-base = { path = "../server-base" } -------------------------------------------------------------------------------- /server-state/src/cluster.rs: -------------------------------------------------------------------------------- 1 | use 
crate::{Connect, GrpcClient}; 2 | use parking_lot::RwLock; 3 | use server_base::HandyRwLock; 4 | use std::collections::HashMap; 5 | use std::sync::Arc; 6 | 7 | pub type Clusters = Cluster; 8 | 9 | #[derive(Clone)] 10 | pub struct Cluster { 11 | //map for node_id to grpc_client 12 | clients: Arc>>, 13 | // map for addr to node_id 14 | addr_node_map: Arc>>, 15 | } 16 | 17 | impl Default for Cluster { 18 | fn default() -> Self { 19 | Cluster { 20 | clients: Arc::new(RwLock::new(HashMap::new())), 21 | addr_node_map: Arc::new(RwLock::new(HashMap::new())), 22 | } 23 | } 24 | } 25 | 26 | impl Cluster { 27 | pub(crate) fn new() -> Self { 28 | Self::default() 29 | } 30 | 31 | pub fn get_node_id(&self, addr: &str) -> Option { 32 | self.addr_node_map.rl().get(addr).cloned() 33 | } 34 | 35 | fn add_node_id(&mut self, addr: &str, node_id: &str) -> Option { 36 | self.addr_node_map 37 | .wl() 38 | .insert(addr.to_owned(), node_id.to_owned()) 39 | } 40 | 41 | fn remove_node_id(&mut self, addr: &str, node_id: &str) { 42 | if let Some(old_node_id) = self.get_node_id(addr) { 43 | if node_id == old_node_id { 44 | self.addr_node_map.wl().remove(addr); 45 | } 46 | } 47 | } 48 | 49 | pub fn add_client(&mut self, node_id: &str, host: &str, port: u16) { 50 | let timeout_ms = crate::config().cluster_grpc_timeout_ms(); 51 | let addr = format!("{}:{}", host, port); 52 | if let Some(old_node_id) = self.add_node_id(&addr, node_id) { 53 | self.clients.wl().remove(old_node_id.as_str()); 54 | } 55 | let c = T::connect(addr, timeout_ms); 56 | self.clients.wl().insert(node_id.to_string(), c); 57 | } 58 | 59 | pub fn remove_client(&mut self, node_id: &str, host: &str, port: u16) { 60 | let addr = format!("{}:{}", host, port); 61 | log::warn!("remove client for node: {}, {}", node_id, addr); 62 | self.clients.wl().remove(node_id); 63 | self.remove_node_id(&addr, node_id); 64 | } 65 | 66 | pub fn pick_client(&self, node_id: &str) -> Option { 67 | self.clients.rl().get(node_id).cloned() 68 | } 69 | 70 | 
pub fn get_clients(&self) -> Arc>> { 71 | Arc::clone(&self.clients) 72 | } 73 | 74 | pub fn len(&self) -> usize { 75 | self.clients.rl().len() 76 | } 77 | 78 | pub fn is_empty(&self) -> bool { 79 | self.len() == 0 80 | } 81 | 82 | pub fn all_clients(&self) -> Vec { 83 | self.clients.rl().keys().cloned().collect() 84 | } 85 | 86 | pub fn all_nodes(&self) -> HashMap { 87 | self.addr_node_map.rl().clone() 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /server-state/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | mod cluster; 5 | mod service; 6 | mod state; 7 | 8 | pub use cluster::Clusters; 9 | pub use service::Services; 10 | pub use state::{NodeOperation, State}; 11 | 12 | use parking_lot::RwLock; 13 | use server_base::tonic::transport::Endpoint; 14 | use server_base::Channel; 15 | use server_base::FPConfig; 16 | use server_base::HandyRwLock; 17 | use std::fmt; 18 | use std::sync::atomic::{AtomicUsize, Ordering}; 19 | use std::time::Duration; 20 | 21 | lazy_static! 
{ 22 | // these are initiated only once 23 | static ref CONFIG: RwLock = RwLock::new(FPConfig::new()); 24 | 25 | // these are maintained by fplink-cluster 26 | static ref CLUSTER_CLIENT_STATE: Clusters = Clusters::new(); 27 | static ref SERVICE_CLIENT_STATE: Services = Services::new(); 28 | static ref CLUSTER_STATE: State = State::new(config()); 29 | 30 | // these are maintained by fplink-core 31 | static ref TCP_CONNS_COUNT: AtomicUsize = AtomicUsize::new(0); 32 | static ref WS_CONNS_COUNT: AtomicUsize = AtomicUsize::new(0); 33 | static ref QUIC_CONNS_COUNT: AtomicUsize = AtomicUsize::new(0); 34 | } 35 | 36 | pub fn set_config(config: FPConfig) { 37 | let mut guard = CONFIG.wl(); 38 | *guard = config; 39 | } 40 | 41 | pub fn config() -> FPConfig { 42 | let guard = CONFIG.rl(); 43 | guard.clone() 44 | } 45 | 46 | pub fn get_zone() -> Option { 47 | let guard = CONFIG.rl(); 48 | guard.zone() 49 | } 50 | 51 | pub fn cluster_state() -> State { 52 | CLUSTER_STATE.clone() 53 | } 54 | 55 | pub fn cluster_client_state() -> Clusters { 56 | CLUSTER_CLIENT_STATE.clone() 57 | } 58 | 59 | pub fn service_client_state() -> Services { 60 | SERVICE_CLIENT_STATE.clone() 61 | } 62 | 63 | pub fn inc_tcp_count() { 64 | TCP_CONNS_COUNT.fetch_add(1, Ordering::SeqCst); 65 | } 66 | 67 | pub fn dec_tcp_count() { 68 | TCP_CONNS_COUNT.fetch_sub(1, Ordering::SeqCst); 69 | } 70 | 71 | pub fn get_tcp_count() -> isize { 72 | TCP_CONNS_COUNT.load(Ordering::Relaxed) as isize 73 | } 74 | 75 | pub fn inc_ws_count() { 76 | WS_CONNS_COUNT.fetch_add(1, Ordering::SeqCst); 77 | } 78 | 79 | pub fn dec_ws_count() { 80 | WS_CONNS_COUNT.fetch_sub(1, Ordering::SeqCst); 81 | } 82 | 83 | pub fn get_ws_count() -> isize { 84 | WS_CONNS_COUNT.load(Ordering::Relaxed) as isize 85 | } 86 | 87 | pub fn inc_quic_count() { 88 | QUIC_CONNS_COUNT.fetch_add(1, Ordering::SeqCst); 89 | } 90 | 91 | pub fn dec_quic_count() { 92 | QUIC_CONNS_COUNT.fetch_sub(1, Ordering::SeqCst); 93 | } 94 | 95 | pub fn get_quic_count() -> isize 
{ 96 | QUIC_CONNS_COUNT.load(Ordering::Relaxed) as isize 97 | } 98 | 99 | pub fn get_total_conn_count() -> isize { 100 | get_quic_count() + get_tcp_count() + get_ws_count() 101 | } 102 | 103 | pub trait Connect: Default { 104 | type Item: Clone; 105 | fn connect>(addr: S, timeout_ms: u64) -> Self::Item; 106 | } 107 | 108 | #[derive(Default, Clone)] 109 | pub struct GrpcClient { 110 | addr: Option, 111 | } 112 | 113 | impl fmt::Display for GrpcClient { 114 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 115 | write!(f, "grpc_client: {:?}", self.addr.as_ref()) 116 | } 117 | } 118 | 119 | impl Connect for GrpcClient { 120 | type Item = Channel; 121 | fn connect>(addr: S, timeout_ms: u64) -> Self::Item { 122 | let addr = format!("http://{}", addr.into()); 123 | let endpoint: Endpoint = addr.try_into().expect("invalid addr"); 124 | let endpoint = endpoint.timeout(Duration::from_millis(timeout_ms)); 125 | endpoint.connect_lazy() 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /server-state/src/service.rs: -------------------------------------------------------------------------------- 1 | use crate::{Connect, GrpcClient}; 2 | use parking_lot::RwLock; 3 | use server_base::HandyRwLock; 4 | use std::collections::HashMap; 5 | use std::fmt; 6 | use std::sync::atomic::{AtomicUsize, Ordering}; 7 | use std::sync::Arc; 8 | 9 | pub type Services = Service; 10 | 11 | struct ServiceClient { 12 | client: Arc, 13 | zone: Option, 14 | } 15 | 16 | struct ServiceClients { 17 | clients: HashMap>>, 18 | fallback_client: Option<(String, Arc)>, 19 | counter: AtomicUsize, 20 | } 21 | 22 | impl Default for ServiceClients { 23 | fn default() -> Self { 24 | Self { 25 | clients: Default::default(), 26 | fallback_client: None, 27 | counter: AtomicUsize::new(0), 28 | } 29 | } 30 | } 31 | impl ServiceClients { 32 | fn set_fallback(&mut self, addr: &str, client: Arc) { 33 | self.fallback_client = Some((addr.into(), client)); 34 | } 
35 | 36 | fn pick_client(&self, zone: Option) -> Option<(String, Arc)> { 37 | let total_count = self.clients.len(); 38 | let zone_clients = self 39 | .clients 40 | .iter() 41 | .filter(|(_, client)| client.zone.eq(&zone)) 42 | .collect::>(); 43 | let zone_count = zone_clients.len(); 44 | if total_count == 0 { 45 | match &self.fallback_client { 46 | None => { 47 | log::error!("pick_client no alive clients, no fallback"); 48 | None 49 | } 50 | Some((addr, client)) => { 51 | log::error!("pick_client no alive clients, with fallback {:?}", addr); 52 | Some((addr.to_string(), Arc::clone(client))) 53 | } 54 | } 55 | } else if zone_count == 0 { 56 | let index = self.counter.fetch_add(1, Ordering::SeqCst) % total_count; 57 | self.clients.iter().nth(index).map(|(addr, client)| { 58 | log::trace!("pick_client {} ignore zone {:?}", addr, zone); 59 | (addr.to_string(), Arc::clone(&client.client)) 60 | }) 61 | } else { 62 | let index = self.counter.fetch_add(1, Ordering::SeqCst) % zone_count; 63 | zone_clients.get(index).map(|(addr, client)| { 64 | log::trace!("pick_client {} with zone {:?}", addr, zone); 65 | (addr.to_string(), Arc::clone(&client.client)) 66 | }) 67 | } 68 | } 69 | } 70 | 71 | impl fmt::Display for ServiceClients { 72 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 73 | let addrs: Vec = self.clients.keys().cloned().collect(); 74 | let fallback = match self.fallback_client { 75 | Some((ref ip, _)) => ip.clone(), 76 | None => "no fallback".to_string(), 77 | }; 78 | write!(f, "addrs: {:?}, fallback: {}", addrs, fallback) 79 | } 80 | } 81 | 82 | #[derive(Clone)] 83 | pub struct Service { 84 | inner: Arc>>, 85 | } 86 | 87 | impl Service { 88 | pub(crate) fn new() -> Self { 89 | Self::default() 90 | } 91 | 92 | pub fn add_fallback(&self, namespace: &str, addr: &str) { 93 | self.inner.wl().add_fallback(namespace, addr); 94 | } 95 | 96 | pub fn add_client(&self, namespace: &str, addr: &str, zone: Option) { 97 | self.inner.wl().add_client(namespace, addr, 
zone); 98 | } 99 | 100 | pub fn remove_client(&self, namespace: &str, addr: &str) { 101 | self.inner.wl().remove_client(namespace, addr); 102 | } 103 | 104 | pub fn pick_client( 105 | &self, 106 | namespace: &str, 107 | zone: Option, 108 | ) -> Option<(String, Arc<::Item>)> { 109 | self.inner 110 | .rl() 111 | .pick_client(namespace, zone) 112 | .map(|(addr, client)| (addr, Arc::clone(&client))) 113 | } 114 | 115 | pub fn all_clients(&self) -> HashMap { 116 | self.inner 117 | .rl() 118 | .0 119 | .iter() 120 | .map(|(ns, ns_clients)| (ns.clone(), ns_clients.to_string())) 121 | .collect() 122 | } 123 | } 124 | 125 | impl Default for Service { 126 | fn default() -> Self { 127 | Self { 128 | inner: Arc::new(RwLock::new(Inner::::new())), 129 | } 130 | } 131 | } 132 | 133 | struct Inner(HashMap>); 134 | 135 | impl Default for Inner { 136 | fn default() -> Self { 137 | Self(Default::default()) 138 | } 139 | } 140 | 141 | impl Inner { 142 | fn new() -> Self { 143 | Self::default() 144 | } 145 | 146 | fn add_fallback(&mut self, namespace: &str, addr: &str) { 147 | let timeout = crate::config().service_grpc_timeout_ms(); 148 | let client = T::connect(addr, timeout); 149 | let fallback_client = Arc::new(client); 150 | 151 | if let Some(ref mut ns_clients) = self.0.get_mut(namespace) { 152 | ns_clients.set_fallback(addr, fallback_client); 153 | } else { 154 | let mut ns_clients = ServiceClients::default(); 155 | ns_clients.set_fallback(addr, fallback_client); 156 | self.0.insert(namespace.to_string(), ns_clients); 157 | } 158 | 159 | log::info!( 160 | "add fallback to business Clients: ns:{}, addr:{}", 161 | namespace, 162 | addr 163 | ); 164 | } 165 | 166 | fn add_client(&mut self, namespace: &str, addr: &str, zone: Option) { 167 | let timeout = crate::config().service_grpc_timeout_ms(); 168 | let client = T::connect(addr, timeout); 169 | let client = Arc::new(ServiceClient { 170 | client: Arc::new(client), 171 | zone, 172 | }); 173 | let ns_clients = self 174 | .0 175 | 
.entry(namespace.to_string()) 176 | .or_insert_with(Default::default); 177 | ns_clients.clients.insert(addr.to_string(), client); 178 | 179 | log::info!("add client to Clients: ns:{}, addr:{}", namespace, addr); 180 | } 181 | 182 | fn remove_client(&mut self, namespace: &str, addr: &str) { 183 | if let Some(ns_clients) = self.0.get_mut(namespace) { 184 | ns_clients.clients.remove(addr); 185 | log::info!( 186 | "remove client from Clients: ns:{}, addr:{}", 187 | namespace, 188 | addr 189 | ); 190 | } else { 191 | log::warn!( 192 | "trying to remove non-exist client from Clients, ns:{} addr:{}", 193 | namespace, 194 | addr 195 | ); 196 | } 197 | } 198 | 199 | fn pick_client( 200 | &self, 201 | namespace: &str, 202 | zone: Option, 203 | ) -> Option<(String, Arc<::Item>)> { 204 | if let Some(ns_clients) = self.0.get(namespace) { 205 | ns_clients.pick_client(zone) 206 | } else { 207 | log::warn!("trying to pick non-exist client from ns:{} ", namespace,); 208 | None 209 | } 210 | } 211 | } 212 | 213 | #[cfg(test)] 214 | mod tests { 215 | use super::*; 216 | 217 | #[derive(Default)] 218 | struct MockClient {} 219 | 220 | impl Connect for MockClient { 221 | type Item = u8; 222 | fn connect>(_addr: S, _timeout_ms: u64) -> Self::Item { 223 | 0 224 | } 225 | } 226 | 227 | fn init() -> Service { 228 | let client_state = Service::::default(); 229 | client_state.add_client("ns", "addr0", Some("zone1".to_owned())); 230 | client_state.add_client("ns", "addr1", Some("zone1".to_owned())); 231 | client_state.add_client("ns", "addr2", Some("zone2".to_owned())); 232 | client_state.add_client("ns", "addr3", None); 233 | client_state.add_fallback("ns", "addr4"); 234 | client_state 235 | } 236 | 237 | #[test] 238 | fn test_pick_multiple_from_same_zone() { 239 | let client_state = init(); 240 | for _ in 0..100 { 241 | match client_state.pick_client("ns", Some("zone1".to_owned())) { 242 | Some((addr, _c)) => match addr.as_str() { 243 | "addr0" => assert!(true), 244 | "addr1" => 
assert!(true), 245 | "addr2" => assert!(false), 246 | "addr3" => assert!(false), 247 | "addr4" => assert!(false), 248 | _ => assert!(false), 249 | }, 250 | None => assert!(false), 251 | } 252 | } 253 | } 254 | 255 | #[test] 256 | fn test_pick_one_from_zone2() { 257 | let client_state = init(); 258 | for _ in 0..100 { 259 | match client_state.pick_client("ns", Some("zone2".to_owned())) { 260 | Some((addr, _c)) => match addr.as_str() { 261 | "addr2" => assert!(true), 262 | _ => assert!(false), 263 | }, 264 | None => assert!(false), 265 | } 266 | } 267 | } 268 | 269 | #[test] 270 | fn test_pick_unknown_zone() { 271 | let client_state = init(); 272 | for _ in 0..100 { 273 | match client_state.pick_client("ns", Some("unknown".to_owned())) { 274 | Some((addr, _c)) => match addr.as_str() { 275 | "addr0" => assert!(true), 276 | "addr1" => assert!(true), 277 | "addr2" => assert!(true), 278 | "addr3" => assert!(true), 279 | "addr4" => assert!(false), // should not fallback 280 | _ => assert!(false), 281 | }, 282 | None => assert!(false), 283 | } 284 | } 285 | } 286 | 287 | #[test] 288 | fn test_pick_none_zone() { 289 | let client_state = init(); 290 | for _ in 0..100 { 291 | match client_state.pick_client("ns", None) { 292 | Some((addr, _c)) => match addr.as_str() { 293 | "addr0" => assert!(true), 294 | "addr1" => assert!(true), 295 | "addr2" => assert!(true), 296 | "addr3" => assert!(true), 297 | "addr4" => assert!(false), // should not fallback 298 | _ => assert!(false), 299 | }, 300 | None => assert!(false), 301 | } 302 | } 303 | } 304 | 305 | #[test] 306 | fn test_pick_empty_str_zone() { 307 | let client_state = init(); 308 | for _ in 0..100 { 309 | match client_state.pick_client("ns", Some("".to_owned())) { 310 | Some((addr, _c)) => match addr.as_str() { 311 | "addr0" => assert!(true), 312 | "addr1" => assert!(true), 313 | "addr2" => assert!(true), 314 | "addr3" => assert!(true), 315 | "addr4" => assert!(false), // should not fallback 316 | _ => assert!(false), 317 | }, 
318 | None => assert!(false), 319 | } 320 | } 321 | } 322 | 323 | #[test] 324 | fn test_fallback() { 325 | let client_state = Service::::default(); 326 | client_state.add_fallback("ns", "addr4"); 327 | for _ in 0..100 { 328 | match client_state.pick_client("ns", Some("".to_owned())) { 329 | Some((addr, _c)) => match addr.as_str() { 330 | "addr4" => assert!(true), // should fallback 331 | _ => assert!(false), 332 | }, 333 | None => assert!(false), 334 | } 335 | } 336 | } 337 | } 338 | -------------------------------------------------------------------------------- /server-state/src/state.rs: -------------------------------------------------------------------------------- 1 | use parking_lot::RwLock; 2 | use server_base::{FPConfig, HandyRwLock, RegistryNode, ServerNode, ServiceNode}; 3 | use std::collections::{HashMap, HashSet}; 4 | use std::sync::Arc; 5 | 6 | pub trait NodeOperation: Send + Sync + Clone + 'static { 7 | fn server_node(&self) -> &ServerNode; 8 | 9 | fn registry_node(&self) -> &RegistryNode; 10 | 11 | fn is_myself(&self, node_id: &str) -> bool; 12 | 13 | fn find_node_by_host(&self, hostname: &str) -> Option; 14 | 15 | fn find_node_by_id(&self, node_id: &str) -> Option; 16 | } 17 | 18 | #[derive(Clone, Debug)] 19 | pub struct State { 20 | inner: Arc, 21 | } 22 | 23 | impl State { 24 | pub(crate) fn new(config: FPConfig) -> Self { 25 | let node_id = rand::random::().to_string(); 26 | let hostname = config.hostname(); 27 | let zone = config.zone(); 28 | let server_node = ServerNode::new(node_id, hostname.clone(), 6321, 1215, zone); 29 | let registry_node = RegistryNode::new( 30 | hostname + ":8082", // FIXME: hard code 8082 read from conf 31 | config.cert_path().is_some(), 32 | ); 33 | let inner = Inner::new(server_node, registry_node); 34 | 35 | Self { 36 | inner: Arc::new(inner), 37 | } 38 | } 39 | 40 | pub fn show_init_info(&self) { 41 | let server_node = self.server_node(); 42 | let registry_node = self.registry_node(); 43 | 
log::info!("_fplink_state:||server_node= {:?}", server_node); 44 | log::info!("_fplink_state:||registry_node= {:?}", registry_node); 45 | } 46 | 47 | pub fn update_cluster_nodes(&mut self, nodes: HashSet) { 48 | let mut lock = self.inner.cluster_nodes.wl(); 49 | *lock = nodes; 50 | } 51 | 52 | pub fn update_service_nodes(&mut self, namespace: &str, nodes: HashSet) { 53 | let mut lock = self.inner.service_nodes.wl(); 54 | lock.insert(namespace.into(), nodes); 55 | } 56 | 57 | pub fn cluster_nodes(&self) -> HashSet { 58 | self.inner.cluster_nodes.rl().clone() 59 | } 60 | 61 | pub fn service_nodes(&self) -> HashMap> { 62 | self.inner.service_nodes.rl().clone() 63 | } 64 | } 65 | 66 | impl NodeOperation for State { 67 | fn server_node(&self) -> &ServerNode { 68 | &self.inner.server_node 69 | } 70 | 71 | fn registry_node(&self) -> &RegistryNode { 72 | &self.inner.registry_node 73 | } 74 | 75 | fn is_myself(&self, node_id: &str) -> bool { 76 | self.server_node().node_id == node_id 77 | } 78 | 79 | fn find_node_by_host(&self, hostname: &str) -> Option { 80 | let server_node = self.server_node(); 81 | if server_node.hostname == hostname { 82 | return Some(server_node.clone()); 83 | } 84 | 85 | self.inner 86 | .cluster_nodes 87 | .rl() 88 | .iter() 89 | .find(|node| node.hostname == hostname) 90 | .cloned() 91 | } 92 | 93 | fn find_node_by_id(&self, node_id: &str) -> Option { 94 | let server_node = self.server_node(); 95 | if server_node.node_id == node_id { 96 | return Some(server_node.clone()); 97 | } 98 | 99 | self.inner 100 | .cluster_nodes 101 | .rl() 102 | .iter() 103 | .find(|node| node.node_id == node_id) 104 | .cloned() 105 | } 106 | } 107 | 108 | #[derive(Debug)] 109 | struct Inner { 110 | server_node: ServerNode, 111 | registry_node: RegistryNode, 112 | cluster_nodes: RwLock>, 113 | service_nodes: RwLock>>, 114 | } 115 | 116 | impl Inner { 117 | pub fn new(server_node: ServerNode, registry_node: RegistryNode) -> Self { 118 | Self { 119 | server_node, 120 | 
registry_node, 121 | cluster_nodes: Default::default(), 122 | service_nodes: Default::default(), 123 | } 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server" 3 | version = "0.1.0" 4 | edition = "2021" 5 | resolver = "2" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [build-dependencies] 10 | vergen = "3" 11 | 12 | [dependencies] 13 | log = "0.4" 14 | tracing-appender = "0.2" 15 | tracing-core = "0.1" 16 | tracing-log = "0.1" 17 | tracing-subscriber = { version = "0.3", features = ["local-time", "env-filter"] } 18 | 19 | server-base = { path = "../server-base" } 20 | server-state = { path = "../server-state" } 21 | server-core = { path = "../server-core" } 22 | server-listener = { path = "../server-listener" } 23 | server-hproxy = { path = "../server-hproxy" } 24 | server-grpc = { path = "../server-grpc" } 25 | 26 | -------------------------------------------------------------------------------- /server/build.rs: -------------------------------------------------------------------------------- 1 | use vergen::{generate_cargo_keys, ConstantsFlags}; 2 | 3 | fn main() { 4 | // Setup the flags, toggling off the 'SEMVER_FROM_CARGO_PKG' flag 5 | let mut flags = ConstantsFlags::all(); 6 | flags.toggle(ConstantsFlags::SEMVER_FROM_CARGO_PKG); 7 | 8 | // Generate the 'cargo:' key output 9 | generate_cargo_keys(ConstantsFlags::all()).expect("Unable to generate the cargo keys!"); 10 | } 11 | -------------------------------------------------------------------------------- /server/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, net::SocketAddr, sync::Arc}; 2 | 3 | use server_base::{ 4 | proto::{EmitSidReq, Message}, 5 | tokio, 6 | tonic::async_trait, 7 | 
BuiltinService, FPConfig, IdGen, PushConn, 8 | }; 9 | use server_core::{lifecycle::ConnLifeCycle, CoreOperator}; 10 | use server_grpc::{ClusterForwarder, ClusterOperator, Dispatcher}; 11 | use server_hproxy::HPROXY_NAMESPACE; 12 | use server_listener::{listen_quic, listen_tls}; 13 | use server_state::NodeOperation; 14 | use tracing_appender::{non_blocking::WorkerGuard, rolling}; 15 | use tracing_core::LevelFilter; 16 | use tracing_subscriber::{ 17 | fmt::layer, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, Layer, 18 | }; 19 | 20 | pub type BuiltinServiceMap = HashMap>; 21 | const CONN_EXPIRE_TIMEOUT: u64 = 30; 22 | 23 | pub fn main() { 24 | let config = server_state::config(); 25 | let _guard = init_logger(&config); 26 | show_build_info(); 27 | server_state::cluster_state().show_init_info(); 28 | tokio::runtime::Builder::new_multi_thread() 29 | .enable_all() 30 | .worker_threads(config.worker_num()) 31 | .thread_name("feature-probe") 32 | .build() 33 | .expect("can not start tokio runtime") 34 | .block_on(async { 35 | let core = CoreOperator::new(Box::new(Dispatcher::default())); 36 | start_grpc_server(core.clone()); 37 | tokio::spawn(start_conn_listener(core)); 38 | tokio::signal::ctrl_c().await.expect("shut down"); 39 | }); 40 | } 41 | 42 | fn init_logger(config: &FPConfig) -> (WorkerGuard, WorkerGuard, WorkerGuard) { 43 | let dir = config.log_directory(); 44 | let (info_appender, info_guard) = 45 | tracing_appender::non_blocking(rolling::daily(&dir, "info.log")); 46 | let (warn_appender, warn_guard) = 47 | tracing_appender::non_blocking(rolling::daily(&dir, "warn.log")); 48 | let (err_appender, err_guard) = 49 | tracing_appender::non_blocking(rolling::daily(&dir, "error.log")); 50 | let stdout = layer() 51 | .with_line_number(true) 52 | .with_filter(LevelFilter::TRACE); 53 | let info = layer() 54 | .with_writer(info_appender) 55 | .with_line_number(true) 56 | .with_filter(LevelFilter::INFO); 57 | let warn = layer() 58 | 
.with_writer(warn_appender) 59 | .with_line_number(true) 60 | .with_filter(LevelFilter::WARN); 61 | let err = layer() 62 | .with_writer(err_appender) 63 | .with_line_number(true) 64 | .with_filter(LevelFilter::ERROR); 65 | 66 | tracing_subscriber::registry() 67 | .with(stdout) 68 | .with(info) 69 | .with(warn) 70 | .with(err) 71 | .init(); 72 | 73 | (info_guard, warn_guard, err_guard) 74 | } 75 | 76 | fn show_build_info() { 77 | log::info!("__conf||commit={}", env!("VERGEN_SHA")); 78 | log::info!("__conf||commit_date={}", env!("VERGEN_COMMIT_DATE")); 79 | log::info!("__conf||build_ts={}", env!("VERGEN_BUILD_TIMESTAMP")); 80 | log::info!("__conf||target={}", env!("VERGEN_TARGET_TRIPLE")); 81 | } 82 | 83 | async fn start_conn_listener(core: CoreOperator) { 84 | let builtin_services = Arc::new(init_builtin_services(core.clone())); 85 | let config = server_state::config(); 86 | let node_id = server_state::cluster_state().server_node().node_id.clone(); 87 | let sid_gen = IdGen::new(node_id); 88 | let listen_addr = config.conn_listen_addr(); 89 | let cert_path = config.cert_path(); 90 | let lifecycle = Arc::new(ConnLifeCycle::new(core, sid_gen, builtin_services.clone())); 91 | log::info!("tls cert path: {:?}", cert_path); 92 | 93 | if let Some(cert_path) = config.cert_path_quic() { 94 | tokio::spawn(listen_quic( 95 | listen_addr.clone(), 96 | CONN_EXPIRE_TIMEOUT, 97 | cert_path, 98 | lifecycle.clone(), 99 | )); 100 | } 101 | 102 | tokio::spawn(listen_tls( 103 | listen_addr, 104 | CONN_EXPIRE_TIMEOUT, 105 | cert_path, 106 | lifecycle, 107 | )); 108 | } 109 | 110 | fn init_builtin_services(core: CoreOperator) -> BuiltinServiceMap { 111 | let mut builtin_handlers: HashMap> = HashMap::new(); 112 | builtin_handlers.insert( 113 | "__ECHO".to_owned(), 114 | Arc::new(EchoService { 115 | pusher: Box::new(core.clone()), 116 | }), 117 | ); 118 | if let Some(http_proxy) = server_hproxy::build_http_proxy(core) { 119 | let http_proxy = Arc::new(http_proxy); 120 | 
builtin_handlers.insert(HPROXY_NAMESPACE.to_string(), http_proxy); 121 | } 122 | builtin_handlers 123 | } 124 | 125 | fn start_grpc_server(core: CoreOperator) { 126 | let config = server_state::config(); 127 | let cluster_addr = config.peer_listen_addr(); 128 | let forwarded_grpc = core.clone(); 129 | server_grpc::grpc_listen(&cluster_addr, forwarded_grpc); 130 | 131 | let service_addr = server_state::config().service_listen_addr(); 132 | let core_operator = core; 133 | let cluster_forwarder = ClusterForwarder::new(); 134 | let node_operator = server_state::cluster_state(); 135 | let cluster_operator = ClusterOperator::new(node_operator, core_operator, cluster_forwarder); 136 | server_grpc::grpc_listen(&service_addr, cluster_operator); 137 | } 138 | 139 | struct EchoService { 140 | pusher: Box, 141 | } 142 | 143 | #[async_trait] 144 | impl BuiltinService for EchoService { 145 | async fn on_message(&self, sid: &str, _peer_addr: Option, message: Message) { 146 | log::info!("EchoService recv {:?} from {}", message, sid); 147 | let sid = sid.to_owned(); 148 | let message = Some(message); 149 | let req = EmitSidReq { 150 | sid, 151 | message, 152 | trace: None, 153 | }; 154 | log::info!("EchoService push {:?}", req); 155 | self.pusher.push(req).await; 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | if cfg!(feature = "server") { 3 | server::main() 4 | } 5 | } 6 | --------------------------------------------------------------------------------