├── CMakeLists.txt
├── README.md
├── include
└── semaphore.h
├── logo.png
├── logo_license.txt
├── package.xml
├── src
├── client.cpp
├── semaphore.cpp
├── server.cpp
└── server_component.cpp
└── srv
├── AddTopic.srv
├── RegisterClient.srv
└── RemoveTopic.srv
/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 3.8)
project(tcp_tunnel)

if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  add_compile_options(-Wall -Wextra -Wpedantic)
endif()

# Default to a Release build, but do not stomp a build type the user chose
# explicitly (the original unconditionally forced Release), and leave
# multi-config generators alone.
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
  set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
endif()

find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(std_msgs REQUIRED)
find_package(rclcpp_components REQUIRED)
find_package(rosidl_default_generators REQUIRED)

# Generate C++ typesupport for the tunnel's service interfaces.
rosidl_generate_interfaces(${PROJECT_NAME}
  "srv/RegisterClient.srv"
  "srv/AddTopic.srv"
  "srv/RemoveTopic.srv"
  DEPENDENCIES std_msgs
)

add_executable(server src/server.cpp src/semaphore.cpp)
# Target-scoped include dirs instead of directory-scoped include_directories().
target_include_directories(server PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
ament_target_dependencies(server
  rclcpp
  std_msgs
)

# Component libraries must be SHARED so rclcpp_components can dlopen them;
# the original relied on BUILD_SHARED_LIBS being set externally.
add_library(server_component SHARED src/server_component.cpp src/semaphore.cpp)
target_include_directories(server_component PUBLIC
  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
)
ament_target_dependencies(server_component
  rclcpp
  std_msgs
  rclcpp_components
)
rclcpp_components_register_node(
  server_component
  PLUGIN "TCPTunnelServer"
  EXECUTABLE server_component_exec
)

add_executable(client src/client.cpp src/semaphore.cpp)
target_include_directories(client PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
ament_target_dependencies(client
  rclcpp
  std_msgs
)

# Link the generated typesupport library into every target that uses the
# services. NOTE(review): the keyword-less target_link_libraries signature is
# kept on purpose — ament_target_dependencies() uses the plain signature and
# CMake forbids mixing plain and PRIVATE/PUBLIC forms on the same target.
rosidl_get_typesupport_target(cpp_typesupport_target ${PROJECT_NAME} "rosidl_typesupport_cpp")
target_link_libraries(client "${cpp_typesupport_target}")
target_link_libraries(server "${cpp_typesupport_target}")
target_link_libraries(server_component "${cpp_typesupport_target}")
ament_export_dependencies(rosidl_default_runtime)

install(TARGETS client server
  DESTINATION lib/${PROJECT_NAME}
)

ament_export_targets(export_server_component)
install(TARGETS server_component
  EXPORT export_server_component
  ARCHIVE DESTINATION lib
  LIBRARY DESTINATION lib
  RUNTIME DESTINATION bin
)

ament_package()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | ---
6 |
7 | # ros2_tcp_tunnel
8 | Nodes that allow reliable TCP relay of ROS 2 topics between remote machines.
9 |
10 | ## Basic Usage
11 | On the publishing machine, run the server node using the following command:
12 | ```bash
13 | ros2 run tcp_tunnel server
14 | ```
15 |
16 | On the subscribing machine, run the client node using the following command:
17 | ```bash
18 | ros2 run tcp_tunnel client --ros-args -p client_ip:="<client ip>"
19 | ```
20 |
21 | Once both nodes are running, topics can be added to the TCP tunnel dynamically using the following service call:
22 | ```bash
23 | ros2 service call /tcp_tunnel_client/add_topic tcp_tunnel/srv/AddTopic "topic:
24 | data: ''
25 | tunnel_queue_size:
26 | data: ''
27 | server_namespace:
28 | data: ''"
29 | ```
30 | The `tunnel_queue_size` field can be left empty to use the default value of 2.
31 | If the server node is located in the global namespace (default), the `server_namespace` field can be left empty or can be set to '/'.
32 | This will create a new topic named `/tcp_tunnel_client/<topic name>` published on the subscribing machine in which the messages of the original topic are relayed.
33 |
34 | Topics can be removed from the TCP tunnel at any time using the following service call:
35 | ```bash
36 | ros2 service call /tcp_tunnel_client/remove_topic tcp_tunnel/srv/RemoveTopic "topic:
37 | data: ''"
38 | ```
39 | The provided topic name must be the name of the topic at the exit of the TCP tunnel.
40 |
41 | ## Advanced Usage
42 | ### Tuning the tunnel queue size
43 | The TCP tunnel uses a message acknowledgement mechanism between the server and client nodes.
44 | The tunnel queue size parameter controls how many messages can be sent simultaneously through the TCP tunnel.
45 | When the tunnel queue is full, the server drops messages received on the original topic until an acknowledgement is received from the client, after which the next message received on the original topic will be sent through the tunnel.
46 | If messages are published on the original topic at a higher rate than what can pass through the tunnel, this mechanism will ensure that the tunnel latency does not grow indefinitely.
47 | The tunnel queue size should be increased by the user until the publishing rate of the relayed topic reaches a plateau.
48 | Be careful not to overshoot the queue size however, because a tunnel queue size that is too high can increase the latency of the TCP tunnel.
49 |
50 | ### Providing a list of topics on startup of the client node
51 | It is possible to provide a YAML file listing all the topics to add to the TCP tunnel when starting the client node.
52 | For instance, if the user wanted to add the topics `/foo` and `/bar` automatically on startup of the client node, a YAML file with the following content should be created:
53 | ```yaml
54 | - topic: /foo
55 | tunnel_queue_size: 2
56 | server_namespace: /
57 |
58 | - topic: /bar
59 | tunnel_queue_size: 2
60 | server_namespace: /
61 | ```
62 | The `tunnel_queue_size` field can be left blank to use the default value of 2.
63 | If a server node is located in the global namespace (default), its `server_namespace` field can be left blank or can be set to '/'.
64 | This topic list can then be passed to the client node on startup using the following command:
65 | ```bash
66 | ros2 run tcp_tunnel client --ros-args -p client_ip:="<client ip>" -p initial_topic_list_file_name:="<topic list file name>"
67 | ```
68 |
69 | ### Running multiple client nodes simultaneously
70 | In order to run multiple client nodes simultaneously, each client node must be located in its own namespace.
71 | This can be achieved by running the client nodes with the following command:
72 | ```bash
73 | ros2 run tcp_tunnel client --ros-args -p client_ip:="<client ip>" -r __ns:="<client namespace>"
74 | ```
75 | Make sure to provide a namespace starting with '/'.
76 | It is then possible to call the client nodes `add_topic` services in their respective namespace:
77 | ```bash
78 | ros2 service call /tcp_tunnel_client/add_topic tcp_tunnel/srv/AddTopic "topic:
79 | data: ''
80 | tunnel_queue_size:
81 | data: ''
82 | server_namespace:
83 | data: ''"
84 | ```
85 |
86 | ### Running multiple server nodes simultaneously
87 | Similarly, to run multiple server nodes simultaneously, each server node must be located in its own namespace.
88 | This can be achieved by running the server nodes with the following command:
89 | ```bash
90 | ros2 run tcp_tunnel server --ros-args -r __ns:="<server namespace>"
91 | ```
92 | Make sure to provide a namespace starting with '/'.
93 | Then, when adding a topic to the TCP tunnel, the proper server namespace must be passed in the service call:
94 | ```bash
95 | ros2 service call /tcp_tunnel_client/add_topic tcp_tunnel/srv/AddTopic "topic:
96 | data: ''
97 | tunnel_queue_size:
98 | data: ''
99 | server_namespace:
100 | data: ''"
101 | ```
102 |
--------------------------------------------------------------------------------
/include/semaphore.h:
--------------------------------------------------------------------------------
#pragma once

#include <mutex>
#include <condition_variable>

// taken from https://stackoverflow.com/questions/4792449/c0x-has-no-semaphores-how-to-synchronize-threads
// Counting semaphore built from a mutex + condition variable (the project
// targets C++ versions without std::counting_semaphore). Used to bound the
// number of unacknowledged messages in flight through the TCP tunnel.
// NOTE(review): the two #include targets were stripped during extraction;
// <mutex> and <condition_variable> are what the member types require.
class Semaphore
{
public:
    // initialCount: number of permits initially available.
    explicit Semaphore(const int& initialCount);
    // Blocks until a permit is available or ROS shuts down; returns false
    // when the wait was aborted by shutdown instead of a permit.
    bool acquire();
    // Non-blocking acquire; returns true iff a permit was taken.
    bool tryAcquire();
    // Returns one permit and wakes a single waiter.
    void release();
    // Wakes all waiters without adding permits (used during shutdown).
    void wakeUp();

private:
    std::mutex mutex_;
    std::condition_variable condition_;
    unsigned long count_ = 0;
};
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/norlab-ulaval/ros2_tcp_tunnel/6f40e5933b3cf67017b470115f6d18254402d196/logo.png
--------------------------------------------------------------------------------
/logo_license.txt:
--------------------------------------------------------------------------------
1 | The logo of this repository contains a modified version of the ROS 2 Humble logo, which is licensed under the Creative Commons Attribution-NonCommercial 4.0 International License. The logo of ros2_tcp_tunnel is therefore also licensed under the Creative Commons Attribution-NonCommercial 4.0 International License. The original version of the ROS 2 Humble logo can be found here: https://github.com/ros-infrastructure/artwork.
2 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <!-- NOTE(review): all XML tags of this manifest were stripped during extraction;
     the tags below are reconstructed from the standard ROS 2 package format 3
     layout — the maintainer e-mail is unknown and must be confirmed. -->
3 | <package format="3">
4 |   <name>tcp_tunnel</name>
5 |   <version>0.0.1</version>
6 |   <description>Nodes that allow reliable TCP relay of ROS 2 topics between remote machines.</description>
7 |   <maintainer email="TODO@example.com">sp</maintainer>
8 |   <license>BSD-2.0</license>
9 |
10 |   <buildtool_depend>ament_cmake</buildtool_depend>
11 |
12 |   <depend>rclcpp</depend>
13 |   <depend>std_msgs</depend>
14 |   <depend>rclcpp_components</depend>
15 |
16 |   <build_depend>rosidl_default_generators</build_depend>
17 |
18 |   <exec_depend>rosidl_default_runtime</exec_depend>
19 |
20 |   <member_of_group>rosidl_interface_packages</member_of_group>
21 |
22 |   <export>
23 |     <build_type>ament_cmake</build_type>
24 |   </export>
25 |
26 | </package>
--------------------------------------------------------------------------------
/src/client.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include "semaphore.h"
10 |
11 | const std::map RELIABILITY_POLICIES = {{"BestEffort", rclcpp::ReliabilityPolicy::BestEffort},
12 | {"Reliable", rclcpp::ReliabilityPolicy::Reliable},
13 | {"SystemDefault", rclcpp::ReliabilityPolicy::SystemDefault},
14 | {"Unknown", rclcpp::ReliabilityPolicy::Unknown}};
15 | const std::map DURABILITY_POLICIES = {{"Volatile", rclcpp::DurabilityPolicy::Volatile},
16 | {"TransientLocal", rclcpp::DurabilityPolicy::TransientLocal},
17 | {"SystemDefault", rclcpp::DurabilityPolicy::SystemDefault},
18 | {"Unknown", rclcpp::DurabilityPolicy::Unknown}};
19 | const std::map LIVELINESS_POLICIES = {{"Automatic", rclcpp::LivelinessPolicy::Automatic},
20 | {"ManualByTopic", rclcpp::LivelinessPolicy::ManualByTopic},
21 | {"SystemDefault", rclcpp::LivelinessPolicy::SystemDefault},
22 | {"Unknown", rclcpp::LivelinessPolicy::Unknown}};
23 | const char CONFIRMATION_CHARACTER = '\0';
24 |
25 | class ServiceCaller : public rclcpp::Node
26 | {
27 | public:
28 | ServiceCaller():
29 | rclcpp::Node("service_caller")
30 | {
31 | }
32 | };
33 |
34 | class TCPTunnelClient : public rclcpp::Node
35 | {
36 | public:
37 | TCPTunnelClient():
38 | Node("tcp_tunnel_client")
39 | {
40 | this->declare_parameter("client_ip", "127.0.0.1");
41 | this->get_parameter("client_ip", clientIp);
42 | this->declare_parameter("initial_topic_list_file_name", "");
43 | this->get_parameter("initial_topic_list_file_name", initialTopicListFileName);
44 |
45 | if(!initialTopicListFileName.empty())
46 | {
47 | if(initialTopicListFileName.substr(initialTopicListFileName.size() - 5, 5) != ".yaml")
48 | {
49 | RCLCPP_ERROR(this->get_logger(), "Initial topic list file name must end with \".yaml\".");
50 | exit(1);
51 | }
52 |
53 | std::this_thread::sleep_for(std::chrono::seconds(1)); // wait a bit to make sure topics are available
54 |
55 | std::ifstream initialTopicListFile(initialTopicListFileName);
56 | if(!initialTopicListFile.good())
57 | {
58 | RCLCPP_ERROR(this->get_logger(), "Cannot open initial topic list file.");
59 | exit(1);
60 | }
61 |
62 | std::string line;
63 | std::string topicName;
64 | bool hasReadTopicName = false;
65 | std::string tunnelQueueSize;
66 | bool hasReadTunnelQueueSize = false;
67 | while(std::getline(initialTopicListFile, line))
68 | {
69 | if(line.find_first_not_of(' ') != std::string::npos)
70 | {
71 | if((!hasReadTopicName && line.substr(0, 9) != "- topic: ") || (hasReadTopicName && !hasReadTunnelQueueSize && line.substr(0, 21) != " tunnel_queue_size: ") ||
72 | (hasReadTopicName && hasReadTunnelQueueSize && line.substr(0, 20) != " server_namespace: "))
73 | {
74 | RCLCPP_ERROR(this->get_logger(), "Initial topic list file does not list topics in the expected format, please refer to the README.");
75 | exit(1);
76 | }
77 |
78 | if(!hasReadTopicName)
79 | {
80 | topicName = line.substr(9);
81 | hasReadTopicName = true;
82 | }
83 | else if(!hasReadTunnelQueueSize)
84 | {
85 | tunnelQueueSize = line.substr(21);
86 | hasReadTunnelQueueSize = true;
87 | }
88 | else
89 | {
90 | std::string serverNamespace = line.substr(20);
91 | addTopic(topicName, tunnelQueueSize, serverNamespace);
92 | hasReadTopicName = false;
93 | hasReadTunnelQueueSize = false;
94 | }
95 | }
96 | }
97 | initialTopicListFile.close();
98 | }
99 |
100 | std::string prefix = this->get_namespace();
101 | if(prefix.back() != '/')
102 | {
103 | prefix += "/";
104 | }
105 | addTopicService = this->create_service(prefix + "tcp_tunnel_client/add_topic",
106 | std::bind(&TCPTunnelClient::addTopicServiceCallback, this, std::placeholders::_1));
107 | removeTopicService = this->create_service(prefix + "tcp_tunnel_client/remove_topic",
108 | std::bind(&TCPTunnelClient::removeTopicServiceCallback, this, std::placeholders::_1));
109 |
110 | RCLCPP_INFO_STREAM(this->get_logger(),
111 | "Initialization done, topics can now be added to the TCP tunnel dynamically using the " << prefix << "tcp_tunnel_client/add_topic service.");
112 | }
113 |
114 | ~TCPTunnelClient()
115 | {
116 | for(size_t i = 0; i < publishingThreads.size(); ++i)
117 | {
118 | if(publishingThreads[i].joinable())
119 | {
120 | publishingThreads[i].join();
121 | }
122 | if(confirmationThreads[i].joinable())
123 | {
124 | confirmationSemaphores[i]->wakeUp();
125 | confirmationThreads[i].join();
126 | }
127 | if(socketStatuses[i])
128 | {
129 | close(connectedSockets[i]);
130 | close(listeningSockets[i]);
131 | }
132 | }
133 | }
134 |
135 | private:
136 | void addTopicServiceCallback(const std::shared_ptr req)
137 | {
138 | addTopic(req->topic.data, req->tunnel_queue_size.data, req->server_namespace.data);
139 | }
140 |
141 | void addTopic(const std::string& topicName, const std::string& tunnelQueueSizeStr, const std::string& serverNamespace)
142 | {
143 | // convert tunnel queue size to unsigned long
144 | unsigned long tunnelQueueSize = 2;
145 | if(!tunnelQueueSizeStr.empty())
146 | {
147 | for(char character: tunnelQueueSizeStr)
148 | {
149 | if(!std::isdigit(character))
150 | {
151 | RCLCPP_ERROR_STREAM(this->get_logger(), "Cannot add topic " << topicName << " to TCP tunnel, the passed tunnel queue size is not a valid positive integer.");
152 | return;
153 | }
154 | }
155 | try
156 | {
157 | tunnelQueueSize = std::stoul(tunnelQueueSizeStr);
158 |
159 | if(tunnelQueueSize == 0)
160 | {
161 | RCLCPP_ERROR_STREAM(this->get_logger(), "Cannot add topic " << topicName << " to TCP tunnel, the passed tunnel queue size must be greater than 0.");
162 | return;
163 | }
164 | }
165 | catch(const std::out_of_range& exception)
166 | {
167 | tunnelQueueSize = std::numeric_limits::max();
168 | RCLCPP_WARN_STREAM(this->get_logger(), "Passed value for tunnel queue size was clipped to " << tunnelQueueSize << " for topic " << topicName << ".");
169 | }
170 | }
171 |
172 | // make sure that the topic is not already in the tunnel
173 | std::string clientPrefix = this->get_namespace();
174 | if(clientPrefix.back() != '/')
175 | {
176 | clientPrefix += "/";
177 | }
178 | clientPrefix += "tcp_tunnel_client";
179 | if(topicName.front() != '/')
180 | {
181 | clientPrefix += "/";
182 | }
183 | for(size_t i = 0; i < publishers.size(); ++i)
184 | {
185 | if(socketStatuses[i] && publishers[i]->get_topic_name() == clientPrefix + topicName)
186 | {
187 | RCLCPP_WARN_STREAM(this->get_logger(), "Cannot add topic " << topicName << " to TCP tunnel, this topic is already in the tunnel.");
188 | return;
189 | }
190 | }
191 |
192 | // create socket
193 | int sockfd;
194 | struct sockaddr_in serv_addr;
195 |
196 | sockfd = socket(AF_INET, SOCK_STREAM, 0);
197 | if(sockfd < 0)
198 | {
199 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while creating a socket connection for topic " << topicName << ".");
200 | return;
201 | }
202 |
203 | if(fcntl(sockfd, F_SETFL, O_NONBLOCK) < 0)
204 | {
205 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while trying to set a socket flag for topic " << topicName << ".");
206 | close(sockfd);
207 | return;
208 | }
209 |
210 | bzero(&serv_addr, sizeof(serv_addr));
211 | serv_addr.sin_family = AF_INET;
212 | serv_addr.sin_addr.s_addr = INADDR_ANY;
213 | serv_addr.sin_port = htons(0);
214 | if(bind(sockfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr)) < 0)
215 | {
216 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while trying to bind to socket for topic " << topicName << ".");
217 | close(sockfd);
218 | return;
219 | }
220 |
221 | socklen_t socklen = sizeof(serv_addr);
222 | bzero(&serv_addr, socklen);
223 | if(getsockname(sockfd, (struct sockaddr*)&serv_addr, &socklen) < 0)
224 | {
225 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while trying to retrieve TCP port assigned for topic " << topicName << ".");
226 | close(sockfd);
227 | return;
228 | }
229 |
230 | // call register_client service
231 | std::shared_ptr registerClientRequest = std::make_shared();
232 | registerClientRequest->topic.data = topicName;
233 | registerClientRequest->tunnel_queue_size.data = tunnelQueueSize;
234 | std_msgs::msg::String ip;
235 | ip.data = clientIp;
236 | registerClientRequest->client_ip = ip;
237 | std_msgs::msg::UInt16 port;
238 | port.data = ntohs(serv_addr.sin_port);
239 | registerClientRequest->client_port = port;
240 |
241 | std::string serverPrefix = serverNamespace;
242 | if(serverPrefix.back() != '/')
243 | {
244 | serverPrefix += "/";
245 | }
246 | std::shared_ptr serviceCallerNode = std::make_shared();
247 | rclcpp::Client::SharedPtr registerClientClient = serviceCallerNode->create_client(
248 | serverPrefix + "tcp_tunnel_server/register_client");
249 | rclcpp::detail::FutureAndRequestId registerClientFuture = registerClientClient->async_send_request(registerClientRequest);
250 |
251 | // connect to server
252 | int newsockfd = -1;
253 | struct sockaddr_in cli_addr;
254 | socklen_t clilen;
255 |
256 | listen(sockfd, 5);
257 | clilen = sizeof(cli_addr);
258 | std::chrono::time_point startTime = std::chrono::steady_clock::now();
259 | while(std::chrono::steady_clock::now() - startTime < std::chrono::duration(10) && newsockfd < 0)
260 | {
261 | newsockfd = accept(sockfd, (struct sockaddr*)&cli_addr, &clilen);
262 | }
263 | if(newsockfd < 0)
264 | {
265 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while accepting connection for topic " << topicName << ".");
266 | close(sockfd);
267 | return;
268 | }
269 |
270 | int flag = 1;
271 | if(setsockopt(newsockfd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(int)) < 0)
272 | {
273 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while setting socket options for topic " << topicName << ".");
274 | close(newsockfd);
275 | close(sockfd);
276 | return;
277 | }
278 |
279 | if(fcntl(newsockfd, F_SETFL, O_NONBLOCK) < 0)
280 | {
281 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while trying to set a socket flag for topic " << topicName << ".");
282 | close(newsockfd);
283 | close(sockfd);
284 | return;
285 | }
286 |
287 | // retrieve topic info from server
288 | rclcpp::spin_until_future_complete(serviceCallerNode, registerClientFuture);
289 | tcp_tunnel::srv::RegisterClient::Response::SharedPtr registerClientResponse = registerClientFuture.get();
290 | if(!registerClientResponse->topic_exists.data)
291 | {
292 | RCLCPP_ERROR_STREAM(this->get_logger(), "Cannot add topic " << topicName << " to TCP tunnel, this topic doesn't exist.");
293 | close(newsockfd);
294 | close(sockfd);
295 | return;
296 | }
297 | std::string topicType = registerClientResponse->topic_type.data;
298 | rclcpp::QoS qos(rclcpp::KeepLast(1));
299 | qos.reliability(RELIABILITY_POLICIES.at(registerClientResponse->reliability_policy.data));
300 | qos.durability(DURABILITY_POLICIES.at(registerClientResponse->durability_policy.data));
301 | qos.liveliness(LIVELINESS_POLICIES.at(registerClientResponse->liveliness_policy.data));
302 |
303 | listeningSockets.push_back(sockfd);
304 | connectedSockets.push_back(newsockfd);
305 | socketStatuses.push_back(true);
306 |
307 | // create publisher
308 | publishers.push_back(this->create_generic_publisher(clientPrefix + topicName, topicType, qos));
309 |
310 | // initialize confirmation thread
311 | confirmationSemaphores.emplace_back(std::make_unique(0));
312 | confirmationThreads.emplace_back(&TCPTunnelClient::sendConfirmationLoop, this, confirmationThreads.size());
313 |
314 | // create thread
315 | publishingThreads.emplace_back(&TCPTunnelClient::publishMessageLoop, this, publishingThreads.size());
316 |
317 | RCLCPP_INFO_STREAM(this->get_logger(), "Successfully added topic to TCP tunnel, new topic " << clientPrefix + topicName << " has been created.");
318 | }
319 |
320 | void removeTopicServiceCallback(const std::shared_ptr req)
321 | {
322 | int topicId = -1;
323 | for(size_t i = 0; i < publishers.size(); ++i)
324 | {
325 | if(socketStatuses[i] && publishers[i]->get_topic_name() == req->topic.data)
326 | {
327 | topicId = i;
328 | break;
329 | }
330 | }
331 | if(topicId == -1)
332 | {
333 | RCLCPP_WARN_STREAM(this->get_logger(), "Cannot remove topic " << req->topic.data << ", this topic is not in the TCP tunnel.");
334 | return;
335 | }
336 |
337 | pthread_cancel(publishingThreads[topicId].native_handle());
338 | publishingThreads[topicId].join();
339 | socketStatuses[topicId] = false;
340 | publishers[topicId].reset();
341 | close(connectedSockets[topicId]);
342 | close(listeningSockets[topicId]);
343 | confirmationSemaphores[topicId]->release();
344 | confirmationThreads[topicId].join();
345 |
346 | RCLCPP_INFO_STREAM(this->get_logger(), "Successfully removed topic " << req->topic.data << " from TCP tunnel.");
347 | }
348 |
349 | void publishMessageLoop(int threadId)
350 | {
351 | rclcpp::SerializedMessage msg;
352 | while(rclcpp::ok())
353 | {
354 | if(!readFromSocket(threadId, &msg.get_rcl_serialized_message().buffer_length, sizeof(size_t)))
355 | {
356 | return;
357 | }
358 | msg.reserve(msg.get_rcl_serialized_message().buffer_length);
359 | if(!readFromSocket(threadId, msg.get_rcl_serialized_message().buffer, msg.get_rcl_serialized_message().buffer_length))
360 | {
361 | return;
362 | }
363 | confirmationSemaphores[threadId]->release();
364 | try
365 | {
366 | publishers[threadId]->publish(msg);
367 | }
368 | catch(const rclcpp::exceptions::RCLError& error)
369 | {
370 | return;
371 | }
372 | }
373 | }
374 |
375 | bool readFromSocket(const int& socketId, void* buffer, const size_t& nbBytesToRead, const bool& isMainThread = true)
376 | {
377 | size_t nbBytesRead = 0;
378 | while(rclcpp::ok() && nbBytesRead < nbBytesToRead)
379 | {
380 | int n = read(connectedSockets[socketId], ((char*)buffer) + nbBytesRead, nbBytesToRead - nbBytesRead);
381 | if(n > 0)
382 | {
383 | nbBytesRead += n;
384 | }
385 | else if(n == 0 || errno == ECONNRESET || errno == EBADF)
386 | {
387 | if(isMainThread)
388 | {
389 | RCLCPP_INFO_STREAM(this->get_logger(), "Connection closed by server for topic " << publishers[socketId]->get_topic_name() << ".");
390 | socketStatuses[socketId] = false;
391 | publishers[socketId].reset();
392 | close(connectedSockets[socketId]);
393 | close(listeningSockets[socketId]);
394 | confirmationSemaphores[socketId]->release();
395 | }
396 | return false;
397 | }
398 | else if(errno == EWOULDBLOCK || errno == EAGAIN)
399 | {
400 | std::this_thread::sleep_for(std::chrono::milliseconds(10));
401 | }
402 | else
403 | {
404 | throw std::runtime_error(
405 | std::string("Error \"") + strerror(errno) + "\" occurred while reading from socket for topic " + publishers[socketId]->get_topic_name() + ".");
406 | }
407 | }
408 | return nbBytesRead == nbBytesToRead;
409 | }
410 |
411 | void sendConfirmationLoop(int threadId)
412 | {
413 | while(rclcpp::ok())
414 | {
415 | if(!confirmationSemaphores[threadId]->acquire())
416 | {
417 | return;
418 | }
419 | if(!writeToSocket(threadId, &CONFIRMATION_CHARACTER, sizeof(char), false))
420 | {
421 | return;
422 | }
423 | }
424 | }
425 |
426 | bool writeToSocket(const int& socketId, const void* buffer, const size_t& nbBytesToWrite, const bool& isMainThread = true)
427 | {
428 | size_t nbBytesWritten = 0;
429 | while(rclcpp::ok() && nbBytesWritten < nbBytesToWrite)
430 | {
431 | int n = send(connectedSockets[socketId], ((char*)buffer) + nbBytesWritten, nbBytesToWrite - nbBytesWritten, MSG_NOSIGNAL);
432 | if(n >= 0)
433 | {
434 | nbBytesWritten += n;
435 | }
436 | else if(errno == EPIPE || errno == ECONNRESET || errno == EBADF)
437 | {
438 | if(isMainThread)
439 | {
440 | RCLCPP_INFO_STREAM(this->get_logger(), "Connection closed by server for topic " << publishers[socketId]->get_topic_name() << ".");
441 | socketStatuses[socketId] = false;
442 | publishers[socketId].reset();
443 | close(connectedSockets[socketId]);
444 | close(listeningSockets[socketId]);
445 | confirmationSemaphores[socketId]->release();
446 | }
447 | return false;
448 | }
449 | else if(errno == EWOULDBLOCK || errno == EAGAIN)
450 | {
451 | std::this_thread::sleep_for(std::chrono::milliseconds(10));
452 | }
453 | else
454 | {
455 | throw std::runtime_error(
456 | std::string("Error \"") + strerror(errno) + "\" occurred while writing to socket for topic " + publishers[socketId]->get_topic_name() + ".");
457 | }
458 | }
459 | return nbBytesWritten == nbBytesToWrite;
460 | }
461 |
462 | rclcpp::Service::SharedPtr addTopicService;
463 | rclcpp::Service::SharedPtr removeTopicService;
464 | std::vector publishers;
465 | std::vector listeningSockets;
466 | std::vector connectedSockets;
467 | std::vector socketStatuses;
468 | std::vector publishingThreads;
469 | std::vector confirmationThreads;
470 | std::vector> confirmationSemaphores;
471 | std::string clientIp;
472 | std::string initialTopicListFileName;
473 | };
474 |
475 | int main(int argc, char** argv)
476 | {
477 | rclcpp::init(argc, argv);
478 | rclcpp::spin(std::make_shared());
479 | rclcpp::shutdown();
480 | return 0;
481 | }
482 |
--------------------------------------------------------------------------------
/src/semaphore.cpp:
--------------------------------------------------------------------------------
1 | #include "semaphore.h"
2 | #include
3 |
4 | Semaphore::Semaphore(const int& initialCount):
5 | mutex_(), condition_(), count_(initialCount)
6 | {
7 | }
8 |
9 | bool Semaphore::acquire()
10 | {
11 | std::unique_lock lock(mutex_);
12 | while(rclcpp::ok() && !count_)
13 | {
14 | condition_.wait(lock);
15 | }
16 | --count_;
17 | return rclcpp::ok();
18 | }
19 |
20 | bool Semaphore::tryAcquire()
21 | {
22 | std::lock_guard lock(mutex_);
23 | if(count_)
24 | {
25 | --count_;
26 | return true;
27 | }
28 | return false;
29 | }
30 |
31 | void Semaphore::release()
32 | {
33 | std::lock_guard lock(mutex_);
34 | ++count_;
35 | condition_.notify_one();
36 | }
37 |
38 | void Semaphore::wakeUp()
39 | {
40 | condition_.notify_all();
41 | }
42 |
--------------------------------------------------------------------------------
/src/server.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include "semaphore.h"
8 |
9 | const std::map RELIABILITY_POLICIES = {{rclcpp::ReliabilityPolicy::BestEffort, "BestEffort"},
10 | {rclcpp::ReliabilityPolicy::Reliable, "Reliable"},
11 | {rclcpp::ReliabilityPolicy::SystemDefault, "SystemDefault"},
12 | {rclcpp::ReliabilityPolicy::Unknown, "Unknown"}};
13 | const std::map DURABILITY_POLICIES = {{rclcpp::DurabilityPolicy::Volatile, "Volatile"},
14 | {rclcpp::DurabilityPolicy::TransientLocal, "TransientLocal"},
15 | {rclcpp::DurabilityPolicy::SystemDefault, "SystemDefault"},
16 | {rclcpp::DurabilityPolicy::Unknown, "Unknown"}};
17 | const std::map LIVELINESS_POLICIES = {{rclcpp::LivelinessPolicy::Automatic, "Automatic"},
18 | {rclcpp::LivelinessPolicy::ManualByTopic, "ManualByTopic"},
19 | {rclcpp::LivelinessPolicy::SystemDefault, "SystemDefault"},
20 | {rclcpp::LivelinessPolicy::Unknown, "Unknown"}};
21 |
22 | class TCPTunnelServer : public rclcpp::Node
23 | {
24 | public:
25 | TCPTunnelServer():
26 | Node("tcp_tunnel_server")
27 | {
28 | std::string prefix = this->get_namespace();
29 | if(prefix.back() != '/')
30 | {
31 | prefix += "/";
32 | }
33 | registerClientService = this->create_service(prefix + "tcp_tunnel_server/register_client",
34 | std::bind(&TCPTunnelServer::registerClientCallback, this, std::placeholders::_1,
35 | std::placeholders::_2));
36 | }
37 |
38 | ~TCPTunnelServer()
39 | {
40 | for(size_t i = 0; i < sockets.size(); ++i)
41 | {
42 | if(confirmationThreads[i].joinable())
43 | {
44 | confirmationSemaphores[i]->wakeUp();
45 | confirmationThreads[i].join();
46 | }
47 | if(socketStatuses[i])
48 | {
49 | close(sockets[i]);
50 | }
51 | }
52 | }
53 |
54 | private:
55 | void registerClientCallback(const std::shared_ptr req, std::shared_ptr res)
56 | {
57 | std::string topicName = req->topic.data;
58 |
59 | // create socket
60 | int sockfd;
61 | struct sockaddr_in serv_addr;
62 |
63 | sockfd = socket(AF_INET, SOCK_STREAM, 0);
64 | if(sockfd < 0)
65 | {
66 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while creating a socket for topic " << topicName << ".");
67 | return;
68 | }
69 |
70 | int flag = 1;
71 | if(setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(int)) < 0)
72 | {
73 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while setting socket options for topic " << topicName << ".");
74 | close(sockfd);
75 | return;
76 | }
77 |
78 | if(fcntl(sockfd, F_SETFL, O_NONBLOCK) < 0)
79 | {
80 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while trying to set a socket flag for topic " << topicName << ".");
81 | close(sockfd);
82 | return;
83 | }
84 |
85 | // connect to client
86 | int n = -1;
87 | bzero(&serv_addr, sizeof(serv_addr));
88 | serv_addr.sin_family = AF_INET;
89 | serv_addr.sin_addr.s_addr = inet_addr(req->client_ip.data.c_str());
90 | serv_addr.sin_port = htons(req->client_port.data);
91 | std::chrono::time_point startTime = std::chrono::steady_clock::now();
92 | while(std::chrono::steady_clock::now() - startTime < std::chrono::duration(10) && n < 0)
93 | {
94 | n = connect(sockfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr));
95 | }
96 | if(n < 0)
97 | {
98 | RCLCPP_ERROR_STREAM(this->get_logger(),
99 | "Error \"" << strerror(errno) << "\" occurred while trying to connect to " << req->client_ip.data << " on port " << req->client_port.data
100 | << " for topic " << topicName << ".");
101 | close(sockfd);
102 | return;
103 | }
104 |
105 | // fetch topic info
106 | if(this->get_topic_names_and_types().count(topicName) == 0 || this->get_publishers_info_by_topic(topicName).empty())
107 | {
108 | RCLCPP_ERROR_STREAM(this->get_logger(), "Cannot add topic " << topicName << " to TCP tunnel, this topic doesn't exist.");
109 | close(sockfd);
110 | res->topic_exists.data = false;
111 | return;
112 | }
113 | std::string topicType = this->get_topic_names_and_types()[topicName][0];
114 | rclcpp::QoS qos = this->get_publishers_info_by_topic(topicName)[0].qos_profile();
115 |
116 | sockets.push_back(sockfd);
117 | socketStatuses.push_back(true);
118 |
119 | // initialize confirmation thread
120 | confirmationSemaphores.emplace_back(std::make_unique(req->tunnel_queue_size.data));
121 | confirmationThreads.emplace_back(&TCPTunnelServer::receiveConfirmationLoop, this, confirmationThreads.size());
122 |
123 | // create subscription
124 | subscriptions.push_back(this->create_generic_subscription(topicName, topicType, qos.keep_last(1),
125 | std::bind(&TCPTunnelServer::subscriptionCallback, this, std::placeholders::_1, subscriptions.size())));
126 |
127 | // return topic info to client
128 | res->topic_exists.data = true;
129 | res->topic_type.data = topicType;
130 | res->reliability_policy.data = RELIABILITY_POLICIES.at(qos.reliability());
131 | res->durability_policy.data = DURABILITY_POLICIES.at(qos.durability());
132 | res->liveliness_policy.data = LIVELINESS_POLICIES.at(qos.liveliness());
133 |
134 | RCLCPP_INFO_STREAM(this->get_logger(), "Successfully registered client for topic " << topicName << ".");
135 | }
136 |
// Subscription callback for one tunneled topic: forwards the serialized ROS
// message over the TCP socket identified by subscriptionId.
// NOTE(review): template arguments appear stripped by text extraction
// (std::shared_ptr presumably holds a rclcpp::SerializedMessage — TODO confirm).
137 | void subscriptionCallback(std::shared_ptr msg, const int& subscriptionId)
138 | {
// Flow control: if the client has not yet confirmed enough previous messages
// (semaphore empty), the message is dropped rather than blocking the executor.
139 | if(!confirmationSemaphores[subscriptionId]->tryAcquire())
140 | {
141 | return;
142 | }
// Wire format: payload length (size_t) followed by the raw serialized bytes.
// NOTE(review): sending sizeof(size_t) raw makes the protocol dependent on
// matching word size and endianness on both ends — TODO confirm client matches.
143 | if(!writeToSocket(subscriptionId, &msg->get_rcl_serialized_message().buffer_length, sizeof(size_t)))
144 | {
145 | return;
146 | }
147 | if(!writeToSocket(subscriptionId, msg->get_rcl_serialized_message().buffer, msg->get_rcl_serialized_message().buffer_length))
148 | {
149 | return;
150 | }
151 | }
152 |
// Writes exactly nbBytesToWrite bytes from buffer to the non-blocking socket
// at sockets[socketId], retrying partial sends until done or shutdown.
// Returns true when all bytes were written, false when the peer closed the
// connection. isMainThread selects the cleanup path: the subscription thread
// tears down the connection; a confirmation thread only releases a semaphore
// slot so the main thread can notice the failure.
153 | bool writeToSocket(const int& socketId, const void* buffer, const size_t& nbBytesToWrite, const bool& isMainThread = true)
154 | {
155 | size_t nbBytesWritten = 0;
156 | while(rclcpp::ok() && nbBytesWritten < nbBytesToWrite)
157 | {
// MSG_NOSIGNAL prevents SIGPIPE from killing the process on a closed peer.
158 | int n = send(sockets[socketId], ((char*)buffer) + nbBytesWritten, nbBytesToWrite - nbBytesWritten, MSG_NOSIGNAL);
159 | if(n >= 0)
160 | {
161 | nbBytesWritten += n;
162 | }
// Peer-gone errors: clean up (main thread) or signal via semaphore (other).
163 | else if(errno == EPIPE || errno == ECONNRESET || errno == EBADF)
164 | {
165 | if(isMainThread)
166 | {
167 | RCLCPP_INFO_STREAM(this->get_logger(), "Connection closed by client for topic " << subscriptions[socketId]->get_topic_name() << ".");
168 | socketStatuses[socketId] = false;
169 | subscriptions[socketId].reset();
170 | close(sockets[socketId]);
171 | }
172 | else
173 | {
174 | confirmationSemaphores[socketId]->release();
175 | }
176 | return false;
177 | }
// Socket buffer full (non-blocking mode): back off briefly and retry.
178 | else if(errno == EWOULDBLOCK || errno == EAGAIN)
179 | {
180 | std::this_thread::sleep_for(std::chrono::milliseconds(10));
181 | }
182 | else
183 | {
184 | throw std::runtime_error(
185 | std::string("Error \"") + strerror(errno) + "\" occurred while writing to socket for topic " + subscriptions[socketId]->get_topic_name() + ".");
186 | }
187 | }
// False here means rclcpp::ok() went false before the write completed.
188 | return nbBytesWritten == nbBytesToWrite;
189 | }
190 |
// Per-client thread body: blocks reading one confirmation byte from the
// client, and releases a semaphore slot for each byte received, allowing
// subscriptionCallback to send the next message. Exits when the socket
// fails/closes or ROS shuts down.
// NOTE(review): confirmationBuffer is a single char member shared by ALL
// confirmation threads — concurrent reads into it are a data race; it seems
// benign only because the byte's value is discarded. TODO confirm / make it
// a stack local instead.
191 | void receiveConfirmationLoop(int threadId)
192 | {
193 | while(rclcpp::ok())
194 | {
195 | if(!readFromSocket(threadId, &confirmationBuffer, sizeof(char), false))
196 | {
197 | return;
198 | }
199 | confirmationSemaphores[threadId]->release();
200 | }
201 | }
202 |
// Reads exactly nbBytesToRead bytes into buffer from the non-blocking socket
// at sockets[socketId]. Mirror image of writeToSocket: returns true on a full
// read, false when the peer disconnected; isMainThread selects the same
// cleanup-vs-semaphore-release behavior on disconnect.
203 | bool readFromSocket(const int& socketId, void* buffer, const size_t& nbBytesToRead, const bool& isMainThread = true)
204 | {
205 | size_t nbBytesRead = 0;
206 | while(rclcpp::ok() && nbBytesRead < nbBytesToRead)
207 | {
208 | int n = read(sockets[socketId], ((char*)buffer) + nbBytesRead, nbBytesToRead - nbBytesRead);
209 | if(n > 0)
210 | {
211 | nbBytesRead += n;
212 | }
// n == 0 is orderly EOF from the peer; ECONNRESET/EBADF are abrupt closure.
// NOTE(review): the errno tests here are only meaningful when n < 0; a stale
// errno left over from an earlier call could in principle trigger this branch
// — TODO confirm errno hygiene in the calling threads.
213 | else if(n == 0 || errno == ECONNRESET || errno == EBADF)
214 | {
215 | if(isMainThread)
216 | {
217 | RCLCPP_INFO_STREAM(this->get_logger(), "Connection closed by client for topic " << subscriptions[socketId]->get_topic_name() << ".");
218 | socketStatuses[socketId] = false;
219 | subscriptions[socketId].reset();
220 | close(sockets[socketId]);
221 | }
222 | else
223 | {
224 | confirmationSemaphores[socketId]->release();
225 | }
226 | return false;
227 | }
// No data available yet (non-blocking socket): poll again after 10 ms.
228 | else if(errno == EWOULDBLOCK || errno == EAGAIN)
229 | {
230 | std::this_thread::sleep_for(std::chrono::milliseconds(10));
231 | }
232 | else
233 | {
234 | throw std::runtime_error(
235 | std::string("Error \"") + strerror(errno) + "\" occurred while reading from socket for topic " + subscriptions[socketId]->get_topic_name() + ".");
236 | }
237 | }
238 | return nbBytesRead == nbBytesToRead;
239 | }
240 |
// Per-client state, kept in parallel vectors all indexed by the same client
// id (the index assigned at registration). Dead entries are reset/closed but
// never reclaimed — the vectors only grow.
// NOTE(review): template arguments were stripped by text extraction
// (e.g. std::vector presumably of GenericSubscription::SharedPtr, int,
// bool, std::thread, std::unique_ptr of Semaphore — TODO restore from VCS).
241 | rclcpp::Service::SharedPtr registerClientService;
242 | std::vector subscriptions;
243 | std::vector sockets;
244 | std::vector socketStatuses;
245 | std::vector confirmationThreads;
246 | std::vector> confirmationSemaphores;
// Scratch byte for confirmation reads; shared by all confirmation threads
// (see NOTE in receiveConfirmationLoop about the resulting data race).
247 | char confirmationBuffer;
248 | };
249 |
// Standalone entry point: spins the TCP tunnel server node on a
// multi-threaded executor so the service callback and subscription
// callbacks can run concurrently.
250 | int main(int argc, char** argv)
251 | {
252 | rclcpp::init(argc, argv);
// NOTE(review): make_shared's template argument (presumably TCPTunnelServer)
// was stripped by text extraction — TODO restore from VCS.
253 | std::shared_ptr node = std::make_shared();
254 | rclcpp::executors::MultiThreadedExecutor executor;
255 | executor.add_node(node);
256 | executor.spin();
257 | rclcpp::shutdown();
258 | return 0;
259 | }
260 |
--------------------------------------------------------------------------------
/src/server_component.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include "semaphore.h"
8 | #include
9 |
// Lookup tables mapping rclcpp QoS policy enum values to the string names
// returned to the client in the RegisterClient response (the client rebuilds
// a matching QoS profile from these strings).
// NOTE(review): the std::map key/value template arguments (presumably
// rclcpp::*Policy, std::string) were stripped by text extraction — TODO
// restore from VCS.
10 | const std::map RELIABILITY_POLICIES = {{rclcpp::ReliabilityPolicy::BestEffort, "BestEffort"},
11 | {rclcpp::ReliabilityPolicy::Reliable, "Reliable"},
12 | {rclcpp::ReliabilityPolicy::SystemDefault, "SystemDefault"},
13 | {rclcpp::ReliabilityPolicy::Unknown, "Unknown"}};
14 | const std::map DURABILITY_POLICIES = {{rclcpp::DurabilityPolicy::Volatile, "Volatile"},
15 | {rclcpp::DurabilityPolicy::TransientLocal, "TransientLocal"},
16 | {rclcpp::DurabilityPolicy::SystemDefault, "SystemDefault"},
17 | {rclcpp::DurabilityPolicy::Unknown, "Unknown"}};
18 | const std::map LIVELINESS_POLICIES = {{rclcpp::LivelinessPolicy::Automatic, "Automatic"},
19 | {rclcpp::LivelinessPolicy::ManualByTopic, "ManualByTopic"},
20 | {rclcpp::LivelinessPolicy::SystemDefault, "SystemDefault"},
21 | {rclcpp::LivelinessPolicy::Unknown, "Unknown"}};
22 |
// Composable-node version of the TCP tunnel server (registered below via
// RCLCPP_COMPONENTS_REGISTER_NODE). Accepts client registrations over a ROS
// service, then streams serialized topic data to each client over raw TCP.
23 | class TCPTunnelServer : public rclcpp::Node
24 | {
25 | public:
// Advertises <namespace>/tcp_tunnel_server/register_client. NodeOptions is
// forwarded to rclcpp::Node as required for component loading.
26 | TCPTunnelServer(const rclcpp::NodeOptions & options):
27 | Node("tcp_tunnel_server", options)
28 | {
// Ensure the service name is prefixed by the node namespace with exactly one
// trailing '/'. (get_namespace() returns at least "/", so prefix is
// presumably never empty and .back() is safe — TODO confirm.)
29 | std::string prefix = this->get_namespace();
30 | if(prefix.back() != '/')
31 | {
32 | prefix += "/";
33 | }
34 | registerClientService = this->create_service(prefix + "tcp_tunnel_server/register_client",
35 | std::bind(&TCPTunnelServer::registerClientCallback, this, std::placeholders::_1,
36 | std::placeholders::_2));
37 | }
38 |
// Tears down all per-client resources: wakes each confirmation thread (so
// its blocking semaphore/read loop can exit), joins it, then closes sockets
// that were still marked healthy. Sockets of already-disconnected clients
// were closed when the disconnect was detected (see writeToSocket).
39 | ~TCPTunnelServer()
40 | {
41 | for(size_t i = 0; i < sockets.size(); ++i)
42 | {
43 | if(confirmationThreads[i].joinable())
44 | {
45 | confirmationSemaphores[i]->wakeUp();
46 | confirmationThreads[i].join();
47 | }
48 | if(socketStatuses[i])
49 | {
50 | close(sockets[i]);
51 | }
52 | }
53 | }
54 |
55 | private:
// Service handler for register_client: opens a TCP connection back to the
// requesting client, verifies the requested topic exists, allocates the
// per-client state (socket, confirmation thread, semaphore, subscription),
// and returns the topic type and QoS policy names so the client can mirror
// the publisher. On any failure it logs, closes the socket, and returns
// early (res->topic_exists is only set on the paths that reach it).
56 | void registerClientCallback(const std::shared_ptr req, std::shared_ptr res)
57 | {
58 | std::string topicName = req->topic.data;
59 |
60 | // create socket
61 | int sockfd;
62 | struct sockaddr_in serv_addr;
63 |
64 | sockfd = socket(AF_INET, SOCK_STREAM, 0);
65 | if(sockfd < 0)
66 | {
67 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while creating a socket for topic " << topicName << ".");
68 | return;
69 | }
70 |
// TCP_NODELAY disables Nagle's algorithm to keep per-message latency low.
71 | int flag = 1;
72 | if(setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(int)) < 0)
73 | {
74 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while setting socket options for topic " << topicName << ".");
75 | close(sockfd);
76 | return;
77 | }
78 |
// Non-blocking mode is assumed by the EWOULDBLOCK/EAGAIN retry loops in
// writeToSocket/readFromSocket.
79 | if(fcntl(sockfd, F_SETFL, O_NONBLOCK) < 0)
80 | {
81 | RCLCPP_ERROR_STREAM(this->get_logger(), "Error \"" << strerror(errno) << "\" occurred while trying to set a socket flag for topic " << topicName << ".");
82 | close(sockfd);
83 | return;
84 | }
85 |
86 | // connect to client
87 | int n = -1;
88 | bzero(&serv_addr, sizeof(serv_addr));
89 | serv_addr.sin_family = AF_INET;
90 | serv_addr.sin_addr.s_addr = inet_addr(req->client_ip.data.c_str());
91 | serv_addr.sin_port = htons(req->client_port.data);
// NOTE(review): this is a busy-wait retry of connect() on a NON-BLOCKING
// socket — the first call presumably returns EINPROGRESS and later calls
// EALREADY/EISCONN rather than 0, and the loop burns CPU for up to the
// timeout; std::chrono::duration(10) presumably means 10 seconds (template
// arguments stripped by extraction). TODO confirm against VCS / POSIX
// connect() semantics.
92 | std::chrono::time_point startTime = std::chrono::steady_clock::now();
93 | while(std::chrono::steady_clock::now() - startTime < std::chrono::duration(10) && n < 0)
94 | {
95 | n = connect(sockfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr));
96 | }
97 | if(n < 0)
98 | {
99 | RCLCPP_ERROR_STREAM(this->get_logger(),
100 | "Error \"" << strerror(errno) << "\" occurred while trying to connect to " << req->client_ip.data << " on port " << req->client_port.data
101 | << " for topic " << topicName << ".");
102 | close(sockfd);
103 | return;
104 | }
105 |
// The topic must exist AND have at least one live publisher, since its type
// and QoS are copied from the first publisher below.
106 | // fetch topic info
107 | if(this->get_topic_names_and_types().count(topicName) == 0 || this->get_publishers_info_by_topic(topicName).empty())
108 | {
109 | RCLCPP_ERROR_STREAM(this->get_logger(), "Cannot add topic " << topicName << " to TCP tunnel, this topic doesn't exist.");
110 | close(sockfd);
111 | res->topic_exists.data = false;
112 | return;
113 | }
114 | std::string topicType = this->get_topic_names_and_types()[topicName][0];
115 | rclcpp::QoS qos = this->get_publishers_info_by_topic(topicName)[0].qos_profile();
116 |
// From here on the new client id is the common index into all the parallel
// vectors (sockets, socketStatuses, confirmationThreads, ...).
117 | sockets.push_back(sockfd);
118 | socketStatuses.push_back(true);
119 |
// The semaphore is initialized with the client-requested queue size; each
// in-flight unconfirmed message holds one slot.
120 | // initialize confirmation thread
121 | confirmationSemaphores.emplace_back(std::make_unique(req->tunnel_queue_size.data));
122 | confirmationThreads.emplace_back(&TCPTunnelServer::receiveConfirmationLoop, this, confirmationThreads.size());
123 |
// subscriptions.size() is evaluated before push_back completes? No — it is
// the bound id for the callback; it equals the index of the element being
// pushed since the argument is evaluated before the vector grows. Depth 1
// history: the tunnel drops rather than queues on the ROS side.
124 | // create subscription
125 | subscriptions.push_back(this->create_generic_subscription(topicName, topicType, qos.keep_last(1),
126 | std::bind(&TCPTunnelServer::subscriptionCallback, this, std::placeholders::_1, subscriptions.size())));
127 |
// Echo the publisher's QoS back as strings (see the *_POLICIES maps above).
128 | // return topic info to client
129 | res->topic_exists.data = true;
130 | res->topic_type.data = topicType;
131 | res->reliability_policy.data = RELIABILITY_POLICIES.at(qos.reliability());
132 | res->durability_policy.data = DURABILITY_POLICIES.at(qos.durability());
133 | res->liveliness_policy.data = LIVELINESS_POLICIES.at(qos.liveliness());
134 |
135 | RCLCPP_INFO_STREAM(this->get_logger(), "Successfully registered client for topic " << topicName << ".");
136 | }
137 |
// Subscription callback for one tunneled topic: forwards the serialized ROS
// message over the TCP socket identified by subscriptionId.
// NOTE(review): template arguments appear stripped by text extraction
// (std::shared_ptr presumably holds a rclcpp::SerializedMessage — TODO confirm).
138 | void subscriptionCallback(std::shared_ptr msg, const int& subscriptionId)
139 | {
// Flow control: if the client has not yet confirmed enough previous messages
// (semaphore empty), the message is dropped rather than blocking the executor.
140 | if(!confirmationSemaphores[subscriptionId]->tryAcquire())
141 | {
142 | return;
143 | }
// Wire format: payload length (size_t) followed by the raw serialized bytes.
// NOTE(review): sending sizeof(size_t) raw makes the protocol dependent on
// matching word size and endianness on both ends — TODO confirm client matches.
144 | if(!writeToSocket(subscriptionId, &msg->get_rcl_serialized_message().buffer_length, sizeof(size_t)))
145 | {
146 | return;
147 | }
148 | if(!writeToSocket(subscriptionId, msg->get_rcl_serialized_message().buffer, msg->get_rcl_serialized_message().buffer_length))
149 | {
150 | return;
151 | }
152 | }
153 |
// Writes exactly nbBytesToWrite bytes from buffer to the non-blocking socket
// at sockets[socketId], retrying partial sends until done or shutdown.
// Returns true when all bytes were written, false when the peer closed the
// connection. isMainThread selects the cleanup path: the subscription thread
// tears down the connection; a confirmation thread only releases a semaphore
// slot so the main thread can notice the failure.
154 | bool writeToSocket(const int& socketId, const void* buffer, const size_t& nbBytesToWrite, const bool& isMainThread = true)
155 | {
156 | size_t nbBytesWritten = 0;
157 | while(rclcpp::ok() && nbBytesWritten < nbBytesToWrite)
158 | {
// MSG_NOSIGNAL prevents SIGPIPE from killing the process on a closed peer.
159 | int n = send(sockets[socketId], ((char*)buffer) + nbBytesWritten, nbBytesToWrite - nbBytesWritten, MSG_NOSIGNAL);
160 | if(n >= 0)
161 | {
162 | nbBytesWritten += n;
163 | }
// Peer-gone errors: clean up (main thread) or signal via semaphore (other).
164 | else if(errno == EPIPE || errno == ECONNRESET || errno == EBADF)
165 | {
166 | if(isMainThread)
167 | {
168 | RCLCPP_INFO_STREAM(this->get_logger(), "Connection closed by client for topic " << subscriptions[socketId]->get_topic_name() << ".");
169 | socketStatuses[socketId] = false;
170 | subscriptions[socketId].reset();
171 | close(sockets[socketId]);
172 | }
173 | else
174 | {
175 | confirmationSemaphores[socketId]->release();
176 | }
177 | return false;
178 | }
// Socket buffer full (non-blocking mode): back off briefly and retry.
179 | else if(errno == EWOULDBLOCK || errno == EAGAIN)
180 | {
181 | std::this_thread::sleep_for(std::chrono::milliseconds(10));
182 | }
183 | else
184 | {
185 | throw std::runtime_error(
186 | std::string("Error \"") + strerror(errno) + "\" occurred while writing to socket for topic " + subscriptions[socketId]->get_topic_name() + ".");
187 | }
188 | }
// False here means rclcpp::ok() went false before the write completed.
189 | return nbBytesWritten == nbBytesToWrite;
190 | }
191 |
// Per-client thread body: blocks reading one confirmation byte from the
// client, and releases a semaphore slot for each byte received, allowing
// subscriptionCallback to send the next message. Exits when the socket
// fails/closes or ROS shuts down.
// NOTE(review): confirmationBuffer is a single char member shared by ALL
// confirmation threads — concurrent reads into it are a data race; it seems
// benign only because the byte's value is discarded. TODO confirm / make it
// a stack local instead.
192 | void receiveConfirmationLoop(int threadId)
193 | {
194 | while(rclcpp::ok())
195 | {
196 | if(!readFromSocket(threadId, &confirmationBuffer, sizeof(char), false))
197 | {
198 | return;
199 | }
200 | confirmationSemaphores[threadId]->release();
201 | }
202 | }
203 |
// Reads exactly nbBytesToRead bytes into buffer from the non-blocking socket
// at sockets[socketId]. Mirror image of writeToSocket: returns true on a full
// read, false when the peer disconnected; isMainThread selects the same
// cleanup-vs-semaphore-release behavior on disconnect.
204 | bool readFromSocket(const int& socketId, void* buffer, const size_t& nbBytesToRead, const bool& isMainThread = true)
205 | {
206 | size_t nbBytesRead = 0;
207 | while(rclcpp::ok() && nbBytesRead < nbBytesToRead)
208 | {
209 | int n = read(sockets[socketId], ((char*)buffer) + nbBytesRead, nbBytesToRead - nbBytesRead);
210 | if(n > 0)
211 | {
212 | nbBytesRead += n;
213 | }
// n == 0 is orderly EOF from the peer; ECONNRESET/EBADF are abrupt closure.
// NOTE(review): the errno tests here are only meaningful when n < 0; a stale
// errno left over from an earlier call could in principle trigger this branch
// — TODO confirm errno hygiene in the calling threads.
214 | else if(n == 0 || errno == ECONNRESET || errno == EBADF)
215 | {
216 | if(isMainThread)
217 | {
218 | RCLCPP_INFO_STREAM(this->get_logger(), "Connection closed by client for topic " << subscriptions[socketId]->get_topic_name() << ".");
219 | socketStatuses[socketId] = false;
220 | subscriptions[socketId].reset();
221 | close(sockets[socketId]);
222 | }
223 | else
224 | {
225 | confirmationSemaphores[socketId]->release();
226 | }
227 | return false;
228 | }
// No data available yet (non-blocking socket): poll again after 10 ms.
229 | else if(errno == EWOULDBLOCK || errno == EAGAIN)
230 | {
231 | std::this_thread::sleep_for(std::chrono::milliseconds(10));
232 | }
233 | else
234 | {
235 | throw std::runtime_error(
236 | std::string("Error \"") + strerror(errno) + "\" occurred while reading from socket for topic " + subscriptions[socketId]->get_topic_name() + ".");
237 | }
238 | }
239 | return nbBytesRead == nbBytesToRead;
240 | }
241 |
// Per-client state, kept in parallel vectors all indexed by the same client
// id (the index assigned at registration). Dead entries are reset/closed but
// never reclaimed — the vectors only grow.
// NOTE(review): template arguments were stripped by text extraction
// (e.g. std::vector presumably of GenericSubscription::SharedPtr, int,
// bool, std::thread, std::unique_ptr of Semaphore — TODO restore from VCS).
242 | rclcpp::Service::SharedPtr registerClientService;
243 | std::vector subscriptions;
244 | std::vector sockets;
245 | std::vector socketStatuses;
246 | std::vector confirmationThreads;
247 | std::vector> confirmationSemaphores;
// Scratch byte for confirmation reads; shared by all confirmation threads
// (see NOTE in receiveConfirmationLoop about the resulting data race).
248 | char confirmationBuffer;
249 | };
250 |
// Registers the node as a loadable rclcpp component (matches the
// PLUGIN "TCPTunnelServer" entry in CMakeLists.txt).
251 | RCLCPP_COMPONENTS_REGISTER_NODE(TCPTunnelServer)
252 |
--------------------------------------------------------------------------------
/srv/AddTopic.srv:
--------------------------------------------------------------------------------
1 | std_msgs/String topic
2 | std_msgs/String tunnel_queue_size
3 | std_msgs/String server_namespace
4 | ---
--------------------------------------------------------------------------------
/srv/RegisterClient.srv:
--------------------------------------------------------------------------------
1 | std_msgs/String topic
2 | std_msgs/UInt64 tunnel_queue_size
3 | std_msgs/String client_ip
4 | std_msgs/UInt16 client_port
5 | ---
6 | std_msgs/Bool topic_exists
7 | std_msgs/String topic_type
8 | std_msgs/String reliability_policy
9 | std_msgs/String durability_policy
10 | std_msgs/String liveliness_policy
11 |
--------------------------------------------------------------------------------
/srv/RemoveTopic.srv:
--------------------------------------------------------------------------------
1 | std_msgs/String topic
2 | ---
--------------------------------------------------------------------------------