├── start.sh
├── __pycache__
│   ├── game_demo.cpython-311.pyc
│   ├── map_process.cpython-311.pyc
│   ├── npc_control.cpython-311.pyc
│   └── socket_server.cpython-311.pyc
├── .idea
│   ├── vcs.xml
│   ├── .gitignore
│   ├── inspectionProfiles
│   │   ├── profiles_settings.xml
│   │   └── Project_Default.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── PRS-Trial-Version.iml
├── clean_port.sh
├── prs_demo.py
├── prs_requirements.txt
├── data
│   └── npc_data.json
├── test_demo.ipynb
├── README.md
├── robot
│   └── PRS_Robot.urdf
├── document
│   └── api.md
├── game_demo.py
├── map_process.py
├── socket_server.py
└── npc_control.py
/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Start the Unity .x86_64 executable
4 | ./unity/PRS.x86_64
5 |
--------------------------------------------------------------------------------
/__pycache__/game_demo.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PRS-Organization/PRS-Trial-Version/HEAD/__pycache__/game_demo.cpython-311.pyc
--------------------------------------------------------------------------------
/__pycache__/map_process.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PRS-Organization/PRS-Trial-Version/HEAD/__pycache__/map_process.cpython-311.pyc
--------------------------------------------------------------------------------
/__pycache__/npc_control.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PRS-Organization/PRS-Trial-Version/HEAD/__pycache__/npc_control.cpython-311.pyc
--------------------------------------------------------------------------------
/__pycache__/socket_server.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PRS-Organization/PRS-Trial-Version/HEAD/__pycache__/socket_server.cpython-311.pyc
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/clean_port.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Find processes using port 8000 and kill them
4 | netstat -ap | grep 8000 | awk '{print $7}' | awk -F '/' '{print $1}' | while read -r pid; do
5 | echo "Killing process with PID: $pid"
6 | kill -9 "$pid"
7 | done
8 |
--------------------------------------------------------------------------------
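For reference, a rough Python equivalent of clean_port.sh is sketched below; it assumes the optional psutil package, which is not listed in prs_requirements.txt.

```
# Hedged sketch: kill whatever is holding port 8000, mirroring clean_port.sh.
# Requires psutil (pip install psutil); may need elevated privileges to see
# connections owned by other users.
import psutil

PORT = 8000  # socket_server.Server searches upwards from this port

for conn in psutil.net_connections(kind='inet'):
    if conn.laddr and conn.laddr.port == PORT and conn.pid:
        print('Killing process with PID:', conn.pid)
        psutil.Process(conn.pid).kill()
```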
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/PRS-Trial-Version.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/prs_demo.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | from socket_server import *
3 |
4 | if __name__ == '__main__':
5 | # Environment initialization
6 | prs = PrsEnv(is_print=0, not_test_mode=1)
7 | prs.npc_start(5)
8 |     # the argument of npc_start sets how many NPCs are activated
9 | ma = np.array(prs.server.maps.floor3)
10 |     # print(ma.shape)  # -> (172, 228)
11 | prs.agent.rotate_right(degree=30)
12 | # prs.agent.goto_target_goal((2, 130, 120), position_mode=1)
13 |     # the robot navigates to floor 3 grid point (130, 120)
14 |     # full API documentation is coming soon (see document/api.md)
15 | map_room = prs.server.maps.floor3
16 |
17 | time.sleep(60)
18 | prs.finish_env()
--------------------------------------------------------------------------------
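A minimal sketch of how the grid map touched in prs_demo.py could be inspected visually; it assumes the Unity client is running and has already sent the map data, and that PrsEnv is importable from socket_server as in the demo above.

```
# Hedged sketch: save the floor-3 grid map to an image once Unity has sent it.
import time
import numpy as np
import matplotlib.pyplot as plt
from socket_server import PrsEnv

prs = PrsEnv(is_print=0, not_test_mode=1)
while prs.server.maps.floor3 is None:        # wait until Unity has delivered the map
    time.sleep(1)
grid = np.array(prs.server.maps.floor3)      # e.g. shape (172, 228), as noted in prs_demo.py
plt.imshow(grid, cmap='gray')
plt.title('Floor 3 grid map')
plt.axis('off')
plt.savefig('floor3_map.png')
prs.finish_env()
```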
/prs_requirements.txt:
--------------------------------------------------------------------------------
1 | ansitable==0.9.10
2 | asyncio==3.4.3
3 | cfgv==3.4.0
4 | colored==1.4.4
5 | contourpy==1.2.0
6 | cycler==0.12.1
7 | DateTime==5.5
8 | distlib==0.3.8
9 | filelock==3.13.3
10 | fonttools==4.50.0
11 | identify==2.5.35
12 | kiwisolver==1.4.5
13 | matplotlib==3.8.3
14 | nodeenv==1.8.0
15 | numpy==1.26.4
16 | opencv-python==4.9.0.80
17 | packaging==24.0
18 | pgraph-python==0.6.2
19 | pillow==10.2.0
20 | platformdirs==4.2.0
21 | pre-commit==3.7.0
22 | progress==1.6
23 | pygame==2.5.2
24 | pyparsing==3.1.2
25 | python-dateutil==2.9.0.post0
26 | pytz==2024.1
27 | PyYAML==6.0.1
28 | roboticstoolbox-python==1.1.0
29 | rtb-data==1.0.1
30 | scikit-fmm==2023.4.2
31 | scipy==1.11.1
32 | six==1.16.0
33 | spatialgeometry==1.1.0
34 | spatialmath-python==1.1.9
35 | swift-sim==1.1.0
36 | typing_extensions==4.10.0
37 | virtualenv==20.25.1
38 | websockets==12.0
39 | zope.interface==6.2
40 |
--------------------------------------------------------------------------------
/data/npc_data.json:
--------------------------------------------------------------------------------
1 | {"npc": [{"description": " ", "information": "id: 0, name: , age: , gender: , room: , office: , job: ", "name": "Npc_CasualMale"}, {"description": " ", "information": "id: 1, name: , age: , gender: , room: , office: , job: ", "name": "Npc_CasualMale"}, {"description": " ", "information": "id: 2, name: , age: , gender: , room: , office: , job: ", "name": "Npc_BusinessMale"}, {"description": " ", "information": "id: 3, name: , age: , gender: , room: , office: , job: ", "name": "Npc_CasualFemale"}, {"description": " ", "information": "id: 4, name: , age: , gender: , room: , office: , job: ", "name": "Npc_BusinessFemale"}, {"description": " ", "information": "id: 5, name: , age: , gender: , room: , office: , job: ", "name": "Npc_UniformMale"}, {"description": " ", "information": "id: 6, name: , age: , gender: , room: , office: , job: ", "name": "Npc_UniformMale"}, {"description": " ", "information": "id: 7, name: , age: , gender: , room: , office: , job: ", "name": "Npc_UniformFemale"}, {"description": " ", "information": "id: 8, name: , age: , gender: , room: , office: , job: ", "name": "Npc_UniformFemale"}, {"description": " ", "information": "id: 9, name: , age: , gender: , room: , office: , job: ", "name": "Npc_UniformFemale"}]}
--------------------------------------------------------------------------------
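A small sketch for inspecting this roster from Python; it mirrors how ObjectsData in socket_server.py reads the same file into self.characters.

```
# Hedged sketch: list the NPC prefabs defined in data/npc_data.json.
import json

with open('data/npc_data.json', 'r') as f:
    npc_data = json.load(f)

for npc in npc_data['npc']:
    # 'name' is the Unity prefab; 'information' holds the (currently empty) profile fields
    print(npc['name'], '-', npc['information'])
```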
/test_demo.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "id": "417071ec",
7 | "metadata": {},
8 | "outputs": [
9 | {
10 | "name": "stdout",
11 | "output_type": "stream",
12 | "text": [
13 | "pygame 2.5.2 (SDL 2.28.3, Python 3.11.5)\n",
14 | "Hello from the pygame community. https://www.pygame.org/contribute.html\n",
15 | "PRS environment beta is starting without interaction\n",
16 | "Please open the Unity program (start.sh)\n",
17 | "PRS challenge task and benchmark come soon!\n"
18 | ]
19 | }
20 | ],
21 | "source": [
22 | "from socket_server import *\n",
23 | "# Environment initialization\n",
24 | "prs = PrsEnv(is_print=0, not_test_mode=0)"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": null,
30 | "id": "19a9dd9f",
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "# npc behavior\n",
35 | "prs.npc_start(5)"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "id": "7d358a1a",
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "# action sequence of robot\n"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "id": "d2f268f3",
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "prs.finish_env()"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "id": "bed19f72",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": []
65 | }
66 | ],
67 | "metadata": {
68 | "kernelspec": {
69 | "display_name": "Python 3 (ipykernel)",
70 | "language": "python",
71 | "name": "python3"
72 | },
73 | "language_info": {
74 | "codemirror_mode": {
75 | "name": "ipython",
76 | "version": 3
77 | },
78 | "file_extension": ".py",
79 | "mimetype": "text/x-python",
80 | "name": "python",
81 | "nbconvert_exporter": "python",
82 | "pygments_lexer": "ipython3",
83 | "version": "3.11.5"
84 | }
85 | },
86 | "nbformat": 4,
87 | "nbformat_minor": 5
88 | }
89 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PRS-Trial-Version
2 | ## [PRS Delivery Benchmark](https://github.com/PRS-Organization/prs-delivery)
3 | Trial version of the PRS platform (a Python project). Please note that the complete experience requires downloading the Unity resources.
4 | More APIs and datasets are coming soon!
5 |
6 | ## Quick Start Guide for PRS Platform Demo
7 |
8 | Follow these steps to quickly set up and run the PRS demo:
9 |
10 | 1. Clone the PRS demo repository:
11 | ```
12 | git clone https://github.com/PRS-Organization/PRS-Trial-Version.git
13 | ```
14 | 2. Ensure you have a Python virtual environment (Python version >= 3.9) activated.
15 |
16 | 3. Install the required Python packages:
17 | ```
18 | pip install -r prs_requirements.txt
19 | ```
20 | 4. Download the Unity executable file (Ubuntu version) from [Google Drive](https://drive.google.com/file/d/1-LnzNA2D7v9jWnJlpknCeX_cOjofr7_V/view?usp=sharing) and save it as `prs_unity_demo.rar`.
21 |
22 | 5. Extract the `prs_unity_demo.rar` file into the project folder:
23 | ```
24 | unrar x prs_unity_demo.rar
25 | ```
26 | Note: This should create a `unity` folder. Give it necessary permissions:
27 | ```
28 | sudo chmod 777 -R unity
29 | ```
30 | 6. Start running the demo:
31 | ```
32 | python prs_demo.py
33 | ```
34 | or start only the Unity application:
35 | ```
36 | bash start.sh
37 | ```
38 | 7. If you encounter a port occupation error, run:
39 | ```
40 | bash clean_port.sh
41 | ```
42 | 8. After running the Python script, you can open another terminal and execute `bash start.sh` or run `unity/PRS.x86_64` directly.
43 |
44 | 9. Wait a few seconds for Unity to render the graphics.
45 |
46 | 10. In Unity, you can control the camera movement using the keyboard keys W, A, S, D, Q, and E.
47 |
48 | 11. To close the demo, first close the Unity program (or press Esc), then stop the Python program (Ctrl+C or Ctrl+Z), and finally run:
49 | ```
50 | bash clean_port.sh
51 | ```
52 | Note: In this version, there's no function to end the environment due to its long-running nature.
53 |
54 | 12. Please note that this is just a test demo, and there is no interactive behavior in the environment.
55 |
56 | Stay tuned for the upcoming complete API documentation and task benchmarks!
57 |
58 | ## More API Guidance
59 | [PRS API](document/api.md)
60 |
61 |
--------------------------------------------------------------------------------
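A possible one-script version of steps 6-8 from the guide above; it assumes the Unity build was extracted into unity/ (step 5) and that PrsEnv is importable from socket_server.

```
# Hedged sketch: start the Python side, launch the Unity client, then shut down.
import subprocess
import time
from socket_server import PrsEnv

prs = PrsEnv(is_print=0, not_test_mode=1)          # step 6: Python side first (it picks the port)
unity = subprocess.Popen(['./unity/PRS.x86_64'])   # step 8: same as bash start.sh
time.sleep(10)                                     # step 9: give Unity time to connect and render
try:
    prs.npc_start(5)                               # activate a few autonomous NPCs
    time.sleep(60)                                 # watch the scene for a while (WASDQE moves the camera)
finally:
    prs.finish_env()
    unity.terminate()
```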
/robot/PRS_Robot.urdf:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/document/api.md:
--------------------------------------------------------------------------------
1 | # PRS API
2 | We provide **APIs** that cover the following areas:
3 | - [Environment API](#environment-api)
4 | - [Task API](#task-api)
5 | - [Robot API](#robot-api)
6 | - [Map API](#map-and-position-api)
7 | - [NPC API](#npc-api)
8 |
9 | ## Environment API
10 | Operations: initiate, accelerate, and terminate the environment
11 |
12 | Instantiate environment class
13 | ```
14 | prs = PrsEnv()
15 | ```
16 | Start autonomous activities for NPCs, specifying the number of activated NPCs
17 | ```
18 | prs.npc_start(3)
19 | ```
20 | Accelerate the scene simulation by specifying the speed multiplier
21 | ```
22 | prs.sim_speed(2)
23 | ```
24 | Set the environment time
25 | ```
26 | prs.env_time.set_time(2024, 6, 1, 9, 0)
27 | ```
28 | Terminate the environment and end the process
29 | ```
30 | prs.finish_env()
31 | ```
32 |
33 | ## Task API
34 |
35 | Initialize the task program; 0 stands for search-and-delivery tasks
36 | ```
37 | prs.task_choice(0)
38 | ```
39 | Evaluate the task result
40 | ```
41 | prs.task_evaluate()
42 | ```
43 | ## Robot API
44 |
45 | Retrieve vision data, specifying the camera type (0 for head, 1 for hand)
46 | ```
47 | prs.agent.observation_camera(camera_type=0)
48 | ```
49 | Move the robot to a target location, specifying the position mode (0 for world, 1 for grid map)
50 | ```
51 | prs.agent.goto_target_goal((2, 130, 120), position_mode=1)
52 | ```
53 | Retrieve robot position information, returning position (pos) and detailed information (info)
54 | ```
55 | prs.agent.pos_query()
56 | ```
57 | Rotate the robot, specifying the degree of rotation (positive for right, negative for left)
58 | ```
59 | prs.agent.rotate_right(degree=30)
60 | ```
61 | Control robot joints, specifying the joint ID and target
62 | ```
63 | prs.agent.joint_control(joint_id=1, target=20)
64 | ```
65 | Adjust robot direction alignment based on input coordinates
66 | ```
67 | prs.agent.direction_adjust(position=(22.38, 0.1, -0.17))
68 | ```
69 | Perform inverse kinematics (IK) for the robot, specifying relative coordinates
70 | ```
71 | prs.agent.ik_process(x=0, y=1, z=0.1)
72 | ```
73 | Calculate 6 Degrees of Freedom (6-DoF) IK for the robot, specifying the target coordinates and rotation angles (yaw, pitch, roll)
74 | ```
75 | prs.agent.input_pos(prs.agent.robot, x=0.2, y=0, z=0.75, phi=0, theta=1.25, psi=0, plot=0)
76 | ```
77 | Move the robot forward by specifying the distance
78 | ```
79 | prs.agent.move_forward(dis=1.0)
80 | ```
81 | Navigate to a specific destination, specifying the location name
82 | ```
83 | prs.agent.go_to_destination('kitchen')
84 | ```
85 | Retrieve the latest map
86 | ```
87 | prs.agent.get_all_map()
88 | ```
89 | Calculate the relative coordinates and IK target values from world coordinates
90 | ```
91 | prs.agent.ik_calculation((-10.1, 0.1, 1.6))
92 | ```
93 | Calculate IK target, specifying relative coordinates
94 | ```
95 | prs.agent.ik_process(0.25, 0.1, 0.79)
96 | ```
97 | Control robot arms, specifying joint target values
98 | ```
99 | prs.agent.arm_control([0, 0, 0, 0, 0])
100 | ```
101 | Grab an object, specifying the object's ID
102 | ```
103 | prs.agent.grasp_object(17)
104 | ```
105 | Move towards the vicinity of an object, specifying the object ID or functional feature
106 | ```
107 | prs.agent.go_to_target_object(feature='Seat')
108 | ```
109 | Walk towards and grab the target object
110 | ```
111 | prs.agent.goto_and_grasp('apple')
112 | ```
113 | Release the held object
114 | ```
115 | depth_m = prs.agent.get_depth(0)
116 | ```
117 | Retrieve depth information, specifying mode (0 for head camera, 1 for hand camera)
118 | ```
119 | prs.agent.get_depth()
120 | ```
121 | Retrieve camera semantic segmentation, specifying mode (0 for head camera, 1 for hand camera), tags contain object information
122 | ```
123 | seg, tags = prs.agent.get_segmentation(0)
124 | ```
125 | Tilt the head camera downwards
126 | ```
127 | prs.agent.joint_control(joint_id=4, target=20)
128 | ```
129 | Use the camera to observe the surroundings at the given angle
130 | ```
131 | prs.agent.observation(degree=0, camera=0)
132 | ```
133 |
134 | Request a visual interaction
135 |
136 | ```
137 | prs.agent.request_interaction()
138 | ```
139 | This function requests an interaction with the visual system.
140 | Input: a two-dimensional matrix marking the target, and the operation type:
141 | - 0: recognize
142 | - 1: grasp
143 | - 2: approach target
144 | - 3, etc.
145 | ```
146 | prs.agent.interaction()
147 | ```
148 |
149 | ## Map and Position API
150 |
151 | Determine the area based on world coordinates, returning the room or None
152 | ```
153 | prs.objs_data.point_determine((15.7, -5.1, -19.5))
154 | ```
155 | Retrieve information from the 2D grid map, specifying world coordinates [-10.1, 0.1, -6.1]; the output includes floor, map_i, map_j, and obstacle status
156 | ```
157 | prs.server.maps.get_point_info({'x': -10.1, 'y': 0.1, 'z': -6.1})
158 | ```
159 | Retrieve world position based on floor, map_i, and map_j coordinates
160 | ```
161 | prs.server.maps.get_world_position(1, 89, 108)
162 | ```
163 | Retrieve 2D grid maps for floors 1, 2, and 3
164 | ```
165 | map1, map2, map3 = prs.server.maps.floor1, prs.server.maps.floor2, prs.server.maps.floor3
166 | ```
167 | Retrieve the latest map
168 | ```
169 | prs.agent.get_all_map()
170 | ```
171 | Parse object information based on instructions, querying a specific object
172 | ```
173 | obj_list = prs.objs_data.object_parsing(instruction, ['apple'])
174 | ```
175 | Global query for an object's ID, specifying the object's name
176 | ```
177 | obj_list = prs.objs_data.object_query(['apple'])
178 | ```
179 | Query object information based on ID
180 | ```
181 | prs.object_query(obj_id=100)
182 | ```
183 |
184 | ## NPC API
185 |
186 | Execute actions for NPCs based on action IDs, including looping actions, interactive actions, and performance actions
187 | ```
188 | prs.npcs[0].npc_action(tar_action='stand')
189 | ```
190 | Retrieve photos around the NPC, from the camera perspective above the head
191 | ```
192 | prs.npcs[0].observation_surrounding()
193 | ```
194 | Retrieve the current NPC's position, returning position and detailed NPC information
195 | ```
196 | prs.npcs[0].where_npc()
197 | ```
198 | Make the NPC move to a specific place based on preset location lists
199 | ```
200 | prs.npcs[0].go_to_place()
201 | ```
202 | Retrieve detailed information about the NPC, including position and other information
203 | ```
204 | pos, npc_info = prs.npcs[0].query_information()
205 | ```
206 | Move towards the vicinity of an object, specifying the distance range to the target
207 | ```
208 | res, obj = prs.npcs[0].go_to_object('Seat')
209 | ```
--------------------------------------------------------------------------------
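A minimal end-to-end sketch that chains several of the calls documented above; it assumes the environment has been initialized and the Unity client launched separately (see start.sh).

```
# Hedged sketch: drive the robot and NPCs with the documented API calls.
import time
from socket_server import PrsEnv

prs = PrsEnv()
prs.npc_start(3)                           # autonomous activities for three NPCs
prs.sim_speed(2)                           # run the simulation at 2x speed

pos, info = prs.agent.pos_query()          # where is the robot now?
prs.agent.go_to_destination('kitchen')     # navigate by location name
prs.agent.observation(degree=0, camera=0)  # look around with the head camera
prs.agent.goto_and_grasp('apple')          # walk to and grab the target object

time.sleep(30)                             # let the NPCs keep acting for a while
prs.finish_env()
```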
/game_demo.py:
--------------------------------------------------------------------------------
1 | import pygame
2 | import sys
3 |
4 |
5 | class WebDemo(object):
6 | def __init__(self, server=None):
7 | # Initialize Pygame
8 | pygame.init()
9 |
10 | self.server = server
11 | # Set window size
12 | self.screen = pygame.display.set_mode((800, 600))
13 | pygame.display.set_caption("PRS Env Demo")
14 |
15 | # Set Color
16 | self.WHITE = (255, 255, 255)
17 | self.BLACK = (0, 0, 0)
18 | self.text = 'This is a long text example that needs to be wrapped in multiple lines to fit the given area. Hopefully, this function will handle line wrapping properly.'
19 | self.running = False
20 | # Set Font
21 | self.font = pygame.font.SysFont(None, 24)
22 | self.button_state = 0
23 | # Add game start prompt text
24 | self.start_text = self.font.render("Game is beginning", True, self.BLACK)
25 | self.start_text_rect = self.start_text.get_rect()
26 | self.start_text_rect.center = (400, 50)
27 |
28 | # Define button coordinates and size
29 | self.button_width = 100
30 | self.button_height = 50
31 | self.button_margin = 20
32 | self.buttons = []
33 | for i in range(25):
34 | if i < 5:
35 | button = pygame.Rect(100 + i * (self.button_width + self.button_margin), 50, self.button_width, self.button_height)
36 | elif i < 10:
37 | button = pygame.Rect(100 + (i-5) * (self.button_width + self.button_margin), 110, self.button_width, self.button_height)
38 | elif i < 15:
39 | button = pygame.Rect(100 + (i-10) * (self.button_width + self.button_margin), 170, self.button_width, self.button_height)
40 | elif i < 20:
41 | button = pygame.Rect(100 + (i-15) * (self.button_width + self.button_margin), 230, self.button_width, self.button_height)
42 | elif i < 25:
43 | button = pygame.Rect(100 + (i-20) * (self.button_width + self.button_margin), 290, self.button_width, self.button_height)
44 |
45 | self.buttons.append(button)
46 |
47 |
48 | def button_clicked(self, index):
49 | result = f"Button {index + 1} clicked"
50 | # print(result)
51 | return result
52 |
53 | # Rendering text onto buttons
54 |
55 | def render_text_on_button(self, text, button):
56 | text_surface = self.font.render(text, True, self.WHITE)
57 | text_rect = text_surface.get_rect()
58 | text_rect.center = button.center
59 | self.screen.blit(text_surface, text_rect)
60 |
61 | # Rendered Text
62 | def render_text(self, screen, text):
63 | if len(text) > 100:
64 | text = text[-100:]
65 | text_surface = self.font.render(text, True, self.BLACK)
66 | text_rect = text_surface.get_rect()
67 | text_rect.center = (400, 500)
68 | screen.blit(text_surface, text_rect)
69 | return text_rect
70 |
71 | def render_text_with_wrap(self, rect, max_lines):
72 |         words = [word.split(' ') for word in self.text.splitlines()]  # split each line into words
73 |         space = self.font.size(' ')[0]  # width of a space between words
74 | max_width, max_height = rect.width, rect.height
75 | x, y = rect.topleft
76 | line_spacing = 2
77 |
78 | final_text = ''
79 | line_count = 0
80 |
81 | for line in words:
82 | for word in line:
83 | word_surface = self.font.render(word, True, self.BLACK)
84 | word_width, word_height = word_surface.get_size()
85 |
86 | if x + word_width < rect.right:
87 | final_text += word + ' '
88 | x += word_width + space
89 | else:
90 | x = rect.left
91 | y += word_height + line_spacing
92 | if y > max_height or line_count >= max_lines:
93 | return final_text
94 | final_text += '\n'
95 | line_count += 1
96 |
97 | return final_text
98 |
99 | def render_multiline_text(self, width=600):
100 | words = [word.split(' ') for word in self.text.splitlines()]
101 |         space = self.font.size(' ')[0]  # width of a space in the current font
102 | max_width, max_height = width, self.font.get_height()
103 | lines = []
104 | for line in words:
105 | for word in line:
106 | if self.font.size(' '.join(line))[0] >= max_width:
107 |                     line.insert(-1, '\n')  # insert a line break
108 | lines.extend(line[:-1])
109 | line = line[-1:]
110 | words.insert(words.index(line), line)
111 | break
112 | else:
113 | lines.extend(line)
114 | lines_surface = pygame.Surface((max_width, max_height * len(lines)))
115 | lines_surface.fill((0, 0, 0, 0))
116 | text_pos = pygame.Rect(0, 0, max_width, max_height)
117 | for line in lines:
118 | for word in line:
119 | word_surface = self.font.render(word, True, self.BLACK)
120 | if text_pos.x + word_surface.get_width() >= max_width:
121 | text_pos.x = 0
122 | text_pos.y += max_height
123 | lines_surface.blit(word_surface, text_pos)
124 | text_pos.x += word_surface.get_width() + space
125 | text_pos.y += max_height
126 | text_pos.x = 0
127 | return lines_surface
128 |
129 |
130 | def run(self, env, time_manager):
131 | self.running = True
132 | text = "Press"
133 | while self.running:
134 | self.screen.fill(self.WHITE)
135 |
136 | # Display game start prompt text
137 | text_rect = self.render_text(self.screen, self.text)
138 |             # text_rect = pygame.Rect(300, 300, 400, 200)  # set the text display area
139 | # final_text = self.render_text_with_wrap(text_rect, 8)
140 | # print(final_text)
141 | # text_surface = self.font.render(final_text, True, self.BLACK)
142 | self.screen.blit(self.start_text, self.start_text_rect, text_rect)
143 | # self.screen.blit(text_surface, text_rect)
144 |
145 | text = self.font.render(str(time_manager.current_date), True, self.BLACK)
146 | self.screen.blit(text, (10, 10))
147 | text = self.font.render('Day: '+str(time_manager.time_difference())+' Week: ' + str(time_manager.weekday_now()), True,self.BLACK)
148 | self.screen.blit(text, (10, 25))
149 | # print(self.buttons)
150 | for i, button in enumerate(self.buttons):
151 | pygame.draw.rect(self.screen, self.BLACK, button)
152 | self.render_text_on_button(f'Button {i + 1}', button)
153 |
154 | for event in pygame.event.get():
155 | if event.type == pygame.QUIT:
156 |                     self.running = False
157 | elif event.type == pygame.MOUSEBUTTONDOWN:
158 | if event.button == 1:
159 | x, y = event.pos
160 | for i, button in enumerate(self.buttons):
161 | if button.collidepoint(x, y):
162 | result = self.button_clicked(i)
163 | self.button_state = i + 1
164 | # render_text(screen, result)
165 | env.chioce(i+1)
166 | # text = result
167 | elif event.type == pygame.KEYDOWN:
168 | if event.key == pygame.K_q:
169 | print('game over')
170 | self.running = False
171 | self.server.state = 0
172 | pygame.quit()
173 | elif event.key == pygame.K_ESCAPE:
174 | print('game over')
175 | # print(self.button_state)
176 | self.running = False
177 | self.server.state = 0
178 | pygame.quit()
179 |
180 | try:
181 | pygame.display.flip()
182 | except:
183 | pygame.quit()
184 |
185 | # Close the Pygame window and terminate the program
186 | # pygame.quit()
187 | # sys.exit()
188 | if __name__ == '__main__':
189 | game = WebDemo()
190 | game.run()
--------------------------------------------------------------------------------
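A hypothetical standalone launch of the control panel defined above; in the real flow socket_server wires WebDemo to a Command instance and an EnvTime clock, so the DummyEnv and DummyServer stand-ins below exist purely for illustration.

```
# Hedged sketch: open the WebDemo window without the full PRS environment.
import threading
from game_demo import WebDemo
from socket_server import EnvTime


class DummyEnv:
    def chioce(self, index):        # name matches the callback WebDemo.run() invokes
        print('button pressed:', index)


class DummyServer:
    state = 1                       # WebDemo sets server.state = 0 on quit


clock = EnvTime(speed=60)
stop = threading.Event()
threading.Thread(target=clock.time_simulation, args=(stop,), daemon=True).start()

WebDemo(server=DummyServer()).run(DummyEnv(), clock)   # press Q or Esc to exit
stop.set()
```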
/map_process.py:
--------------------------------------------------------------------------------
1 | import json
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import copy
5 |
6 |
7 | class RoomMap(object):
8 |
9 | def __init__(self):
10 | self.floor1 = None
11 | self.floor2 = None
12 | self.floor3 = None
13 | # F1, f2, f3 = -16.69344711303711, -5.217403411865234, -0.0499998964369297
14 | self.height_floor1 = -16.693447
15 | self.height_floor2 = -5.2174
16 | self.height_floor3 = -0.0499999
17 | self.floor1_x0, self.floor1_y0, self.floor1_z0 = None, None, None
18 | self.maps_info = [
19 | {'x0': None, 'y0': None, 'z0': None, 'scale': None, 'width': None, 'height': None, 'grid': None},
20 | {'x0': None, 'y0': None, 'z0': None, 'scale': None, 'width': None, 'height': None, 'grid': None},
21 | {'x0': None, 'y0': None, 'z0': None, 'scale': None, 'width': None, 'height': None, 'grid': None}
22 | ]
23 | self.floors = [self.height_floor1, self.height_floor2, self.height_floor3]
24 |
25 | def get_world_position(self, n, i, j):
26 | x0, y0 = self.maps_info[n]['x0'], self.maps_info[n]['z0']
27 | scale, width, height = self.maps_info[n]['scale'], self.maps_info[n]['width'], self.maps_info[n]['height']
28 | x, y = x0 + i * scale, y0 + j * scale
29 | return x, self.floors[n], y
30 |
31 | def get_grid_position(self, n, x, y):
32 | x0, y0 = self.maps_info[n]['x0'], self.maps_info[n]['z0']
33 | scale, width, height = self.maps_info[n]['scale'], self.maps_info[n]['width'], self.maps_info[n]['height']
34 | # x, y = x0 + i * scale, y0 + j * scale
35 | i, j = (x - x0) / scale, (y - y0) / scale
36 | return round(i), round(j)
37 |
38 | def get_point_info(self, point):
39 | try:
40 | x, y, z = point['x'], point['y'], point['z']
41 | except:
42 | x, y, z = point[0], point[1], point[2]
43 |
44 | ind, value = min(enumerate(self.floors), key=lambda lis: abs(y - lis[1]))
45 | point_i, point_j = self.get_grid_position(ind, x, z)
46 | try:
47 | return ind, point_i, point_j, self.maps_info[ind]['grid'][point_i][point_j]
48 | except:
49 | i_max, j_max = np.array(self.maps_info[ind]['grid']).shape[0], np.array(self.maps_info[ind]['grid']).shape[
50 | 1]
51 | p_i, p_j = min(point_i, i_max - 1), min(point_j, j_max - 1)
52 | return ind, p_i, p_j, self.maps_info[ind]['grid'][p_i][p_j]
53 |
54 | def get_an_aligned_world_coordinate_randomly(self, floor, x, y, random=1):
55 | point_i, floor_layer, point_j = self.get_world_position(floor, x, y)
56 | accuracy = self.maps_info[floor]['scale']
57 | random_x_i = np.random.uniform(point_i, point_i + accuracy)
58 | random_y_j = np.random.uniform(point_j, point_j + accuracy)
59 | return random_x_i, random_y_j
60 |
61 | def get_an_accessible_area(self, x, y, z, radius_meter=1.5, mode=0, sort=1, inflation=0):
62 | # mode 0 represent the world position, 1 is the matrix map(x=floor_n, y=map_i, z=map_j)
63 | if not mode:
64 | floor, map_i, map_j, is_obstacle = self.get_point_info([x, y, z])
65 | else:
66 | floor, map_i, map_j = round(x), round(y), round(z)
67 | is_obstacle = self.maps_info[floor]['grid'][map_i][map_j]
68 | map_array = np.array(self.maps_info[floor]['grid'])
69 | radius = radius_meter / self.maps_info[floor]['scale']
70 | # Determine the scope of the query domain
71 | min_i, max_i = round(max(0, map_i - radius)), round(min(map_array.shape[0] - 1, map_i + radius))
72 | min_j, max_j = round(max(0, map_j - radius)), round(min(map_array.shape[1] - 1, map_j + radius))
73 | # Find feasible points within the specified radius
74 |         valid_points = []
75 |         # when inflation is set, also exclude points directly adjacent to obstacles
76 |
77 | for i in range(min_i, max_i + 1):
78 | for j in range(min_j, max_j + 1):
79 | if map_array[i, j] != 0 and ((i - map_i) ** 2 + (j - map_j) ** 2) <= radius ** 2:
80 | too_close_to_obstacle = False
81 | if inflation:
82 | for ii in range(max(0, i - 1), min(map_array.shape[0], i + 2)):
83 | for jj in range(max(0, j - 1), min(map_array.shape[1], j + 2)):
84 | if map_array[ii, jj] == 0:
85 | too_close_to_obstacle = True
86 | if not too_close_to_obstacle:
87 | valid_points.append((i, j))
88 | if sort:
89 | # Calculate the distance from each feasible point to a given point
90 | distances = [np.sqrt((i - map_i) ** 2 + (j - map_j) ** 2) for i, j in valid_points]
91 | # Sort feasible points in ascending order of distance
92 | sorted_valid_points = [point for _, point in sorted(zip(distances, valid_points))]
93 | # print('here: ', len(sorted_valid_points), ' in radius ', radius_meter, ', scale', self.maps_info[floor]['scale'])
94 | return floor, sorted_valid_points
95 | else:
96 | return floor, valid_points
97 |
98 | def add_room(self, json_data):
99 | # parsing map information
100 | map_id = json_data['mapId']
101 | floor = json_data['mapName']
102 | width = json_data['width']
103 | height = json_data['height']
104 | points = json_data['points']
105 | scale = json_data['accuracy']
106 | positions = json_data['minPoint']
107 | n_length = scale
108 | x, y = 0, 0
109 | # Create a 2D matrix map
110 | # Analyze points and add point information to the map matrix
111 | n = 0
112 | map_data = []
113 | xx, yy = [], []
114 | # json_m = {"mapId": 1, "mapName": "F3", "width": 51, "height": 68, "accuracy": 1.0, "points": []}
115 | po = eval(points)
116 | for point in points:
117 | # point_data = json.loads(point)
118 | map_data.append(1)
119 | matrix = [[0 for _ in range(height)] for _ in range(width)]
120 | navMapPoints = [[None for _ in range(height)] for _ in range(width)]
121 | for i in range(width):
122 | for j in range(height):
123 | index = i * height + j
124 | matrix[i][j] = map_data[index]
125 | navMapPoints[i][j] = {"x": x + i * n_length, "y": y + j * n_length, "data": map_data[index]}
126 | flag = None
127 | if floor == 'F1':
128 | self.floor1 = po
129 | flag = 0
130 | elif floor == 'F2':
131 | self.floor2 = po
132 | flag = 1
133 | elif floor == 'F3':
134 | self.floor3 = po
135 | flag = 2
136 | # plt.show()
137 | self.maps_info[flag]['scale'] = scale
138 | self.maps_info[flag]['width'] = width
139 | self.maps_info[flag]['height'] = height
140 | self.maps_info[flag]['grid'] = po
141 | self.maps_info[flag]['x0'] = positions['x']
142 | self.maps_info[flag]['y0'] = positions['z']
143 | self.maps_info[flag]['z0'] = positions['z']
144 | self.floors[flag] = positions['y']
145 | # matrix_map = np.array(map_data).reshape((width, height))
146 | # Convert all values uniformly to values greater than or equal to 0
147 | # Create and initialize matrix
148 | # matrix = [[0 for _ in range(max_y)] for _ in range(max_x)]
149 |
150 | def draw(self, i, n, j):
151 | if isinstance(n, int) and isinstance(i, int) and isinstance(j, int):
152 | pass
153 | else:
154 | n, i, j, is_obstacle = self.get_point_info((i, n, j))
155 | mat = copy.deepcopy(self.maps_info[n]['grid'])
156 | mat[i][j] = 5
157 | plt.imshow(mat, cmap='gray')
158 | plt.title('Floor{}'.format(n))
159 | plt.show()
160 |
161 | def plot_map(self):
162 | map = self.floor3
163 | plt.imshow(map, cmap='gray')
164 | plt.title('Room Map')
165 | plt.grid(False)
166 | plt.axis('off')
167 | plt.xticks([])
168 | plt.yticks([])
169 | plt.show()
170 |
171 | '''
172 | points:["{\"viability\":false,\"position\":{\"x\":-19.94000244140625,\"y\":-0.0499998964369297,\"z\":-59.400001525878909}}"]
173 | points:[[0/1,x,y,z],[0/1,-19.94000,-0.04999,-59.4000],[0/1,x,y,z]]
174 | '''
175 |
176 | if __name__ == '__main__':
177 | # read Map.json
178 | file_name = "map/map4.json"
179 | with open(file_name, 'r') as file:
180 | json_data = json.load(file)
181 | map = RoomMap()
182 | map.add_room(json_data)
--------------------------------------------------------------------------------
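A minimal sketch of the world/grid coordinate round trip that RoomMap provides; it assumes a map JSON in the format consumed by add_room() is available (the __main__ block above reads one from map/map4.json).

```
# Hedged sketch: convert between world coordinates and grid indices.
import json
from map_process import RoomMap

rooms = RoomMap()
with open('map/map4.json', 'r') as f:       # path taken from the demo above
    rooms.add_room(json.load(f))

# world position -> (floor index, grid i, grid j, cell value; 0 marks a non-walkable cell)
floor, i, j, cell = rooms.get_point_info({'x': -10.1, 'y': 0.1, 'z': -6.1})

# ...and back from grid indices to a world-space (x, y, z) point
x, y, z = rooms.get_world_position(floor, i, j)

# walkable cells within 1.5 m of that point, closest first
floor, points = rooms.get_an_accessible_area(-10.1, 0.1, -6.1, radius_meter=1.5)
print((i, j), cell, (x, y, z), points[:5])
```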
/socket_server.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import asyncio
3 | import numpy as np
4 | import os
5 | import sys
6 | import time
7 | import subprocess
8 | # import atexit
9 |
10 | from socket import *
11 | import threading
12 | import json
13 | import struct
14 | from enum import Enum
15 | from game_demo import *
16 | import ast
17 | import pickle
18 | from map_process import RoomMap
19 | from npc_control import Npc, Agent
20 | import datetime
21 | from multiprocessing import Process, Queue, Value, Lock
22 |
23 |
24 | class EnvTime(object):
25 | def __init__(self, speed=120, year=2025, month=3, day=12, hour=6, minute=50, second=0, end=2050):
26 |         # Define the start date; simulated time advances at `speed` times real time (default 120x)
27 | self.start_date = datetime.datetime(year, month, day, hour, minute, second)
28 | # Define time multiplier
29 | self.time_multiplier = speed
30 | self.running = 1
31 | self.end = end
32 | # Simulation time
33 | self.current_date = self.start_date
34 | self.start_time = self.start_date
35 | # self.current_date.isoweekday()
36 | self.week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
37 |
38 | def set_time(self, year=2025, month=3, day=12, hour=6, minute=50, second=0, end=2050):
39 | self.current_date = datetime.datetime(year, month, day, hour, minute, second)
40 | print(self.current_date)
41 |
42 | def time_simulation(self, stop_event):
43 | while True:
44 | # print(stop_event.is_set())
45 | if not self.running or stop_event.is_set():
46 | break
47 | # print("Current Date:", self.current_date)
48 | # Accelerate at 120 times the speed
49 |             time_delta = datetime.timedelta(seconds=1)  # one real second per tick, scaled by the multiplier below
50 | self.current_date += time_delta * self.time_multiplier
51 | # Control simulation speed
52 | time.sleep(1) # Update every second
53 | # Termination conditions can be added, such as stopping simulation when a specific date is reached
54 | if self.current_date.year > self.end:
55 | break
56 |
57 | def time_difference(self):
58 | time_diff = self.current_date - self.start_time
59 | hours = time_diff.total_seconds() // 3600
60 | # print("The time difference is% d hours" % hours)
61 | return time_diff.days
62 |
63 | def weekday_now(self):
64 | return self.week[self.current_date.weekday()]
65 |
66 | def simulation_start(self):
67 | self.start_time = self.current_date
68 |
69 |
70 | # message define
71 | class MsgCmd(Enum):
72 |     # 0: disconnect, 1: the server sends a behavior instruction, 2: the server sends a status request, 3: the client replies with a behavior callback,
73 |     # 4: the client replies with target status, 5: instruction to the robot, 6: request/feedback about the robot client
74 | EXIT = 0
75 | Instruction = 1
76 | Request = 2
77 | Result = 3
78 | State = 4
79 | Control = 5
80 | Information = 6
81 | Server_Update_Config = 7
82 | Server_Device_Create_Request = 8
83 | Server_Device_Inform_Request = 9
84 | Server_Device_Status_Request = 10
85 | Client_Device_Status_Response = 11
86 | Server_Config_Inform = 12
87 | Client_Config_Response = 13
88 |
89 |
90 | class Server(object):
91 | def __init__(self, stop_event):
92 | self.stop_event = stop_event
93 | self.state = 1
94 | self.clients = []
95 | self.messages = []
96 | self.information = ''
97 | # 1.Create a socket
98 | self.sock = socket(AF_INET, SOCK_STREAM)
99 | # 2. Prepare to connect to the server and establish a connection
100 | serve_ip = 'localhost'
101 | serve_port = 8000 # search for available port starting from 8000
102 | # tcp_socket.connect((serve_ip,serve_port))
103 | # Connect to the server, establish a connection, with parameters in tuple form
104 | tcp_address = ('localhost', serve_port)
105 | # Provide a mechanism for checking ports
106 | sock_result = 0
107 | while not sock_result:
108 | try:
109 | self.sock.bind(tcp_address)
110 | sock_result = 1
111 | except:
112 | serve_port += 1
113 | tcp_address = ('localhost', serve_port)
114 | with open('unity/PRS_Data/StreamingAssets/config.json', 'r') as file:
115 | env_data = json.load(file)
116 | env_data["serverConnectionPort"] = serve_port
117 | with open('unity/PRS_Data/StreamingAssets/config.json', 'w') as file:
118 | json.dump(env_data, file)
119 | print('server started: ', str(tcp_address))
120 | MAX_CONNECTION = 100
121 | # Start listening for connections
122 | self.sock.listen(MAX_CONNECTION)
123 | self.headerSize = 12
124 | self.count = 0
125 | # self.robot = PRS_IK()
126 | # robot ik algorithm
127 | self.maps = RoomMap()
128 | self.notes = {}
129 | self.byte_stream = bytes()
130 | self.header_length = 0
131 | self.sock.settimeout(10)
132 |
133 | def wait_for_connection(self):
134 | while True:
135 | try:
136 | now_client, addr = self.sock.accept()
137 | print('Connected by', now_client)
138 | self.state = 2
139 | now_client.settimeout(300)
140 | self.clients.append([addr, now_client])
141 | except: pass
142 | for index_client, n_client in enumerate(self.clients):
143 | # result = self.sock.connect_ex(n_client)
144 | try:
145 | result = n_client[1].getsockname()
146 | r = n_client[1].getpeername()
147 | # print('===========perfect connection============')
148 | except Exception as e:
149 | print(e, n_client[0], 'Connected Closed Now')
150 | try:
151 | self.clients.remove(n_client)
152 | if len(self.clients) == 0 and self.state == 2:
153 | self.state = 0
154 | self.stop_event.set()
155 | except:
156 | pass
157 | if not self.state or self.stop_event.is_set():
158 | print(self.state, 'No waiting for connection')
159 | self.sock.close()
160 | break
161 | if len(self.clients):
162 | time.sleep(0.5)
163 | else:
164 | time.sleep(0.01)
165 |
166 | def check_connection(self):
167 | pass
168 | # for index_client, n_client in enumerate(self.clients):
169 | # # result = self.sock.connect_ex(n_client)
170 | # rrr = n_client[1].recv(1024)
171 | # result = n_client[1].getsockname()
172 | # r = n_client[1].getpeername()
173 |
174 | def handle_data(self, n_client):
175 | # receive message from client -> information process
176 | data = n_client.recv(10240000)
177 | if not data:
178 | return 0
179 | else:
180 | self.messages.append(data)
181 | # print('---------------------------------', 'Received: msg')
182 | # ------------------parsing info from unity---------------------
183 | # self.send_back({'result': 1})
184 | return 1
185 |
186 | def message_process(self):
187 | while True:
188 | if not self.state or self.stop_event.is_set():
189 | self.state = 0
190 | print(self.state, 'Processing Completed')
191 | break
192 | if len(self.messages) > 0:
193 | for msg_i, msg in enumerate(self.messages):
194 | try:
195 | self.unpack(msg)
196 | except Exception as e:
197 | print('.........parsing error............', e, type(msg))
198 | self.state = 0
199 | finally:
200 | del self.messages[msg_i]
201 | else:
202 | time.sleep(0.005)
203 |
204 | def receive_data(self):
205 | while True:
206 | # self.check_connection()
207 | for n_client in self.clients:
208 | try:
209 | # Processing received message
210 | res = self.handle_data(n_client[1])
211 | except Exception as e:
212 | print(e, n_client[0], 'Connected closed')
213 | try:
214 | self.clients.remove(n_client)
215 | if len(self.clients) == 0 and self.state == 2:
216 | self.state = 0
217 | self.stop_event.set()
218 | except:
219 | pass
220 | time.sleep(0.005)
221 | if not self.state or self.stop_event.is_set():
222 | print(self.state, 'Connection closed')
223 | self.sock.close()
224 | break
225 |
226 | def send_data(self, cmd=1, data={"requestIndex":10,"npcId":0,"actionId":0,"actionPara":""}, recv=0):
227 | send_finish = 0
228 | while not send_finish:
229 | if len(self.clients)==0: break
230 | for n_client in self.clients:
231 | self.check_connection()
232 | try:
233 | if cmd < 15:
234 | data['requestIndex'] = self.count
235 | self.count = self.count + 1
236 | elif cmd == 0:
237 | self.state = 0
238 | msg, msg_data = self.pack(cmd, data, recv)
239 | n_client[1].send(msg)
240 | send_finish = 1
241 | return data['requestIndex']
242 | break
243 | except Exception as e:
244 | # print(e, n_client[0])
245 | try:
246 | self.clients.remove(n_client)
247 | if len(self.clients) == 0:
248 | self.state = 0
249 | except: pass
250 | return False
251 |
252 | def send_back(self, response={'result': 0}):
253 | f = 0
254 | while not f:
255 | for n_client in self.clients:
256 | self.check_connection()
257 | try:
258 | info = json.dumps(response)
259 | n_client[1].send(info.encode("utf8"))
260 | print('Sent: ', info.encode("utf8"))
261 | f = 1
262 | return 1
263 | except Exception as e:
264 | print(e, n_client[0])
265 | try:
266 | self.clients.remove(n_client)
267 | except: pass
268 |
269 | def pack(self, cmd, _body, _recv=0):
270 | body = json.dumps(_body)
271 | # Convert the message body to Json format and convert it to byte encoding
272 | header = [body.__len__(), cmd, _recv]
273 | # Form a list of message headers in order
274 | headPack= struct.pack("3I", *header)
275 | # Use struct to package message headers and obtain byte encoding
276 | sendData = headPack+body.encode("utf8")
277 | # Combine message header bytes and message body bytes together
278 | return sendData, body
279 |
280 | def handle_msg(self, headPack ,body):
281 | """Classify and process received message strings"""
282 | # data processing
283 | cmd= 'ad'
284 | try:
285 |             cmd = MsgCmd(headPack[1]).name  # map the numeric code to the command name
286 | except Exception as e:
287 | print(headPack[1])
288 | # print('python get================cmd is', cmd)
289 | is_recv = headPack[2]
290 | # print("Received 1 packet->bodySize:{}, cmd:{}, recv:{}".format(headPack[0], cmd, is_recv))
291 | body = body.replace("false", "False")
292 | body = body.replace("true", "True")
293 | body = body.replace("none", "None")
294 | p = json.loads(body) # Decode and deserialize strings into JSON objects
295 | dict_data = ast.literal_eval(p)
296 | # self.information += str(cmd) + str(body)
297 | # Check the message type
298 | dict_d = copy.deepcopy(dict_data)
299 | del dict_d['requestIndex']
300 | self.notes[dict_data['requestIndex']] = dict_d
301 | if cmd == "EXIT":
302 | self.state = 0
303 | print('0. Env is over, exit!')
304 | return
305 | elif cmd == "Result": pass
306 | # print('3、Execution results from Unity', dict_data)
307 | elif cmd == "State": pass
308 | # Storing parameter information
309 | # print('4、Detailed information obtained id: {}'.format(dict_data['requestIndex']))
310 | elif cmd == "Control": pass
311 | # IK is here
312 | elif cmd == "Information": pass
313 | # print("6、This is robot information", dict_data['requestIndex'], ', length- ', len(dict_data),)
314 | else:
315 | print("\nUnknown cmd: {0}".format(cmd))
316 | # Continue receiving messages
317 | #self._recv_bytes()
318 |
319 | def unpack(self, data):
320 | headPack = struct.unpack('3I', bytearray(data[:self.headerSize]))
321 | bodySize = headPack[0]
322 | body = data[self.headerSize:self.headerSize + bodySize]
323 | try:
324 | self.handle_msg(headPack, body.decode("utf8"))
325 | except ValueError:
326 | if not self.header_length or len(self.byte_stream) == 0:
327 | self.header_length = headPack
328 | self.byte_stream += body
329 | else:
330 | self.byte_stream += data
331 | if len(self.byte_stream) >= self.header_length[0]:
332 | # data_byte = self.byte_stream.decode("utf8")
333 | self.handle_msg(self.header_length, self.byte_stream.decode())
334 | self.byte_stream = bytes()
335 | self.header_length = 0
336 | return 1
337 |
338 | def unpack_pro(self, data, msgHandler):
339 |         self.dataBuffer = getattr(self, 'dataBuffer', bytes())  # reuse any partially assembled buffer
340 | if data:
341 | self.dataBuffer += data
342 | while True:
343 | # Jump out of the function to continue receiving data when there is insufficient data in the message header
344 | if len(self.dataBuffer) < self.headerSize:
345 | # print("Packet (% s Byte) is smaller than the length of the message header, causing a small loop to break out" % len(self.dataBuffer))
346 | break
347 |                 # struct: '3I' packs three unsigned ints (body size, command code, recv flag)
348 | # msg_length = struct.unpack("I", bytearray(msg[:4]))[0]
349 | # Obtain information length
350 | headPack = struct.unpack('3I', bytearray(self.dataBuffer[:self.headerSize]))
351 | # Decode the message header
352 | # Get message body length
353 | bodySize = headPack[0]
354 | # Handling subcontracting situations, jumping out of the function to continue receiving data
355 | if len(self.dataBuffer) < self.headerSize + bodySize:
356 |                     # print("Packet (%s Byte) incomplete (total of %s Bytes), skipping small loop" % (len(self.dataBuffer), self.headerSize + bodySize))
357 | break
358 | # Read the content of the message body
359 | body = self.dataBuffer[self.headerSize:self.headerSize + bodySize]
360 | self.handle_msg(headPack, body.decode("utf8"))
361 | # Handling of packet sticking and obtaining the next part of the data packet
362 | self.dataBuffer = self.dataBuffer[self.headerSize + bodySize:]
363 | if len(self.dataBuffer) != 0:
364 | return True # Continue receiving messages
365 | else:
366 | return False # No longer receiving messages
367 | else:
368 | return False # No longer receiving messages
369 |
370 | def wait_for_respond(self, id, times=60):
371 | info = None
372 | for ii in range(int(times)):
373 | time.sleep(0.1)
374 | try:
375 | info = self.notes[id]
376 | break
377 | except Exception as e:
378 | pass
379 | return info
380 |
381 | def object_query(self, obj_id=0):
382 | for i in range(5):
383 | instruction = {"requestIndex": 0, "targetType": 1, "targetId": obj_id}
384 | r_id = self.send_data(2, instruction, 1)
385 | object_info = self.wait_for_respond(r_id, 60)
386 | if object_info is not None:
387 | break
388 | time.sleep(0.1)
389 | if object_info:
390 | object_info = eval(object_info['statusDetail'])
391 | return object_info
392 |
393 | def object_nearby_detect(self, obj_id=0):
394 | instruction = {"requestIndex": 1, "targetType": 20, "targetId": obj_id}
395 | r_id = self.send_data(2, instruction, 1)
396 | object_info = self.wait_for_respond(r_id, 60)
397 |         if object_info:
398 |             # parse the status payload returned by Unity
399 |             object_info = eval(object_info['statusDetail'])
400 |             try:
401 |                 return object_info['touchedIds']
402 |             except:
403 |                 # no touch information in the response
404 |                 return None
405 |         # no response received within the timeout
406 |         return None
407 |
408 | def object_transform(self, obj_type=0, target_id=4, pos=(0, 0, 0), rotation=0):
409 | # obj_type = 0: npc, obj_type = 1: items in the env
410 | try:
411 | position = {"x": pos[0], "y": pos[1], "z": pos[2]}
412 | except:
413 | position = {"x": pos['x'], "y": pos['y'], "z": pos['z']}
414 | instruction = {"requestIndex": 1, "objectTransformHandles": []}
415 | para = {"objectType": obj_type, "objectId": target_id, "objectPos": position, "objectDir":
416 | {"x": 0, "y": 90, "z": 0}}
417 | instruction['objectTransformHandles'].append(para)
418 | r_id = self.send_data(12, instruction, 1)
419 | object_info = self.wait_for_respond(r_id, 60)
420 | time.sleep(0.2)
421 | return object_info
422 |
423 | def env_finish(self, process, npcs):
424 | if process:
425 | process.terminate()
426 | # Waiting for the process to end (optional, but recommended)
427 | process.wait()
428 | self.send_data(0, {"requestIndex": 10, "actionId": 1}, 0)
429 | # movement demo
430 | self.state = 0
431 | for npc in npcs:
432 | npc.running = 0
433 | self.stop_event.set()
434 | self.sock.close()
435 | # print(self.state, type(self.state))
436 | print(threading.active_count(), ' ------ env is ready to end')
437 | time.sleep(3)
438 | print(threading.active_count(), ' ------ thank you for using')
439 |
440 |
441 | class Command(object):
442 |
443 | def __init__(self, ser, game, per, objects):
444 | self.id = 0
445 | self.web = game
446 | self.server = ser
447 | self.tar = [0, 1]
448 | self.near_items = None
449 | self.object_data = objects
450 |         # 0: disconnect, 1: the server sends a behavior instruction, 2: the server sends a status request, 3: the client replies with a behavior callback, 4: the client replies with target status, 5: instruction to the robot, 6: request/feedback about the robot client
451 | self.instruction = [
452 | [0, {'this is': 'an example for python command to unity API'}, 0],
453 |
454 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 10, "actionPara": "{\"itemId\":177}"}, 1],
455 | # [1, {"requestIndex":10,"npcId":0,"actionId":0,"actionPara":""}, 1],
456 | # python ins index, npc,
457 | # [[0,npc_john,position],[]]
458 | # info -> items.json id->name
459 |
460 | # 1 npc stand
461 | # [1, {"requestIndex":10,"npcId":0,"actionId":10,"actionPara":"{\"itemId\":177}"}, 1],
462 | # npc sit
463 | [1, {"requestIndex":10,"npcId":0,"actionId":1,"actionPara":"{\"destination\":{\"x\":-14.56,\"y\":0.0,\"z\":-4.3}}"}, 1],
464 | # # 2 npc walk to (x,y,z)
465 | # [1, {"requestIndex":10,"npcId":0,"actionId":2,"actionPara":"{\"angle\":50}"}, 1],
466 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 0, "actionPara": ""}, 1],
467 | # 3 npc turn n degrees
468 | [1, {"requestIndex":10,"npcId":0,"actionId":100,"actionPara": "{\"handType\":-1,\"itemId\":1}"}, 1],
469 | # 4 npc pick
470 | [1, {"requestIndex":10,"npcId":0,"actionId":101,"actionPara":"{\"handType\":-1,\"position\":{\"x\":5.0,\"y\":12.0,\"z\":5.0}}"}, 1],
471 | # 5 npc put
472 | [1, {"requestIndex":10,"npcId":0,"actionId":300,"actionPara":"{\"expressionType\":100}"}, 1],
473 | # 6 npc emoji
474 | [1, {"requestIndex":10,"npcId":0,"actionId":300,"actionPara":"{\"expressionType\":101}"}, 1],
475 | # 7 npc stand
476 | # [1, {"requestIndex":10,"npcId":0,"actionId":300,"actionPara":"{\"expressionType\":102}"}, 1],
477 | [1, {"requestIndex":10,"npcId":0,"actionId":102,"actionPara":"{\"handType\":-1}"},1],
478 | # 8 npc stand
479 | [2, {"requestIndex":0,"targetType":0,"targetId":0}, 1],
480 | # [2, {"requestIndex": 0, "targetType": 1, "targetId": 2}, 1],
481 | # 9 npc information query
482 | # [2, {"requestIndex":0,"targetType":1,"targetId":3}, 1],
483 | [2, {"requestIndex": 101, "targetType": 2, "targetId": 1}, 1],
484 | # 10 object information query
485 | [5, {"requestIndex": 0, "actionId": 0, "actionPara": "{\"distance\":1.0}"}, 1],
486 | # 11 robot move forward
487 | [5, {"requestIndex": 1, "actionId": 1, "actionPara": "{\"degree\":90}"}, 1],
488 | # 12 robot turn
489 | [5, {"requestIndex": 2, "actionId": 2, "actionPara": "{\"degree\":90}"}, 1],
490 | # 13 robot turn
491 | [5, {"requestIndex": 10, "requireId": 0}, 1],
492 | # 14 robot position
493 | [5, {"requestIndex": 11, "requireId": 1}, 1],
494 | # 15 robot joint
495 |
496 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":100}"}, 1],
497 | # 16
498 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":101}"}, 1],
499 | # 17
500 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":102}"}, 1],
501 | # 18
502 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":103}"}, 1],
503 | # 19
504 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":104}"}, 1],
505 | # 20
506 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":-1}"}, 1],
507 | # 21
508 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":105}"}, 1],
509 | # 22
510 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":106}"}, 1],
511 | # 23
512 | [1, {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": "{\"showType\":200}"}, 1],
513 | # 24
514 | # [1, {"requestIndex": 10, "npcId": 3, "actionId": 400, "actionPara": "{\"showType\":201}"}, 1],
515 | [1, {"requestIndex":0,"targetType":10,"targetId":0}, 1]
516 | # 25
517 | ]
518 | self.npc = per
519 |
520 | def chioce(self, index):
521 | print("click the Button ", index)
522 | # Convert strings to dictionaries
523 | data = self.instruction[index]
524 | if index == 2:
525 | # data = self.instruction[2]
526 | action_dict = json.loads(data[1]["actionPara"])
527 |
528 |             # set random values for the destination x and z
529 |             # action_dict["destination"]["x"] = round(np.random.uniform(-1, 1), 2)
530 |             # action_dict["destination"]["z"] = round(np.random.uniform(-1, 1), 2)
531 |
532 |             # update the actionPara string
533 | data[1]["actionPara"] = json.dumps(action_dict)
534 | self.instruction[index] = data
535 |
536 | elif index == 3:
537 | # [1, {"requestIndex": 10, "npcId": 0, "actionId": 2, "actionPara": "{\"angle\":50}"}, 1],
538 | for ii in range(10):
539 | data[1]["npcId"] = ii
540 | self.send_to_client(data)
541 | elif index == 4:
542 | action_dict = json.loads(data[1]["actionPara"])
543 | action_dict["itemId"] = self.tar[1]
544 | data[1]["actionPara"] = json.dumps(action_dict)
545 | self.instruction[index] = data
546 | elif index == 1:
547 | action_dict = json.loads(data[1]["actionPara"])
548 | tar = self.object_data.object_parsing(self.near_items, ['Stool', 'Chair'])
549 | action_dict["itemId"] = tar
550 | data[1]["actionPara"] = json.dumps(action_dict)
551 | self.instruction[index] = data
552 | # ---------instruction send----------
553 | ins_id = self.send_to_client(self.instruction[index])
554 | # print(ins_id, 'sended')
555 | if index == 9:
556 | ins = self.object_data.check_feedback(self.server, ins_id)
557 | self.near_items = ins
558 | self.tar[0] = self.object_data.object_parsing(ins, ['Stool'])
559 | self.tar[1] = self.object_data.object_parsing(ins, ['Apple'])
560 | # get response of unity from server messages
561 | self.web.text = self.server.information
562 |
563 | def send_to_client(self, inf):
564 | res = self.server.send_data(inf[0], inf[1], inf[2])
565 | # def send_data(self, cmd=1, data={"requestIndex":10,"npcId":0,"actionId":0,"actionPara":""}, recv=0):
566 | return res
567 |
568 |
569 | class ObjectsData(object):
570 |
571 | def __init__(self):
572 | with open('unity/PRS_Data/StreamingAssets/itemInfo.json', 'r') as file:
573 | json_data = json.load(file)
574 | with open('unity/PRS_Data/StreamingAssets/roomInfo.json', 'r') as file:
575 | room_data = json.load(file)
576 | with open('unity/PRS_Data/StreamingAssets/segmentationTagColorInfo.json', 'r') as file:
577 | seg_tag_data = json.load(file)
578 | with open('data/npc_data.json', 'r') as file:
579 | json_npc = json.load(file)
580 | with open('unity/PRS_Data/StreamingAssets/receptacleInfo.json', 'r') as file:
581 | json_receptcle = json.load(file)
582 | rece = json_receptcle['receptacleItemBounds']
583 | # decode JSON
584 | seg_data = []
585 | rgb_id = dict()
586 | for index_tag, item_tag in enumerate(seg_tag_data['TagColors']):
587 | # data_i = json.loads(item_tag)
588 | seg_data.append(item_tag)
589 | r_n, g_n, b_n = float(item_tag['color']['r']), float(item_tag['color']['g']), float(item_tag['color']['b'])
590 | r_n, g_n, b_n = '{:.2f}'.format(r_n), '{:.2f}'.format(g_n), '{:.2f}'.format(b_n)
591 | rgb = (r_n, g_n, b_n)
592 | rgb_id[rgb] = index_tag
593 | if item_tag['tag'] == "Untagged":
594 | self.background = rgb
595 |
596 | env_objects = []
597 | for json_i in json_data['statusDetails']:
598 | data = json.loads(json_i)
599 | env_objects.append(data)
600 | env_rooms = []
601 | for json_i in room_data['statusDetails']:
602 | data = json.loads(json_i)
603 | env_rooms.append(data)
604 | room_index = []
605 | for room_i, roo in enumerate(env_rooms):
606 | # print()
607 | xx, zz = [], []
608 | for point in roo['roomBoudaryPoints']:
609 | xx.append(point['x'])
610 | zz.append(point['z'])
611 | name = roo['roomName']
612 | # na = name.split('_')
613 | room_index.append({'name': name, 'x': [min(xx), max(xx)], 'y': roo['roomCenter']['y'], 'z': [min(zz), max(zz)]})
614 | # print('----------------')
615 | buliding_rooms = [dict(), dict(), dict()]
616 | for ro in env_rooms:
617 | y = ro['roomCenter']['y']
618 | if y > -0.8:
619 | buliding_rooms[2][ro['roomName']] = ro
620 | elif y > -10:
621 | buliding_rooms[1][ro['roomName']] = ro
622 | else:
623 | buliding_rooms[0][ro['roomName']] = ro
624 | self.buliding_rooms = buliding_rooms
625 | self.room_area = room_index
626 |
627 | self.objects = env_objects
628 | self.rooms = env_rooms
629 | self.segment_tag = seg_data
630 | self.rgb_to_id = rgb_id
631 | self.characters = json_npc['npc']
632 | self.room_receptacles = None
633 | self.sematic_map = [None, None, None]
634 | # print(env_rooms)
635 | grab_obj = [
636 | 'BoxedChocolate01', 'InjectableMedicationBottle_2', 'InjectableMedicationBottle_1', 'Apple_2', 'Kiwi',
637 | 'BoxedCake02', 'ButterSauce', 'PlasticBottle03', 'BoxedCake01', 'PlasticBottle03WithGreenLid',
638 | 'WaterBottle_Blue_3',
639 | 'WaterBottle_Blue_1', 'PlasticBottle03WithYellowLid', 'TomatoSauce', 'Spoon_2', 'Tomato', 'Cup_3', 'Cup_2',
640 | 'RedBeansCan', 'BaggedCake02', 'RedChill', 'MeatCan03', 'MeatCan01', 'PeaCan01', 'Cup_1', 'MeatCan02',
641 | 'ChocolateSauce', 'BaggedCake01', 'Spoon_1', 'MobilePhone_3', 'PlasticBottle03WithRedLid',
642 | 'ChiliSauce', 'MobilePhone_1', 'ConsolegamingPad', 'MobilePhone_2', 'Spoon_3', 'BoxedCake03', 'HoneySauce',
643 | 'Apple_1', 'Banana', 'BaggedCake03', 'BoxedChocolate02', 'InjectableMedicationBottle_3',
644 | 'WaterBottle_Blue_2',
645 | 'PeaCan02', 'PlasticBottle03WithBlueLid', 'Apple_3', 'PeanutSauce']
646 | items = dict()
647 | for obj in self.objects:
648 | name = obj['itemName']
649 | fea = obj['features']
650 | if "Grabable" in fea and name in grab_obj:
651 | items[name] = dict()
652 | items[name]["id"] = obj['itemId']
653 | items[name]['position'] = obj['position']
654 | # print(items)
655 | self.grasp_items = items
656 | receptacles_information = {'F3_KitchenRoom': {'receptacles': [
657 | {'name': 'Small Table (3)', 'feature': 'yellow', 'pos': {'x': -15.496, 'y': 0.0, 'z': -8.361}},
658 | {'name': 'Table', 'feature': 'wooden dining', 'pos': {'x': -15.996, 'y': 0.0, 'z': -5.042}},
659 | {'name': 'Kitchen Counter', 'feature': 'left', 'pos': {'x': -10.483, 'y': 0.0, 'z': -4.688}},
660 | {'name': 'Kitchen Wardrobe', 'feature': 'wooden high', 'pos': {'x': -10.527, 'y': 0.0, 'z': -6.603}},
661 | {'name': 'Dinner Counter', 'feature': 'middle', 'pos': {'x': -13.109, 'y': 0.0, 'z': -6.124}}]},
662 | 'F3_Bedroom_5': {'receptacles': [{'name': 'Table', 'feature': 'sturdy',
663 | 'pos': {'x': 13.58, 'y': 0.01, 'z': -27.902}},
664 | {'name': 'Bed', 'feature': 'stylish',
665 | 'pos': {'x': 14.683, 'y': 0.01, 'z': -28.336}}]},
666 | 'F2_HallRoom02': {'receptacles': [{'name': 'Table (1)', 'feature': '',
667 | 'pos': {'x': 26.88, 'y': -5.167, 'z': -7.638}},
668 | {'name': 'Table (2)', 'feature': '',
669 | 'pos': {'x': 26.88, 'y': -5.167, 'z': -5.174}}]},
670 | 'F2_HallRoom03': {'receptacles': [{'name': 'Table (1)', 'feature': '',
671 | 'pos': {'x': 26.073, 'y': -5.167, 'z': 4.748}},
672 | {'name': 'Reception Counter', 'feature': '',
673 | 'pos': {'x': 21.354, 'y': -5.167, 'z': 5.55}},
674 | {'name': 'Table (3)', 'feature': '',
675 | 'pos': {'x': 28.461, 'y': -5.167, 'z': 4.516}},
676 | {'name': 'Table (2)', 'feature': '',
677 | 'pos': {'x': 27.206, 'y': -5.167, 'z': 5.7}},
678 | {'name': 'Table (4)', 'feature': '',
679 | 'pos': {'x': 28.461, 'y': -5.167, 'z': 6.89}}]},
680 | 'F3_HallRoom02': {'receptacles': [{'name': 'Table (1)', 'feature': 'wall-adjacent',
681 | 'pos': {'x': 26.88, 'y': 0.0, 'z': -7.638}},
682 | {'name': 'Table (2)', 'feature': 'light brown',
683 | 'pos': {'x': 26.88, 'y': 0.0, 'z': -5.174}}]},
684 | 'F3_OfficeSpaceRoom': {'receptacles': [{'name': 'Desk (8)', 'feature': 'rectangular',
685 | 'pos': {'x': 2.841, 'y': -0.002,
686 | 'z': 3.755}},
687 | {'name': 'Desk (3)', 'feature': 'compact',
688 | 'pos': {'x': 7.973, 'y': -0.002,
689 | 'z': 7.732}},
690 | {'name': 'Desk (4)', 'feature': 'modern',
691 | 'pos': {'x': 1.812, 'y': -0.003,
692 | 'z': 3.755}},
693 | {'name': 'Desk (6)', 'feature': 'wooden',
694 | 'pos': {'x': 2.836, 'y': -0.002,
695 | 'z': 7.745}},
696 | {'name': 'Desk (2)', 'feature': 'functional',
697 | 'pos': {'x': 7.978, 'y': -0.002,
698 | 'z': 3.784}},
699 | {'name': 'Desk (1)', 'feature': 'office',
700 | 'pos': {'x': 6.949, 'y': -0.003,
701 | 'z': 3.784}},
702 | {'name': 'Side Table', 'feature': 'tall',
703 | 'pos': {'x': 4.838, 'y': 0.0, 'z': 5.93}},
704 | {'name': 'Desk (7)', 'feature': 'office',
705 | 'pos': {'x': 6.944, 'y': -0.003,
706 | 'z': 7.732}},
707 | {'name': 'Desk (5)', 'feature': 'work',
708 | 'pos': {'x': 1.807, 'y': -0.003,
709 | 'z': 7.745}}]}, 'F3_Bedroom_3': {
710 | 'receptacles': [
711 | {'name': 'Table', 'feature': 'wall-mounted', 'pos': {'x': 13.58, 'y': 0.01, 'z': -20.22}},
712 | {'name': 'Bed', 'feature': 'modern', 'pos': {'x': 14.683, 'y': 0.01, 'z': -20.654}}]},
713 | 'F3_HallRoom01': {'receptacles': [{'name': 'Polygon Table', 'feature': 'stone',
714 | 'pos': {'x': 12.727, 'y': 0.0, 'z': -5.639}}]},
715 | 'F3_GymRoom': {'receptacles': [{'name': 'Furniture_Gym_Desk', 'feature': 'dark gray',
716 | 'pos': {'x': 10.945, 'y': 0.0, 'z': 3.209}}]},
717 | 'F3_Bedroom_4': {'receptacles': [{'name': 'Table', 'feature': 'minimalist',
718 | 'pos': {'x': 25.7, 'y': 0.01, 'z': -29.25}},
719 | {'name': 'Bed', 'feature': 'dark',
720 | 'pos': {'x': 24.597, 'y': 0.01, 'z': -28.816}}]},
721 | 'F3_OfficeRoom01': {'receptacles': [{'name': 'Desk 4', 'feature': 'versatile',
722 | 'pos': {'x': -3.505, 'y': 0.0, 'z': -6.917}}]},
723 | 'F3_LivingRoom': {'receptacles': [{'name': 'Dinner Counter', 'feature': 'marble',
724 | 'pos': {'x': -3.09, 'y': 0.0, 'z': 5.787}},
725 | {'name': 'Coffee table', 'feature': 'low',
726 | 'pos': {'x': -6.92, 'y': 0.041, 'z': 6.23}}]},
727 | 'F3_HallRoom03': {'receptacles': [{'name': 'Table (4)', 'feature': 'square',
728 | 'pos': {'x': 28.461, 'y': 0.0, 'z': 6.89}},
729 | {'name': 'Reception Counter',
730 | 'feature': 'marmoreal',
731 | 'pos': {'x': 21.354, 'y': 0.0, 'z': 5.55}},
732 | {'name': 'Table (2)', 'feature': 'rectangular',
733 | 'pos': {'x': 27.206, 'y': 0.0, 'z': 5.7}},
734 | {'name': 'Table (3)', 'feature': 'small',
735 | 'pos': {'x': 28.461, 'y': 0.0, 'z': 4.516}},
736 | {'name': 'Table (1)', 'feature': 'dark',
737 | 'pos': {'x': 26.073, 'y': 0.0, 'z': 4.748}}]},
738 | 'F3_Bedroom_9': {'receptacles': [{'name': 'Table', 'feature': 'elegant',
739 | 'pos': {'x': 13.58, 'y': 0.01, 'z': -43.28}},
740 | {'name': 'Bed', 'feature': 'comfortable',
741 | 'pos': {'x': 14.683, 'y': 0.01, 'z': -43.714}}]},
742 | 'F3_Bedroom_2': {'receptacles': [{'name': 'Table', 'feature': 'rectangular',
743 | 'pos': {'x': 25.7, 'y': 0.01, 'z': -21.56}},
744 | {'name': 'Bed', 'feature': 'simple',
745 | 'pos': {'x': 24.597, 'y': 0.01, 'z': -21.126}}]},
746 | 'F3_Bedroom_0': {'receptacles': [{'name': 'Table', 'feature': 'wooden',
747 | 'pos': {'x': 25.7, 'y': 0.01, 'z': -13.878}},
748 | {'name': 'Bed', 'feature': 'wonderful',
749 | 'pos': {'x': 24.597, 'y': 0.01, 'z': -13.444}}]},
750 | 'F3_Bedroom_7': {'receptacles': [{'name': 'Table', 'feature': 'brown',
751 | 'pos': {'x': 13.58, 'y': 0.01, 'z': -35.59}},
752 | {'name': 'Bed', 'feature': 'dark colored',
753 | 'pos': {'x': 14.683, 'y': 0.01, 'z': -36.024}}]},
754 | 'F3_Bedroom_6': {'receptacles': [{'name': 'Table', 'feature': 'decorative',
755 | 'pos': {'x': 25.7, 'y': 0.01, 'z': -36.938}},
756 | {'name': 'Bed', 'feature': 'contemporary',
757 | 'pos': {'x': 24.597, 'y': 0.01, 'z': -36.504}}]},
758 | 'F3_ConferenceRoom': {'receptacles': [
759 | {'name': 'Meeting Table Large', 'feature': 'elongated',
760 | 'pos': {'x': 4.88, 'y': 0.0, 'z': -5.752}}]}, 'F3_Bedroom_1': {'receptacles': [
761 | {'name': 'Table', 'feature': 'rectangular', 'pos': {'x': 13.58, 'y': 0.01, 'z': -12.53}},
762 | {'name': 'Bed', 'feature': 'tidy', 'pos': {'x': 14.683, 'y': 0.01, 'z': -12.964}}]}, 'F3_Bedroom_11': {
763 | 'receptacles': [{'name': 'Table', 'feature': 'wooden', 'pos': {'x': 13.58, 'y': 0.01, 'z': -50.962}},
764 | {'name': 'Bed', 'feature': 'dark', 'pos': {'x': 14.683, 'y': 0.01, 'z': -51.396}}]},
765 | 'F3_Bedroom_8': {'receptacles': [{'name': 'Table', 'feature': 'personal',
766 | 'pos': {'x': 25.7, 'y': 0.01, 'z': -44.62}},
767 | {'name': 'Bed', 'feature': 'single',
768 | 'pos': {'x': 24.597, 'y': 0.01, 'z': -44.186}}]},
769 | 'F2_HallRoom01': {'receptacles': [{'name': 'Polygon Table', 'feature': '',
770 | 'pos': {'x': 12.727, 'y': -5.167, 'z': -5.639}}]},
771 | 'F2_MedicalRoom03': {'receptacles': [{'name': 'Desk 4', 'feature': '',
772 | 'pos': {'x': -13.347, 'y': -5.172,
773 | 'z': -6.917}}]}, 'F3_OfficeRoom02': {
774 | 'receptacles': [{'name': 'Desk 3', 'feature': 'office', 'pos': {'x': -6.415, 'y': 0.0, 'z': -6.654}}]},
775 | 'F3_Bedroom_10': {'receptacles': [{'name': 'Table', 'feature': 'wooden',
776 | 'pos': {'x': 25.7, 'y': 0.01, 'z': -52.31}},
777 | {'name': 'Bed', 'feature': 'square',
778 | 'pos': {'x': 24.597, 'y': 0.01, 'z': -51.876}}]}}
779 | self.receptacles_information = receptacles_information
780 | self.receptacle_mark(rece)
781 |
782 | def receptacle_mark(self, obj_rec):
783 | recp = []
784 | for rec in obj_rec:
785 | id = rec['itemId']
786 | obj = self.objects[id]
787 | name, id = obj['itemName'], obj['itemId']
788 | position = obj['position']
789 | lis = rec['receptacleBounds']
790 | x_max, x_min = lis[0]['x'], lis[0]['x']
791 | z_max, z_min = lis[0]['z'], lis[0]['z']
792 | for item in lis:
793 | x_max = max(x_max, item['x'])
794 | x_min = min(x_min, item['x'])
795 | z_max = max(z_max, item['z'])
796 | z_min = min(z_min, item['z'])
797 | recp.append({'name': name, 'id': id, 'x_max': x_max, 'y': lis[0]['y'],
798 | 'x_min': x_min, 'z_max': z_max, 'z_min': z_min, 'position': position})
799 | self.receptacles = recp
800 |
801 | def point_determine(self, pos):
802 | point_P = {}
803 | try:
804 | point_P['x'], point_P['y'], point_P['z'] = pos['x'], pos['y'], pos['z']
805 | except:
806 | point_P['x'], point_P['y'], point_P['z'] = pos[0], pos[1], pos[2]
807 | res = None
808 | for room_i in self.room_area:
809 | if (room_i['x'][0] <= point_P['x'] <= room_i['x'][1]) and (
810 | room_i['z'][0] <= point_P['z'] <= room_i['z'][1]):
811 | if abs(point_P['y']-room_i['y']) < 3:
812 | res = room_i['name']
813 | return res
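# Worked example (illustrative; the exact coordinates are placeholders):
# point_determine((-15.0, 0.0, -5.0)) checks each room's x/z bounds and the
# floor height (|y - roomCenter.y| < 3) and returns the matching room name,
# e.g. 'F3_KitchenRoom' for a point lying inside that room's bounds.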
814 |
815 | def object_parsing(self, ins, target=['Chair','Stool']):
816 | datas = eval(ins['statusDetail'])
817 | obj_closed = datas['closeRangeItemIds']
818 | object = None
819 | for i, obj in enumerate(obj_closed):
820 | name = self.objects[obj]['itemName']
821 | for ttt in target:
822 | if ttt.lower() in name.lower():
823 | print("The target: ", name, obj, self.objects[obj])
824 | return obj
825 | # print('There is no {}'.format(target))
826 | return object
827 | # return None
828 |
829 | def object_query(self, target=['Chair', 'Stool']):
830 | tar = []
831 | for i, obj in enumerate(self.objects):
832 | obj_i = obj['itemId']
833 | obj_full_name = obj['itemName']
834 | obj_now = ''.join([char for char in obj_full_name if not char.isdigit()])
835 | for name in target:
836 | if name.lower() == obj_now.lower():
837 | tar.append(obj_i)
838 | return tar
839 |
840 | def get_object_name(self, object_id=1):
841 | name = None
842 | for obj in self.objects:
843 | id = obj['itemId']
844 | if id == object_id:
845 | name = obj['itemName']
846 | return name
847 |
848 | def get_info_from_name(self, object_name):
849 | result = None
850 | for obj in self.objects:
851 | na = obj['itemName']
852 | if na == object_name:
853 | result = obj
854 | return result
855 |
856 | def check_feedback(self, server, id):
857 | time.sleep(0.1)
858 | info = None
859 | for i in range(30):
860 | try:
861 | info = server.notes[id]
862 | break
863 | except Exception as e:
864 | print(len(server.notes))
865 | time.sleep(0.1)
866 | return info
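# Typical request/response round trip used throughout this demo (a minimal
# sketch; assumes a connected Server instance `server` and an ObjectsData
# instance `objects_data` -- both variable names are only for illustration):
#   req_id = server.send_data(2, {"requestIndex": 0, "targetType": 10, "targetId": 0}, 1)
#   nearby = objects_data.check_feedback(server, req_id)  # poll server.notes until the reply with this id arrives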
867 |
868 |
869 | def cleanup_function(stop_event):
870 | stop_event.set()
871 | # stop the loop
872 |
873 |
874 | def agent_plan(server, agent):
875 | agent.get_all_map()
876 | p, information = agent.pos_query()
877 | ob = agent.observation_camera_head()
878 |     return 1  # early return: the calls below are kept only as a usage reference and never run
879 | flo, xx, yy, is_o = server.maps.get_point_info((20.9, 1, -44))
880 | print(flo, xx, yy, is_o)
881 | print(server.maps.maps_info[0]['x0'], server.maps.maps_info[0]['z0'])
882 | # server.maps.draw(19.2, 1, -44)
883 | des = agent.move_to(4)
884 | flo, xx, yy, is_o = server.maps.get_point_info(des)
885 | # agent.navigate(flo, (xx, yy))
886 | # Physical navigation
887 | # agent.move_forward(0.3)
888 | # agent.pos_query()
889 | # Robot position request
890 |
891 | # agent.goto_target_goal((18.0, 0.1, -2.99))
892 | agent.goto_target_goal((-13.9, 0.1, -7.5))
893 | flo, xx, yy, is_o = server.maps.get_point_info((-2.5, 0.1, -2.8))
894 | # adjustment
895 | flo, xx, yy, is_o = server.maps.get_point_info((-12.8, 0.1, 1.7))
896 | rotation_angle = agent.calculate_rotation_angle(xx, yy)
897 | # print(information)
898 | # print('-+++++++++++++++++++++++=', rotation_angle)
899 | # agent.joint_control(5, rotation_angle)
900 | # return
901 | # agent.rotate_right(rotation_angle)
902 | time.sleep(1)
903 | # agent.go_to_there((-2.0, 0.1, 0))
904 | # agent.go_to_there((12.5, 0.1, 0))
905 | # agent.goto_target_goal((27.0, 0.1, -2.99))
906 | agent.ik_control()
907 | time.sleep(2)
908 | agent.goto_target_goal((3.38, 0.1, 5.99))
909 | # Item location information
910 | # {"requestIndex":10,"actionId":6,"result":1,"positionOffset":0.0,"directionOffset":0.0}
911 | # def server_initialization(output_queue):
912 |
913 |
914 | class DevNull:
915 | def write(self, msg):
916 | pass
917 |
918 |
919 | class PrsEnv(object):
920 | def __init__(self, is_print=1, rendering=1, start_up_mode=0):
921 | # is_print: 0 without print, 1 print information to screen;
922 | # rendering=1 with unity render, 0 is headless mode; start_up_mode: 0 manual, 1 automatic
923 | print("PRS environment beta is starting without interaction")
924 | print('Please open the Unity program (start.sh)')
925 |         print('PRS challenge tasks and benchmark are coming soon!')
926 | self.original_stdout = sys.stdout
927 | if not is_print:
928 | dev_null = DevNull()
929 | sys.stdout = dev_null
930 | self.stop_event = threading.Event()
931 | self.server = Server(self.stop_event)
932 | self.npc_running, self.time_running, self.agent_running = 0, 0, 0
933 | connection_thread = threading.Thread(target=self.server.wait_for_connection, args=())
934 | receive_thread = threading.Thread(target=self.server.receive_data, args=())
935 | parsing_thread = threading.Thread(target=self.server.message_process, args=())
936 | connection_thread.start()
937 | receive_thread.start()
938 | parsing_thread.start()
939 | # ---------------server begin-------------------
940 | self.env_time = EnvTime()
941 | # ---------------time system ready-------------------
942 | self.process = 0
943 | # executable_path = 'start.sh'
944 | executable_path = './unity/PRS.x86_64'
945 | if rendering:
946 | command_args = [executable_path]
947 | else:
948 | command_args = [executable_path, '-batchmode']
949 | try:
950 | if start_up_mode:
951 | # Start the Shell script using subprocess.Popen and capture stdout and stderr
952 | self.process = subprocess.Popen(command_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
953 | print("Starting Unity process...")
954 | # If needed, you can add more processing logic here, such as waiting for the process to finish, etc.
955 | except Exception as e:
956 | # Catch any exceptions that occur during startup and print the error message
957 |             print(f"An error occurred during startup: {e}")
958 | # --- unity exe start ---
959 | while True:
960 | time.sleep(0.3)
961 | state = self.server.state
962 |             if state == 2: break
963 | self.objs_data = ObjectsData()
964 | # --------------agent begin---------------
965 | self.agent = Agent(self.server, self.env_time, self.objs_data)
966 | # agent_thread = threading.Thread(target=agent_plan, args=(self.server, self.agent))
967 |         # Start the robot thread
968 | self.agent.get_all_map()
969 | # agent_thread.start()
970 | # ----------------------- npc coming----------------------
971 | npc_0 = Npc(0, self.server, self.env_time, self.objs_data)
972 | npc_1 = Npc(1, self.server, self.env_time, self.objs_data)
973 | npc_2 = Npc(2, self.server, self.env_time, self.objs_data)
974 | npc_3 = Npc(3, self.server, self.env_time, self.objs_data)
975 | npc_4 = Npc(4, self.server, self.env_time, self.objs_data)
976 | npc_5 = Npc(5, self.server, self.env_time, self.objs_data)
977 | npc_6 = Npc(6, self.server, self.env_time, self.objs_data)
978 | npc_7 = Npc(7, self.server, self.env_time, self.objs_data)
979 | npc_8 = Npc(8, self.server, self.env_time, self.objs_data)
980 | npc_9 = Npc(9, self.server, self.env_time, self.objs_data)
981 |
982 | print('start')
983 | self.task = {'type': None, 'npc': None, 'object': None, 'target': None, 'state': 0, 'result': None}
984 | self.npcs = [npc_0, npc_1, npc_2, npc_3, npc_4, npc_5, npc_6, npc_7, npc_8, npc_9]
985 | self.agent.npcs = self.npcs
986 | self.receptacle_mark()
987 | with open('data/npc_data.json', 'r') as file:
988 | npc_data = json.load(file)
989 | self.npc_data = npc_data
990 | time.sleep(0.1)
991 |
992 | # # --------------------------robot ----------------------
993 |
994 | def npc_start(self, number=1):
995 | if not self.time_running:
996 | time_thread = threading.Thread(target=self.env_time.time_simulation, args=(self.stop_event,))
997 | time_thread.start()
998 | self.time_running = 1
999 | if not self.npc_running:
1000 | for npc_i, npc in enumerate(self.npcs):
1001 | if npc_i == number:
1002 | break
1003 | # running_thread = threading.Thread(target=npc.continuous_simulation, args=())
1004 | running_thread = threading.Thread(target=npc.random_walk, args=())
1005 | running_thread.start()
1006 | time.sleep(2)
1007 | self.npc_running = 1
1008 |
1009 | def finish_env(self):
1010 | print('========== Env end ==========')
1011 | self.stop_event.set()
1012 | self.agent.running = 0
1013 | self.server.env_finish(self.process, self.npcs)
1014 | sys.stdout = self.original_stdout
1015 | exit(0)
1016 |
1017 | def sim_speed(self, speed):
1018 | instruction = {"requestIndex": 1, "timeScale": speed}
1019 | action_id = self.server.send_data(12, instruction, 0)
1020 | # print(self.env_time.time_multiplier, speed)
1021 | res = self.server.wait_for_respond(action_id, 10)
1022 | self.env_time.time_multiplier = speed
1023 | return self.env_time.time_multiplier
1024 |
1025 | def object_query(self, obj_id=0):
1026 | instruction = {"requestIndex": 0, "targetType": 1, "targetId": obj_id}
1027 | r_id = self.server.send_data(2, instruction, 1)
1028 | object_info = self.agent.wait_for_respond(r_id, 60)
1029 | if object_info:
1030 | object_info = eval(object_info['statusDetail'])
1031 | return object_info
1032 |
1033 | def receptacle_mark(self):
1034 | # maps_0 = copy.deepcopy(self.server.maps.maps_info[0]['grid'])
1035 | # maps_1 = copy.deepcopy(self.server.maps.maps_info[1]['grid'])
1036 | for floor_i in range(3):
1037 | maps_2 = copy.deepcopy(self.server.maps.maps_info[floor_i]['grid'])
1038 | record = dict()
1039 | for rece in self.objs_data.receptacles:
1040 | # {'name': name, 'id': id, 'x_max': x_max,'x_min': x_min, 'z_max': z_max, 'z_min': z_min}
1041 | x_max, x_min, z_max, z_min, y = rece['x_max'], rece['x_min'], rece['z_max'], rece['z_min'], rece['y']
1042 | floor, map_i1, map_j1, iso = self.server.maps.get_point_info((x_max, y, z_max))
1043 | floor, map_i2, map_j2, iso = self.server.maps.get_point_info((x_min, y, z_min))
1044 | map_i_min, map_i_max = min(map_i1, map_i2), max(map_i1, map_i2)
1045 | map_j_min, map_j_max = min(map_j1, map_j2), max(map_j1, map_j2)
1046 | for ii in range(map_i_min, map_i_max + 1):
1047 | for jj in range(map_j_min, map_j_max + 1):
1048 | if maps_2[ii][jj] == 0:
1049 | maps_2[ii][jj] = 2
1050 | loc = self.objs_data.point_determine((x_min, floor, z_max))
1051 | rece['location'], rece['floor'] = loc, floor
1052 | rece['map_i_min'], rece['map_i_max'] = map_i_min, map_i_max
1053 | rece['map_j_min'], rece['map_j_max'] = map_j_min, map_j_max
1054 | try:
1055 | record[loc]['num'] += 1
1056 | record[loc]['receptacles'].append(rece)
1057 | except:
1058 | record[loc] = {'num': 1}
1059 | record[loc]['receptacles'] = [rece]
1060 | self.objs_data.room_receptacles = record
1061 | self.objs_data.sematic_map[floor_i] = maps_2
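# Resulting grid encoding (as read by Agent.get_room_area / target_direction in
# npc_control.py): 1 = walkable cell, 0 = obstacle, 2 = obstacle cell covered by
# a receptacle footprint; sematic_map keeps one such grid per floor.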
1062 |
1063 |
1064 | if __name__ == '__main__': # pragma nocover
1065 | server = Server()
1066 |
1067 |
1068 |
1069 | '''
1070 | -> Unity: {"requestIndex":10,"npcId":0,"actionId":0,"actionPara":""}
1071 | -> Get : {"requestIndex":11, "result":1}
1072 |
1073 |
1074 | -> Unity: {"requestIndex":10,"npcId":0,"actionId":0,"actionPara":""}
1075 | -> Get : {"result":1}
1076 | '''
1077 |
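# Minimal usage sketch for PrsEnv (assumes the Unity build is reachable at
# ./unity/PRS.x86_64, either already running via start.sh or launched here
# with start_up_mode=1):
#   env = PrsEnv(is_print=1, rendering=1, start_up_mode=0)
#   env.npc_start(number=2)            # start the clock and two wandering NPCs
#   info = env.object_query(obj_id=0)  # ask Unity for one object's status
#   env.sim_speed(2)                   # run the simulation at double speed
#   env.finish_env()                   # stop threads, NPCs and the Unity process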
--------------------------------------------------------------------------------
/npc_control.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import csv
4 | import cv2
5 | import time
6 | import numpy as np
7 | from heapq import heappop, heappush
8 | import copy
9 | import matplotlib.pyplot as plt
10 | from multiprocessing import Process, Queue, Value, Lock
11 | import multiprocessing as mp
12 | from PIL import Image
13 | import io
14 | # robot
15 | from roboticstoolbox.robot.ERobot import ERobot
16 | import roboticstoolbox as rtb
17 | from math import pi
18 | from spatialmath import SE3
19 | from spatialmath.base import *
20 | from collections import Counter
21 |
22 |
23 | def random_number(n):
24 | selected_number = np.random.randint(0, n) # Generate a random number within the interval [0, n-1]
25 | return selected_number
26 |
27 |
28 | class Env(object):
29 | def __init__(self):
30 | self.height_f1 = -16.693447
31 | self.height_f2 = -5.2174
32 | self.height_f3 = -0.0499999
33 | f1, f2, f3 = self.height_f1, self.height_f2, self.height_f3
34 | self.landmark_list = [
35 | 'warehouse',
36 | 'laboratory',
37 | 'clinic',
38 | 'meeting room',
39 | 'office',
40 | 'kitchen',
41 | 'restroom',
42 | 'bedroom',
43 | 'lobby',
44 | 'mark',
45 | 'hallway',
46 | 'hall',
47 | 'wc',
48 | 'elevator'
49 | ]
50 | self.location = {
51 | "F1_EnergyRoom": [(21.65, -16.4, -24.8), (11.5, -16.4, -36.5)],
52 | "F1_ConveyorRoom": [(-0.4, -16.4, -2.8), (9.1, -16.4, -2.9)],
53 | "F1_StorageRoom01": [(10.4, -16.4, -54.1), (10.1, -16.4, -46.3)],
54 | "F1_StorageRoom02": [(1.5, -16.4, -14.1), (9.5, -16.4, -26.5)],
55 | "F2_LabRoom01": [(14.5, -5.1, 4.1), (14.4, -5.1, 7.3)],
56 | "F2_LabRoom02": [(4.8, -5.1, 3.9), (5.1, -5.1, 7.1)],
57 | "F2_LabRoom03": [(-4.9, -5.1, 3.5), (-3.4, -5.1, 6.2)],
58 | "F2_Restroom": [(-15.2, -5.1, 2.2), (-15.9, -5.1, 3.5)],
59 | "F2_WarmRoom": [(15.7, -5.1, -19.5), (15.4, -5.1, -17.1)],
60 | "F2_StorageRoom": [(-11.9, -5.1, 1.7), (-13.4, -5.1, 1.9)],
61 | "F2_ServerRoom": [(15.6, -5.1, -12.1), (14.5, -5.1, -13.1)],
62 | "F2_MedicalRoom01": [(4.8, -5.1, -3.2), (3.6, -5.1, -5.9)],
63 | "F2_MedicalRoom02": [(-4.9, -5.1, -3.6), (-8.5, -5.1, -6.8)],
64 | "F2_MedicalRoom03": [(-12.4, -5.1, -3.7), (-11.5, -5.1, -6.7)],
65 | # "F3_Bedroom": [(, 0.1,), (, 0.1,)],
66 | "F3_GymRoom": [(14.7, 0.1, 3.3), (17.7, 0.1, 4.4)],
67 | "F3_OfficeRoom01": [(-2.1, 0.1, -3.4), (-2.3, 0.1, -7.6)],
68 | "F3_OfficeRoom02": [(-7.2, 0.1, -3.3), (-7.8, 0.1, -7.1)],
69 | "F3_RestRoom": [(-15.5, 0.1, 2.8), (-15.9, 0.1, 3.8)],
70 | "F3_OfficeSpaceRoom": [(4.9, 0.1, 3.2), (3.3, 0.1, 6.1)],
71 | "F3_KitchenRoom": [(-14.7, 0.1, -3.2), (-15.9, 0.1, -6.9)],
72 | "F3_StorageRoom": [(-11.8, 0.1, 2.7), (-13.4, 0.1, 2.1)],
73 | "F3_LivingRoom": [(-4.9, 0.1, 3.3), (-3.1, 0.1, 8.1)],
74 | "F3_ConferenceRoom": [(4.9, 0.1, -3.2), (1.5, 0.1, -4.6)]
75 |
76 | }
77 |
78 | def calculate_distance(self, point1, point2):
79 | # NumPy array
80 | try:
81 | point1_array = np.array([point1['x'], point1['y'], point1['z']], dtype=float)
82 | except:
83 | point1_array = np.array([point1[0], point1[1], point1[2]], dtype=float)
84 | try:
85 | point2_array = np.array([point2['x'], point2['y'], point2['z']], dtype=float)
86 | except:
87 | point2_array = np.array([point2[0], point2[1], point2[2]], dtype=float)
88 | try:
89 | distance = np.linalg.norm(point2_array - point1_array)
90 | except:
91 | print(point1_array, point2_array)
92 | distance = np.linalg.norm(point2_array - point1_array)
93 | return distance
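# Accepts either dict-style or sequence-style points, e.g.
# Env().calculate_distance({'x': 0, 'y': 0, 'z': 0}, (3, 4, 0)) -> 5.0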
94 |
95 |
96 | class Npc(object):
97 | def __init__(self, person_id, sock, env_time, objects):
98 | self.object_data = objects
99 | self.person_id = person_id
100 | self.env = Env()
101 | self.times = env_time
102 | self.server = sock
103 | self.running = 1
104 | self.action_state = 'stand'
105 | self.place_state = 'warehouse'
106 | self.information = self.object_data.characters[self.person_id]
107 | # self.
108 | # -----initialization------
109 | self.height_f1 = -16.693447
110 | self.height_f2 = -5.2174
111 | self.height_f3 = -0.0499999
112 | self.position = [0, 0, 0]
113 | # tar_action, number = 'sit', 0
114 |         # Instruction template index: 0 universal (parameters for turning, sitting, eating, etc.), 1 performance, 2 expressions,
115 |         # 3 go, 4 pick, 5 put, 6 turn, 7 manipulation component
116 | self.instruction_type = [{"requestIndex": 10, "npcId": 0, "actionId": 0, "actionPara": {}},
117 | {"requestIndex": 10, "npcId": 0, "actionId": 400, "actionPara": {"showType": -1}},
118 | {"requestIndex": 10, "npcId": 0, "actionId": 300, "actionPara": {"expressionType":100}},
119 | {"requestIndex": 10, "npcId": 0, "actionId": 1,"actionPara": {"destination":{"x":0.5,"y":1.0,"z":0}}},
120 | {"requestIndex": 10, "npcId": 0, "actionId": 100,"actionPara": {"handType":-1,"itemId":1}},
121 | {"requestIndex": 10, "npcId": 0, "actionId": 101,"actionPara": {"handType":-1,"position":{"x":5.0,"y":12.0,"z":5.0}}},
122 | {"requestIndex": 10, "npcId": 0, "actionId": 2, "actionPara": {"angle":50}},
123 | {"requestIndex": 10, "npcId": 0, "actionId": 204,"actionPara": {"handType":1,"itemId":8,"targetActiveState":True}}
124 | ]
125 | self.mapping_action_type = {0: 0, 1: 3, 2: 0, 10: 0, 100: 4, 101: 5, 102: 0, 300: 2,
126 | 400: 1}
127 | self.actions = {
128 | 'stand': [0],
129 | 'walk': [1],
130 | 'turn': [2],
131 | 'sit': [10],
132 | 'pick': [100],
133 | 'put': [101],
134 | 'eat': [102],
135 | 'operateButton': [200],
136 | 'operateKnob': [201],
137 | 'operateSlider': [202],
138 | 'operateLever': [203],
139 | 'triggerButton': [204],
140 | 'emoji': [300, 301, 302],
141 | # -------show play()----------
142 | 'dance': [400, 100],
143 | 'exercise': [400, 101],
144 | 'playComputer': [400, 102],
145 | 'playGame': [400, 103],
146 | 'layingDownOnDoor': [400, 104],
147 | 'standLookHandObject': [400, 105],
148 | 'sitLookHandObject': [400, 106],
149 | 'waveHandOverHead': [400, 200],
150 | 'waveHandAtWaist': [400, 201],
151 | 'streachHead': [400, 202],
152 | 'interrupt': [400, -1]
153 | # Dance = 100, Exercise = 101, PlayComputer = 102, PlayGame = 103, LayingDownOnDoor = 104, StandLookHandObject = 105, SitLookHandObject = 106,
154 | # //Single animation
155 | # WaveHandOverHead = 200, WaveHandAtWaist = 201, StreachHead = 202
156 | }
157 | self.obj_interaction_action = ['pick', 'put', 'eat', 'operateButton', 'operateKnob', 'operateSlider',
158 | 'operateLever', 'triggerButton']
159 | self.continuous_animation = ['dance', 'exercise', 'playComputer', 'playGame', 'layingDownOnDoor',
160 | 'standLookHandObject', 'sitLookHandObject', 'waveHandOverHead', 'waveHandAtWaist', 'streachHead']
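# How the three tables above combine (a sketch mirroring npc_action() below;
# the intermediate variable names exist only in this comment):
#   action_para = self.actions['dance']                   # -> [400, 100]
#   template = self.mapping_action_type[action_para[0]]   # actionId 400 -> instruction_type[1]
#   ins = copy.deepcopy(self.instruction_type[template])
#   ins['npcId'] = self.person_id
#   ins['actionPara']['showType'] = action_para[1]        # showType 100 = Dance
#   self.action_execution(ins)                            # json-encodes actionPara and sends it to Unity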
161 |
162 | def change_id(self, n):
163 | self.person_id = n
164 |
165 | def go_to_here(self, pos, command=0):
166 | person_id = self.person_id
167 | if not command:
168 | command = {"requestIndex": 10, "npcId": 0, "actionId": 1, "actionPara": {"destination": {"x": 0.5, "y": 1.0, "z": 0}}}
169 | command['npcId'] = person_id
170 | # print(command)
171 | # print(type(command['actionPara']))
172 | command['actionPara']['destination']['x'] = pos[0]
173 | command['actionPara']['destination']['y'] = pos[1]
174 | command['actionPara']['destination']['z'] = pos[2]
175 | command['actionPara'] = json.dumps(command['actionPara'])
176 | re_id = self.server.send_data(1, command, 1)
177 | return re_id
178 |
179 | def where_npc(self):
180 | for npc in range(3):
181 | request_id = self.server.send_data(2, {"requestIndex": 0, "targetType": 0, "targetId": self.person_id}, 1)
182 | time.sleep(0.1)
183 | info = None
184 | for i in range(15):
185 | try:
186 | info = self.server.notes[request_id]
187 | break
188 | except :
189 | time.sleep(0.1)
190 | if info:
191 | try:
192 | info['statusDetail'] = info['statusDetail'].replace("false", "False")
193 | inf = eval(info['statusDetail'])
194 | pos = inf['position']
195 | return pos, info
196 | except Exception as e:
197 | print(e, info)
198 | # return False, info
199 | # else:
200 | return False, info
201 |
202 | def query_information(self):
203 | pos, info = self.where_npc()
204 | datas = None
205 | if pos:
206 | datas = eval(info['statusDetail'])
207 | obj_closed = datas['closeRangeItemIds']
208 | return pos, datas
209 |
210 | def goto_randomly(self, position_tar, radius=1.5, delete_dis=3, times=10):
211 | try:
212 | xx, yy, zz = position_tar[0], position_tar[1], position_tar[2]
213 | except:
214 | xx, yy, zz = position_tar['x'], position_tar['y'], position_tar['z']
215 | floor, point_list = self.server.maps.get_an_accessible_area(xx, yy, zz, radius)
216 | result_go = 0
217 | for i_try in range(times):
218 | if not self.running or self.server.stop_event.is_set() or not self.server.state:
219 | return 0
220 | length = len(point_list)
221 | if length < 1:
222 | break
223 | if result_go == 1:
224 | break
225 | # choose a random point (p_i, p_j)
226 | now = np.random.randint(0, length)
227 | p_i, p_j = point_list[now][0], point_list[now][1]
228 | # translate the grid pos to the world pos
229 | pos_i, pos_j = self.server.maps.get_an_aligned_world_coordinate_randomly(floor, p_i, p_j)
230 | position_go = (pos_i, self.server.maps.floors[floor], pos_j)
231 | # print(' now plan to {}'.format(position_go))
232 | for i in range(2):
233 | if not self.running or self.server.stop_event.is_set() or not self.server.state:
234 | return 0
235 | action_id = self.go_to_here(position_go)
236 | result = 0
237 | while True:
238 | time.sleep(0.5)
239 | if not self.running or self.server.stop_event.is_set() or not self.server.state:
240 | return 0
241 | try:
242 | if self.server.notes[action_id]['informResult'] == 2:
243 | result = self.server.notes[action_id]['informResult']
244 | break
245 | elif self.server.notes[action_id]['informResult'] == 0:
246 | time.sleep(0.5)
247 | break
248 | except: pass
249 | if result == 2:
250 | pos, info = self.where_npc()
251 | if pos:
252 | dis = self.env.calculate_distance(pos, position_go)
253 | if dis < 1:
254 | result_go = 1
255 | break
256 | time.sleep(1.5)
257 | if result_go == 0:
258 |             # Iterate in reverse and delete sampled points within delete_dis grid cells (grid units, not meters) of the failed point
259 | for i in range(len(point_list) - 1, -1, -1):
260 | point = point_list[i]
261 | if np.sqrt((point[0] - p_i) ** 2 + (point[1] - p_j) ** 2) < delete_dis:
262 | del point_list[i]
263 | elif result_go == 1:
264 | # print('$$$$$ arrive at: ', position_go)
265 | return result_go
266 | if result_go == 0: pass
267 | # print('$$$$$not arrive: ', position_tar)
268 | return result_go
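# Summary of the strategy above: sample accessible grid points within `radius`
# of the target, pick one at random, send a walk command and wait for
# informResult == 2 plus a final distance < 1 m; on failure, prune sampled
# points within `delete_dis` grid cells of the failed point and retry, up to
# `times` attempts.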
269 |
270 | def go_to_place(self, tar, specific=1, rad=2, del_d=2, times=20):
271 | destination = self.env.location[tar][specific]
272 | print('^^^^^^^^^^^^ here will be ', destination)
273 | result = 0
274 | result = self.goto_randomly(destination, rad, del_d, times)
275 | return result
276 |
277 | def random_walk(self):
278 | for i in range(1000):
279 | if not self.server.state or self.server.stop_event.is_set() or not self.running:
280 | return 0
281 | random_key = np.random.choice(list(self.env.location.keys()))
282 | location_now = self.env.location[random_key]
283 | result = self.goto_randomly(location_now[1], 2, 2, 20)
284 | if result:
285 | res, obj = self.go_to_object('Seat')
286 | if res:
287 | suc = self.npc_action('sit', obj)
288 | if suc:
289 | time.sleep(10)
290 | self.npc_action('stand')
291 | time.sleep(3)
292 | else:
293 | time.sleep(5)
294 |
295 | def walk_around(self):
296 | time.sleep(1)
297 | for i in range(1000):
298 | if not self.server.state or self.server.stop_event.is_set() or not self.running:
299 | return 0
300 | if i < 3 and False:
301 | continue
302 | n = random_number(13)
303 | # n = i
304 | time.sleep(3)
305 | # print('############### now go to {} -'.format(self.env.landmark_list[n]), i)
306 | destination = self.env.landmark[self.env.landmark_list[n]][1]
307 | print(n, destination)
308 | go_res = self.go_to_place(self.env.landmark_list[n], 1)
309 | # if go_res:
310 | # print('good good good for {} -'.format(self.env.landmark_list[n]), i)
311 | # action_id = self.go_to_here(destination)
312 | # while True:
313 | # time.sleep(1)
314 | # try:
315 | # if self.server.notes[action_id]['informResult'] == 2:
316 | # break
317 | # except Exception as e:
318 | # print('~~~~~~~~~~~~~~~~', e,'---', len(self.server.notes), action_id)
319 | # pos, info = self.where_npc()
320 | # print('$$$$$arrive: ', pos)
321 | time.sleep(0.5)
322 |
323 | def go_to_object(self, target='Seat', name='None_target', random_mode=1):
324 | pos, npc_info = self.query_information()
325 | if not pos:
326 | return 0, 0
327 | items = npc_info['closeRangeItemIds']
328 | all_obj = []
329 | if len(items) != 0:
330 | for item_id in items:
331 | item_info = self.object_data.objects[item_id]
332 | if not item_info['isOccupied']:
333 | if target in item_info['features'] or name.lower() in item_info['itemName'].lower() :
334 | item_info = self.server.object_query(item_id)
335 | all_obj.append(item_info)
336 | else:
337 | return 0, 0
338 | if len(all_obj) == 0:
339 | return 0, 0
340 | if random_mode == 1:
341 | target_obj = np.random.choice(all_obj)
342 | else:
343 | target_obj = all_obj[0]
344 | if target_obj == None:
345 | return 0, 0
346 | pos = target_obj['position']
347 | res = self.goto_randomly(pos, 1, 2, 10)
348 | return res, target_obj['itemId']
349 |
350 | def get_now_time(self):
351 | week = self.times.weekday_now()
352 | hour = self.times.current_date.hour
353 | minute = self.times.current_date.minute
354 | # second = self.times.current_date.second
355 | return week, hour, minute
356 |
357 | def continuous_simulation(self, length=10):
358 | npc_day = {0: [
359 | # 'id': 0, "schedule":
360 | ['exercise', 'hallway'], ['stand', 'bedroom'], ['stand', 'bedroom'], ['stand', 'bedroom'],
361 | ['stand', 'bedroom'], ['stand', 'bedroom'], ['stand', 'bedroom'], ['stand', 'bedroom'],
362 | # -> 0-8h: room -> 9-11h: office -> 12-13h: kitchen
363 | ['sit', 'office'], ['playComputer', 'office'], ['playComputer', 'office'], ['playComputer', 'office'],
364 | ['sitLookHandObject', 'office'],
365 | # -> 14-15h: dating 16-17h: meetingroom 18-19h: kitchen
366 | ['layingDownOnDoor', 'office'], ['playComputer', 'office'], ['playComputer', 'office'],
367 | ['playComputer', 'office'], ['layingDownOnDoor', 'office'], ['sitLookHandObject', 'office'],
368 | # 20-21h: lobby 22-23h: bedroom
369 | ['playComputer', 'office'], ['playComputer', 'office'], ['layingDownOnDoor', 'office'], ['playComputer', 'office'], ['playComputer', 'office']
370 | ], -1: [
371 | # 'id': 0, "schedule":
372 | ['stand', 'bedroom'], ['stand', 'bedroom'], ['stand', 'bedroom'], ['stand', 'bedroom'],
373 | ['stand', 'bedroom'], ['stand', 'hallway'], ['stand', 'bedroom'], ['stand', 'bedroom'],
374 | # -> 0-8h: room -> 9-11h: office -> 12-13h: kitchen
375 | ['sit', 'office'], ['playComputer', 'office'], ['playComputer', 'office'], ['sit', 'kitchen'],
376 | ['sit', 'kitchen'],
377 | # -> 14-15h: dating 16-17h: meetingroom 18-19h: kitchen
378 | ['sit', 'mark'], ['playGame', 'mark'], ['waveHandAtWaist', 'meeting room'],
379 | ['waveHandOverHead', 'meeting room'], ['sit', 'kitchen'], ['sitLookHandObject', 'kitchen'],
380 | # 20-21h: lobby 22-23h: bedroom
381 | ['exercise', 'lobby'], ['dance', 'lobby'], ['stand', 'bedroom'], ['stand', 'bedroom'], ['stand', 'bedroom']
382 | ], 1:
383 | [
384 | # 'id': 0, "schedule":
385 |
386 | # -> 0-8h: room -> 9-11h: office -> 12-13h: kitchen
387 | ['sit', 'office'], ['playComputer', 'office'], ['playComputer', 'office'], ['sit', 'kitchen'], ['exercise', 'hall'],
388 | ['sit', 'kitchen'],
389 | # -> 14-15h: dating 16-17h: meetingroom 18-19h: kitchen
390 | ['sit', 'mark'], ['exercise', 'mark'], ['waveHandAtWaist', 'meeting room'],
391 | ['waveHandOverHead', 'meeting room'], ['sit', 'kitchen'], ['sitLookHandObject', 'kitchen'],
392 | # 20-21h: lobby 22-23h: bedroom
393 | ['exercise', 'lobby'], ['dance', 'lobby'], ['stand', 'bedroom'], ['stand', 'laboratory'], ['stand', 'laboratory'],
394 | ['stand', 'laboratory'], ['stand', 'mark'], ['stand', 'bedroom'],
395 | ['stand', 'mark'], ['exercise', 'mark'], ['stand', 'restroom'], ['dance', 'restroom']
396 | ], 2:
397 | [
398 | # 'id': 0, "schedule":
399 | ['sit', 'mark'], ['playGame', 'mark'], ['waveHandAtWaist', 'meeting room'],
400 | ['waveHandOverHead', 'hallway'], ['waveHandAtWaist', 'hall'], ['stand', 'lobby'], ['stand', 'restroom'],
401 | ['streachHead', 'restroom'], ['stand', 'bedroom'], ['dance', 'bedroom'], ['streachHead', 'hall'],
402 | # -> 0-8h: room -> 9-11h: office -> 12-13h: kitchen
403 | ['stand', 'clinic'], ['stand', 'clinic'], ['playComputer', 'clinic'], ['playComputer', 'clinic'],
404 | ['stand', 'kitchen'], ['waveHandOverHead', 'hallway'], ['exercise', 'hall'], ['stand', 'lobby'],
405 | # -> 14-15h: dating 16-17h: meetingroom 18-19h: kitchen
406 | ['waveHandOverHead', 'meeting room'], ['sit', 'kitchen'], ['sitLookHandObject', 'kitchen'],
407 | # 20-21h: lobby 22-23h: bedroom
408 | ['waveHandOverHead', 'hallway'], ['dance', 'meeting room']
409 | ]
410 | }
411 | for day_i in range(length):
412 | week, hour, min = self.get_now_time()
413 | print('############ now is {} '.format(week), hour, min)
414 | self.one_day(npc_day[self.person_id])
415 | if not self.server.state or self.server.stop_event.is_set() or not self.running:
416 | return 0
417 | while True:
418 | week_n, hour_n, min_n = self.get_now_time()
419 | if week != week_n:
420 | print('---------hey folks, new day is coming --------')
421 | break
422 |
423 | def one_day(self, a_day):
424 | # Get hours, minutes, and seconds
425 | week, hour, min = self.get_now_time()
426 | print('############now ', hour, min)
427 | while hour < 23:
428 | week, hour_now, min = self.get_now_time()
429 | if not self.server.state or self.server.stop_event.is_set() or not self.running:
430 | return 0
431 | if hour_now != hour or min > 55:
432 | hour = hour_now
433 | tar_action, tar_place = a_day[hour][0], a_day[hour][1]
434 | print('@@@@@@@ now is : {} {} {}'.format(hour, tar_action, tar_place))
435 | print(self.action_state, '++++++++++', self.place_state)
436 | if tar_action != self.action_state or tar_place != self.place_state:
437 | if self.action_state == 'sit':
438 | action_id = self.mapping_action_type[self.actions['stand'][0]]
439 | ins = copy.deepcopy(self.instruction_type[action_id])
440 | ins['actionId'] = 0
441 | ins['npcId'] = self.person_id
442 | self.action_execution(ins)
443 | elif self.actions[self.action_state][0] == 400:
444 |                         print('should interrupt the current animation')
445 | action_id = self.mapping_action_type[self.actions['interrupt'][0]]
446 | ins = copy.deepcopy(self.instruction_type[action_id])
447 | ins['actionPara']["showType"] = -1
448 | ins['npcId'] = self.person_id
449 | print(action_id, 'interrupt', ins)
450 | self.action_execution(ins)
451 |
452 | if tar_place != self.place_state:
453 | res = self.go_to_place(tar_place)
454 | if res:
455 | self.place_state = tar_place
456 | if tar_action != self.action_state:
457 |
458 | action_para = self.actions[tar_action]
459 | instruct = self.mapping_action_type[action_para[0]]
460 | print(action_para,'oooooooooo',instruct)
461 | ins_template = copy.deepcopy(self.instruction_type[instruct])
462 |
463 |                     # Interrupt first, then sit down ------------
464 | ins_template['npcId'] = self.person_id
465 | if action_para[0]<300:
466 | ins_template['actionId'] = action_para[0]
467 | if tar_action == 'stand':
468 | pass
469 | elif tar_action == 'sit':
470 | pos, info = self.where_npc()
471 | if pos:
472 | tar = self.object_data.object_parsing(info)
473 | if tar:
474 | print(self.check_object_status(tar))
475 | # tar = self.object_data.object_parsing(self.near_items, ['Stool'])
476 | ins_template['actionPara']["itemId"] = tar
477 | else:
478 | ins_template['actionPara']["itemId"] = 1
479 | # sit fail
480 | # continue
481 | elif tar_action == 'pick':
482 | ins_template['actionPara']["itemId"] = 1
483 | ins_template['actionPara']["handType"] = -1
484 | elif tar_action == 'put':
485 | ins_template['actionPara']["position"] = {"x": 1, "y": 1, 'z': 1}
486 | ins_template['actionPara']["handType"] = -1
487 | elif tar_action == 'eat':
488 | ins_template['actionPara']["handType"] = -1
489 | elif action_para[0] == 400:
490 | # print('show show show')
491 | ins_template['actionPara']["showType"] = action_para[1]
492 | res = self.action_execution(ins_template)
493 | if res > 0:
494 | self.action_state = tar_action
495 | # print('*********successfully: ', tar_action, ' - ', tar_place)
496 | time.sleep(2)
497 | time.sleep(0.5)
498 | return 1
499 |
500 | def npc_action(self, tar_action, tar_object=0):
501 | action_para = self.actions[tar_action]
502 | instruct = self.mapping_action_type[action_para[0]]
503 | # print(action_para, 'oooooooooo', instruct)
504 | ins_template = copy.deepcopy(self.instruction_type[instruct])
505 | # ------------
506 | ins_template['npcId'] = self.person_id
507 | if action_para[0] < 300:
508 | ins_template['actionId'] = action_para[0]
509 | if tar_action == 'stand':
510 | pass
511 | elif tar_action == 'sit':
512 | if tar_object:
513 | ins_template['actionPara']["itemId"] = tar_object
514 | # pos, info = self.where_npc()
515 | # if pos:
516 | # tar = self.object_data.object_parsing(info)
517 | # if tar:
518 | # # tar = self.object_data.object_parsing(self.near_items, ['Stool'])
519 | # ins_template['actionPara']["itemId"] = tar
520 | # else:
521 | # ins_template['actionPara']["itemId"] = 0
522 |
523 | elif tar_action == 'pick':
524 | if tar_object:
525 | ins_template['actionPara']["itemId"] = tar_object
526 | ins_template['actionPara']["handType"] = -1
527 | elif tar_action == 'put':
528 | ins_template['actionPara']["position"] = {"x": 1, "y": 1, 'z': 1}
529 | ins_template['actionPara']["handType"] = -1
530 | elif tar_action == 'eat':
531 | ins_template['actionPara']["handType"] = -1
532 | elif action_para[0] == 400:
533 | # print('show show show')
534 | ins_template['actionPara']["showType"] = action_para[1]
535 | res = self.action_execution(ins_template)
536 | if res > 0:
537 | if res == 1:
538 | time.sleep(0.5)
539 | self.action_state = tar_action
540 | # print('*********successfully: ', tar_action, ' - ')
541 | return res
542 |
543 | def action_execution(self, ins):
544 | ins['actionPara'] = json.dumps(ins['actionPara'])
545 | action_id = self.server.send_data(1, ins, 1)
546 | res = 0
547 | for ii in range(20):
548 | time.sleep(0.3)
549 | try:
550 | res = self.server.notes[action_id]['informResult']
551 | if res > 1:
552 | break
553 | elif res == 0 or ii > 2:
554 | break
555 | except Exception as e:
556 | pass
557 | # print('~~~~~~~~~~~~~~~~', e, '---', len(self.server.notes), action_id)
558 | # pos = self.where_npc()
559 | # print('$$$$$arrive: ', pos)
560 | return res
561 |
562 | def check_object_status(self, target=1):
563 | for npc in range(3):
564 | request_id = self.server.send_data(2, {"requestIndex": 0, "targetType": 1, "targetId": target}, 1)
565 | time.sleep(0.1)
566 | info = None
567 | for i in range(9):
568 | try:
569 | info = self.server.notes[request_id]
570 | break
571 | except :
572 | time.sleep(0.1)
573 | if info:
574 | try:
575 | info['statusDetail'] = info['statusDetail'].replace("false", "False")
576 | fea = eval(info['statusDetail'])['features']
577 | # 'position', 'diretion', 'itemName'
578 | return info, fea
579 | except Exception as e:
580 | print(e, info)
581 | return info, None
582 |
583 | def observation_surrounding(self):
584 | ins = {"requestIndex": 0, "targetType": 10, "targetId": self.person_id}
585 |         action_id, res = self.server.send_data(2, ins, 1), None  # res defaults to None so the check below is safe if no reply arrives
586 | for ii in range(30):
587 | time.sleep(0.3)
588 | try:
589 | res = self.server.notes[action_id]
590 | break
591 | except Exception as e: pass
592 | if not res:
593 | return res
594 | img = json.loads(res["statusDetail"])
595 | im = img["multiVisionBytes"][0]['bytes']
596 | byte_array = bytes(im)
597 | # Load and display PNG files
598 | nparr = np.frombuffer(byte_array, np.uint8)
599 | image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
600 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
601 | # image = Image.open(io.BytesIO(byte_array))
602 | # print(image.size)
603 | # Display Image
604 | # image.show()
605 | # image.save('img1.png')
606 | return image
607 |
608 |
609 | class Agent(object):
610 | def __init__(self, sock, env_time, objects):
611 | self.object_data = objects
612 | self.times = env_time
613 | self.running = 1
614 | self.npcs = []
615 |
616 | self.is_grasp = None
617 | self.robot = PRS_IK()
618 | # robot ik algorithm
619 | self.env = Env()
620 | self.server = sock
621 | self.agent_state = 1
622 | self.current_info = None
623 | self.check_for_MLLM = None
624 | self.temporary_data = None
625 | self.height_f1 = -16.693447
626 | self.height_f2 = -5.2174
627 | self.height_f3 = -0.0499999
628 | self.position_agent = None # [0,0,0]
629 | # x y z
630 | self.direction_degree = None
631 | self.direction_vector = {'x': None,'y': None}
632 | self.map_position_agent = {'x': None, 'y': None, 'floor': None}
633 |
634 | def request_interaction(self, type=0):
635 |         # type=0 head camera, 1 hand camera; avoid calling this function while the robot is moving
636 | ob_rgb = self.observation_camera(type)
637 | seg, tags = self.get_segmentation(type, 1)
638 | if isinstance(ob_rgb, np.ndarray) and isinstance(seg, np.ndarray) and tags:
639 | self.check_for_MLLM = {'seg_matrix': seg, 'tags': tags}
640 | return ob_rgb
641 | return 0
642 |
643 | def interaction(self, input_matrix, manipulaton=1):
644 |         # input_matrix: binary mask, 0 = background, 1 = target pixels
645 |         # manipulation=0 view for recognition, manipulation=1 grasp
646 | target_list = []
647 | if self.check_for_MLLM:
648 | for i in range(input_matrix.shape[0]):
649 | for j in range(input_matrix.shape[1]):
650 | if input_matrix[i][j]:
651 | target_list.append(self.check_for_MLLM['seg_matrix'][i][j])
652 | counter = Counter(target_list)
653 | try:
654 | most_common_element, occurrences = counter.most_common(1)[0]
655 | except:
656 | print('no object or target in the input matrix')
657 | return 0
658 | target_id = None
659 | if most_common_element and occurrences/len(target_list) > 0.5:
660 | target_obj = self.check_for_MLLM['tags'][int(most_common_element)]
661 | try:
662 | split_list = target_obj.split('_')
663 | target_id = int(split_list[1])
664 | obj_n = split_list[0]
665 | except: pass
666 | if target_id is None: return 0
667 | print(target_id, most_common_element, occurrences)
668 | if manipulaton == 1:
669 | tar_obj_info = self.object_information_query(obj_id=target_id)
670 | if not tar_obj_info: return 0
671 | self.goto_target_goal(tar_obj_info['position'], 1, 1, position_mode=0)
672 | re = self.direction_adjust(position=tar_obj_info['position'])
673 | a = self.ik_calculation(tar_obj_info['position'])
674 | if a:
675 | self.arm_control(a)
676 | self.grasp_object(target_id)
677 | return 0
678 |
679 | def object_interaction(self, input_matrix, manipulaton=1, type=0):
680 |         # manipulation=0 view for recognition, manipulation=1 grasp, 2 approach
681 |         # camera type=0 head camera, 1 hand camera; avoid calling this function while the robot is moving
682 | ob_rgb = self.observation_camera(type)
683 | seg, tags = self.get_segmentation(type, 1)
684 | if isinstance(ob_rgb, np.ndarray) and isinstance(seg, np.ndarray) and tags:
685 | check_for_LMM = {'seg_matrix': seg, 'tags': tags}
686 | else: return 0
687 | target_list = []
688 | if np.sum(input_matrix) == 0:
689 | return 0
690 | if check_for_LMM:
691 | for i in range(input_matrix.shape[0]):
692 | for j in range(input_matrix.shape[1]):
693 | if input_matrix[i][j] > 0:
694 | target_list.append(check_for_LMM['seg_matrix'][i][j])
695 | counter = Counter(target_list)
696 | element_list = []
697 | try:
698 | most_common_element, occurrences = counter.most_common(1)[0]
699 | num = min(len(counter), 3)
700 | element_list = counter.most_common(num)
701 | except:
702 | print('no object or target in the input matrix')
703 | return 0
704 | for element in element_list:
705 | most_common_element, occurrences = element
706 | target_id, is_npc = None, False
707 | if most_common_element and occurrences / len(target_list) > 0.33:
708 | target_obj = check_for_LMM['tags'][int(most_common_element)]
709 | try:
710 | split_list = target_obj.split('_')
711 | if split_list[0].lower() == 'npc':
712 | target_id = int(split_list[2])
713 | is_npc = True
714 | else:
715 | target_id = int(split_list[-1])
716 | obj_n = split_list[0]
717 | except:
718 | pass
719 | if target_id is None: return 0
720 | if is_npc:
721 | pos, tar_obj_info = self.npcs[target_id].query_information()
722 | else:
723 | tar_obj_info = self.object_information_query(obj_id=target_id)
724 | if not tar_obj_info: return 0
725 | if manipulaton == 1:
726 | if "Grabable" not in tar_obj_info['features']: continue
727 | pos, info = self.pos_query()
728 | if self.env.calculate_distance(tar_obj_info['position'], pos) > 3.0:
729 | return 0
730 | elif self.env.calculate_distance(tar_obj_info['position'], pos) > 1.0:
731 | self.goto_target_goal(tar_obj_info['position'], 1, 1, position_mode=0)
732 | re = self.direction_adjust(position=tar_obj_info['position'])
733 | a = self.ik_calculation(tar_obj_info['position'])
734 | if a:
735 | self.arm_control(a)
736 | time.sleep(1)
737 | res = self.grasp_object(target_id)
738 | self.joint_control(joint_id=5, target=0)
739 | if res:
740 | return res
741 | elif manipulaton == 2:
742 | res = self.goto_target_goal(tar_obj_info['position'], 1.5, 1, position_mode=0)
743 | return res
744 | return 0
745 |
746 | def get_room_area(self, target_room='F3_KitchenRoom'):
747 | room_info = None
748 | for room_i in self.object_data.room_area:
749 | if room_i['name'] == target_room:
750 | room_info = room_i
751 | break
752 | if room_info is None: return None
753 | room_accessible_area = []
754 | x_max, x_min, z_max, z_min, y = room_info['x'][1], room_info['x'][0], room_info['z'][1], room_info['z'][0], room_info['y']
755 | floor, map_i1, map_j1, iso = self.server.maps.get_point_info((x_max, y, z_max))
756 | floor, map_i2, map_j2, iso = self.server.maps.get_point_info((x_min, y, z_min))
757 | map_i_min, map_i_max = min(map_i1, map_i2), max(map_i1, map_i2)
758 | map_j_min, map_j_max = min(map_j1, map_j2), max(map_j1, map_j2)
759 | map = copy.deepcopy(self.object_data.sematic_map[floor])
760 | map = np.array(map)
761 | for ii in range(map_i_min, map_i_max + 1):
762 | for jj in range(map_j_min, map_j_max + 1):
763 | if map[ii][jj] == 1:
764 | close_to_obstacle = False
765 | for iii in range(max(0, ii - 1), min(map.shape[0], ii + 2)):
766 | for jjj in range(max(0, jj - 1), min(map.shape[1], jj + 2)):
767 | if map[iii, jjj] == 0:
768 | close_to_obstacle = True
769 | if not close_to_obstacle:
770 | room_accessible_area.append((ii, jj))
771 | map[ii][jj] = 3
772 | return room_accessible_area
773 |
774 | def get_receptacles_within_room(self, room_name='F3_KitchenRoom'):
775 | room_receptacle = self.object_data.room_receptacles[room_name]['receptacles']
776 | room_receptacles = [[i, rec['name']] for i, rec in enumerate(room_receptacle)]
777 |         # Annotated receptacle descriptions and names, e.g. the yellow table
778 | return room_receptacles
779 |
780 | def calculate_2D_distance(self, point1, point2):
781 | dis = np.sqrt((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2)
782 | return dis
783 |
784 | def goto_receptacle(self, room='F3_KitchenRoom', recepacle=0, random=0):
785 | room_receptacle = self.object_data.room_receptacles[room]['receptacles'][recepacle]
786 | pos, info = self.pos_query()
787 | floor_robot, robot_i, robot_j = self.map_position_agent['floor'], self.map_position_agent['x'], self.map_position_agent['y']
788 | floor_receptacle, rec_i, rec_j, _ = self.server.maps.get_point_info(room_receptacle['position'])
789 |         if floor_receptacle != floor_robot:
790 |             print('robot and receptacle are not on the same floor!')
791 | return 0,0
792 | width = abs(room_receptacle['map_i_max'] - room_receptacle['map_i_min'])
793 | length = abs(room_receptacle['map_j_max'] - room_receptacle['map_j_min'])
794 | scale = self.server.maps.maps_info[floor_robot]['scale']
795 | ob_distance = np.sqrt(width**2+length**2)*1.0
796 | free_area = self.get_room_area(room)
797 | reasonable_points = []
798 | for po in free_area:
799 | if self.calculate_2D_distance(po, (rec_i, rec_j)) < ob_distance:
800 | reasonable_points.append(po)
801 | if len(reasonable_points) == 0: return 0, 0
802 | distances = [np.sqrt((i - robot_i) ** 2 + (j - robot_j) ** 2) for i, j in reasonable_points]
803 | sorted_valid_points = [point for _, point in sorted(zip(distances, reasonable_points))]
804 | target_point = sorted_valid_points[0]
805 | # target_point = reasonable_points[0]
806 | if random:
807 | random_i = np.random.randint(0, len(reasonable_points))
808 | target_point = reasonable_points[random_i]
809 | res = self.goto_target_goal((floor_robot, target_point[0], target_point[1]), radius=1, delete_dis=1, position_mode=1)
810 | if res:
811 | self.head_camera_look_at(room_receptacle['position'], accuracy=1)
812 | return res, room_receptacle
813 |
814 | def depth_estimation(self, matrix_target, depth, field_of_view=90):
815 | target_depth = np.multiply(matrix_target, depth)
816 | sum_non_zero = np.sum(target_depth)
817 | count_non_zero = np.count_nonzero(target_depth)
818 | average_depth = sum_non_zero / count_non_zero
819 | non_zero_indices = np.nonzero(target_depth)
820 | # non_zero_elements = target_depth[non_zero_indices]
821 | # average_non_zero = np.mean(non_zero_elements)
822 | x_indices, y_indices = non_zero_indices[0], non_zero_indices[1]
823 | average_x, average_y = np.mean(x_indices), np.mean(y_indices)
824 | y = matrix_target.shape[1]
825 | degree = ((average_y-150)/y) * 90
826 | return average_depth, degree
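# Worked example (assuming a 300-pixel-wide image, so column 150 is the centre):
# pixels averaging column 225 give degree = ((225 - 150) / 300) * 90 = 22.5,
# i.e. about 22.5 degrees off the camera axis.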
827 |
828 | def target_direction(self, degree=20, distance=1, target_degree_view=0):
829 |         # degree is the camera joint angle relative to the robot's heading
830 | pos, info = self.pos_query()
831 | direction_degree = self.direction_degree
832 | # degree = info['jointJointTarget']
833 | camera_direction = - degree + direction_degree
834 | print(camera_direction, direction_degree, self.direction_vector)
835 | floor_r, r_i, r_j = self.map_position_agent['floor'], self.map_position_agent['x'], self.map_position_agent['y']
836 | # FOV field of view = 90, target_degree_view: left -, right +
837 | camera_direction = camera_direction - target_degree_view
838 | scale = self.server.maps.maps_info[floor_r]['scale']
839 | target_dis = distance / scale
840 | map = copy.deepcopy(self.object_data.sematic_map[floor_r])
841 | camera_direction = np.deg2rad(camera_direction)
842 | target_i, target_j = r_i + np.cos(camera_direction)*target_dis, r_j + np.sin(camera_direction)*target_dis
843 | map[round(target_i)][round(target_j)] = 5
844 | # plt.imshow(map)
845 | # plt.grid(False)
846 | # plt.show()
847 | approximate_point = (round(floor_r), round(target_i), round(target_j))
848 | return approximate_point
849 |
850 | def observation(self, degree=0, camera=0, up_down=10):
851 |         # up_down is the depression angle; camera 0 is the head camera, 1 is the hand camera
852 | if camera == 0:
853 | self.joint_control(joint_id=4, target=up_down)
854 | self.joint_control(joint_id=14, target=degree)
855 | ob_rgb = self.observation_camera(camera)
856 | elif camera == 1:
857 | if abs(degree) > 150:
858 | return None
859 | self.joint_control(joint_id=5, target=degree)
860 | ob_rgb = self.observation_camera(camera)
861 | return ob_rgb
862 |
863 | def look360(self, pitch=0):
864 | m0 = self.observation(-90, up_down=pitch)
865 | m1 = self.observation(0, up_down=pitch)
866 | m2 = self.observation(90, up_down=pitch)
867 | m3 = self.observation(180, up_down=pitch)
868 | fig, axs = plt.subplots(nrows=1, ncols=4, figsize=(12, 4))
869 |
870 | axs[0].imshow(m0)
871 | axs[0].set_title('Matrix m0')
872 | axs[0].axis('off')
873 |
874 | axs[1].imshow(m1)
875 | axs[1].set_title('Matrix m1')
876 | axs[1].axis('off')
877 |
878 | axs[2].imshow(m2)
879 | axs[2].set_title('Matrix m2')
880 | axs[2].axis('off')
881 |
882 | axs[3].imshow(m3)
883 | axs[3].set_title('Matrix m3')
884 | axs[3].axis('off')
885 |
886 | plt.tight_layout()
887 | plt.show()
888 | return [m0, m1, m2, m3]
889 |
890 | def goto_and_grasp(self, obj_name=None, target_id=None):
891 | if target_id is None:
892 | tars = self.object_data.object_query([obj_name])
893 | if len(tars) == 0: return 0
894 | target_id = tars[0]
895 | tar_obj_info = self.object_information_query(obj_id=target_id)
896 | if not tar_obj_info: return 0
897 | print(tar_obj_info)
898 | self.goto_target_goal(tar_obj_info['position'], 1, 1, position_mode=0)
899 | re = self.direction_adjust(position=tar_obj_info['position'])
900 | a = self.ik_calculation(tar_obj_info['position'])
901 | if a:
902 | print('---------', a)
903 | else:
904 | a = [-0.2, 0, 0.4, 0.6, 0.3]
905 | self.arm_control(a)
906 | self.grasp_object(target_id)
907 | self.joint_control(joint_id=5, target=0)
908 |
909 | def object_information_query(self, obj_id=0):
910 | instruction = {"requestIndex": 0, "targetType": 1, "targetId": obj_id}
911 | r_id = self.server.send_data(2, instruction, 1)
912 | object_info = self.wait_for_respond(r_id, 60)
913 | if object_info:
914 | object_info = eval(object_info['statusDetail'])
915 | return object_info
916 |
917 | def ik_calculation(self, pos_world):
918 | try:
919 | x, y, z = pos_world['x'], pos_world['y'], pos_world['z']
920 | pos_world = {'position': {'x': x, 'y': y, 'z': z}}
921 | except:
922 | pos_world = {'position': {'x': pos_world[0], 'y': pos_world[1], 'z': pos_world[2]}}
923 | pos_transform = {"requestIndex": 1, "actionId": 201, "actionPara": json.dumps(pos_world)}
924 | # ----------------- get the object information-----------------
925 | r_id = self.server.send_data(5, pos_transform, 1)
926 | obj_info1 = self.wait_for_respond(r_id, 75)
927 | if not obj_info1:
928 | return 0
929 | pos_relative = eval(obj_info1['information'])['position']
930 | # print(obj_info1, '---------IK relative position---')
931 | joint_targets = self.ik_process(pos_relative['x'], 0, pos_relative['z'])
932 | return joint_targets
933 |
934 | def arm_control(self, joints_tar=[0, 0, 0, 0, 0]):
935 | target_execute = {"requestIndex": 1, "actionId": 3,
936 | "actionPara": json.dumps({'result': 1, 'data': joints_tar})}
937 | r_id = self.server.send_data(5, target_execute, 1)
938 | robot_info1 = self.wait_for_respond(r_id, 300)
939 | # print(robot_info1, '======---------IK perform')
940 | return robot_info1
941 |
942 | def initial_pose(self):
943 | a = [-0.35, 0, 0.3, 0.3, 0.1]
944 | return self.arm_control(a)
945 |
946 | def grasp_object(self, obj_id):
947 | if not self.is_grasp:
948 | grasp_execute = {"requestIndex": 1, "actionId": 4, "actionPara": json.dumps({'itemId': obj_id})}
949 | r_id = self.server.send_data(5, grasp_execute, 1)
950 | robot_info2 = self.wait_for_respond(r_id, 60)
951 | if robot_info2:
952 | self.is_grasp = obj_id
953 | return robot_info2
954 | return None
955 |
956 | def release_object(self):
957 | if self.is_grasp:
958 | release_execute = {"requestIndex": 1, "actionId": 5}
959 | r_id = self.server.send_data(5, release_execute, 1)
960 | robot_info3 = self.wait_for_respond(r_id, 60)
961 | if robot_info3:
962 | # print(robot_info3, '======---------release')
963 | self.is_grasp = None
964 | result = self.joint_control(5, 0)
965 | return robot_info3
966 | return None
967 |
968 | def joint_control(self, joint_id, target, radian=1):
969 | if radian:
970 | target = np.radians(target)
971 | target_execute = {"requestIndex": 1, "actionId": 2, "actionPara": json.dumps({'jointId': joint_id, 'data': target})}
972 | r_id = self.server.send_data(5, target_execute, 1)
973 | robot_info = self.wait_for_respond(r_id, 100)
974 | if robot_info:
975 | return 1
976 | return 0
977 |
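# Helper that polls self.server.notes for the reply to a given request id,
# sleeping 0.1 s per attempt, so `times` is roughly the timeout in tenths of a second.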
978 | def wait_for_respond(self, request_id, times=60):
979 | info = None
980 | for ii in range(int(times)):
981 | time.sleep(0.1)
982 | try:
983 | info = self.server.notes[request_id]
984 | break
985 | except Exception as e: pass
986 | return info
987 |
988 | def move_forward(self, dis=1.0):
989 | instruct = {"requestIndex": 0, "actionId": 0, "actionPara": {"distance": 1.0}}
990 | instruct["actionPara"]["distance"] = dis
991 | instruct['actionPara'] = json.dumps(instruct['actionPara'])
992 | r_id = self.server.send_data(5, instruct, 1)
993 | start_time = time.time()
994 | time.sleep(dis)
995 | while True:
996 | time.sleep(0.2)
997 | try:
998 | # {'requestIndex': 0, 'actionId': 0, 'result': 1}
999 | if self.server.notes[r_id]['result'] == 1:
1000 | break
1001 | except Exception as e:
1002 | print('~~~~~~~~~~~~~~~~', e, '---', len(self.server.notes), r_id)
1003 | # Record End Time
1004 | end_time = time.time()
1005 | # Calculate the running time of movement
1006 | execution_time = end_time - start_time
1007 | # print("Move forward: ", execution_time, "s")
1008 | return 1
1009 |
1010 | def rotate_right(self, degree=10):
1011 | result = [degree / 2]
1012 | for angle in result:
1013 | ins = {"requestIndex": 1, "actionId": 1, "actionPara": {"degree": angle}}
1014 | ins['actionPara'] = json.dumps(ins['actionPara'])
1015 | r_id = self.server.send_data(5, ins, 1)
1016 | res = self.wait_for_respond(r_id, 60)
1017 | time.sleep(0.5)
1018 | return res
1019 |
1020 | def get_all_map(self):
1021 | for map_i in range(3):
1022 | re_id = self.server.send_data(2, {"requestIndex": 101, "targetType": 2, "targetId": map_i}, 1)
1023 | time.sleep(0.1)
1024 | while True:
1025 | try:
1026 | info = self.server.notes[re_id]
1027 | ma = info['statusDetail']
1028 | dict_ma = eval(ma)
1029 | self.server.maps.add_room(dict_ma)
1030 | break
1031 | except Exception as e:
1032 | pass # print('~~~~~~~~map~~~~~~~~', e)
1033 | time.sleep(0.1)
1034 | # print('get all map: ', type(self.server.maps.floor1), type(self.server.maps.floor2), type(self.server.maps.floor3))
1035 |
1036 | def pos_query(self):
1037 | info, inf, pos = None, None, None
1038 | for request_i in range(3):
1039 | request_id = self.server.send_data(5, {"requestIndex": 10, "actionId": 10}, 1)
1040 | time.sleep(0.1)
1041 | info = None
1042 | # {'requestIndex': 10, 'actionId': 0, 'information': '{"position":{"x":-8.397454261779786,"y":0.0027088536880910398,"z":-1.1824144124984742},
1043 | # "diretion":{"x":0.9973454475402832,"y":-0.01950152963399887,"z":0.07015550881624222}}'}
1044 | pos = None
1045 | for i in range(6):
1046 | try:
1047 | info = self.server.notes[request_id]
1048 | break
1049 | except :
1050 | time.sleep(0.1)
1051 | if info:
1052 | inf = eval(info['information'])
1053 | pos_dire = inf['direction']
1054 | pos = inf['position']
1055 | self.position_agent = pos
1056 | x = pos_dire["x"]
1057 | y = pos_dire["z"]
1058 | self.direction_vector['x'], self.direction_vector['y'] = x, y
1059 | # Calculate the angle (in radians) between the direction vector and the positive x-axis direction
1060 | angle_rad = np.arctan2(y, x)
1061 | # Convert radians to degrees
1062 | angle_degree = np.degrees(angle_rad)
1063 | self.direction_degree = angle_degree
1064 | flo, xx, yy, is_o = self.server.maps.get_point_info(pos)
1065 | self.map_position_agent['x'], self.map_position_agent['y'], self.map_position_agent['floor'] = xx, yy, flo
1066 | # print('direction degree:', angle_degree)
1067 | return pos, inf
1068 | return pos, inf
1069 |
1070 | def go_to_there(self, pos, command=0):
1071 | try:
1072 | y = pos[1]
1073 | except:
1074 | pos = [pos['x'], pos['y'], pos['z']]
1075 | if not command:
1076 | command = {"requestIndex": 10, "actionId": 6, "actionPara": {'position': {"x": 0.5, "y": 1.0, "z": 0}}}
1077 | command['actionPara']['position']['x'] = pos[0]
1078 | command['actionPara']['position']['y'] = pos[1]
1079 | command['actionPara']['position']['z'] = pos[2]
1080 | command['actionPara'] = json.dumps(command['actionPara'])
1081 | re_id = self.server.send_data(5, command, 1)
1082 | return re_id
1083 |
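# Navigation wrapper (sketch of the flow): sample accessible grid points around the target,
# send the robot to the nearest one, verify arrival via pos_query, and on failure prune
# nearby candidates (within delete_dis) before retrying, up to `times` attempts.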
1084 | def goto_target_goal(self, position_tar, radius=1.0, delete_dis=3, times=6, position_mode=0,
1085 | accurate=0, sort=1, inflation=0):
1086 | # position_mode 0: world position, e.g. (0.5, 0.1, 1.2); 1: grid position (floor_n, map_i, map_j)
1087 | try:
1088 | xx, yy, zz = position_tar[0], position_tar[1], position_tar[2]
1089 | except:
1090 | xx, yy, zz = position_tar['x'], position_tar['y'], position_tar['z']
1091 | floor, point_list = self.server.maps.get_an_accessible_area(xx, yy, zz, radius, position_mode, sort, inflation)
1092 | result_go = 0
1093 | for i_try in range(times):
1094 | if not self.running or self.server.stop_event.is_set() or not self.server.state:
1095 | return 0
1096 | length = len(point_list)
1097 | if length < 1:
1098 | break
1099 | if result_go == 1:
1100 | break
1101 | # choose the nearest candidate point (p_i, p_j); a random pick would be np.random.randint(0, length)
1102 | p_i, p_j = point_list[0][0], point_list[0][1]
1103 | # translate the grid pos to the world pos
1104 | pos_i, pos_j = self.server.maps.get_an_aligned_world_coordinate_randomly(floor, p_i, p_j)
1105 | if accurate:
1106 | pos_i, pos_j = p_i, p_j
1107 | position_go = (pos_i, self.server.maps.floors[floor], pos_j)
1108 | # print(' now plan to {}'.format(position_go))
1109 | for i in range(2):
1110 | if not self.running or self.server.stop_event.is_set() or not self.server.state:
1111 | return 0
1112 | if accurate:
1113 | position_go = (xx, yy, zz)
1114 | action_id = self.go_to_there(position_go)
1115 | result = 0
1116 | while True:
1117 | if not self.running or self.server.stop_event.is_set() or not self.server.state:
1118 | return 0
1119 | time.sleep(0.5)
1120 | try:
1121 | if self.server.notes[action_id]['result'] == 1:
1122 | result = self.server.notes[action_id]['result']
1123 | break
1124 | elif self.server.notes[action_id]['result'] < 1:
1125 | break
1126 | except: pass
1127 | time.sleep(0.1)
1128 | if result == 1:
1129 | pos, info = self.pos_query()
1130 | if pos:
1131 | dis = self.env.calculate_distance(pos, position_go)
1132 | if dis < 1:
1133 | result_go = 1
1134 | break
1135 | time.sleep(2)
1136 | if result_go == 0:
1137 | accurate = 0
1138 | # iterate in reverse and delete candidate points within delete_dis of the failed target
1139 | for i in range(len(point_list) - 1, -1, -1):
1140 | point = point_list[i]
1141 | if np.sqrt((point[0] - p_i) ** 2 + (point[1] - p_j) ** 2) < delete_dis:
1142 | del point_list[i]
1143 | elif result_go == 1:
1144 | # print('$$$$$ arrive at: ', position_go)
1145 | return result_go
1146 | if result_go == 0: pass
1147 | # print('$$$$$not arrive: ', position_tar)
1148 | return result_go
1149 |
1150 | def observation_camera(self, camera_type=0):
1151 | # 0 head camera, 1 hand camera
1152 | c_type = 13
1153 | if camera_type == 0:
1154 | c_type = 13
1155 | elif camera_type == 1:
1156 | c_type = 14
1157 | ins = {"requestIndex": 1, "actionId": c_type, "actionPara": json.dumps({'height': 640, 'width': 480})}  # serialize actionPara, as in the other camera requests
1158 | # new instruction
1159 | action_id = self.server.send_data(5, ins, 1)
1160 | res = self.wait_for_respond(action_id, 60)
1161 | if not res:
1162 | return res
1163 | img = json.loads(res["information"])
1164 | im = img["multiVisionBytes"][0]['bytes']
1165 | byte_array = bytes(im)
1166 | # decode the returned image bytes (PNG) into a NumPy array
1167 | np_arr = np.frombuffer(byte_array, np.uint8)
1168 | image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
1169 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
1170 | return image
1171 |
1172 | def get_depth(self, camera_type=0):
1173 | # 0 head camera, 1 hand camera
1174 | c_type = 17
1175 | if camera_type == 0:
1176 | c_type = 15
1177 | elif camera_type == 1:
1178 | c_type = 16
1179 | ins = {"requestIndex": 1, "actionId": c_type, "actionPara": {'height': 300, 'width': 300}}
1180 | ins['actionPara'] = json.dumps(ins['actionPara'])
1181 | action_id = self.server.send_data(5, ins, 1)
1182 | res = self.wait_for_respond(action_id, 60)
1183 | if not res:
1184 | return res
1185 | img = json.loads(res["information"])
1186 | im = img["multiVisionBytes"][0]['bytes']
1187 | byte_array = bytes(im)
1188 | nparr = np.frombuffer(byte_array, np.uint8)
1189 | image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
1190 | image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
1191 | # image = Image.open(io.BytesIO(byte_array))
1192 | # image = image.convert('I;16')
1193 | # pil_image = Image.fromarray(image.astype('uint8'), mode='L')
1194 | depth_matrix = image/255*10
1195 | return depth_matrix
1196 |
1197 | def get_segmentation(self, camera_type=0, decode=0, show=False):
1198 | # 0 head camera, 1 hand camera
1199 | c_type = 17
1200 | if camera_type == 0:
1201 | c_type = 17
1202 | elif camera_type == 1:
1203 | c_type = 18
1204 | ins = {"requestIndex": 1, "actionId": c_type, "actionPara": {'height': 300, 'width': 300}}
1205 | ins['actionPara'] = json.dumps(ins['actionPara'])
1206 | action_id = self.server.send_data(5, ins, 1)
1207 | res = self.wait_for_respond(action_id, 60)
1208 | if not res:
1209 | return res
1210 | img = json.loads(res["information"])
1211 | im = img["multiVisionBytes"][0]['bytes']
1212 | byte_array = bytes(im)
1213 | # decode the returned image bytes (PNG) into a NumPy array
1214 | # image = Image.open(io.BytesIO(byte_array))
1215 | nparr = np.frombuffer(byte_array, np.uint8)
1216 | image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
1217 | # Convert image byte stream to Image object
1218 | if show:
1219 | cv2.imshow('image', image)
1220 | cv2.waitKey(10)
1221 | cv2.destroyAllWindows()
1222 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
1223 | if decode:
1224 | im, tags = self.decode_segment(image)
1225 | return im, tags
1226 | return image
1227 |
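# decode_segment appears to map each pixel's normalised RGB triple (formatted to two decimals)
# back to an object id via object_data.rgb_to_id, returning the id matrix plus a dict of the
# tags present; background-coloured pixels are skipped.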
1228 | def decode_segment(self, image):
1229 | width, height = image.shape[1], image.shape[0]
1230 | object_tag = self.object_data.segment_tag
1231 | rgb_id = self.object_data.rgb_to_id
1232 | target = {}
1233 | seg_matrix = np.zeros((height, width))
1234 | image = image / 255
1235 | formatted_arr = np.array([f'{x:.2f}' for x in np.nditer(image)])
1236 | image = formatted_arr.reshape(image.shape)
1237 | # Traverse each pixel of the image
1238 | for x in range(height):
1239 | for y in range(width):
1240 | # Obtain the RGB value of pixels
1241 | pixel_value = image[x, y]
1242 | r, g, b = pixel_value[0], pixel_value[1], pixel_value[2]
1243 | rrggbb = (r, g, b)
1244 | if rrggbb == self.object_data.background:
1245 | continue
1246 | pixel_id = rgb_id.get(rrggbb, 0)
1247 | if pixel_id:  # 0 means the colour is unknown, so leave the cell at 0
1248 | seg_matrix[x][y] = pixel_id
1249 | values_set = set(np.unique(seg_matrix))
1250 | for value in values_set:
1251 | value = round(value)
1252 | if value:
1253 | target[value] = object_tag[value]['tag']
1254 | return seg_matrix, target
1255 |
1256 | def query_near_objects(self):
1257 | instruction = {"requestIndex": 0, "actionId": 12}
1258 | r_id = self.server.send_data(5, instruction, 1)
1259 | obj_info = self.wait_for_respond(r_id, 60)
1260 | object_info = eval(obj_info['information']) if obj_info else {"nearby": []}  # guard against a timed-out request
1261 | return object_info["nearby"]
1262 |
1263 | def go_to_target_object(self, object_id=None, name='Apple_what_your_nee', feature='Grabable_what_your_need',
1264 | distance=1, random_mode=1):
1265 | items = self.query_near_objects()
1266 | if not items: return 0
1267 | all_objs = []
1268 | if len(items) != 0:
1269 | for item_id in items:
1270 | item_info = self.object_data.objects[item_id]
1271 | if not item_info['isOccupied']:
1272 | obj_f = [n.lower() for n in item_info['features']]
1273 | check_id = 0
1274 | if object_id is not None:
1275 | if item_id == object_id:  # item_id is the id itself, not a dict
1276 | check_id = 1
1277 | if feature.lower() in obj_f or name.lower() in item_info['itemName'].lower() or check_id:
1278 | item_info = self.server.object_query(item_id)
1279 | all_objs.append(item_info)
1280 | else:
1281 | return 0
1282 | if len(all_objs) == 0:
1283 | return 0
1284 | if random_mode == 1:
1285 | target_obj = np.random.choice(all_objs)
1286 | else:
1287 | target_obj = all_objs[0]
1288 | if not target_obj: return 0
1289 | pos = target_obj['position']
1290 | res = self.goto_target_goal(pos, distance, 3, 10)
1291 | return res
1292 |
1293 | def site_view(self, pos=(0, 0, 0)):
1294 | try:
1295 | x, y, z = pos['x'], pos['y'], pos['z']
1296 | except:
1297 | x, y, z = pos[0], pos[1], pos[2]
1298 | if -0.2 < y:
1299 | y = 3
1300 | elif -5 < y:
1301 | y = -2
1302 | else:
1303 | y = -10
1304 | ins = {"requestIndex": 0, "targetType": 12, "siteVisionPos": {"x": x, "y": y, "z": z}}
1305 | r_id = self.server.send_data(2, ins, 1)
1306 | receive = self.wait_for_respond(r_id, 60)
1307 | if not receive: return None
1308 | img = json.loads(receive["statusDetail"])
1309 | im = img["multiVisionBytes"][0]['bytes']
1310 | byte_array = bytes(im)
1311 | nparr = np.frombuffer(byte_array, np.uint8)
1312 | image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
1313 | # Convert image byte stream to Image object
1314 | # cv2.imshow('image', image)
1315 | # cv2.waitKey(15)
1316 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
1317 | return image
1318 |
1319 | def go_to_destination(self, tar_location, ids=0):
1320 | location_now, outcome = None, 0
1321 | try:
1322 | location_now = self.env.location[tar_location]
1323 | except:
1324 | locations = list(self.env.location.keys())
1325 | for index, loc in enumerate(locations):
1326 | if tar_location.lower() in loc.lower():
1327 | location_now = self.env.location[loc]
1328 | break
1329 | if location_now:
1330 | outcome = self.goto_target_goal(location_now[ids], 2, 2, 20, position_mode=1)
1331 | return outcome
1332 |
1333 | # def navigate(self, map_floor, goal):
1334 | # # map of scene, goal to navigation
1335 | # path_map = copy.deepcopy(self.server.maps.maps_info[map_floor]['grid'])
1336 | # path_map = np.array(path_map)
1337 | # print(f"started 1 at {time.strftime('%X')}")
1338 | # self.pos_query()
1339 | # start = (self.map_position_agent['x'], self.map_position_agent['y'])
1340 | # # start_vector = (self.direction_vector['x'], self.direction_vector['y'])
1341 | # # Input initial vector, initial coordinate point, and facing coordinate point
1342 | # x0, y0 = 1, 0 # Initial vector
1343 | # x, y = 0, 0 # Initial coordinate point
1344 | # xx, yy = 0, 1 # Facing coordinate points
1345 | #
1346 | # rotation_angle = self.calculate_rotation_angle(goal[0], goal[1])
1347 | # print('-----', rotation_angle, '-------')
1348 | # print(f"started 2 at {time.strftime('%X')}")
1349 | # # path, turns = self.astar(start, goal, path_map)
1350 | # # Plan the path to the goal
1351 | # queue1 = Queue()
1352 | # # shared_list1, shared_list2 = mp.Array('c', 0), mp.Array('c', 0)
1353 | # # print(shared_list1, shared_list2)
1354 | # process1 = Process(target=astar, args=(start, goal, path_map, [], [], queue1))
1355 | # process1.start()
1356 | # process1.join()
1357 | # print(f"started 3 at {time.strftime('%X')}")
1358 | # shared_list1 = queue1.get()
1359 | # shared_list2 = queue1.get()
1360 | # print(shared_list2)
1361 | # rotation_angle = self.calculate_rotation_angle(shared_list2[0][0], shared_list2[0][1])
1362 | # print('-----', rotation_angle, '-------')
1363 | # xxx, yyy, zzz = self.server.maps.get_world_position(map_floor, shared_list2[0][0], shared_list2[0][1])
1364 | # dis = self.calculate_distance(xxx, zzz)
1365 | # print('-----', dis, '-------')
1366 | # # self.move_forward(dis)
1367 | # print(f"started 5 at {time.strftime('%X')}")
1368 | # # print(mp.cpu_count()) 16
1369 |
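# Returns the signed angle in degrees between the robot's current heading vector and the
# vector towards (xx, yy); the sign of the 2D cross product decides left vs right rotation.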
1370 | def calculate_rotation_angle(self, xx, yy, accuracy=1):
1371 | start = (self.map_position_agent['x'], self.map_position_agent['y'])
1372 | if accuracy:
1373 | start = (self.position_agent['x'], self.position_agent['z'])
1374 | start_vector = (self.direction_vector['x'], self.direction_vector['y'])
1375 | x0, y0, x, y = start_vector[0], start_vector[1], start[0], start[1]
1376 | v1 = np.array([x0, y0]) # Initial vector
1377 | v2 = np.array([xx - x, yy - y]) # The vector of the oriented coordinate point relative to the initial coordinate point
1378 | dot_product = np.dot(v1, v2)
1379 | det = np.linalg.det([v1, v2])
1380 | angle_radians = np.arctan2(det, dot_product)
1381 | angle_degrees = np.degrees(np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))))
1382 | cross_product = np.cross(v1, v2)  # determine the direction of rotation
1383 | if cross_product > 0:
1384 | # left rotation
1385 | angle_degrees = -angle_degrees
1386 | elif cross_product < 0: pass
1387 | # right
1388 | else: pass
1389 | return angle_degrees
1390 |
1391 | def head_camera_look_at(self, position, accuracy=0):
1392 | try:
1393 | xx, yy = position['x'], position['z']
1394 | except:
1395 | xx, yy = position[0], position[2]
1396 | self.pos_query()
1397 | rotation_angle = self.calculate_rotation_angle(xx, yy, accuracy=accuracy)
1398 | result = self.rotate_right(rotation_angle)
1399 | # result = self.joint_control(14, rotation_angle)
1400 | return result
1401 |
1402 | def direction_adjust(self, position, pos_input=0, accuracy=1):
1403 | # world_position = (22.38, 0.1, -0.17) or {'x': , 'y': , 'z': }
1404 | if pos_input:
1405 | flo, xx, yy = position[0], position[1], position[2]
1406 | accuracy = 0
1407 | else:
1408 | flo, xx, yy, is_o = self.server.maps.get_point_info(position)
1409 | if accuracy:
1410 | try:
1411 | xx, yy = position['x'], position['z']
1412 | except:
1413 | xx, yy = position[0], position[2]
1414 | self.pos_query()
1415 | rotation_angle = self.calculate_rotation_angle(xx, yy)
1416 | result = self.rotate_right(rotation_angle)
1417 | self.pos_query()
1418 | rotation_angle = self.calculate_rotation_angle(xx, yy)
1419 | result = self.joint_control(5, rotation_angle)
1420 | return result
1421 |
1422 | def calculate_distance(self, x_x, y_y):
1423 | point1 = np.array([self.position_agent['x'], self.position_agent['z']])  # ground-plane distance uses x/z (y is height in Unity)
1424 | point2 = np.array([x_x, y_y])
1425 | distance = np.linalg.norm(point2 - point1)
1426 | return distance
1427 |
1428 | def ik_process(self, x, y, z):
1429 | res = self.input_pos(self.robot, x, y, z)
1430 | if np.all(res == -1):
1431 | return 0
1432 | else:
1433 | return res.tolist()
1434 |
1435 | def input_pos(self, robot, x, y, z, phi=0, theta=0, psi=0, plot=0):
1436 | A = eul2r(phi, theta, psi)
1437 | # A is the rotation matrix built from the Euler angles (phi, theta, psi)
1438 | # example target translation: [0.5, 0, 1.87]
1439 | t = [x, y, z]
1440 | AT = SE3.Rt(A, t)
1441 | if phi == 0 and theta == 0 and psi == 0:
1442 | AT = self.rotation_matrix(t[0], t[1], t[2])
1443 | # AT combines the rotation A with the translation t along the x, y and z axes
1444 | # AT = SE3.Rt(A, t)
1445 | # AT represents the 4 * 4 transformation matrix of the end effector
1446 | sol = robot.ik(AT)
1447 | # print(robot.fkine([-0.11530289, 0.3, 0.24709773, 0.42730769, 0.72559458]))
1448 | if sol[1]:
1449 | if plot:
1450 | robot.arm.plot(sol[0])
1451 | # If a feasible solution is calculated, visualize it (swift package)
1452 | time.sleep(20)
1453 | return sol[0]
1454 | return np.array([-1])
1455 |
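# Builds an end-effector target pose from a relative (x, y, z): the coordinates are first
# clamped to what appears to be the arm's reachable workspace, and theta comes from a
# quadratic in z whose coefficients look empirically fitted for this arm.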
1456 | def rotation_matrix(self, x, y, z):
1457 | # clamp the target so the hypotenuse and reach stay within the robot arm's limits
1458 | # print(x, y, z)
1459 | if x < -0.4:
1460 | x = -0.4
1461 | elif x > 0.5:
1462 | x = 0.5
1463 | if 0.2 > z:
1464 | z = 0.2
1465 | elif 1.0 < z:
1466 | z = 1.05
1467 | if y < -0.5:
1468 | y = -0.5
1469 | elif y > 0.5:
1470 | y = 0.5
1471 | elif abs(y) < 0.03:
1472 | y = 0
1473 | hypotenuse = np.sqrt(y ** 2 + z ** 2)
1474 | if hypotenuse > 1.01:
1475 | z = np.sqrt(1.05 - y ** 2)
1476 | a, b, c = -2.222439753175887, 0.7599754447016618, 1.981407645745737
1477 | theta = a * z ** 2 + b * z + c
1478 | phi = y * 4
1479 | A = eul2r(phi, theta, 0)
1480 | t = [x, y, z]
1481 | AT = SE3.Rt(A, t)
1482 | return AT
1483 |
1484 |
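# Standalone A* run in a separate process (see the commented-out navigate method): the heuristic
# combines Manhattan distance to the goal, distance to the nearest obstacle, and, for the start
# node, a penalty for deviating from the initial direction; the reconstructed path and its
# turning points are pushed onto `queue`.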
1485 | def astar(start, goal, map_matrix, list1, list2, queue, initial_direction=(1, 0)):
1486 |
1487 | def heuristic(node, is_start):
1488 | # return abs(node[0] - goal[0]) + abs(node[1] - goal[1])
1489 | if is_start:
1490 | # Calculate the angle between the node and the initial direction
1491 | direction_cost = abs(node[0] - start[0]) * initial_direction[0] + abs(node[1] - start[1]) * \
1492 | initial_direction[1]
1493 | else:
1494 | direction_cost = 0
1495 | # Calculate the Manhattan distance from the node to the nearest obstacle
1496 | node_np = np.array(node)
1497 | obstacles = np.transpose(np.where(map_matrix == 0))
1498 | min_dist_to_obstacle = np.min(np.sum(np.abs(obstacles - node_np), axis=1))
1499 | # Calculate the angle between the node and the initial direction
1500 | return abs(node[0] - goal[0]) + abs(node[1] - goal[1]) + min_dist_to_obstacle + direction_cost*3
1501 | # return abs(node[0] - goal[0]) + abs(node[1] - goal[1])
1502 | # node_np = np.array(node)
1503 | # obstacles = np.transpose(np.where(map_matrix == 0))
1504 | # min_dist_to_obstacle = np.min(np.sum(np.abs(obstacles - node_np), axis=1))
1505 | # return abs(node[0] - goal[0]) + abs(node[1] - goal[1]) + min_dist_to_obstacle
1506 |
1507 | # each open-set entry pairs a heuristic score with a point
1508 | # initialize the heuristic value of the starting point
1509 | start_heuristic = heuristic(start, True)
1510 | open_set = [(start_heuristic, start)]
1511 | # open_set holds (score, point) entries, updated as the search expands
1512 | came_from = {}
1513 | # parent map used to reconstruct the path
1514 | g_score = {start: 0}
1515 | # cost-so-far for each evaluated point
1516 | while open_set:
1517 | current_heuristic, current = heappop(open_set)
1518 |
1519 | # pos [x][y]
1520 | if current == goal:
1521 | break
1522 | # pos all around [(x+1, y), (x, y+1), (x-1, y), (x, y-1)]
1523 | for neighbor in [(current[0] - 1, current[1]), (current[0] + 1, current[1]),
1524 | (current[0], current[1] - 1), (current[0], current[1] + 1)]:
1525 | # Boundary Check
1526 | if 0 <= neighbor[0] < map_matrix.shape[0] and 0 <= neighbor[1] < map_matrix.shape[1]:
1527 | if map_matrix[neighbor[0], neighbor[1]] == 0:
1528 | continue
1529 | # Obstacle examination
1530 | tentative_g_score = g_score[current] + 1
1531 | # Choose the best neighbor point
1532 | if neighbor not in g_score or tentative_g_score < g_score[neighbor]:
1533 | came_from[neighbor] = current
1534 | g_score[neighbor] = tentative_g_score
1535 | f_score = tentative_g_score + heuristic(neighbor, False)
1536 | heappush(open_set, (f_score, neighbor))
1537 | # saving with the score of the point (n, (x,y))
1538 | paths_p = []
1539 | turns_p = [] # Store points that require turning
1540 | previous_direction = None # The forward direction of the previous point
1541 | current = goal
1542 | # Reverse search
1543 | while current != start:
1544 | paths_p.append(current)
1545 | neighbors = [(current[0] - 1, current[1]), (current[0] + 1, current[1]),
1546 | (current[0], current[1] - 1), (current[0], current[1] + 1)]
1547 | next_vertex = came_from[current]
1548 | dx = next_vertex[0] - current[0]
1549 | dy = next_vertex[1] - current[1]
1550 | current_direction = (dx, dy)
1551 | # Determine whether to turn
1552 | if previous_direction is not None and previous_direction != current_direction:
1553 | turns_p.append(current) # the current point needing turning right/left
1554 | previous_direction = current_direction
1555 | current = next_vertex
1556 | paths_p.append(start)
1557 | paths_p.reverse()
1558 |
1559 | # -------------visualization------------
1560 | path, turns, path_map = paths_p, turns_p, map_matrix
1561 | list1, list2 = paths_p, turns_p
1562 | print(len(list1), len(list2))
1563 | queue.put(list1)
1564 | queue.put(list2)
1565 | for point in path:
1566 | if point in turns:
1567 | path_map[point[0], point[1]] = 3
1568 | else:
1569 | path_map[point[0], point[1]] = 2
1570 | # turning
1571 | print("turning here:", turns)
1572 | # control the agent to move along the path, point by point
1573 | # for point in turns:
1574 | # print(point)
1575 | # Mark as 2 on the path
1576 | for point in path:
1577 | path_map[point[0], point[1]] = 2
1578 |
1579 | # Output the points that need to turn
1580 | print("需要转弯的点:")
1581 | for point in turns:
1582 | # print(point)
1583 | path_map[point[0], point[1]] = 3
1584 | # Visual map
1585 | print(f"started 4 at {time.strftime('%X')}")
1586 | plt.imshow(path_map, cmap='viridis', interpolation='nearest')
1587 | plt.colorbar(ticks=[0, 1, 2])
1588 | plt.title('Path Planning using A* Algorithm')
1589 | plt.show()
1590 |
1591 | return paths_p, turns_p, list1, list2
1592 |
1593 |
1594 | # IK server
1595 | class Planar3DOF(ERobot):
1596 | """
1597 | Class that imports a planar3DOF Robot
1598 | """
1599 |
1600 | def __init__(self):
1601 | args = super().URDF_read(
1602 | "robot/PRS_Robot.urdf", tld="./")
1603 |
1604 | super().__init__(
1605 | args[0],
1606 | name=args[1])
1607 |
1608 | self.manufacturer = "Utadeo"
1609 | # self.ee_link = self.ets[9]
1610 |
1611 | # zero angles, L shaped pose
1612 | self.addconfiguration("qz", np.array([0, 0, 0, 0, 0]))
1613 |
1614 | # ready pose, arm up
1615 | self.addconfiguration("qr", np.array([0, 0, 0, 0, 0]))
1616 | # self.addconfiguration("qr", np.array([0, -pi / 2, pi / 2, 0, 0]))
1617 |
1618 | # straight and horizontal
1619 | self.addconfiguration("qs", np.array([0, 0, 0, 0, 0]))
1620 | # self.addconfiguration("qs", np.array([0, 0, pi / 2, 0, 0]))
1621 |
1622 | # nominal table top picking pose
1623 | self.addconfiguration("qn", np.array([0, 0, pi / 4, pi/4, 0]))
1624 |
1625 | # # nominal table top picking pose
1626 | # self.addconfiguration("qm", np.array([0, pi / 4, pi]))
1627 | #
1628 |
1629 | @staticmethod
1630 | def load_my_path():
1631 | # print(__file__)
1632 | os.chdir(os.path.dirname(__file__))
1633 |
1634 |
1635 |
1636 | class PRS_IK(object):
1637 | def __init__(self):
1638 | self.arm = Planar3DOF()
1639 |
1640 | def ik(self, AT):
1641 | sol = self.arm.ik_GN(AT, q0=[0, 0, 0, 0, 0])
1642 | return sol  # joint solution plus success flag (sol[0], sol[1], ...)
1643 |
1644 | def fkine(self, arr=[0, 0, 0, 0, 0]):
1645 | return self.arm.fkine(arr)
1646 | # print(robot.fkine(robot.configs['qz'])) # FK shows that desired end-effector pose was achieved
1647 |
1648 | def trajectory(self, p0, p1, step):
1649 | # example: qt = rtb.tools.trajectory.jtraj(self.arm.configs['qr'], self.arm.configs['qz'], 100)
1650 | # qt = rtb.tools.trajectory.jtraj(sol.q, sol.q, 100)
1651 | qt = rtb.tools.trajectory.jtraj(p0, p1, step)
1652 | # print(qt.q)
1653 | return qt
1654 |
1655 | def show(self, pose=[0, 0, 0, 0, 0], duration=10):
1656 | self.arm.plot(pose)
1657 | time.sleep(duration)  # the parameter previously shadowed the time module
1658 |
1659 |
--------------------------------------------------------------------------------