├── .gitignore
├── llm_input
│   ├── resource
│   │   └── llm_input
│   ├── llm_input
│   │   ├── __init__.py
│   │   ├── llm_audio_input_local.py
│   │   └── llm_audio_input.py
│   ├── setup.cfg
│   ├── package.xml
│   ├── test
│   │   ├── test_pep257.py
│   │   ├── test_flake8.py
│   │   └── test_copyright.py
│   └── setup.py
├── llm_model
│   ├── resource
│   │   └── llm_model
│   ├── llm_model
│   │   ├── __init__.py
│   │   └── chatgpt.py
│   ├── setup.cfg
│   ├── package.xml
│   ├── setup.py
│   ├── test
│   │   ├── test_pep257.py
│   │   ├── test_flake8.py
│   │   └── test_copyright.py
│   └── readme.md
├── llm_robot
│   ├── resource
│   │   └── llm_robot
│   ├── llm_robot
│   │   ├── __init__.py
│   │   ├── test.py
│   │   ├── readme.md
│   │   ├── arx5_arm_robot.py
│   │   ├── turtle_robot.py
│   │   └── multi_robot.py
│   ├── setup.cfg
│   ├── package.xml
│   ├── test
│   │   ├── test_pep257.py
│   │   ├── test_flake8.py
│   │   └── test_copyright.py
│   └── setup.py
├── llm_bringup
│   ├── resource
│   │   └── llm_bringup
│   ├── llm_bringup
│   │   └── __init__.py
│   ├── setup.cfg
│   ├── readme.md
│   ├── package.xml
│   ├── test
│   │   ├── test_pep257.py
│   │   ├── test_flake8.py
│   │   └── test_copyright.py
│   ├── setup.py
│   └── launch
│       ├── audio_in_text_out_demo.launch.py
│       ├── chatgpt_with_turtle_robot.launch.py
│       ├── chatgpt_with_multi_robot.launch.py
│       ├── local_chatgpt_with_multi_robot.launch.py
│       ├── chatgpt_with_arm_robot.launch.py
│       └── arx5_with_turtlesim.py
├── llm_config
│   ├── llm_config
│   │   ├── __init__.py
│   │   ├── user_config.py
│   │   └── robot_behavior.py
│   ├── resource
│   │   └── llm_config
│   ├── setup.cfg
│   ├── package.xml
│   ├── setup.py
│   ├── test
│   │   ├── test_pep257.py
│   │   ├── test_flake8.py
│   │   └── test_copyright.py
│   └── readme.md
├── llm_output
│   ├── llm_output
│   │   ├── __init__.py
│   │   └── llm_audio_output.py
│   ├── resource
│   │   └── llm_output
│   ├── setup.cfg
│   ├── package.xml
│   ├── setup.py
│   └── test
│       ├── test_pep257.py
│       ├── test_flake8.py
│       └── test_copyright.py
├── llm_interfaces
│   ├── msg
│   │   └── ChatGPT.msg
│   ├── srv
│   │   └── ChatGPT.srv
│   ├── readme.md
│   ├── package.xml
│   └── CMakeLists.txt
├── llm_imgs
│   └── flow_diagram.png
├── CONTRIBUTING.md
├── llm_install
│   ├── dependencies_install.sh
│   ├── config_openai_api_key.sh
│   └── config_aws.sh
├── README.md
└── LICENSE
/.gitignore:
--------------------------------------------------------------------------------
1 | *__pycache__
2 |
--------------------------------------------------------------------------------
/llm_input/resource/llm_input:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_model/resource/llm_model:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_robot/resource/llm_robot:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_bringup/resource/llm_bringup:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_config/llm_config/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_config/resource/llm_config:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_input/llm_input/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_model/llm_model/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_output/llm_output/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_output/resource/llm_output:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_robot/llm_robot/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_bringup/llm_bringup/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_interfaces/msg/ChatGPT.msg:
--------------------------------------------------------------------------------
1 | string text
--------------------------------------------------------------------------------
/llm_interfaces/srv/ChatGPT.srv:
--------------------------------------------------------------------------------
1 | string request_text
2 | ---
3 | string response_text
4 |
--------------------------------------------------------------------------------
/llm_imgs/flow_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Auromix/ROS-LLM/HEAD/llm_imgs/flow_diagram.png
--------------------------------------------------------------------------------
/llm_input/setup.cfg:
--------------------------------------------------------------------------------
1 | [develop]
2 | script_dir=$base/lib/llm_input
3 | [install]
4 | install_scripts=$base/lib/llm_input
5 |
--------------------------------------------------------------------------------
/llm_model/setup.cfg:
--------------------------------------------------------------------------------
1 | [develop]
2 | script_dir=$base/lib/llm_model
3 | [install]
4 | install_scripts=$base/lib/llm_model
5 |
--------------------------------------------------------------------------------
/llm_robot/setup.cfg:
--------------------------------------------------------------------------------
1 | [develop]
2 | script_dir=$base/lib/llm_robot
3 | [install]
4 | install_scripts=$base/lib/llm_robot
5 |
--------------------------------------------------------------------------------
/llm_bringup/setup.cfg:
--------------------------------------------------------------------------------
1 | [develop]
2 | script_dir=$base/lib/llm_bringup
3 | [install]
4 | install_scripts=$base/lib/llm_bringup
5 |
--------------------------------------------------------------------------------
/llm_config/setup.cfg:
--------------------------------------------------------------------------------
1 | [develop]
2 | script_dir=$base/lib/llm_config
3 | [install]
4 | install_scripts=$base/lib/llm_config
5 |
--------------------------------------------------------------------------------
/llm_output/setup.cfg:
--------------------------------------------------------------------------------
1 | [develop]
2 | script_dir=$base/lib/llm_output
3 | [install]
4 | install_scripts=$base/lib/llm_output
5 |
--------------------------------------------------------------------------------
/llm_robot/llm_robot/test.py:
--------------------------------------------------------------------------------
1 | # Scratch test: convert a pose list to its string representation and check the types.
2 | pose = [1, 3, 4, 4.4]
3 | print(pose)
4 | print(type(pose))
5 | pose = str(pose)
6 | print(type(pose))
--------------------------------------------------------------------------------
/llm_interfaces/readme.md:
--------------------------------------------------------------------------------
1 | # llm_interfaces
2 |
3 | ## Package Description
4 | The `llm_interfaces` package provides interfaces for `ROS-LLM`.
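5 | 
6 | Once this package is built, the generated types can be used from Python like any other ROS 2 interface. The snippet below is a minimal sketch of calling the service defined in `srv/ChatGPT.srv`; the service name `/ChatGPT_service` is taken from the usage notes in `llm_model/readme.md`, and the client node name is illustrative:
7 | 
8 | ```python
9 | import rclpy
10 | from rclpy.node import Node
11 | 
12 | from llm_interfaces.srv import ChatGPT
13 | 
14 | 
15 | def main():
16 |     rclpy.init()
17 |     node = Node("chatgpt_client_demo")  # illustrative node name
18 |     client = node.create_client(ChatGPT, "/ChatGPT_service")
19 |     client.wait_for_service()
20 | 
21 |     # Fill in the request field defined in srv/ChatGPT.srv
22 |     request = ChatGPT.Request()
23 |     request.request_text = "Hello, robot!"
24 |     future = client.call_async(request)
25 |     rclpy.spin_until_future_complete(node, future)
26 |     node.get_logger().info("Response: " + future.result().response_text)
27 |     rclpy.shutdown()
28 | 
29 | 
30 | if __name__ == "__main__":
31 |     main()
32 | ```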
--------------------------------------------------------------------------------
/llm_bringup/readme.md:
--------------------------------------------------------------------------------
1 | # llm_bringup
2 | ## Package Description
3 | This package launches the related nodes for the ROS-LLM project within the ROS 2 framework.
4 | 
5 | ## Launch files
6 | The `launch` directory provides `audio_in_text_out_demo.launch.py`, `chatgpt_with_turtle_robot.launch.py`, `chatgpt_with_multi_robot.launch.py`, `local_chatgpt_with_multi_robot.launch.py`, `chatgpt_with_arm_robot.launch.py`, and `arx5_with_turtlesim.py`. For example, `ros2 launch llm_bringup chatgpt_with_turtle_robot.launch.py` brings up the audio input, ChatGPT model, audio output, turtle robot, and turtlesim nodes.
7 | 
--------------------------------------------------------------------------------
/llm_output/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
3 | <package format="3">
4 |   <name>llm_output</name>
5 |   <version>0.0.1</version>
6 |   <description>ROS-LLM output Package</description>
7 |   <maintainer email="hermanye233@icloud.com">Herman Ye</maintainer>
8 |   <license>Apache-2.0</license>
9 | 
10 |   <test_depend>ament_copyright</test_depend>
11 |   <test_depend>ament_flake8</test_depend>
12 |   <test_depend>ament_pep257</test_depend>
13 |   <test_depend>python3-pytest</test_depend>
14 | 
15 |   <export>
16 |     <build_type>ament_python</build_type>
17 |   </export>
18 | </package>
19 | 
--------------------------------------------------------------------------------
/llm_bringup/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
3 | <package format="3">
4 |   <name>llm_bringup</name>
5 |   <version>0.0.1</version>
6 |   <description>ROS-LLM bringup Package</description>
7 |   <maintainer email="hermanye233@icloud.com">Herman Ye</maintainer>
8 |   <license>Apache-2.0</license>
9 | 
10 |   <exec_depend>ros2launch</exec_depend>
11 |   <test_depend>ament_copyright</test_depend>
12 |   <test_depend>ament_flake8</test_depend>
13 |   <test_depend>ament_pep257</test_depend>
14 |   <test_depend>python3-pytest</test_depend>
15 | 
16 |   <export>
17 |     <build_type>ament_python</build_type>
18 |   </export>
19 | </package>
20 | 
--------------------------------------------------------------------------------
/llm_input/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
3 | <package format="3">
4 |   <name>llm_input</name>
5 |   <version>0.0.1</version>
6 |   <description>The llm_input package contains input nodes for the ROS-LLM.</description>
7 |   <maintainer email="hermanye233@icloud.com">Herman Ye</maintainer>
8 |   <license>Apache-2.0</license>
9 | 
10 |   <depend>llm_config</depend>
11 | 
12 |   <test_depend>ament_copyright</test_depend>
13 |   <test_depend>ament_flake8</test_depend>
14 |   <test_depend>ament_pep257</test_depend>
15 |   <test_depend>python3-pytest</test_depend>
16 | 
17 |   <export>
18 |     <build_type>ament_python</build_type>
19 |   </export>
20 | </package>
21 | 
--------------------------------------------------------------------------------
/llm_robot/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
3 | <package format="3">
4 |   <name>llm_robot</name>
5 |   <version>0.0.1</version>
6 |   <description>The llm_robot package provides a ChatGPT function call server to simulate function calls for any robot</description>
7 |   <maintainer email="hermanye233@icloud.com">hermanye</maintainer>
8 |   <license>Apache-2.0</license>
9 | 
10 |   <test_depend>ament_copyright</test_depend>
11 |   <test_depend>ament_flake8</test_depend>
12 |   <test_depend>ament_pep257</test_depend>
13 |   <test_depend>python3-pytest</test_depend>
14 | 
15 |   <export>
16 |     <build_type>ament_python</build_type>
17 |   </export>
18 | </package>
19 | 
--------------------------------------------------------------------------------
/llm_config/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
3 | <package format="3">
4 |   <name>llm_config</name>
5 |   <version>0.0.1</version>
6 |   <description>The llm_config package is responsible for configuring the behavior and various user preferences of the robot.</description>
7 |   <maintainer email="hermanye233@icloud.com">Herman Ye</maintainer>
8 |   <license>Apache-2.0</license>
9 | 
10 |   <test_depend>ament_copyright</test_depend>
11 |   <test_depend>ament_flake8</test_depend>
12 |   <test_depend>ament_pep257</test_depend>
13 |   <test_depend>python3-pytest</test_depend>
14 | 
15 |   <export>
16 |     <build_type>ament_python</build_type>
17 |   </export>
18 | </package>
19 | 
--------------------------------------------------------------------------------
/llm_config/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | package_name = 'llm_config'
4 |
5 | setup(
6 | name=package_name,
7 | version='0.0.1',
8 | packages=[package_name],
9 | data_files=[
10 | ('share/ament_index/resource_index/packages',
11 | ['resource/' + package_name]),
12 | ('share/' + package_name, ['package.xml']),
13 | ],
14 | install_requires=['setuptools'],
15 | zip_safe=True,
16 | maintainer='hermanye',
17 | maintainer_email='hermanye233@icloud.com',
18 | description='The llm_config package is responsible for configuring the behavior and various user preferences of the robot.',
19 | license="Apache-2.0",
20 | tests_require=['pytest'],
21 | entry_points={
22 | 'console_scripts': [
23 | ],
24 | },
25 | )
26 |
--------------------------------------------------------------------------------
/llm_output/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | package_name = 'llm_output'
4 |
5 | setup(
6 | name=package_name,
7 | version='0.0.1',
8 | packages=[package_name],
9 | data_files=[
10 | ('share/ament_index/resource_index/packages',
11 | ['resource/' + package_name]),
12 | ('share/' + package_name, ['package.xml']),
13 | ],
14 | install_requires=['setuptools'],
15 | zip_safe=True,
16 | maintainer='hermanye',
17 | maintainer_email='hermanye233@icloud.com',
18 | description='The llm_output package contains output nodes for the ROS-LLM.',
19 | license="Apache-2.0",
20 | tests_require=['pytest'],
21 | entry_points={
22 | 'console_scripts': [
23 | "llm_audio_output = llm_output.llm_audio_output:main",
24 | ],
25 | },
26 | )
27 |
--------------------------------------------------------------------------------
/llm_model/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
3 | <package format="3">
4 |   <name>llm_model</name>
5 |   <version>0.0.1</version>
6 |   <description>The llm_model package provides a conversational interface using the OpenAI API through the implementation of the ChatGPT node.</description>
7 |   <maintainer email="hermanye233@icloud.com">hermanye</maintainer>
8 |   <license>Apache-2.0</license>
9 | 
10 |   <depend>llm_config</depend>
11 |   <depend>llm_interfaces</depend>
12 |   <test_depend>ament_copyright</test_depend>
13 |   <test_depend>ament_flake8</test_depend>
14 |   <test_depend>ament_pep257</test_depend>
15 |   <test_depend>python3-pytest</test_depend>
16 | 
17 |   <export>
18 |     <build_type>ament_python</build_type>
19 |   </export>
20 | </package>
21 | 
--------------------------------------------------------------------------------
/llm_model/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | package_name = "llm_model"
4 |
5 | setup(
6 | name=package_name,
7 | version="0.0.1",
8 | packages=[package_name],
9 | data_files=[
10 | ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
11 | ("share/" + package_name, ["package.xml"]),
12 | ],
13 | install_requires=["setuptools", "llm_config", "llm_interfaces"],
14 | zip_safe=True,
15 | maintainer="hermanye",
16 | maintainer_email="hermanye233@icloud.com",
17 | description="The llm_model package provides a conversational interface using the OpenAI API through the implementation of the ChatGPT node.",
18 | license="Apache-2.0",
19 | tests_require=["pytest"],
20 | entry_points={
21 | "console_scripts": [
22 | "chatgpt = llm_model.chatgpt:main",
23 | ],
24 | },
25 | )
26 |
--------------------------------------------------------------------------------
/llm_input/test/test_pep257.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_pep257.main import main
16 | import pytest
17 |
18 |
19 | @pytest.mark.linter
20 | @pytest.mark.pep257
21 | def test_pep257():
22 | rc = main(argv=['.', 'test'])
23 | assert rc == 0, 'Found code style errors / warnings'
24 |
--------------------------------------------------------------------------------
/llm_model/test/test_pep257.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_pep257.main import main
16 | import pytest
17 |
18 |
19 | @pytest.mark.linter
20 | @pytest.mark.pep257
21 | def test_pep257():
22 | rc = main(argv=['.', 'test'])
23 | assert rc == 0, 'Found code style errors / warnings'
24 |
--------------------------------------------------------------------------------
/llm_robot/test/test_pep257.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_pep257.main import main
16 | import pytest
17 |
18 |
19 | @pytest.mark.linter
20 | @pytest.mark.pep257
21 | def test_pep257():
22 | rc = main(argv=['.', 'test'])
23 | assert rc == 0, 'Found code style errors / warnings'
24 |
--------------------------------------------------------------------------------
/llm_bringup/test/test_pep257.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_pep257.main import main
16 | import pytest
17 |
18 |
19 | @pytest.mark.linter
20 | @pytest.mark.pep257
21 | def test_pep257():
22 | rc = main(argv=['.', 'test'])
23 | assert rc == 0, 'Found code style errors / warnings'
24 |
--------------------------------------------------------------------------------
/llm_config/test/test_pep257.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_pep257.main import main
16 | import pytest
17 |
18 |
19 | @pytest.mark.linter
20 | @pytest.mark.pep257
21 | def test_pep257():
22 | rc = main(argv=['.', 'test'])
23 | assert rc == 0, 'Found code style errors / warnings'
24 |
--------------------------------------------------------------------------------
/llm_input/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | package_name = 'llm_input'
4 |
5 | setup(
6 | name=package_name,
7 | version='0.0.1',
8 | packages=[package_name],
9 | data_files=[
10 | ('share/ament_index/resource_index/packages',
11 | ['resource/' + package_name]),
12 | ('share/' + package_name, ['package.xml']),
13 | ],
14 | install_requires=['setuptools', 'llm_config'],
15 | zip_safe=True,
16 | maintainer='hermanye',
17 | maintainer_email='hermanye233@icloud.com',
18 | description='The llm_input package contains input nodes for the ROS-LLM.',
19 | license="Apache-2.0",
20 | tests_require=['pytest'],
21 | entry_points={
22 | 'console_scripts': [
23 | "llm_audio_input = llm_input.llm_audio_input:main",
24 | "llm_audio_input_local = llm_input.llm_audio_input_local:main",
25 | ],
26 | },
27 | )
28 |
--------------------------------------------------------------------------------
/llm_output/test/test_pep257.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_pep257.main import main
16 | import pytest
17 |
18 |
19 | @pytest.mark.linter
20 | @pytest.mark.pep257
21 | def test_pep257():
22 | rc = main(argv=['.', 'test'])
23 | assert rc == 0, 'Found code style errors / warnings'
24 |
--------------------------------------------------------------------------------
/llm_bringup/setup.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | from glob import glob
4 | from setuptools import setup
5 |
6 | package_name = 'llm_bringup'
7 |
8 | setup(
9 | name=package_name,
10 | version='0.0.1',
11 | packages=[package_name],
12 | data_files=[
13 | (
14 | "share/ament_index/resource_index/packages",
15 | ["resource/" + package_name],
16 | ),
17 | ("share/" + package_name, ["package.xml"]),
18 | (
19 | os.path.join("share", package_name, "launch"),
20 | glob(os.path.join("launch", "*launch.[pxy][yma]*")),
21 | ),
22 | ],
23 | install_requires=['setuptools', 'llm_config'],
24 | zip_safe=True,
25 | maintainer='hermanye',
26 | maintainer_email='hermanye233@icloud.com',
27 | description='ROS-LLM bringup package for ROS2',
28 | license='Apache-2.0',
29 | tests_require=['pytest'],
30 | entry_points={
31 | 'console_scripts': [
32 | ],
33 | },
34 | )
35 |
--------------------------------------------------------------------------------
/llm_robot/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | package_name = 'llm_robot'
4 |
5 | setup(
6 | name=package_name,
7 | version='0.0.1',
8 | packages=[package_name],
9 | data_files=[
10 | ('share/ament_index/resource_index/packages',
11 | ['resource/' + package_name]),
12 | ('share/' + package_name, ['package.xml']),
13 | ],
14 | install_requires=['setuptools', 'llm_config'],
15 | zip_safe=True,
16 | maintainer='hermanye',
17 | maintainer_email='hermanye233@icloud.com',
18 | description='The llm_robot package provides a ChatGPT function call server to simulate function calls for any robot',
19 | license="Apache-2.0",
20 | tests_require=['pytest'],
21 | entry_points={
22 | 'console_scripts': [
23 | "turtle_robot = llm_robot.turtle_robot:main",
24 | "arm_robot = llm_robot.arx5_arm_robot:main",
25 | "multi_robot = llm_robot.multi_robot:main",
26 | ],
27 | },
28 | )
29 |
--------------------------------------------------------------------------------
/llm_bringup/test/test_flake8.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_flake8.main import main_with_errors
16 | import pytest
17 |
18 |
19 | @pytest.mark.flake8
20 | @pytest.mark.linter
21 | def test_flake8():
22 | rc, errors = main_with_errors(argv=[])
23 | assert rc == 0, \
24 | 'Found %d code style errors / warnings:\n' % len(errors) + \
25 | '\n'.join(errors)
26 |
--------------------------------------------------------------------------------
/llm_config/test/test_flake8.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_flake8.main import main_with_errors
16 | import pytest
17 |
18 |
19 | @pytest.mark.flake8
20 | @pytest.mark.linter
21 | def test_flake8():
22 | rc, errors = main_with_errors(argv=[])
23 | assert rc == 0, \
24 | 'Found %d code style errors / warnings:\n' % len(errors) + \
25 | '\n'.join(errors)
26 |
--------------------------------------------------------------------------------
/llm_input/test/test_flake8.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_flake8.main import main_with_errors
16 | import pytest
17 |
18 |
19 | @pytest.mark.flake8
20 | @pytest.mark.linter
21 | def test_flake8():
22 | rc, errors = main_with_errors(argv=[])
23 | assert rc == 0, \
24 | 'Found %d code style errors / warnings:\n' % len(errors) + \
25 | '\n'.join(errors)
26 |
--------------------------------------------------------------------------------
/llm_model/test/test_flake8.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_flake8.main import main_with_errors
16 | import pytest
17 |
18 |
19 | @pytest.mark.flake8
20 | @pytest.mark.linter
21 | def test_flake8():
22 | rc, errors = main_with_errors(argv=[])
23 | assert rc == 0, \
24 | 'Found %d code style errors / warnings:\n' % len(errors) + \
25 | '\n'.join(errors)
26 |
--------------------------------------------------------------------------------
/llm_output/test/test_flake8.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_flake8.main import main_with_errors
16 | import pytest
17 |
18 |
19 | @pytest.mark.flake8
20 | @pytest.mark.linter
21 | def test_flake8():
22 | rc, errors = main_with_errors(argv=[])
23 | assert rc == 0, \
24 | 'Found %d code style errors / warnings:\n' % len(errors) + \
25 | '\n'.join(errors)
26 |
--------------------------------------------------------------------------------
/llm_robot/test/test_flake8.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_flake8.main import main_with_errors
16 | import pytest
17 |
18 |
19 | @pytest.mark.flake8
20 | @pytest.mark.linter
21 | def test_flake8():
22 | rc, errors = main_with_errors(argv=[])
23 | assert rc == 0, \
24 | 'Found %d code style errors / warnings:\n' % len(errors) + \
25 | '\n'.join(errors)
26 |
--------------------------------------------------------------------------------
/llm_bringup/test/test_copyright.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_copyright.main import main
16 | import pytest
17 |
18 |
19 | # Remove the `skip` decorator once the source file(s) have a copyright header
20 | @pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
21 | @pytest.mark.copyright
22 | @pytest.mark.linter
23 | def test_copyright():
24 | rc = main(argv=['.', 'test'])
25 | assert rc == 0, 'Found errors'
26 |
--------------------------------------------------------------------------------
/llm_config/test/test_copyright.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_copyright.main import main
16 | import pytest
17 |
18 |
19 | # Remove the `skip` decorator once the source file(s) have a copyright header
20 | @pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
21 | @pytest.mark.copyright
22 | @pytest.mark.linter
23 | def test_copyright():
24 | rc = main(argv=['.', 'test'])
25 | assert rc == 0, 'Found errors'
26 |
--------------------------------------------------------------------------------
/llm_input/test/test_copyright.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_copyright.main import main
16 | import pytest
17 |
18 |
19 | # Remove the `skip` decorator once the source file(s) have a copyright header
20 | @pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
21 | @pytest.mark.copyright
22 | @pytest.mark.linter
23 | def test_copyright():
24 | rc = main(argv=['.', 'test'])
25 | assert rc == 0, 'Found errors'
26 |
--------------------------------------------------------------------------------
/llm_model/test/test_copyright.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_copyright.main import main
16 | import pytest
17 |
18 |
19 | # Remove the `skip` decorator once the source file(s) have a copyright header
20 | @pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
21 | @pytest.mark.copyright
22 | @pytest.mark.linter
23 | def test_copyright():
24 | rc = main(argv=['.', 'test'])
25 | assert rc == 0, 'Found errors'
26 |
--------------------------------------------------------------------------------
/llm_output/test/test_copyright.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_copyright.main import main
16 | import pytest
17 |
18 |
19 | # Remove the `skip` decorator once the source file(s) have a copyright header
20 | @pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
21 | @pytest.mark.copyright
22 | @pytest.mark.linter
23 | def test_copyright():
24 | rc = main(argv=['.', 'test'])
25 | assert rc == 0, 'Found errors'
26 |
--------------------------------------------------------------------------------
/llm_robot/test/test_copyright.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Open Source Robotics Foundation, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from ament_copyright.main import main
16 | import pytest
17 |
18 |
19 | # Remove the `skip` decorator once the source file(s) have a copyright header
20 | @pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
21 | @pytest.mark.copyright
22 | @pytest.mark.linter
23 | def test_copyright():
24 | rc = main(argv=['.', 'test'])
25 | assert rc == 0, 'Found errors'
26 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to ROS-LLM Project
2 |
3 | Thank you for your interest in contributing to ROS-LLM Project! Here are some guidelines to help you get started.
4 |
5 | ## Reporting Bugs
6 |
7 | If you find a bug in the project, please open an issue on GitHub and provide as much detail as possible about the problem, including steps to reproduce it.
8 |
9 | ## Suggesting Features
10 |
11 | If you have an idea for a new feature or improvement to the project, please open an issue on GitHub and describe your suggestion in detail.
12 |
13 | ## Submitting Code Changes
14 |
15 | If you want to contribute code changes to the project, please follow these steps:
16 |
17 | 1. Fork the repository on GitHub.
18 | 2. Create a new branch for your changes.
19 | 3. Make your changes and commit them to your branch.
20 | 4. Push your branch to your fork on GitHub.
21 | 5. Open a pull request on the main repository and describe your changes in detail.
22 |
23 | We appreciate all contributions to ROS-LLM Project and will review pull requests as quickly as possible. Thank you for your help!
24 |
--------------------------------------------------------------------------------
/llm_install/dependencies_install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # This script configures dependencies for ROS-LLM
4 | # Version: 1.0
5 | # Author: Herman Ye @Auromix
6 | # Date: 2023-06-24
7 |
8 | # Exit the script immediately if a command exits with a non-zero status
9 | # set -x
10 | set -e
11 | # Install necessary dependencies for OpenAI
12 | sudo apt update
13 | sudo apt upgrade -y
14 | sudo apt install -y python3
15 | sudo apt install -y python3-pip
16 | pip install openai
17 | sudo apt install gnome-terminal -y
18 | pip install pysocks
19 | pip install requests
20 | sudo apt-get install libcanberra-gtk-module libcanberra-gtk3-module -y
21 |
22 |
23 | # Install AWS boto3
24 | pip install boto3
25 | pip install numpy
26 | pip install sounddevice
27 | pip install pydub
28 | pip install scipy
29 | sudo apt install portaudio19-dev -y
30 | sudo apt install ffmpeg -y
31 |
32 | # Install dependencies for sounddevice/soundfile
33 | sudo apt install libportaudio2 -y
34 | sudo apt install alsa-utils -y
35 | sudo apt install mpv -y
36 | pip install numpy sounddevice cffi soundfile
37 |
38 | # Check again
39 | sudo apt update
40 | sudo apt upgrade -y
--------------------------------------------------------------------------------
/llm_interfaces/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
3 | <package format="3">
4 |   <name>llm_interfaces</name>
5 |   <version>0.0.1</version>
6 |   <description>Interfaces for ROS-LLM package</description>
7 |   <maintainer email="hermanye233@icloud.com">Herman Ye</maintainer>
8 |   <license>Apache-2.0</license>
9 | 
10 |   <buildtool_depend>ament_cmake</buildtool_depend>
11 | 
12 |   <build_depend>rosidl_default_generators</build_depend>
13 |   <exec_depend>rosidl_default_runtime</exec_depend>
14 | 
15 |   <test_depend>ament_lint_auto</test_depend>
16 |   <test_depend>ament_lint_common</test_depend>
17 | 
18 |   <member_of_group>rosidl_interface_packages</member_of_group>
19 | 
20 |   <export>
21 |     <build_type>ament_cmake</build_type>
22 |   </export>
23 | </package>
24 | 
--------------------------------------------------------------------------------
/llm_config/readme.md:
--------------------------------------------------------------------------------
1 | # llm_config
2 | ## Package Description
3 | The `llm_config` package configures the behavior and user preferences of the robot. It consists of two modules: `robot_behavior.py` and `user_config.py`.
4 |
5 | ## robot_behavior.py
6 | The `robot_behavior.py` module controls the movement of the robot through a customizable list of robot functions. To modify the robot's behavior, the function descriptions in this module must be kept consistent with the real functions implemented in `llm_robot/turtle_robot.py`.
7 |
8 | ## user_config.py
9 | The `user_config.py` module lets users specify the parameters that shape the AI's output, including the OpenAI language model used, temperature, top probability cutoff, and frequency and presence penalties. Users can also set the chat prompt, the prefix for the AI's response, and the maximum number of tokens and maximum length allowed in the response. The module can store chat history in a JSON file with a maximum length limit. It also enables further customization of the assistant's behavior through a RobotBehavior object and a list of robot functions, and it contains API credentials for Amazon AWS along with parameters for services such as AWS S3, Transcribe, and Polly, as well as audio recording.
10 |
11 | For instructions on usage and customization details, please refer to the individual module files.
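12 | 
13 | As a rough illustration of what one entry in the robot function list might look like, the sketch below follows the OpenAI function-calling schema and mirrors the velocity command of `llm_robot/turtle_robot.py`. The variable name and exact structure used in `robot_behavior.py` may differ:
14 | 
15 | ```python
16 | # Hypothetical example of one robot function description in the
17 | # OpenAI function-calling schema; names and fields are illustrative.
18 | robot_functions_list = [
19 |     {
20 |         "name": "publish_cmd_vel",
21 |         "description": "Publish a velocity command to move the turtlesim robot.",
22 |         "parameters": {
23 |             "type": "object",
24 |             "properties": {
25 |                 "linear_x": {
26 |                     "type": "number",
27 |                     "description": "Linear velocity along the x axis (m/s).",
28 |                 },
29 |                 "angular_z": {
30 |                     "type": "number",
31 |                     "description": "Angular velocity around the z axis (rad/s).",
32 |                 },
33 |             },
34 |             "required": ["linear_x", "angular_z"],
35 |         },
36 |     },
37 | ]
38 | ```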
--------------------------------------------------------------------------------
/llm_interfaces/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.8)
2 | project(llm_interfaces)
3 |
4 | if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
5 | add_compile_options(-Wall -Wextra -Wpedantic)
6 | endif()
7 |
8 | # find dependencies
9 | find_package(ament_cmake REQUIRED)
10 | # uncomment the following section in order to fill in
11 | # further dependencies manually.
12 | # find_package( REQUIRED)
13 |
14 | # To convert the interfaces you defined into language-specific code
15 | # (like C++ and Python) so that they can be used in those languages
16 | # ROS Interfaces Settings: https://docs.ros.org/en/crystal/Tutorials/Custom-ROS2-Interfaces.html
17 | find_package(rosidl_default_generators REQUIRED)
18 |
19 | rosidl_generate_interfaces(${PROJECT_NAME}
20 | "msg/ChatGPT.msg"
21 | "srv/ChatGPT.srv"
22 | )
23 |
24 | if(BUILD_TESTING)
25 | find_package(ament_lint_auto REQUIRED)
26 | # the following line skips the linter which checks for copyrights
27 | # comment the line when a copyright and license is added to all source files
28 | set(ament_cmake_copyright_FOUND TRUE)
29 | # the following line skips cpplint (only works in a git repo)
30 | # comment the line when this package is in a git repo and when
31 | # a copyright and license is added to all source files
32 | set(ament_cmake_cpplint_FOUND TRUE)
33 | ament_lint_auto_find_test_dependencies()
34 | endif()
35 |
36 | ament_package()
37 |
--------------------------------------------------------------------------------
/llm_bringup/launch/audio_in_text_out_demo.launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This launch file is a part of ROS-LLM project developed to control and interact with the turtlesim robot or your own robot.
21 | # The launch file contains a LaunchDescription object which defines the ROS2 nodes to be executed.
22 | #
23 | # Author: Herman Ye @Auromix
24 |
25 | from launch import LaunchDescription
26 | from launch_ros.actions import Node
27 | from launch.actions import DeclareLaunchArgument
28 | from launch.substitutions import LaunchConfiguration
29 |
30 |
31 | def generate_launch_description():
32 | return LaunchDescription(
33 | [
34 | Node(
35 | package="llm_input",
36 | executable="llm_audio_input",
37 | name="llm_audio_input",
38 | output="screen",
39 | ),
40 | Node(
41 | package="llm_model",
42 | executable="chatgpt",
43 | name="chatgpt",
44 | output="screen",
45 | ),
46 | ]
47 | )
48 |
--------------------------------------------------------------------------------
/llm_robot/llm_robot/readme.md:
--------------------------------------------------------------------------------
1 | # llm_robot
2 |
3 | ## Package Description
4 | The `llm_robot` package provides a function interface for robots. It allows users to create a robot behavior transfer node and add the `ROS-LLM` function interface to their own robot.
5 |
6 | ## turtle_robot.py
7 | The `turtle_robot.py` script is a ROS node that enables the control of a robot in turtlesim. The `TurtleRobot` class extends the `Node` class and defines the behavior of the turtle robot. It has the following attributes:
8 | * A publisher for publishing `Twist` messages to control the turtle's movement.
9 | * A client for calling an `Empty` service to reset the turtlesim.
10 | * A server for handling requests to call functions on the turtle robot.
11 |
12 | The `TurtleRobot` class has the following methods:
13 | * The `function_call_callback` method is the callback function for the `ChatGPT` service. It is responsible for executing the requested function on the turtle robot and returning the result to the client.
14 | * The `publish_cmd_vel` method publishes `Twist` messages to control the turtle's motion.
15 | * The `reset_turtlesim` method calls the reset service using the client attribute, which resets the turtlesim to its initial state.
16 |
17 | This example demonstrates how to simulate function calls for any robot, such as velocity control and other service commands. By modifying the content of this file, a calling interface can be created for the function calls of any robot. The Python script creates a ROS 2 node that controls the movement of the turtlesim by creating a publisher for `cmd_vel` messages and a client for the reset service, and it includes a ChatGPT function call server that can call various functions to control the turtlesim and return the result of each function call as a string.
18 | 
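19 | The skeleton below is a rough sketch of the structure described above. Standard turtlesim names are used for the `cmd_vel` topic and the `reset` service; the function-call service name and other details are illustrative assumptions, not the exact values in `turtle_robot.py`:
20 | 
21 | ```python
22 | import rclpy
23 | from rclpy.node import Node
24 | from geometry_msgs.msg import Twist
25 | from std_srvs.srv import Empty
26 | 
27 | from llm_interfaces.srv import ChatGPT
28 | 
29 | 
30 | class TurtleRobot(Node):
31 |     def __init__(self):
32 |         super().__init__("turtle_robot")
33 |         # Publisher that drives the turtlesim turtle
34 |         self.cmd_vel_publisher = self.create_publisher(Twist, "/turtle1/cmd_vel", 10)
35 |         # Client for the turtlesim reset service
36 |         self.reset_client = self.create_client(Empty, "/reset")
37 |         # Server that handles function-call requests (service name is illustrative)
38 |         self.function_call_server = self.create_service(
39 |             ChatGPT, "/ChatGPT_function_call_service", self.function_call_callback
40 |         )
41 | 
42 |     def function_call_callback(self, request, response):
43 |         # Parse request.request_text, run the matching robot function,
44 |         # and report the outcome back to the caller as a string.
45 |         response.response_text = "ok"
46 |         return response
47 | 
48 |     def publish_cmd_vel(self, linear_x, angular_z):
49 |         msg = Twist()
50 |         msg.linear.x = float(linear_x)
51 |         msg.angular.z = float(angular_z)
52 |         self.cmd_vel_publisher.publish(msg)
53 | 
54 |     def reset_turtlesim(self):
55 |         self.reset_client.wait_for_service()
56 |         return self.reset_client.call_async(Empty.Request())
57 | 
58 | 
59 | def main():
60 |     rclpy.init()
61 |     rclpy.spin(TurtleRobot())
62 |     rclpy.shutdown()
63 | ```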
--------------------------------------------------------------------------------
/llm_model/readme.md:
--------------------------------------------------------------------------------
1 | # llm_model
2 | ## Package Description
3 |
4 | The `llm_model` package is a ROS package that provides a conversational interface using the OpenAI API. The package includes a node called `ChatGPTNode` which interacts with the ChatGPT service to implement conversational interactions.
5 |
6 | ## chatgpt.py
7 |
8 | This file contains the implementation of the `ChatGPTNode` node, which is responsible for providing a conversational interface using the OpenAI API. The node implements the ChatGPT service callback function `llm_callback`, which is called whenever a client sends a request to the ChatGPT service.
9 |
10 | The `ChatGPTNode` node also includes a client function `function_call_client` and a publisher `output_publisher`. The `function_call_client` function is used to call other functions using ROS Service, while the `output_publisher` publishes messages to a topic.
11 |
12 | The `chatgpt.py` file also includes a function called `add_message_to_history` to update chat history records. The file writes chat history records to a JSON file using Python's JSON library.
13 |
14 | Overall, the `chatgpt.py` file provides a ROS-based conversational interface with the OpenAI API, allowing users to interact with a chatbot.
15 |
16 |
17 | ## Usage
18 | To test the model node together with the `turtle_robot.py` robot node, use the following ROS 2 command line to send a velocity request that **makes the turtlesim rotate**:
19 | ```bash
20 | ros2 service call /ChatGPT_service llm_interfaces/srv/ChatGPT '{request_text: "Let the turtlesim rotate counterclockwise at a great angular velocity of 50 rad/s and move forward at a certain linear velocity"}'
21 | ```
22 | **Reset the turtlesim**
23 | ```bash
24 | ros2 service call /ChatGPT_service llm_interfaces/srv/ChatGPT '{request_text: "I want the little turtle to go back to where it started."}'
25 | ```
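26 | 
27 | As a rough illustration of the chat history mechanism described above, appending one message to a JSON history file might look like the sketch below. The file layout, function signature, and `max_length` parameter are assumptions for illustration, not the exact implementation in `chatgpt.py`:
28 | 
29 | ```python
30 | import json
31 | 
32 | 
33 | def add_message_to_history(history_file, role, content, max_length=20):
34 |     """Append one chat message to a JSON history file, trimming old entries."""
35 |     try:
36 |         with open(history_file, "r", encoding="utf-8") as f:
37 |             history = json.load(f)
38 |     except (FileNotFoundError, json.JSONDecodeError):
39 |         history = []
40 | 
41 |     history.append({"role": role, "content": content})
42 |     history = history[-max_length:]  # enforce the maximum history length
43 | 
44 |     with open(history_file, "w", encoding="utf-8") as f:
45 |         json.dump(history, f, ensure_ascii=False, indent=2)
46 |     return history
47 | ```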
--------------------------------------------------------------------------------
/llm_install/config_openai_api_key.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This script will add your OpenAI API_KEY to your .bashrc file.
21 | #
22 | # Author: Herman Ye @Auromix
23 |
24 | echo "This script will add your OpenAI API_KEY to your .bashrc file."
25 |
26 | # Ask user for OpenAI API key
27 | read -rp "Enter your OpenAI API key: " API_KEY
28 |
29 | # Check if OPENAI_API_KEY already exists in .bashrc file
30 | if grep -q "export OPENAI_API_KEY" ~/.bashrc; then
31 | # Confirm with the user before removing the existing OPENAI_API_KEY
32 | echo "Existing OPENAI_API_KEY found in .bashrc file."
33 | read -rp "Are you sure you want to replace the existing OPENAI_API_KEY in your .bashrc file? (y/n) " confirm
34 | if [[ "$confirm" =~ ^[Yy]$ ]]; then
35 | # Remove existing OPENAI_API_KEY from .bashrc file
36 | sed -i "/export OPENAI_API_KEY/d" "$HOME/.bashrc"
37 | echo "Existing OPENAI_API_KEY was removed from .bashrc file."
38 | # Append OPENAI_API_KEY to the end of .bashrc file
39 | echo "export OPENAI_API_KEY=$API_KEY" >> "$HOME/.bashrc"
40 | source "$HOME/.bashrc"
41 | echo "Added OPENAI_API_KEY=$API_KEY to .bashrc file."
42 | echo "Configuration complete."
43 | else
44 | echo "No changes were made."
45 | fi
46 | else
47 | # If OPENAI_API_KEY not found, add it to the end of .bashrc file
48 | echo "export OPENAI_API_KEY=$API_KEY" >> "$HOME/.bashrc"
49 | source "$HOME/.bashrc"
50 | echo "Added OPENAI_API_KEY=$API_KEY to .bashrc file."
51 | echo "Configuration complete."
52 | fi
53 |
54 |
55 | # Wait for user to exit
56 | read -n 1 -r -p "Press any key to exit..."
57 | exit 0
--------------------------------------------------------------------------------
/llm_bringup/launch/chatgpt_with_turtle_robot.launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This launch file is a part of ROS-LLM project developed to control and interact with the turtlesim robot or your own robot.
21 | # The launch file contains a LaunchDescription object which defines the ROS2 nodes to be executed.
22 | #
23 | # Node test Method:
24 | # ros2 launch llm_bringup chatgpt_with_turtle_robot.launch.py
25 | # ros2 topic pub /llm_state std_msgs/msg/String "data: 'listening'" -1
26 |
27 | # Author: Herman Ye @Auromix
28 |
29 | from launch import LaunchDescription
30 | from launch_ros.actions import Node
31 | from launch.actions import DeclareLaunchArgument
32 | from launch.substitutions import LaunchConfiguration
33 |
34 |
35 | def generate_launch_description():
36 | return LaunchDescription(
37 | [
38 | Node(
39 | package="llm_input",
40 | executable="llm_audio_input",
41 | name="llm_audio_input",
42 | output="screen",
43 | ),
44 | Node(
45 | package="llm_model",
46 | executable="chatgpt",
47 | name="chatgpt",
48 | output="screen",
49 | ),
50 | Node(
51 | package="llm_output",
52 | executable="llm_audio_output",
53 | name="llm_audio_output",
54 | output="screen",
55 | ),
56 | Node(
57 | package="llm_robot",
58 | executable="turtle_robot",
59 | name="turtle_robot",
60 | output="screen",
61 | ),
62 | Node(
63 | package="turtlesim",
64 | executable="turtlesim_node",
65 | name="turtlesim_node",
66 | output="screen",
67 | ),
68 | ]
69 | )
70 |
--------------------------------------------------------------------------------
/llm_bringup/launch/chatgpt_with_multi_robot.launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This launch file is a part of ROS-LLM project developed to control and interact with the turtlesim robot or your own robot.
21 | # The launch file contains a LaunchDescription object which defines the ROS2 nodes to be executed.
22 | #
23 | # Node test Method:
24 | # ros2 launch llm_bringup chatgpt_with_multi_robot.launch.py
25 | # ros2 topic pub /llm_state std_msgs/msg/String "data: 'listening'" -1
26 |
27 | # Author: Herman Ye @Auromix
28 |
29 | from launch import LaunchDescription
30 | from launch_ros.actions import Node
31 | from launch.actions import DeclareLaunchArgument
32 | from launch.substitutions import LaunchConfiguration
33 | from launch.actions import IncludeLaunchDescription
34 | from launch.substitutions import PathJoinSubstitution
35 | from launch.launch_description_sources import PythonLaunchDescriptionSource
36 | from launch_ros.substitutions import FindPackageShare
37 |
38 |
39 | def generate_launch_description():
40 |
41 | return LaunchDescription(
42 | [
43 | Node(
44 | package="llm_input",
45 | executable="llm_audio_input",
46 | name="llm_audio_input",
47 | output="screen",
48 | ),
49 | Node(
50 | package="llm_model",
51 | executable="chatgpt",
52 | name="chatgpt",
53 | output="screen",
54 | ),
55 | Node(
56 | package="llm_output",
57 | executable="llm_audio_output",
58 | name="llm_audio_output",
59 | output="screen",
60 | ),
61 | Node(
62 | package="llm_robot",
63 | executable="multi_robot",
64 | name="multi_robot",
65 | output="screen",
66 | ),
67 | ]
68 | )
69 |
--------------------------------------------------------------------------------
/llm_bringup/launch/local_chatgpt_with_multi_robot.launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This launch file is part of the ROS-LLM project, developed to control and interact with the turtlesim robot or your own robot.
21 | # The launch file contains a LaunchDescription object which defines the ROS2 nodes to be executed.
22 | #
23 | # Node test Method:
24 | #   ros2 launch llm_bringup local_chatgpt_with_multi_robot.launch.py
25 | # ros2 topic pub /llm_state std_msgs/msg/String "data: 'listening'" -1
26 |
27 | # Author: Herman Ye @Auromix
28 |
29 | from launch import LaunchDescription
30 | from launch_ros.actions import Node
31 | from launch.actions import DeclareLaunchArgument
32 | from launch.substitutions import LaunchConfiguration
33 | from launch.actions import IncludeLaunchDescription
34 | from launch.substitutions import PathJoinSubstitution
35 | from launch.launch_description_sources import PythonLaunchDescriptionSource
36 | from launch_ros.substitutions import FindPackageShare
37 |
38 |
39 | def generate_launch_description():
40 |
41 | return LaunchDescription(
42 | [
43 | Node(
44 | package="llm_input",
45 | executable="llm_audio_input_local",
46 | name="llm_audio_input_local",
47 | output="screen",
48 | ),
49 | Node(
50 | package="llm_model",
51 | executable="chatgpt",
52 | name="chatgpt",
53 | output="screen",
54 | ),
55 | Node(
56 | package="llm_output",
57 | executable="llm_audio_output",
58 | name="llm_audio_output",
59 | output="screen",
60 | ),
61 | Node(
62 | package="llm_robot",
63 | executable="multi_robot",
64 | name="multi_robot",
65 | output="screen",
66 | ),
67 | ]
68 | )
69 |
--------------------------------------------------------------------------------
/llm_bringup/launch/chatgpt_with_arm_robot.launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This launch file is part of the ROS-LLM project, developed to control and interact with the turtlesim robot or your own robot.
21 | # The launch file contains a LaunchDescription object which defines the ROS2 nodes to be executed.
22 | #
23 | # Node test Method:
24 | # ros2 launch llm_bringup chatgpt_with_arm_robot.launch.py
25 | # ros2 topic pub /llm_state std_msgs/msg/String "data: 'listening'" -1
26 |
27 | # Author: Herman Ye @Auromix
28 |
29 | from launch import LaunchDescription
30 | from launch_ros.actions import Node
31 | from launch.actions import DeclareLaunchArgument
32 | from launch.substitutions import LaunchConfiguration
33 | from launch.actions import IncludeLaunchDescription
34 | from launch.substitutions import PathJoinSubstitution
35 | from launch.launch_description_sources import PythonLaunchDescriptionSource
36 | from launch_ros.substitutions import FindPackageShare
37 |
38 |
39 | def generate_launch_description():
40 | arx5_bringup_launch_path = PathJoinSubstitution([FindPackageShare('arx5_bringup'), 'launch', 'bringup.launch.py'])
41 |     arx5_bringup_launch = IncludeLaunchDescription(
42 |         PythonLaunchDescriptionSource(arx5_bringup_launch_path))
43 |
44 | return LaunchDescription(
45 | [
46 | arx5_bringup_launch,
47 | Node(
48 | package="llm_input",
49 | executable="llm_audio_input",
50 | name="llm_audio_input",
51 | output="screen",
52 | ),
53 | Node(
54 | package="llm_model",
55 | executable="chatgpt",
56 | name="chatgpt",
57 | output="screen",
58 | ),
59 | Node(
60 | package="llm_output",
61 | executable="llm_audio_output",
62 | name="llm_audio_output",
63 | output="screen",
64 | ),
65 | Node(
66 | package="llm_robot",
67 | executable="arm_robot",
68 | name="arm_robot",
69 | output="screen",
70 | ),
71 | ]
72 | )
73 |
--------------------------------------------------------------------------------
/llm_bringup/launch/arx5_with_turtlesim.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This launch file is part of the ROS-LLM project, developed to control and interact with the turtlesim robot or your own robot.
21 | # The launch file contains a LaunchDescription object which defines the ROS2 nodes to be executed.
22 | #
23 | # Node test Method:
24 | #   ros2 launch llm_bringup arx5_with_turtlesim.py
25 | # ros2 topic pub /llm_state std_msgs/msg/String "data: 'listening'" -1
26 |
27 | # Author: Herman Ye @Auromix
28 |
29 | from launch import LaunchDescription
30 | from launch_ros.actions import Node
31 | from launch.actions import DeclareLaunchArgument
32 | from launch.substitutions import LaunchConfiguration
33 | from launch.actions import IncludeLaunchDescription
34 | from launch.substitutions import PathJoinSubstitution
35 | from launch.launch_description_sources import PythonLaunchDescriptionSource
36 | from launch_ros.substitutions import FindPackageShare
37 |
38 |
39 | def generate_launch_description():
40 | arx5_bringup_launch_path = PathJoinSubstitution([FindPackageShare('arx5_bringup'), 'launch', 'bringup.launch.py'])
41 |     arx5_bringup_launch = IncludeLaunchDescription(
42 |         PythonLaunchDescriptionSource(arx5_bringup_launch_path))
43 |
44 | return LaunchDescription(
45 | [
46 | arx5_bringup_launch,
47 | Node(
48 | package="llm_input",
49 | executable="llm_audio_input",
50 | name="llm_audio_input",
51 | output="screen",
52 | ),
53 | Node(
54 | package="llm_model",
55 | executable="chatgpt",
56 | name="chatgpt",
57 | output="screen",
58 | ),
59 | Node(
60 | package="llm_output",
61 | executable="llm_audio_output",
62 | name="llm_audio_output",
63 | output="screen",
64 | ),
65 | Node(
66 | package="llm_robot",
67 | executable="multi_robot",
68 | name="multi_robot",
69 | output="screen",
70 | ),
71 |
72 | Node(
73 | package="turtlesim",
74 | executable="turtlesim_node",
75 | name="turtlesim_node",
76 | output="screen",
77 | ),
78 | ]
79 | )
80 |
--------------------------------------------------------------------------------
/llm_robot/llm_robot/arx5_arm_robot.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This example demonstrates simulating function calls for any robot,
21 | # such as controlling a robot arm and other service commands.
22 | # By modifying the content of this file,
23 | # a calling interface can be created for the function calls of any robot.
24 | # The Python script creates a ROS 2 node
25 | # that controls the movement of the ARX5 arm
26 | # by publishing Float64MultiArray target poses to the /target_pose topic.
27 | # It also includes a ChatGPT function call server
28 | # that can call various functions to control the arm
29 | # and return the result of the function call as a string.
30 | #
31 | # Author: Herman Ye @Auromix
32 |
33 | # ROS related
34 | import rclpy
35 | from rclpy.node import Node
36 | from llm_interfaces.srv import ChatGPT
37 | from std_msgs.msg import Float64MultiArray, MultiArrayDimension, MultiArrayLayout
38 | from std_srvs.srv import Empty
39 |
40 | # LLM related
41 | import json
42 | import os
43 | from llm_config.user_config import UserConfig
44 |
45 | # Global Initialization
46 | config = UserConfig()
47 |
48 |
49 | class ArmRobot(Node):
50 | def __init__(self):
51 | super().__init__("arm_robot")
52 |
53 | # Publisher for target_pose
54 | self.target_pose_publisher = self.create_publisher(
55 | Float64MultiArray, "/target_pose", 10
56 | )
57 |
58 | # Server for function call
59 | self.function_call_server = self.create_service(
60 | ChatGPT, "/ChatGPT_function_call_service", self.function_call_callback
61 | )
62 | # Node initialization log
63 | self.get_logger().info("ArmRobot node has been initialized")
64 |
65 | def function_call_callback(self, request, response):
66 | req = json.loads(request.request_text)
67 | function_name = req["name"]
68 | function_args = json.loads(req["arguments"])
69 | func_obj = getattr(self, function_name)
70 | try:
71 | function_execution_result = func_obj(**function_args)
72 | except Exception as error:
73 | self.get_logger().info(f"Failed to call function: {error}")
74 | response.response_text = str(error)
75 | else:
76 | response.response_text = str(function_execution_result)
77 | return response
78 |
79 | def publish_target_pose(self, **kwargs):
80 | """
81 | Publishes target_pose message to control the movement of arx5_arm
82 | """
83 |
84 | x_value = kwargs.get("x", 0.2)
85 | y_value = kwargs.get("y", 0.2)
86 | z_value = kwargs.get("z", 0.2)
87 |
88 | roll_value = kwargs.get("roll", 0.2)
89 | pitch_value = kwargs.get("pitch", 0.2)
90 | yaw_value = kwargs.get("yaw", 0.2)
91 |
92 | pose = [x_value, y_value, z_value, roll_value, pitch_value, yaw_value]
93 | pose_str = ', '.join(map(str, pose))
94 |         # Publish the target pose once via the ros2 CLI (one-shot publish)
95 |         command = f"ros2 topic pub /target_pose std_msgs/msg/Float64MultiArray '{{data: [{pose_str}]}}' -1"
96 |         os.system(command)
97 |         self.get_logger().info(f"Published target pose successfully: {pose}")
98 | return pose_str
99 |
100 |
101 |
102 | def main():
103 | rclpy.init()
104 | arm_robot = ArmRobot()
105 | rclpy.spin(arm_robot)
106 | rclpy.shutdown()
107 |
108 |
109 | if __name__ == "__main__":
110 | main()
111 |
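For quick testing of the function-call path above, a minimal client sketch is shown below. It only assumes what this file already uses: the `llm_interfaces/srv/ChatGPT` service with `request_text`/`response_text` string fields and the OpenAI-style payload (a function `name` plus JSON-encoded `arguments`); the pose values are illustrative.

```python
# Minimal sketch: call the /ChatGPT_function_call_service offered by ArmRobot above.
import json

import rclpy
from rclpy.node import Node
from llm_interfaces.srv import ChatGPT


def main():
    rclpy.init()
    node = Node("arm_function_call_test_client")
    client = node.create_client(ChatGPT, "/ChatGPT_function_call_service")
    client.wait_for_service()

    request = ChatGPT.Request()
    # OpenAI-style function call: a name plus a JSON string of arguments.
    request.request_text = json.dumps(
        {
            "name": "publish_target_pose",
            "arguments": json.dumps({"x": 0.25, "y": 0.0, "z": 0.3}),
        }
    )
    future = client.call_async(request)
    rclpy.spin_until_future_complete(node, future)
    node.get_logger().info(f"Service response: {future.result().response_text}")

    node.destroy_node()
    rclpy.shutdown()


if __name__ == "__main__":
    main()
```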
--------------------------------------------------------------------------------
/llm_output/llm_output/llm_audio_output.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This node subscribes to /llm_feedback_to_user, synthesizes the received text to speech with AWS Polly, plays it back, and then sets the LLM state back to "listening".
21 | # Node test Method:
22 | # ros2 run llm_output llm_audio_output
23 | # ros2 topic pub /llm_feedback_to_user std_msgs/msg/String "data: 'Hello, welcome to ROS-LLM'" -1
24 | #
25 | # Author: Herman Ye @Auromix
26 |
27 | # Other libraries
28 | import datetime
29 | import json
30 | import requests
31 | import time
32 |
33 | # AWS ASR related
34 | import boto3
35 | import os
36 |
37 | # Audio recording related
38 | import sounddevice as sd
39 | from scipy.io.wavfile import write
40 |
41 | # ROS related
42 | import rclpy
43 | from rclpy.node import Node
44 | from std_msgs.msg import String
45 |
46 | # Global Initialization
47 | from llm_config.user_config import UserConfig
48 |
49 | config = UserConfig()
50 |
51 |
52 | class AudioOutput(Node):
53 | def __init__(self):
54 | super().__init__("audio_output")
55 |
56 | # Initialization publisher
57 | self.initialization_publisher = self.create_publisher(
58 | String, "/llm_initialization_state", 0
59 | )
60 |
61 | # LLM state publisher
62 | self.llm_state_publisher = self.create_publisher(String, "/llm_state", 0)
63 |
64 | # Feedback for user listener
65 | self.feed_back_for_user_subscriber = self.create_subscription(
66 | String, "/llm_feedback_to_user", self.feedback_for_user_callback, 10
67 | )
68 |
69 | # AWS parameters
70 | self.aws_access_key_id = config.aws_access_key_id
71 | self.aws_secret_access_key = config.aws_secret_access_key
72 | self.aws_region_name = config.aws_region_name
73 | self.aws_session = boto3.Session(
74 | aws_access_key_id=self.aws_access_key_id,
75 | aws_secret_access_key=self.aws_secret_access_key,
76 | region_name=self.aws_region_name,
77 | )
78 | # Initialization ready
79 | self.publish_string("output ready", self.initialization_publisher)
80 |
81 | def feedback_for_user_callback(self, msg):
82 | self.get_logger().info("Received text: '%s'" % msg.data)
83 |
84 | # Call AWS Polly service to synthesize speech
85 | polly_client = self.aws_session.client("polly")
86 | self.get_logger().info("Polly client successfully initialized.")
87 | response = polly_client.synthesize_speech(
88 | Text=msg.data, OutputFormat="mp3", VoiceId=config.aws_voice_id
89 | )
90 |
91 | # Save the audio output to a file
92 | output_file_path = "/tmp/speech_output.mp3"
93 | with open(output_file_path, "wb") as file:
94 | file.write(response["AudioStream"].read())
95 | # Play the audio output
96 | os.system("mpv" + " " + output_file_path)
97 | self.get_logger().info("Finished Polly playing.")
98 | self.publish_string("feedback finished", self.llm_state_publisher)
99 | self.publish_string("listening", self.llm_state_publisher)
100 |
101 | def publish_string(self, string_to_send, publisher_to_use):
102 | msg = String()
103 | msg.data = string_to_send
104 |
105 | publisher_to_use.publish(msg)
106 | self.get_logger().info(
107 | f"Topic: {publisher_to_use.topic_name}\nMessage published: {msg.data}"
108 | )
109 |
110 |
111 | def main(args=None):
112 | rclpy.init(args=args)
113 |
114 | audio_output = AudioOutput()
115 |
116 | rclpy.spin(audio_output)
117 |
118 | audio_output.destroy_node()
119 | rclpy.shutdown()
120 |
121 |
122 | if __name__ == "__main__":
123 | main()
124 |
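To verify AWS credentials independently of ROS, the Polly step used by this node can be exercised on its own. A minimal sketch, assuming `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` are exported (see llm_install/config_aws.sh) and using the same region and voice defaults as llm_config/user_config.py:

```python
# Minimal sketch: synthesize speech with AWS Polly outside ROS, mirroring the node above.
import os

import boto3

session = boto3.Session(
    aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
    aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
    region_name="ap-southeast-1",  # default region from user_config.py
)
polly = session.client("polly")
response = polly.synthesize_speech(
    Text="Hello, welcome to ROS-LLM", OutputFormat="mp3", VoiceId="Ivy"
)
with open("/tmp/speech_output.mp3", "wb") as f:
    f.write(response["AudioStream"].read())
# Play the file with any audio player, e.g. the mpv command used by the node.
```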
--------------------------------------------------------------------------------
/llm_robot/llm_robot/turtle_robot.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This example demonstrates simulating function calls for any robot,
21 | # such as controlling velocity and other service commands.
22 | # By modifying the content of this file,
23 | # a calling interface can be created for the function calls of any robot.
24 | # The Python script creates a ROS 2 Node
25 | # that controls the movement of the TurtleSim
26 | # by creating a publisher for cmd_vel messages and a client for the reset service.
27 | # It also includes a ChatGPT function call server
28 | # that can call various functions to control the TurtleSim
29 | # and return the result of the function call as a string.
30 | #
31 | # Author: Herman Ye @Auromix
32 |
33 | # ROS related
34 | import rclpy
35 | from rclpy.node import Node
36 | from llm_interfaces.srv import ChatGPT
37 | from geometry_msgs.msg import Twist
38 | from std_srvs.srv import Empty
39 |
40 | # LLM related
41 | import json
42 | from llm_config.user_config import UserConfig
43 |
44 | # Global Initialization
45 | config = UserConfig()
46 |
47 |
48 | class TurtleRobot(Node):
49 | def __init__(self):
50 | super().__init__("turtle_robot")
51 | # Client for reset
52 | self.reset_client = self.create_client(Empty, "/reset")
53 | # Publisher for cmd_vel
54 | self.publisher_ = self.create_publisher(Twist, "/turtle1/cmd_vel", 10)
55 |
56 | while not self.reset_client.wait_for_service(timeout_sec=1.0):
57 | self.get_logger().info("Service /reset not available, waiting again...")
58 | # Server for function call
59 | self.function_call_server = self.create_service(
60 | ChatGPT, "/ChatGPT_function_call_service", self.function_call_callback
61 | )
62 | # Node initialization log
63 | self.get_logger().info("TurtleRobot node has been initialized")
64 |
65 | def function_call_callback(self, request, response):
66 | req = json.loads(request.request_text)
67 | function_name = req["name"]
68 | function_args = json.loads(req["arguments"])
69 | func_obj = getattr(self, function_name)
70 | try:
71 | function_execution_result = func_obj(**function_args)
72 | except Exception as error:
73 | self.get_logger().info(f"Failed to call function: {error}")
74 | response.response_text = str(error)
75 | else:
76 | response.response_text = str(function_execution_result)
77 | return response
78 |
79 | def publish_cmd_vel(self, **kwargs):
80 | """
81 | Publishes cmd_vel message to control the movement of turtlesim
82 | """
83 | linear_x = kwargs.get("linear_x", 0.0)
84 | linear_y = kwargs.get("linear_y", 0.0)
85 | linear_z = kwargs.get("linear_z", 0.0)
86 | angular_x = kwargs.get("angular_x", 0.0)
87 | angular_y = kwargs.get("angular_y", 0.0)
88 | angular_z = kwargs.get("angular_z", 0.0)
89 |
90 | twist_msg = Twist()
91 | twist_msg.linear.x = float(linear_x)
92 | twist_msg.linear.y = float(linear_y)
93 | twist_msg.linear.z = float(linear_z)
94 | twist_msg.angular.x = float(angular_x)
95 | twist_msg.angular.y = float(angular_y)
96 | twist_msg.angular.z = float(angular_z)
97 |
98 | self.publisher_.publish(twist_msg)
99 | self.get_logger().info(f"Publishing cmd_vel message successfully: {twist_msg}")
100 | return twist_msg
101 |
102 | def reset_turtlesim(self, **kwargs):
103 | """
104 | Resets the turtlesim to its initial state and clears the screen
105 | """
106 | empty_req = Empty.Request()
107 | try:
108 | future = self.reset_client.call_async(empty_req)
109 | response_text = "Reset turtlesim successfully"
110 | except Exception as error:
111 | self.get_logger().info(f"Failed to reset turtlesim: {error}")
112 | return str(error)
113 | else:
114 | return response_text
115 |
116 |
117 | def main():
118 | rclpy.init()
119 | turtle_robot = TurtleRobot()
120 | rclpy.spin(turtle_robot)
121 | rclpy.shutdown()
122 |
123 |
124 | if __name__ == "__main__":
125 | main()
126 |
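For the model to call `publish_cmd_vel` above, a matching function definition has to be registered in llm_config (robot_behavior.py, not shown in this section). The entry below is a hypothetical sketch of what such an OpenAI function-calling schema could look like; the actual definitions in the repository may differ.

```python
# Hypothetical sketch of an OpenAI function-calling schema entry for publish_cmd_vel.
publish_cmd_vel_function = {
    "name": "publish_cmd_vel",
    "description": "Drive the turtlesim robot by publishing a geometry_msgs/Twist message.",
    "parameters": {
        "type": "object",
        "properties": {
            "linear_x": {"type": "number", "description": "Forward velocity."},
            "angular_z": {"type": "number", "description": "Yaw rate."},
        },
        "required": [],
    },
}
```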
--------------------------------------------------------------------------------
/llm_input/llm_input/llm_audio_input_local.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This node records audio from the microphone, transcribes it locally with OpenAI Whisper, and publishes the transcribed text to /llm_input_audio_to_text.
21 | # Node test Method:
22 | # ros2 run llm_input llm_audio_input_local
23 | # ros2 topic echo /llm_input_audio_to_text
24 | # ros2 topic pub /llm_state std_msgs/msg/String "data: 'listening'" -1
25 | #
26 | # Author: Herman Ye @Auromix
27 |
28 |
29 | # Open Whisper related
30 | import whisper
31 |
32 | # Audio recording related
33 | import sounddevice as sd
34 | from scipy.io.wavfile import write
35 |
36 | # ROS related
37 | import rclpy
38 | from rclpy.node import Node
39 | from std_msgs.msg import String
40 |
41 | # Global Initialization
42 | from llm_config.user_config import UserConfig
43 |
44 | config = UserConfig()
45 |
46 |
47 | class AudioInput(Node):
48 | def __init__(self):
49 | super().__init__("llm_audio_input")
50 | # tmp audio file
51 | self.tmp_audio_file = "/tmp/user_audio_input.flac"
52 |
53 | # Initialization publisher
54 | self.initialization_publisher = self.create_publisher(
55 | String, "/llm_initialization_state", 0
56 | )
57 |
58 | # LLM state publisher
59 | self.llm_state_publisher = self.create_publisher(String, "/llm_state", 0)
60 |
61 | # LLM state listener
62 | self.llm_state_subscriber = self.create_subscription(
63 | String, "/llm_state", self.state_listener_callback, 0
64 | )
65 |
66 | self.audio_to_text_publisher = self.create_publisher(
67 | String, "/llm_input_audio_to_text", 0
68 | )
69 | # Initialization ready
70 | self.publish_string("llm_audio_input", self.initialization_publisher)
71 |
72 | def state_listener_callback(self, msg):
73 | if msg.data == "listening":
74 | self.get_logger().info(f"STATE: {msg.data}")
75 | self.action_function_listening()
76 |
77 | def action_function_listening(self):
78 | # Recording settings
79 | duration = config.duration # Audio recording duration, in seconds
80 | sample_rate = config.sample_rate # Sample rate
81 | volume_gain_multiplier = config.volume_gain_multiplier # Volume gain multiplier
82 |
83 | # Step 1: Record audio
84 | self.get_logger().info("Start local recording...")
85 | audio_data = sd.rec(
86 | int(duration * sample_rate), samplerate=sample_rate, channels=1
87 | )
88 | sd.wait() # Wait until recording is finished
89 |
90 | # Step 2: Increase the volume by a multiplier
91 | audio_data *= volume_gain_multiplier
92 |
93 | # Step 3: Save audio to file
94 | write(self.tmp_audio_file, sample_rate, audio_data)
95 | self.get_logger().info("Stop local recording!")
96 |
97 | # action_function_input_processing
98 | self.publish_string("input_processing", self.llm_state_publisher)
99 |
100 |         # Step 4: Load the local OpenAI Whisper model
101 |         whisper_model = whisper.load_model(config.whisper_model_size)
102 |
103 |         # Step 5: Transcribe the recorded audio locally
104 |         self.get_logger().info("Local Converting...")
105 |
106 |         # Step 6: Get the transcribed text
107 |         whisper_result = whisper_model.transcribe(self.tmp_audio_file, language=config.whisper_language)
108 |
109 | transcript_text = whisper_result["text"]
110 | self.get_logger().info("Audio to text conversion complete!")
111 |
112 |         # Step 7: Publish the transcribed text to ROS2
113 | if transcript_text == "": # Empty input
114 | self.get_logger().info("Empty input!")
115 | self.publish_string("listening", self.llm_state_publisher)
116 | else:
117 | self.publish_string(transcript_text, self.audio_to_text_publisher)
118 |
119 | def publish_string(self, string_to_send, publisher_to_use):
120 | msg = String()
121 | msg.data = string_to_send
122 |
123 | publisher_to_use.publish(msg)
124 | self.get_logger().info(
125 | f"Topic: {publisher_to_use.topic_name}\nMessage published: {msg.data}"
126 | )
127 |
128 |
129 | def main(args=None):
130 | rclpy.init(args=args)
131 |
132 | audio_input = AudioInput()
133 |
134 | rclpy.spin(audio_input)
135 |
136 | audio_input.destroy_node()
137 | rclpy.shutdown()
138 |
139 |
140 | if __name__ == "__main__":
141 | main()
142 |
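The Whisper step above can be tried outside ROS to check that the model downloads and runs on the target machine. A minimal sketch, assuming `openai-whisper` is installed and a recording exists at the same temporary path the node uses:

```python
# Minimal sketch: run the same Whisper transcription as the node above, outside ROS.
import whisper

model = whisper.load_model("medium")  # matches the default whisper_model_size in user_config.py
result = model.transcribe("/tmp/user_audio_input.flac", language="en")
print(result["text"])
```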
--------------------------------------------------------------------------------
/llm_config/llm_config/user_config.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This is a configuration file for a conversational AI assistant
21 | # that uses the OpenAI API for generating responses.
22 | #
23 | # The user can specify the OpenAI language model to be used, the organization
24 | # under which their API key is registered (if applicable), and several parameters
25 | # that affect the creativity and coherence of the AI's responses, such as the
26 | # temperature, top probability cutoff, and frequency and presence penalties.
27 | #
28 | # The user can also specify the prompt given to the AI, the prefix for the AI's response,
29 | # and the maximum number of tokens and length allowed in the response.
30 | #
31 | # The chat history can be stored in a JSON file, with a maximum length limit.
32 | #
33 | # The assistant's behavior can be customized using a RobotBehavior object
34 | # and a list of robot functions.
35 | #
36 | # The API credentials for Amazon AWS are provided, along with parameters
37 | # for AWS S3, Transcribe, and Polly services, and parameters for audio recording.
38 | #
39 | # Author: Herman Ye @Auromix
40 |
41 | from .robot_behavior import RobotBehavior
42 | import os
43 |
44 |
45 | class UserConfig:
46 | def __init__(self):
47 | # OpenAI API related
48 | # [required]: OpenAI API key
49 | self.openai_api_key = os.getenv("OPENAI_API_KEY")
50 | # [required]: Name of the OpenAI language model to be used
51 | self.openai_model = "gpt-3.5-turbo-0613"
52 | # self.openai_model="gpt-4-0613"
53 | # [optional]: Name of the organization under which the OpenAI API key is registered
54 | self.openai_organization = "Auromix"
55 | # [optional]: Controls the creativity of the AI’s responses. Higher values lead to more creative, but less coherent, responses
56 | self.openai_temperature = 1
57 | # [optional]: Probability distribution cutoff for generating responses
58 | self.openai_top_p = 1
59 | # [optional]: Number of responses to generate in batch
60 | self.openai_n = 1
61 | # [optional]: Whether to stream response results or not
62 | self.openai_stream = False
63 | # [optional]: String that if present in the AI's response, marks the end of the response
64 | self.openai_stop = "NULL"
65 |         # [optional]: Maximum number of tokens allowed in the AI's response
66 | self.openai_max_tokens = 4000
67 | # self.openai_max_tokens= 16000
68 |         # [optional]: Value that encourages the AI to generate more diverse responses by penalizing repeated tokens
69 | self.openai_frequency_penalty = 0
70 |         # [optional]: Value that encourages the AI to introduce new topics by penalizing tokens that have already appeared
71 | self.openai_presence_penalty = 0
72 |
73 | # IO related
74 | # [optional]: The prompt given to the AI, provided by the user
75 | self.user_prompt = ""
76 | # [optional]: The generated prompt by the administrator, used as a prefix for the AI's response
77 | self.system_prompt = ""
78 |         # TODO: The system prompt only applies to the first message, so it is soon forgotten as the chat history grows.
79 |         # Modify llm_model/chatgpt.py to add the system prompt to every request to solve this problem. @Herman Ye
80 | # [optional]: The generated response provided by the AI
81 | self.assistant_response = ""
82 |
83 | # Chat history related
84 | # [optional]: The chat history, including the user prompt, system prompt, and assistant response
85 | self.chat_history = [{"role": "system", "content": self.system_prompt}]
86 | # [optional]: The path to the chat history JSON file
87 | self.chat_history_path = os.path.expanduser("~")
88 | # self.chat_history_path = os.path.dirname(os.path.abspath(__file__))
89 | # [optional]: The limit of the chat history length
90 | self.chat_history_max_length = 4000
91 | # self.chat_history_max_length=16000
92 |
93 | # Robot behavior related
94 | # [optional]: The robot behavior object
95 | self.robot_behavior = RobotBehavior()
96 | # [optional]: The robot functions list
97 | self.robot_functions_list = self.robot_behavior.robot_functions_list
98 | # [optional]: Multi-robot list
99 | # "" is for robot without name
100 |         self.multi_robots_name = ["turtle1", "turtle2", "minipupper", ""]
101 |
102 | # AWS related
103 | # [required]: AWS IAM access key id
104 | self.aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID")
105 | # [required]: AWS IAM secret access key
106 | self.aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY")
107 | # [required]: AWS IAM region name
108 | self.aws_region_name = 'ap-southeast-1'
109 | # [required]: AWS S3 bucket name
110 | self.bucket_name = 'auromixbucket'
111 | # [optional]: AWS transcription language, change this to 'zh-CN' for Chinese
112 | self.aws_transcription_language = "en-US"
113 | # [optional]: AWS polly voice id, change this to 'Zhiyu' for Chinese
114 | self.aws_voice_id = "Ivy"
115 |
116 | # OpenAI Whisper Model size related
117 | # [optional]: OpenAI Whisper Model size: tiny base small medium large
118 | self.whisper_model_size = "medium"
119 | # [optional]: OpenAI Whisper Model language: en
120 |         self.whisper_language = "en"
121 | # Audio recording related
122 | # [optional]: Audio recording duration, in seconds
123 | self.duration = 5
124 | # [optional]: Audio recording sample rate, in Hz
125 | self.sample_rate = 16000
126 | # [optional]: Audio recording gain multiplier
127 | # Change this to increase or decrease the volume
128 | self.volume_gain_multiplier = 1
129 |
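As the comments above suggest, switching the assistant to another language (for example Chinese) only requires changing a few of these assignments. The sketch below mutates an instance purely for illustration; in practice the values are edited in `UserConfig.__init__`, since every node constructs its own `UserConfig`. The Whisper language code `"zh"` is an assumption.

```python
# Illustrative sketch of the language-related settings to change for Chinese interaction.
from llm_config.user_config import UserConfig

config = UserConfig()
config.aws_transcription_language = "zh-CN"  # cloud ASR language (per the comment above)
config.aws_voice_id = "Zhiyu"                # Polly voice for Chinese (per the comment above)
config.whisper_language = "zh"               # local Whisper language code (assumption)
```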
--------------------------------------------------------------------------------
/llm_robot/llm_robot/multi_robot.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Author: Herman Ye @Auromix
20 |
21 | # LLM related
22 | import json
23 | import time
24 | import subprocess
25 |
26 | # ROS related
27 | import rclpy
28 | from rclpy.node import Node
29 | from std_msgs.msg import String
30 | from geometry_msgs.msg import Pose
31 | from geometry_msgs.msg import Twist
32 | from llm_interfaces.srv import ChatGPT
33 |
34 | # Global Initialization
35 | from llm_config.user_config import UserConfig
36 |
37 | config = UserConfig()
38 |
39 |
40 | class MultiRobot(Node):
41 | def __init__(self):
42 |         super().__init__("multi_robot")
43 | # Initialize publishers dictionaries
44 | self.pose_publishers = {}
45 | self.cmd_vel_publishers = {}
46 | for robot_name in config.multi_robots_name:
47 | if robot_name == "":
48 | # pose publisher
49 | self.pose_publishers[robot_name] = self.create_publisher(
50 | Pose, "/pose", 10
51 | )
52 | # cmd_vel publishers
53 | self.cmd_vel_publishers[robot_name] = self.create_publisher(
54 | Twist, "/cmd_vel", 10
55 | )
56 | else:
57 | # pose publisher
58 | self.pose_publishers[robot_name] = self.create_publisher(
59 | Pose, "/" + robot_name + "/pose", 10
60 | )
61 | # cmd_vel publishers
62 | self.cmd_vel_publishers[robot_name] = self.create_publisher(
63 | Twist, "/" + robot_name + "/cmd_vel", 10
64 | )
65 |
66 | # Server for model function call
67 | self.function_call_server = self.create_service(
68 | ChatGPT, "/ChatGPT_function_call_service", self.function_call_callback
69 | )
70 |
71 | # Initialization publisher
72 | self.initialization_publisher = self.create_publisher(
73 | String, "/llm_initialization_state", 0
74 | )
75 |
76 | # LLM state publisher
77 | self.llm_state_publisher = self.create_publisher(String, "/llm_state", 0)
78 |
79 | # Initialization ready
80 | self.publish_string("robot", self.initialization_publisher)
81 |
82 | def function_call_callback(self, request, response):
83 | req = json.loads(request.request_text)
84 | function_name = req["name"]
85 | function_args = json.loads(req["arguments"])
86 | func_obj = getattr(self, function_name)
87 | try:
88 | function_execution_result = func_obj(**function_args)
89 | except Exception as error:
90 | self.get_logger().info(f"Failed to call function: {error}")
91 | response.response_text = str(error)
92 | else:
93 | response.response_text = str(function_execution_result)
94 | return response
95 |
96 | def call_service(self, **kwargs):
97 |         # Warning: Only services with empty requests are supported
98 | # TODO: Add support for non-empty input service
99 | service_name = kwargs.get("service_name", "")
100 | service_type = kwargs.get("service_type", "")
101 | command = ["ros2", "service", "call", service_name, service_type]
102 |
103 | try:
104 | command_result = subprocess.check_output(command, stderr=subprocess.STDOUT)
105 | command_result = command_result.decode("utf-8") # Convert bytes to string
106 | except subprocess.CalledProcessError as command_error:
107 | command_result = command_error.output.decode("utf-8")
108 | return command_result
109 |
110 | def publish_cmd_vel(self, **kwargs):
111 | """
112 | Publishes cmd_vel message to control the movement of all types of robots
113 | """
114 | # Get parameters
115 | robot_name = kwargs.get("robot_name", "")
116 | duration = kwargs.get("duration", 0)
117 | linear_x = kwargs.get("linear_x", 0.0)
118 | linear_y = kwargs.get("linear_y", 0.0)
119 | linear_z = kwargs.get("linear_z", 0.0)
120 | angular_x = kwargs.get("angular_x", 0.0)
121 | angular_y = kwargs.get("angular_y", 0.0)
122 | angular_z = kwargs.get("angular_z", 0.0)
123 | self.get_logger().debug(f"Received cmd_vel message: {kwargs}")
124 | # Create message
125 | twist_msg = Twist()
126 | twist_msg.linear.x = float(linear_x)
127 | twist_msg.linear.y = float(linear_y)
128 | twist_msg.linear.z = float(linear_z)
129 | twist_msg.angular.x = float(angular_x)
130 | twist_msg.angular.y = float(angular_y)
131 | twist_msg.angular.z = float(angular_z)
132 | self.get_logger().debug(f"Created cmd_vel message: {twist_msg}")
133 |
134 | # Create publisher for new robot if not exist
135 | if robot_name not in config.multi_robots_name:
136 | self.cmd_vel_publishers[robot_name] = self.create_publisher(
137 | Twist, f"/{robot_name}/cmd_vel", 10
138 | )
139 | self.get_logger().debug(f"Created new publisher for {robot_name}")
140 |         # Resolve the topic for the default (unnamed) robot
141 |         if robot_name == "":
142 |             # Keep "" as the key so the /cmd_vel publisher created in __init__ is reused
143 |             topic = "/cmd_vel"
144 |             self.get_logger().debug("Using default robot on /cmd_vel")
145 |         else:
146 |             topic = f"/{robot_name}/cmd_vel"
147 |
148 | if duration == 0:
149 | self.cmd_vel_publishers[robot_name].publish(twist_msg)
150 | else:
151 | # Publish message for duration
152 | start_time = time.time()
153 | while time.time() - start_time < duration:
154 | self.cmd_vel_publishers[robot_name].publish(twist_msg)
155 | time.sleep(0.1)
156 |
157 | # Log
158 | self.get_logger().info(f"Published {topic} message successfully: {twist_msg}")
159 |
160 | # Stop robot
161 | stop_msg = Twist()
162 | stop_msg.linear.x = 0.0
163 | stop_msg.linear.y = 0.0
164 | stop_msg.linear.z = 0.0
165 | stop_msg.angular.x = 0.0
166 | stop_msg.angular.y = 0.0
167 | stop_msg.angular.z = 0.0
168 | self.cmd_vel_publishers[robot_name].publish(stop_msg)
169 | return twist_msg
170 |
171 | def publish_string(self, string_to_send, publisher_to_use):
172 | msg = String()
173 | msg.data = string_to_send
174 |
175 | publisher_to_use.publish(msg)
176 | self.get_logger().info(
177 | f"Topic: {publisher_to_use.topic_name}\nMessage published: {msg.data}"
178 | )
179 |
180 |
181 | def main():
182 | rclpy.init()
183 | multi_robot = MultiRobot()
184 | try:
185 | rclpy.spin(multi_robot)
186 | except KeyboardInterrupt:
187 | pass
188 | finally:
189 | multi_robot.destroy_node()
190 | rclpy.shutdown()
191 |
192 |
193 | if __name__ == "__main__":
194 | main()
195 |
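The request format accepted by `function_call_callback` above mirrors the OpenAI function-calling output: a function `name` plus a JSON string of `arguments`. An illustrative payload that would drive `turtle1` forward for two seconds is sketched below; it can be sent with a client like the one shown after arx5_arm_robot.py.

```python
# Illustrative request_text for the /ChatGPT_function_call_service served by MultiRobot above.
import json

request_text = json.dumps(
    {
        "name": "publish_cmd_vel",
        "arguments": json.dumps({"robot_name": "turtle1", "duration": 2, "linear_x": 1.0}),
    }
)
print(request_text)
```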
--------------------------------------------------------------------------------
/llm_input/llm_input/llm_audio_input.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This node records audio from the microphone, uploads it to AWS S3, transcribes it with AWS Transcribe, and publishes the transcribed text to /llm_input_audio_to_text.
21 | # Node test Method:
22 | # ros2 run llm_input llm_audio_input
23 | # ros2 topic echo /llm_input_audio_to_text
24 | # ros2 topic pub /llm_state std_msgs/msg/String "data: 'listening'" -1
25 | #
26 | # Author: Herman Ye @Auromix
27 |
28 | # Other libraries
29 | import datetime
30 | import json
31 | import requests
32 | import time
33 |
34 | # AWS ASR related
35 | import boto3
36 |
37 | # Audio recording related
38 | import sounddevice as sd
39 | from scipy.io.wavfile import write
40 |
41 | # ROS related
42 | import rclpy
43 | from rclpy.node import Node
44 | from std_msgs.msg import String
45 |
46 | # Global Initialization
47 | from llm_config.user_config import UserConfig
48 |
49 | config = UserConfig()
50 |
51 |
52 | class AudioInput(Node):
53 | def __init__(self):
54 | super().__init__("llm_audio_input")
55 |
56 | # AWS service initialization
57 | self.aws_audio_file = "/tmp/user_audio_input.flac"
58 | self.aws_access_key_id = config.aws_access_key_id
59 | self.aws_secret_access_key = config.aws_secret_access_key
60 | self.aws_region_name = config.aws_region_name
61 | self.aws_session = boto3.Session(
62 | aws_access_key_id=self.aws_access_key_id,
63 | aws_secret_access_key=self.aws_secret_access_key,
64 | region_name=self.aws_region_name,
65 | )
66 |
67 | # Initialization publisher
68 | self.initialization_publisher = self.create_publisher(
69 | String, "/llm_initialization_state", 0
70 | )
71 |
72 | # LLM state publisher
73 | self.llm_state_publisher = self.create_publisher(String, "/llm_state", 0)
74 |
75 | # LLM state listener
76 | self.llm_state_subscriber = self.create_subscription(
77 | String, "/llm_state", self.state_listener_callback, 0
78 | )
79 |
80 | self.audio_to_text_publisher = self.create_publisher(
81 | String, "/llm_input_audio_to_text", 0
82 | )
83 | # Initialization ready
84 | self.publish_string("llm_audio_input", self.initialization_publisher)
85 |
86 | def state_listener_callback(self, msg):
87 | if msg.data == "listening":
88 | self.get_logger().info(f"STATE: {msg.data}")
89 | self.action_function_listening()
90 |
91 | def action_function_listening(self):
92 | # Recording settings
93 | duration = config.duration # Audio recording duration, in seconds
94 | sample_rate = config.sample_rate # Sample rate
95 | volume_gain_multiplier = config.volume_gain_multiplier # Volume gain multiplier
96 | # AWS S3 settings
97 | bucket_name = config.bucket_name
98 | audio_file_key = "gpt_audio.flac" # Name of the audio file in S3
99 | transcribe_job_name = (
100 | f'my-transcribe-job-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
101 | )
102 | # Name the conversion task based on time to ensure uniqueness
103 | # Path of the audio file in S3
104 | transcribe_job_uri = f"s3://{bucket_name}/{audio_file_key}"
105 |
106 | # Step 1: Record audio
107 | self.get_logger().info("Start recording...")
108 | audio_data = sd.rec(
109 | int(duration * sample_rate), samplerate=sample_rate, channels=1
110 | )
111 | sd.wait() # Wait until recording is finished
112 |
113 | # Step 2: Increase the volume by a multiplier
114 | audio_data *= volume_gain_multiplier
115 |
116 | # Step 3: Save audio to file
117 | write(self.aws_audio_file, sample_rate, audio_data)
118 | self.get_logger().info("Stop recording!")
119 |
120 | # action_function_input_processing
121 | self.publish_string("input_processing", self.llm_state_publisher)
122 | # Step 4: Upload audio to AWS S3
123 | s3 = self.aws_session.client("s3")
124 | self.get_logger().info("Uploading audio to AWS S3...")
125 | with open(self.aws_audio_file, "rb") as f:
126 | s3.upload_fileobj(Fileobj=f, Bucket=bucket_name, Key=audio_file_key)
127 | self.get_logger().info("Upload complete!")
128 |
129 | # Step 5: Convert audio to text
130 | transcribe = self.aws_session.client("transcribe")
131 | self.get_logger().info("Converting audio to text...")
132 | transcribe.start_transcription_job(
133 | TranscriptionJobName=transcribe_job_name,
134 | LanguageCode=config.aws_transcription_language,
135 | Media={"MediaFileUri": transcribe_job_uri},
136 | )
137 |
138 | # Step 6: Wait until the conversion is complete
139 | while True:
140 | status = transcribe.get_transcription_job(
141 | TranscriptionJobName=transcribe_job_name
142 | )
143 | if status["TranscriptionJob"]["TranscriptionJobStatus"] in [
144 | "COMPLETED",
145 | "FAILED",
146 | ]:
147 | break
148 |
149 | self.get_logger().info("Converting...")
150 | time.sleep(0.5)
151 |
152 | # Step 7: Get the transcribed text
153 | if status["TranscriptionJob"]["TranscriptionJobStatus"] == "COMPLETED":
154 | transcript_file_url = status["TranscriptionJob"]["Transcript"][
155 | "TranscriptFileUri"
156 | ]
157 | response = requests.get(transcript_file_url)
158 | transcript_data = json.loads(response.text)
159 | transcript_text = transcript_data["results"]["transcripts"][0]["transcript"]
160 | self.get_logger().info("Audio to text conversion complete!")
161 | # Step 8: Publish the transcribed text to ROS2
162 | if transcript_text == "": # Empty input
163 | self.get_logger().info("Empty input!")
164 | self.publish_string("listening", self.llm_state_publisher)
165 | else:
166 | self.publish_string(transcript_text, self.audio_to_text_publisher)
167 | # Step 9: Delete the temporary audio file from AWS S3
168 | s3.delete_object(Bucket=bucket_name, Key=audio_file_key)
169 |
170 | else:
171 | self.get_logger().error(
172 | f"Failed to transcribe audio: {status['TranscriptionJob']['FailureReason']}"
173 | )
174 |
175 | def publish_string(self, string_to_send, publisher_to_use):
176 | msg = String()
177 | msg.data = string_to_send
178 |
179 | publisher_to_use.publish(msg)
180 | self.get_logger().info(
181 | f"Topic: {publisher_to_use.topic_name}\nMessage published: {msg.data}"
182 | )
183 |
184 |
185 | def main(args=None):
186 | rclpy.init(args=args)
187 |
188 | audio_input = AudioInput()
189 |
190 | rclpy.spin(audio_input)
191 |
192 | audio_input.destroy_node()
193 | rclpy.shutdown()
194 |
195 |
196 | if __name__ == "__main__":
197 | main()
198 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://github.com/Auromix)
2 | [](http://docs.ros.org/en/humble/index.html)
3 | [](https://ubuntu.com/) [](https://github.com/Auromix/ROS-LLM/blob/ros2-humble/LICENSE)
4 | [](https://github.com/Auromix/ROS-LLM/stargazers)
5 | [](https://twitter.com/Hermanye233)
6 | # ROS-LLM
7 | The ROS-LLM project is a ROS framework for embodied intelligence applications. It enables natural language interactions and large model-based control of robot motion and navigation for any robot operating on ROS.
8 |
9 | ROS-LLM empowers you to utilize functionalities based on Large Language Models, such as GPT-4 and ChatGPT, for robot decision-making and control.
10 |
11 | This framework is designed to be easy to extend. By simply providing a function interface for your robot, following the provided example, you can integrate and use ROS-LLM within ten minutes.
12 |
13 | ROS-LLM offers a simple solution for quickly creating interactive and control experiences with any robot.
14 |
15 | 
16 | ## 🚀 Features
17 |
18 | - 🤖 **ROS Integration**: Smoothly interacts with the Robot Operating System (ROS) for expansive robotic control.
19 |
20 | - 🧠 **Large Language Models Support**: Leverages GPT-4 and ChatGPT for enhanced decision-making and task management.
21 |
22 | - 🗣️ **Natural Interaction**: Facilitates intuitive communication with robots through conversational engagement.
23 |
24 | - 🔄 **Flexible Control**: Utilizes LLM-based systems for tasks such as motion and navigation based on language model interpretation.
25 |
26 | - 🔌 **Simplified Extensibility**: Provides an easy interface for seamless robot function integration.
27 |
28 | - 🛠️ **Quick Development**: Creates interactive robot control experiences swiftly, sometimes in under ten minutes.
29 |
30 | - 📚 **Instructional Examples**: Offers comprehensive tutorials and examples for easier understanding and implementation.
31 |
32 | - 🗃️ **History Storage**: Retains local chat histories for convenient review and reference.
33 |
34 |
35 |
36 | ## 🔥 Quickstart Guide
37 |
38 | Follow the instructions below to set up ROS-LLM:
39 |
40 | **1. Clone the Repository:**
41 |
42 | Use the command below to clone the repository.
43 | ```bash
44 | git clone https://github.com/Auromix/ROS-LLM.git
45 | ```
46 |
47 | **2. Install Dependencies:**
48 |
49 | Navigate to the `llm_install` directory and execute the installation script.
50 | ```bash
51 | cd ROS-LLM/llm_install
52 | bash dependencies_install.sh
53 | ```
54 |
55 | **3. Configure OpenAI Settings:**
56 |
57 | If you don't have an OpenAI API key, you can obtain one from [OpenAI Platform](https://platform.openai.com). Use the script below to configure your OpenAI API key.
58 | ```bash
59 | cd ROS-LLM/llm_install
60 | bash config_openai_api_key.sh
61 | ```
62 |
63 | **4. Configure AWS Settings (Optional):**
64 |
65 | For cloud natural interaction capabilities, configure the AWS settings. If you prefer to use local ASR, this step can be skipped.
66 |
67 | For low-performance edge platforms, cloud ASR is recommended to reduce computing load; for high-performance personal machines, local ASR is recommended for faster responses.
68 |
69 | ```bash
70 | cd ROS-LLM/llm_install
71 | bash config_aws.sh
72 | ```
73 |
74 | **5. Configure OpenAI Whisper Settings (Optional):**
75 |
76 | For local natural interaction capabilities, configure the OpenAI Whisper settings. If you prefer to use cloud ASR, this step can be skipped.
77 |
78 | As noted above, local ASR is recommended for high-performance personal machines, while cloud ASR better suits low-performance edge platforms.
79 | ```bash
80 | pip install -U openai-whisper
81 | pip install setuptools-rust
82 | ```
83 |
84 | **6. Build the Workspace:**
85 |
86 | Navigate to your workspace directory and build the workspace.
87 | ```bash
88 | cd <your_ros2_workspace>  # the workspace that contains ROS-LLM under src/
89 | rosdep install --from-paths src --ignore-src -r -y # Install dependencies
90 | colcon build --symlink-install
91 | ```
92 |
93 | **7. Run the Demo:**
94 |
95 | Source the setup script and launch the Turtlesim demo with cloud ASR.
96 | ```bash
97 | source install/setup.bash  # run from your workspace root
98 | ros2 launch llm_bringup chatgpt_with_turtle_robot.launch.py
99 | ```
100 | Start listening:
101 | ```bash
102 | ros2 topic pub /llm_state std_msgs/msg/String "data: 'listening'" -1
103 | ```
104 |
105 | ## ⚙️ Configure Your Own Robot (Optional)
106 |
107 | To use the framework with your own robot, modify the `llm_robot` and `llm_config` packages to suit your robot's specifications. This allows you to customize the behavior of your robot.
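A minimal sketch of a custom robot node, following the pattern of `llm_robot/turtle_robot.py`, is shown below. The `wave_gripper` function is a made-up placeholder; whatever functions you expose here must also be described to the model in the `llm_config` function definitions.

```python
# Minimal sketch of a custom robot node for ROS-LLM (wave_gripper is a placeholder).
import json

import rclpy
from rclpy.node import Node
from llm_interfaces.srv import ChatGPT


class MyRobot(Node):
    def __init__(self):
        super().__init__("my_robot")
        # Same service name the llm_model node calls for function execution.
        self.create_service(
            ChatGPT, "/ChatGPT_function_call_service", self.function_call_callback
        )

    def function_call_callback(self, request, response):
        req = json.loads(request.request_text)
        args = json.loads(req["arguments"])
        response.response_text = str(getattr(self, req["name"])(**args))
        return response

    def wave_gripper(self, **kwargs):
        times = kwargs.get("times", 1)
        # Replace this with real commands for your hardware.
        return f"Waved gripper {times} time(s)"


def main():
    rclpy.init()
    rclpy.spin(MyRobot())
    rclpy.shutdown()
```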
108 |
109 |
110 | ## 🧑💻 Future Development Plans
111 |
112 | We are continuously striving to enhance ROS-LLM to better serve the developers and roboticists in the community. Below are the key developments we plan to undertake in the upcoming updates:
113 |
114 | - [ ] Agent Mechanism
115 |
116 | Adding an agent mechanism allows long sequence tasks to be well divided.
117 |
118 | - [ ] Feedback Channel from External Functions
119 |
120 | We plan to add a feedback mechanism for the robot to receive information from external functions. This would significantly assist model-based decision-making processes.
121 |
122 | - [ ] Navigation Interface
123 |
124 | A new interface for robot navigation is also in the pipeline. It will enable the utilization of this framework in navigation-oriented tasks.
125 |
126 | - [ ] Sensor Input Interface
127 |
128 | The addition of other sensor input interfaces is another major development. This will incorporate environmental perception into model decision premises, preparing for functionalities such as obstacle avoidance.
129 |
130 | - [ ] Integration with Vision-based Models like Palm-e
131 |
132 | We aim to extend the capabilities of ROS-LLM by integrating models that allow for visual input, like Palm-e. This would enable the use of advanced computer vision technologies for better environment interaction.
133 |
134 | - [ ] Continuous Optimization
135 |
136 | Last but not least, we will focus on continuous optimization of the framework. We are committed to improving the rationality and extensibility of ROS-LLM to make it easier for developers to customize and extend the framework according to their needs.
137 |
138 | Keep an eye on this repo for updates. Your suggestions and contributions are always welcome!
139 |
140 |
141 |
142 | ## 🙋 To Users
143 | If you find this project useful, please consider giving it a ⭐️ star on GitHub! Your support helps us improve the project and encourages further development. Don't forget to share it with friends and colleagues who might find it beneficial. Thank you for your support!
144 |
145 | ## 🏆 Contributing
146 | Contributions are welcome! Please read the [contributing guidelines](CONTRIBUTING.md) before submitting a pull request.
147 |
148 |
149 |
150 | ## 🪪 License
151 | ```
152 | Copyright 2023 Herman Ye @Auromix
153 | Licensed under the Apache License, Version 2.0 (the "License");
154 | you may not use this file except in compliance with the License.
155 | You may obtain a copy of the License at
156 | http://www.apache.org/licenses/LICENSE-2.0
157 | Unless required by applicable law or agreed to in writing, software
158 | distributed under the License is distributed on an "AS IS" BASIS,
159 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
160 | See the License for the specific language governing permissions and
161 | limitations under the License.
162 | ```
163 |
--------------------------------------------------------------------------------
/llm_install/config_aws.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This script will help you configure your AWS credentials and add them to your .bashrc file.
21 | #
22 | # Author: Herman Ye @Auromix
23 |
24 | # Get the directory of this script
25 | SCRIPT_DIR=$(dirname "$0")
26 |
27 | # AWS guide for user
28 | clear
29 | # Define the variables to be printed
30 | TEXT0=""
31 | TEXT1="This script will help you configure your AWS credentials."
32 | TEXT2="Follow the instructions to set up your AWS account."
33 | TEXT3="visit https://aws.amazon.com/"
34 | TEXT4="Create an S3 bucket"
35 | TEXT5="1. Create an AWS account."
36 | TEXT6="2. Go to the AWS Console Amazon S3 page."
37 | TEXT7="3. Click Create bucket."
38 | TEXT8="4. Enter a Bucket name and choose an AWS Region."
39 | TEXT9="5. Click Create bucket."
40 |
41 | TEXT10="Set up an IAM user"
42 | TEXT11="1. Go to the AWS Console IAM page."
43 | TEXT12="2. Click Users under the IAM resources section."
44 | TEXT13="3. Click Add user."
45 | TEXT14="4. Enter a User name."
46 | TEXT15="5. Under Set permissions, click Attach policies directly."
47 | TEXT16="6. Add policies below:"
48 | TEXT17="AmazonPollyFullAccess, AmazonTranscribeFullAccess, AmazonS3FullAccess"
49 | TEXT18="7. Click Next, review the Permissions summary and any other information."
50 | TEXT19="8. Click Create user."
51 | TEXT20="If you have finished the above steps, press Enter to continue."
52 |
53 | # Define the colors
54 | RED='\033[0;31m'
55 | BLUE='\033[0;34m'
56 | GREEN='\033[1;32m'
57 | NC='\033[0m'
58 |
59 | # Calculate the center of the terminal window
60 | TERMINAL_WIDTH=$(tput cols)
61 | TEXT1_PADDING=$((($TERMINAL_WIDTH-${#TEXT1})/2))
62 | TEXT2_PADDING=$((($TERMINAL_WIDTH-${#TEXT2})/2))
63 | TEXT3_PADDING=$((($TERMINAL_WIDTH-${#TEXT3})/2))
64 | TEXT20_PADDING=$((($TERMINAL_WIDTH-${#TEXT20})/2))
65 |
66 | # Print the text in the center of the screen in the desired colors
67 | echo -e "${GREEN}$(printf '%*s' $TEXT1_PADDING)${TEXT1} ${NC}"
68 | echo -e "${GREEN}$(printf '%*s' $TEXT2_PADDING)${TEXT2} ${NC}"
69 | echo -e "${GREEN}$(printf '%*s' $TEXT3_PADDING)${TEXT3} ${NC}"
70 | echo -e "${RED}${TEXT4} ${NC}"
71 | echo -e "${NC}${TEXT5} ${NC}"
72 | echo -e "${NC}${TEXT6} ${NC}"
73 | echo -e "${NC}${TEXT7} ${NC}"
74 | echo -e "${NC}${TEXT8} ${NC}"
75 | echo -e "${NC}${TEXT9} ${NC}"
76 | echo -e "${NC}$(printf '%*s' $TEXT1_PADDING)${TEXT0} ${NC}"
77 | echo -e "${RED}${TEXT10} ${NC}"
78 | echo -e "${NC}${TEXT11} ${NC}"
79 | echo -e "${NC}${TEXT12} ${NC}"
80 | echo -e "${NC}${TEXT13} ${NC}"
81 | echo -e "${NC}${TEXT14} ${NC}"
82 | echo -e "${NC}${TEXT15} ${NC}"
83 | echo -e "${NC}${TEXT16} ${NC}"
84 | echo -e "${NC}${TEXT17} ${NC}"
85 | echo -e "${NC}${TEXT18} ${NC}"
86 | echo -e "${NC}${TEXT19} ${NC}"
87 | echo -e "${NC}$(printf '%*s' $TEXT1_PADDING)${TEXT0} ${NC}"
88 | echo -e "${GREEN}$(printf '%*s' $TEXT20_PADDING)${TEXT20} ${NC}"
89 |
90 |
91 | # Wait for the user to press Enter to continue
92 | read -p ""
93 |
94 | # Ask user for AWS access key id
95 | read -rp "Enter the AWS access key ID of the IAM user you created: " AWS_ACCESS_KEY_ID
96 | # Check if AWS_ACCESS_KEY_ID already exists in .bashrc file
97 | if grep -q "export AWS_ACCESS_KEY_ID" ~/.bashrc; then
98 | # Confirm with the user before removing the existing AWS_ACCESS_KEY_ID
99 | echo "Existing AWS_ACCESS_KEY_ID found in .bashrc file."
100 | read -rp "Are you sure you want to remove the existing AWS_ACCESS_KEY_ID from your .bashrc file? (y/n) " confirm
101 | if [[ "$confirm" =~ ^[Yy]$ ]]; then
102 | # Remove existing AWS_ACCESS_KEY_ID from .bashrc file
103 | sed -i "/export AWS_ACCESS_KEY_ID/d" "$HOME/.bashrc"
104 | echo "Existing AWS_ACCESS_KEY_ID was removed from .bashrc file."
105 | # Append AWS_ACCESS_KEY_ID to the end of .bashrc file
106 | echo "export AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" >> "$HOME/.bashrc"
107 | source "$HOME/.bashrc"
108 | echo "Added AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID to .bashrc file."
109 | echo "AWS_ACCESS_KEY_ID Configuration complete."
110 | else
111 | echo "No AWS_ACCESS_KEY_ID changes were made."
112 |
113 | fi
114 | else
115 | # If AWS_ACCESS_KEY_ID not found, add it to the end of .bashrc file
116 | echo "export AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" >> "$HOME/.bashrc"
117 | source "$HOME/.bashrc"
118 | echo "Added AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID to .bashrc file."
119 | echo "AWS_ACCESS_KEY_ID Configuration complete."
120 | fi
121 |
122 | # Ask user for AWS_SECRET_ACCESS_KEY
123 | read -rp "Enter your AWS secret access key: " AWS_SECRET_ACCESS_KEY
124 | # Check if AWS_SECRET_ACCESS_KEY already exists in .bashrc file
125 | if grep -q "export AWS_SECRET_ACCESS_KEY" ~/.bashrc; then
126 | # Confirm with the user before removing the existing AWS_SECRET_ACCESS_KEY
127 | echo "Existing AWS_SECRET_ACCESS_KEY found in .bashrc file."
128 | read -rp "Are you sure you want to remove the existing AWS_SECRET_ACCESS_KEY from your .bashrc file? (y/n) " confirm
129 | if [[ "$confirm" =~ ^[Yy]$ ]]; then
130 | # Remove existing AWS_SECRET_ACCESS_KEY from .bashrc file
131 | sed -i "/export AWS_SECRET_ACCESS_KEY/d" "$HOME/.bashrc"
132 | echo "Existing AWS_SECRET_ACCESS_KEY was removed from .bashrc file."
133 | # Append AWS_SECRET_ACCESS_KEY to the end of .bashrc file
134 | echo "export AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" >> "$HOME/.bashrc"
135 | source "$HOME/.bashrc"
136 | echo "Added AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY to .bashrc file."
137 | echo "AWS_SECRET_ACCESS_KEY Configuration complete."
138 | else
139 | echo "No AWS_SECRET_ACCESS_KEY changes were made."
140 |
141 | fi
142 | else
143 | # If AWS_SECRET_ACCESS_KEY not found, add it to the end of .bashrc file
144 | echo "export AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" >> "$HOME/.bashrc"
145 | source "$HOME/.bashrc"
146 | echo "Added AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY to .bashrc file."
147 | fi
148 |
149 | # cd to the llm_config package
150 | cd "$SCRIPT_DIR"
151 | cd .. # cd to the repository root
152 | cd llm_config/llm_config
153 | pwd
154 |
155 | # Ask user for aws_region_name
156 | read -p "Enter your aws region name in S3: " aws_region_name
157 |
158 | # Check if 'self.aws_region_name' exists in user_config.py and replace
159 | if grep -q "self.aws_region_name =" user_config.py; then
160 | sed -i "s/self.aws_region_name =.*/self.aws_region_name = '$aws_region_name'/" user_config.py
161 | if [[ $? -eq 0 ]]; then
162 | echo "AWS region name has been set successfully!"
163 | else
164 | echo "An error occurred while setting AWS region name."
165 | exit 1
166 | fi
167 | else
168 | echo "'self.aws_region_name' not found in user_config.py."
169 | exit 1
170 | fi
171 |
172 | # Ask user for bucket_name
173 | read -p "Enter your aws bucket name in S3: " bucket_name
174 |
175 | # Check if 'self.bucket_name' exists in user_config.py and replace
176 | if grep -q "self.bucket_name =" user_config.py; then
177 | sed -i "s/self.bucket_name =.*/self.bucket_name = '$bucket_name'/" user_config.py
178 | if [[ $? -eq 0 ]]; then
179 | echo "AWS bucket name has been set successfully!"
180 | else
181 | echo "An error occurred while setting AWS bucket name."
182 | exit 1
183 | fi
184 | else
185 | echo "'self.bucket_name' not found in user_config.py."
186 | exit 1
187 | fi
188 |
189 |
190 |
191 | # Wait for user to exit
192 | echo -e "${GREEN}All AWS configurations are complete.${NC}"
193 | read -n 1 -r -p "Press any key to exit..."
194 | exit 0
--------------------------------------------------------------------------------
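
After running config_aws.sh, the credentials live in the shell environment (via ~/.bashrc) and the region and bucket names live in llm_config/user_config.py. Below is a minimal verification sketch, assuming boto3 is installed and a fresh shell has sourced ~/.bashrc; the region string is only an example value, not something the script sets for you:

```python
import os

import boto3

# config_aws.sh exports these two variables into ~/.bashrc
access_key = os.environ.get("AWS_ACCESS_KEY_ID")
secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
print("Credentials found in environment:", bool(access_key and secret_key))

# Polly and S3 are among the services the attached IAM policies grant access to
polly = boto3.client("polly", region_name="us-east-1")  # example region, use your own
print("Polly voices available:", len(polly.describe_voices()["Voices"]))

s3 = boto3.client("s3", region_name="us-east-1")  # example region, use your own
print("S3 buckets:", [b["Name"] for b in s3.list_buckets()["Buckets"]])
```
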
/llm_config/llm_config/robot_behavior.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This file contains the behavior of the robot.
21 | # It includes a list of functions for the robot to perform,
22 | # such as publishing a cmd_vel message to control the movement of the robot.
23 | # To customize the robot's behavior,
24 | # modify the function declarations in this file
25 | # and remember to update the corresponding real functions in llm_robot/turtle_robot.py
26 | #
27 | # Author: Herman Ye @Auromix
28 |
29 | # Example robot functions list for the TurtleSim
30 | # The user can add, remove, or modify the functions in this list
31 | robot_functions_list_1 = [
32 | {
33 | "name": "publish_cmd_vel",
34 | "description": "Publish cmd_vel message to control the movement of turtlesim, including rotation and movement,only used for turtlesim,not for robotic arm",
35 | "parameters": {
36 | "type": "object",
37 | "properties": {
38 | "linear_x": {
39 | "type": "number",
40 | "description": "The linear velocity along the x-axis",
41 | },
42 | "linear_y": {
43 | "type": "number",
44 | "description": "The linear velocity along the y-axis",
45 | },
46 | "linear_z": {
47 | "type": "number",
48 | "description": "The linear velocity along the z-axis",
49 | },
50 | "angular_x": {
51 | "type": "number",
52 | "description": "The angular velocity around the x-axis",
53 | },
54 | "angular_y": {
55 | "type": "number",
56 | "description": "The angular velocity around the y-axis",
57 | },
58 | "angular_z": {
59 | "type": "number",
60 | "description": "The angular velocity around the z-axis",
61 | },
62 | },
63 | "required": [
64 | "linear_x",
65 | "linear_y",
66 | "linear_z",
67 | "angular_x",
68 | "angular_y",
69 | "angular_z",
70 | ],
71 | },
72 | },
73 | {
74 | "name": "reset_turtlesim",
75 | "description": "Resets the turtlesim to its initial state and clears the screen,only used for turtlesim,not for robotic arm",
76 | "parameters": {
77 | "type": "object",
78 | "properties": {},
79 | "required": [],
80 | },
81 | },
82 | {
83 | "name": "publish_target_pose",
84 | "description": "Publish target pose message to control the movement of arm robot, including x, y, z, roll, pitch, yaw",
85 | "parameters": {
86 | "type": "object",
87 | "properties": {
88 | "x": {
89 | "type": "number",
90 | "description": "The x position of the target pose",
91 | },
92 | "y": {
93 | "type": "number",
94 | "description": "The y position of the target pose",
95 | },
96 | "z": {
97 | "type": "number",
98 | "description": "The z position of the target pose",
99 | },
100 | "roll": {
101 | "type": "number",
102 | "description": "The roll of the target pose",
103 | },
104 | "pitch": {
105 | "type": "number",
106 | "description": "The pitch of the target pose",
107 | },
108 | "yaw": {
109 | "type": "number",
110 | "description": "The yaw of the target pose",
111 | },
112 | },
113 | "required": [
114 | "x",
115 | "y",
116 | "z",
117 | "roll",
118 | "pitch",
119 | "yaw",
120 | ],
121 | },
122 | },
123 | {
124 | "name": "publish_target_pose",
125 | "description": "Publish target pose message to control the movement of arm robot, including x, y, z, roll, pitch, yaw. For example,[0.2, 0.2, 0.2, 0.2, 0.2, 0.2] is a valid target pose.",
126 | "parameters": {
127 | "type": "object",
128 | "properties": {
129 | "x": {
130 | "type": "number",
131 | "description": "The x position of the target pose",
132 | },
133 | "y": {
134 | "type": "number",
135 | "description": "The y position of the target pose",
136 | },
137 | "z": {
138 | "type": "number",
139 | "description": "The z position of the target pose",
140 | },
141 | "roll": {
142 | "type": "number",
143 | "description": "The roll of the target pose in radians",
144 | },
145 | "pitch": {
146 | "type": "number",
147 | "description": "The pitch of the target pose in radians",
148 | },
149 | "yaw": {
150 | "type": "number",
151 | "description": "The yaw of the target pose in radians",
152 | },
153 | },
154 | "required": [
155 | "x",
156 | "y",
157 | "z",
158 | "roll",
159 | "pitch",
160 | "yaw",
161 | ],
162 | },
163 | },
164 | ]
165 |
166 | robot_functions_list_multi_robot = [
167 | {
168 | "name": "publish_cmd_vel",
169 | "description": "Publish cmd_vel message to control the movement and rotation of turtlesim. This function is only compatible with turtlesim and not for robotic arm.",
170 | "parameters": {
171 | "type": "object",
172 | "properties": {
173 | "robot_name": {
174 | "type": "string",
175 | "description": "Name of the robot instance that should be controlled. Valid robot names are 'turtle1','turtle2','minipupper', when no specific robot name is specified, robot_name=''",
176 | },
177 | "duration": {
178 | "type": "number",
179 | "description": "Duration of time (in seconds) for which the movement should be performed.",
180 | },
181 | "linear_x": {
182 | "type": "number",
183 | "description": "Linear velocity along the x-axis for the robot.",
184 | },
185 | "linear_y": {
186 | "type": "number",
187 | "description": "Linear velocity along the y-axis for the robot.",
188 | },
189 | "linear_z": {
190 | "type": "number",
191 | "description": "Linear velocity along the z-axis for the robot.",
192 | },
193 | "angular_x": {
194 | "type": "number",
195 | "description": "Angular velocity around the x-axis for the robot.",
196 | },
197 | "angular_y": {
198 | "type": "number",
199 | "description": "Angular velocity around the y-axis for the robot.",
200 | },
201 | "angular_z": {
202 | "type": "number",
203 | "description": "Angular velocity around the z-axis for the robot.",
204 | },
205 | },
206 | "required": [
207 | "robot_name",
208 | "duration",
209 | "linear_x",
210 | "linear_y",
211 | "linear_z",
212 | "angular_x",
213 | "angular_y",
214 | "angular_z",
215 | ],
216 | },
217 | },
218 | ]
219 |
220 |
221 | class RobotBehavior:
222 | """
223 | This class contains the behavior of the robot.
224 | It is used in llm_config/user_config.py to customize the behavior of the robot.
225 | """
226 |
227 | def __init__(self):
228 | self.robot_functions_list = robot_functions_list_multi_robot
229 |
230 |
231 | if __name__ == "__main__":
232 | pass
233 |
--------------------------------------------------------------------------------
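
The entries above are OpenAI function-calling schemas rather than executable code: the model replies with a function name plus a JSON-encoded argument string matching one of these declarations, and the robot node must expose a real function of the same name. The sketch below shows what such a reply might look like and how it could be decoded, assuming the publish_cmd_vel schema from robot_functions_list_multi_robot; the argument values and the dispatch print are illustrative, not the actual llm_robot implementation:

```python
import json

# Example of what ChatGPT may return for the publish_cmd_vel schema above.
# OpenAI delivers "arguments" as a JSON-encoded string, not a dict.
function_call = {
    "name": "publish_cmd_vel",
    "arguments": json.dumps(
        {
            "robot_name": "turtle1",
            "duration": 2.0,
            "linear_x": 1.0,
            "linear_y": 0.0,
            "linear_z": 0.0,
            "angular_x": 0.0,
            "angular_y": 0.0,
            "angular_z": 0.5,
        }
    ),
}

# Hypothetical dispatch on the robot side: decode the arguments and hand them
# to a real method with the same name as the schema entry.
args = json.loads(function_call["arguments"])
print(f"Calling {function_call['name']} with {args}")
```
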
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/llm_model/llm_model/chatgpt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # flake8: noqa
4 | #
5 | # Copyright 2023 Herman Ye @Auromix
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 | # Description:
20 | # This code defines a ROS node called ChatGPTNode
21 | # The node interacts with the ChatGPT service to implement conversational interactions
22 | # The node implements the text input callback function "llm_callback"
23 | # The node also includes a client function "function_call_client" and a publisher "output_publisher"
24 | # It also includes a function called "add_message_to_history" to update chat history records
25 | # The code generates a chat response using the OpenAI API
26 | # It extracts response information from the response data
27 | # The code writes chat history records to a JSON file using Python's JSON library
28 | # The code calls other functions using ROS Service
29 | #
30 | # Node test Method:
31 | # ros2 run llm_model chatgpt
32 | # ros2 topic echo /llm_feedback_to_user
33 | # ros2 topic pub /llm_input_audio_to_text std_msgs/msg/String "data: 'Hello, tell me a joke'" -1
34 | #
35 | # Author: Herman Ye @Auromix
36 |
37 | # ROS related
38 | import rclpy
39 | from rclpy.node import Node
40 | from llm_interfaces.srv import ChatGPT
41 | from std_msgs.msg import String
42 |
43 | # LLM related
44 | import json
45 | import os
46 | import time
47 | import openai
48 | from llm_config.user_config import UserConfig
49 |
50 |
51 | # Global Initialization
52 | config = UserConfig()
53 | openai.api_key = config.openai_api_key
54 | # openai.organization = config.openai_organization
55 |
56 |
57 | class ChatGPTNode(Node):
58 | def __init__(self):
59 | super().__init__("ChatGPT_node")
60 | # Initialization publisher
61 | self.initialization_publisher = self.create_publisher(
62 | String, "/llm_initialization_state", 0
63 | )
64 |
65 | # LLM state publisher
66 | self.llm_state_publisher = self.create_publisher(String, "/llm_state", 0)
67 |
68 | # LLM state listener
69 | self.llm_state_subscriber = self.create_subscription(
70 | String, "/llm_state", self.state_listener_callback, 0
71 | )
72 | # LLM input listener
73 | self.llm_input_subscriber = self.create_subscription(
74 | String, "/llm_input_audio_to_text", self.llm_callback, 0
75 | )
76 | # LLM response type publisher
77 | self.llm_response_type_publisher = self.create_publisher(
78 | String, "/llm_response_type", 0
79 | )
80 |
81 | # LLM feedback for user publisher
82 | self.llm_feedback_publisher = self.create_publisher(
83 | String, "/llm_feedback_to_user", 0
84 | )
85 | # ChatGPT function call client
86 | # When function call is detected
87 | # ChatGPT client will call function call service in robot node
88 | self.function_call_client = self.create_client(
89 | ChatGPT, "/ChatGPT_function_call_service"
90 | )
91 | # self.function_call_future = None
92 | # Wait for function call server to be ready
93 | # while not self.function_call_client.wait_for_service(timeout_sec=1.0):
94 | # self.get_logger().info(
95 | # "ChatGPT Function Call Server(ROBOT NODE) not available, waiting again..."
96 | # )
97 |         self.function_call_request = ChatGPT.Request() # Function call request
98 |         self.get_logger().info("ChatGPT function call client created")
99 |
100 | # ChatGPT output publisher
101 | # When feedback text to user is detected
102 | # ChatGPT node will publish feedback text to output node
103 | self.output_publisher = self.create_publisher(String, "ChatGPT_text_output", 10)
104 |
105 | # Chat history
106 | # The chat history contains user & ChatGPT interaction information
107 | # Chat history is stored in a JSON file in the user_config.chat_history_path
108 |         # There is a maximum length limit for chat history (number of messages)
109 | # And the upper limit is user_config.chat_history_max_length
110 | # TODO: Longer interactive content should be stored in the JSON file
111 | # exceeding token limit, waiting to update @Herman Ye
112 | self.start_timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
113 | self.chat_history_file = os.path.join(
114 | config.chat_history_path, f"chat_history_{self.start_timestamp}.json"
115 | )
116 | self.write_chat_history_to_json()
117 | self.get_logger().info(f"Chat history saved to {self.chat_history_file}")
118 |
119 | # Function name
120 | self.function_name = "null"
121 | # Initialization ready
122 | self.publish_string("llm_model_processing", self.initialization_publisher)
123 |
124 | def state_listener_callback(self, msg):
125 |         self.get_logger().debug(f"Model node received current state: {msg}")
126 | # TODO
127 |
128 | def publish_string(self, string_to_send, publisher_to_use):
129 | msg = String()
130 | msg.data = string_to_send
131 |
132 | publisher_to_use.publish(msg)
133 | self.get_logger().info(
134 | f"Topic: {publisher_to_use.topic_name}\nMessage published: {msg.data}"
135 | )
136 |
137 | def add_message_to_history(
138 | self, role, content="null", function_call=None, name=None
139 | ):
140 | """
141 | Add a new message_element_object to the chat history
142 | with the given role, content, and function call information.
143 | The message_element_object dictionary contains
144 | the key-value pairs for "role", "content", and "function_call".
145 | If the chat history exceeds the maximum allowed length,
146 | the oldest message_element_object will be removed.
147 | Returns the updated chat history list.
148 | """
149 | # Creating message dictionary with given options
150 | message_element_object = {
151 | "role": role,
152 | "content": content,
153 | }
154 |         # Adding function name if provided
155 | if name is not None:
156 | message_element_object["name"] = name
157 | # Adding function call information if provided
158 | if function_call is not None:
159 | message_element_object["function_call"] = function_call
160 | # Adding message_element_object to chat history
161 | config.chat_history.append(message_element_object)
162 | # Log
163 | self.get_logger().info(f"Chat history updated with {message_element_object}")
164 | # Checking if chat history is too long
165 | if len(config.chat_history) > config.chat_history_max_length:
166 | self.get_logger().info(
167 | f"Chat history is too long, popping the oldest message: {config.chat_history[0]}"
168 | )
169 | config.chat_history.pop(0)
170 |
171 | # Returning updated chat history
172 | return config.chat_history
173 |
174 | def generate_chatgpt_response(self, messages_input):
175 | """
176 | Generates a chatgpt response based on the input messages provided.
177 | All parameters can be found in the llm_config/user_config.py file.
178 | """
179 | # Log
180 | self.get_logger().info(f"Sending messages to OpenAI: {messages_input}")
181 | response = openai.ChatCompletion.create(
182 | model=config.openai_model,
183 | messages=messages_input,
184 | functions=config.robot_functions_list,
185 | function_call="auto",
186 | # temperature=config.openai_temperature,
187 | # top_p=config.openai_top_p,
188 | # n=config.openai_n,
189 | # stream=config.openai_stream,
190 | # stop=config.openai_stop,
191 | # max_tokens=config.openai_max_tokens,
192 | # presence_penalty=config.openai_presence_penalty,
193 | # frequency_penalty=config.openai_frequency_penalty,
194 | )
195 | # Log
196 | self.get_logger().info(f"OpenAI response: {response}")
197 | return response
198 |
199 | def get_response_information(self, chatgpt_response):
200 | """
201 | Returns the response information from the chatgpt response.
202 | The response information includes the message, text, function call, and function flag.
203 | function_flag = 0: no function call, 1: function call
204 | """
205 | # Getting response information
206 | message = chatgpt_response["choices"][0]["message"]
207 | content = message.get("content")
208 | function_call = message.get("function_call", None)
209 |
210 | # Initializing function flag, 0: no function call, 1: function call
211 | function_flag = 0
212 |
213 | # If the content is not None, then the response is text
214 | # If the content is None, then the response is function call
215 | if content is not None:
216 | function_flag = 0
217 | self.get_logger().info("OpenAI response type: TEXT")
218 | else:
219 | function_flag = 1
220 | self.get_logger().info("OpenAI response type: FUNCTION CALL")
221 | # Log
222 | self.get_logger().info(
223 | f"Get message from OpenAI: {message}, type: {type(message)}"
224 | )
225 | self.get_logger().info(
226 | f"Get content from OpenAI: {content}, type: {type(content)}"
227 | )
228 | self.get_logger().info(
229 | f"Get function call from OpenAI: {function_call}, type: {type(function_call)}"
230 | )
231 |
232 | return message, content, function_call, function_flag
233 |
234 | def write_chat_history_to_json(self):
235 | """
236 | Write the chat history to a JSON file.
237 | """
238 | try:
239 | # Converting chat history to JSON string
240 | json_data = json.dumps(config.chat_history)
241 |
242 | # Writing JSON to file
243 | with open(self.chat_history_file, "w", encoding="utf-8") as file:
244 | file.write(json_data)
245 |
246 | self.get_logger().info("Chat history has been written to JSON")
247 | return True
248 |
249 | except IOError as error:
250 | # Error writing chat history to JSON
251 | self.get_logger().error(f"Error writing chat history to JSON: {error}")
252 | return False
253 |
254 | def function_call(self, function_call_input):
255 | """
256 | Sends a function call request with the given input and waits for the response.
257 | When the response is received, the function call response callback is called.
258 | """
259 | # JSON object to string
260 | function_call_input_str = json.dumps(function_call_input)
261 | # Get function name
262 | self.function_name = function_call_input["name"]
263 | # Send function call request
264 |         self.function_call_request.request_text = function_call_input_str
265 |         self.get_logger().info(
266 |             f"Request for ChatGPT_function_call_service: {self.function_call_request.request_text}"
267 |         )
268 |         future = self.function_call_client.call_async(self.function_call_request)
269 | future.add_done_callback(self.function_call_response_callback)
270 |
271 | def function_call_response_callback(self, future):
272 | """
273 | The function call response callback is called when the function call response is received.
274 |         The callback then calls ChatGPT again
275 |         to generate a text response for the user.
276 | """
277 | try:
278 | response = future.result()
279 | self.get_logger().info(
280 | f"Response from ChatGPT_function_call_service: {response}"
281 | )
282 |
283 | except Exception as e:
284 | self.get_logger().info(f"ChatGPT function call service failed {e}")
285 |
286 | response_text = "null"
287 | self.add_message_to_history(
288 | role="function",
289 | name=self.function_name,
290 | content=str(response_text),
291 | )
292 | # Generate chat completion
293 | second_chatgpt_response = self.generate_chatgpt_response(config.chat_history)
294 | # Get response information
295 | message, text, function_call, function_flag = self.get_response_information(
296 | second_chatgpt_response
297 | )
298 | self.publish_string(text, self.llm_feedback_publisher)
299 |
300 | def llm_callback(self, msg):
301 | """
302 |         The llm_callback function is called when text input is received on /llm_input_audio_to_text.
303 | llm_callback is the main function of the ChatGPT node.
304 | """
305 | # Log the llm_callback
306 | self.get_logger().info("STATE: model_processing")
307 |
308 | self.get_logger().info(f"Input message received: {msg.data}")
309 | # Add user message to chat history
310 | user_prompt = msg.data
311 | self.add_message_to_history("user", user_prompt)
312 | # Generate chat completion
313 | chatgpt_response = self.generate_chatgpt_response(config.chat_history)
314 | # Get response information
315 | message, text, function_call, function_flag = self.get_response_information(
316 | chatgpt_response
317 | )
318 | # Append response to chat history
319 | self.add_message_to_history(
320 | role="assistant", content=text, function_call=function_call
321 | )
322 | # Write chat history to JSON
323 | self.write_chat_history_to_json()
324 |
325 | # Log output_processing
326 | self.get_logger().info("STATE: output_processing")
327 | if function_flag == 1:
328 |             # Publish the response type: function call
329 | llm_response_type = "function_call"
330 | self.publish_string(llm_response_type, self.llm_response_type_publisher)
331 |
332 | # Robot function call
333 | # Log function execution
334 | self.get_logger().info("STATE: function_execution")
335 | self.function_call(function_call)
336 | else:
337 | # Return text response
338 | llm_response_type = "feedback_for_user"
339 | # Log feedback_for_user
340 | self.get_logger().info("STATE: feedback_for_user")
341 | self.publish_string(llm_response_type, self.llm_response_type_publisher)
342 | self.publish_string(text, self.llm_feedback_publisher)
343 | # self.publish_string(json.dumps(text), self.llm_feedback_publisher)
344 |
345 |
346 | def main(args=None):
347 | rclpy.init(args=args)
348 | chatgpt = ChatGPTNode()
349 | rclpy.spin(chatgpt)
350 | rclpy.shutdown()
351 |
352 |
353 | if __name__ == "__main__":
354 | main()
355 |
--------------------------------------------------------------------------------
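
For a quick end-to-end check of ChatGPTNode without the audio input and output nodes, the CLI test in the header comment can also be scripted with rclpy. A minimal sketch, assuming the chatgpt node above is already running; only the topic names (/llm_input_audio_to_text, /llm_feedback_to_user) come from the node, while the node name and timing are illustrative:

```python
import rclpy
from rclpy.node import Node
from std_msgs.msg import String


class ChatGPTSmokeTest(Node):
    def __init__(self):
        super().__init__("chatgpt_smoke_test")
        # Same topics as ChatGPTNode above
        self.pub = self.create_publisher(String, "/llm_input_audio_to_text", 10)
        self.sub = self.create_subscription(
            String, "/llm_feedback_to_user", self.feedback_callback, 10
        )
        self.sent = False
        # Give discovery a moment before publishing the prompt once
        self.timer = self.create_timer(2.0, self.send_prompt_once)

    def send_prompt_once(self):
        if not self.sent:
            msg = String()
            msg.data = "Hello, tell me a joke"
            self.pub.publish(msg)
            self.sent = True

    def feedback_callback(self, msg):
        self.get_logger().info(f"Feedback from ChatGPTNode: {msg.data}")


def main(args=None):
    rclpy.init(args=args)
    rclpy.spin(ChatGPTSmokeTest())
    rclpy.shutdown()


if __name__ == "__main__":
    main()
```
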