├── .dockerignore
├── rl_uav
│   ├── __init__.py
│   ├── envs
│   │   ├── __init__.py
│   │   └── navigation.py
│   ├── enums
│   │   ├── __init__.py
│   │   ├── action.py
│   │   └── track.py
│   ├── features
│   │   ├── __init__.py
│   │   ├── discretizer.py
│   │   ├── polynomials.py
│   │   ├── linear_function_approximation.py
│   │   ├── fourier_basis.py
│   │   ├── radial_basis.py
│   │   └── tile_coding.py
│   ├── data_classes
│   │   ├── __init__.py
│   │   └── euler.py
│   └── rl_algorithms
│       ├── __init__.py
│       ├── rl_algorithm.py
│       ├── tabular_monte_carlo.py
│       ├── tabular_q_learning.py
│       ├── tabular_sarsa.py
│       ├── lfa_q_learning.py
│       ├── tabular_sarsa_lambda.py
│       ├── lfa_sarsa.py
│       ├── lfa_sarsa_lambda.py
│       ├── tabular_q_lambda.py
│       ├── lfa_q_lambda.py
│       └── lspi.py
├── images
│   ├── track1.png
│   ├── track2.png
│   └── track3.png
├── requirements.txt
├── .gitignore
├── tests
│   ├── test_discretizer.py
│   ├── test_polynomials.py
│   ├── test_fourier_basis.py
│   ├── test_radial_basis.py
│   └── test_tile_coding.py
├── Dockerfile
├── scripts
│   └── laserscan_to_pointcloud.py
├── package.xml
├── .github
│   └── workflows
│       └── build.yml
├── launch
│   └── train.launch
├── train_uav.py
├── README.md
├── CMakeLists.txt
├── worlds
│   ├── track1.world
│   └── track2.world
└── LICENSE
/.dockerignore:
--------------------------------------------------------------------------------
1 | venv
--------------------------------------------------------------------------------
/rl_uav/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/rl_uav/envs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/rl_uav/enums/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/rl_uav/features/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/rl_uav/data_classes/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/rl_uav/rl_algorithms/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/images/track1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NickGeramanis/rl-uav/HEAD/images/track1.png
--------------------------------------------------------------------------------
/images/track2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NickGeramanis/rl-uav/HEAD/images/track2.png
--------------------------------------------------------------------------------
/images/track3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NickGeramanis/rl-uav/HEAD/images/track3.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | gymnasium==0.29.1
2 | numpy==1.24.4
3 | pytest==8.3.5
4 | coverage==7.8.0
5 | pylint==3.3.6
6 | mypy==1.15.0
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | venv/
2 | .idea/
3 | **/__pycache__
4 | **/.pytest_cache
5 | **/.mypy_cache
6 | *.log
7 | *.npy
8 | *.pyc
9 | coverage/
10 | .coverage
11 |
--------------------------------------------------------------------------------
/rl_uav/data_classes/euler.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 |
3 |
4 | @dataclass
5 | class Euler:
6 |     """Orientation represented with Euler angles."""
7 | roll: float
8 | pitch: float
9 | yaw: float
10 |
--------------------------------------------------------------------------------
/tests/test_discretizer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from rl_uav.features.discretizer import Discretizer
5 |
6 |
7 | class TestDiscretizer:
8 | @pytest.fixture(autouse=True)
9 | def init_discretizer(self):
10 | n_bins = (10, 5)
11 | state_space_range = (np.array([0, 0]), np.array([10, 5]))
12 | self.discretizer = Discretizer(n_bins, state_space_range)
13 |
14 | def test_discretize(self):
15 | state = np.array([3.2, 4.2])
16 |
17 | discrete_state = self.discretizer.discretize(state)
18 |
19 | assert discrete_state == (3, 4)
20 |
--------------------------------------------------------------------------------
/rl_uav/enums/action.py:
--------------------------------------------------------------------------------
1 | """This module contains the Action enum."""
2 | from enum import Enum
3 |
4 |
5 | class Action(Enum):
6 | """The Action enum."""
7 |
8 | linear_velocity: float
9 | angular_velocity: float
10 |
11 | def __new__(cls,
12 | value: int,
13 | linear_velocity: float = 0,
14 | angular_velocity: float = 0):
15 | obj = object.__new__(cls)
16 | obj._value_ = value
17 | obj.linear_velocity = linear_velocity
18 | obj.angular_velocity = angular_velocity
19 | return obj
20 |
21 | FORWARD = (0, 0.5, 0)
22 | ROTATE_RIGHT = (1, 0.1, 0.5)
23 | ROTATE_LEFT = (2, 0.1, -0.5)
24 |
--------------------------------------------------------------------------------
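
A minimal usage sketch for the Action enum (illustrative, not part of the repository; the Twist mapping in the trailing comments is an assumption about how the navigation env consumes these fields):

    from rl_uav.enums.action import Action

    action = Action.FORWARD
    assert action.value == 0
    assert action.linear_velocity == 0.5
    assert action.angular_velocity == 0
    assert Action(1) is Action.ROTATE_RIGHT  # lookup by value still works

    # Hypothetical mapping onto a geometry_msgs/Twist command:
    # twist.linear.x = action.linear_velocity
    # twist.angular.z = action.angular_velocity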
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM osrf/ros:noetic-desktop-full
2 |
3 | WORKDIR /home/catkin_ws/src/rl_uav
4 |
5 | COPY . .
6 | RUN apt-get update \
7 | && apt-get upgrade -y \
8 | && apt-get install git python3-pip ros-noetic-geographic-msgs tightvncserver dbus-x11 xfonts-base -y \
9 | && pip install -r requirements.txt \
10 | && cd .. \
11 | && git clone https://github.com/tu-darmstadt-ros-pkg/hector_quadrotor \
12 | && git clone https://github.com/tu-darmstadt-ros-pkg/hector_localization \
13 | && git clone https://github.com/tu-darmstadt-ros-pkg/hector_gazebo \
14 | && git clone https://github.com/tu-darmstadt-ros-pkg/hector_models \
15 | && git clone https://github.com/ros-simulation/gazebo_ros_pkgs
16 |
--------------------------------------------------------------------------------
/scripts/laserscan_to_pointcloud.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import laser_geometry.laser_geometry as lg
3 | import rospy
4 | from sensor_msgs.msg import LaserScan, PointCloud2
5 |
6 |
7 | def scan_cb(msg):
8 | list_ranges = list(msg.ranges)
9 |     for i in range(len(list_ranges)):
10 | if i not in MEASUREMENTS:
11 | list_ranges[i] = 0
12 | else:
13 | print(list_ranges[i])
14 |
15 | msg.ranges = tuple(list_ranges)
16 | pc2_msg = lp.projectLaser(msg)
17 | pc_pub.publish(pc2_msg)
18 | print('------------------------------------')
19 |
20 |
21 | MEASUREMENTS = (180, 360, 540, 720, 900)
22 |
23 | rospy.init_node("laserscan_to_pointcloud")
24 | lp = lg.LaserProjection()
25 | pc_pub = rospy.Publisher("converted_pc", PointCloud2, queue_size=1)
26 | rospy.Subscriber("/scan", LaserScan, scan_cb, queue_size=1)
27 | rospy.spin()
28 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package format="2">
3 |   <name>rl_uav</name>
4 |   <version>0.1.0</version>
5 |   <description>Implementation of different reinforcement learning algorithms to solve the navigation problem of an unmanned aerial vehicle (UAV)</description>
6 |   <maintainer>Nikolaos Geramanis</maintainer>
7 |   <license>GPLv3</license>
8 |   <url>https://nickgeramanis.github.io/rl-uav/</url>
9 |   <author>Nikolaos Geramanis</author>
10 |   <buildtool_depend>catkin</buildtool_depend>
11 |   <depend>rospy</depend>
12 |   <depend>gazebo-ros-pkgs</depend>
13 |   <depend>hector_quadrotor</depend>
14 |   <depend>hector_localization</depend>
15 |   <depend>hector_gazebo</depend>
16 |   <depend>hector_models</depend>
17 |   <depend>numpy</depend>
18 |   <depend>gymnasium</depend>
19 | </package>
20 |
--------------------------------------------------------------------------------
/tests/test_polynomials.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from rl_uav.features.polynomials import Polynomials
5 |
6 |
7 | class TestPolynomials:
8 | @pytest.fixture(autouse=True)
9 | def init_polynomials(self):
10 | order = 1
11 | n_actions = 2
12 | n_dimensions = 2
13 | self.polynomials = Polynomials(n_actions, order, n_dimensions)
14 |
15 | def test_get_features(self):
16 | state = np.array([3, 2])
17 |
18 | features = self.polynomials.get_features(state, 0)
19 | expected_features = [1, 2, 3, 6,
20 | 0, 0, 0, 0]
21 |
22 | assert (features == expected_features).all()
23 |
24 | def test_calculate_q(self):
25 | state = np.array([3, 2])
26 | weights = np.array([i for i in range(8)])
27 |
28 | q = self.polynomials.calculate_q(weights, state)
29 | expected_q = [26, 74]
30 |
31 | assert (q == expected_q).all()
32 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Build
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v4
16 | with:
17 | fetch-depth: 0
18 | - name: Set up Python
19 | uses: actions/setup-python@v5
20 | with:
21 | python-version: '3.11.5'
22 | - name: Install dependencies
23 | run: |
24 | python -m pip install --upgrade pip
25 | pip install -r requirements.txt
26 | - name: Run tests
27 | run: coverage run --branch --source=rl_uav/ --omit=**/__init__.py -m pytest
28 | - name: Show coverage report
29 | run: coverage report
30 | - name: Run linter
31 | if: always()
32 | run: pylint rl_uav/
33 | - name: Run type checker
34 | if: always()
35 | run: mypy --ignore-missing-imports rl_uav/
36 |
--------------------------------------------------------------------------------
/launch/train.launch:
--------------------------------------------------------------------------------
(The launch XML was stripped during extraction; the original contents of train.launch are not recoverable from this dump.)
--------------------------------------------------------------------------------
/rl_uav/features/discretizer.py:
--------------------------------------------------------------------------------
1 | """This module contains the Discretizer class."""
2 | from typing import Tuple
3 |
4 | import numpy as np
5 |
6 |
7 | # pylint: disable-next=too-few-public-methods
8 | class Discretizer:
9 | """Discretize the continuous space."""
10 | n_bins: Tuple[int, ...]
11 | _bins: np.ndarray
12 |
13 | def __init__(self,
14 | n_bins: Tuple[int, ...],
15 | state_space_range: Tuple[np.ndarray, np.ndarray]) -> None:
16 | self.n_bins = n_bins
17 | n_dimensions = len(n_bins)
18 |
19 | self._bins = np.empty((n_dimensions,), dtype=np.ndarray)
20 | for i in range(n_dimensions):
21 | self._bins[i] = np.linspace(state_space_range[0][i],
22 | state_space_range[1][i],
23 | num=n_bins[i] + 1)
24 |
25 | def discretize(self, state: np.ndarray) -> Tuple[int, ...]:
26 | """Discretize the continuous state."""
27 | discrete_state = [np.digitize(state[i], bin_) - 1
28 | for i, bin_ in enumerate(self._bins)]
29 |
30 | return tuple(discrete_state)
31 |
--------------------------------------------------------------------------------
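
A quick usage sketch for Discretizer (illustrative only), mirroring the unit test: with 10 bins over [0, 10] and 5 bins over [0, 5], every bin is one unit wide, so the continuous state (3.2, 4.2) falls into bin indices (3, 4):

    import numpy as np

    from rl_uav.features.discretizer import Discretizer

    discretizer = Discretizer((10, 5), (np.array([0, 0]), np.array([10, 5])))
    print(discretizer.discretize(np.array([3.2, 4.2])))  # (3, 4)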
/rl_uav/features/polynomials.py:
--------------------------------------------------------------------------------
1 | """This module contains the Polynomials class."""
2 | import itertools
3 |
4 | import numpy as np
5 |
6 | from rl_uav.features.linear_function_approximation import \
7 | LinearFunctionApproximation
8 |
9 |
10 | class Polynomials(LinearFunctionApproximation):
11 | """Construct features using Polynomials."""
12 | _order: int
13 | _n_polynomials: int
14 | _exponents: np.ndarray
15 |
16 | def __init__(self, n_actions: int, order: int, n_dimensions: int) -> None:
17 | self._n_actions = n_actions
18 | self._order = order
19 | self._n_polynomials = (order + 1) ** n_dimensions
20 | self._exponents = np.array(list(
21 | itertools.product(np.arange(order + 1), repeat=n_dimensions)))
22 | self.n_features = self._n_polynomials * n_actions
23 |
24 | def get_features(self, state: np.ndarray, action: int) -> np.ndarray:
25 | features = np.zeros((self.n_features,))
26 | for i in range(self._n_polynomials):
27 | prod_terms = np.power(state, self._exponents[i])
28 | features[action * self._n_polynomials + i] = np.prod(prod_terms)
29 |
30 | return features
31 |
--------------------------------------------------------------------------------
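
The feature vector is laid out as one block of (order + 1) ** n_dimensions polynomial terms per action, and only the block of the queried action is populated. An illustrative check (not part of the repository):

    import numpy as np

    from rl_uav.features.polynomials import Polynomials

    # order=1 in 2 dimensions gives exponent vectors (0,0), (0,1), (1,0)
    # and (1,1), i.e. the terms 1, y, x and x*y for a state (x, y).
    poly = Polynomials(n_actions=2, order=1, n_dimensions=2)
    features = poly.get_features(np.array([3, 2]), action=1)
    print(features)  # [0. 0. 0. 0. 1. 2. 3. 6.] -- action 0's block stays zero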
/tests/test_fourier_basis.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from rl_uav.features.fourier_basis import FourierBasis
5 |
6 |
7 | class TestFourierBasis:
8 | @pytest.fixture(autouse=True)
9 | def init_fourier_basis(self):
10 | order = 1
11 | n_actions = 2
12 | state_space_range = (np.array([0, 0]), np.array([10, 5]))
13 | self.fourier_basis = FourierBasis(n_actions, order, state_space_range)
14 |
15 | def test_get_features(self):
16 | state = np.array([3, 2])
17 |
18 | features = self.fourier_basis.get_features(state, 0)
19 | expected_features = [1,
20 | 0.30901699437,
21 | 0.58778525229,
22 | -0.58778525229,
23 | 0, 0, 0, 0]
24 |
25 | assert (np.isclose(expected_features, features)).all()
26 |
27 | def test_calculate_q(self):
28 | state = np.array([3, 2])
29 | weights = np.array([i for i in range(8)])
30 |
31 | q = self.fourier_basis.calculate_q(weights, state)
32 | expected_q = [-0.27876825793, 4.95729971957]
33 |
34 | assert (np.isclose(expected_q, q)).all()
35 |
--------------------------------------------------------------------------------
/rl_uav/features/linear_function_approximation.py:
--------------------------------------------------------------------------------
1 | """This module contains the LinearFunctionApproximation class."""
2 | from abc import ABC, abstractmethod
3 |
4 | import numpy as np
5 |
6 |
7 | class LinearFunctionApproximation(ABC):
8 | """This class defines the interface for the linear function approximation
9 | techniques."""
10 |
11 | n_features: int = 0
12 |
13 | _n_actions: int = 0
14 |
15 | def calculate_q(self,
16 | weights: np.ndarray,
17 | state: np.ndarray) -> np.ndarray:
18 | """Calculate the Q values for each action for a given state."""
19 | q_values = np.empty((self._n_actions,))
20 | for action in range(self._n_actions):
21 | features = self.get_features(state, action)
22 | q_values[action] = np.dot(features, weights)
23 |
24 | return q_values
25 |
26 | @abstractmethod
27 | def get_features(self, state: np.ndarray, action: int) -> np.ndarray:
28 | """Get the feature array for a given state and action."""
29 |
30 | def _normalize(self,
31 | value: np.ndarray,
32 | min_: np.ndarray,
33 | max_: np.ndarray) -> np.ndarray:
34 | numerator = value - min_
35 | denominator = max_ - min_
36 | return numerator / denominator
37 |
--------------------------------------------------------------------------------
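
A toy subclass sketch showing the contract: set _n_actions and n_features, implement get_features, and calculate_q comes for free. IdentityFeatures is hypothetical and exists only for illustration:

    import numpy as np

    from rl_uav.features.linear_function_approximation import \
        LinearFunctionApproximation


    class IdentityFeatures(LinearFunctionApproximation):
        """The state itself is the feature block of the queried action."""

        def __init__(self, n_actions: int, n_dimensions: int) -> None:
            self._n_actions = n_actions
            self._n_dimensions = n_dimensions
            self.n_features = n_dimensions * n_actions

        def get_features(self, state: np.ndarray, action: int) -> np.ndarray:
            features = np.zeros((self.n_features,))
            start = action * self._n_dimensions
            features[start:start + self._n_dimensions] = state
            return features


    print(IdentityFeatures(2, 2).calculate_q(np.ones(4), np.array([3.0, 2.0])))
    # [5. 5.]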
/rl_uav/rl_algorithms/rl_algorithm.py:
--------------------------------------------------------------------------------
1 | """This module contains the RLAlgorithm class."""
2 | import logging
3 | from abc import ABC, abstractmethod
4 |
5 |
6 | class RLAlgorithm(ABC):
7 | """This class defines the interface for the RL algorithms."""
8 | _LOG_FILENAME = 'rl.log'
9 |
10 | _logger: logging.Logger
11 |
12 | def __init__(self) -> None:
13 | self._init_logger()
14 |
15 | def _init_logger(self) -> None:
16 | self._logger = logging.getLogger(__name__)
17 |
18 | for handler in self._logger.handlers[:]:
19 | self._logger.removeHandler(handler)
20 |
21 | log_formatter = logging.Formatter(
22 | fmt='%(asctime)s %(levelname)s %(message)s',
23 | datefmt='%d-%m-%Y %H:%M:%S')
24 |
25 | file_handler = logging.FileHandler(self._LOG_FILENAME)
26 | file_handler.setFormatter(log_formatter)
27 | self._logger.addHandler(file_handler)
28 |
29 | console_handler = logging.StreamHandler()
30 | console_handler.setFormatter(log_formatter)
31 | self._logger.addHandler(console_handler)
32 |
33 | self._logger.setLevel(logging.INFO)
34 |
35 | @abstractmethod
36 | def train(self, n_episodes: int) -> None:
37 | """Execute the training process for a number of episodes."""
38 |
39 | @abstractmethod
40 | def run(self, n_episodes: int) -> None:
41 | """Run the environment and act greedily for a number of episodes."""
42 |
--------------------------------------------------------------------------------
/tests/test_radial_basis.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from rl_uav.features.radial_basis import RadialBasis
5 |
6 |
7 | class TestRadialBasis:
8 | @pytest.fixture(autouse=True)
9 | def init_rbf(self):
10 | n_actions = 2
11 | state_space_range = (np.array([0, 0]), np.array([10, 5]))
12 | centers_per_dimension = [
13 | [0.33, 0.67],
14 | [0.33, 0.67]
15 | ]
16 | standard_deviation = 0.1
17 | self.rbf = RadialBasis(n_actions,
18 | state_space_range,
19 | centers_per_dimension,
20 | standard_deviation)
21 |
22 | def test_get_features(self):
23 | state = np.array([3, 2])
24 |
25 | features = self.rbf.get_features(state, 0)
26 | expected_features = [1,
27 | 0.7482635675785652,
28 | 0.024972002042276155,
29 | 0.0008333973656066949,
30 | 2.7813195266616742e-05,
31 | 0, 0, 0, 0, 0]
32 |
33 | assert (np.isclose(expected_features, features)).all()
34 |
35 | def test_calculate_q(self):
36 | state = np.array([3, 2])
37 | weights = np.array([i for i in range(10)])
38 |
39 | q = self.rbf.calculate_q(weights, state)
40 | expected_q = [0.80081901652, 9.67130291743]
41 |
42 | assert (np.isclose(expected_q, q)).all()
43 |
--------------------------------------------------------------------------------
/tests/test_tile_coding.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from rl_uav.features.tile_coding import TileCoding
5 |
6 |
7 | class TestTileCoding:
8 | @pytest.fixture(autouse=True)
9 | def init_tile_coding(self):
10 | n_tiles_per_dimension = np.array([2, 2])
11 | displacement_vector = np.array([1, 1])
12 | n_tilings = 2
13 | n_actions = 2
14 | state_space_range = (np.array([0, 0]), np.array([10, 5]))
15 | self.tile_coding = TileCoding(n_actions,
16 | n_tilings,
17 | n_tiles_per_dimension,
18 | state_space_range)
19 |
20 | def test_get_features(self):
21 | state = np.array([3, 2])
22 |
23 | features = self.tile_coding.get_features(state, 0)
24 | expected_features = [1, 0, 0,
25 | 0, 0, 0,
26 | 0, 0, 0,
27 | 0, 0, 0,
28 | 0, 1, 0,
29 | 0, 0, 0,
30 | 0, 0, 0,
31 | 0, 0, 0,
32 | 0, 0, 0,
33 | 0, 0, 0,
34 | 0, 0, 0,
35 | 0, 0, 0]
36 |
37 | assert (features == expected_features).all()
38 |
39 | def test_calculate_q(self):
40 | state = np.array([3, 2])
41 | weights = np.array([i for i in range(36)])
42 |
43 | q = self.tile_coding.calculate_q(weights, state)
44 | expected_q = [13, 49]
45 |
46 | assert (q == expected_q).all()
47 |
--------------------------------------------------------------------------------
/rl_uav/features/fourier_basis.py:
--------------------------------------------------------------------------------
1 | """This module contains the FourierBasis class."""
2 | import itertools
3 | import math
4 | from typing import Tuple
5 |
6 | import numpy as np
7 |
8 | from rl_uav.features.linear_function_approximation import \
9 | LinearFunctionApproximation
10 |
11 |
12 | class FourierBasis(LinearFunctionApproximation):
13 | """Construct features using Fourier Basis functions."""
14 | _order: int
15 | _state_space_range: Tuple[np.ndarray, np.ndarray]
16 | _n_functions: int
17 | _integer_vectors: np.ndarray
18 |
19 | def __init__(self,
20 | n_actions: int,
21 | order: int,
22 | state_space_range: Tuple[np.ndarray, np.ndarray]) -> None:
23 | self._n_actions = n_actions
24 | self._order = order
25 | self._state_space_range = state_space_range
26 | n_dimensions = len(state_space_range[0])
27 | self._n_functions = (order + 1) ** n_dimensions
28 | self._integer_vectors = np.array(list(
29 | itertools.product(np.arange(order + 1), repeat=n_dimensions)))
30 | self.n_features = self._n_functions * n_actions
31 |
32 | def get_features(self, state: np.ndarray, action: int) -> np.ndarray:
33 | features = np.zeros((self.n_features,))
34 | norm_state = self._normalize(state,
35 | self._state_space_range[0],
36 | self._state_space_range[1])
37 | for i in range(self._n_functions):
38 | cos_term = math.pi * np.dot(norm_state, self._integer_vectors[i])
39 | features[action * self._n_functions + i] = math.cos(cos_term)
40 |
41 | return features
42 |
--------------------------------------------------------------------------------
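
Each Fourier feature is cos(pi * c . s_norm) for an integer vector c applied to the normalized state. An illustrative check against the unit-test numbers:

    import numpy as np

    from rl_uav.features.fourier_basis import FourierBasis

    fb = FourierBasis(n_actions=2, order=1,
                      state_space_range=(np.array([0, 0]), np.array([10, 5])))
    # State (3, 2) normalizes to (0.3, 0.4); with integer vectors (0,0),
    # (0,1), (1,0) and (1,1) the active block is cos(0), cos(0.4*pi),
    # cos(0.3*pi), cos(0.7*pi).
    features = fb.get_features(np.array([3, 2]), action=0)
    print(np.round(features[:4], 5))  # [ 1.  0.30902  0.58779 -0.58779]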
/train_uav.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import numpy as np
3 | import rospy
4 |
5 | from rl_uav.envs.navigation import Navigation
6 | from rl_uav.features.tile_coding import TileCoding
7 | from rl_uav.rl_algorithms.lfa_sarsa_lambda import LFASARSALambda
8 | from rl_uav.rl_algorithms.lspi import LSPI
9 |
10 |
11 | def main():
12 | rospy.init_node('train_uav')
13 | env = Navigation(track_id=1)
14 |
15 | n_episodes = 500
16 |
17 | discount_factor = 0.99
18 | state_space_range = (env.observation_space.low, env.observation_space.high)
19 | learning_rate_steepness = 0.02
20 | lambda_ = 0.5
21 |
22 | # Tile Coding
23 | n_tiles_per_dimension = np.array([14, 14, 14, 14, 14])
24 | n_tilings = 7
25 | initial_learning_rate = 0.1 / n_tilings
26 | feature_constructor1 = TileCoding(env.action_space.n,
27 | n_tilings,
28 | n_tiles_per_dimension,
29 | state_space_range)
30 |
31 | # SARSA(lambda) with Linear Function Approximation
32 | lfa_sarsa_lambda = LFASARSALambda(env,
33 | discount_factor,
34 | initial_learning_rate,
35 | learning_rate_steepness,
36 | feature_constructor1,
37 | lambda_)
38 | lfa_sarsa_lambda.train(n_episodes)
39 |
40 | # Least-Squares Policy Iteration
41 | # n_samples = 1000
42 | # lspi = LSPI(env, discount_factor, feature_constructor2)
43 | # lspi.gather_samples(n_samples)
44 | # lspi.train(n_episodes)
45 | # lspi.run(n_episodes)
46 |
47 |
48 | if __name__ == '__main__':
49 | main()
50 |
--------------------------------------------------------------------------------
/rl_uav/features/radial_basis.py:
--------------------------------------------------------------------------------
1 | """This module contains the RadialBasis class."""
2 | import itertools
3 | from typing import List, Tuple
4 |
5 | import numpy as np
6 |
7 | from rl_uav.features.linear_function_approximation import \
8 | LinearFunctionApproximation
9 |
10 |
11 | class RadialBasis(LinearFunctionApproximation):
12 | """Construct features using Radial Basis functions."""
13 | _state_space_range: Tuple[np.ndarray, np.ndarray]
14 | _centers: np.ndarray
15 | _variance: float
16 | _n_functions: int
17 |
18 | def __init__(self,
19 | n_actions: int,
20 | state_space_range: Tuple[np.ndarray, np.ndarray],
21 | centers_per_dimension: List[List[float]],
22 | standard_deviation: float) -> None:
23 | self._n_actions = n_actions
24 | self._state_space_range = state_space_range
25 | self._centers = np.array(list(
26 | itertools.product(*centers_per_dimension)))
27 | self._variance = 2 * standard_deviation ** 2
28 | self._n_functions = self._centers.shape[0] + 1
29 | self.n_features = self._n_functions * n_actions
30 |
31 | def get_features(self, state: np.ndarray, action: int) -> np.ndarray:
32 | features = np.zeros((self.n_features,))
33 | norm_state = self._normalize(state,
34 | self._state_space_range[0],
35 | self._state_space_range[1])
36 | features[action * self._n_functions] = 1
37 | for i in range(1, self._n_functions):
38 | numerator = np.linalg.norm(norm_state - self._centers[i - 1]) ** 2
39 | exponent = - numerator / self._variance
40 | features[action * self._n_functions + i] = np.exp(exponent)
41 |
42 | return features
43 |
--------------------------------------------------------------------------------
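
Feature 0 of each action block is a constant bias term; features 1..n are Gaussian bumps exp(-||s_norm - c||^2 / (2 * sigma^2)) around the grid of centers. An illustrative check (not part of the repository):

    import numpy as np

    from rl_uav.features.radial_basis import RadialBasis

    rbf = RadialBasis(n_actions=2,
                      state_space_range=(np.array([0, 0]), np.array([10, 5])),
                      centers_per_dimension=[[0.33, 0.67], [0.33, 0.67]],
                      standard_deviation=0.1)
    # State (3, 2) normalizes to (0.3, 0.4); the nearest center (0.33, 0.33)
    # yields exp(-0.0058 / 0.02) ~ 0.748, matching the unit test.
    print(rbf.get_features(np.array([3, 2]), action=0)[:5])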
/rl_uav/enums/track.py:
--------------------------------------------------------------------------------
1 | """This module contains the Track enum."""
2 | from enum import Enum
3 | from typing import Tuple
4 |
5 |
6 | class Track(Enum):
7 | """The Track enum."""
8 |
9 | spawn_area: Tuple[Tuple[Tuple[float, float], Tuple[float, float]], ...]
10 |
11 | def __new__(cls,
12 | value: int,
13 | spawn_area: Tuple[Tuple[Tuple[float, float],
14 | Tuple[float, float]], ...] = ()):
15 | obj = object.__new__(cls)
16 | obj._value_ = value
17 | obj.spawn_area = spawn_area
18 | return obj
19 |
20 | TRACK1 = (1,
21 | (
22 | ((-9, -9), (-9, 9)),
23 | ((-9, 9), (9, 9)),
24 | ((9, 9), (0, 9)),
25 | ((0, 9), (0, 0)),
26 | ((0, 0), (-9, 0)),
27 | ((-9, 0), (-9, -9))
28 | )
29 | )
30 |
31 | TRACK2 = (2,
32 | (
33 | ((-0.2, -0.2), (-3.2, 3.1)),
34 | ((-0.2, -9.2), (3.1, 3.1)),
35 | ((-9.2, -9.2), (3.1, 12.4)),
36 | ((-9.2, 6.1), (12.4, 12.4)),
37 | ((6.1, 6.1), (12.4, 3)),
38 | ((6.1, 9.2), (3, 3)),
39 | ((9.2, 9.2), (3, -3)),
40 | ((9.2, 6.1), (-3, -3)),
41 | ((6.1, 6.1), (-3, -12.4)),
42 | ((6.1, -9.2), (-12.4, -12.4)),
43 | ((-9.2, -9.2), (-12.4, -3.2)),
44 | ((-9.2, -0.2), (-3.2, -3.2))
45 | )
46 | )
47 |
48 | TRACK3 = (3,
49 | (
50 | ((-4.7, 4.6), (-9.3, -9.3)),
51 | ((4.6, 4.6), (-9.3, -15.3)),
52 | ((4.6, 13.8), (-15.2, -15.2)),
53 | ((13.8, 13.8), (-15.2, -9.3)),
54 | ((13.8, 20), (-9.3, -9.3)),
55 | ((20, 20), (-9.3, -6.1)),
56 | ((20, 0), (-6.1, 13.8)),
57 | ((-20, 0), (-6.1, 13.8)),
58 | ((-20, -20), (-6.1, -9.3)),
59 | ((-20, -13.9), (-9.3, -9.3)),
60 | ((-13.9, -13.9), (-9.3, -15.2)),
61 | ((-13.9, -4.7), (-15.2, -15.2)),
62 | ((-4.7, -4.7), (-15.2, -9.3))
63 | )
64 | )
65 |
--------------------------------------------------------------------------------
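
Each spawn_area entry appears to be an ((x_start, x_end), (y_start, y_end)) segment along the track. A hypothetical helper (sample_spawn_point is not in the repository) sketching how a random spawn point could be drawn under that assumption:

    import random

    from rl_uav.enums.track import Track


    def sample_spawn_point(track: Track) -> tuple:
        # Pick a segment, then a uniform point on it (assumes the
        # ((x0, x1), (y0, y1)) interpretation of each entry).
        (x0, x1), (y0, y1) = random.choice(track.spawn_area)
        t = random.random()
        return (x0 + t * (x1 - x0), y0 + t * (y1 - y0))


    print(sample_spawn_point(Track.TRACK1))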
/rl_uav/rl_algorithms/tabular_monte_carlo.py:
--------------------------------------------------------------------------------
1 | """This module contains the TabularMonteCarlo class."""
2 | import random
3 |
4 | import numpy as np
5 | from gymnasium import Env
6 |
7 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
8 | from rl_uav.features.discretizer import Discretizer
9 |
10 |
11 | class TabularMonteCarlo(RLAlgorithm):
12 | """The Tabular Monte Carlo algorithm."""
13 | _env: Env
14 | _discount_factor: float
15 | _discretizer: Discretizer
16 | _q_table: np.ndarray
17 | _returns: np.ndarray
18 |
19 | def __init__(self,
20 | env: Env,
21 | discount_factor: float,
22 | discretizer: Discretizer) -> None:
23 | super().__init__()
24 | self._env = env
25 | self._discount_factor = discount_factor
26 | self._discretizer = discretizer
27 | self._q_table = np.random.random(
28 | (discretizer.n_bins + (self._env.action_space.n,)))
29 | self._returns = np.empty(self._q_table.shape, dtype=list)
30 |
31 | def train(self, n_episodes: int) -> None:
32 | for episode_i in range(n_episodes):
33 | episode_reward = 0.0
34 |
35 | try:
36 | epsilon = 1.0 / (episode_i + 1)
37 | except OverflowError:
38 | epsilon = 0.1
39 |
40 | samples = []
41 | terminated = truncated = False
42 | state, _ = self._env.reset()
43 | state = self._discretizer.discretize(state)
44 | while not terminated and not truncated:
45 | if random.random() <= epsilon:
46 | action = self._env.action_space.sample()
47 | else:
48 | action = np.argmax(self._q_table[state])
49 |
50 |                 next_state, reward, terminated, truncated, _ = self._env.step(action)
51 |                 episode_reward += reward
52 |                 # Record the state in which the action was taken.
53 |                 samples.append((state, action, reward))
54 |                 state = self._discretizer.discretize(next_state)
55 |
56 | self._logger.info('episode=%d|reward=%f',
57 | episode_i,
58 | episode_reward)
59 |
60 | return_ = 0.0
61 | processed_samples = []
62 | for sample in reversed(samples):
63 | state = sample[0]
64 | action = sample[1]
65 | reward = sample[2]
66 | return_ = self._discount_factor * return_ + reward
67 |
68 | if (state, action) in processed_samples:
69 | continue
70 |
71 | processed_samples.append((state, action))
72 | if self._returns[state + (action,)] is None:
73 | self._returns[state + (action,)] = [return_]
74 | else:
75 | self._returns[state + (action,)].append(return_)
76 |
77 | self._q_table[state + (action,)] = (
78 | np.mean(self._returns[state + (action,)]))
79 |
80 | def run(self, n_episodes: int) -> None:
81 | for episode_i in range(n_episodes):
82 | episode_reward = 0.0
83 | state, _ = self._env.reset()
84 | terminated = truncated = False
85 |
86 | while not terminated and not truncated:
87 | state = self._discretizer.discretize(state)
88 | action = np.argmax(self._q_table[state])
89 | state, reward, terminated, truncated, _ = self._env.step(action)
90 | episode_reward += reward
91 |
92 | self._logger.info('episode=%d|reward=%f',
93 | episode_i,
94 | episode_reward)
95 |
--------------------------------------------------------------------------------
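
The backward pass in train() accumulates discounted returns from the end of the episode. A standalone numeric sketch of that recurrence (illustrative only):

    # With discount 0.9 and episode rewards [1, 0, 2]:
    discount = 0.9
    rewards = [1, 0, 2]
    return_ = 0.0
    returns = []
    for reward in reversed(rewards):
        return_ = discount * return_ + reward
        returns.append(return_)
    print(list(reversed(returns)))  # [2.62, 1.8, 2.0]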
/rl_uav/features/tile_coding.py:
--------------------------------------------------------------------------------
1 | """This module contains the TileCoding class."""
2 | from typing import Tuple
3 |
4 | import numpy as np
5 |
6 | from rl_uav.features.linear_function_approximation import \
7 | LinearFunctionApproximation
8 |
9 |
10 | class TileCoding(LinearFunctionApproximation):
11 | """Construct features using Tile Coding."""
12 | _n_tilings: int
13 | _n_tiles_per_dimension: np.ndarray
14 | _n_dimensions: int
15 | _n_tiles: int
16 | _tilings: np.ndarray
17 |
18 | def __init__(self,
19 | n_actions: int,
20 | n_tilings: int,
21 | n_tiles_per_dimension: np.ndarray,
22 | state_space_range: Tuple[np.ndarray, np.ndarray]) -> None:
23 | self._n_tilings = n_tilings
24 | self._n_actions = n_actions
25 | self._n_tiles_per_dimension = n_tiles_per_dimension + 1
26 | self._n_dimensions = len(self._n_tiles_per_dimension)
27 | n_tiles_per_tiling = np.prod(self._n_tiles_per_dimension)
28 | self._n_tiles = n_tilings * n_tiles_per_tiling
29 | self._tilings = self._create_tilings(state_space_range)
30 | self.n_features = self._n_tiles * n_actions
31 |
32 | def _create_tilings(
33 | self,
34 | state_space_range: Tuple[np.ndarray, np.ndarray]) -> np.ndarray:
35 | width = state_space_range[1] - state_space_range[0]
36 | tile_width = width / self._n_tiles_per_dimension
37 | tiling_offset = tile_width / self._n_tilings
38 |
39 | tilings = np.empty((self._n_tilings, self._n_dimensions),
40 | dtype=np.ndarray)
41 |
42 | min_value = state_space_range[0]
43 | max_value = state_space_range[1] + tile_width
44 |
45 |         # Create the first tiling.
46 | for i in range(self._n_dimensions):
47 | tilings[0, i] = np.linspace(
48 | min_value[i],
49 | max_value[i],
50 | num=self._n_tiles_per_dimension[i] + 1)
51 |
52 |         # To create the rest of the tilings,
53 | # subtract the tiling offset from the previous tiling.
54 | for i in range(1, self._n_tilings):
55 | for j in range(self._n_dimensions):
56 | tilings[i, j] = tilings[i - 1, j] - tiling_offset[j]
57 |
58 | return tilings
59 |
60 | def _get_active_features(self, state: np.ndarray) -> np.ndarray:
61 | active_features = np.zeros((self._n_tilings,), dtype=np.uint32)
62 | dimensions = np.append(self._n_tilings, self._n_tiles_per_dimension)
63 |
64 | for tiling_i in range(self._n_tilings):
65 | index = [tiling_i]
66 | index += [np.digitize(state[i], self._tilings[tiling_i, i]) - 1
67 | for i in range(self._n_dimensions)]
68 |
69 | for j in range(len(dimensions)):
70 | active_features[tiling_i] += (np.prod(dimensions[j + 1:])
71 | * index[j])
72 |
73 | return active_features
74 |
75 | def calculate_q(self,
76 | weights: np.ndarray,
77 | state: np.ndarray) -> np.ndarray:
78 | q_values = np.empty((self._n_actions,))
79 | active_features = self._get_active_features(state)
80 | for action in range(self._n_actions):
81 | q_values[action] = np.sum(
82 | weights[action * self._n_tiles + active_features])
83 |
84 | return q_values
85 |
86 | def get_features(self, state: np.ndarray, action: int) -> np.ndarray:
87 | features = np.zeros((self.n_features,))
88 | active_features = self._get_active_features(state)
89 | features[action * self._n_tiles + active_features] = 1
90 | return features
91 |
--------------------------------------------------------------------------------
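
A usage sketch mirroring the unit test (illustrative only): each tiling contributes exactly one active tile, so a feature vector contains n_tilings ones, and n_features = n_tilings * prod(n_tiles_per_dimension + 1) * n_actions:

    import numpy as np

    from rl_uav.features.tile_coding import TileCoding

    tc = TileCoding(n_actions=2, n_tilings=2,
                    n_tiles_per_dimension=np.array([2, 2]),
                    state_space_range=(np.array([0, 0]), np.array([10, 5])))
    features = tc.get_features(np.array([3, 2]), action=0)
    print(tc.n_features, int(features.sum()))  # 36 2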
/rl_uav/rl_algorithms/tabular_q_learning.py:
--------------------------------------------------------------------------------
1 | """This module contains the TabularQLearning class."""
2 | import math
3 | import random
4 |
5 | import numpy as np
6 | from gymnasium import Env
7 |
8 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
9 | from rl_uav.features.discretizer import Discretizer
10 |
11 |
12 | class TabularQLearning(RLAlgorithm):
13 | """The Tabular Q-Learning algorithm."""
14 | _env: Env
15 | _discount_factor: float
16 | _initial_learning_rate: float
17 | _learning_rate_steepness: float
18 | _discretizer: Discretizer
19 | _q_table: np.ndarray
20 |
21 | def __init__(self,
22 | env: Env,
23 | discount_factor: float,
24 | initial_learning_rate: float,
25 | learning_rate_steepness: float,
26 | discretizer: Discretizer) -> None:
27 | super().__init__()
28 | self._env = env
29 | self._discount_factor = discount_factor
30 | self._initial_learning_rate = initial_learning_rate
31 | self._learning_rate_steepness = learning_rate_steepness
32 | self._discretizer = discretizer
33 | self._q_table = np.random.random(
34 | (discretizer.n_bins + (self._env.action_space.n,)))
35 |
36 | def train(self, n_episodes: int) -> None:
37 | learning_rate_midpoint = n_episodes / 2
38 | for episode_i in range(n_episodes):
39 | episode_reward = 0.0
40 |
41 | try:
42 | exponent = (self._learning_rate_steepness
43 | * (episode_i - learning_rate_midpoint))
44 | learning_rate = (self._initial_learning_rate
45 | / (1 + math.exp(exponent)))
46 | except OverflowError:
47 | learning_rate = 0
48 |
49 | try:
50 | epsilon = 1.0 / (episode_i + 1)
51 | except OverflowError:
52 | epsilon = 0
53 |
54 | terminated = truncated = False
55 | observation, _ = self._env.reset()
56 | current_state = self._discretizer.discretize(observation)
57 |
58 | while not terminated and not truncated:
59 | if random.random() <= epsilon:
60 | action = self._env.action_space.sample()
61 | else:
62 | action = np.argmax(self._q_table[current_state])
63 |
64 | next_state, reward, terminated, truncated, _ = self._env.step(action)
65 | next_state = self._discretizer.discretize(next_state)
66 | episode_reward += reward
67 |
68 | td_target = reward
69 | if not terminated:
70 | td_target += (self._discount_factor
71 | * max(self._q_table[next_state]))
72 |
73 | td_error = (td_target
74 | - self._q_table[current_state + (action,)])
75 | self._q_table[current_state + (action,)] += (
76 | learning_rate * td_error)
77 |
78 | current_state = next_state
79 |
80 | self._logger.info('episode=%d|reward=%f',
81 | episode_i,
82 | episode_reward)
83 |
84 | def run(self, n_episodes: int) -> None:
85 | for episode_i in range(n_episodes):
86 | episode_reward = 0.0
87 | state, _ = self._env.reset()
88 | terminated = truncated = False
89 |
90 |             while not terminated and not truncated:
91 | state = self._discretizer.discretize(state)
92 | action = np.argmax(self._q_table[state])
93 | state, reward, terminated, truncated, _ = self._env.step(action)
94 | episode_reward += reward
95 |
96 | self._logger.info('episode=%d|reward=%f',
97 | episode_i,
98 | episode_reward)
99 |
--------------------------------------------------------------------------------
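
The learning-rate schedule in train() is a logistic decay, lr_i = lr_0 / (1 + exp(k * (i - midpoint))). A standalone sketch (decayed_learning_rate is a hypothetical helper name, not repository code):

    import math


    def decayed_learning_rate(initial_lr, steepness, episode_i, midpoint):
        return initial_lr / (1 + math.exp(steepness * (episode_i - midpoint)))


    # With 500 episodes and steepness 0.02 the rate halves at the midpoint:
    for episode_i in (0, 250, 500):
        print(episode_i,
              round(decayed_learning_rate(0.1, 0.02, episode_i, 250), 4))
    # 0 0.0993 / 250 0.05 / 500 0.0007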
/rl_uav/rl_algorithms/tabular_sarsa.py:
--------------------------------------------------------------------------------
1 | """This module contains the TabularSARSA class."""
2 | import math
3 | import random
4 |
5 | import numpy as np
6 | from gymnasium import Env
7 |
8 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
9 | from rl_uav.features.discretizer import Discretizer
10 |
11 |
12 | class TabularSARSA(RLAlgorithm):
13 | """The Tabular SARSA algorithm."""
14 | _env: Env
15 | _discount_factor: float
16 | _initial_learning_rate: float
17 | _learning_rate_steepness: float
18 | _discretizer: Discretizer
19 | _q_table: np.ndarray
20 |
21 | def __init__(self,
22 | env: Env,
23 | discount_factor: float,
24 | initial_learning_rate: float,
25 | learning_rate_steepness: float,
26 | discretizer: Discretizer) -> None:
27 | super().__init__()
28 | self._env = env
29 | self._discount_factor = discount_factor
30 | self._initial_learning_rate = initial_learning_rate
31 | self._learning_rate_steepness = learning_rate_steepness
32 | self._discretizer = discretizer
33 | self._q_table = np.random.random(
34 | (discretizer.n_bins + (self._env.action_space.n,)))
35 |
36 | def train(self, n_episodes: int) -> None:
37 | learning_rate_midpoint = n_episodes / 2
38 | for episode_i in range(n_episodes):
39 | episode_reward = 0.0
40 |
41 | try:
42 | exponent = (self._learning_rate_steepness
43 | * (episode_i - learning_rate_midpoint))
44 | learning_rate = (self._initial_learning_rate
45 | / (1 + math.exp(exponent)))
46 | except OverflowError:
47 | learning_rate = 0
48 |
49 | try:
50 | epsilon = 1.0 / (episode_i + 1)
51 | except OverflowError:
52 | epsilon = 0
53 |
54 | terminated = truncated = False
55 | current_state, _ = self._env.reset()
56 | current_state = self._discretizer.discretize(current_state)
57 |
58 | if random.random() <= epsilon:
59 | current_action = self._env.action_space.sample()
60 | else:
61 | current_action = np.argmax(self._q_table[current_state])
62 |
63 | while not terminated and not truncated:
64 | next_state, reward, terminated, truncated, _ = self._env.step(current_action)
65 | next_state = self._discretizer.discretize(next_state)
66 | episode_reward += reward
67 |
68 | if random.random() <= epsilon:
69 | next_action = self._env.action_space.sample()
70 | else:
71 | next_action = np.argmax(self._q_table[next_state])
72 |
73 | td_target = reward
74 | if not terminated:
75 | td_target += (
76 | self._discount_factor
77 | * self._q_table[next_state + (next_action,)])
78 |
79 | td_error = (
80 | td_target
81 | - self._q_table[current_state + (current_action,)])
82 | self._q_table[current_state + (current_action,)] += (
83 | learning_rate * td_error)
84 |
85 | current_state = next_state
86 | current_action = next_action
87 |
88 | self._logger.info('episode=%d|reward=%f',
89 | episode_i,
90 | episode_reward)
91 |
92 | def run(self, n_episodes: int) -> None:
93 | for episode_i in range(n_episodes):
94 | episode_reward = 0.0
95 | state, _ = self._env.reset()
96 | terminated = truncated = False
97 |
98 | while not terminated and not truncated:
99 | state = self._discretizer.discretize(state)
100 | action = np.argmax(self._q_table[state])
101 | state, reward, terminated, truncated, _ = self._env.step(action)
102 | episode_reward += reward
103 |
104 | self._logger.info('episode=%d|reward=%f',
105 | episode_i,
106 | episode_reward)
107 |
--------------------------------------------------------------------------------
/rl_uav/rl_algorithms/lfa_q_learning.py:
--------------------------------------------------------------------------------
1 | """This module contains the LFAQLearning class."""
2 | import math
3 | import random
4 |
5 | import numpy as np
6 | from gymnasium import Env
7 |
8 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
9 | from rl_uav.features.linear_function_approximation import LinearFunctionApproximation
10 |
11 |
12 | class LFAQLearning(RLAlgorithm):
13 | """The Q-Learning algorithm with Linear Function Approximation."""
14 | _env: Env
15 | _discount_factor: float
16 | _initial_learning_rate: float
17 | _learning_rate_steepness: float
18 | _feature_constructor: LinearFunctionApproximation
19 | _weights: np.ndarray
20 |
21 | def __init__(self,
22 | env: Env,
23 | discount_factor: float,
24 | initial_learning_rate: float,
25 | learning_rate_steepness: float,
26 | feature_constructor: LinearFunctionApproximation) -> None:
27 | super().__init__()
28 | self._env = env
29 | self._discount_factor = discount_factor
30 | self._initial_learning_rate = initial_learning_rate
31 | self._learning_rate_steepness = learning_rate_steepness
32 | self._feature_constructor = feature_constructor
33 | self._weights = np.random.random((feature_constructor.n_features,))
34 |
35 | def train(self, n_episodes: int) -> None:
36 | learning_rate_midpoint = n_episodes / 2
37 | for episode_i in range(n_episodes):
38 | episode_reward = 0.0
39 |
40 | try:
41 | exponent = (self._learning_rate_steepness
42 | * (episode_i - learning_rate_midpoint))
43 | learning_rate = (self._initial_learning_rate
44 | / (1 + math.exp(exponent)))
45 | except OverflowError:
46 | learning_rate = 0
47 |
48 | try:
49 | epsilon = 1.0 / (episode_i + 1)
50 | except OverflowError:
51 | epsilon = 0
52 |
53 | terminated = truncated = False
54 | current_state, _ = self._env.reset()
55 | current_q_values = self._feature_constructor.calculate_q(
56 | self._weights,
57 | current_state)
58 |
59 | while not terminated and not truncated:
60 | if random.random() <= epsilon:
61 | action = self._env.action_space.sample()
62 | else:
63 | action = np.argmax(current_q_values)
64 |
65 | next_state, reward, terminated, truncated, _ = self._env.step(
66 | action)
67 | episode_reward += reward
68 | next_q_values = self._feature_constructor.calculate_q(
69 | self._weights,
70 | next_state)
71 |
72 | td_target = reward
73 | if not terminated:
74 | td_target += self._discount_factor * np.max(next_q_values)
75 |
76 | td_error = td_target - current_q_values[action]
77 |
78 | features = self._feature_constructor.get_features(
79 | current_state,
80 | action)
81 | self._weights += learning_rate * td_error * features
82 |
83 | current_state = next_state
84 | current_q_values = next_q_values
85 |
86 | self._logger.info('episode=%d|reward=%f',
87 | episode_i,
88 | episode_reward)
89 |
90 | def run(self, n_episodes: int) -> None:
91 | for episode_i in range(n_episodes):
92 | episode_reward = 0.0
93 | state, _ = self._env.reset()
94 | terminated = truncated = False
95 |
96 | while not terminated and not truncated:
97 | q_values = self._feature_constructor.calculate_q(self._weights,
98 | state)
99 | action = np.argmax(q_values)
100 | state, reward, terminated, truncated, _ = self._env.step(
101 | action)
102 | episode_reward += reward
103 |
104 | self._logger.info('episode=%d|reward=%f',
105 | episode_i,
106 | episode_reward)
107 |
--------------------------------------------------------------------------------
/rl_uav/rl_algorithms/tabular_sarsa_lambda.py:
--------------------------------------------------------------------------------
1 | """This module contains the TabularSARSALambda class."""
2 | import math
3 | import random
4 |
5 | import numpy as np
6 | from gymnasium import Env
7 |
8 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
9 | from rl_uav.features.discretizer import Discretizer
10 |
11 |
12 | class TabularSARSALambda(RLAlgorithm):
13 | """The Tabular SARSA(lambda) algorithm."""
14 | _env: Env
15 | _discount_factor: float
16 | _initial_learning_rate: float
17 | _learning_rate_steepness: float
18 | _discretizer: Discretizer
19 | _lambda: float
20 | _q_table: np.ndarray
21 |
22 | def __init__(self,
23 | env: Env,
24 | discount_factor: float,
25 | initial_learning_rate: float,
26 | learning_rate_steepness: float,
27 | discretizer: Discretizer,
28 | lambda_: float) -> None:
29 | super().__init__()
30 | self._env = env
31 | self._discount_factor = discount_factor
32 | self._initial_learning_rate = initial_learning_rate
33 | self._learning_rate_steepness = learning_rate_steepness
34 | self._discretizer = discretizer
35 | self._lambda = lambda_
36 | self._q_table = np.random.random(
37 | (discretizer.n_bins + (self._env.action_space.n,)))
38 |
39 | def train(self, n_episodes: int) -> None:
40 | learning_rate_midpoint = n_episodes / 2
41 | for episode_i in range(n_episodes):
42 | episode_reward = 0.0
43 |
44 | try:
45 | exponent = (self._learning_rate_steepness
46 | * (episode_i - learning_rate_midpoint))
47 | learning_rate = (self._initial_learning_rate
48 | / (1 + math.exp(exponent)))
49 | except OverflowError:
50 | learning_rate = 0
51 |
52 | try:
53 | epsilon = 1.0 / (episode_i + 1)
54 | except OverflowError:
55 | epsilon = 0
56 |
57 | terminated = truncated = False
58 | eligibility_traces = np.zeros(
59 | (self._discretizer.n_bins + (self._env.action_space.n,)))
60 | current_state, _ = self._env.reset()
61 | current_state = self._discretizer.discretize(current_state)
62 |
63 | if random.random() <= epsilon:
64 | current_action = self._env.action_space.sample()
65 | else:
66 | current_action = np.argmax(self._q_table[current_state])
67 |
68 | while not terminated and not truncated:
69 | next_state, reward, terminated, truncated, _ = self._env.step(current_action)
70 | next_state = self._discretizer.discretize(next_state)
71 | episode_reward += reward
72 |
73 | if random.random() <= epsilon:
74 | next_action = self._env.action_space.sample()
75 | else:
76 | next_action = np.argmax(self._q_table[next_state])
77 |
78 | td_target = reward
79 | if not terminated:
80 | td_target += (
81 | self._discount_factor
82 | * self._q_table[next_state + (next_action,)])
83 |
84 | td_error = (
85 | td_target
86 | - self._q_table[current_state + (current_action,)])
87 | eligibility_traces[current_state + (current_action,)] += 1
88 |
89 | self._q_table += learning_rate * td_error * eligibility_traces
90 | eligibility_traces *= self._discount_factor * self._lambda
91 |
92 | current_state = next_state
93 | current_action = next_action
94 |
95 | self._logger.info('episode=%d|reward=%f',
96 | episode_i,
97 | episode_reward)
98 |
99 | def run(self, n_episodes: int) -> None:
100 | for episode_i in range(n_episodes):
101 | episode_reward = 0.0
102 | state, _ = self._env.reset()
103 | terminated = truncated = False
104 |
105 | while not terminated and not truncated:
106 | state = self._discretizer.discretize(state)
107 | action = np.argmax(self._q_table[state])
108 | state, reward, terminated, truncated, _ = self._env.step(action)
109 | episode_reward += reward
110 |
111 | self._logger.info('episode=%d|reward=%f',
112 | episode_i,
113 | episode_reward)
114 |
--------------------------------------------------------------------------------
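
A numeric sketch of the accumulating eligibility traces used above (illustrative only): the visited (state, action) entry is bumped by 1, then every entry decays by gamma * lambda after each update:

    import numpy as np

    traces = np.zeros(3)
    gamma, lambda_ = 0.99, 0.5
    for step, visited in enumerate((0, 1, 0)):
        traces[visited] += 1
        print(step, np.round(traces, 3))
        traces *= gamma * lambda_
    # 0 [1. 0. 0.]
    # 1 [0.495 1. 0.]
    # 2 [1.245 0.495 0.]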
/rl_uav/rl_algorithms/lfa_sarsa.py:
--------------------------------------------------------------------------------
1 | """This module contains the LFASARSA class."""
2 | import math
3 | import random
4 |
5 | import numpy as np
6 | from gymnasium import Env
7 |
8 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
9 | from rl_uav.features.linear_function_approximation import LinearFunctionApproximation
10 |
11 |
12 | class LFASARSA(RLAlgorithm):
13 | """The SARSA algorithm with Linear Function Approximation."""
14 | _env: Env
15 | _discount_factor: float
16 | _initial_learning_rate: float
17 | _learning_rate_steepness: float
18 | _feature_constructor: LinearFunctionApproximation
19 | _weights: np.ndarray
20 |
21 | def __init__(self,
22 | env: Env,
23 | discount_factor: float,
24 | initial_learning_rate: float,
25 | learning_rate_steepness: float,
26 | feature_constructor: LinearFunctionApproximation) -> None:
27 | super().__init__()
28 | self._env = env
29 | self._discount_factor = discount_factor
30 | self._initial_learning_rate = initial_learning_rate
31 | self._learning_rate_steepness = learning_rate_steepness
32 | self._feature_constructor = feature_constructor
33 | self._weights = np.random.random((feature_constructor.n_features,))
34 |
35 | def train(self, n_episodes: int) -> None:
36 | learning_rate_midpoint = n_episodes / 2
37 | for episode_i in range(n_episodes):
38 | episode_reward = 0.0
39 |
40 | try:
41 | exponent = (self._learning_rate_steepness
42 | * (episode_i - learning_rate_midpoint))
43 | learning_rate = (self._initial_learning_rate
44 | / (1 + math.exp(exponent)))
45 | except OverflowError:
46 | learning_rate = 0
47 |
48 | try:
49 | epsilon = 1.0 / (episode_i + 1)
50 | except OverflowError:
51 | epsilon = 0
52 |
53 | terminated = truncated = False
54 | current_state, _ = self._env.reset()
55 | current_q_values = self._feature_constructor.calculate_q(
56 | self._weights,
57 | current_state)
58 |
59 | if random.random() <= epsilon:
60 | current_action = self._env.action_space.sample()
61 | else:
62 | current_action = np.argmax(current_q_values)
63 |
64 | while not terminated and not truncated:
65 | next_state, reward, terminated, truncated, _ = self._env.step(
66 | current_action)
67 | episode_reward += reward
68 | next_q_values = self._feature_constructor.calculate_q(
69 | self._weights,
70 | next_state)
71 |
72 | if random.random() <= epsilon:
73 | next_action = self._env.action_space.sample()
74 | else:
75 | next_action = np.argmax(next_q_values)
76 |
77 | td_target = reward
78 | if not terminated:
79 | td_target += (self._discount_factor
80 | * next_q_values[next_action])
81 |
82 | td_error = td_target - current_q_values[current_action]
83 |
84 | features = self._feature_constructor.get_features(
85 | current_state,
86 | current_action)
87 | self._weights += learning_rate * td_error * features
88 |
89 | current_state = next_state
90 | current_action = next_action
91 | current_q_values = next_q_values
92 |
93 | self._logger.info('episode=%d|reward=%f',
94 | episode_i,
95 | episode_reward)
96 |
97 | def run(self, n_episodes: int) -> None:
98 | for episode_i in range(n_episodes):
99 | episode_reward = 0.0
100 | state, _ = self._env.reset()
101 | terminated = truncated = False
102 |
103 | while not terminated and not truncated:
104 | q_values = self._feature_constructor.calculate_q(self._weights,
105 | state)
106 | action = np.argmax(q_values)
107 | state, reward, terminated, truncated, _ = self._env.step(
108 | action)
109 | episode_reward += reward
110 |
111 | self._logger.info('episode=%d|reward=%f',
112 | episode_i,
113 | episode_reward)
114 |
--------------------------------------------------------------------------------
/rl_uav/rl_algorithms/lfa_sarsa_lambda.py:
--------------------------------------------------------------------------------
1 | """This module contains the LFASARSALambda class."""
2 | import math
3 | import random
4 |
5 | import numpy as np
6 | import rospy
7 | from gymnasium import Env
8 |
9 | from rl_uav.features.linear_function_approximation import LinearFunctionApproximation
10 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
11 |
12 |
13 | class LFASARSALambda(RLAlgorithm):
14 | """The SARSA(lambda) algorithm with Linear Function Approximation."""
15 | _env: Env
16 | _discount_factor: float
17 | _initial_learning_rate: float
18 | _learning_rate_steepness: float
19 | _feature_constructor: LinearFunctionApproximation
20 | _lambda: float
21 | _weights: np.ndarray
22 |
23 | def __init__(self,
24 | env: Env,
25 | discount_factor: float,
26 | initial_learning_rate: float,
27 | learning_rate_steepness: float,
28 | feature_constructor: LinearFunctionApproximation,
29 | lambda_: float) -> None:
30 | super().__init__()
31 | self._env = env
32 | self._discount_factor = discount_factor
33 | self._initial_learning_rate = initial_learning_rate
34 | self._learning_rate_steepness = learning_rate_steepness
35 | self._feature_constructor = feature_constructor
36 | self._lambda = lambda_
37 | self._weights = np.random.random((feature_constructor.n_features,))
38 |
39 | def train(self, n_episodes: int) -> None:
40 | learning_rate_midpoint = n_episodes / 2
41 | for episode_i in range(n_episodes):
42 | episode_reward = 0.0
43 |
44 | try:
45 | exponent = (self._learning_rate_steepness
46 | * (episode_i - learning_rate_midpoint))
47 | learning_rate = (self._initial_learning_rate
48 | / (1 + math.exp(exponent)))
49 | except OverflowError:
50 | learning_rate = 0
51 |
52 | try:
53 | epsilon = 1.0 / (episode_i + 1)
54 | except OverflowError:
55 | epsilon = 0
56 |
57 | terminated = truncated = False
58 | current_state, _ = self._env.reset()
59 | eligibility_traces = np.zeros(
60 | (self._feature_constructor.n_features,))
61 | current_q_values = self._feature_constructor.calculate_q(
62 | self._weights,
63 | current_state)
64 |
65 | if random.random() <= epsilon:
66 | current_action = self._env.action_space.sample()
67 | else:
68 | current_action = np.argmax(current_q_values)
69 |
70 | while not terminated and not truncated:
71 | next_state, reward, terminated, truncated, _ = self._env.step(
72 | current_action)
73 | episode_reward += reward
74 |
75 | next_q_values = self._feature_constructor.calculate_q(
76 | self._weights,
77 | next_state)
78 |
79 | if random.random() <= epsilon:
80 | next_action = self._env.action_space.sample()
81 | else:
82 | next_action = np.argmax(next_q_values)
83 |
84 | td_target = reward
85 | if not terminated:
86 | td_target += (self._discount_factor
87 | * next_q_values[next_action])
88 |
89 | td_error = td_target - current_q_values[current_action]
90 |
91 | current_features = self._feature_constructor.get_features(
92 | current_state,
93 | current_action)
94 | eligibility_traces = (self._discount_factor
95 | * self._lambda
96 | * eligibility_traces
97 | + current_features)
98 |
99 | self._weights += (learning_rate
100 | * td_error
101 | * eligibility_traces)
102 |
103 | current_state = next_state
104 | current_action = next_action
105 | current_q_values = next_q_values
106 |
107 | rospy.loginfo(f'episode={episode_i}|reward={episode_reward}')
108 |
109 | def run(self, n_episodes: int) -> None:
110 | for episode_i in range(n_episodes):
111 | episode_reward = 0.0
112 | state, _ = self._env.reset()
113 | terminated = truncated = False
114 |
115 | while not terminated and not truncated:
116 | q_values = self._feature_constructor.calculate_q(self._weights,
117 | state)
118 | action = np.argmax(q_values)
119 | state, reward, terminated, truncated, _ = self._env.step(
120 | action)
121 | episode_reward += reward
122 |
123 | rospy.loginfo(f'episode={episode_i}|reward={episode_reward}')
124 |
--------------------------------------------------------------------------------
/rl_uav/rl_algorithms/tabular_q_lambda.py:
--------------------------------------------------------------------------------
1 | """This module contains the TabularQLambda class."""
2 | import math
3 | import random
4 |
5 | import numpy as np
6 | from gymnasium import Env
7 |
8 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
9 | from rl_uav.features.discretizer import Discretizer
10 |
11 |
12 | class TabularQLambda(RLAlgorithm):
13 | """The Tabular Q(lambda) algorithm."""
14 | _env: Env
15 | _discount_factor: float
16 | _initial_learning_rate: float
17 | _learning_rate_steepness: float
18 | _discretizer: Discretizer
19 | _lambda: float
20 | _q_table: np.ndarray
21 |
22 | def __init__(self,
23 | env: Env,
24 | discount_factor: float,
25 | initial_learning_rate: float,
26 | learning_rate_steepness: float,
27 | discretizer: Discretizer,
28 | lambda_: float) -> None:
29 | super().__init__()
30 | self._env = env
31 | self._discount_factor = discount_factor
32 | self._initial_learning_rate = initial_learning_rate
33 | self._learning_rate_steepness = learning_rate_steepness
34 | self._discretizer = discretizer
35 | self._lambda = lambda_
36 | self._q_table = np.random.random(
37 | (discretizer.n_bins + (self._env.action_space.n,)))
38 |
39 | def train(self, n_episodes: int) -> None:
40 | learning_rate_midpoint = n_episodes / 2
41 | for episode_i in range(n_episodes):
42 | episode_reward = 0.0
43 |
44 | try:
45 | exponent = (self._learning_rate_steepness
46 | * (episode_i - learning_rate_midpoint))
47 | learning_rate = (self._initial_learning_rate
48 | / (1 + math.exp(exponent)))
49 | except OverflowError:
50 | learning_rate = 0
51 |
52 | try:
53 | epsilon = 1.0 / (episode_i + 1)
54 | except OverflowError:
55 | epsilon = 0
56 |
57 | terminated = truncated = False
58 | eligibility_traces = np.zeros(
59 | (self._discretizer.n_bins + (self._env.action_space.n,)))
60 | current_state, _ = self._env.reset()
61 | current_state = self._discretizer.discretize(current_state)
62 |
63 | if random.random() <= epsilon:
64 | current_action = self._env.action_space.sample()
65 | else:
66 | current_action = np.argmax(self._q_table[current_state])
67 |
68 | while not terminated and not truncated:
69 | next_state, reward, terminated, truncated, _ = self._env.step(current_action)
70 | next_state = self._discretizer.discretize(next_state)
71 | episode_reward += reward
72 |
73 | if random.random() <= epsilon:
74 | next_action = self._env.action_space.sample()
75 | else:
76 | next_action = np.argmax(self._q_table[next_state])
77 |
78 | if (self._q_table[next_state + (next_action,)]
79 | == np.max(self._q_table[next_state])):
80 | best_action = next_action
81 | else:
82 | best_action = np.argmax(self._q_table[next_state])
83 |
84 |                 td_target = reward
85 |                 if not terminated:
86 |                     # Off-policy (Watkins) target: use the greedy action.
87 |                     td_target += (self._discount_factor
88 |                                   * self._q_table[next_state + (best_action,)])
89 |
90 | td_error = (
91 | td_target
92 | - self._q_table[current_state + (current_action,)])
93 | eligibility_traces[current_state + (current_action,)] += 1
94 |
95 | self._q_table += learning_rate * td_error * eligibility_traces
96 |
97 |                 if best_action == next_action:
98 |                     eligibility_traces *= (self._discount_factor
99 |                                            * self._lambda)
100 |                 else:
101 |                     eligibility_traces *= 0  # exploratory action: cut all traces
102 |
103 | current_state = next_state
104 | current_action = next_action
105 |
106 | self._logger.info('episode=%d|reward=%f',
107 | episode_i,
108 | episode_reward)
109 |
110 | def run(self, n_episodes: int) -> None:
111 | for episode_i in range(n_episodes):
112 | episode_reward = 0.0
113 | state, _ = self._env.reset()
114 | terminated = truncated = False
115 |
116 | while not terminated and not truncated:
117 | state = self._discretizer.discretize(state)
118 | action = np.argmax(self._q_table[state])
119 | state, reward, terminated, truncated, _ = self._env.step(action)
120 | episode_reward += reward
121 |
122 | self._logger.info('episode=%d|reward=%f',
123 | episode_i,
124 | episode_reward)
125 |
--------------------------------------------------------------------------------
/rl_uav/rl_algorithms/lfa_q_lambda.py:
--------------------------------------------------------------------------------
1 | """This module contains the LFAQLambda class."""
2 | import math
3 | import random
4 |
5 | import numpy as np
6 | from gymnasium import Env
7 |
8 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
9 | from rl_uav.features.linear_function_approximation import LinearFunctionApproximation
10 |
11 |
12 | class LFAQLambda(RLAlgorithm):
13 | """The Q(lambda) algorithm with Linear Function Approximation."""
14 | _env: Env
15 | _discount_factor: float
16 | _initial_learning_rate: float
17 | _learning_rate_steepness: float
18 | _feature_constructor: LinearFunctionApproximation
19 | _lambda: float
20 | _weights: np.ndarray
21 |
22 | def __init__(self,
23 | env: Env,
24 | discount_factor: float,
25 | initial_learning_rate: float,
26 | learning_rate_steepness: float,
27 | feature_constructor: LinearFunctionApproximation,
28 | lambda_: float) -> None:
29 | super().__init__()
30 | self._env = env
31 | self._discount_factor = discount_factor
32 | self._initial_learning_rate = initial_learning_rate
33 | self._learning_rate_steepness = learning_rate_steepness
34 | self._feature_constructor = feature_constructor
35 | self._lambda = lambda_
36 | self._weights = np.random.random((feature_constructor.n_features,))
37 |
38 | def train(self, n_episodes: int) -> None:
39 | learning_rate_midpoint = n_episodes / 2
40 | for episode_i in range(n_episodes):
41 | episode_reward = 0.0
42 |
43 | try:
44 | exponent = (self._learning_rate_steepness
45 | * (episode_i - learning_rate_midpoint))
46 | learning_rate = (self._initial_learning_rate
47 | / (1 + math.exp(exponent)))
48 | except OverflowError:
49 | learning_rate = 0
50 |
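            # GLIE-style exploration: epsilon = 1/(i+1) decays towards greedy.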
51 | try:
52 | epsilon = 1.0 / (episode_i + 1)
53 | except OverflowError:
54 | epsilon = 0
55 |
56 | terminated = truncated = False
57 | current_state, _ = self._env.reset()
58 | eligibility_traces = np.zeros(
59 | (self._feature_constructor.n_features,))
60 | current_q_values = self._feature_constructor.calculate_q(
61 | self._weights,
62 | current_state)
63 |
64 | if random.random() <= epsilon:
65 | current_action = self._env.action_space.sample()
66 | else:
67 | current_action = np.argmax(current_q_values)
68 |
69 | while not terminated and not truncated:
70 | next_state, reward, terminated, truncated, _ = self._env.step(
71 | current_action)
72 | episode_reward += reward
73 |
74 | next_q_values = self._feature_constructor.calculate_q(
75 | self._weights,
76 | next_state)
77 |
78 | if random.random() <= epsilon:
79 | next_action = self._env.action_space.sample()
80 | else:
81 | next_action = np.argmax(next_q_values)
82 |
83 | if next_q_values[next_action] == np.max(next_q_values):
84 | best_action = next_action
85 | else:
86 | best_action = np.argmax(next_q_values)
87 |
88 |                 td_target = reward
89 |                 if not terminated:
90 |                     td_target += (self._discount_factor  # Watkins: greedy target
91 |                                   * next_q_values[best_action])
92 |
93 | td_error = td_target - current_q_values[current_action]
94 |
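                # Watkins's Q(lambda): traces decay while the agent acts
                # greedily and are cut after an exploratory action.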
95 | if best_action == next_action:
96 | features = self._feature_constructor.get_features(
97 | current_state,
98 | current_action)
99 | eligibility_traces = (self._discount_factor
100 | * self._lambda
101 | * eligibility_traces
102 | + features)
103 | else:
104 | eligibility_traces *= 0
105 |
106 | self._weights += learning_rate * td_error * eligibility_traces
107 |
108 | current_state = next_state
109 | current_action = next_action
110 | current_q_values = next_q_values
111 |
112 | self._logger.info('episode=%d|reward=%f',
113 | episode_i,
114 | episode_reward)
115 |
116 | def run(self, n_episodes: int) -> None:
117 | for episode_i in range(n_episodes):
118 | episode_reward = 0.0
119 | state, _ = self._env.reset()
120 | terminated = truncated = False
121 |
122 | while not terminated and not truncated:
123 | q_values = self._feature_constructor.calculate_q(self._weights,
124 | state)
125 | action = np.argmax(q_values)
126 | state, reward, terminated, truncated, _ = self._env.step(
127 | action)
128 | episode_reward += reward
129 |
130 | self._logger.info('episode=%d|reward=%f',
131 | episode_i,
132 | episode_reward)
133 |
--------------------------------------------------------------------------------
/rl_uav/rl_algorithms/lspi.py:
--------------------------------------------------------------------------------
1 | """This module contains the LSPI class."""
2 | import random
3 |
4 | import numpy as np
5 | import rospy
6 | from gymnasium import Env
7 |
8 | from rl_uav.features.linear_function_approximation import LinearFunctionApproximation
9 | from rl_uav.rl_algorithms.rl_algorithm import RLAlgorithm
10 |
11 |
12 | class LSPI(RLAlgorithm):
13 | """The Least Squares Policy Iteration algorithm."""
14 | _env: Env
15 | _discount_factor: float
16 | _n_samples: int
17 | _feature_constructor: LinearFunctionApproximation
18 | _weights: np.ndarray
19 | _samples: np.ndarray
20 |
21 | def __init__(self,
22 | env: Env,
23 | discount_factor: float,
24 | n_samples: int,
25 | feature_constructor: LinearFunctionApproximation) -> None:
26 | super().__init__()
27 | self._env = env
28 | self._discount_factor = discount_factor
29 | self._n_samples = n_samples
30 | self._feature_constructor = feature_constructor
31 |
32 | def _gather_samples(self) -> None:
33 | """Gather samples by following a random policy."""
34 |         self._samples = np.empty((self._n_samples,), dtype=tuple)  # (s, a, r, s', done)
35 | samples_gathered = 0
36 |         current_state = self._env.observation_space.sample()  # placeholder
37 |         terminated = truncated = True  # force a reset on the first iteration
38 |
39 | while samples_gathered < self._n_samples:
40 | if terminated or truncated:
41 | current_state, _ = self._env.reset()
42 |
43 | action = self._env.action_space.sample()
44 | next_state, reward, terminated, truncated, _ = self._env.step(
45 | action)
46 | self._samples[samples_gathered] = (current_state,
47 | action,
48 | reward,
49 | next_state,
50 | terminated)
51 | samples_gathered += 1
52 | current_state = next_state
53 |
54 | def _calculate_features_list(self) -> np.ndarray:
55 | features_list = np.empty((self._samples.shape[0],), dtype=np.ndarray)
56 |
57 | for i, sample in enumerate(self._samples):
58 | state = sample[0]
59 | action = sample[1]
60 | features_list[i] = self._feature_constructor.get_features(state,
61 | action)
62 |
63 | return features_list
64 |
65 | def _lstdq(self, features_list: np.ndarray) -> np.ndarray:
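        # LSTDQ: solve A w = b for the evaluated greedy policy, where
        #   A = sum_i phi_i (phi_i - gamma * phi'_i)^T,  b = sum_i r_i phi_i,
        # with phi_i = features(s_i, a_i) and phi'_i the features of the
        # greedy next action under the current weights.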
66 |         a_matrix = (random.random()  # small random ridge keeps A invertible
67 |                     * np.identity(self._feature_constructor.n_features))
68 | b_matrix = np.zeros((self._feature_constructor.n_features,))
69 |
70 | for i, sample in enumerate(self._samples):
71 | reward = sample[2]
72 | next_state = sample[3]
73 | terminated = sample[4]
74 |
75 | if terminated:
76 | next_features = np.zeros(
77 | (self._feature_constructor.n_features,))
78 | else:
79 | q_values = self._feature_constructor.calculate_q(self._weights,
80 | next_state)
81 | best_action = int(np.argmax(q_values))
82 | next_features = self._feature_constructor.get_features(
83 | next_state,
84 | best_action)
85 |
86 | current_features = features_list[i]
87 |
88 | a_matrix += np.outer(
89 | current_features,
90 | (current_features - self._discount_factor * next_features))
91 | b_matrix += current_features * reward
92 |
93 | rank = np.linalg.matrix_rank(a_matrix)
94 | if rank == self._feature_constructor.n_features:
95 | a_inverse = np.linalg.inv(a_matrix)
96 | else:
97 | rospy.logwarn(f'A is not full rank (rank={rank})')
98 |             u_matrix, s_matrix, vh_matrix = np.linalg.svd(a_matrix)  # SVD pseudoinverse
99 | s_matrix = np.diag(s_matrix)
100 | a_inverse = np.matmul(np.matmul(vh_matrix.T,
101 | np.linalg.pinv(s_matrix)),
102 | u_matrix.T)
103 |
104 | return np.matmul(a_inverse, b_matrix)
105 |
106 | def train(self, n_episodes: int) -> None:
107 | self._gather_samples()
108 | new_weights = np.random.random(
109 | (self._feature_constructor.n_features,))
110 | features_list = self._calculate_features_list()
111 |
112 | for episode_i in range(n_episodes):
113 | self._weights = new_weights
114 | new_weights = self._lstdq(features_list)
115 |
116 | weights_difference = np.linalg.norm(new_weights - self._weights)
117 | rospy.loginfo(f'episode={episode_i}|'
118 | f'weights_difference={weights_difference}')
119 |
120 |             if weights_difference <= 0:  # norm is non-negative: stop on exact convergence
121 | break
122 |
123 | def run(self, n_episodes: int) -> None:
124 | for episode_i in range(n_episodes):
125 | episode_reward = 0.0
126 | state, _ = self._env.reset()
127 | terminated = truncated = False
128 |
129 | while not terminated and not truncated:
130 | q_values = self._feature_constructor.calculate_q(self._weights,
131 | state)
132 | action = np.argmax(q_values)
133 | state, reward, terminated, truncated, _ = self._env.step(
134 | action)
135 | episode_reward += reward
136 |
137 | rospy.loginfo(f'episode={episode_i}|reward={episode_reward}')
138 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Reinforcement Learning for Autonomous Unmanned Aerial Vehicles - Undergraduate Thesis
2 |
3 | Implementation of different reinforcement learning algorithms to solve the
4 | navigation problem of an unmanned aerial vehicle (UAV)
5 | using [ROS](https://www.ros.org/)/[Gazebo](http://gazebosim.org/) and Python.
6 |
7 | ## Table of Contents
8 |
9 | - [Description](#description)
10 | - [Abstract](#abstract)
11 | - [Getting Started](#getting-started)
12 | - [Prerequisites](#prerequisites)
13 | - [Installation](#installation)
14 | - [With GUI](#with-gui)
15 | - [Usage](#usage)
16 | - [Status](#status)
17 | - [License](#license)
18 | - [Authors](#authors)
19 |
20 | ## Description
21 |
22 | `navigation.py`: The goal of this environment is to navigate a UAV around a
23 | track without crashing into the walls. Initially, the UAV is placed randomly
24 | on the track, at a safe distance from the walls. The state space consists of
25 | 5 range measurements. The action space consists of 3 actions (move_forward,
26 | rotate_left, rotate_right). Both actions and states are subject to additive
27 | white Gaussian noise. The UAV is rewarded +5 for moving forward and -0.5 for
28 | rotating. If the UAV crashes into a wall, it is penalized with -200.
29 |
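For reference, below is a minimal sketch of interacting with the environment
through the standard gymnasium API (it assumes Gazebo and the ROS master are
already running; the node name and the random policy are purely illustrative):

```python
import rospy

from rl_uav.envs.navigation import Navigation

rospy.init_node('navigation_demo')  # hypothetical node name
env = Navigation(track_id=1)

state, _ = env.reset()  # 5 noisy range measurements (np.ndarray)
terminated = truncated = False
episode_reward = 0.0
while not terminated and not truncated:
    action = env.action_space.sample()  # random action from {0, 1, 2}
    state, reward, terminated, truncated, _ = env.step(action)
    episode_reward += reward  # +5 forward, -0.5 rotation, -200 collision
```
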
30 | There are 3 available worlds/tracks:
31 |
32 | Track1:
33 |
34 | 
35 |
36 | Track2:
37 |
38 | 
39 |
40 | Track3:
41 |
42 | 
43 |
44 | ### Abstract
45 |
46 | Reinforcement learning is an area of machine learning concerned with how
47 | autonomous agents learn to behave in unknown environments through
48 | trial-and-error. The goal of a reinforcement learning agent is to learn a
49 | sequential decision policy that maximizes the notion of cumulative reward
50 | through continuous interaction with the unknown environment. A challenging
51 | problem in robotics is the autonomous navigation of an Unmanned Aerial
52 | Vehicle (UAV) in worlds with no available map. This ability is critical in many
53 | applications, such as search and rescue operations or the mapping of
54 | geographical areas. In this thesis, we present a map-less approach for the
55 | autonomous, safe navigation of a UAV in unknown environments using
56 | reinforcement learning. Specifically, we implemented two popular algorithms,
57 | SARSA(λ) and Least-Squares Policy Iteration (LSPI), and combined them with tile
58 | coding, a parametric, linear approximation architecture for the value function,
59 | in order to deal with the 5- or 3-dimensional continuous state space defined by
60 | the measurements of the UAV distance sensors. The final policy of each
61 | algorithm, learned over only 500 episodes, was tested in unknown environments
62 | more complex than the one used for training in order to evaluate the behavior
63 | of each policy. Results show that SARSA(λ) was able to learn a near-optimal
64 | policy that performed adequately even in unknown situations, leading the UAV
65 | along paths free of collisions with obstacles. LSPI's policy required less
66 | learning time and its performance was promising, but not as effective, as it
67 | occasionally led to collisions in unknown situations. The whole project was
68 | implemented using the Robot Operating System (ROS) framework and the Gazebo
69 | robot simulation environment.
70 |
71 | Supervisor: Associate Professor Michail G. Lagoudakis
72 |
73 | https://doi.org/10.26233/heallink.tuc.87066
74 |
75 | ## Getting Started
76 |
77 | ### Prerequisites
78 |
79 | This package is provided as a Docker image.
80 |
81 | ### Installation
82 |
83 | Build the Docker image:
84 |
85 | ```bash
86 | docker build -t rl-uav .
87 | ```
88 |
89 | Run the container:
90 |
91 | ```bash
92 | docker run --name rl-uav -it rl-uav
93 | ```
94 |
95 | Because XmlRpcServer queries all possible file descriptors, you may need to lower the file descriptor limit, depending on your system:
96 |
97 | ```bash
98 | docker run --ulimit nofile=1024:524288 --name rl-uav -it rl-uav
99 | ```
100 |
101 | Build the ros package:
102 |
103 | ```bash
104 | source /opt/ros/noetic/setup.bash
105 | cd /home/catkin_ws
106 | catkin_make
107 | ```
108 |
109 | Because `catkin_make` is resource-intensive, you may need to run only one job at a time:
110 |
111 | ```bash
112 | catkin_make -j1
113 | ```
114 |
115 | #### With GUI
116 |
117 | The image above also contains a VNC server for displaying the Gazebo simulation.
118 |
119 | First, create a Docker network:
120 |
121 | ```bash
122 | docker network create ros
123 | ```
124 |
125 | Then run the container as follows:
126 |
127 | ```bash
128 | docker run --ulimit nofile=1024:524288 --name rl-uav --net=ros --env="DISPLAY=novnc:0.0" --env="RESOLUTION=1920x1080" --env="USER=root" -it rl-uav
129 | ```
130 |
131 | Start the VNC server with the following command:
132 |
133 | ```bash
134 | vncserver -geometry $RESOLUTION
135 | ```
136 |
137 | Run the noVNC client:
138 |
139 | ```bash
140 | docker run -d --rm --net=ros --env="DISPLAY_WIDTH=1920" --env="DISPLAY_HEIGHT=1080" --env="RUN_XTERM=no" --name=novnc -p=8080:8080 theasp/novnc:latest
141 | ```
142 |
143 | Connect to noVNC at the following URL: http://localhost:8080/vnc.html
144 |
145 | ## Usage
146 |
147 | To launch a world, start the `train.launch` file.
148 | You can select the desired track with the `world` parameter
149 | and choose whether to display the GUI with the `gui` parameter.
150 |
151 | ```bash
152 | source /home/catkin_ws/devel/setup.bash
153 | roslaunch rl_uav train.launch world:=track1 gui:=true
154 | ```
155 |
156 | After the world has started, run the `train_uav` node (`train_uav.py`) to begin
157 | the training process and test different algorithms:
158 |
159 | ```bash
160 | source /home/catkin_ws/devel/setup.bash
161 | rosrun rl_uav train_uav.py
162 | ```
163 |
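Each algorithm class exposes `train(n_episodes)` to learn a policy and
`run(n_episodes)` to follow it greedily. Below is a hedged sketch of what such
a node might look like; the hyperparameter values are illustrative only, and
the `Discretizer` construction is elided because its arguments are
project-specific (see `rl_uav/features/discretizer.py`):

```python
import rospy

from rl_uav.envs.navigation import Navigation
from rl_uav.features.discretizer import Discretizer
from rl_uav.rl_algorithms.tabular_q_lambda import TabularQLambda

rospy.init_node('train_uav')
env = Navigation(track_id=1)

# Elided: build a Discretizer for the 5-D continuous state space.
discretizer: Discretizer = ...

agent = TabularQLambda(env,
                       discount_factor=0.99,          # illustrative values,
                       initial_learning_rate=0.1,     # not the thesis settings
                       learning_rate_steepness=0.01,
                       discretizer=discretizer,
                       lambda_=0.9)
agent.train(n_episodes=500)  # learn a policy
agent.run(n_episodes=10)     # execute it greedily
```
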
164 | ## Status
165 |
166 | Under maintenance.
167 |
168 | ## License
169 |
170 | Distributed under the GPL-3.0 License. See `LICENSE` for more information.
171 |
172 | ## Authors
173 |
174 | [Nick Geramanis](https://www.linkedin.com/in/nikolaos-geramanis)
175 |
176 |
--------------------------------------------------------------------------------
/rl_uav/envs/navigation.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """This module contains the basic Navigation environment class."""
3 | import math
4 | from typing import Tuple, Optional, List, Union
5 |
6 | import numpy as np
7 | import rospy
8 | import tf
9 | from geometry_msgs.msg import Point, Pose, PoseStamped, Quaternion, Twist
10 | from gymnasium import Env
11 | from gymnasium.core import RenderFrame
12 | from gymnasium.spaces import Discrete, Box
13 | from hector_uav_msgs.srv import EnableMotors
14 | from rospy.topics import Publisher
15 | from sensor_msgs.msg import LaserScan
16 | from std_msgs.msg import Header
17 | from std_srvs.srv import Empty
18 |
19 | from rl_uav.data_classes.euler import Euler
20 | from rl_uav.enums.action import Action
21 | from rl_uav.enums.track import Track
22 |
23 |
24 | class Navigation(Env):
25 | """The basic Navigation environment."""
26 | _WAIT_TIME = rospy.Duration.from_sec(8)
27 | _STEP_DURATION = rospy.Duration.from_sec(0.4)
28 |
29 | _COLLISION_REWARD = -200.0
30 | _FORWARD_REWARD = +5.0
31 | _YAW_REWARD = -0.5
32 |
33 | _FLYING_ALTITUDE = 1.5 # m
34 | _COLLISION_THRESHOLD = 0.4 # m
35 | _INIT_ALTITUDE = 4 # m
36 |
37 | _VELOCITY_STANDARD_DEVIATION = 0.01
38 |
39 |     _MEASUREMENTS = (180, 360, 540, 720, 900)  # beam indices used as the state
40 |
41 | _N_OBSERVATIONS = len(_MEASUREMENTS)
42 |
43 |     _track: Track
44 | _ranges: Optional[np.ndarray]
45 | _ranges_range: Tuple[float, float]
46 | _vel_pub: Publisher
47 | _pose_pub: Publisher
48 |
49 | def __init__(self,
50 | render_mode: Optional[str] = None,
51 | track_id: int = 1) -> None:
52 | if (render_mode is not None
53 | and render_mode not in self.metadata['render_modes']):
54 | raise ValueError(f'Mode {render_mode} is not supported')
55 |
56 | self.render_mode = render_mode
57 | self._track = Track(track_id)
58 |
59 | self._ranges = None
60 |         self._ranges_range = (-math.inf, math.inf)  # (range_max, range_min)
61 |
62 | self._vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
63 | self._pose_pub = rospy.Publisher('command/pose',
64 | PoseStamped,
65 | queue_size=10)
66 |
67 | rospy.Subscriber('scan', LaserScan, self._laser_scan_callback)
68 |
69 |         while not Navigation._enable_motors():
70 |             pass  # retry until the motor service succeeds
71 |
72 |         while self._ranges is None:
73 |             pass  # busy-wait for the first laser scan
74 |
75 | self.action_space = Discrete(len(Action))
76 |
77 |         high = np.array(self._N_OBSERVATIONS * [self._ranges_range[0]],
78 |                         dtype=np.float64)  # range_max
79 |         low = np.array(self._N_OBSERVATIONS * [self._ranges_range[1]],
80 |                         dtype=np.float64)  # range_min
81 | self.observation_space = Box(low=low,
82 | high=high,
83 | shape=(self._N_OBSERVATIONS,),
84 | dtype=np.float64)
85 |
86 | def _perform_action(self, action: int) -> None:
87 | vel_msg = Twist()
88 |
89 | action_enum = Action(action)
90 |
91 | vel_msg.linear.x = (
92 | action_enum.linear_velocity
93 | + self.np_random.normal(0, self._VELOCITY_STANDARD_DEVIATION))
94 | vel_msg.angular.z = (
95 | action_enum.angular_velocity
96 | + self.np_random.normal(0, self._VELOCITY_STANDARD_DEVIATION))
97 |
98 | self._vel_pub.publish(vel_msg)
99 |
100 | def _fly_to(self, position: Point, euler: Euler) -> None:
101 | quaternion = tf.transformations.quaternion_from_euler(euler.roll,
102 | euler.pitch,
103 | euler.yaw)
104 | quaternion = Quaternion(quaternion[0],
105 | quaternion[1],
106 | quaternion[2],
107 | quaternion[3])
108 |
109 | pose = Pose(position, quaternion)
110 | header = Header(frame_id='world')
111 | pose_stamped = PoseStamped(header, pose)
112 |
113 | self._pose_pub.publish(pose_stamped)
114 |
115 | @staticmethod
116 | def _enable_motors() -> bool:
117 | rospy.wait_for_service('enable_motors')
118 | try:
119 | enable_motors = rospy.ServiceProxy('enable_motors', EnableMotors)
120 | response = enable_motors(True)
121 | return response.success
122 | except rospy.ServiceException as exception:
123 | rospy.logerr(exception)
124 |
125 | return False
126 |
127 | def _laser_scan_callback(self, laser_scan: LaserScan) -> None:
128 | if self._ranges is None:
129 | self._ranges = np.empty(len(laser_scan.ranges))
130 | self._ranges_range = (laser_scan.range_max, laser_scan.range_min)
131 |
132 |         for i, range_ in enumerate(laser_scan.ranges):
133 |             if laser_scan.range_min <= range_ <= laser_scan.range_max:
134 |                 self._ranges[i] = range_  # invalid readings keep the old value
135 |
136 | def _collision_occurred(self) -> bool:
137 | if self._ranges is None:
138 |             raise ValueError('No laser scan received yet')
139 |
140 | return bool((self._ranges < self._COLLISION_THRESHOLD).any())
141 |
142 | @staticmethod
143 | def _reset_world() -> None:
144 | rospy.wait_for_service('gazebo/reset_world')
145 | try:
146 | reset_env = rospy.ServiceProxy('gazebo/reset_world', Empty)
147 | reset_env()
148 | except rospy.ServiceException as exception:
149 | rospy.logerr(exception)
150 |
151 | def reset(self,
152 | *,
153 | seed: Optional[int] = None,
154 | options: Optional[dict] = None) -> Tuple[np.ndarray, dict]:
155 | super().reset(seed=seed)
156 | Navigation._reset_world()
157 | rospy.sleep(self._WAIT_TIME)
158 |
159 | area = self.np_random.choice(self._track.spawn_area)
160 | x_coordinate = self.np_random.uniform(area[0][0], area[0][1])
161 | y_coordinate = self.np_random.uniform(area[1][0], area[1][1])
162 | yaw = self.np_random.uniform(-math.pi, math.pi)
163 |
164 | self._fly_to(Point(0, 0, self._INIT_ALTITUDE),
165 | Euler(0, 0, 0))
166 | rospy.sleep(self._WAIT_TIME)
167 | self._fly_to(Point(x_coordinate, y_coordinate, self._INIT_ALTITUDE),
168 | Euler(0, 0, yaw))
169 | rospy.sleep(self._WAIT_TIME)
170 | self._fly_to(Point(x_coordinate, y_coordinate, self._FLYING_ALTITUDE),
171 | Euler(0, 0, yaw))
172 | rospy.sleep(self._WAIT_TIME)
173 |
174 | if self._ranges is None:
175 |             raise ValueError('No laser scan received yet')
176 |
177 |         return self._ranges[list(self._MEASUREMENTS)], {}  # the 5 selected beams
178 |
179 | def step(self, action: int) -> Tuple[np.ndarray, float, bool, bool, dict]:
180 | if not self.action_space.contains(action):
181 | raise ValueError(f'Invalid action {action} ({type(action)})')
182 |
183 | self._perform_action(action)
184 | rospy.sleep(self._STEP_DURATION)
185 |
186 | if self._ranges is None:
187 |             raise ValueError('No laser scan received yet')
188 |
189 | terminated = self._collision_occurred()
190 | truncated = False
191 |
192 | if terminated:
193 | reward = self._COLLISION_REWARD
194 | elif action == Action.FORWARD.value:
195 | reward = self._FORWARD_REWARD
196 | else:
197 | reward = self._YAW_REWARD
198 |
199 |         return (self._ranges[list(self._MEASUREMENTS)], reward, terminated, truncated, {})
200 |
201 | def render(self) -> Optional[Union[RenderFrame, List[RenderFrame]]]:
202 | """Rendering is handled by Gazebo."""
203 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(rl_uav)
3 |
4 | ## Compile as C++11, supported in ROS Kinetic and newer
5 | # add_compile_options(-std=c++11)
6 |
7 | ## Find catkin macros and libraries
8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9 | ## is used, also find other catkin packages
10 | find_package(catkin REQUIRED)
11 |
12 | ## System dependencies are found with CMake's conventions
13 | # find_package(Boost REQUIRED COMPONENTS system)
14 |
15 |
16 | ## Uncomment this if the package has a setup.py. This macro ensures
17 | ## modules and global scripts declared therein get installed
18 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
19 | # catkin_python_setup()
20 |
21 | ################################################
22 | ## Declare ROS messages, services and actions ##
23 | ################################################
24 |
25 | ## To declare and build messages, services or actions from within this
26 | ## package, follow these steps:
27 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
28 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
29 | ## * In the file package.xml:
30 | ## * add a build_depend tag for "message_generation"
31 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
32 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
33 | ## but can be declared for certainty nonetheless:
34 | ## * add a exec_depend tag for "message_runtime"
35 | ## * In this file (CMakeLists.txt):
36 | ## * add "message_generation" and every package in MSG_DEP_SET to
37 | ## find_package(catkin REQUIRED COMPONENTS ...)
38 | ## * add "message_runtime" and every package in MSG_DEP_SET to
39 | ## catkin_package(CATKIN_DEPENDS ...)
40 | ## * uncomment the add_*_files sections below as needed
41 | ## and list every .msg/.srv/.action file to be processed
42 | ## * uncomment the generate_messages entry below
43 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
44 |
45 | ## Generate messages in the 'msg' folder
46 | # add_message_files(
47 | # FILES
48 | # Message1.msg
49 | # Message2.msg
50 | # )
51 |
52 | ## Generate services in the 'srv' folder
53 | # add_service_files(
54 | # FILES
55 | # Service1.srv
56 | # Service2.srv
57 | # )
58 |
59 | ## Generate actions in the 'action' folder
60 | # add_action_files(
61 | # FILES
62 | # Action1.action
63 | # Action2.action
64 | # )
65 |
66 | ## Generate added messages and services with any dependencies listed here
67 | # generate_messages(
68 | # DEPENDENCIES
69 | # std_msgs # Or other packages containing msgs
70 | # )
71 |
72 | ################################################
73 | ## Declare ROS dynamic reconfigure parameters ##
74 | ################################################
75 |
76 | ## To declare and build dynamic reconfigure parameters within this
77 | ## package, follow these steps:
78 | ## * In the file package.xml:
79 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
80 | ## * In this file (CMakeLists.txt):
81 | ## * add "dynamic_reconfigure" to
82 | ## find_package(catkin REQUIRED COMPONENTS ...)
83 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
84 | ## and list every .cfg file to be processed
85 |
86 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
87 | # generate_dynamic_reconfigure_options(
88 | # cfg/DynReconf1.cfg
89 | # cfg/DynReconf2.cfg
90 | # )
91 |
92 | ###################################
93 | ## catkin specific configuration ##
94 | ###################################
95 | ## The catkin_package macro generates cmake config files for your package
96 | ## Declare things to be passed to dependent projects
97 | ## INCLUDE_DIRS: uncomment this if your package contains header files
98 | ## LIBRARIES: libraries you create in this project that dependent projects also need
99 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
100 | ## DEPENDS: system dependencies of this project that dependent projects also need
101 | catkin_package(
102 | # INCLUDE_DIRS include
103 | # LIBRARIES uav_rl
104 | # CATKIN_DEPENDS other_catkin_pkg
105 | # DEPENDS system_lib
106 | )
107 |
108 | ###########
109 | ## Build ##
110 | ###########
111 |
112 | ## Specify additional locations of header files
113 | ## Your package locations should be listed before other locations
114 | include_directories(
115 | # include
116 | # ${catkin_INCLUDE_DIRS}
117 | )
118 |
119 | ## Declare a C++ library
120 | # add_library(${PROJECT_NAME}
121 | # src/${PROJECT_NAME}/uav_rl.cpp
122 | # )
123 |
124 | ## Add cmake target dependencies of the library
125 | ## as an example, code may need to be generated before libraries
126 | ## either from message generation or dynamic reconfigure
127 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
128 |
129 | ## Declare a C++ executable
130 | ## With catkin_make all packages are built within a single CMake context
131 | ## The recommended prefix ensures that target names across packages don't collide
132 | # add_executable(${PROJECT_NAME}_node src/uav_rl_node.cpp)
133 |
134 | ## Rename C++ executable without prefix
135 | ## The above recommended prefix causes long target names, the following renames the
136 | ## target back to the shorter version for ease of user use
137 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
138 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
139 |
140 | ## Add cmake target dependencies of the executable
141 | ## same as for the library above
142 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
143 |
144 | ## Specify libraries to link a library or executable target against
145 | # target_link_libraries(${PROJECT_NAME}_node
146 | # ${catkin_LIBRARIES}
147 | # )
148 |
149 | #############
150 | ## Install ##
151 | #############
152 |
153 | # all install targets should use catkin DESTINATION variables
154 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
155 |
156 | ## Mark executable scripts (Python etc.) for installation
157 | ## in contrast to setup.py, you can choose the destination
158 | # install(PROGRAMS
159 | # scripts/my_python_script
160 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
161 | # )
162 |
163 | ## Mark executables for installation
164 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
165 | # install(TARGETS ${PROJECT_NAME}_node
166 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
167 | # )
168 |
169 | ## Mark libraries for installation
170 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
171 | # install(TARGETS ${PROJECT_NAME}
172 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
173 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
174 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
175 | # )
176 |
177 | ## Mark cpp header files for installation
178 | # install(DIRECTORY include/${PROJECT_NAME}/
179 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
180 | # FILES_MATCHING PATTERN "*.h"
181 | # PATTERN ".svn" EXCLUDE
182 | # )
183 |
184 | ## Mark other files for installation (e.g. launch and bag files, etc.)
185 | # install(FILES
186 | # # myfile1
187 | # # myfile2
188 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
189 | # )
190 |
191 | #############
192 | ## Testing ##
193 | #############
194 |
195 | ## Add gtest based cpp test target and link libraries
196 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_uav_rl.cpp)
197 | # if(TARGET ${PROJECT_NAME}-test)
198 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
199 | # endif()
200 |
201 | ## Add folders to be run by python nosetests
202 | # catkin_add_nosetests(test)
203 |
--------------------------------------------------------------------------------
/worlds/track1.world:
--------------------------------------------------------------------------------
1 | [Gazebo SDF markup stripped during extraction; only bare element values
2 | survive. The original track1.world defines a light source, a 100 x 100 m
3 | ground plane, physics with gravity (0 0 -9.8), a circuit of orange
4 | box-shaped walls (0.15 m thick, 2.5 m high, 6 to 21.25 m long), a saved
5 | world state, and a GUI camera pose.]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/worlds/track2.world:
--------------------------------------------------------------------------------
[The SDF (XML) markup of this Gazebo world file was destroyed during text
extraction: every tag was stripped and only element values survive. From the
remaining values, the file defines the "track2" world: a scene with ambient
light 0.8 0.8 0.8 1 and background 0.1 0.1 0.1 1; a directional sun light at
height 10 pointing along -0.5 0.5 -1; a 100 x 100 m ground plane with contact
friction mu 100 / mu2 50; ODE physics with a 0.001 s max step size, real-time
factor 1 and gravity 0 0 -9.8; EARTH_WGS84 spherical coordinates; and 24 box
walls (0.15 m thick, 2.5 m tall, lengths between 3.25 m and 18.5 m, mass 10,
orange material 0.996078 0.47451 0.0196078 1) whose poses trace the closed
track. The file ends with a recorded world state (sim time about 57.62 s,
final wall poses) and a GUI camera at -26.0796 -0.579476 41.8171 in
orbit/perspective mode.]
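
For reference, a minimal sketch of how one of these wall models is likely
written in SDF, assuming the standard Gazebo box-model markup; the element
names are reconstructed (not recovered from the file), while the pose, size,
mass and color values are taken from the surviving text above:

    <model name='wall_1'>
      <!-- pose of the whole wall in the world frame (surviving value) -->
      <pose>-10.725 -7.725 0 0 0 -1.5708</pose>
      <link name='link'>
        <inertial>
          <mass>10</mass>  <!-- surviving value -->
        </inertial>
        <collision name='collision'>
          <!-- box is lifted half its height so it sits on the ground -->
          <pose>0 0 1.25 0 -0 0</pose>
          <geometry>
            <box>
              <size>12.5 0.15 2.5</size>  <!-- length, thickness, height -->
            </box>
          </geometry>
        </collision>
        <visual name='visual'>
          <pose>0 0 1.25 0 -0 0</pose>
          <geometry>
            <box>
              <size>12.5 0.15 2.5</size>
            </box>
          </geometry>
          <material>
            <!-- orange color shared by every wall (surviving value) -->
            <ambient>0.996078 0.47451 0.0196078 1</ambient>
          </material>
        </visual>
      </link>
    </model>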
--------------------------------------------------------------------------------