├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── examples ├── README.md ├── baseline_track1_navigation.py ├── baseline_track2_supply_gather.py ├── basic.py ├── basic_track1_navigation.py ├── basic_track2_supply_gather.py ├── basic_track3_supply_battle.py ├── envs │ ├── __init__.py │ ├── envs_track1.py │ └── envs_track2.py ├── memory_test.py └── visualize_depth_map.py ├── images └── wechat.png ├── inspirai_fps ├── README.md ├── __init__.py ├── gamecore.py ├── lib │ └── libraycaster.so ├── raycast_manager.py ├── simple_command.proto ├── simple_command_pb2.py ├── simple_command_pb2_grpc.py ├── test_camera_render.ipynb └── utils.py ├── setup.py └── submission_template ├── Dockerfile ├── README.md ├── common.py ├── eval.py ├── eval_track_1_1.py ├── eval_track_1_2.py ├── eval_track_2.py ├── requirements.txt ├── run.sh └── submission ├── __init__.py ├── agents.py └── envs.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | debug/* 3 | *.pyc 4 | inspirai_fps.egg-info/* 5 | .vscode/* 6 | dist/* 7 | build/* 8 | examples/test* 9 | *.gif 10 | examples/save_replay.py 11 | benchmark/* 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022-present Xi Chen et. al @ Inspir.AI 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inspirai/wilderness-scavenger/4c2be3796cb311601c127fd6e1791619f029934f/MANIFEST.in -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Wilderness Scavenger: 3D Open-World FPS Game AI Challenge 2 | 3 | This is a platform for intelligent agent learning based on a 3D open-world FPS game developed by **Inspir.AI**. 4 | 5 | ## Change Log 6 | 7 | - 2022-08-15: removed raycaster libs of the Mac and Windows platforms, only Linux platform is supported currently! 8 | - 2022-05-16: improved engine backend (Linux) with better stability (v1.0) 9 | - Check out [Supported Platforms](#supported-platforms) for download links. 
10 | - Make sure to update to the latest version of the engine if you would like to use the depth map or enemy state features.
11 | - 2022-05-18: updated engine backend for Windows and MacOS (v1.0)
12 | - 2022-05-30: added submission template and instructions for submitting solutions to the online evaluation system
13 | 
14 | ## Competition Overview
15 | 
16 | With a focus on learning intelligent agents in open-world games, this year we are hosting a new contest called *Wilderness Scavenger*. In this new game, which features a Battle Royale-style 3D open-world gameplay experience and random PCG-based world generation, participants must train agents that can perform subtasks common to FPS games, such as navigation, scouting, and skirmishing. To win the competition, agents must perceive complex 3D environments well and learn to exploit various environmental structures (such as terrain, buildings, and plants) by developing flexible strategies to gain advantages over other competitors. Despite the difficulty of this goal, we hope that this new competition can serve as a cornerstone of research in AI-based gaming for open-world games.
17 | 
18 | ## Features
19 | 
20 | - A lightweight 3D open-world FPS game developed with the Unity3D game engine
21 | - Rendering-off game acceleration for fast training and evaluation
22 | - A large open-world environment providing high freedom of agent behaviors
23 | - Highly customizable game configuration with random supply distribution and dynamic refresh
24 | - PCG-based map generation with randomly spawned buildings, plants and obstacles (100 training maps)
25 | - An interactive replay tool for game record visualization
26 | 
27 | ## Basic Structures
28 | 
29 | We developed this repository to provide a training and evaluation platform for researchers interested in open-world FPS game AI. To get started quickly, a typical workspace structure when using this repository can be summarized as follows:
30 | 
31 | ```bash
32 | .
33 | ├── examples            # starter code examples and training baselines
34 | │   ├── envs/...
35 | │   ├── basic.py
36 | │   ├── basic_track1_navigation.py
37 | │   ├── basic_track2_supply_gather.py
38 | │   ├── basic_track3_supply_battle.py
39 | │   ├── baseline_track1_navigation.py
40 | │   ├── baseline_track2_supply_gather.py
41 | │   └── baseline_track3_supply_battle.py
42 | ├── inspirai_fps        # the gameplay API source code
43 | │   ├── lib/...
44 | │   ├── __init__.py
45 | │   ├── gamecore.py
46 | │   ├── raycast_manager.py
47 | │   ├── simple_command_pb2.py
48 | │   ├── simple_command_pb2_grpc.py
49 | │   └── utils.py
50 | └── fps_linux           # the engine backend (Linux)
51 |     ├── UnityPlayer.so
52 |     ├── fps.x86_64
53 |     ├── fps_Data/...
54 |     └── logs/...
55 | ```
56 | 
57 | - `fps_linux` (**must be manually downloaded and unzipped into your working directory**): the (Linux) engine backend extracted from our game development project, containing all the game-related assets, binaries and source code.
58 | - `inspirai_fps`: the Python gameplay API for agent training and testing, providing the core [`Game`](inspirai_fps/gamecore.py) class and other useful tool classes and functions.
59 | - `examples`: basic starter code for each game mode targeting each track of the challenge, plus our implementation of some baseline solutions based on the [`ray.rllib`](https://docs.ray.io/en/master/rllib/index.html) reinforcement learning framework.
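To make the division of labor concrete, here is a minimal sketch of the core gameplay loop built from these pieces (condensed from the scripts under `examples/`; it assumes the engine backend sits at `../fps_linux` and the map data at `../map_data`, as set up in the sections below):

```python
from inspirai_fps import Game, ActionVariable

# assumes the engine backend and map data have already been downloaded (see below)
game = Game(map_dir="../map_data", engine_dir="../fps_linux")
game.set_game_mode(Game.MODE_NAVIGATION)
game.set_episode_timeout(10)
game.set_target_location([5, 0, 5])
game.set_available_actions([ActionVariable.WALK_DIR, ActionVariable.WALK_SPEED])
game.set_map_id(1)
game.init()

game.new_episode()
while not game.is_episode_finished():
    state_all = game.get_state_all()  # dict: agent_id -> agent state
    # send one action list per agent; here: walk in direction 0 at speed 1
    game.make_action({agent_id: [0, 1] for agent_id in state_all})
game.close()
```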
60 | 
61 | ## Supported Platforms
62 | 
63 | We support multiple platforms with different engine backends:
64 | 
65 | - Linux: download the engine from [Google Drive](https://drive.google.com/file/d/1gDkaKtwC8QUFbrrO1Zc1VwPhb3QcHaLl/view?usp=sharing) or [Feishu](https://inspirai.feishu.cn/file/boxcnwohwxo3EqCUbOhRj4ESJBb) (last updated on 2022-05-16)
66 | - Windows: download the engine from [~~Google Drive~~](https://drive.google.com/file/d/1DwjnB7Harpeo5f57fjKpfp-43vvSLtQO/view?usp=sharing) or [~~Feishu~~](https://inspirai.feishu.cn/file/boxcnG8EP0RuTI3ImX2mmhat23d) (last updated on 2022-05-18)
67 | - MacOS: download the engine from [~~Google Drive~~](https://drive.google.com/file/d/1I56Db8QvLpPQo8eUSi914MJT1-4ze-om/view?usp=sharing) or [~~Feishu~~](https://scwc0a0eu7.feishu.cn/file/boxcnfFqRae6L42wS2lRyhPDNXe) (last updated on 2022-05-18)
68 | 
69 | ## Installation (from source)
70 | 
71 | To use the gameplay API, first install the package `inspirai_fps` with the commands below:
72 | 
73 | ```bash
74 | git clone https://github.com/inspirai/wilderness-scavenger
75 | cd wilderness-scavenger
76 | pip install .
77 | ```
78 | 
79 | We recommend installing this package with Python 3.8 (our development environment), so you may first create a virtual env using [`conda`](https://www.anaconda.com/) and then finish the installation:
80 | 
81 | ```bash
82 | $ conda create -n WildScav python=3.8
83 | $ conda activate WildScav
84 | (WildScav) $ pip install .
85 | ```
86 | 
87 | ## Installation (from PyPI)
88 | 
89 | **Note: the PyPI package may not be kept up to date. We strongly recommend the installation method above.**
90 | 
91 | Alternatively, you can install the package from PyPI directly. Note that this only installs the gameplay API `inspirai_fps`, not the backend engine, so you still need to manually download the correct engine backend from the [Supported Platforms](#supported-platforms) section.
92 | 
93 | ```bash
94 | pip install inspirai-fps
95 | ```
96 | 
97 | ## Loading Engine Backend
98 | 
99 | To successfully run the game, make sure the game engine backend for your platform is downloaded, and set the `engine_dir` parameter of the `Game` initializer correctly. For example, here is a code snippet from the script `examples/basic.py`:
100 | 
101 | ```python
102 | from inspirai_fps import Game, ActionVariable
103 | ...
104 | parser.add_argument("--engine-dir", type=str, default="../fps_linux")
105 | ...
106 | game = Game(..., engine_dir=args.engine_dir, ...)
107 | ```
108 | 
109 | ## Loading Map Data
110 | 
111 | To access features like realtime depth map computation and randomized player spawning, you need to download the map data and load it into the `Game`. Once depth map rendering is turned on, the game server will automatically compute a depth map from the player's first-person perspective at each time step.
112 | 
113 | 1. Download the map data from [Google Drive](https://drive.google.com/file/d/1n1199S3DF9ScvVZHlHLrtZ8WUBvIlKgR/view?usp=sharing) or [Feishu](https://inspirai.feishu.cn/file/boxcnjVwcVTSZPnwB1whheQsGKf) and decompress the downloaded file to your preferred directory (e.g., `/map_data`)
114 | 2. Set the `map_dir` parameter of the `Game` initializer accordingly
115 | 3. Set the `map_id` as you like
116 | 4. Turn on depth map computation
117 | 5. Turn on random start locations to spawn agents at random places
118 | 
119 | Read the following code snippet from the script `examples/basic.py` as an example:
120 | 
121 | ```python
122 | from inspirai_fps import Game, ActionVariable
123 | ...
124 | parser.add_argument("--map-id", type=int, default=1)
125 | parser.add_argument("--use-depth-map", action="store_true")
126 | parser.add_argument("--random-start-location", action="store_true")
127 | parser.add_argument("--map-dir", type=str, default="../map_data")
128 | ...
129 | game = Game(map_dir=args.map_dir, ...)
130 | game.set_map_id(args.map_id)  # this will load the valid locations of the specified map
131 | ...
132 | if args.use_depth_map:
133 |     game.turn_on_depth_map()
134 |     game.set_depth_map_size(380, 220, 200)  # width (pixels), height (pixels), depth_limit (meters)
135 | ...
136 | if args.random_start_location:
137 |     for agent_id in range(args.num_agents):
138 |         game.random_start_location(agent_id, indoor=False)  # spawn the player at a random valid outdoor location (or indoor location if indoor is True)
139 | ...
140 | game.new_episode()  # start a new episode; this will load the mesh of the specified map
141 | ```
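Once depth map rendering is on, each agent state carries a `depth_map` array at every time step (the example scripts print `state.depth_map.shape`). Here is a minimal sketch of consuming it in a control loop; the normalization is our own illustration rather than part of the API, and it assumes the 200-meter depth limit configured above:

```python
import numpy as np

game.new_episode()
while not game.is_episode_finished():
    state = game.get_state()                 # state of the default agent (id 0)
    depth = np.asarray(state.depth_map)      # shape (height, width), values in meters
    obs = np.clip(depth / 200.0, 0.0, 1.0)   # e.g., squash to [0, 1] for a network input
    game.make_action({0: [0, 1]})            # assumes WALK_DIR and WALK_SPEED are the available actions
game.close()
```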
142 | 
143 | ## Gameplay Visualization
144 | 
145 | We have also developed a replay visualization tool based on the Unity3D game engine. It is similar to the spectator mode common in multiplayer FPS games, which allows users to interactively follow the gameplay. Users can view an agent's actions from different perspectives and also switch between multiple agents or different viewing modes (e.g., first person, third person, free) to see the entire game in a more immersive way. Participants can download the tool for their specific platforms here:
146 | 
147 | - Windows: download the replay tool from [Google Drive](https://drive.google.com/file/d/1RgVjCuRw9b_oi4DUe0AuLAHNQzDIaJdS/view?usp=sharing) or [Feishu](https://scwc0a0eu7.feishu.cn/file/boxcnX7UJ94zBv2AaPElGujyKWb)
148 | - MacOS: download the replay tool from [Google Drive](https://drive.google.com/file/d/1N5jMLkIzGdN2ynw_QWBUaRAiGa05QScq/view?usp=sharing) or [Feishu](https://scwc0a0eu7.feishu.cn/file/boxcneEFmflI0ZW8hPrQ7Ad6Iyc)
149 | 
150 | To use this tool, follow the instructions below:
151 | 
152 | - Decompress the downloaded file to anywhere you prefer.
153 | - Turn on the recording function with `game.turn_on_record()`. One record file will be saved at the end of each episode.
154 | 
155 | Find the replay files under the engine directory according to your platform:
156 | 
157 | - Linux: `/fps_Data/StreamingAssets/Replay`
158 | - Windows: `\FPSGameUnity_Data\StreamingAssets\Replay`
159 | - MacOS: `/Contents/Resources/Data/StreamingAssets/Replay`
160 | 
161 | Copy the replay files you want into the replay tool directory for your platform and start the replay tool.
162 | 
163 | For Windows users:
164 | 
165 | - Copy the replay file (e.g. `xxx.bin`) into `/FPSGameUnity_Data/StreamingAssets/Replay`
166 | - Run `FPSGameUnity.exe` to start the application.
167 | 
168 | For MacOS users:
169 | 
170 | - Copy the replay file (e.g. `xxx.bin`) into `/Contents/Resources/Data/StreamingAssets/Replay`
171 | - Run `fps.app` to start the application.
172 | 
173 | In the replay tool, you can:
174 | 
175 | - Select the record you want to watch from the drop-down menu and click **PLAY** to start playing the record.
176 | - During the replay, you can perform the following operations:
177 |   - Press **Tab**: pause or resume
178 |   - Press **E**: switch observation mode (between first person, third person, free)
179 |   - Press **Q**: switch between multiple agents
180 |   - Press **ESC**: stop the replay and return to the main menu
181 | 
182 | ## Q & A
183 | 
184 | Please feel free to join our WeChat group to ask questions about the platform and online evaluation.
185 | 
186 | ![wechat-group](images/wechat.png)
187 | 
-------------------------------------------------------------------------------- /examples/README.md: --------------------------------------------------------------------------------
1 | # Getting Started: basic examples for quick start
2 | 
3 | Here we provide some example Python scripts that may help you quickly get familiar with the basic use of our environment and build your own AI training environment on top of the provided gameplay interfaces. To run the scripts, here are some example shell commands for your reference:
4 | 
5 | ## [basic.py](basic.py)
6 | 
7 | - Shows the basic use of the gameplay interfaces
8 | - All players are controlled by a naive random policy
9 | - Multiple game configuration parameters (including `timeout`, `map_id`, `num_agents`, etc.) can be set as you wish
10 | 
11 | ```bash
12 | # run the game in Navigation mode for one episode with 3 players and turn on depth map rendering
13 | python basic.py \
14 | --num-episodes 1 --num-agents 3 \
15 | --record --replay-suffix basic_demo \
16 | --use-depth-map --game-mode 0
17 | ```
18 | 
19 | ## [basic_track1_navigation.py](basic_track1_navigation.py)
20 | 
21 | - Shows the basic use of the gameplay interfaces for track 1
22 | - An agent is controlled by a simple navigation policy that tells the agent to walk towards the target position
23 | - Multiple game configuration parameters (including `timeout`, `map_id`, `walk_speed`, etc.) can be set as you wish
24 | 
25 | ```bash
26 | # run the game in Navigation mode for one episode with 1 player and turn on depth map rendering
27 | python basic_track1_navigation.py \
28 | --num-episodes 1 \
29 | --record --replay-suffix simple_navigation \
30 | --use-depth-map
31 | ```
32 | 
33 | ## [basic_track2_supply_gather.py](basic_track2_supply_gather.py)
34 | 
35 | - Shows the basic use of the gameplay interfaces for track 2
36 | - An agent is controlled by a simple gathering policy that tells the agent to act randomly
37 | - Multiple game configuration parameters (including `timeout`, `map_id`, `walk_speed`, etc.) can be set as you wish
38 | 
39 | ```bash
40 | # run the game in Supply_gather mode for one episode with 1 player and turn on depth map rendering
41 | python basic_track2_supply_gather.py \
42 | --num-episodes 1 \
43 | --record --replay-suffix simple_navigation \
44 | --use-depth-map
45 | ```
46 | 
47 | ## [basic_track3_supply_battle.py](basic_track3_supply_battle.py)
48 | 
49 | - Shows the basic use of the gameplay interfaces for track 3
50 | - Two agents are controlled by a simple battle policy that tells each agent to act randomly (see the multi-agent setup sketch after this section)
51 | - Multiple game configuration parameters (including `timeout`, `map_id`, `walk_speed`, etc.) can be set as you wish
52 | 
53 | ```bash
54 | # run the game in Supply_battle mode for one episode with 2 players and turn on depth map rendering
55 | python basic_track3_supply_battle.py \
56 | --num-episodes 1 --num-agents 2 \
57 | --record --replay-suffix simple_navigation \
58 | --use-depth-map
59 | ```
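For the battle mode, the only extra setup compared to the single-agent scripts is registering additional agents before `game.init()` and then sending one action list per agent id at every step. Here is a minimal sketch condensed from [basic_track3_supply_battle.py](basic_track3_supply_battle.py) (`num_agents` and `my_policy` stand in for your own values):

```python
num_agents = 2
for agent_id in range(1, num_agents):     # agent 0 keeps the configured start location
    game.add_agent()
    game.random_start_location(agent_id)  # spawn each extra agent at a random valid spot
game.init()

game.new_episode()
while not game.is_episode_finished():
    state_all = game.get_state_all()      # dict: agent_id -> state
    action_all = {agent_id: my_policy(state_all[agent_id], game.get_time_step())
                  for agent_id in state_all}
    game.make_action(action_all)
```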
60 | 
61 | # Getting Started: baseline training scripts based on Ray
62 | 
63 | Here we introduce some examples of using Ray to train agents on the different tracks.
64 | 
65 | - A simple PPO reinforcement learning algorithm is used to learn a policy with a discrete action space.
66 | - You can change the state design or reward function to better adapt to the environment
67 | and train the agent to find the optimal strategy.
68 | - Other reinforcement learning algorithms can be used to train the baselines as well.
69 | 
70 | ## [baseline_track1_navigation.py](baseline_track1_navigation.py)
71 | 
72 | - Shows the basic use of Ray to train an agent for track 1
73 | - An agent is controlled by a discrete policy, trained with the PPO algorithm, that tells the agent to walk to the target position as quickly as possible
74 | - Multiple game configuration parameters (including `timeout`, `map_id`, `walk_speed`, etc.) can be set as you wish
75 | - Various environment and training configuration parameters (including `num_workers`, `stop_iters`, `stop_reward`, etc.) can be set as you wish
76 | - You can design your own observation shape, action space, or reward function
77 | 
78 | ```bash
79 | # run training for up to 20 iterations with a single rollout worker
80 | python baseline_track1_navigation.py \
81 | --use-depth-map \
82 | --detailed-log \
83 | --record --replay-suffix baseline_navigation \
84 | --num-workers 1 --stop-iters 20 --stop-reward 80
85 | ```
86 | 
87 | ## [baseline_track2_supply_gather.py](baseline_track2_supply_gather.py)
88 | 
89 | - Shows the basic use of Ray to train an agent for track 2
90 | - An agent is controlled by a learned policy, trained with the PPO algorithm, that tells the agent to collect as many supplies as possible
91 | - Multiple game configuration parameters (including `timeout`, `map_id`, `walk_speed`, etc.) can be set as you wish
92 | - Likewise, various environment and training configuration parameters (including `num_workers`, `train_batch_size`, `stop_episodes`, etc.) can be set as you wish
93 | - You can design your own observation shape, action space, or reward function
94 | 
95 | ```bash
96 | # run training for 100 episodes with 10 rollout workers
97 | python baseline_track2_supply_gather.py \
98 | --use-depth-map \
99 | --detailed-log \
100 | --record --replay-suffix baseline_supply \
101 | --num-workers 10 --stop-episodes 100 --train-batch-size 400
102 | ```
103 | 
-------------------------------------------------------------------------------- /examples/baseline_track1_navigation.py: --------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | parser = argparse.ArgumentParser()
4 | parser.add_argument("-T", "--timeout", type=int, default=60 * 2)  # The time length of one game (sec)
5 | parser.add_argument("-R", "--time-scale", type=int, default=10)
6 | parser.add_argument("-M", "--map-id", type=int, default=1)
7 | parser.add_argument("-S", "--random-seed", type=int, default=0)
8 | parser.add_argument("--target-location", type=float, nargs=3, default=[0, 0, 0])
9 | parser.add_argument("--start-location", type=float, nargs=3, default=[0, 0, 0])
10 | parser.add_argument("--start-range", type=float, default=2)
11 | parser.add_argument("--start-hight", type=float, default=5)  # spawn height; the spelling matches the "start_hight" key used by the envs
12 | parser.add_argument("--engine-dir", type=str, default="../unity3d")
13 | parser.add_argument("--map-dir", type=str, default="../data")
14 | parser.add_argument("--num-workers", type=int, default=0)
15 | parser.add_argument("--eval-interval", type=int, default=None)
16 | parser.add_argument("--record", action="store_true")
17 | parser.add_argument("--replay-suffix", type=str, default="")
18 | parser.add_argument("--checkpoint-dir", type=str, default="checkpoints_track1")
19 | parser.add_argument("--detailed-log", action="store_true", help="whether to print detailed logs")
20 | parser.add_argument("--run", type=str, default="PPO", help="The RLlib-registered algorithm to use.")
21 | parser.add_argument("--stop-iters", type=int, default=9999)
22 | parser.add_argument("--stop-timesteps", type=int, default=100000000)
23 | parser.add_argument("--stop-reward", type=float, default=95)
24 | 
25 | 
26 | if __name__ == "__main__":
27 |     import os
28 |     import ray
29 |     from ray.tune.logger import pretty_print
30 |     from ray.rllib.agents.ppo import PPOTrainer
31 |     from envs.envs_track1 import NavigationEnvSimple
32 | 
33 |     args = parser.parse_args()
34 | 
35 |     ray.init()
36 |     trainer = PPOTrainer(
37 |         config={
38 |             "env": NavigationEnvSimple,
39 |             "env_config": vars(args),
40 |             "framework": "torch",
41 |             "num_workers": args.num_workers,
42 |             "evaluation_interval": args.eval_interval,
43 |         }
44 |     )
45 | 
46 |     while True:  # train until the mean episode reward reaches --stop-reward, then checkpoint and stop
47 |         result = trainer.train()
48 |         print(pretty_print(result))
49 | 
50 |         if result["episode_reward_mean"] >= args.stop_reward:
51 |             os.makedirs(args.checkpoint_dir, exist_ok=True)
52 |             trainer.save_checkpoint(args.checkpoint_dir)
53 |             trainer.stop()
54 |             break
55 | 
56 |     print(pretty_print(result))
57 |     ray.shutdown()
-------------------------------------------------------------------------------- /examples/baseline_track2_supply_gather.py: --------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | parser = argparse.ArgumentParser()
4 | 
5 | # game setup
6 | parser.add_argument("--timeout", type=int, default=60 * 2)  # The time length of one game (sec)
7 | parser.add_argument("--time-scale", type=int, default=1)  # speedup factor
8 | parser.add_argument("--random-seed", 
type=int, default=0) 9 | parser.add_argument("--detailed-log", action="store_true", help="whether to print detailed logs") 10 | parser.add_argument("--heatmap-center", type=float, nargs=3, default=[8, 8]) # the center of the supply heatmap (x, z are the 2D location and y is the height) 11 | parser.add_argument("--start-range", type=float, default=1) # the range of the start location 12 | parser.add_argument("--start-hight", type=float, default=5) # the height of the start location 13 | parser.add_argument("--engine-dir", type=str, default="../unity3d") # path to unity executable 14 | parser.add_argument("--map-dir", type=str, default="../data") # path to map files 15 | parser.add_argument("--map-id", type=int, default=1) # id of the map 16 | parser.add_argument("--use-depth-map", action="store_true") # whether to use depth map 17 | parser.add_argument("--resume", action="store_true") # whether to resume training from a checkpoint 18 | parser.add_argument("--checkpoint-dir", type=str, default="checkpoints_track2", help="dir to checkpoint files") 19 | parser.add_argument("--replay-interval", type=int, default=1, help="episode interval to save replay") 20 | parser.add_argument("--record", action="store_true", help="whether to record the game") 21 | parser.add_argument("--replay-suffix", type=str, default="", help="suffix of the replay filename") 22 | parser.add_argument("--inference", action="store_true", help="whether to run inference") 23 | 24 | # training config 25 | parser.add_argument("--num-workers", type=int, default=0) 26 | parser.add_argument("--eval-interval", type=int, default=None) 27 | parser.add_argument("--run", type=str, default="PPO", help="The RLlib-registered algorithm to use.") 28 | parser.add_argument("--stop-iters", type=int, default=9999) 29 | parser.add_argument("--stop-timesteps", type=int, default=100000000) 30 | parser.add_argument("--stop-reward", type=float, default=999999) 31 | parser.add_argument("--stop-episodes", type=float, default=20) 32 | parser.add_argument("--train-batch-size", type=int, default=400) 33 | 34 | 35 | if __name__ == "__main__": 36 | import os 37 | import ray 38 | from ray.rllib.agents.ppo import PPOTrainer 39 | from ray.tune.logger import pretty_print 40 | from envs.envs_track2 import SupplyGatherDiscreteSingleTarget 41 | 42 | args = parser.parse_args() 43 | 44 | ray.init() 45 | trainer = PPOTrainer( 46 | config={ 47 | "env": SupplyGatherDiscreteSingleTarget, 48 | "env_config": vars(args), 49 | "framework": "torch", 50 | "num_workers": args.num_workers, 51 | "evaluation_interval": args.eval_interval, 52 | "train_batch_size": args.train_batch_size, # default of ray is 4000 53 | } 54 | ) 55 | 56 | if args.resume: 57 | trainer.load_checkpoint(args.checkpoint_dir) 58 | 59 | while True: 60 | result = trainer.train() 61 | print(pretty_print(result)) 62 | if result["episodes_total"] >= args.stop_episodes: 63 | os.makedirs(args.checkpoint_dir, exist_ok=True) 64 | trainer.save_checkpoint(args.checkpoint_dir) 65 | trainer.stop() 66 | break 67 | 68 | print(pretty_print(result)) 69 | ray.shutdown() 70 | -------------------------------------------------------------------------------- /examples/basic.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | import argparse 4 | 5 | from rich.progress import track 6 | from rich.console import Console 7 | 8 | console = Console() 9 | 10 | from inspirai_fps import Game, ActionVariable 11 | from inspirai_fps.utils import get_position 12 | 13 | 14 
| parser = argparse.ArgumentParser() 15 | parser.add_argument("--port", type=int, default=50051) 16 | parser.add_argument("--timeout", type=int, default=10) 17 | parser.add_argument("--game-mode", type=int, default=0) 18 | parser.add_argument("--random-seed", type=int, default=0) 19 | parser.add_argument("--num-episodes", type=int, default=1) 20 | parser.add_argument("--map-id", type=int, default=1) 21 | parser.add_argument("--map-dir", type=str, default="../map_data") 22 | parser.add_argument("--engine-dir", type=str, default="../fps_linux") 23 | parser.add_argument("--use-depth-map", action="store_true") 24 | parser.add_argument("--random-start-location", action="store_true") 25 | parser.add_argument("--num-agents", type=int, default=1) 26 | parser.add_argument("--record", action="store_true") 27 | parser.add_argument("--replay-suffix", type=str, default="") 28 | parser.add_argument("--start-location", type=float, nargs=3, default=[0, 0, 0]) 29 | parser.add_argument("--target-location", type=float, nargs=3, default=[5, 0, 5]) 30 | args = parser.parse_args() 31 | console.print(args) 32 | 33 | 34 | def my_policy(state): 35 | """Define a random policy""" 36 | return [ 37 | random.randint(0, 360), # walk_dir 38 | random.randint(1, 10), # walk_speed 39 | random.choice([-1, 0, 1]), # turn_lr_delta 40 | random.choice([-1, 0, 1]), # turn_ud_delta 41 | random.random() > 0.5, # jump 42 | ] 43 | 44 | 45 | used_actions = [ 46 | ActionVariable.WALK_DIR, 47 | ActionVariable.WALK_SPEED, 48 | ActionVariable.TURN_LR_DELTA, 49 | ActionVariable.LOOK_UD_DELTA, 50 | ActionVariable.JUMP, 51 | ] 52 | 53 | game = Game(map_dir=args.map_dir, engine_dir=args.engine_dir, server_port=args.port) 54 | game.set_game_mode(args.game_mode) 55 | game.set_random_seed(args.random_seed) 56 | game.set_supply_heatmap_center([args.start_location[0], args.start_location[2]]) 57 | game.set_supply_heatmap_radius(30) 58 | game.set_supply_indoor_richness(80) 59 | game.set_supply_outdoor_richness(20) 60 | game.set_supply_indoor_quantity_range(10, 50) 61 | game.set_supply_outdoor_quantity_range(1, 5) 62 | game.set_supply_spacing(5) 63 | game.set_episode_timeout(args.timeout) 64 | game.set_start_location(args.start_location) 65 | game.set_target_location(args.target_location) 66 | game.set_available_actions(used_actions) 67 | game.set_map_id(args.map_id) 68 | 69 | for agent_id in range(1, args.num_agents): 70 | game.add_agent() 71 | 72 | if args.use_depth_map: 73 | game.turn_on_depth_map() 74 | 75 | if args.record: 76 | game.turn_on_record() 77 | 78 | 79 | game.init() 80 | for ep in track(range(args.num_episodes), description="Running Episodes ..."): 81 | if args.random_start_location: 82 | for agent_id in range(args.num_agents): 83 | game.random_start_location(agent_id) 84 | 85 | game.set_game_replay_suffix(f"{args.replay_suffix}_episode_{ep}") 86 | console.print(game.get_game_config()) 87 | 88 | game.new_episode() 89 | while not game.is_episode_finished(): 90 | ts = game.get_time_step() 91 | 92 | t = time.perf_counter() 93 | state_all = game.get_state_all() 94 | action_all = {agent_id: my_policy(state_all[agent_id]) for agent_id in state_all} 95 | game.make_action(action_all) 96 | dt = time.perf_counter() - t 97 | 98 | for agent_id, state in state_all.items(): 99 | step_info = { 100 | "Episode": ep, 101 | "TimeStep": ts, 102 | "AgentID": agent_id, 103 | "Location": get_position(state), 104 | "Action": {name: val for name, val in zip(used_actions, action_all[agent_id])}, 105 | "#SupplyInfo": len(state.supply_states), 106 | 
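                # the supply/enemy state lists are provided by the backend in each agent state; only their sizes are logged here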
"#EnemyInfo": len(state.enemy_states), 107 | "StepRate": round(1 / dt), 108 | } 109 | if args.use_depth_map: 110 | step_info["DepthMap"] = state.depth_map.shape 111 | console.print(step_info, style="bold magenta") 112 | 113 | print("episode ended ...") 114 | 115 | game.close() 116 | -------------------------------------------------------------------------------- /examples/basic_track1_navigation.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | import argparse 4 | import numpy as np 5 | 6 | from inspirai_fps import Game, ActionVariable 7 | from inspirai_fps.utils import get_position 8 | 9 | from rich.progress import track 10 | from rich.console import Console 11 | 12 | console = Console() 13 | 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument("--port", type=int, default=50051) 16 | parser.add_argument("--timeout", type=int, default=10) 17 | parser.add_argument("--map-id", type=int, default=1) 18 | parser.add_argument("--random-seed", type=int, default=0) 19 | parser.add_argument("--num-episodes", type=int, default=1) 20 | parser.add_argument("--engine-dir", type=str, default="../unity3d") 21 | parser.add_argument("--map-dir", type=str, default="../data") 22 | parser.add_argument("--use-depth-map", action="store_true") 23 | parser.add_argument("--record", action="store_true") 24 | parser.add_argument("--replay-suffix", type=str, default="") 25 | parser.add_argument("--start-location", type=float, nargs=3, default=[0, 0, 0]) 26 | parser.add_argument("--target-location", type=float, nargs=3, default=[5, 0, 5]) 27 | parser.add_argument("--walk-speed", type=float, default=1) 28 | args = parser.parse_args() 29 | 30 | 31 | def get_pitch_yaw(x, y, z): 32 | pitch = np.arctan2(y, (x**2 + z**2) ** 0.5) / np.pi * 180 33 | yaw = np.arctan2(x, z) / np.pi * 180 34 | return pitch, yaw 35 | 36 | 37 | def my_policy(state): 38 | """Define a simple navigation policy""" 39 | self_pos = [state.position_x, state.position_y, state.position_z] 40 | target_pos = args.target_location 41 | direction = [v2 - v1 for v1, v2 in zip(self_pos, target_pos)] 42 | yaw = get_pitch_yaw(*direction)[1] 43 | action = [yaw, args.walk_speed] 44 | return action 45 | 46 | 47 | # valid actions 48 | used_actions = [ 49 | ActionVariable.WALK_DIR, 50 | ActionVariable.WALK_SPEED, 51 | ] 52 | 53 | # instantiate Game 54 | game = Game(map_dir=args.map_dir, engine_dir=args.engine_dir) 55 | game.set_game_mode(Game.MODE_NAVIGATION) 56 | game.set_episode_timeout(args.timeout) 57 | game.set_start_location(args.start_location) # set start location of the first agent 58 | game.set_target_location(args.target_location) 59 | game.set_available_actions(used_actions) 60 | game.set_map_id(args.map_id) 61 | 62 | if args.use_depth_map: 63 | game.turn_on_depth_map() 64 | 65 | if args.record: 66 | game.turn_on_record() 67 | 68 | game.init() 69 | 70 | for ep in track(range(args.num_episodes), description="Running Episodes ..."): 71 | game.set_game_replay_suffix(f"{args.replay_suffix}_episode_{ep}") 72 | game.new_episode() 73 | 74 | while not game.is_episode_finished(): 75 | ts = game.get_time_step() 76 | 77 | t = time.perf_counter() 78 | state_all = game.get_state_all() 79 | action_all = { 80 | agent_id: my_policy(state_all[agent_id]) for agent_id in state_all 81 | } 82 | game.make_action(action_all) 83 | dt = time.perf_counter() - t 84 | 85 | for agent_id, state in state_all.items(): 86 | step_info = { 87 | "Episode": ep, 88 | "TimeStep": ts, 89 | "AgentID": agent_id, 90 
| "Location": get_position(state), 91 | "Action": { 92 | name: val for name, val in zip(used_actions, action_all[agent_id]) 93 | }, 94 | "#SupplyInfo": len(state.supply_states), 95 | "#EnemyInfo": len(state.enemy_states), 96 | "StepRate": round(1 / dt), 97 | } 98 | if args.use_depth_map: 99 | step_info["DepthMap"] = state.depth_map.shape 100 | console.print(step_info, style="bold magenta") 101 | 102 | print("episode ended ...") 103 | 104 | game.close() 105 | -------------------------------------------------------------------------------- /examples/basic_track2_supply_gather.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | import argparse 4 | 5 | from rich.progress import track 6 | from rich.console import Console 7 | 8 | console = Console() 9 | 10 | from inspirai_fps import Game, ActionVariable 11 | from inspirai_fps.utils import get_position 12 | 13 | 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument("--port", type=int, default=50051) 16 | parser.add_argument("--timeout", type=int, default=10) 17 | parser.add_argument("--map-id", type=int, default=1) 18 | parser.add_argument("--random-seed", type=int, default=0) 19 | parser.add_argument("--num-episodes", type=int, default=1) 20 | parser.add_argument("--engine-dir", type=str, default="../unity3d") 21 | parser.add_argument("--map-dir", type=str, default="../data") 22 | parser.add_argument("--use-depth-map", action="store_true") 23 | parser.add_argument("--record", action="store_true") 24 | parser.add_argument("--replay-suffix", type=str, default="") 25 | parser.add_argument("--start-location", type=float, nargs=3, default=[0, 0, 0]) 26 | parser.add_argument("--walk-speed", type=float, default=1) 27 | args = parser.parse_args() 28 | 29 | 30 | # Define a random policy 31 | def my_policy(state, ts): 32 | jump = False 33 | pickup = True 34 | 35 | if ts % 60 == 0: 36 | jump = True 37 | 38 | return [ 39 | random.randint(0, 360), # walk_dir 40 | args.walk_speed, # walk_speed 41 | jump, # jump 42 | 1, # turn left right 43 | 0, # look up down 44 | pickup, # collect 45 | ] 46 | 47 | 48 | # valid actions 49 | used_actions = [ 50 | ActionVariable.WALK_DIR, 51 | ActionVariable.WALK_SPEED, 52 | ActionVariable.JUMP, 53 | ActionVariable.TURN_LR_DELTA, 54 | ActionVariable.LOOK_UD_DELTA, 55 | ActionVariable.PICKUP, 56 | ] 57 | 58 | # instantiate Game 59 | game = Game(map_dir=args.map_dir, engine_dir=args.engine_dir) 60 | game.set_game_mode(Game.MODE_SUP_GATHER) 61 | game.set_supply_heatmap_center([args.start_location[0], args.start_location[2]]) 62 | game.set_supply_heatmap_radius(50) 63 | game.set_supply_indoor_richness(80) 64 | game.set_supply_outdoor_richness(20) 65 | game.set_supply_indoor_quantity_range(10, 50) 66 | game.set_supply_outdoor_quantity_range(1, 5) 67 | game.set_supply_spacing(5) 68 | game.set_episode_timeout(args.timeout) 69 | game.set_start_location(args.start_location) 70 | game.set_available_actions(used_actions) 71 | game.set_map_id(args.map_id) 72 | 73 | if args.use_depth_map: 74 | game.turn_on_depth_map() 75 | 76 | if args.record: 77 | game.turn_on_record() 78 | 79 | game.init() 80 | 81 | for ep in track(range(args.num_episodes), description="Running Episodes ..."): 82 | game.set_game_replay_suffix(f"{args.replay_suffix}_episode_{ep}") 83 | game.new_episode() 84 | 85 | while not game.is_episode_finished(): 86 | ts = game.get_time_step() 87 | 88 | t = time.perf_counter() 89 | state_all = game.get_state_all() 90 | action_all = { 91 | agent_id: 
my_policy(state_all[agent_id], ts) for agent_id in state_all 92 | } 93 | game.make_action(action_all) 94 | dt = time.perf_counter() - t 95 | 96 | for agent_id, state in state_all.items(): 97 | step_info = { 98 | "Episode": ep, 99 | "TimeStep": ts, 100 | "AgentID": agent_id, 101 | "Location": get_position(state), 102 | "Action": { 103 | name: val for name, val in zip(used_actions, action_all[agent_id]) 104 | }, 105 | "#SupplyInfo": len(state.supply_states), 106 | "#EnemyInfo": len(state.enemy_states), 107 | "StepRate": round(1 / dt), 108 | } 109 | if args.use_depth_map: 110 | step_info["DepthMap"] = state.depth_map.shape 111 | console.print(step_info, style="bold magenta") 112 | 113 | print("episode ended ...") 114 | 115 | game.close() 116 | -------------------------------------------------------------------------------- /examples/basic_track3_supply_battle.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | import argparse 4 | 5 | from rich.progress import track 6 | from rich.console import Console 7 | 8 | console = Console() 9 | 10 | from inspirai_fps import Game, ActionVariable 11 | from inspirai_fps.utils import get_position 12 | 13 | 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument("--port", type=int, default=50051) 16 | parser.add_argument("--timeout", type=int, default=10) 17 | parser.add_argument("--map-id", type=int, default=1) 18 | parser.add_argument("--random-seed", type=int, default=0) 19 | parser.add_argument("--num-episodes", type=int, default=1) 20 | parser.add_argument("--engine-dir", type=str, default="../unity3d") 21 | parser.add_argument("--map-dir", type=str, default="../data") 22 | parser.add_argument("--num-agents", type=int, default=1) 23 | parser.add_argument("--use-depth-map", action="store_true") 24 | parser.add_argument("--record", action="store_true") 25 | parser.add_argument("--replay-suffix", type=str, default="") 26 | parser.add_argument("--start-location", type=float, nargs=3, default=[0, 0, 0]) 27 | parser.add_argument("--walk-speed", type=float, default=1) 28 | args = parser.parse_args() 29 | 30 | 31 | # Define a random policy 32 | def my_policy(state, ts): 33 | jump = False 34 | attack = 0 35 | reload = False 36 | pickup = True 37 | 38 | if ts % 60 == 0: 39 | jump = True 40 | if ts % 30 == 0: 41 | attack = 1 42 | if (state.weapon_ammo == 0) and (state.spare_ammo > 0): 43 | reload = True 44 | 45 | return [ 46 | random.randint(0, 360), # walk_dir 47 | args.walk_speed, # walk_speed 48 | jump, # jump 49 | 1, # turn left right 50 | 0, # look up down 51 | attack, # attack 52 | reload, # reload 53 | pickup, # collect 54 | ] 55 | 56 | 57 | # valid actions 58 | used_actions = [ 59 | ActionVariable.WALK_DIR, 60 | ActionVariable.WALK_SPEED, 61 | ActionVariable.JUMP, 62 | ActionVariable.TURN_LR_DELTA, 63 | ActionVariable.LOOK_UD_DELTA, 64 | ActionVariable.ATTACK, 65 | ActionVariable.RELOAD, 66 | ActionVariable.PICKUP, 67 | ] 68 | 69 | # instantiate Game 70 | game = Game(map_dir=args.map_dir, engine_dir=args.engine_dir) 71 | game.set_game_mode(Game.MODE_SUP_BATTLE) 72 | game.set_supply_heatmap_center([args.start_location[0], args.start_location[2]]) 73 | game.set_supply_heatmap_radius(50) 74 | game.set_supply_indoor_richness(80) 75 | game.set_supply_outdoor_richness(20) 76 | game.set_supply_indoor_quantity_range(10, 50) 77 | game.set_supply_outdoor_quantity_range(1, 5) 78 | game.set_supply_spacing(5) 79 | game.set_episode_timeout(args.timeout) 80 | 
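# the supply heatmap settings above control how many supplies spawn and where (richer indoors in this config)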
game.set_start_location(args.start_location) # set start location of the first agent 81 | game.set_available_actions(used_actions) 82 | game.set_map_id(args.map_id) 83 | 84 | if args.use_depth_map: 85 | game.turn_on_depth_map() 86 | 87 | if args.record: 88 | game.turn_on_record() 89 | 90 | for agent_id in range(1, args.num_agents): 91 | game.add_agent() 92 | game.random_start_location(agent_id) 93 | 94 | game.init() 95 | 96 | for ep in track(range(args.num_episodes), description="Running Episodes ..."): 97 | game.set_game_replay_suffix(f"{args.replay_suffix}_episode_{ep}") 98 | game.new_episode() 99 | 100 | while not game.is_episode_finished(): 101 | ts = game.get_time_step() 102 | 103 | t = time.perf_counter() 104 | state_all = game.get_state_all() 105 | action_all = { 106 | agent_id: my_policy(state_all[agent_id], ts) for agent_id in state_all 107 | } 108 | game.make_action(action_all) 109 | dt = time.perf_counter() - t 110 | 111 | for agent_id, state in state_all.items(): 112 | step_info = { 113 | "Episode": ep, 114 | "TimeStep": ts, 115 | "AgentID": agent_id, 116 | "Location": get_position(state), 117 | "Action": { 118 | name: val for name, val in zip(used_actions, action_all[agent_id]) 119 | }, 120 | "#SupplyInfo": len(state.supply_states), 121 | "#EnemyInfo": len(state.enemy_states), 122 | "StepRate": round(1 / dt), 123 | } 124 | if args.use_depth_map: 125 | step_info["DepthMap"] = state.depth_map.shape 126 | console.print(step_info, style="bold magenta") 127 | 128 | print("episode ended ...") 129 | 130 | game.close() 131 | -------------------------------------------------------------------------------- /examples/envs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inspirai/wilderness-scavenger/4c2be3796cb311601c127fd6e1791619f029934f/examples/envs/__init__.py -------------------------------------------------------------------------------- /examples/envs/envs_track1.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | from gym import spaces 4 | from ray.rllib.env import EnvContext 5 | from inspirai_fps.utils import get_distance, get_position 6 | from inspirai_fps.gamecore import Game, ActionVariable 7 | 8 | BASE_WORKER_PORT = 50000 9 | 10 | 11 | class BaseEnv(gym.Env): 12 | def __init__(self, config: EnvContext): 13 | super().__init__() 14 | 15 | self.record = config.get("record", False) 16 | self.replay_suffix = config.get("replay_suffix", "") 17 | self.print_log = config.get("detailed_log", False) 18 | 19 | self.seed(config["random_seed"]) 20 | self.server_port = BASE_WORKER_PORT + config.worker_index 21 | print(f">>> New instance {self} on port: {self.server_port}") 22 | print(f"Worker Index: {config.worker_index}, VecEnv Index: {config.vector_index}") 23 | 24 | self.game = Game(map_dir=config["map_dir"], engine_dir=config["engine_dir"], server_port=self.server_port) 25 | self.game.set_map_id(config["map_id"]) 26 | self.game.set_episode_timeout(config["timeout"]) 27 | self.game.set_random_seed(config["random_seed"]) 28 | self.start_location = config.get("start_location", [0, 0, 0]) 29 | 30 | def reset(self): 31 | print("Reset for a new game ...") 32 | self._reset_game_config() 33 | if self.record: 34 | self.game.turn_on_record() 35 | else: 36 | self.game.turn_off_record() 37 | self.game.set_game_replay_suffix(self.replay_suffix) 38 | self.game.new_episode() 39 | self.state = self.game.get_state() 40 | self.running_steps = 0 41 | 
return self._get_obs() 42 | 43 | def close(self): 44 | self.game.close() 45 | return super().close() 46 | 47 | def render(self, mode="replay"): 48 | return None 49 | 50 | def _reset_game_config(self): 51 | raise NotImplementedError() 52 | 53 | def _get_obs(self): 54 | raise NotImplementedError() 55 | 56 | 57 | class NavigationBaseEnv(BaseEnv): 58 | def __init__(self, config: EnvContext): 59 | super().__init__(config) 60 | 61 | self.start_range = config["start_range"] 62 | self.start_hight = config["start_hight"] 63 | self.trigger_range = self.game.get_target_reach_distance() 64 | self.target_location = config["target_location"] 65 | 66 | self.game.set_game_mode(Game.MODE_NAVIGATION) 67 | self.game.set_target_location(self.target_location) 68 | 69 | def _reset_game_config(self): 70 | self.start_location = self._sample_start_location() 71 | self.game.set_start_location(self.start_location) 72 | 73 | def step(self, action): 74 | action_cmd = self._action_process(action) 75 | self.game.make_action({0: action_cmd}) 76 | self.state = self.game.get_state() 77 | done = self.game.is_episode_finished() 78 | reward = 0 79 | self.running_steps += 1 80 | 81 | if done: 82 | cur_pos = get_position(self.state) 83 | tar_pos = self.target_location 84 | 85 | if get_distance(cur_pos, tar_pos) <= self.trigger_range: 86 | reward += 100 87 | 88 | if self.print_log: 89 | Start = np.round(np.asarray(self.start_location), 2).tolist() 90 | Target = np.round(np.asarray(self.target_location), 2).tolist() 91 | End = np.round(np.asarray(get_position(self.state)), 2).tolist() 92 | Step = self.running_steps 93 | Reward = reward 94 | print(f"{Start=}\t{Target=}\t{End=}\t{Step=}\t{Reward=}") 95 | 96 | return self._get_obs(), reward, done, {} 97 | 98 | def _sample_start_location(self): 99 | raise NotImplementedError() 100 | 101 | def _action_process(self, action): 102 | raise NotImplementedError() 103 | 104 | 105 | class NavigationEnvSimple(NavigationBaseEnv): 106 | def __init__(self, config: EnvContext): 107 | super().__init__(config) 108 | self.action_pools = { 109 | ActionVariable.WALK_DIR: [0, 90, 180, 270], 110 | ActionVariable.WALK_SPEED: [3, 6], 111 | } 112 | self.action_space = spaces.MultiDiscrete([len(pool) for pool in self.action_pools.values()]) 113 | self.observation_space = spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32) 114 | 115 | self.game.set_available_actions([action_name for action_name in self.action_pools.keys()]) 116 | self.game.init() 117 | 118 | def _get_obs(self): 119 | cur_pos = np.asarray(get_position(self.state)) 120 | tar_pos = np.asarray(self.target_location) 121 | dir_vec = tar_pos - cur_pos 122 | return dir_vec / np.linalg.norm(dir_vec) 123 | 124 | def _action_process(self, action): 125 | action_values = list(self.action_pools.values()) 126 | return [action_values[i][action[i]] for i in range(len(action))] 127 | 128 | def _sample_start_location(self): 129 | angle = np.random.uniform(0, 360) 130 | distance_to_trigger = abs(np.random.normal(scale=self.start_range)) 131 | vec_len = self.trigger_range + distance_to_trigger 132 | dx = np.sin(angle) * vec_len 133 | dz = np.cos(angle) * vec_len 134 | x = self.target_location[0] + dx 135 | z = self.target_location[2] + dz 136 | return [x, self.start_hight, z] 137 | -------------------------------------------------------------------------------- /examples/envs/envs_track2.py: -------------------------------------------------------------------------------- 1 | import os, time 2 | import numpy as np 3 | from typing import List 4 | 5 | import 
gym 6 | from gym.spaces import Box, MultiDiscrete, Tuple 7 | from ray.rllib.env import EnvContext 8 | 9 | from inspirai_fps.gamecore import ActionVariable, Game 10 | from inspirai_fps.utils import get_distance, get_position 11 | 12 | 13 | def standardization(data, axis=1): 14 | mu = np.mean(data, axis=axis) 15 | sigma = np.std(data, axis=axis) 16 | return (data - mu) / sigma 17 | 18 | 19 | class SupplyGatherBaseEnv(gym.Env): 20 | """ 21 | Base Gym Env for Supply Gathering,\\ 22 | inherit this class to implement your own supply gathering environment,\\ 23 | and implement the following methods: 24 | - _get_obs 25 | - _compute_reward 26 | - _action_process 27 | """ 28 | 29 | def __init__(self, env_config: EnvContext): 30 | super().__init__() 31 | # set log 32 | cur_path = os.path.abspath(os.path.dirname(__file__)) 33 | time_stamp = "Supply-%s" % time.strftime("%Y%m%d-%H%M%S") 34 | self.is_inference = env_config["inference"] 35 | if self.is_inference: 36 | self.server_port = 50052 37 | time_stamp += f"-{self.server_port}" 38 | self.log_path = os.path.expanduser("%s/%s" % (cur_path, time_stamp)) 39 | with open(self.log_path + "log.txt", "w") as f: 40 | f.write(f">>> {self.__class__}, log:\n") 41 | else: 42 | self.server_port = 50052 + env_config.worker_index 43 | time_stamp += f"-{self.server_port}" 44 | self.log_path = os.path.expanduser("%s/%s" % (cur_path, time_stamp)) 45 | print(f">>> New instance {self} on port: {self.server_port}") 46 | print(f"Worker Index: {env_config.worker_index}, VecEnv Index: {env_config.vector_index}") 47 | with open(self.log_path + "log.txt", "w") as f: 48 | f.write(f">>> {self.__class__}, server_port: {self.server_port} , worker_index: {env_config.worker_index}, log:\n") 49 | 50 | use_action_vars = [ 51 | ActionVariable.WALK_DIR, 52 | ActionVariable.WALK_SPEED, 53 | ActionVariable.PICKUP, 54 | ] 55 | 56 | self.WALK_DIR_LIST = [0, 45, 90, 135, 180, 225, 270, 315] 57 | self.WALK_SPEED_LIST = [3, 6, 9] # [3, 6, 9] # [0, 1, 2] 58 | self.PICKUP_LIST = [True, False] 59 | 60 | self.action_space = MultiDiscrete( 61 | [ 62 | len(self.WALK_DIR_LIST), 63 | len(self.WALK_SPEED_LIST), 64 | len(self.PICKUP_LIST), 65 | ] 66 | ) 67 | 68 | self.supply_attribute_len = 3 69 | 70 | self.game = Game( 71 | map_dir=env_config["map_dir"], 72 | engine_dir=env_config["engine_dir"], 73 | server_port=self.server_port, 74 | ) 75 | 76 | self.game.set_game_mode(Game.MODE_SUP_GATHER) 77 | self.game.set_available_actions(use_action_vars) 78 | self.game.set_map_id(env_config["map_id"]) 79 | self.game.set_episode_timeout(env_config["timeout"]) 80 | self.game.set_random_seed(env_config["random_seed"]) 81 | self.game.set_supply_heatmap_center(env_config["heatmap_center"]) 82 | self.game.set_supply_heatmap_radius(30) 83 | self.game.set_supply_indoor_richness(2) # 10 84 | self.game.set_supply_outdoor_richness(2) # 10 85 | self.game.set_supply_indoor_quantity_range(10, 20) 86 | self.game.set_supply_outdoor_quantity_range(1, 5) 87 | self.game.set_supply_spacing(1) 88 | 89 | self.is_inference = env_config.get("inference", False) 90 | self.turn_on_detailed_log = env_config["detailed_log"] 91 | self.args = env_config 92 | self.episode_count = 0 93 | 94 | self.target_supply_radius = 4 # heatmap center -> radius = 4, supply -> radius = 2 95 | 96 | self.list_spaces: List[gym.Space] = [Box(low=-1, high=1, shape=(3,), dtype=np.float32)] 97 | if env_config["use_depth_map"]: 98 | self.game.turn_on_depth_map() 99 | height = self.game.get_depth_map_height() 100 | width = self.game.get_depth_map_width() 101 | 
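            # the depth observation is a (height, width) array whose values are capped by the engine's depth limit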
max_depth = self.game.get_depth_limit() 102 | self.list_spaces.append(Box(0, max_depth, (height, width), dtype=np.float32)) 103 | self.observation_space = Tuple(self.list_spaces) 104 | else: 105 | self.observation_space = self.list_spaces[0] 106 | self.game.init() 107 | 108 | def reset(self): 109 | print("Reset for a new game ...") 110 | self.start_location = self._sample_start_location() 111 | self.game.set_start_location(self.start_location) 112 | self.episode_count += 1 113 | if self.args["record"] and self.episode_count % self.args["replay_interval"] == 0: 114 | self.game.turn_on_record() 115 | self.game.set_game_replay_suffix(self.args["replay_suffix"]) 116 | else: 117 | self.game.turn_off_record() 118 | self.game.new_episode() 119 | state = self.game.get_state() 120 | 121 | self.collected_supply = 0 122 | self.running_steps = 0 123 | self.episode_reward = 0 124 | self.valid_collected_supply = 0 # number of valid collected supply 125 | # self.target_supply_flag = False # whether the agent still exists 126 | return state 127 | 128 | def _sample_start_location(self): 129 | angle = np.random.uniform(0, 360) 130 | distance_to_trigger = abs(np.random.normal(scale=self.args["start_range"])) 131 | vec_len = 1 + distance_to_trigger 132 | # vec_len = self.game.trigger_range + np.random.uniform(0, self.start_range) 133 | dx = np.sin(angle) * vec_len 134 | dz = np.cos(angle) * vec_len 135 | x = self.args["heatmap_center"][0] + dx 136 | z = self.args["heatmap_center"][1] + dz 137 | return [x, self.args["start_hight"], z] 138 | 139 | def _action_process(self, action): 140 | walk_dir = self.WALK_DIR_LIST[action[0]] 141 | walk_speed = self.WALK_SPEED_LIST[action[1]] 142 | pickup = self.PICKUP_LIST[action[2]] 143 | 144 | return {0: [walk_dir, walk_speed, pickup]} 145 | 146 | def step(self, action): 147 | """ 148 | Parameters 149 | ---------- 150 | action : list of action values 151 | 152 | Procedure 153 | ---------- 154 | 1. process action to cmd and then backend env execute action 155 | 2. get new state from backend env 156 | 3. compute reward from new state 157 | 4. 
process new state to get the new observation
158 |         """
159 | 
160 |         self.running_steps += 1
161 |         # execute the action
162 |         action_cmd = self._action_process(action)
163 |         self.game.make_action(action_cmd)
164 | 
165 |         # state transition
166 |         state = self.game.get_state()
167 | 
168 |         # compute the reward
169 |         # _compute_reward = getattr(self, '_compute_reward')
170 |         reward = self._compute_reward(state, action_cmd)
171 |         self.episode_reward += reward
172 |         if self.turn_on_detailed_log:
173 |             with open(self.log_path + "log.txt", "a") as f:
174 |                 f.write(f"\nstep:{self.running_steps};\t")
175 |                 f.write(f"action:{action_cmd};\t")
176 |                 f.write(f"reward:{round(reward, 2)};\t")
177 | 
178 |         # compute the observation
179 |         # _get_obs = getattr(self, '_get_obs')
180 |         self.curr_obs = self._get_obs(state)
181 | 
182 |         done = self.game.is_episode_finished()
183 | 
184 |         _other_process = getattr(self, "_other_process")
185 |         done = _other_process(done)
186 | 
187 |         if done:
188 |             with open(self.log_path + "log.txt", "a") as f:
189 |                 f.write(f"\ntotal steps in this episode: {self.running_steps}\n")
190 |                 f.write(f"total supplies collected: {self.collected_supply}\n")
191 |                 f.write(f"total reward: {self.episode_reward}\n")
192 |                 f.write(f"total valid supplies collected: {self.valid_collected_supply}\n")
193 | 
194 |             if self.is_inference:
195 |                 self.game.close()
196 | 
197 |         return self.curr_obs, reward, done, {}
198 | 
199 |     def _get_obs(self, state):
200 |         """
201 |         method to process state to get observation
202 | 
203 |         Parameters
204 |         ----------
205 |         state: AgentState object got from the backend env
206 |         """
207 |         raise NotImplementedError()
208 | 
209 |     def _compute_reward(self, state, action):
210 |         """reward process method
211 | 
212 |         Parameters
213 |         ----------
214 |         state: AgentState object got from the backend env
215 |         action: action list got from the agent
216 |         """
217 |         raise NotImplementedError()
218 | 
219 | 
220 | class SupplyGatherDiscreteSingleTarget(SupplyGatherBaseEnv):
221 |     """
222 |     Supply Gathering Env with discrete action space
223 | 
224 |     Task Design
225 |     ----------
226 |     The agent is randomly spawned near the supply heatmap center with the following goals:
227 |     1. reach the supply heatmap center
228 |     2. collect supplies in the world.
229 | 
230 |     Observation Space
231 |     ----------
232 |     `obs`: normalized direction vector pointing to the goal location
233 | 
234 |     Reward Shaping
235 |     ----------
236 |     1. successfully collecting a supply: 300
237 |     2. punish for moving away from the supply, award for moving towards it
238 |     3. punish for taking the pickup action without successfully collecting a supply
239 |     4. punish every single movement step
240 | 
241 |     Note
242 |     ----------
243 |     Different from `SupplyGatherDiscreteSingleTargetTwo`,
244 |     the observation in `SupplyGatherDiscreteSingleTarget` directly provides the agent with direction information,
245 |     making it easy to learn, thus the reward does not need distance as guidance.
246 | """ 247 | 248 | def __init__(self, env_config: EnvContext): 249 | super().__init__(env_config) 250 | 251 | def reset(self): 252 | state = super().reset() 253 | 254 | # the initial goal is the supply heatmap center 255 | self.target_supply = [ 256 | self.args["heatmap_center"][0], 257 | 0, 258 | self.args["heatmap_center"][1], 259 | ] 260 | obs = [] 261 | cur_pos = np.asarray(get_position(state)) 262 | tar_pos = np.asarray(self.target_supply) 263 | dir_vec = tar_pos - cur_pos 264 | dir_vec = dir_vec / np.linalg.norm(dir_vec) 265 | obs.append(dir_vec.tolist()) 266 | 267 | if self.args["use_depth_map"]: 268 | obs.append(state.depth_map.tolist()) 269 | return obs 270 | else: 271 | return obs[0] 272 | 273 | def _other_process(self, done: bool): 274 | # if found no good solution, stop the episode 275 | if (self.cur_distance >= 10 and 0 <= self.valid_collected_supply < 10) or (self.cur_distance >= 15 and 10 <= self.valid_collected_supply < 50): 276 | done = True 277 | return done 278 | 279 | def _compute_reward(self, state, action_cmd): 280 | reward = 0 281 | if self.running_steps == 1: 282 | return reward 283 | if not self.game.is_episode_finished(): 284 | # movement punishment 285 | reward -= 1 286 | 287 | # punish for taking PICKUP but with no increase in #supply 288 | if action_cmd[0][2] == True and state.num_supply == self.collected_supply: 289 | reward -= 50 290 | 291 | # punish for moving away from supply and award for moving towards supply 292 | self.cur_distance = get_distance( 293 | [self.target_supply[0], self.target_supply[1], self.target_supply[2]], 294 | get_position(state), 295 | ) 296 | reward += (self.target_supply_radius - self.cur_distance) * 5 297 | 298 | # reaching the initial goal (supply heatmap center) 299 | if self.valid_collected_supply == 0 and self.cur_distance <= self.target_supply_radius: 300 | reward += 300 301 | self.target_supply = None 302 | self.valid_collected_supply += 1 303 | self.target_supply_radius = 4 # 第一个目标完成,修改物资半径为2 304 | 305 | # reaching the second goal (successfully collect supplies) 306 | if state.num_supply > self.collected_supply and self.cur_distance <= 1: 307 | reward += 300 308 | self.target_supply = None 309 | self.valid_collected_supply += state.num_supply - self.collected_supply 310 | 311 | self.collected_supply = state.num_supply 312 | 313 | return reward 314 | 315 | def _get_obs(self, state): 316 | # get supply info of all nearby supplies 317 | self.np_supply_states = [ 318 | np.asarray( 319 | [ 320 | supply.position_x, 321 | supply.position_y, 322 | supply.position_z, 323 | supply.quantity, 324 | ] 325 | ) 326 | for supply in state.supply_states 327 | ] 328 | 329 | # reinitialize: get target information 330 | if self.target_supply is None: 331 | supply_distances = [get_distance([supply[0], supply[1], supply[2]], get_position(state)) for supply in self.np_supply_states] 332 | # target supply is the closest supply 333 | if supply_distances: 334 | self.target_supply = self.np_supply_states[supply_distances.index(min(supply_distances))] 335 | else: 336 | # if no supply nearby, the target supply is set to be the supply heatmap center 337 | temp = self.args["heatmap_center"].copy() 338 | temp.append(-1) 339 | self.target_supply = temp 340 | # get distance to target supply 341 | self.cur_distance = get_distance(self.target_supply[:-1], get_position(state)) if self.target_supply is not None else None 342 | else: 343 | self.cur_distance = None 344 | if self.target_supply is not None: 345 | self.cur_distance = get_distance( 346 | [ 347 | 
315 |     def _get_obs(self, state):
316 |         # get supply info of all nearby supplies
317 |         self.np_supply_states = [
318 |             np.asarray(
319 |                 [
320 |                     supply.position_x,
321 |                     supply.position_y,
322 |                     supply.position_z,
323 |                     supply.quantity,
324 |                 ]
325 |             )
326 |             for supply in state.supply_states
327 |         ]
328 | 
329 |         # reinitialize: get target information
330 |         if self.target_supply is None:
331 |             supply_distances = [get_distance([supply[0], supply[1], supply[2]], get_position(state)) for supply in self.np_supply_states]
332 |             # the target supply is the closest supply
333 |             if supply_distances:
334 |                 self.target_supply = self.np_supply_states[supply_distances.index(min(supply_distances))]
335 |             else:
336 |                 # if no supply is nearby, the target is set to the supply heatmap center
337 |                 temp = self.args["heatmap_center"].copy()
338 |                 temp.append(-1)
339 |                 self.target_supply = temp
340 |             # get the distance to the target supply
341 |             self.cur_distance = get_distance(self.target_supply[:-1], get_position(state)) if self.target_supply is not None else None
342 |         else:
343 |             self.cur_distance = None
344 |             if self.target_supply is not None:
345 |                 self.cur_distance = get_distance(
346 |                     [
347 |                         self.target_supply[0],
348 |                         self.target_supply[1],
349 |                         self.target_supply[2],
350 |                     ],
351 |                     get_position(state),
352 |                 )
353 | 
354 |         self._write_obs_log(state, self.cur_distance)
355 | 
356 |         cur_pos = np.asarray(get_position(state))
357 |         tar_pos = np.asarray([self.target_supply[0], self.target_supply[1], self.target_supply[2]]) if self.target_supply is not None else np.asarray(self.args["heatmap_center"])
358 |         dir_vec = tar_pos - cur_pos
359 |         dir_vec = dir_vec / np.linalg.norm(dir_vec)
360 |         obs = []
361 |         obs.append(dir_vec.tolist())
362 | 
363 |         if self.args["use_depth_map"]:
364 |             obs.append(state.depth_map.tolist())
365 |             return obs
366 |         else:
367 |             return obs[0]
368 | 
369 |     def _write_obs_log(self, state, cur_distance):
370 |         if self.turn_on_detailed_log:
371 |             with open(self.log_path + "log.txt", "a") as f:
372 |                 f.write(f"CurrentLocation: {state.position_x:.2f}, {state.position_y:.2f}, {state.position_z:.2f};\t")
373 |                 f.write(f"NearbySupply: {len(state.supply_states)};\t")
374 |                 f.write(f"CollectedSupply: {state.num_supply};\t")
375 |                 f.write(f"Target: {[round(self.target_supply[loc_index], 2) for loc_index in range(3)] if self.target_supply is not None else None};\t")
376 |                 f.write(f"Distance: {round(cur_distance, 2) if cur_distance is not None else None};\t")
377 | 
378 | 
379 | class SupplyGatherDiscreteEasySingleTargetVision(SupplyGatherDiscreteSingleTarget):
380 |     """Same as `SupplyGatherDiscreteSingleTarget`, but with `Vision` added"""
381 | 
382 |     def __init__(self, env_config: EnvContext):
383 |         super().__init__(env_config)
384 | 
385 |     def _compute_reward(self, state, action):
386 |         # introduce a vision-based reward
387 |         reward = 0
388 |         if self.running_steps == 1:
389 |             return reward
390 |         if not self.game.is_episode_finished():
391 |             # movement penalty
392 |             reward -= 1
393 | 
394 |             # penalty for moving away from the target, reward for approaching it
395 |             self.cur_distance = get_distance(
396 |                 [self.target_supply[0], self.target_supply[1], self.target_supply[2]],
397 |                 get_position(state),
398 |             )
399 |             reward += (self.target_supply_radius + 2 - self.cur_distance) * 40
400 | 
401 |             # reaching the neighborhood of the heatmap center (the first goal)
402 |             if self.valid_collected_supply == 0 and self.cur_distance <= self.target_supply_radius:
403 |                 reward += 300
404 |                 self.target_supply = None
405 |                 self.valid_collected_supply += 1
406 | 
407 |             # reward for collecting the target supply
408 |             if state.num_supply > self.collected_supply and self.cur_distance <= 1:
409 |                 reward += 300
410 |                 self.target_supply = None
411 |                 self.valid_collected_supply += state.num_supply - self.collected_supply
412 | 
413 |             self.collected_supply = state.num_supply
414 | 
415 |         return reward
416 | 
417 |     def _get_obs(self, state):
418 |         # TODO: add vision to the obs
419 |         # all supplies currently nearby
420 |         self.np_supply_states = [
421 |             np.asarray(
422 |                 [
423 |                     supply.position_x,
424 |                     supply.position_y,
425 |                     supply.position_z,
426 |                     supply.quantity,
427 |                 ]
428 |             )
429 |             for supply in state.supply_states
430 |         ]
431 | 
432 |         # reinitialize the target supply
433 |         if self.target_supply is None:
434 |             supply_distance = [get_distance([supply[0], supply[1], supply[2]], get_position(state)) for supply in self.np_supply_states]
435 |             # set the closest supply as the target
436 |             self.target_supply = self.np_supply_states[supply_distance.index(min(supply_distance))] if len(supply_distance) != 0 else None  # self.args["heatmap_center"]
437 |             # initialize the distance from the current position to the supply
438 |             self.cur_distance = get_distance(self.target_supply[:-1], get_position(state)) if self.target_supply is not None else None
439 |         else:
440 |             self.cur_distance = (
441 |                 get_distance(
442 |                     [
443 |                         self.target_supply[0],
444 |                         self.target_supply[1],
445 |                         self.target_supply[2],
446 |                     ],
447 |                     get_position(state),
448 |                 )
449 |                 if self.target_supply is not None
450 |                 else None
451 |             )
452 | 
453 |         self._write_obs_log(state, self.cur_distance)
454 | 
455 |         cur_pos = np.asarray(get_position(state))
456 |         tar_pos = np.asarray([self.target_supply[0], self.target_supply[1], self.target_supply[2]]) if self.target_supply is not None else np.asarray(self.args["heatmap_center"])
457 |         dir_vec = tar_pos - cur_pos
458 |         obs = dir_vec / np.linalg.norm(dir_vec)
459 |         return obs
460 | 
461 | 
462 | class SupplyGatherDiscreteSingleTargetTwo(SupplyGatherBaseEnv):
463 |     """The agent spawns randomly near the supply heatmap center; its goal is to keep collecting the target supply closest to it in as few steps as possible.
464 |     state: (1) current position; (2) target supply position (the closest supply is taken as the target, and a new target is set only after the current one is collected)
465 |     reward: (1) reward for collecting the target supply; (2) penalty for moving away from the target supply, reward for approaching it (last_distance - cur_distance);
466 |     (3) penalty for taking pickup without collecting the target supply; (4) movement penalty
467 | 
468 | 
469 |     Note: different from `SupplyGatherDiscreteSingleTarget`, the state of `SupplyGatherDiscreteSingleTargetTwo` only provides the agent with two position coordinates,
470 |     so the agent has to learn the distance information implicitly; the reward uses the diff in distance to guide the agent to learn quickly.
471 |     """
472 | 
473 |     def __init__(self, env_config: EnvContext):
474 |         super().__init__(env_config)
475 | 
476 |         # number of target supplies visible to the agent
477 |         self.visible_supply_max_len = 1  # 10
478 |         # number of attributes per supply
479 |         self.supply_attribute_len = 4
480 |         # value range of the agent position
481 |         low = [np.asarray([-np.inf] * 4, dtype=np.float32)]
482 |         high = [np.asarray([np.inf] * 4, dtype=np.float32)]
483 |         # value range of the supply positions
484 |         low.extend([np.asarray([-np.inf] * self.supply_attribute_len, dtype=np.float32) for _ in range(self.visible_supply_max_len)])
485 |         high.extend([np.asarray([np.inf] * self.supply_attribute_len, dtype=np.float32) for _ in range(self.visible_supply_max_len)])
486 |         self.observation_space = Box(
487 |             low=np.asarray(low),
488 |             high=np.asarray(high),
489 |             shape=(self.visible_supply_max_len + 1, self.supply_attribute_len),
490 |             dtype=np.float32,
491 |         )
492 | 
493 |     def reset(self):
494 |         state = super().reset()
495 |         self.target_supply_flag = False  # whether the target supply still exists; True means it does
496 |         obs = [np.asarray([state.position_x, state.position_y, state.position_z, 0])]  # the last attribute is state.allow_pickup
497 |         obs.extend([np.zeros(self.supply_attribute_len)])
498 | 
499 |         return np.asarray(obs)
500 | 
501 |     def step(self, action):
502 |         self.running_steps += 1
503 |         # execute the action
504 |         action_cmd = self._action_process(action)
505 |         self.game.make_action({0: action_cmd})
506 |         if self.turn_on_detailed_log:
507 |             with open(self.log_path + "log.txt", "a") as f:
508 |                 f.write(f"\nstep:{self.running_steps};\t")
509 |                 f.write(f"action:{action_cmd};\t")
510 | 
511 |         # state transition
512 |         state = self.game.get_state()
513 |         self.curr_obs = self._get_obs(state, action_cmd)
514 | 
515 |         done = self.game.is_episode_finished()
516 | 
517 |         done = self._other_process(done)
518 | 
519 |         if done:
520 |             with open(self.log_path + "log.txt", "a") as f:
521 |                 f.write(f"\ntotal steps in this episode: {self.running_steps}\n")
522 |                 f.write(f"total supplies collected: {self.collected_supply}\n")
523 |                 f.write(f"total episode reward: {self.episode_reward}\n")
524 |                 f.write(f"total valid supplies collected: {self.valid_collected_supply}\n")
525 | 
526 |             if self.is_inference:
527 |                 self.game.close()
528 | 
529 |         return self.curr_obs, self.reward, done, {}
530 | 
531 |     def _compute_reward(self, state, action_cmd):
532 |         reward = 0
533 |         if not self.game.is_episode_finished():
534 |             # movement penalty
535 |             reward -= 1
536 | 
537 |             # penalty if pickup was taken but the supply count did not increase
538 |             # if action_cmd[2] == True and self.target_supply_flag == True:
539 |             #     reward -= 300
540 | 
541 |             # no target: return 0 reward
542 |             if hasattr(self, "cur_distance") and self.cur_distance is None:
543 |                 return 0
544 | 
545 |             # reward for collecting the target supply
546 |             if state.num_supply > self.collected_supply and hasattr(self, "target_supply") and self.cur_distance < 1:
547 |                 reward += 500
548 |                 # done = True
549 |                 self.target_supply = None
550 |                 self.target_supply_flag = False
551 |                 self.valid_collected_supply += 1
552 | 
553 |             self.collected_supply = state.num_supply
554 | 
555 |             # reward for approaching the target supply, penalty for moving away from it
556 |             if self.running_steps >= 1 and self.target_supply is not None:
557 |                 reward += (self.last_distance - self.cur_distance) * 1000  # if self.last_distance == -1 else
558 |                 self.last_distance = self.cur_distance
559 | 
560 |         return reward
561 | 
562 |     def _get_obs(self, state, action_cmd):
563 |         obs = [np.asarray([state.position_x, state.position_y, state.position_z, 0])]  # the last element is state.allow_pickup
564 | 
565 |         # all supplies currently nearby
566 |         self.np_supply_states = [
567 |             np.asarray(
568 |                 [
569 |                     supply.position_x,
570 |                     supply.position_y,
571 |                     supply.position_z,
572 |                     supply.quantity,
573 |                 ]
574 |             )
575 |             for supply in state.supply_states
576 |         ]
577 | 
578 |         # initialize the target supply
579 |         if self.running_steps == 1:
580 |             supply_distance = [get_distance([supply[0], supply[1], supply[2]], get_position(state)) for supply in self.np_supply_states]
581 |             # set the closest supply as the target
582 |             self.target_supply = self.np_supply_states[supply_distance.index(min(supply_distance))] if len(supply_distance) != 0 else None
583 |             # initialize the distance from the current position to the target supply
584 |             self.last_distance = get_distance(self.target_supply[:-1], get_position(state)) if self.target_supply is not None else None
585 | 
586 |         # check whether the target supply has been picked up; True = still there, False = picked up
587 |         if self.target_supply_flag is not None and self.target_supply is not None:
588 |             self.target_supply_flag = False if state.num_supply > self.collected_supply and get_distance(self.target_supply[:-1], get_position(state)) <= 1 else True
589 |         else:
590 |             self.target_supply_flag = False
591 | 
592 |         self.cur_distance = get_distance(self.target_supply[:-1], get_position(state)) if self.target_supply is not None else None
593 | 
594 |         # compute the reward
595 |         self.reward = self._compute_reward(state, action_cmd)
596 |         self.episode_reward += self.reward
597 | 
598 |         # if the target supply has been picked up, set a new target
599 |         if not self.target_supply_flag:
600 |             if len(self.np_supply_states) == 0:
601 |                 self.target_supply = None
602 |                 self.last_distance = -1
603 |             else:
604 |                 supply_distance = [get_distance([supply[0], supply[1], supply[2]], get_position(state)) for supply in self.np_supply_states]
605 |                 self.target_supply = self.np_supply_states[supply_distance.index(min(supply_distance))]
606 |                 self.last_distance = get_distance(self.target_supply[:-1], get_position(state))
607 | 
608 |         self._write_obs_log(state, self.cur_distance)
609 | 
610 |         # build the obs
611 |         if self.running_steps < 1:
612 |             obs.extend([np.zeros(self.supply_attribute_len)])
613 |         elif self.running_steps >= 1:
614 |             if len(self.np_supply_states) >= self.visible_supply_max_len:
615 |                 obs.extend([self.target_supply])
616 |             else:
617 |                 obs.extend([np.zeros(self.supply_attribute_len)])
618 |                 # obs.extend([np.zeros(self.supply_attribute_len)] * (self.visible_supply_max_len - len(self.init_supply_states)))
619 | 
620 |         return np.asarray(obs)
621 | 
622 |     def _action_process(self, action):
623 |         walk_dir = self.WALK_DIR_LIST[action[0]]
624 |         walk_speed = self.WALK_SPEED_LIST[action[1]]
625 |         pickup = self.PICKUP_LIST[action[2]]
626 | 
627 |         return [walk_dir, walk_speed, pickup]
628 | 
629 |     def _other_process(self, done: bool):
630 |         # if the agent gets stuck with no feasible solution, end this episode manually and move on to the next
631 |         if (self.cur_distance >= 10 and 0 <= self.valid_collected_supply < 10) or (self.cur_distance >= 15 and 10 <= self.valid_collected_supply < 50):
632 |             done = True
633 |         return done
634 | 
635 |     def _write_obs_log(self, state, cur_distance):
636 |         if self.turn_on_detailed_log:
637 |             with open(self.log_path + "log.txt", "a") as f:
638 |                 f.write(f"reward:{round(self.reward, 2)};\t")
639 |                 f.write(f"current position:{state.position_x:.2f}, {state.position_y:.2f}, {state.position_z:.2f};\t")
640 |                 f.write(f"nearby supplies:{len(state.supply_states)};\t")  # {state.supply_states}
641 |                 f.write(f"collected supplies:{state.num_supply};\t")
642 |                 f.write(f"target:{[round(self.target_supply[loc_index], 2) for loc_index in range(3)] if self.target_supply is not None else None};\t")
643 |                 f.write(f"distance to target:{round(cur_distance, 2) if cur_distance is not None else None};\t")
644 | 
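# A minimal rollout sketch for the environments above (illustrative only: it
# assumes `env_config` provides the fields referenced in this file, e.g.
# "heatmap_center" and "use_depth_map", plus whatever the base class reads
# from it, and that the base class defines `action_space`):
if __name__ == "__main__":
    from ray.rllib.env.env_context import EnvContext

    config = {"heatmap_center": [0, 0], "use_depth_map": False}  # plus engine/map settings
    env = SupplyGatherDiscreteSingleTarget(EnvContext(config, worker_index=0))

    obs = env.reset()
    done = False
    while not done:
        action = env.action_space.sample()  # stand-in for a trained policy
        obs, reward, done, info = env.step(action)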
default="/mnt/d/Codes/cog-local/map-data") 50 | parser.add_argument("--engine-dir", type=str, default="/mnt/d/Codes/cog-local/fps_linux") 51 | parser.add_argument("--width", type=int, default=WIDTH) 52 | parser.add_argument("--height", type=int, default=HEIGHT) 53 | parser.add_argument("--far", type=int, default=FAR) 54 | parser.add_argument("--scale", type=int, default=SCALE) 55 | parser.add_argument("--map-id-list", type=int, nargs="+", default=[]) 56 | parser.add_argument("--tag", type=str, default=None) 57 | parser.add_argument("--save-dir", type=str, default="VisDepth") 58 | parser.add_argument("--policy", type=str, default="simple_rotate") 59 | args = parser.parse_args() 60 | 61 | policy = getattr(PolicyPool, args.policy) 62 | os.makedirs(args.save_dir, exist_ok=True) 63 | map_dir = os.path.expanduser(args.map_dir) 64 | engine_dir = os.path.expanduser(args.engine_dir) 65 | 66 | game = Game(map_dir, engine_dir) 67 | game.set_available_actions( 68 | [ 69 | ActionVariable.WALK_DIR, 70 | ActionVariable.WALK_SPEED, 71 | ActionVariable.TURN_LR_DELTA, 72 | ActionVariable.LOOK_UD_DELTA, 73 | ] 74 | ) 75 | game.set_episode_timeout(10) 76 | game.turn_on_depth_map() 77 | game.set_depth_map_size(args.width, args.height, args.far) 78 | game.set_game_mode(Game.MODE_NAVIGATION) 79 | game.init() 80 | 81 | for map_id in args.map_id_list: 82 | game.set_map_id(map_id) 83 | game.set_start_location([0, 0, 0]) 84 | game.turn_on_record() 85 | game.set_game_replay_suffix(f"vis_depth_{args.tag}") 86 | game.new_episode() 87 | 88 | frames = [] 89 | while not game.is_episode_finished(): 90 | state_all = game.get_state_all() 91 | action_all = { 92 | agent_id: policy(state) 93 | for agent_id, state in state_all.items() 94 | } 95 | 96 | game.make_action_by_list(action_all) 97 | 98 | print(state_all) 99 | print(action_all) 100 | 101 | frames.append(visualize(state_all[0].depth_map, args.far, args.scale)) 102 | 103 | print(f"Map {map_id:03d} finished") 104 | 105 | save_name = f"policy[{args.policy}]_map[{map_id:03d}]_WxHxF={args.width}x{args.height}x{args.far}" 106 | save_path = os.path.join(args.save_dir, save_name) 107 | 108 | frames[0].save( 109 | save_path + ".gif", 110 | format="GIF", 111 | append_images=frames[1:], 112 | save_all=True, 113 | duration=1000 // 50 * 5, 114 | loop=0, 115 | ) 116 | 117 | game.close() 118 | -------------------------------------------------------------------------------- /images/wechat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inspirai/wilderness-scavenger/4c2be3796cb311601c127fd6e1791619f029934f/images/wechat.png -------------------------------------------------------------------------------- /inspirai_fps/README.md: -------------------------------------------------------------------------------- 1 | # Python Game Play Interface 2 | 3 | ## Agent Action Variable 4 | 5 | - `ActionVariable.WALK_DIR`: the walking direction of the agent 6 | - `ActionVariable.WALK_SPEED`: the walking speed of the agent 7 | - `ActionVariable.TURN_LR_DELTA`: the incremental camera angle of the agent turning left or right 8 | - `ActionVariable.LOOK_UD_DELTA`: the incremental camera angle of the agent looking up or down 9 | - `ActionVariable.JUMP`: the jumping action of the agent 10 | - `ActionVariable.ATTACK`: the shooting action of the agent 11 | - `ActionVariable.RELOAD`: the weapon clip reloading action of the agent 12 | - `ActionVariable.PICKUP`: the action of the agent to pick up a supply 13 | 14 | ## Agent State Variable 15 | 
14 | ## Agent State Variable
15 | 
16 | - `StateVariable.LOCATION`: the location of the agent
17 |   - `position_x`: the x coordinate value of the agent's location
18 |   - `position_y`: the y coordinate value of the agent's location (vertical height)
19 |   - `position_z`: the z coordinate value of the agent's location
20 | - `StateVariable.MOVE_DIR`: the moving direction of the agent
21 |   - `move_dir_x`: the x coordinate value of the agent's moving direction
22 |   - `move_dir_y`: the y coordinate value of the agent's moving direction
23 |   - `move_dir_z`: the z coordinate value of the agent's moving direction
24 | - `StateVariable.MOVE_SPEED`: the speed of the agent's movement
25 | - `StateVariable.CAMERA_DIR`: the direction of the camera
26 |   - `pitch`: the vertical angle of the camera
27 |   - `yaw`: the horizontal angle of the camera
28 | - `StateVariable.HEALTH`: the health percentage of the agent
29 | - `StateVariable.WEAPON_AMMO`: the number of bullets left in the agent's weapon clip
30 | - `StateVariable.SPARE_AMMO`: the number of bullets left in the agent's spare ammo
31 | - `StateVariable.IS_ATTACKING`: whether the agent is currently shooting
32 | - `StateVariable.IS_RELOADING`: whether the agent is currently reloading the weapon
33 | - `StateVariable.HIT_ENEMY`: whether the agent hit an enemy
34 | - `StateVariable.HIT_ENEMY_ID`: the id of the enemy that the agent hit
35 | - `StateVariable.HIT_BY_ENEMY`: whether the agent was hit by an enemy
36 | - `StateVariable.HIT_BY_ENEMY_ID`: the id of the enemy that hit the agent
37 | - `StateVariable.NUM_SUPPLIES`: the number of supplies collected by the agent
38 | - `StateVariable.IS_WAITING_RESPAWN`: whether the agent is waiting for respawn
39 | - `StateVariable.IS_INVINCIBLE`: whether the agent is invincible
40 | 
41 | ## Supply State Variable
42 | 
43 | - `SupplyState.position_x`: the x coordinate value of the supply's location
44 | - `SupplyState.position_y`: the y coordinate value of the supply's location (vertical height)
45 | - `SupplyState.position_z`: the z coordinate value of the supply's location
46 | - `SupplyState.quantity`: the quantity of the supply
47 | - `SupplyState.id`: the id of the supply
48 | 
49 | ## Enemy State Variable
50 | 
51 | - `EnemyStateDetailed.position_x`: the x coordinate value of the enemy's location
52 | - `EnemyStateDetailed.position_y`: the y coordinate value of the enemy's location (vertical height)
53 | - `EnemyStateDetailed.position_z`: the z coordinate value of the enemy's location
54 | - `EnemyStateDetailed.health`: the health value of the enemy
55 | - `EnemyStateDetailed.waiting_respawn`: whether the enemy is waiting for respawn
56 | - `EnemyStateDetailed.is_invinciable`: whether the enemy is currently invincible
57 | - `EnemyStateDetailed.id`: the id of the enemy
58 | 
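A short sketch of reading a few of these fields from an agent state object (field names follow the lists above; the printing itself is illustrative):

```python
state = game.get_state()  # state of the (single) agent

print(state.position_x, state.position_y, state.position_z)
print("collected supplies:", state.num_supply)

# nearby supplies arrive as a list of supply state objects
for supply in state.supply_states:
    print(supply.id, supply.quantity, supply.position_x, supply.position_z)

# when the depth map is turned on, `state.depth_map` holds a (height, width) array
```
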
59 | ## Game Mode
60 | 
61 | - `Game.MODE_NAVIGATION`: the track 1 mode identifier
62 | - `Game.MODE_SUP_GATHER`: the track 2 mode identifier
63 | - `Game.MODE_SUP_BATTLE`: the track 3 mode identifier
64 | 
65 | ## Game Configuration
66 | 
67 | Users can change the game configuration with the following methods; the new configuration is applied to the next game when `Game.new_episode()` is called.
68 | 
69 | - `Game.set_game_config`: set the game configuration with a file path
70 | - `Game.set_episode_timeout`: set the episode timeout in seconds
71 | - `Game.set_map_id`: set the map id
72 | - `Game.set_game_mode`: set the game mode
73 | - `Game.set_random_seed`: set the random seed (used by supply generation and agent spawning)
74 | - `Game.set_start_location`: set the start location of the specified agent
75 | - `Game.set_target_location`: set the target location of the Navigation mode
76 | - `Game.set_available_actions`: set the available action variables of the agent
77 | - `Game.set_game_replay_suffix`: set the suffix of the game replay filename
78 | - `Game.set_supply_heatmap_center`: set the center of the initial heatmap of supply distribution
79 | - `Game.set_supply_heatmap_radius`: set the radius of the initial heatmap of supply distribution
80 | - `Game.set_supply_outdoor_richness`: control the abundance of supplies in the open field, as a percentage
81 | - `Game.set_supply_indoor_richness`: control the abundance of supplies inside houses, as a percentage
82 | - `Game.set_supply_spacing`: control the spacing between supplies in the open field, in meters
83 | - `Game.set_supply_outdoor_quantity_range`: control the quantity range of a supply in the open field
84 | - `Game.set_supply_indoor_quantity_range`: control the quantity range of a supply inside houses
85 | - `Game.add_supply_refresh`: general interface to add a supply refresh event to the game
86 |   - `refresh_time`: the time of the refresh event
87 |   - `heatmap_center`: the center of the heatmap of supply distribution
88 |   - `heatmap_radius`: the radius of the heatmap of supply distribution
89 |   - `outdoor_richness`: control the abundance of supplies in the open field, as a percentage
90 |   - `indoor_richness`: control the abundance of supplies inside houses, as a percentage
91 | - `Game.add_agent`: general interface to add an agent to the game
92 |   - `agent_name`: the name of the added agent
93 |   - `health`: the health point (HP) of the agent
94 |   - `start_location`: the agent's initial spawning location
95 |   - `num_clip_ammo`: the number of bullets in the agent's weapon clip
96 |   - `num_pack_ammo`: the number of bullets in the agent's spare ammo
97 |   - `attack`: the attack power of the agent
98 | - `Game.turn_on_depth_map`: turn on computing of the depth map in the agent state
99 | - `Game.turn_off_depth_map`: turn off computing of the depth map in the agent state
100 | - `Game.set_depth_map_size`: set the size (**width**, **height**, **far**) of the depth map in the agent state (default: (38, 22, 100))
101 | - `Game.random_start_location`: set the start location for the specified agent by randomly choosing a valid (outdoor or indoor) location in the map
102 | - `Game.random_target_location`: set the target location for the Navigation mode by randomly choosing a valid (outdoor or indoor) location in the map
103 | 
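A typical configuration sketch combining several of the setters above (values are illustrative; `game` is a constructed `Game` object as in the earlier sketch, and the episode is then driven with the workflow methods listed in the next section):

```python
game.set_game_mode(Game.MODE_SUP_GATHER)
game.set_map_id(1)
game.set_episode_timeout(60)            # seconds
game.set_random_seed(42)
game.set_supply_heatmap_center([0, 0])  # illustrative coordinates
game.set_supply_heatmap_radius(50)
game.set_supply_indoor_richness(80)     # percent
game.set_supply_outdoor_richness(20)    # percent
game.turn_on_depth_map()
game.set_depth_map_size(38, 22, 100)    # width, height, far

game.new_episode()  # the configuration above takes effect here
```
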
104 | ## Game Workflow
105 | 
106 | - `Game.init`: initialize the game server and launch the backend game engine, which then connects to the game server
107 | - `Game.new_episode`: start a new episode of the game with all agent and game states reset to the initial ones
108 | - `Game.make_action`: send actions of agents (in `dict[int, list]`) from the game server to the backend game engine
109 | - `Game.get_state`: get the agent state (by `agent_id`) from the backend game engine
110 | - `Game.get_state_all`: get all agent states (as `dict[int, AgentState]`) from the backend game engine
111 | - `Game.is_episode_finished`: check whether the currently running episode is finished
112 | - `Game.close`: close the game server and shut down the backend game engine
113 | 
114 | ## Global Variable
115 | 
116 | - `Game.get_game_config`: get the game configuration (as `dict`)
117 | - `Game.get_agent_name`: get the name of the specified agent (by `agent_id`)
118 | - `Game.get_start_location`: get the start location of the specified agent
119 | - `Game.get_target_location`: get the target location of the Navigation mode
120 | - `Game.get_time_step`: get the current frame count of the running episode
121 | - `Game.get_depth_map_size`: get the (**width**, **height**, **far**) of the depth map in the agent state (where **far** is the visible depth range)
122 | - `Game.get_valid_locations`: get coordinates of all valid **indoor** and **outdoor** locations (as `Dict[str, List[Tuple[float, float, float]]]`)
123 | - `Game.time_step_per_action`: get the number of frames between two consecutive actions
124 | - `Game.target_trigger_distance`: get the distance threshold for the target location to be considered as reached
125 | - `Game.use_depth_map`: get whether the depth map is used in the agent state
126 | 
127 | ## Game Replay
128 | 
129 | - `Game.turn_on_record`: turn on recording of the game replay
130 | - `Game.turn_off_record`: turn off recording of the game replay
131 | - `Game.set_game_replay_suffix`: set the suffix of the game replay filename
--------------------------------------------------------------------------------
/inspirai_fps/__init__.py:
--------------------------------------------------------------------------------
1 | from inspirai_fps.gamecore import *
--------------------------------------------------------------------------------
/inspirai_fps/lib/libraycaster.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/inspirai/wilderness-scavenger/4c2be3796cb311601c127fd6e1791619f029934f/inspirai_fps/lib/libraycaster.so
--------------------------------------------------------------------------------
/inspirai_fps/raycast_manager.py:
--------------------------------------------------------------------------------
1 | import os
2 | import ctypes
3 | from ctypes import cdll
4 | from sys import platform
5 | 
6 | import trimesh
7 | import numpy as np
8 | from math import radians
9 | from numpy.ctypeslib import ndpointer
10 | from typing import List
11 | 
12 | 
13 | def perspective_frustum(hw_ratio, x_fov, znear, zfar):
14 |     assert znear != zfar
15 |     right = np.abs(np.tan(x_fov) * znear)
16 |     top = right * hw_ratio
17 |     left = -right
18 |     bottom = -top
19 |     return [left, right, bottom, top, znear, zfar]
20 | 
21 | 
22 | class RaycastManager(object):
23 |     DEFAULT_HEIGHT = 22
24 |     DEFAULT_WIDTH = 38
25 |     DEFAULT_FAR = 100
26 |     __VISION_ANGLE = 60
27 | 
28 |     def __init__(self, mesh_file_path):
29 |         self.mesh_file_path = mesh_file_path
30 |         self.HEIGHT = self.DEFAULT_HEIGHT
31 |         self.WIDTH = self.DEFAULT_WIDTH
32 |         self.FAR = self.DEFAULT_FAR
33 | 
34 |         if platform.startswith("linux"):
35 |             lib_filename = "libraycaster.so"
36 |         # elif platform.startswith("darwin"):
37 |         #     lib_filename = "libraycaster.dylib"
38 |         # elif platform.startswith("win"):
39 |         #     lib_filename = "raycaster.dll"
40 |         else:
41 |             raise NotImplementedError(f"{platform} is not supported")
42 | 
43 |         work_dir = os.path.dirname(__file__)
44 |         lib_path = os.path.join(work_dir, "lib", lib_filename)
45 |         self.ray_lib = cdll.LoadLibrary(lib_path)
46 | 
47 |         try:
48 |             c_func = self.ray_lib.init_mesh
49 |             c_func.argtypes = [
50 |                 ctypes.c_void_p,  # ray_tracer_ptr
51 |                 ndpointer(
52 |                     ctypes.c_float, flags="C_CONTIGUOUS"
53 |                 ),  # vertices. (num_vertices, 3)
54 |                 ctypes.c_size_t,  # num_vertices
55 |                 ndpointer(
56 |                     ctypes.c_uint32, flags="C_CONTIGUOUS"
57 |                 ),  # faces. (num_faces, 3)
58 |                 ctypes.c_size_t,  # num_faces
59 |             ]
60 |             c_func.restype = ctypes.c_void_p
61 | 
62 |             c_func = self.ray_lib.get_depth
63 |             c_func.argtypes = [
64 |                 ctypes.c_void_p,
65 |                 ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
66 |                 ctypes.c_size_t,
67 |                 ctypes.POINTER(ctypes.POINTER(ctypes.c_float)),
68 |             ]
69 | 
70 |             c_func = self.ray_lib.free_arrays
71 |             c_func.argtypes = [
72 |                 ctypes.POINTER(ctypes.c_void_p),
73 |                 ctypes.c_size_t,
74 |             ]
75 | 
76 |             c_func = self.ray_lib.agent_is_visible
77 |             c_func.argtypes = [
78 |                 ctypes.c_void_p,  # ray_tracer_ptr
79 |                 ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),  # body param
80 |                 ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),  # view param
81 |                 ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),  # position
82 |                 ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),  # camera location
83 |                 ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),  # camera rotation
84 |                 ctypes.c_size_t,  # num_vertices
85 |                 ndpointer(ctypes.c_uint32, flags="C_CONTIGUOUS"),  # team_id
86 |                 ndpointer(ctypes.c_uint32, flags="C_CONTIGUOUS"),  # visibility flags (output)
87 |             ]
88 |             c_func.restype = ctypes.c_void_p
89 | 
90 |             c_func = self.ray_lib.free_mesh
91 |             c_func.argtypes = [ctypes.c_void_p]
92 | 
93 |         except Exception:
94 |             print("External library not loaded correctly: {}".format(lib_filename))
95 | 
96 |         self.depth_ptr = ctypes.POINTER(ctypes.c_void_p)()
97 | 
98 |         mesh = trimesh.load(mesh_file_path, force="mesh")
99 |         v = np.array(mesh.vertices).astype(np.float32)
100 |         f = np.array(mesh.faces).astype(np.uint32)
101 | 
102 |         c_func = self.ray_lib.init_mesh
103 |         self.depth_ptr = c_func(self.depth_ptr, v, int(v.shape[0]), f, int(f.shape[0]))
104 | 
105 |     def get_depth(
106 |         self,
107 |         position: List[float],
108 |         direction: List[float],
109 |     ):
110 |         """
111 |         TODO (wsp): add multi-agent support
112 |         position: position of a single agent (only one is supported for now)
113 |         ray_origin and ray_direction: 2D lists
114 |         """
115 | 
116 |         height = self.HEIGHT
117 |         width = self.WIDTH
118 |         far = self.FAR
119 | 
120 |         x, y, z = position
121 |         position_in_mesh = np.asarray([-x, y, z])  # negative x
122 |         r = np.asarray(direction) * np.pi / 180
123 |         cam_lookat = position_in_mesh + np.asarray(
124 |             [-np.sin(r[2]) * np.cos(r[1]), -np.sin(r[1]), np.cos(r[2]) * np.cos(r[1])]
125 |         )  # negative x
126 | 
127 |         num_cameras = 1
128 |         out_depth_values_ptr = (ctypes.POINTER(ctypes.c_float) * num_cameras)()
129 |         cam_param_array_double = np.zeros(
130 |             (num_cameras, 18), dtype=np.float64, order="C"
131 |         )
132 |         for i in range(num_cameras):
133 |             cam_pos = np.array(position_in_mesh[i * 3 : i * 3 + 3])
134 |             cam_param_array_double[i, 0:3] = cam_pos
135 |             cam_param_array_double[i, 3:6] = cam_lookat
136 |             cam_param_array_double[i, 6:9] = [0, 1, 0]
137 |             cam_param_array_double[i, 9:10] = 1.0
138 |             cam_param_array_double[i, 10:16] = perspective_frustum(
139 |                 hw_ratio=float(height) / width,
140 |                 x_fov=radians(self.__VISION_ANGLE // 2),
141 |                 znear=0.01,
142 |                 zfar=far,
143 |             )
144 |             cam_param_array_double[i, 16:18] = [height, width]
145 | 
146 |         c_func = self.ray_lib.get_depth
147 |         c_func(
148 |             self.depth_ptr,
149 |             cam_param_array_double,
150 |             int(cam_param_array_double.shape[0]),
151 |             out_depth_values_ptr,
152 |         )
153 | 
154 |         out_depth_maps = []
155 |         for i in range(num_cameras):
156 |             depth_map = np.ctypeslib.as_array(
157 |                 out_depth_values_ptr[i], shape=(height, width)
158 |             ).copy()
159 |             depth_map[np.isnan(depth_map)] = far
160 |             out_depth_maps.append(depth_map)
161 | 
162 |         self._free(out_depth_values_ptr)
163 | 
164 |         return out_depth_maps
165 | 
166 |     def agent_is_visible(
167 |         self,
168 |         body_param,
169 |         view_angle,
170 |         agent_team_id,
171 |         positon,
172 |         cameralocation,
173 |         camerarotation,
174 |     ):
175 |         agent_num = len(agent_team_id)
176 |         c_func = self.ray_lib.agent_is_visible
177 |         is_visible = np.zeros((agent_num, agent_num), dtype=np.uint32, order="C")
178 | 
179 |         c_func(
180 |             self.depth_ptr,
181 |             np.array(body_param, dtype=np.float32, order="C"),
182 |             np.array(view_angle, dtype=np.float32, order="C"),
183 |             np.array(positon, dtype=np.float32, order="C"),
184 |             np.array(cameralocation, dtype=np.float32, order="C"),
185 |             np.array(camerarotation, dtype=np.float32, order="C"),
186 |             int(agent_num),
187 |             np.array(agent_team_id, dtype=np.uint32, order="C"),
188 |             is_visible,
189 |         )
190 |         return is_visible
191 | 
192 |     def _free(self, ptr):
193 |         num_arrays = len(ptr)
194 |         arr_ptr_void = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_void_p))
195 |         c_func = self.ray_lib.free_arrays
196 |         c_func(arr_ptr_void, num_arrays)
197 | 
198 |     def _free_mesh(self):
199 |         c_func = self.ray_lib.free_mesh
200 |         c_func(self.depth_ptr)
201 | 
202 |     def __repr__(self) -> str:
203 |         return f"RayTracer(HEIGHT={self.HEIGHT}, WIDTH={self.WIDTH}, DEPTH={self.FAR}, mesh={self.mesh_file_path})"
204 | 
205 |     def __del__(self):
206 |         self._free_mesh()
207 |         print(f"[free] memory used by {self.mesh_file_path} is freed")
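

# A short usage sketch for RaycastManager (illustrative only: the mesh path is
# a placeholder; `direction` is in degrees and, as in `get_depth` above, only
# its pitch and yaw components are used):
if __name__ == "__main__":
    rm = RaycastManager("path/to/mesh.obj")  # placeholder mesh file
    rm.WIDTH, rm.HEIGHT, rm.FAR = 38, 22, 100

    # depth map for a single camera 1 m above the origin, yaw = 0
    depth_maps = rm.get_depth(position=[0.0, 1.0, 0.0], direction=[0.0, 0.0, 0.0])
    print(depth_maps[0].shape)  # (HEIGHT, WIDTH); rays that hit nothing are set to FAR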
--------------------------------------------------------------------------------
/inspirai_fps/simple_command.proto:
--------------------------------------------------------------------------------
1 | // Copyright 2015 gRPC authors.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | //     http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | syntax = "proto3";
16 | package fps_command;
17 | 
18 | 
19 | service Commander {
20 |     rpc Request_S2A_UpdateGame (S2A_Request_Data) returns (A2S_Reply_Data) {}
21 | }
22 | 
23 | // ------------------------- data types -------------------------------
24 | 
25 | // game mode
26 | enum GameModeType {
27 |     NAVIGATION_MODE = 0; // navigation mode
28 |     SUP_GATHER_MODE = 1; // supply gathering mode
29 |     SUP_BATTLE_MODE = 2; // supply battle mode
30 | }
31 | 
32 | message Vector3 {
33 |     optional float x = 1;
34 |     optional float y = 2;
35 |     optional float z = 3;
36 | }
37 | 
38 | // supply info
39 | message SupplyInfo {
40 |     optional int32 supply_id = 1;
41 |     optional Vector3 supply_location = 2;
42 |     optional int32 supply_quantity = 3;
43 | }
44 | 
45 | // enemy info
46 | message EnemyInfo {
47 |     optional Vector3 location = 1;
48 |     optional Vector3 move_dir = 2;
49 |     optional float move_speed = 3;
50 |     optional int32 hp = 4;
51 |     optional int32 enemy_id = 5;
52 |     optional bool is_respawn = 6;
53 |     optional bool is_invincible = 7;
54 | }
55 | 
56 | // after the game starts, observation data is transmitted as the request
57 | message Observation {
58 |     // information about the agent itself
59 |     optional int32 id = 1; // Agent ID
60 |     optional Vector3 location = 2; // 3D coordinates of the agent
61 |     optional float pitch = 3; // absolute pitch angle [-90, 90], 0 = horizontal
62 |     optional float yaw = 4; // absolute yaw angle [-180, 180], 0 = due north
63 |     optional int32 hp = 5; // health [0, 100]
64 |     optional Vector3 move_dir = 6; // moving direction in 3D space -> 3D unit vector
65 |     optional float move_speed = 7; // moving speed in 3D space
66 |     optional int32 num_gun_ammo = 8; // ammo left in the weapon clip
67 |     optional int32 num_pack_ammo = 9; // ammo left in the backpack
68 |     optional bool on_ground = 10; // whether the agent is on the ground
69 |     optional bool is_fire = 11; // whether the agent is firing
70 |     optional bool is_reload = 12; // whether the agent is reloading
71 |     optional bool hit_enemy = 13; // whether the agent hit an enemy
72 |     optional bool hit_by_enemy = 14; // whether the agent was hit by an enemy
73 |     optional int32 hit_enemy_id = 15; // id of the enemy that was hit
74 |     optional int32 hit_by_enemy_id = 16; // id of the enemy that hit the agent
75 |     optional bool can_pickup = 17; // whether a supply can currently be picked up
76 |     optional int32 num_supply = 18; // number of supplies in the backpack (not the number just picked up)
77 |     // environment perception information
78 |     repeated SupplyInfo supply_info_list = 19; // info of nearby visible supplies
79 |     repeated EnemyInfo enemy_info_list = 20; // info of nearby visible enemies
80 |     optional bytes image = 21; // first-person view image of the agent
81 |     // match status information
82 |     optional bool is_waiting_respawn = 22; // whether the agent is waiting to respawn
83 |     optional bool is_invincible = 23; // whether the agent is invincible
84 | }
85 | 
86 | // ------------------------- system events -------------------------------
87 | 
88 | // game type event
89 | message Event_GameStart {
90 |     GameModeType gameMode = 1;
91 | }
92 | 
93 | // supply refresh event
94 | message Event_SupplyRefrsh {
95 |     Vector3 pos = 1; // position
96 |     optional int32 count = 2; // quantity
97 |     optional int32 id = 3; // refresh point ID
98 | }
99 | 
100 | // settlement event
101 | message Event_Settle {
102 |     GameModeType gameMode = 1;
103 |     optional float elapsed_time = 2;
104 |     optional float move_distance = 3;
105 |     optional int32 supply_count = 4;
106 |     repeated Event_PlayerSettleInfo event_player_infos = 5;
107 | }
108 | 
109 | message Event_PlayerSettleInfo {
110 |     optional int32 supply_count = 1;
111 |     optional int32 rank = 2;
112 |     optional float move_distance = 3;
113 |     optional int32 kill_count = 4;
114 |     optional int32 dead_count = 5;
115 | }
116 | 
117 | // ------------------------- agent events -------------------------------
118 | 
119 | // agent spawn/refresh
120 | message Event_AgentRefresh {
121 |     Vector3 pos = 1; // spawn position
122 | }
123 | 
124 | // agent death
125 | message Event_AgentDead {
126 |     Vector3 pos = 1; // death position
127 | }
128 | 
129 | // agent was attacked and took damage
130 | message Event_AgentBeHited {
131 |     optional int32 damageValue = 1; // damage value
132 |     optional int32 fire_id = 2; // shooter ID
133 | }
134 | 
135 | // event of an agent obtaining a supply
136 | message Event_GetSupplySuccess {
137 |     optional int32 supply_id = 1; // supply ID
138 |     optional int32 supply_count = 2; // supply quantity
139 |     Vector3 pos = 3; // position where the supply was picked up
140 | }
141 | 
142 | // agent successfully reloaded
143 | message Event_AgentReloadSuccess {
144 |     // a non-null message of this type means the reload succeeded
145 | }
146 | 
147 | // agent event
148 | message Event_Agent {
149 |     optional int32 player_id = 1; // player ID
150 |     optional int32 supply_num_rank = 2; // rank by supply count
151 |     Event_AgentRefresh event_agent_refresh = 3; // agent spawn/refresh
152 |     Event_AgentDead event_agent_dead = 4; // agent death
153 |     Event_AgentBeHited event_agent_beHited = 5; // agent was attacked and took damage
154 |     Event_AgentReloadSuccess event_agent_reload_success = 6; // agent reloaded successfully
155 |     Event_GetSupplySuccess event_get_supply_success = 7; // agent successfully picked up a supply
156 | }
157 | 
158 | // system events
159 | message Event_System {
160 |     Event_GameStart event_game_start = 1; // game start event
161 |     Event_SupplyRefrsh event_supplyrefrsh = 2; // supply refresh event
162 |     Event_Settle event_settle = 3; // settlement event
163 | }
164 | 
165 | // game events
166 | message EventInfo {
167 |     int32 timeStep = 1;
168 |     Event_System event_system = 2; // system events
169 |     repeated Event_Agent event_agentInfos = 3; // list of agent events
170 | }
171 | 
172 | // action commands computed by the AI server, sent back to the environment side
173 | message AgentCommand {
174 |     optional int32 id = 1; // Agent ID
175 |     optional float walk_dir = 2; // moving direction [0, 360)
176 |     optional float walk_speed = 3; // moving speed [0, SPEED_MAX]
177 |     optional bool jump = 4; // jump control [True/False]
178 |     optional float turn_left_right_delta = 5; // horizontal angle change [-DELTA_MAX, DELTA_MAX]
179 |     optional float look_up_down_delta = 6; // vertical angle change [-DELTA_MAX, DELTA_MAX]
180 |     optional bool shoot_gun = 7; // fire [True/False]
181 |     optional bool reload = 8; // reload [True/False]
182 |     optional bool collect = 9; // pick up [True/False]
183 | }
184 | 
185 | message SupplyInitData {
186 |     optional Vector3 supply_heatmap_center = 8; // center of the supply distribution heatmap
187 |     optional int32 supply_aboundance = 9; // richness of the supply distribution [1, 100], default 50
188 |     optional int32 supply_create_percent = 10; // percentage of supplies spawned outdoors
189 |     optional int32 supply_house_create_percent = 11; // percentage of supplies spawned indoors
190 |     optional int32 supply_grid_length = 12; // side length of a supply grid cell (default 3)
191 |     optional int32 supply_random_min = 13; // lower bound of the random outdoor supply quantity
192 |     optional int32 supply_random_max = 14; // upper bound of the random outdoor supply quantity
193 |     optional int32 supply_house_random_min = 15; // lower bound of the random indoor supply quantity
194 |     optional int32 supply_house_random_max = 16; // upper bound of the random indoor supply quantity
195 | }
196 | 
197 | message SupplyRefreshData {
198 |     optional Vector3 supply_heatmap_center = 1; // center of the supply distribution heatmap
199 |     optional int32 supply_heatmap_radius = 2; // radius of the supply refresh area [1, 100], default 50
200 |     optional int32 supply_refresh_time = 3; // time at which this supply refresh happens
201 |     optional int32 supply_create_percent = 4; // percentage of supplies spawned outdoors
202 |     optional int32 supply_house_create_percent = 5; // percentage of supplies spawned indoors
203 | }
204 | 
205 | // environment control commands sent by the algorithm side
206 | message GMCommand {
207 |     // general settings
208 |     optional int32 timeout = 1; // time limit of this episode (seconds)
209 |     optional GameModeType game_mode = 2; // game mode, one of the three above
210 |     optional int32 time_scale = 3; // game speed multiplier; the default 1 means 60 steps per second, 10 means 600 steps per second
211 |     optional int32 map_id = 4; // map id
212 |     optional int32 random_seed = 5; // random seed for supply generation
213 |     optional int32 num_agents = 17; // number of agents in this episode -> defaults to 1 in the first two modes
214 |     repeated AgentSetup agent_setups = 18; // initialization parameters of all or some agents -> defaults to a single agent_setup in the first two modes
215 |     optional bool is_record = 21; // whether to save a replay of this episode
216 |     optional string replay_suffix = 22; // filename suffix of the saved replay (replay name format: {mode}_{map_id}_{time_stamp}_{replay_suffix}.bin)
217 |     // mode 1: navigation
218 |     optional Vector3 target_location = 6; // can be ignored when a random destination is used
219 |     optional float trigger_range = 7; // range within which the target location counts as reached
220 |     // mode 2: supply gathering
221 |     optional Vector3 supply_heatmap_center = 8; // center of the supply distribution heatmap
222 |     optional int32 supply_heatmap_radius = 9; // standard deviation of the heatmap distribution [1, 100], default 50 (in length units)
223 |     optional int32 supply_create_percent = 10; // percentage of supplies spawned outdoors
224 |     optional int32 supply_house_create_percent = 11; // percentage of supplies spawned indoors
225 |     optional int32 supply_grid_length = 12; // side length of a supply grid cell (default 3)
226 |     optional int32 supply_random_min = 13; // lower bound of the random outdoor supply quantity
227 |     optional int32 supply_random_max = 14; // upper bound of the random outdoor supply quantity
228 |     optional int32 supply_house_random_min = 15; // lower bound of the random indoor supply quantity
229 |     optional int32 supply_house_random_max = 16; // upper bound of the random indoor supply quantity
230 |     // mode 3: supply battle
231 |     repeated SupplyRefreshData supply_refresh_datas = 19; // schedule of supply heatmap center refreshes
232 |     optional int32 respawn_time = 20; // respawn waiting time (seconds)
233 |     optional bool is_random_respawn_location = 23; // whether the respawn location is random; default false, in which case start_location from agent_setups is used
234 |     optional int32 supply_loss_percent_when_dead = 24; // percentage of supplies dropped on death, default 0
235 |     // extra options
236 |     optional bool is_Open_Event = 25; // whether to enable observation events
237 |     optional float water_speed_decay = 26; // movement decay factor in water
238 |     optional int32 invincible_time = 27; // invincibility duration after respawn
239 | }
240 | 
241 | // information used by the algorithm side when initializing an AI; used in A2S_InitGame
242 | message AgentSetup {
243 |     optional int32 id = 1; // Agent ID -> [0, NUM_AGENTS-1]
244 |     optional int32 hp = 2; // Agent HP -> [0, 100]
245 |     optional int32 num_pack_ammo = 3; // initial ammo in the backpack
246 |     optional int32 gun_capacity = 4; // clip capacity
247 |     optional int32 attack_power = 5; // attack power (damage value) [0, 100], default 20
248 |     optional Vector3 start_location = 6; // spawn location
249 |     optional string agent_name = 7; // participant name -> shown in spectator mode
250 | }
251 | 
252 | // ----------------------- algorithm side <-> server protocol ------------------------
253 | enum GameState {
254 |     start = 0; // starting phase: the environment side sends an empty message and the base environment is initialized
255 |     reset = 1; // reset an episode: the algorithm side sends a GM command and game parameters are initialized
256 |     update = 2; // advance one step: the environment side sends observations, the algorithm side sends agent control commands
257 |     close = 3; // the whole game is over; the AI server will disconnect
258 |     over = 4; // one episode is over; a new one can be started
259 | }
260 | 
261 | // the environment side sends the current observation data
262 | message S2A_Request_Data {
263 |     optional GameState game_state = 1; // whether the current episode is over; another episode may follow
264 |     optional int32 time_step = 2; // current frame count
265 |     repeated Observation agent_obs_list = 3; // observations of all agents
266 |     EventInfo event_info = 4; // agent events
267 | }
268 | 
269 | // the algorithm side sends agent control operations
270 | message A2S_Reply_Data {
271 |     optional GameState game_state = 1; // reset the game and start a new episode
272 |     optional GMCommand gm_cmd = 2; // each episode has exactly one global GM command, controlled by the algorithm side
273 |     repeated AgentCommand agent_cmd_list = 3;
274 | }
275 | 
276 | message ReplayData {
277 |     optional string sceneName = 1; // name of the scene
278 |     repeated A2S_Reply_Data replayDatas = 2;
279 | }
280 | 
281 | //----------------------- client <-> server protocol ------------------------
282 | 
283 | // handshake between client and server
284 | message S2C_StartGame {
285 |     // parameterless protocol; the algorithm side only needs to respond
286 |     optional int32 playMode = 1;
287 |     optional int32 seed = 2; // after receiving this message, the client generates scene objects and random player spawn locations from the seed
288 | }
289 | 
290 | message C2S_StartGame {
291 |     // client response signal: 0 = client error, 1 = client environment OK
292 |     optional int32 singalCode = 1;
293 | }
294 | 
295 | message C2S_LoadingGame {
296 |     // optional LoadingData loadingData = 1;
297 | }
298 | 
299 | message S2C_LoadingGame {
300 |     optional bool isAgentInit = 1; // whether the AI-side initialization is finished
301 |     // repeated LoadingData loadingData = 2; // loading data of all real players
302 | }
303 | 
304 | message S2C_UpdateGameWorld {
305 |     // repeated SceneObj objs = 1; // initial scene info
306 | }
307 | 
308 | message C2S_UpdatePalyerOperate {
309 |     AgentCommand agentCommands = 1; // the player's operation info
310 |     optional int32 playerId = 2; // player ID
311 | }
312 | 
313 | message S2C_GameOver {
314 |     // game over
315 | }
316 | 
--------------------------------------------------------------------------------
/inspirai_fps/simple_command_pb2.py:
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 3 | # source: simple_command.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf.internal import enum_type_wrapper 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import descriptor_pool as _descriptor_pool 8 | from google.protobuf import message as _message 9 | from google.protobuf import reflection as _reflection 10 | from google.protobuf import symbol_database as _symbol_database 11 | # @@protoc_insertion_point(imports) 12 | 13 | _sym_db = _symbol_database.Default() 14 | 15 | 16 | 17 | 18 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14simple_command.proto\x12\x0b\x66ps_command\"K\n\x07Vector3\x12\x0e\n\x01x\x18\x01 \x01(\x02H\x00\x88\x01\x01\x12\x0e\n\x01y\x18\x02 \x01(\x02H\x01\x88\x01\x01\x12\x0e\n\x01z\x18\x03 \x01(\x02H\x02\x88\x01\x01\x42\x04\n\x02_xB\x04\n\x02_yB\x04\n\x02_z\"\xac\x01\n\nSupplyInfo\x12\x16\n\tsupply_id\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x0fsupply_location\x18\x02 \x01(\x0b\x32\x14.fps_command.Vector3H\x01\x88\x01\x01\x12\x1c\n\x0fsupply_quantity\x18\x03 \x01(\x05H\x02\x88\x01\x01\x42\x0c\n\n_supply_idB\x12\n\x10_supply_locationB\x12\n\x10_supply_quantity\"\xb9\x02\n\tEnemyInfo\x12+\n\x08location\x18\x01 \x01(\x0b\x32\x14.fps_command.Vector3H\x00\x88\x01\x01\x12+\n\x08move_dir\x18\x02 \x01(\x0b\x32\x14.fps_command.Vector3H\x01\x88\x01\x01\x12\x17\n\nmove_speed\x18\x03 \x01(\x02H\x02\x88\x01\x01\x12\x0f\n\x02hp\x18\x04 \x01(\x05H\x03\x88\x01\x01\x12\x15\n\x08\x65nemy_id\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x17\n\nis_respawn\x18\x06 \x01(\x08H\x05\x88\x01\x01\x12\x1a\n\ris_invincible\x18\x07 \x01(\x08H\x06\x88\x01\x01\x42\x0b\n\t_locationB\x0b\n\t_move_dirB\r\n\x0b_move_speedB\x05\n\x03_hpB\x0b\n\t_enemy_idB\r\n\x0b_is_respawnB\x10\n\x0e_is_invincible\"\xc1\x07\n\x0bObservation\x12\x0f\n\x02id\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12+\n\x08location\x18\x02 \x01(\x0b\x32\x14.fps_command.Vector3H\x01\x88\x01\x01\x12\x12\n\x05pitch\x18\x03 \x01(\x02H\x02\x88\x01\x01\x12\x10\n\x03yaw\x18\x04 \x01(\x02H\x03\x88\x01\x01\x12\x0f\n\x02hp\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12+\n\x08move_dir\x18\x06 \x01(\x0b\x32\x14.fps_command.Vector3H\x05\x88\x01\x01\x12\x17\n\nmove_speed\x18\x07 \x01(\x02H\x06\x88\x01\x01\x12\x19\n\x0cnum_gun_ammo\x18\x08 \x01(\x05H\x07\x88\x01\x01\x12\x1a\n\rnum_pack_ammo\x18\t \x01(\x05H\x08\x88\x01\x01\x12\x16\n\ton_ground\x18\x0b \x01(\x08H\t\x88\x01\x01\x12\x14\n\x07is_fire\x18\x0c \x01(\x08H\n\x88\x01\x01\x12\x16\n\tis_reload\x18\r \x01(\x08H\x0b\x88\x01\x01\x12\x16\n\thit_enemy\x18\x0e \x01(\x08H\x0c\x88\x01\x01\x12\x19\n\x0chit_by_enemy\x18\x0f \x01(\x08H\r\x88\x01\x01\x12\x19\n\x0chit_enemy_id\x18\x16 \x01(\x05H\x0e\x88\x01\x01\x12\x1c\n\x0fhit_by_enemy_id\x18\x17 \x01(\x05H\x0f\x88\x01\x01\x12\x17\n\ncan_pickup\x18\x10 \x01(\x08H\x10\x88\x01\x01\x12\x31\n\x10supply_info_list\x18\x11 \x03(\x0b\x32\x17.fps_command.SupplyInfo\x12/\n\x0f\x65nemy_info_list\x18\x12 \x03(\x0b\x32\x16.fps_command.EnemyInfo\x12\x17\n\nnum_supply\x18\x13 \x01(\x05H\x11\x88\x01\x01\x12\x1f\n\x12is_waiting_respawn\x18\x14 \x01(\x08H\x12\x88\x01\x01\x12\x1a\n\ris_invincible\x18\x15 \x01(\x08H\x13\x88\x01\x01\x12\x12\n\x05image\x18\x18 
\x01(\x0cH\x14\x88\x01\x01\x42\x05\n\x03_idB\x0b\n\t_locationB\x08\n\x06_pitchB\x06\n\x04_yawB\x05\n\x03_hpB\x0b\n\t_move_dirB\r\n\x0b_move_speedB\x0f\n\r_num_gun_ammoB\x10\n\x0e_num_pack_ammoB\x0c\n\n_on_groundB\n\n\x08_is_fireB\x0c\n\n_is_reloadB\x0c\n\n_hit_enemyB\x0f\n\r_hit_by_enemyB\x0f\n\r_hit_enemy_idB\x12\n\x10_hit_by_enemy_idB\r\n\x0b_can_pickupB\r\n\x0b_num_supplyB\x15\n\x13_is_waiting_respawnB\x10\n\x0e_is_invincibleB\x08\n\x06_image\">\n\x0f\x45vent_GameStart\x12+\n\x08gameMode\x18\x01 \x01(\x0e\x32\x19.fps_command.GameModeType\"m\n\x12\x45vent_SupplyRefrsh\x12!\n\x03pos\x18\x01 \x01(\x0b\x32\x14.fps_command.Vector3\x12\x12\n\x05\x63ount\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x03 \x01(\x05H\x01\x88\x01\x01\x42\x08\n\x06_countB\x05\n\x03_id\"\x82\x02\n\x0c\x45vent_Settle\x12+\n\x08gameMode\x18\x01 \x01(\x0e\x32\x19.fps_command.GameModeType\x12\x19\n\x0c\x65lapsed_time\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x1a\n\rmove_distance\x18\x03 \x01(\x02H\x01\x88\x01\x01\x12\x19\n\x0csupply_count\x18\x04 \x01(\x05H\x02\x88\x01\x01\x12?\n\x12\x65vent_player_infos\x18\x05 \x03(\x0b\x32#.fps_command.Event_PlayerSettleInfoB\x0f\n\r_elapsed_timeB\x10\n\x0e_move_distanceB\x0f\n\r_supply_count\"\xde\x01\n\x16\x45vent_PlayerSettleInfo\x12\x19\n\x0csupply_count\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x11\n\x04rank\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x1a\n\rmove_distance\x18\x03 \x01(\x02H\x02\x88\x01\x01\x12\x17\n\nkill_count\x18\x04 \x01(\x05H\x03\x88\x01\x01\x12\x17\n\ndead_count\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0f\n\r_supply_countB\x07\n\x05_rankB\x10\n\x0e_move_distanceB\r\n\x0b_kill_countB\r\n\x0b_dead_count\"7\n\x12\x45vent_AgentRefresh\x12!\n\x03pos\x18\x01 \x01(\x0b\x32\x14.fps_command.Vector3\"4\n\x0f\x45vent_AgentDead\x12!\n\x03pos\x18\x01 \x01(\x0b\x32\x14.fps_command.Vector3\"`\n\x12\x45vent_AgentBeHited\x12\x18\n\x0b\x64\x61mageValue\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07\x66ire_id\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0e\n\x0c_damageValueB\n\n\x08_fire_id\"\x8d\x01\n\x16\x45vent_GetSupplySuccess\x12\x16\n\tsupply_id\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x19\n\x0csupply_count\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12!\n\x03pos\x18\x03 \x01(\x0b\x32\x14.fps_command.Vector3B\x0c\n\n_supply_idB\x0f\n\r_supply_count\"\x1a\n\x18\x45vent_AgentReloadSuccess\"\xab\x03\n\x0b\x45vent_Agent\x12\x16\n\tplayer_id\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x1c\n\x0fsupply_num_rank\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12<\n\x13\x65vent_agent_refresh\x18\x03 \x01(\x0b\x32\x1f.fps_command.Event_AgentRefresh\x12\x36\n\x10\x65vent_agent_dead\x18\x04 \x01(\x0b\x32\x1c.fps_command.Event_AgentDead\x12<\n\x13\x65vent_agent_beHited\x18\x05 \x01(\x0b\x32\x1f.fps_command.Event_AgentBeHited\x12I\n\x1a\x65vent_agent_reload_success\x18\x06 \x01(\x0b\x32%.fps_command.Event_AgentReloadSuccess\x12\x45\n\x18\x65vent_get_supply_success\x18\x07 \x01(\x0b\x32#.fps_command.Event_GetSupplySuccessB\x0c\n\n_player_idB\x12\n\x10_supply_num_rank\"\xb4\x01\n\x0c\x45vent_System\x12\x36\n\x10\x65vent_game_start\x18\x01 \x01(\x0b\x32\x1c.fps_command.Event_GameStart\x12;\n\x12\x65vent_supplyrefrsh\x18\x02 \x01(\x0b\x32\x1f.fps_command.Event_SupplyRefrsh\x12/\n\x0c\x65vent_settle\x18\x03 \x01(\x0b\x32\x19.fps_command.Event_Settle\"\x82\x01\n\tEventInfo\x12\x10\n\x08timeStep\x18\x01 \x01(\x05\x12/\n\x0c\x65vent_system\x18\x02 \x01(\x0b\x32\x19.fps_command.Event_System\x12\x32\n\x10\x65vent_agentInfos\x18\x03 
\x03(\x0b\x32\x18.fps_command.Event_Agent\"\xec\x02\n\x0c\x41gentCommand\x12\x0f\n\x02id\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08walk_dir\x18\x02 \x01(\x02H\x01\x88\x01\x01\x12\x17\n\nwalk_speed\x18\x03 \x01(\x02H\x02\x88\x01\x01\x12\x11\n\x04jump\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\"\n\x15turn_left_right_delta\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x1f\n\x12look_up_down_delta\x18\x06 \x01(\x02H\x05\x88\x01\x01\x12\x16\n\tshoot_gun\x18\x07 \x01(\x08H\x06\x88\x01\x01\x12\x13\n\x06reload\x18\x08 \x01(\x08H\x07\x88\x01\x01\x12\x14\n\x07\x63ollect\x18\t \x01(\x08H\x08\x88\x01\x01\x42\x05\n\x03_idB\x0b\n\t_walk_dirB\r\n\x0b_walk_speedB\x07\n\x05_jumpB\x18\n\x16_turn_left_right_deltaB\x15\n\x13_look_up_down_deltaB\x0c\n\n_shoot_gunB\t\n\x07_reloadB\n\n\x08_collect\"\xca\x04\n\x0eSupplyInitData\x12\x38\n\x15supply_heatmap_center\x18\x08 \x01(\x0b\x32\x14.fps_command.Vector3H\x00\x88\x01\x01\x12\x1e\n\x11supply_aboundance\x18\t \x01(\x05H\x01\x88\x01\x01\x12\"\n\x15supply_create_percent\x18\n \x01(\x05H\x02\x88\x01\x01\x12(\n\x1bsupply_house_create_percent\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12\x1f\n\x12supply_grid_length\x18\x0c \x01(\x05H\x04\x88\x01\x01\x12\x1e\n\x11supply_random_min\x18\r \x01(\x05H\x05\x88\x01\x01\x12\x1e\n\x11supply_random_max\x18\x0e \x01(\x05H\x06\x88\x01\x01\x12$\n\x17supply_house_random_min\x18\x0f \x01(\x05H\x07\x88\x01\x01\x12$\n\x17supply_house_random_max\x18\x10 \x01(\x05H\x08\x88\x01\x01\x42\x18\n\x16_supply_heatmap_centerB\x14\n\x12_supply_aboundanceB\x18\n\x16_supply_create_percentB\x1e\n\x1c_supply_house_create_percentB\x15\n\x13_supply_grid_lengthB\x14\n\x12_supply_random_minB\x14\n\x12_supply_random_maxB\x1a\n\x18_supply_house_random_minB\x1a\n\x18_supply_house_random_max\"\xe7\x02\n\x11SupplyRefreshData\x12\x38\n\x15supply_heatmap_center\x18\x01 \x01(\x0b\x32\x14.fps_command.Vector3H\x00\x88\x01\x01\x12\"\n\x15supply_heatmap_radius\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12 \n\x13supply_refresh_time\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\"\n\x15supply_create_percent\x18\x04 \x01(\x05H\x03\x88\x01\x01\x12(\n\x1bsupply_house_create_percent\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x18\n\x16_supply_heatmap_centerB\x18\n\x16_supply_heatmap_radiusB\x16\n\x14_supply_refresh_timeB\x18\n\x16_supply_create_percentB\x1e\n\x1c_supply_house_create_percent\"\xd9\x0b\n\tGMCommand\x12\x14\n\x07timeout\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x31\n\tgame_mode\x18\x02 \x01(\x0e\x32\x19.fps_command.GameModeTypeH\x01\x88\x01\x01\x12\x17\n\ntime_scale\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x13\n\x06map_id\x18\x04 \x01(\x05H\x03\x88\x01\x01\x12\x18\n\x0brandom_seed\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x17\n\nnum_agents\x18\x11 \x01(\x05H\x05\x88\x01\x01\x12-\n\x0c\x61gent_setups\x18\x12 \x03(\x0b\x32\x17.fps_command.AgentSetup\x12\x16\n\tis_record\x18\x15 \x01(\x08H\x06\x88\x01\x01\x12\x1a\n\rreplay_suffix\x18\x16 \x01(\tH\x07\x88\x01\x01\x12\x32\n\x0ftarget_location\x18\x06 \x01(\x0b\x32\x14.fps_command.Vector3H\x08\x88\x01\x01\x12\x1a\n\rtrigger_range\x18\x07 \x01(\x02H\t\x88\x01\x01\x12\x38\n\x15supply_heatmap_center\x18\x08 \x01(\x0b\x32\x14.fps_command.Vector3H\n\x88\x01\x01\x12\"\n\x15supply_heatmap_radius\x18\t \x01(\x05H\x0b\x88\x01\x01\x12\"\n\x15supply_create_percent\x18\n \x01(\x05H\x0c\x88\x01\x01\x12(\n\x1bsupply_house_create_percent\x18\x0b \x01(\x05H\r\x88\x01\x01\x12\x1f\n\x12supply_grid_length\x18\x0c \x01(\x05H\x0e\x88\x01\x01\x12\x1e\n\x11supply_random_min\x18\r \x01(\x05H\x0f\x88\x01\x01\x12\x1e\n\x11supply_random_max\x18\x0e 
\x01(\x05H\x10\x88\x01\x01\x12$\n\x17supply_house_random_min\x18\x0f \x01(\x05H\x11\x88\x01\x01\x12$\n\x17supply_house_random_max\x18\x10 \x01(\x05H\x12\x88\x01\x01\x12<\n\x14supply_refresh_datas\x18\x13 \x03(\x0b\x32\x1e.fps_command.SupplyRefreshData\x12\x19\n\x0crespawn_time\x18\x14 \x01(\x05H\x13\x88\x01\x01\x12\'\n\x1ais_random_respawn_location\x18\x17 \x01(\x08H\x14\x88\x01\x01\x12*\n\x1dsupply_loss_percent_when_dead\x18\x18 \x01(\x05H\x15\x88\x01\x01\x12\x1a\n\ris_Open_Event\x18\x19 \x01(\x08H\x16\x88\x01\x01\x12\x1e\n\x11water_speed_decay\x18\x1a \x01(\x02H\x17\x88\x01\x01\x12\x1c\n\x0finvincible_time\x18\x1b \x01(\x05H\x18\x88\x01\x01\x42\n\n\x08_timeoutB\x0c\n\n_game_modeB\r\n\x0b_time_scaleB\t\n\x07_map_idB\x0e\n\x0c_random_seedB\r\n\x0b_num_agentsB\x0c\n\n_is_recordB\x10\n\x0e_replay_suffixB\x12\n\x10_target_locationB\x10\n\x0e_trigger_rangeB\x18\n\x16_supply_heatmap_centerB\x18\n\x16_supply_heatmap_radiusB\x18\n\x16_supply_create_percentB\x1e\n\x1c_supply_house_create_percentB\x15\n\x13_supply_grid_lengthB\x14\n\x12_supply_random_minB\x14\n\x12_supply_random_maxB\x1a\n\x18_supply_house_random_minB\x1a\n\x18_supply_house_random_maxB\x0f\n\r_respawn_timeB\x1d\n\x1b_is_random_respawn_locationB \n\x1e_supply_loss_percent_when_deadB\x10\n\x0e_is_Open_EventB\x14\n\x12_water_speed_decayB\x12\n\x10_invincible_time\"\xb0\x02\n\nAgentSetup\x12\x0f\n\x02id\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x0f\n\x02hp\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x1a\n\rnum_pack_ammo\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x19\n\x0cgun_capacity\x18\x04 \x01(\x05H\x03\x88\x01\x01\x12\x19\n\x0c\x61ttack_power\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x31\n\x0estart_location\x18\x06 \x01(\x0b\x32\x14.fps_command.Vector3H\x05\x88\x01\x01\x12\x17\n\nagent_name\x18\x07 \x01(\tH\x06\x88\x01\x01\x42\x05\n\x03_idB\x05\n\x03_hpB\x10\n\x0e_num_pack_ammoB\x0f\n\r_gun_capacityB\x0f\n\r_attack_powerB\x11\n\x0f_start_locationB\r\n\x0b_agent_name\"\xd6\x01\n\x10S2A_Request_Data\x12/\n\ngame_state\x18\x01 \x01(\x0e\x32\x16.fps_command.GameStateH\x00\x88\x01\x01\x12\x16\n\ttime_step\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x30\n\x0e\x61gent_obs_list\x18\x03 \x03(\x0b\x32\x18.fps_command.Observation\x12*\n\nevent_info\x18\x04 \x01(\x0b\x32\x16.fps_command.EventInfoB\r\n\x0b_game_stateB\x0c\n\n_time_step\"\xbb\x01\n\x0e\x41\x32S_Reply_Data\x12/\n\ngame_state\x18\x01 \x01(\x0e\x32\x16.fps_command.GameStateH\x00\x88\x01\x01\x12+\n\x06gm_cmd\x18\x02 \x01(\x0b\x32\x16.fps_command.GMCommandH\x01\x88\x01\x01\x12\x31\n\x0e\x61gent_cmd_list\x18\x03 \x03(\x0b\x32\x19.fps_command.AgentCommandB\r\n\x0b_game_stateB\t\n\x07_gm_cmd\"d\n\nReplayData\x12\x16\n\tsceneName\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0breplayDatas\x18\x02 \x03(\x0b\x32\x1b.fps_command.A2S_Reply_DataB\x0c\n\n_sceneName\"O\n\rS2C_StartGame\x12\x15\n\x08playMode\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x11\n\x04seed\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0b\n\t_playModeB\x07\n\x05_seed\"7\n\rC2S_StartGame\x12\x17\n\nsingalCode\x18\x01 \x01(\x05H\x00\x88\x01\x01\x42\r\n\x0b_singalCode\"\x11\n\x0f\x43\x32S_LoadingGame\";\n\x0fS2C_LoadingGame\x12\x18\n\x0bisAgentInit\x18\x01 \x01(\x08H\x00\x88\x01\x01\x42\x0e\n\x0c_isAgentInit\"\x15\n\x13S2C_UpdateGameWorld\"o\n\x17\x43\x32S_UpdatePalyerOperate\x12\x30\n\ragentCommands\x18\x01 \x01(\x0b\x32\x19.fps_command.AgentCommand\x12\x15\n\x08playerId\x18\x02 
\x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_playerId\"\x0e\n\x0cS2C_GameOver*M\n\x0cGameModeType\x12\x13\n\x0fNAVIGATION_MODE\x10\x00\x12\x13\n\x0fSUP_GATHER_MODE\x10\x01\x12\x13\n\x0fSUP_BATTLE_MODE\x10\x02*B\n\tGameState\x12\t\n\x05start\x10\x00\x12\t\n\x05reset\x10\x01\x12\n\n\x06update\x10\x02\x12\t\n\x05\x63lose\x10\x03\x12\x08\n\x04over\x10\x04\x32\x63\n\tCommander\x12V\n\x16Request_S2A_UpdateGame\x12\x1d.fps_command.S2A_Request_Data\x1a\x1b.fps_command.A2S_Reply_Data\"\x00\x62\x06proto3') 19 | 20 | _GAMEMODETYPE = DESCRIPTOR.enum_types_by_name['GameModeType'] 21 | GameModeType = enum_type_wrapper.EnumTypeWrapper(_GAMEMODETYPE) 22 | _GAMESTATE = DESCRIPTOR.enum_types_by_name['GameState'] 23 | GameState = enum_type_wrapper.EnumTypeWrapper(_GAMESTATE) 24 | NAVIGATION_MODE = 0 25 | SUP_GATHER_MODE = 1 26 | SUP_BATTLE_MODE = 2 27 | start = 0 28 | reset = 1 29 | update = 2 30 | close = 3 31 | over = 4 32 | 33 | 34 | _VECTOR3 = DESCRIPTOR.message_types_by_name['Vector3'] 35 | _SUPPLYINFO = DESCRIPTOR.message_types_by_name['SupplyInfo'] 36 | _ENEMYINFO = DESCRIPTOR.message_types_by_name['EnemyInfo'] 37 | _OBSERVATION = DESCRIPTOR.message_types_by_name['Observation'] 38 | _EVENT_GAMESTART = DESCRIPTOR.message_types_by_name['Event_GameStart'] 39 | _EVENT_SUPPLYREFRSH = DESCRIPTOR.message_types_by_name['Event_SupplyRefrsh'] 40 | _EVENT_SETTLE = DESCRIPTOR.message_types_by_name['Event_Settle'] 41 | _EVENT_PLAYERSETTLEINFO = DESCRIPTOR.message_types_by_name['Event_PlayerSettleInfo'] 42 | _EVENT_AGENTREFRESH = DESCRIPTOR.message_types_by_name['Event_AgentRefresh'] 43 | _EVENT_AGENTDEAD = DESCRIPTOR.message_types_by_name['Event_AgentDead'] 44 | _EVENT_AGENTBEHITED = DESCRIPTOR.message_types_by_name['Event_AgentBeHited'] 45 | _EVENT_GETSUPPLYSUCCESS = DESCRIPTOR.message_types_by_name['Event_GetSupplySuccess'] 46 | _EVENT_AGENTRELOADSUCCESS = DESCRIPTOR.message_types_by_name['Event_AgentReloadSuccess'] 47 | _EVENT_AGENT = DESCRIPTOR.message_types_by_name['Event_Agent'] 48 | _EVENT_SYSTEM = DESCRIPTOR.message_types_by_name['Event_System'] 49 | _EVENTINFO = DESCRIPTOR.message_types_by_name['EventInfo'] 50 | _AGENTCOMMAND = DESCRIPTOR.message_types_by_name['AgentCommand'] 51 | _SUPPLYINITDATA = DESCRIPTOR.message_types_by_name['SupplyInitData'] 52 | _SUPPLYREFRESHDATA = DESCRIPTOR.message_types_by_name['SupplyRefreshData'] 53 | _GMCOMMAND = DESCRIPTOR.message_types_by_name['GMCommand'] 54 | _AGENTSETUP = DESCRIPTOR.message_types_by_name['AgentSetup'] 55 | _S2A_REQUEST_DATA = DESCRIPTOR.message_types_by_name['S2A_Request_Data'] 56 | _A2S_REPLY_DATA = DESCRIPTOR.message_types_by_name['A2S_Reply_Data'] 57 | _REPLAYDATA = DESCRIPTOR.message_types_by_name['ReplayData'] 58 | _S2C_STARTGAME = DESCRIPTOR.message_types_by_name['S2C_StartGame'] 59 | _C2S_STARTGAME = DESCRIPTOR.message_types_by_name['C2S_StartGame'] 60 | _C2S_LOADINGGAME = DESCRIPTOR.message_types_by_name['C2S_LoadingGame'] 61 | _S2C_LOADINGGAME = DESCRIPTOR.message_types_by_name['S2C_LoadingGame'] 62 | _S2C_UPDATEGAMEWORLD = DESCRIPTOR.message_types_by_name['S2C_UpdateGameWorld'] 63 | _C2S_UPDATEPALYEROPERATE = DESCRIPTOR.message_types_by_name['C2S_UpdatePalyerOperate'] 64 | _S2C_GAMEOVER = DESCRIPTOR.message_types_by_name['S2C_GameOver'] 65 | Vector3 = _reflection.GeneratedProtocolMessageType('Vector3', (_message.Message,), { 66 | 'DESCRIPTOR' : _VECTOR3, 67 | '__module__' : 'simple_command_pb2' 68 | # @@protoc_insertion_point(class_scope:fps_command.Vector3) 69 | }) 70 | _sym_db.RegisterMessage(Vector3) 71 | 72 | SupplyInfo = 
_reflection.GeneratedProtocolMessageType('SupplyInfo', (_message.Message,), { 73 | 'DESCRIPTOR' : _SUPPLYINFO, 74 | '__module__' : 'simple_command_pb2' 75 | # @@protoc_insertion_point(class_scope:fps_command.SupplyInfo) 76 | }) 77 | _sym_db.RegisterMessage(SupplyInfo) 78 | 79 | EnemyInfo = _reflection.GeneratedProtocolMessageType('EnemyInfo', (_message.Message,), { 80 | 'DESCRIPTOR' : _ENEMYINFO, 81 | '__module__' : 'simple_command_pb2' 82 | # @@protoc_insertion_point(class_scope:fps_command.EnemyInfo) 83 | }) 84 | _sym_db.RegisterMessage(EnemyInfo) 85 | 86 | Observation = _reflection.GeneratedProtocolMessageType('Observation', (_message.Message,), { 87 | 'DESCRIPTOR' : _OBSERVATION, 88 | '__module__' : 'simple_command_pb2' 89 | # @@protoc_insertion_point(class_scope:fps_command.Observation) 90 | }) 91 | _sym_db.RegisterMessage(Observation) 92 | 93 | Event_GameStart = _reflection.GeneratedProtocolMessageType('Event_GameStart', (_message.Message,), { 94 | 'DESCRIPTOR' : _EVENT_GAMESTART, 95 | '__module__' : 'simple_command_pb2' 96 | # @@protoc_insertion_point(class_scope:fps_command.Event_GameStart) 97 | }) 98 | _sym_db.RegisterMessage(Event_GameStart) 99 | 100 | Event_SupplyRefrsh = _reflection.GeneratedProtocolMessageType('Event_SupplyRefrsh', (_message.Message,), { 101 | 'DESCRIPTOR' : _EVENT_SUPPLYREFRSH, 102 | '__module__' : 'simple_command_pb2' 103 | # @@protoc_insertion_point(class_scope:fps_command.Event_SupplyRefrsh) 104 | }) 105 | _sym_db.RegisterMessage(Event_SupplyRefrsh) 106 | 107 | Event_Settle = _reflection.GeneratedProtocolMessageType('Event_Settle', (_message.Message,), { 108 | 'DESCRIPTOR' : _EVENT_SETTLE, 109 | '__module__' : 'simple_command_pb2' 110 | # @@protoc_insertion_point(class_scope:fps_command.Event_Settle) 111 | }) 112 | _sym_db.RegisterMessage(Event_Settle) 113 | 114 | Event_PlayerSettleInfo = _reflection.GeneratedProtocolMessageType('Event_PlayerSettleInfo', (_message.Message,), { 115 | 'DESCRIPTOR' : _EVENT_PLAYERSETTLEINFO, 116 | '__module__' : 'simple_command_pb2' 117 | # @@protoc_insertion_point(class_scope:fps_command.Event_PlayerSettleInfo) 118 | }) 119 | _sym_db.RegisterMessage(Event_PlayerSettleInfo) 120 | 121 | Event_AgentRefresh = _reflection.GeneratedProtocolMessageType('Event_AgentRefresh', (_message.Message,), { 122 | 'DESCRIPTOR' : _EVENT_AGENTREFRESH, 123 | '__module__' : 'simple_command_pb2' 124 | # @@protoc_insertion_point(class_scope:fps_command.Event_AgentRefresh) 125 | }) 126 | _sym_db.RegisterMessage(Event_AgentRefresh) 127 | 128 | Event_AgentDead = _reflection.GeneratedProtocolMessageType('Event_AgentDead', (_message.Message,), { 129 | 'DESCRIPTOR' : _EVENT_AGENTDEAD, 130 | '__module__' : 'simple_command_pb2' 131 | # @@protoc_insertion_point(class_scope:fps_command.Event_AgentDead) 132 | }) 133 | _sym_db.RegisterMessage(Event_AgentDead) 134 | 135 | Event_AgentBeHited = _reflection.GeneratedProtocolMessageType('Event_AgentBeHited', (_message.Message,), { 136 | 'DESCRIPTOR' : _EVENT_AGENTBEHITED, 137 | '__module__' : 'simple_command_pb2' 138 | # @@protoc_insertion_point(class_scope:fps_command.Event_AgentBeHited) 139 | }) 140 | _sym_db.RegisterMessage(Event_AgentBeHited) 141 | 142 | Event_GetSupplySuccess = _reflection.GeneratedProtocolMessageType('Event_GetSupplySuccess', (_message.Message,), { 143 | 'DESCRIPTOR' : _EVENT_GETSUPPLYSUCCESS, 144 | '__module__' : 'simple_command_pb2' 145 | # @@protoc_insertion_point(class_scope:fps_command.Event_GetSupplySuccess) 146 | }) 147 | _sym_db.RegisterMessage(Event_GetSupplySuccess) 148 | 149 | 
Event_AgentReloadSuccess = _reflection.GeneratedProtocolMessageType('Event_AgentReloadSuccess', (_message.Message,), { 150 | 'DESCRIPTOR' : _EVENT_AGENTRELOADSUCCESS, 151 | '__module__' : 'simple_command_pb2' 152 | # @@protoc_insertion_point(class_scope:fps_command.Event_AgentReloadSuccess) 153 | }) 154 | _sym_db.RegisterMessage(Event_AgentReloadSuccess) 155 | 156 | Event_Agent = _reflection.GeneratedProtocolMessageType('Event_Agent', (_message.Message,), { 157 | 'DESCRIPTOR' : _EVENT_AGENT, 158 | '__module__' : 'simple_command_pb2' 159 | # @@protoc_insertion_point(class_scope:fps_command.Event_Agent) 160 | }) 161 | _sym_db.RegisterMessage(Event_Agent) 162 | 163 | Event_System = _reflection.GeneratedProtocolMessageType('Event_System', (_message.Message,), { 164 | 'DESCRIPTOR' : _EVENT_SYSTEM, 165 | '__module__' : 'simple_command_pb2' 166 | # @@protoc_insertion_point(class_scope:fps_command.Event_System) 167 | }) 168 | _sym_db.RegisterMessage(Event_System) 169 | 170 | EventInfo = _reflection.GeneratedProtocolMessageType('EventInfo', (_message.Message,), { 171 | 'DESCRIPTOR' : _EVENTINFO, 172 | '__module__' : 'simple_command_pb2' 173 | # @@protoc_insertion_point(class_scope:fps_command.EventInfo) 174 | }) 175 | _sym_db.RegisterMessage(EventInfo) 176 | 177 | AgentCommand = _reflection.GeneratedProtocolMessageType('AgentCommand', (_message.Message,), { 178 | 'DESCRIPTOR' : _AGENTCOMMAND, 179 | '__module__' : 'simple_command_pb2' 180 | # @@protoc_insertion_point(class_scope:fps_command.AgentCommand) 181 | }) 182 | _sym_db.RegisterMessage(AgentCommand) 183 | 184 | SupplyInitData = _reflection.GeneratedProtocolMessageType('SupplyInitData', (_message.Message,), { 185 | 'DESCRIPTOR' : _SUPPLYINITDATA, 186 | '__module__' : 'simple_command_pb2' 187 | # @@protoc_insertion_point(class_scope:fps_command.SupplyInitData) 188 | }) 189 | _sym_db.RegisterMessage(SupplyInitData) 190 | 191 | SupplyRefreshData = _reflection.GeneratedProtocolMessageType('SupplyRefreshData', (_message.Message,), { 192 | 'DESCRIPTOR' : _SUPPLYREFRESHDATA, 193 | '__module__' : 'simple_command_pb2' 194 | # @@protoc_insertion_point(class_scope:fps_command.SupplyRefreshData) 195 | }) 196 | _sym_db.RegisterMessage(SupplyRefreshData) 197 | 198 | GMCommand = _reflection.GeneratedProtocolMessageType('GMCommand', (_message.Message,), { 199 | 'DESCRIPTOR' : _GMCOMMAND, 200 | '__module__' : 'simple_command_pb2' 201 | # @@protoc_insertion_point(class_scope:fps_command.GMCommand) 202 | }) 203 | _sym_db.RegisterMessage(GMCommand) 204 | 205 | AgentSetup = _reflection.GeneratedProtocolMessageType('AgentSetup', (_message.Message,), { 206 | 'DESCRIPTOR' : _AGENTSETUP, 207 | '__module__' : 'simple_command_pb2' 208 | # @@protoc_insertion_point(class_scope:fps_command.AgentSetup) 209 | }) 210 | _sym_db.RegisterMessage(AgentSetup) 211 | 212 | S2A_Request_Data = _reflection.GeneratedProtocolMessageType('S2A_Request_Data', (_message.Message,), { 213 | 'DESCRIPTOR' : _S2A_REQUEST_DATA, 214 | '__module__' : 'simple_command_pb2' 215 | # @@protoc_insertion_point(class_scope:fps_command.S2A_Request_Data) 216 | }) 217 | _sym_db.RegisterMessage(S2A_Request_Data) 218 | 219 | A2S_Reply_Data = _reflection.GeneratedProtocolMessageType('A2S_Reply_Data', (_message.Message,), { 220 | 'DESCRIPTOR' : _A2S_REPLY_DATA, 221 | '__module__' : 'simple_command_pb2' 222 | # @@protoc_insertion_point(class_scope:fps_command.A2S_Reply_Data) 223 | }) 224 | _sym_db.RegisterMessage(A2S_Reply_Data) 225 | 226 | ReplayData = _reflection.GeneratedProtocolMessageType('ReplayData', 
(_message.Message,), { 227 | 'DESCRIPTOR' : _REPLAYDATA, 228 | '__module__' : 'simple_command_pb2' 229 | # @@protoc_insertion_point(class_scope:fps_command.ReplayData) 230 | }) 231 | _sym_db.RegisterMessage(ReplayData) 232 | 233 | S2C_StartGame = _reflection.GeneratedProtocolMessageType('S2C_StartGame', (_message.Message,), { 234 | 'DESCRIPTOR' : _S2C_STARTGAME, 235 | '__module__' : 'simple_command_pb2' 236 | # @@protoc_insertion_point(class_scope:fps_command.S2C_StartGame) 237 | }) 238 | _sym_db.RegisterMessage(S2C_StartGame) 239 | 240 | C2S_StartGame = _reflection.GeneratedProtocolMessageType('C2S_StartGame', (_message.Message,), { 241 | 'DESCRIPTOR' : _C2S_STARTGAME, 242 | '__module__' : 'simple_command_pb2' 243 | # @@protoc_insertion_point(class_scope:fps_command.C2S_StartGame) 244 | }) 245 | _sym_db.RegisterMessage(C2S_StartGame) 246 | 247 | C2S_LoadingGame = _reflection.GeneratedProtocolMessageType('C2S_LoadingGame', (_message.Message,), { 248 | 'DESCRIPTOR' : _C2S_LOADINGGAME, 249 | '__module__' : 'simple_command_pb2' 250 | # @@protoc_insertion_point(class_scope:fps_command.C2S_LoadingGame) 251 | }) 252 | _sym_db.RegisterMessage(C2S_LoadingGame) 253 | 254 | S2C_LoadingGame = _reflection.GeneratedProtocolMessageType('S2C_LoadingGame', (_message.Message,), { 255 | 'DESCRIPTOR' : _S2C_LOADINGGAME, 256 | '__module__' : 'simple_command_pb2' 257 | # @@protoc_insertion_point(class_scope:fps_command.S2C_LoadingGame) 258 | }) 259 | _sym_db.RegisterMessage(S2C_LoadingGame) 260 | 261 | S2C_UpdateGameWorld = _reflection.GeneratedProtocolMessageType('S2C_UpdateGameWorld', (_message.Message,), { 262 | 'DESCRIPTOR' : _S2C_UPDATEGAMEWORLD, 263 | '__module__' : 'simple_command_pb2' 264 | # @@protoc_insertion_point(class_scope:fps_command.S2C_UpdateGameWorld) 265 | }) 266 | _sym_db.RegisterMessage(S2C_UpdateGameWorld) 267 | 268 | C2S_UpdatePalyerOperate = _reflection.GeneratedProtocolMessageType('C2S_UpdatePalyerOperate', (_message.Message,), { 269 | 'DESCRIPTOR' : _C2S_UPDATEPALYEROPERATE, 270 | '__module__' : 'simple_command_pb2' 271 | # @@protoc_insertion_point(class_scope:fps_command.C2S_UpdatePalyerOperate) 272 | }) 273 | _sym_db.RegisterMessage(C2S_UpdatePalyerOperate) 274 | 275 | S2C_GameOver = _reflection.GeneratedProtocolMessageType('S2C_GameOver', (_message.Message,), { 276 | 'DESCRIPTOR' : _S2C_GAMEOVER, 277 | '__module__' : 'simple_command_pb2' 278 | # @@protoc_insertion_point(class_scope:fps_command.S2C_GameOver) 279 | }) 280 | _sym_db.RegisterMessage(S2C_GameOver) 281 | 282 | _COMMANDER = DESCRIPTOR.services_by_name['Commander'] 283 | if _descriptor._USE_C_DESCRIPTORS == False: 284 | 285 | DESCRIPTOR._options = None 286 | _GAMEMODETYPE._serialized_start=7361 287 | _GAMEMODETYPE._serialized_end=7438 288 | _GAMESTATE._serialized_start=7440 289 | _GAMESTATE._serialized_end=7506 290 | _VECTOR3._serialized_start=37 291 | _VECTOR3._serialized_end=112 292 | _SUPPLYINFO._serialized_start=115 293 | _SUPPLYINFO._serialized_end=287 294 | _ENEMYINFO._serialized_start=290 295 | _ENEMYINFO._serialized_end=603 296 | _OBSERVATION._serialized_start=606 297 | _OBSERVATION._serialized_end=1567 298 | _EVENT_GAMESTART._serialized_start=1569 299 | _EVENT_GAMESTART._serialized_end=1631 300 | _EVENT_SUPPLYREFRSH._serialized_start=1633 301 | _EVENT_SUPPLYREFRSH._serialized_end=1742 302 | _EVENT_SETTLE._serialized_start=1745 303 | _EVENT_SETTLE._serialized_end=2003 304 | _EVENT_PLAYERSETTLEINFO._serialized_start=2006 305 | _EVENT_PLAYERSETTLEINFO._serialized_end=2228 306 | 
_EVENT_AGENTREFRESH._serialized_start=2230 307 | _EVENT_AGENTREFRESH._serialized_end=2285 308 | _EVENT_AGENTDEAD._serialized_start=2287 309 | _EVENT_AGENTDEAD._serialized_end=2339 310 | _EVENT_AGENTBEHITED._serialized_start=2341 311 | _EVENT_AGENTBEHITED._serialized_end=2437 312 | _EVENT_GETSUPPLYSUCCESS._serialized_start=2440 313 | _EVENT_GETSUPPLYSUCCESS._serialized_end=2581 314 | _EVENT_AGENTRELOADSUCCESS._serialized_start=2583 315 | _EVENT_AGENTRELOADSUCCESS._serialized_end=2609 316 | _EVENT_AGENT._serialized_start=2612 317 | _EVENT_AGENT._serialized_end=3039 318 | _EVENT_SYSTEM._serialized_start=3042 319 | _EVENT_SYSTEM._serialized_end=3222 320 | _EVENTINFO._serialized_start=3225 321 | _EVENTINFO._serialized_end=3355 322 | _AGENTCOMMAND._serialized_start=3358 323 | _AGENTCOMMAND._serialized_end=3722 324 | _SUPPLYINITDATA._serialized_start=3725 325 | _SUPPLYINITDATA._serialized_end=4311 326 | _SUPPLYREFRESHDATA._serialized_start=4314 327 | _SUPPLYREFRESHDATA._serialized_end=4673 328 | _GMCOMMAND._serialized_start=4676 329 | _GMCOMMAND._serialized_end=6173 330 | _AGENTSETUP._serialized_start=6176 331 | _AGENTSETUP._serialized_end=6480 332 | _S2A_REQUEST_DATA._serialized_start=6483 333 | _S2A_REQUEST_DATA._serialized_end=6697 334 | _A2S_REPLY_DATA._serialized_start=6700 335 | _A2S_REPLY_DATA._serialized_end=6887 336 | _REPLAYDATA._serialized_start=6889 337 | _REPLAYDATA._serialized_end=6989 338 | _S2C_STARTGAME._serialized_start=6991 339 | _S2C_STARTGAME._serialized_end=7070 340 | _C2S_STARTGAME._serialized_start=7072 341 | _C2S_STARTGAME._serialized_end=7127 342 | _C2S_LOADINGGAME._serialized_start=7129 343 | _C2S_LOADINGGAME._serialized_end=7146 344 | _S2C_LOADINGGAME._serialized_start=7148 345 | _S2C_LOADINGGAME._serialized_end=7207 346 | _S2C_UPDATEGAMEWORLD._serialized_start=7209 347 | _S2C_UPDATEGAMEWORLD._serialized_end=7230 348 | _C2S_UPDATEPALYEROPERATE._serialized_start=7232 349 | _C2S_UPDATEPALYEROPERATE._serialized_end=7343 350 | _S2C_GAMEOVER._serialized_start=7345 351 | _S2C_GAMEOVER._serialized_end=7359 352 | _COMMANDER._serialized_start=7508 353 | _COMMANDER._serialized_end=7607 354 | # @@protoc_insertion_point(module_scope) 355 | -------------------------------------------------------------------------------- /inspirai_fps/simple_command_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | import inspirai_fps.simple_command_pb2 as simple__command__pb2 6 | 7 | 8 | class CommanderStub(object): 9 | """Missing associated documentation comment in .proto file.""" 10 | 11 | def __init__(self, channel): 12 | """Constructor. 13 | 14 | Args: 15 | channel: A grpc.Channel. 
16 | """ 17 | self.Request_S2A_UpdateGame = channel.unary_unary( 18 | '/fps_command.Commander/Request_S2A_UpdateGame', 19 | request_serializer=simple__command__pb2.S2A_Request_Data.SerializeToString, 20 | response_deserializer=simple__command__pb2.A2S_Reply_Data.FromString, 21 | ) 22 | 23 | 24 | class CommanderServicer(object): 25 | """Missing associated documentation comment in .proto file.""" 26 | 27 | def Request_S2A_UpdateGame(self, request, context): 28 | """Missing associated documentation comment in .proto file.""" 29 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 30 | context.set_details('Method not implemented!') 31 | raise NotImplementedError('Method not implemented!') 32 | 33 | 34 | def add_CommanderServicer_to_server(servicer, server): 35 | rpc_method_handlers = { 36 | 'Request_S2A_UpdateGame': grpc.unary_unary_rpc_method_handler( 37 | servicer.Request_S2A_UpdateGame, 38 | request_deserializer=simple__command__pb2.S2A_Request_Data.FromString, 39 | response_serializer=simple__command__pb2.A2S_Reply_Data.SerializeToString, 40 | ), 41 | } 42 | generic_handler = grpc.method_handlers_generic_handler( 43 | 'fps_command.Commander', rpc_method_handlers) 44 | server.add_generic_rpc_handlers((generic_handler,)) 45 | 46 | 47 | # This class is part of an EXPERIMENTAL API. 48 | class Commander(object): 49 | """Missing associated documentation comment in .proto file.""" 50 | 51 | @staticmethod 52 | def Request_S2A_UpdateGame(request, 53 | target, 54 | options=(), 55 | channel_credentials=None, 56 | call_credentials=None, 57 | insecure=False, 58 | compression=None, 59 | wait_for_ready=None, 60 | timeout=None, 61 | metadata=None): 62 | return grpc.experimental.unary_unary(request, target, '/fps_command.Commander/Request_S2A_UpdateGame', 63 | simple__command__pb2.S2A_Request_Data.SerializeToString, 64 | simple__command__pb2.A2S_Reply_Data.FromString, 65 | options, channel_credentials, 66 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 67 | -------------------------------------------------------------------------------- /inspirai_fps/test_camera_render.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "1aef7e50-9818-4f6b-ac61-b3d34dbe2c2e", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "data": { 11 | "text/plain": [ 12 | "" 13 | ] 14 | }, 15 | "execution_count": 1, 16 | "metadata": {}, 17 | "output_type": "execute_result" 18 | } 19 | ], 20 | "source": [ 21 | "import io\n", 22 | "import random\n", 23 | "from rich.pretty import pprint\n", 24 | "from PIL import Image\n", 25 | "from typing import List\n", 26 | "from inspirai_fps import Game, ActionVariable\n", 27 | "from pyvirtualdisplay.display import Display\n", 28 | "Display().start()" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "id": "0e5a1cc7-7224-4cf1-a93d-fc42ad5d6001", 35 | "metadata": {}, 36 | "outputs": [ 37 | { 38 | "name": "stdout", 39 | "output_type": "stream", 40 | "text": [ 41 | "Loaded valid locations from /mnt/d/Codes/cog-local/map-data/001.json\n" 42 | ] 43 | }, 44 | { 45 | "name": "stderr", 46 | "output_type": "stream", 47 | "text": [ 48 | "concatenating texture: may result in visual artifacts\n" 49 | ] 50 | }, 51 | { 52 | "name": "stdout", 53 | "output_type": "stream", 54 | "text": [ 55 | "Server started ...\n", 56 | "Unity3D started ...\n", 57 | "Unity3D connected ...\n" 58 | ] 59 | } 60 | ], 61 | "source": [ 62 | "game = Game(\n", 63 
| " map_dir=\"/mnt/d/Codes/cog-local/map-data\",\n", 64 | " engine_dir=\"/mnt/d/Codes/cog-local/fps_linux_render\",\n", 65 | ")\n", 66 | "game.init(\"linuxserver.x86_64\")" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": 3, 72 | "id": "4bcbdda0-b2ba-4c79-97a8-b89380cd74eb", 73 | "metadata": {}, 74 | "outputs": [ 75 | { 76 | "name": "stdout", 77 | "output_type": "stream", 78 | "text": [ 79 | "Loaded valid locations from /mnt/d/Codes/cog-local/map-data/001.json\n", 80 | "Started new episode ...\n", 81 | "[keep] Reused map mesh from /mnt/d/Codes/cog-local/map-data/001.obj\n" 82 | ] 83 | }, 84 | { 85 | "data": { 86 | "text/html": [ 87 | "
{\n",
 88 |        "'timeout': 30,\n",
 89 |        "'gameMode': 'NAVIGATION_MODE',\n",
 90 |        "'timeScale': 10,\n",
 91 |        "'mapId': 1,\n",
 92 |        "'randomSeed': 0,\n",
 93 |        "'targetLocation': {'x': 1.0, 'y': 0.0, 'z': 1.0},\n",
 94 |        "'triggerRange': 1.0,\n",
 95 |        "'supplyHeatmapCenter': {'x': 0.0, 'y': 0.0, 'z': 0.0},\n",
 96 |        "'supplyHeatmapRadius': 1,\n",
 97 |        "'supplyCreatePercent': 1,\n",
 98 |        "'supplyHouseCreatePercent': 1,\n",
 99 |        "'supplyGridLength': 10,\n",
100 |        "'supplyRandomMin': 1,\n",
101 |        "'supplyRandomMax': 1,\n",
102 |        "'supplyHouseRandomMin': 10,\n",
103 |        "'supplyHouseRandomMax': 10,\n",
104 |        "'numAgents': 1,\n",
105 |        "'agentSetups': [\n",
106 |        "│   │   {\n",
107 |        "│   │   │   'id': 0,\n",
108 |        "│   │   │   'hp': 100,\n",
109 |        "│   │   │   'numPackAmmo': 60,\n",
110 |        "│   │   │   'gunCapacity': 15,\n",
111 |        "│   │   │   'attackPower': 20,\n",
112 |        "│   │   │   'startLocation': {'x': -72.0, 'y': 1.09, 'z': -60.0},\n",
113 |        "│   │   │   'agentName': 'agent_0'\n",
114 |        "│   │   }\n",
115 |        "],\n",
116 |        "'respawnTime': 10,\n",
117 |        "'isRecord': False,\n",
118 |        "'replaySuffix': '',\n",
119 |        "'supplyLossPercentWhenDead': 50,\n",
120 |        "'waterSpeedDecay': 0.5,\n",
121 |        "'invincibleTime': 10,\n",
122 |        "'use_depth_map': False\n",
123 |        "}\n",
124 |        "
\n" 125 | ], 126 | "text/plain": [ 127 | "\u001b[1m{\u001b[0m\n", 128 | "\u001b[2;32m│ \u001b[0m\u001b[32m'timeout'\u001b[0m: \u001b[1;36m30\u001b[0m,\n", 129 | "\u001b[2;32m│ \u001b[0m\u001b[32m'gameMode'\u001b[0m: \u001b[32m'NAVIGATION_MODE'\u001b[0m,\n", 130 | "\u001b[2;32m│ \u001b[0m\u001b[32m'timeScale'\u001b[0m: \u001b[1;36m10\u001b[0m,\n", 131 | "\u001b[2;32m│ \u001b[0m\u001b[32m'mapId'\u001b[0m: \u001b[1;36m1\u001b[0m,\n", 132 | "\u001b[2;32m│ \u001b[0m\u001b[32m'randomSeed'\u001b[0m: \u001b[1;36m0\u001b[0m,\n", 133 | "\u001b[2;32m│ \u001b[0m\u001b[32m'targetLocation'\u001b[0m: \u001b[1m{\u001b[0m\u001b[32m'x'\u001b[0m: \u001b[1;36m1.0\u001b[0m, \u001b[32m'y'\u001b[0m: \u001b[1;36m0.0\u001b[0m, \u001b[32m'z'\u001b[0m: \u001b[1;36m1.0\u001b[0m\u001b[1m}\u001b[0m,\n", 134 | "\u001b[2;32m│ \u001b[0m\u001b[32m'triggerRange'\u001b[0m: \u001b[1;36m1.0\u001b[0m,\n", 135 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyHeatmapCenter'\u001b[0m: \u001b[1m{\u001b[0m\u001b[32m'x'\u001b[0m: \u001b[1;36m0.0\u001b[0m, \u001b[32m'y'\u001b[0m: \u001b[1;36m0.0\u001b[0m, \u001b[32m'z'\u001b[0m: \u001b[1;36m0.0\u001b[0m\u001b[1m}\u001b[0m,\n", 136 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyHeatmapRadius'\u001b[0m: \u001b[1;36m1\u001b[0m,\n", 137 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyCreatePercent'\u001b[0m: \u001b[1;36m1\u001b[0m,\n", 138 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyHouseCreatePercent'\u001b[0m: \u001b[1;36m1\u001b[0m,\n", 139 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyGridLength'\u001b[0m: \u001b[1;36m10\u001b[0m,\n", 140 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyRandomMin'\u001b[0m: \u001b[1;36m1\u001b[0m,\n", 141 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyRandomMax'\u001b[0m: \u001b[1;36m1\u001b[0m,\n", 142 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyHouseRandomMin'\u001b[0m: \u001b[1;36m10\u001b[0m,\n", 143 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyHouseRandomMax'\u001b[0m: \u001b[1;36m10\u001b[0m,\n", 144 | "\u001b[2;32m│ \u001b[0m\u001b[32m'numAgents'\u001b[0m: \u001b[1;36m1\u001b[0m,\n", 145 | "\u001b[2;32m│ \u001b[0m\u001b[32m'agentSetups'\u001b[0m: \u001b[1m[\u001b[0m\n", 146 | "\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n", 147 | "\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'id'\u001b[0m: \u001b[1;36m0\u001b[0m,\n", 148 | "\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'hp'\u001b[0m: \u001b[1;36m100\u001b[0m,\n", 149 | "\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'numPackAmmo'\u001b[0m: \u001b[1;36m60\u001b[0m,\n", 150 | "\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'gunCapacity'\u001b[0m: \u001b[1;36m15\u001b[0m,\n", 151 | "\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'attackPower'\u001b[0m: \u001b[1;36m20\u001b[0m,\n", 152 | "\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'startLocation'\u001b[0m: \u001b[1m{\u001b[0m\u001b[32m'x'\u001b[0m: \u001b[1;36m-72.0\u001b[0m, \u001b[32m'y'\u001b[0m: \u001b[1;36m1.09\u001b[0m, \u001b[32m'z'\u001b[0m: \u001b[1;36m-60.0\u001b[0m\u001b[1m}\u001b[0m,\n", 153 | "\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'agentName'\u001b[0m: \u001b[32m'agent_0'\u001b[0m\n", 154 | "\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m\n", 155 | "\u001b[2;32m│ \u001b[0m\u001b[1m]\u001b[0m,\n", 156 | "\u001b[2;32m│ \u001b[0m\u001b[32m'respawnTime'\u001b[0m: \u001b[1;36m10\u001b[0m,\n", 157 | "\u001b[2;32m│ \u001b[0m\u001b[32m'isRecord'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n", 158 | "\u001b[2;32m│ \u001b[0m\u001b[32m'replaySuffix'\u001b[0m: \u001b[32m''\u001b[0m,\n", 159 | "\u001b[2;32m│ \u001b[0m\u001b[32m'supplyLossPercentWhenDead'\u001b[0m: \u001b[1;36m50\u001b[0m,\n", 160 | "\u001b[2;32m│ 
\u001b[0m\u001b[32m'waterSpeedDecay'\u001b[0m: \u001b[1;36m0.5\u001b[0m,\n", 161 |        "\u001b[2;32m│ \u001b[0m\u001b[32m'invincibleTime'\u001b[0m: \u001b[1;36m10\u001b[0m,\n", 162 |        "\u001b[2;32m│ \u001b[0m\u001b[32m'use_depth_map'\u001b[0m: \u001b[3;91mFalse\u001b[0m\n", 163 |        "\u001b[1m}\u001b[0m\n" 164 |       ] 165 |      }, 166 |      "metadata": {}, 167 |      "output_type": "display_data" 168 |     }, 169 |     { 170 |      "name": "stdout", 171 |      "output_type": "stream", 172 |      "text": [ 173 |       "number of frames: 300\r" 174 |      ] 175 |     } 176 |    ], 177 |    "source": [ 178 |     "game.random_start_location()\n", 179 |     "game.set_map_id(random.randint(1,1))  # the range (1,1) always yields map 1; widen it to sample other maps\n", 180 |     "game.set_episode_timeout(30)\n", 181 |     "game.new_episode()\n", 182 |     "\n", 183 |     "pprint(game.get_game_config())\n", 184 |     "\n", 185 |     "raw_frames = []\n", 186 |     "\n", 187 |     "while not game.is_episode_finished():\n", 188 |     "    state = game.get_state(render_camera=True)\n", 189 |     "    raw_frames.append(state.image_bytes)\n", 190 |     "    print(f\"number of frames: {len(raw_frames)}\", end=\"\\r\")\n", 191 |     "    action = {\n", 192 |     "        0: [\n", 193 |     "            (ActionVariable.WALK_DIR, random.uniform(0, 360)),\n", 194 |     "            (ActionVariable.WALK_SPEED, random.uniform(0, 10)),\n", 195 |     "            (ActionVariable.TURN_LR_DELTA, 1),\n", 196 |     "        ]\n", 197 |     "    }\n", 198 |     "    game.make_action_by_list(action)\n", 199 |     "\n", 200 |     "camera_frames: List[Image.Image] = [Image.open(io.BytesIO(frame)) for frame in raw_frames]\n", 201 |     "\n", 202 |     "import os\n", 203 |     "\n", 204 |     "img_dir = \"camera_images\"\n", 205 |     "os.makedirs(img_dir, exist_ok=True)\n", 206 |     "\n", 207 |     "camera_frames[0].save(\n", 208 |     "    f\"{img_dir}/camera.gif\",\n", 209 |     "    format=\"GIF\",\n", 210 |     "    append_images=camera_frames[1:],\n", 211 |     "    save_all=True,\n", 212 |     "    duration=100,\n", 213 |     "    loop=0,\n", 214 |     ")" 215 |    ] 216 |   }, 217 |   { 218 |    "cell_type": "code", 219 |    "execution_count": 4, 220 |    "id": "ad65d54c", 221 |    "metadata": {}, 222 |    "outputs": [], 223 |    "source": [ 224 |     "for idx, img in enumerate(camera_frames):\n", 225 |     "    img.save(f\"{img_dir}/img_{idx}.png\")" 226 |    ] 227 |   } 228 |  ], 229 |  "metadata": { 230 |   "kernelspec": { 231 |    "display_name": "Python 3.8.13 ('cog')", 232 |    "language": "python", 233 |    "name": "python3" 234 |   }, 235 |   "language_info": { 236 |    "codemirror_mode": { 237 |     "name": "ipython", 238 |     "version": 3 239 |    }, 240 |    "file_extension": ".py", 241 |    "mimetype": "text/x-python", 242 |    "name": "python", 243 |    "nbconvert_exporter": "python", 244 |    "pygments_lexer": "ipython3", 245 |    "version": "3.8.13" 246 |   }, 247 |   "vscode": { 248 |    "interpreter": { 249 |     "hash": "596aee561713b2a2318ce49706f2778c97970a30e1aa39363faf5950719dd21d" 250 |    } 251 |   } 252 |  }, 253 |  "nbformat": 4, 254 |  "nbformat_minor": 5 255 | } 256 | -------------------------------------------------------------------------------- /inspirai_fps/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import numpy as np 3 | from typing import Any, Dict, Iterable, List, Tuple 4 | from rich.console import Console 5 | from rich.table import Table 6 | 7 | 8 | def load_json(file_path): 9 |     with open(file_path, "r") as f: 10 |         data = json.loads(f.read()) 11 |     return data 12 | 13 | 14 | def get_picth_yaw(x, y, z):  # returns (pitch, yaw) in degrees for the direction vector (x, y, z) 15 |     pitch = np.arctan2(-y, (x**2 + z**2) ** 0.5) / np.pi * 180 16 |     yaw = np.arctan2(x, z) / np.pi * 180 17 |     return pitch, yaw 18 | 19 | 20 | def get_distance(start, target): 21 |     p0 = np.asarray(start) 22 |     p1 = np.asarray(target) 23 |     return np.linalg.norm(p1 - p0) 24 | 25 | 26 | def 
get_position(state): 27 |     return [ 28 |         state.position_x, 29 |         state.position_y, 30 |         state.position_z, 31 |     ] 32 | 33 | 34 | def vector3d_to_list(vec3d): 35 |     return [vec3d.x, vec3d.y, vec3d.z] 36 | 37 | 38 | def get_orientation(state): 39 |     return [ 40 |         0, 41 |         state.pitch, 42 |         state.yaw, 43 |     ] 44 | 45 | 46 | def set_vector3d(vec3d, arr): 47 |     vec3d.x = arr[0] 48 |     vec3d.y = arr[1] 49 |     vec3d.z = arr[2] 50 | 51 | 52 | def set_GM_command(gm_cmd, config: Dict[str, Any]):  # recursively fill a GMCommand protobuf message from a nested config dict 53 |     for key, value in config.items(): 54 |         field = getattr(gm_cmd, key) 55 |         if isinstance(field, (int, float, str)):  # scalar field 56 |             setattr(gm_cmd, key, value) 57 |         elif isinstance(field, Iterable):  # repeated field: add one sub-message per config item 58 |             for obj in value: 59 |                 element = field.add() 60 |                 set_GM_command(element, obj) 61 |         else:  # singular nested message field 62 |             set_GM_command(field, value) 63 | 64 | 65 | def plot_movement_trajectory(points: List[Tuple[float, float]], save_path: str): 66 |     import matplotlib.pyplot as plt 67 | 68 |     xs = [p[0] for p in points] 69 |     ys = [p[1] for p in points] 70 |     plt.plot(xs, ys, "o-", c="r") 71 |     # plt.xlim(-250, 250) 72 |     # plt.ylim(-250, 250) 73 | 74 |     for i, (x, y) in enumerate(points): 75 |         plt.annotate(i * 5, (x, y), c="b") 76 | 77 |     plt.savefig(save_path) 78 | 79 | 80 | class ResultLogger: 81 |     def __init__(self): 82 |         self.console = Console() 83 |         self.monitor_metrics = [ 84 |             (["training_iteration"], 0), 85 |             (["timesteps_total"], 0), 86 |             (["episode_reward_min"], 4), 87 |             (["episode_reward_max"], 4), 88 |             (["episode_reward_mean"], 4), 89 |             (["info", "learner", "default_policy", "learner_stats", "entropy"], 4), 90 |             (["info", "learner", "default_policy", "learner_stats", "kl"], 4), 91 |         ] 92 | 93 |     def get_metric_value(self, result, keys): 94 |         if len(keys) == 1: 95 |             return result[keys[0]] 96 |         return self.get_metric_value(result[keys[0]], keys[1:]) 97 | 98 |     def print_result(self, res): 99 |         table = Table(show_header=True, header_style="bold magenta") 100 |         for metric, _ in self.monitor_metrics: 101 |             table.add_column(metric[-1]) 102 |         val_str_list = [ 103 |             f"{round(self.get_metric_value(res, metric), prec)}" 104 |             for metric, prec in self.monitor_metrics 105 |         ] 106 |         table.add_row(*val_str_list) 107 |         self.console.print(table) 108 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | import pathlib 3 | 4 | 5 | here = pathlib.Path(__file__).parent.resolve() 6 | 7 | # Get the long description from the README file 8 | long_description = (here / "README.md").read_text(encoding="utf-8") 9 | 10 | # non-python data that needs to be installed along with the main python modules 11 | package_data = ["__init__.py", "lib/*"] 12 | 13 | 14 | setup( 15 |     name="inspirai_fps", 16 |     version="1.0.0", 17 |     author="Inspir.AI", 18 |     author_email="cog2022@inspirai.com", 19 |     url="https://github.com/inspirai/wilderness-scavenger", 20 |     description="An intelligent agent learning platform based on a 3D open-world FPS game", 21 |     long_description=long_description, 22 |     long_description_content_type="text/markdown", 23 |     packages=["inspirai_fps"], 24 |     python_requires=">=3.8, <4", 25 |     package_data={"inspirai_fps": package_data}, 26 |     install_requires=["requests", "Pillow", "numpy", "grpcio", "rich", "protobuf==3.20", "trimesh", "shapely"], 27 |     extras_require={"baseline": ["gym", "ray[rllib]", "torch"]}, 28 |     keywords=[ 29 |         "inspirai", 30 |         "fps", 31 |         "game", 32 |         "open world", 33 |         "ai", 34 |         "deep learning", 35 | 
"reinforcement learning", 36 | "research", 37 | ], 38 | license="LICENSE", 39 | ) 40 | -------------------------------------------------------------------------------- /submission_template/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:latest 2 | 3 | WORKDIR /tmp/install 4 | RUN pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple 5 | RUN pip install -U pip && pip install https://codeload.github.com/inspirai/wilderness-scavenger/zip/refs/heads/master 6 | 7 | # add your additional python denpendencies in the requirements.txt file 8 | COPY requirements.txt requirements.txt 9 | RUN pip install -r requirements.txt 10 | 11 | # ======================================================== 12 | # Install other non-python dependencies here (if any) 13 | # ======================================================== 14 | 15 | # DO NOT MODIFY BELOW THIS LINE 16 | WORKDIR /home/inspirai 17 | COPY submission submission 18 | COPY run.sh run.sh 19 | -------------------------------------------------------------------------------- /submission_template/README.md: -------------------------------------------------------------------------------- 1 | # Submitting solutions for online evaluation 2 | 3 | Here we provide a template for you to easily pack your solution and upload it to our online evaluation system. 4 | 5 | ## Template Structure 6 | 7 | ```bash 8 | submission_template 9 | ├── Dockerfile # Dockerfile for building the submission image 10 | ├── run.sh # Evaluation pipeline entrypoint 11 | ├── common.py # Common functions and variables 12 | ├── eval.py # Main evaluation script 13 | ├── eval_track_1_1.py # Evaluation function for track 1.1 14 | ├── eval_track_1_2.py # Evaluation function for track 1.2 15 | ├── eval_track_2.py # Evaluation function for track 2 16 | ├── requirements.txt # Additional python packages required by the submission 17 | └── submission # Submission source code and data 18 | ├── __init__.py # Making the submission folder a python package 19 | ├── other files or data # Other files or data 20 | └── agents.py # Agent classes for the 3 tracks 21 | ``` 22 | 23 | ## Implement your solution 24 | 25 | - Modify the code in `submission/agents.py` to implement your agents. Below is an example of how to implement a simple navigation agent. 26 | 27 | ```python 28 | class AgentNavigation: 29 | """ 30 | This is a template of an agent for the navigation task. 31 | TODO: Modify the code in this class to implement your agent here. 
32 |     """ 33 | 34 |     def __init__(self, episode_info) -> None: 35 |         self.episode_info = episode_info 36 | 37 |     def act(self, ts: int, state: AgentState) -> NavigationAction: 38 |         pos = np.asarray(get_position(state)) 39 |         tar = np.asarray(self.episode_info["target_location"]) 40 |         dir = tar - pos 41 |         dir = dir / np.linalg.norm(dir) 42 |         walk_dir = get_picth_yaw(*dir)[1] % 360 43 | 44 |         return NavigationAction( 45 |             walk_dir=walk_dir, 46 |             walk_speed=5, 47 |             turn_lr_delta=0, 48 |             look_ud_delta=0, 49 |             jump=False, 50 |         ) 51 | ``` 52 | 53 | - You can also add additional files (`.py` modules or data) to the `submission` folder and import them in `submission/agents.py` (a short sketch is given in the appendix at the end of this README) 54 | - If you need additional python packages, add them to the `requirements.txt` file 55 | 56 | ```bash 57 | pip freeze > requirements.txt 58 | ``` 59 | 60 | - If you have other non-python dependencies, add installation commands in the `Dockerfile` file 61 | 62 | ## Test your solution locally 63 | 64 | - Once you have finished preparing the submission folder, you can test your solution locally by running the following command (for example): 65 | 66 | ```bash 67 | # make sure you are in the root of this template folder 68 | python eval.py --track 1a \ 69 |     --local-test --map-dir /path/to/map-data --engine-dir /path/to/backend-engine \ 70 |     --map-list 1 2 3 --episodes-per-map 2 --episode-timeout 10 71 | ``` 72 | 73 | ## Submit your solution 74 | 75 | Once you are satisfied with your solution, you can submit it by following the steps below: 76 | 77 | - pack your solution into a zip file 78 | 79 | ```bash 80 | # make sure you are in the root of this template folder 81 | zip -r /path/to/submission.zip Dockerfile run.sh requirements.txt submission 82 | ``` 83 | - make sure your zip package includes the following content 84 | 85 | ```bash 86 | . 87 | ├── Dockerfile # Dockerfile for building the submission image 88 | ├── run.sh # Evaluation pipeline entrypoint 89 | ├── requirements.txt # Additional python packages required by the submission 90 | └── submission # Submission source code and data 91 |     ├── __init__.py # Making the submission folder a python package 92 |     └── agents.py # Agent classes for the 3 tracks 93 | ``` 94 | 95 | - upload your solution to our [online evaluation system](https://wildscav-eval.inspirai.com) (the maximum size of the zip file is limited to 500MB) 96 | 97 | ## Important Notes 98 | 99 | - Do not modify the `eval.py` file. 100 | - Modify the `Dockerfile` file only when you need to add additional non-python dependencies. 101 | - Evaluation scripts for the 3 tracks (`eval_track_*.py`) are only for your reference. You can use them to test your solution locally. The actual evaluation code may be different. 102 | 103 | ## Evaluation Rules 104 | - Register an account for your team to use the online evaluation system. 105 | - Sign-ups for team accounts are reviewed by the organizers first, and you will be notified by email once your team account is activated. 106 | - Your team can keep only one (latest) submission for each track. 107 | - We will only evaluate the latest submission for each track once every day. 108 | - Your submissions will be reviewed by the organizers to check for legality before being built and allowed to run. 109 | - You can check the evaluation progress and results after your submission starts running successfully. 
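## Appendix: adding extra modules to the submission package

As mentioned in the "Implement your solution" section above, you can ship extra `.py` modules inside the `submission` package. The sketch below illustrates that workflow; the module name `geometry.py` and the `direction_to` helper are hypothetical examples of ours, not files shipped with this template. Because `submission` is a python package (it contains an `__init__.py`), any module you place inside it can be imported with the usual package syntax.

```python
# submission/geometry.py -- hypothetical helper module (illustration only, not part of the template)
import numpy as np


def direction_to(src, dst):
    """Return the normalized 3D direction vector pointing from src to dst."""
    vec = np.asarray(dst, dtype=float) - np.asarray(src, dtype=float)
    norm = np.linalg.norm(vec)
    return vec / norm if norm > 0 else vec  # avoid division by zero when src == dst
```

```python
# submission/agents.py (excerpt) -- importing the hypothetical helper
from submission.geometry import direction_to
```

Any such extra files are picked up by the packaging step above, since the whole `submission` folder goes into the zip.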
110 | -------------------------------------------------------------------------------- /submission_template/common.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import argparse 3 | 4 | 5 | # game configs -- depth map w/h ratio is 16:9 6 | DEPTH_MAP_FAR = 200 7 | DEPTH_MAP_WIDTH = 64 8 | DEPTH_MAP_HEIGHT = 36 9 | TURN_ON_RECORDING = False 10 | 11 | 12 | class RunningStatus: 13 |     PENDING = 0 14 |     STARTED = 1 15 |     FINISHED = 2 16 |     STOPPED = 3 17 |     ERROR = 5 18 | 19 | 20 | DEFAULT_PAYLOAD = { 21 |     "id": None, 22 |     "status": RunningStatus.PENDING, 23 |     "current_episode": 0, 24 |     "total_episodes": 0, 25 |     "average_time_use": 0, 26 |     "average_time_punish": 0, 27 |     "average_time_total": 0, 28 |     "success_rate": 0, 29 |     "average_supply": 0, 30 | } 31 | 32 | 33 | def get_args(): 34 |     parser = argparse.ArgumentParser() 35 |     parser.add_argument("--map-list", type=int, nargs="+", default=list(range(91, 100))) 36 |     parser.add_argument("--map-dir", type=str, default="/data/map-data") 37 |     parser.add_argument("--engine-dir", type=str, default="/data/fps_linux") 38 |     parser.add_argument("--episodes-per-map", type=int, default=10) 39 |     parser.add_argument("--seed", type=int, default=0) 40 |     parser.add_argument("--eval-id", type=int, required=True) 41 |     return parser.parse_args() 42 | 43 | 44 | def send_results(data):  # report progress/results to the evaluation server by encoding all fields as URL query parameters 45 |     url_head = "https://wildscav-eval.inspirai.com/api/evaluations/status?token=baiyangshidai_inspir" 46 |     url = url_head + "&" + "&".join([f"{k}={v}" for k, v in data.items()]) 47 |     message = requests.get(url, timeout=3).text 48 |     print("Response: %s" % message) 49 | 50 | 51 | if __name__ == "__main__": 52 |     send_results( 53 |         { 54 |             "id": 7, 55 |             "status": RunningStatus.STARTED, 56 |             "current_episode": 5, 57 |             "total_episodes": 10, 58 |             "average_time_use": 0.1, 59 |             "average_time_punish": 0.2, 60 |             "average_time_total": 0.3, 61 |             "success_rate": 0.4, 62 |             "num_supply": 0, 63 |         } 64 |     ) 65 | -------------------------------------------------------------------------------- /submission_template/eval.py: -------------------------------------------------------------------------------- 1 | REMOTE_MAP_DIR = "/root/map-data" 2 | REMOTE_ENGINE_DIR = "/root/fps_linux_eval" 3 | LOCAL_MAP_DIR = "/mnt/d/Codes/cog-local/map-data" 4 | LOCAL_ENGINE_DIR = "/mnt/d/Codes/cog-local/fps_linux_train" 5 | 6 | 7 | if __name__ == "__main__": 8 |     import os 9 |     import argparse 10 |     from rich.pretty import pprint 11 | 12 |     parser = argparse.ArgumentParser(description="Run evaluation for different tracks") 13 |     parser.add_argument("--track", type=str, default="1a", help="Track to evaluate") 14 |     parser.add_argument("--map-list", type=int, nargs="+", default=[1]) 15 |     parser.add_argument("--map-dir", type=str, default=LOCAL_MAP_DIR) 16 |     parser.add_argument("--engine-dir", type=str, default=LOCAL_ENGINE_DIR) 17 |     parser.add_argument("--episodes-per-map", type=int, default=10) 18 |     parser.add_argument("--episode-timeout", type=int, default=None) 19 |     parser.add_argument("--seed", type=int, default=0) 20 |     parser.add_argument("--local-test", action="store_true") 21 |     args = parser.parse_args() 22 | 23 |     eval_id = os.environ.get("EVAL_ID", "0") 24 | 25 |     print(f">>>>>>>>>>>>>>>>>> Evaluation ID: {eval_id} <<<<<<<<<<<<<<<<<<<<") 26 | 27 |     if not args.local_test: 28 |         args.map_dir = REMOTE_MAP_DIR 29 |         args.engine_dir = REMOTE_ENGINE_DIR 30 | 31 |     if args.track == "1a": 32 |         from eval_track_1_1 import run_eval 33 |         if not args.local_test: 34 |             args.map_list = 
list(range(101, 111)) 35 | args.episode_timeout = 60 * 5 36 | args.episodes_per_map = 10 37 | elif args.track == "1b": 38 | from eval_track_1_2 import run_eval 39 | if not args.local_test: 40 | args.map_list = list(range(111, 121)) 41 | args.episode_timeout = 60 * 10 42 | args.episodes_per_map = 10 43 | elif args.track == "2": 44 | from eval_track_2 import run_eval 45 | if not args.local_test: 46 | args.map_list = list(range(121, 131)) 47 | args.episode_timeout = 60 * 15 48 | args.episodes_per_map = 1 49 | else: 50 | raise ValueError(f"Unknown track {args.track}") 51 | 52 | pprint(args) 53 | 54 | from inspirai_fps import Game 55 | from common import RunningStatus, DEFAULT_PAYLOAD, send_results 56 | 57 | game = Game(map_dir=args.map_dir, engine_dir=args.engine_dir) 58 | game.init() 59 | 60 | data = DEFAULT_PAYLOAD.copy() 61 | data.update({ 62 | "id": eval_id, 63 | "status": RunningStatus.STARTED, 64 | "total_episodes": len(args.map_list) * args.episodes_per_map 65 | }) 66 | 67 | if args.local_test: 68 | run_eval(game, args, data) 69 | else: 70 | try: 71 | run_eval(game, args, data) 72 | except Exception as e: 73 | print(e) 74 | data.update({"status": RunningStatus.ERROR}) 75 | send_results(data) 76 | 77 | game.close() 78 | -------------------------------------------------------------------------------- /submission_template/eval_track_1_1.py: -------------------------------------------------------------------------------- 1 | from inspirai_fps import ActionVariable 2 | 3 | 4 | # evaluation configs 5 | USED_ACTIONS = [ 6 | ActionVariable.WALK_DIR, 7 | ActionVariable.WALK_SPEED, 8 | ActionVariable.TURN_LR_DELTA, 9 | ActionVariable.LOOK_UD_DELTA, 10 | ActionVariable.JUMP, 11 | ] 12 | 13 | 14 | def run_eval(game, args, message_data): 15 | from inspirai_fps import Game 16 | from inspirai_fps.utils import get_position 17 | from submission.agents import AgentNavigation 18 | 19 | import random 20 | from functools import partial 21 | from rich.console import Console 22 | from common import ( 23 | DEPTH_MAP_WIDTH, 24 | DEPTH_MAP_HEIGHT, 25 | DEPTH_MAP_FAR, 26 | RunningStatus, 27 | send_results 28 | ) 29 | 30 | random.seed(args.seed) 31 | print = partial(Console().print, style="bold magenta") 32 | 33 | # configure game 34 | game.set_game_mode(Game.MODE_NAVIGATION) 35 | game.set_random_seed(args.seed) 36 | game.set_available_actions(USED_ACTIONS) 37 | game.set_episode_timeout(args.episode_timeout) 38 | game.set_depth_map_size(DEPTH_MAP_WIDTH, DEPTH_MAP_HEIGHT, DEPTH_MAP_FAR) 39 | 40 | results = [] 41 | ep_idx = 0 42 | 43 | message_data.update({"current_episode": ep_idx}) 44 | send_results(message_data) 45 | 46 | for map_id in args.map_list: 47 | game.set_map_id(map_id) 48 | 49 | for ep in range(args.episodes_per_map): 50 | game.random_start_location() 51 | game.random_target_location() 52 | game.new_episode() 53 | 54 | episode_info = { 55 | "start_location": game.get_start_location(), 56 | "target_location": game.get_target_location(), 57 | "time_step_per_action": game.time_step_per_action, 58 | } 59 | 60 | agent = AgentNavigation(episode_info) 61 | 62 | print(f">>>>>> Map {map_id:03d} - Episode {ep} <<<<<<") 63 | 64 | start = [round(x, 2) for x in episode_info["start_location"]] 65 | target = [round(x, 2) for x in episode_info["target_location"]] 66 | 67 | while not game.is_episode_finished(): 68 | ts = game.get_time_step() 69 | state = game.get_state() 70 | action = agent.act(ts, state) 71 | game.make_action({0: action}) 72 | 73 | if ts % game.frame_rate == 0: 74 | curr_location = [round(x, 2) for x 
in get_position(state)] 75 | print( 76 | f"{map_id=}\t{ep=}\t{ts=}\t{start=} => {target=}\t{curr_location=}" 77 | ) 78 | 79 | res = game.get_game_result() 80 | results.append(res) 81 | 82 | print(f">>>>> Episode ends <<<<<") 83 | print(res) 84 | 85 | ep_idx += 1 86 | 87 | message_data.update({ 88 | "current_episode": ep_idx, 89 | "average_time_use": sum(r["used_time"] for r in results) / len(results), 90 | "average_time_punish": sum(r["punish_time"] for r in results) / len(results), 91 | "success_rate": sum(r["reach_target"] for r in results) / len(results) 92 | }) 93 | message_data["average_time_total"] = message_data["average_time_use"] + message_data["average_time_punish"] 94 | send_results(message_data) 95 | 96 | message_data.update({"status": RunningStatus.FINISHED}) 97 | send_results(message_data) 98 | -------------------------------------------------------------------------------- /submission_template/eval_track_1_2.py: -------------------------------------------------------------------------------- 1 | from inspirai_fps import ActionVariable 2 | 3 | 4 | # evaluation configs 5 | USED_ACTIONS = [ 6 | ActionVariable.WALK_DIR, 7 | ActionVariable.WALK_SPEED, 8 | ActionVariable.TURN_LR_DELTA, 9 | ActionVariable.LOOK_UD_DELTA, 10 | ActionVariable.JUMP, 11 | ActionVariable.PICKUP, 12 | ] 13 | SUPPLY_CONFIGS = { 14 | "supply_center": [ 15 | [0.0, 0.0], 16 | [0.0, 20], 17 | [0.0, -20], 18 | [20, 0.0], 19 | [20, 20], 20 | [20, -20], 21 | [-20, 0.0], 22 | [-20, 20], 23 | [-20, -20], 24 | ], 25 | "supply_radius": [ 26 | 10, 27 | 30, 28 | 50, 29 | 70, 30 | ], 31 | "supply_richness_outdoor": [10, 20, 30], 32 | "supply_richness_indoor": [50, 70, 90], 33 | "supply_spacing": [ 34 | 5, 35 | 10, 36 | 15, 37 | 20, 38 | ], 39 | "supply_indoor_quantity_range": { 40 | "qmin": 10, 41 | "qmax": 50, 42 | }, 43 | "supply_outdoor_quantity_range": { 44 | "qmin": 1, 45 | "qmax": 5, 46 | }, 47 | } 48 | 49 | 50 | def run_eval(game, args, message_data): 51 | from inspirai_fps import Game 52 | from inspirai_fps.utils import get_position 53 | from submission.agents import AgentSupplyGathering 54 | 55 | import random 56 | from functools import partial 57 | from rich.console import Console 58 | from common import ( 59 | DEPTH_MAP_WIDTH, 60 | DEPTH_MAP_HEIGHT, 61 | DEPTH_MAP_FAR, 62 | RunningStatus, 63 | send_results 64 | ) 65 | 66 | random.seed(args.seed) 67 | print = partial(Console().print, style="bold magenta") 68 | 69 | game.set_random_seed(args.seed) 70 | game.set_game_mode(Game.MODE_SUP_GATHER) 71 | game.set_episode_timeout(args.episode_timeout) 72 | game.set_available_actions(USED_ACTIONS) 73 | game.set_depth_map_size(DEPTH_MAP_WIDTH, DEPTH_MAP_HEIGHT, DEPTH_MAP_FAR) 74 | game.set_supply_heatmap_center(random.choice(SUPPLY_CONFIGS["supply_center"])) 75 | game.set_supply_heatmap_radius(random.choice(SUPPLY_CONFIGS["supply_radius"])) 76 | game.set_supply_outdoor_richness( 77 | random.choice(SUPPLY_CONFIGS["supply_richness_outdoor"]) 78 | ) 79 | game.set_supply_indoor_richness( 80 | random.choice(SUPPLY_CONFIGS["supply_richness_indoor"]) 81 | ) 82 | game.set_supply_spacing(random.choice(SUPPLY_CONFIGS["supply_spacing"])) 83 | game.set_supply_indoor_quantity_range( 84 | **SUPPLY_CONFIGS["supply_indoor_quantity_range"] 85 | ) 86 | game.set_supply_outdoor_quantity_range( 87 | **SUPPLY_CONFIGS["supply_outdoor_quantity_range"] 88 | ) 89 | 90 | results = [] 91 | ep_idx = 0 92 | 93 | message_data.update({"current_episode": ep_idx}) 94 | send_results(message_data) 95 | 96 | for map_id in args.map_list: 97 | 
game.set_map_id(map_id) 98 | 99 | for ep in range(args.episodes_per_map): 100 | game.random_start_location() 101 | game.new_episode() 102 | 103 | episode_info = { 104 | "start_location": game.get_start_location(), 105 | "supply_heatmap_center": game.get_supply_heatmap_center(), 106 | "supply_heatmap_radius": game.get_supply_heatmap_radius(), 107 | "time_step_per_action": game.time_step_per_action, 108 | } 109 | 110 | agent = AgentSupplyGathering(episode_info) 111 | 112 | print(f">>>>>> Map {map_id:03d} - Episode {ep} <<<<<<") 113 | 114 | while not game.is_episode_finished(): 115 | ts = game.get_time_step() 116 | state = game.get_state() 117 | action = agent.act(ts, state) 118 | game.make_action({0: action}) 119 | 120 | if ts % 50 == 0: 121 | walk_dir = round(action.walk_dir, 2) 122 | curr_loc = [round(x, 2) for x in get_position(state)] 123 | num_supply = state.num_supply 124 | print( 125 | f"{map_id=}\t{ep=}\t{ts=}\t{curr_loc=}\t{num_supply=}\t{walk_dir=}" 126 | ) 127 | 128 | res = game.get_game_result() 129 | results.append(res) 130 | 131 | print(f">>>>> Episode ends <<<<<") 132 | print(res) 133 | 134 | ep_idx += 1 135 | 136 | message_data.update({ 137 | "current_episode": ep_idx, 138 | "average_supply": sum(r["num_supply"] for r in results) / len(results) 139 | }) 140 | send_results(message_data) 141 | 142 | message_data.update({"status": RunningStatus.FINISHED}) 143 | send_results(message_data) 144 | -------------------------------------------------------------------------------- /submission_template/eval_track_2.py: -------------------------------------------------------------------------------- 1 | from inspirai_fps import ActionVariable 2 | 3 | # evaluation configs 4 | NUM_AGENTS = 10 5 | USED_ACTIONS = [ 6 | ActionVariable.WALK_DIR, 7 | ActionVariable.WALK_SPEED, 8 | ActionVariable.TURN_LR_DELTA, 9 | ActionVariable.LOOK_UD_DELTA, 10 | ActionVariable.JUMP, 11 | ActionVariable.PICKUP, 12 | ActionVariable.ATTACK, 13 | ActionVariable.RELOAD, 14 | ] 15 | SUPPLY_CONFIGS = { 16 | "supply_center": [ 17 | [0.0, 0.0], 18 | [0.0, 20], 19 | [0.0, -20], 20 | [20, 0.0], 21 | [20, 20], 22 | [20, -20], 23 | [-20, 0.0], 24 | [-20, 20], 25 | [-20, -20], 26 | ], 27 | "supply_radius": [100], 28 | "supply_richness_outdoor": [10, 20, 30], 29 | "supply_richness_indoor": [70, 80, 90], 30 | "supply_spacing": [5], 31 | "supply_indoor_quantity_range": { 32 | "qmin": 10, 33 | "qmax": 50, 34 | }, 35 | "supply_outdoor_quantity_range": { 36 | "qmin": 1, 37 | "qmax": 5, 38 | }, 39 | "supply_refresh": { 40 | "refresh_time": [600], 41 | "heatmap_radius": [30, 50, 70], 42 | "outdoor_richness": [10, 20, 30], 43 | "indoor_richness": [70, 80, 90], 44 | }, 45 | } 46 | 47 | 48 | def run_eval(game, args, message_data): 49 | from inspirai_fps import Game 50 | from inspirai_fps.utils import get_position 51 | from submission.agents import AgentSupplyBattle 52 | 53 | import random 54 | from functools import partial 55 | from rich.console import Console 56 | from common import ( 57 | DEPTH_MAP_WIDTH, 58 | DEPTH_MAP_HEIGHT, 59 | DEPTH_MAP_FAR, 60 | RunningStatus, 61 | send_results, 62 | ) 63 | 64 | random.seed(args.seed) 65 | print = partial(Console().print, style="bold magenta") 66 | 67 | game.set_random_seed(args.seed) 68 | game.set_game_mode(Game.MODE_SUP_BATTLE) 69 | game.set_episode_timeout(args.episode_timeout) 70 | game.set_available_actions(USED_ACTIONS) 71 | game.set_depth_map_size(DEPTH_MAP_WIDTH, DEPTH_MAP_HEIGHT, DEPTH_MAP_FAR) 72 | for agent_id in range(1, NUM_AGENTS): 73 | game.add_agent() 74 | 75 | results = [] 
76 | ep_idx = 0 77 | 78 | message_data.update({"current_episode": ep_idx}) 79 | send_results(message_data) 80 | 81 | for map_id in args.map_list: 82 | game.set_map_id(map_id) 83 | 84 | for ep in range(args.episodes_per_map): 85 | for agent_id in range(NUM_AGENTS): 86 | game.random_start_location(agent_id) 87 | 88 | game.set_supply_heatmap_center( 89 | random.choice(SUPPLY_CONFIGS["supply_center"]) 90 | ) 91 | game.set_supply_heatmap_radius( 92 | random.choice(SUPPLY_CONFIGS["supply_radius"]) 93 | ) 94 | game.set_supply_outdoor_richness( 95 | random.choice(SUPPLY_CONFIGS["supply_richness_outdoor"]) 96 | ) 97 | game.set_supply_indoor_richness( 98 | random.choice(SUPPLY_CONFIGS["supply_richness_indoor"]) 99 | ) 100 | game.set_supply_spacing(random.choice(SUPPLY_CONFIGS["supply_spacing"])) 101 | game.set_supply_indoor_quantity_range( 102 | **SUPPLY_CONFIGS["supply_indoor_quantity_range"] 103 | ) 104 | game.set_supply_outdoor_quantity_range( 105 | **SUPPLY_CONFIGS["supply_outdoor_quantity_range"] 106 | ) 107 | 108 | game.clear_supply_refresh() 109 | 110 | refresh_config_pool = SUPPLY_CONFIGS["supply_refresh"] 111 | heatmap_centers = [] 112 | heatmap_radius = [] 113 | 114 | for time in refresh_config_pool["refresh_time"]: 115 | center = random.choice(game.get_valid_locations()["indoor"]) 116 | radius = random.choice(refresh_config_pool["heatmap_radius"]) 117 | 118 | game.add_supply_refresh( 119 | refresh_time=time, 120 | heatmap_center=center, 121 | heatmap_radius=radius, 122 | indoor_richness=random.choice( 123 | refresh_config_pool["indoor_richness"] 124 | ), 125 | outdoor_richness=random.choice( 126 | refresh_config_pool["outdoor_richness"] 127 | ), 128 | ) 129 | 130 | heatmap_centers.append(center) 131 | heatmap_radius.append(radius) 132 | 133 | print(game.get_game_config()) 134 | 135 | game.new_episode() 136 | 137 | episode_info = { 138 | "start_location": game.get_start_location(), 139 | "supply_heatmap_center": game.get_supply_heatmap_center(), 140 | "supply_heatmap_radius": game.get_supply_heatmap_radius(), 141 | "refresh_time": refresh_config_pool["refresh_time"], 142 | "refresh_heatmap_center": heatmap_centers, 143 | "refresh_heatmap_radius": heatmap_radius, 144 | "time_step_per_action": game.time_step_per_action, 145 | } 146 | 147 | agents = {} 148 | for agent_id in range(NUM_AGENTS): 149 | if agent_id == 0: 150 | agents[agent_id] = AgentSupplyBattle(episode_info) # Your agent here 151 | else: 152 | info = episode_info.copy() 153 | info["start_location"] = game.get_start_location(agent_id) 154 | agents[agent_id] = AgentSupplyBattle(info) # will be replaced with our robot agent 155 | 156 | print(f">>>>>> Map {map_id:03d} - Episode {ep} <<<<<<") 157 | 158 | while not game.is_episode_finished(): 159 | ts = game.get_time_step() 160 | state_all = game.get_state_all() 161 | action_all = { 162 | agent_id: agents[agent_id].act(ts, state_all[agent_id]) 163 | for agent_id in range(NUM_AGENTS) 164 | } 165 | game.make_action(action_all) 166 | 167 | if ts % 50 == 0: 168 | walk_dir = round(action_all[0].walk_dir, 2) 169 | curr_loc = [round(x, 2) for x in get_position(state_all[0])] 170 | num_supply = state_all[0].num_supply 171 | print( 172 | f"{map_id=}\t{ep=}\t{ts=}\t{curr_loc=}\t{num_supply=}\t{walk_dir=}" 173 | ) 174 | 175 | res = game.get_game_result() 176 | results.append(res) 177 | 178 | print(f">>>>> Episode ends <<<<<") 179 | print(res) 180 | 181 | ep_idx += 1 182 | message_data.update( 183 | { 184 | "current_episode": ep_idx, 185 | "average_supply": sum(r["num_supply"] for r in results) 
186 | / len(results), 187 | } 188 | ) 189 | send_results(message_data) 190 | 191 | message_data.update({"status": RunningStatus.FINISHED}) 192 | send_results(message_data) 193 | -------------------------------------------------------------------------------- /submission_template/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inspirai/wilderness-scavenger/4c2be3796cb311601c127fd6e1791619f029934f/submission_template/requirements.txt -------------------------------------------------------------------------------- /submission_template/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "======== Copying evaluation scripts ========" 6 | cp /root/submission_template/*.py . && ls 7 | echo "================= Finished =================" 8 | 9 | python eval.py --track $1 > /root/logs/${EVAL_ID}.txt 2>&1 10 | -------------------------------------------------------------------------------- /submission_template/submission/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inspirai/wilderness-scavenger/4c2be3796cb311601c127fd6e1791619f029934f/submission_template/submission/__init__.py -------------------------------------------------------------------------------- /submission_template/submission/agents.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import random 4 | import numpy as np 5 | from typing import NamedTuple 6 | from inspirai_fps.gamecore import AgentState 7 | from inspirai_fps.utils import get_position, get_picth_yaw 8 | 9 | 10 | # DO NOT MODIFY THIS CLASS 11 | class NavigationAction(NamedTuple): 12 | walk_dir: float 13 | walk_speed: float 14 | turn_lr_delta: float 15 | look_ud_delta: float 16 | jump: bool 17 | 18 | 19 | # DO NOT MODIFY THIS CLASS 20 | class SupplyGatherAction(NamedTuple): 21 | walk_dir: float 22 | walk_speed: float 23 | turn_lr_delta: float 24 | look_ud_delta: float 25 | jump: bool 26 | pickup: bool 27 | 28 | 29 | # DO NOT MODIFY THIS CLASS 30 | class SupplyBattleAction(NamedTuple): 31 | walk_dir: float 32 | walk_speed: float 33 | turn_lr_delta: float 34 | look_ud_delta: float 35 | jump: bool 36 | pickup: bool 37 | attack: bool 38 | reload: bool 39 | 40 | 41 | import ray 42 | from ray.rllib.agents import ppo 43 | 44 | 45 | class AgentNavigation: 46 | """ 47 | This is a template of an agent for the navigation task. 48 | TODO: Modify the code in this class to implement your agent here. 
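    The template below restores a ray.rllib PPO policy from model.pth and maps
    the {cur_loc, tar_loc} observation to a discrete walk action; act_backup is
    a rule-based fallback that simply walks straight toward the target location.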
49 | """ 50 | 51 | # the model file is saved in the same directory as this file 52 | MODEL_PATH = os.path.join(os.path.dirname(__file__), "model.pth") 53 | 54 | def __init__(self, episode_info) -> None: 55 | self.episode_info = episode_info 56 | 57 | from submission.envs import NavigationEnv 58 | 59 | obs_space = NavigationEnv.OBS_SPACE 60 | act_space = NavigationEnv.ACT_SPACE 61 | 62 | self.policy = ppo.PPOTorchPolicy(obs_space, act_space, {}) 63 | self.policy.model.load_state_dict(torch.load(self.MODEL_PATH)) 64 | 65 | def act(self, ts: int, state: AgentState) -> NavigationAction: 66 | obs = { 67 | "cur_loc": np.asarray(get_position(state)), 68 | "tar_loc": np.asarray(self.episode_info["target_location"]), 69 | } 70 | 71 | action_dict = self.policy.compute_single_action( 72 | observation=obs, 73 | explore=True 74 | )[0] 75 | walk_dir = action_dict["walk_dir"] 76 | walk_speed = action_dict["walk_speed"] 77 | 78 | return NavigationAction( 79 | walk_dir=walk_dir, 80 | walk_speed=walk_speed, 81 | turn_lr_delta=0, 82 | look_ud_delta=0, 83 | jump=False, 84 | ) 85 | 86 | def act_backup(self, ts: int, state: AgentState) -> NavigationAction: 87 | pos = np.asarray(get_position(state)) 88 | tar = np.asarray(self.episode_info["target_location"]) 89 | dir = tar - pos 90 | dir = dir / np.linalg.norm(dir) 91 | walk_dir = get_picth_yaw(*dir)[1] % 360 92 | 93 | return NavigationAction( 94 | walk_dir=walk_dir, 95 | walk_speed=5, 96 | turn_lr_delta=0, 97 | look_ud_delta=0, 98 | jump=False, 99 | ) 100 | 101 | 102 | class AgentSupplyGathering: 103 | """ 104 | This is a template of an agent for the supply gathering task. 105 | TODO: Modify the code in this class to implement your agent here. 106 | """ 107 | 108 | def __init__(self, episode_info) -> None: 109 | self.episode_info = episode_info 110 | 111 | def act(self, ts: int, state: AgentState) -> SupplyGatherAction: 112 | pos = np.asarray(get_position(state)) 113 | if state.supply_states: 114 | supply_info = list(state.supply_states.values())[0] 115 | tar = np.asarray(get_position(supply_info)) 116 | dir = tar - pos 117 | dir = dir / np.linalg.norm(dir) 118 | walk_dir = get_picth_yaw(*dir)[1] % 360 119 | else: 120 | walk_dir = random.randint(0, 360) 121 | 122 | return SupplyGatherAction( 123 | walk_dir=walk_dir, 124 | walk_speed=5, 125 | turn_lr_delta=0, 126 | look_ud_delta=0, 127 | jump=False, 128 | pickup=True, 129 | ) 130 | 131 | 132 | class AgentSupplyBattle: 133 | """ 134 | This is a template of an agent for the supply battle task. 135 | TODO: Modify the code in this class to implement your agent here. 
136 | """ 137 | 138 | def __init__(self, episode_info) -> None: 139 | self.episode_info = episode_info 140 | 141 | def act(self, ts: int, state: AgentState) -> SupplyBattleAction: 142 | pos = np.asarray(get_position(state)) 143 | if state.supply_states: 144 | supply_info = list(state.supply_states.values())[0] 145 | tar = np.asarray(get_position(supply_info)) 146 | dir = tar - pos 147 | dir = dir / np.linalg.norm(dir) 148 | walk_dir = get_picth_yaw(*dir)[1] % 360 149 | else: 150 | walk_dir = random.randint(0, 360) 151 | 152 | turn_lr_delta = 0 153 | look_ud_delta = 0 154 | attack = False 155 | 156 | if state.enemy_states: 157 | enemy_info = list(state.enemy_states.values())[0] 158 | tar = np.asarray(get_position(enemy_info)) 159 | dir = tar - pos 160 | dir = dir / np.linalg.norm(dir) 161 | aim_pitch, aim_yaw = get_picth_yaw(*dir) 162 | 163 | diff_pitch = aim_pitch - state.pitch 164 | diff_yaw = aim_yaw - state.yaw 165 | if abs(diff_pitch) < 5 and abs(diff_yaw) < 5: 166 | attack = True 167 | 168 | skip_frames = self.episode_info["time_step_per_action"] 169 | rotate_speed_decay = 0.5 170 | turn_lr_delta = diff_yaw / skip_frames * rotate_speed_decay 171 | look_ud_delta = diff_pitch / skip_frames * rotate_speed_decay 172 | 173 | return SupplyBattleAction( 174 | walk_dir=walk_dir, 175 | walk_speed=5, 176 | turn_lr_delta=turn_lr_delta, 177 | look_ud_delta=look_ud_delta, 178 | jump=False, 179 | pickup=True, 180 | attack=attack, 181 | reload=state.weapon_ammo < 5 and state.spare_ammo > 0, 182 | ) 183 | -------------------------------------------------------------------------------- /submission_template/submission/envs.py: -------------------------------------------------------------------------------- 1 | import random 2 | from typing import Dict 3 | 4 | import gym 5 | import numpy as np 6 | from gym import spaces 7 | from ray.rllib.env import EnvContext 8 | from inspirai_fps.utils import get_distance, get_position 9 | from inspirai_fps.gamecore import Game 10 | from inspirai_fps.gamecore import ActionVariable 11 | 12 | 13 | class NavigationEnv(gym.Env): 14 | BASE_PORT = 50000 15 | ACT_VALS = { 16 | "walk_dir": [0, 90, 180, 270], 17 | "walk_speed": [0, 5, 10], 18 | } 19 | OBS_SPACE = spaces.Dict({ 20 | "cur_loc": spaces.Box(low=-np.Inf, high=np.Inf, shape=(3,), dtype=np.float32), 21 | "tar_loc": spaces.Box(low=-np.Inf, high=np.Inf, shape=(3,), dtype=np.float32), 22 | }) 23 | ACT_SPACE = spaces.Dict({ 24 | k: spaces.Discrete(len(v)) for k, v in ACT_VALS.items() 25 | }) 26 | 27 | def __init__(self, config: EnvContext): 28 | self.config = config 29 | self.render_scale = config.get("render_scale", 1) 30 | 31 | env_seed = config.get("random_seed", 0) + config.get("worker_index",0) 32 | self.seed(env_seed) 33 | 34 | self.observation_space = self.OBS_SPACE 35 | self.action_space = self.ACT_SPACE 36 | 37 | server_port = self.BASE_PORT + config.get("worker_index",0) 38 | print(f">>> New instance {self} on port: {server_port}") 39 | 40 | self.game = Game( 41 | map_dir=config["map_dir"], 42 | engine_dir=config["engine_dir"], 43 | server_port=server_port, 44 | ) 45 | self.game.set_map_id(config["map_id"]) 46 | self.game.set_episode_timeout(config["timeout"]) 47 | self.game.set_random_seed(env_seed) 48 | self.game.turn_on_depth_map() 49 | self.game.set_game_mode(Game.MODE_NAVIGATION) 50 | self.game.set_available_actions([ 51 | ActionVariable.WALK_DIR, 52 | ActionVariable.WALK_SPEED, 53 | ]) 54 | self.start_location = None 55 | self.target_location = None 56 | 57 | locations = self.game.get_valid_locations() 58 
| 59 | self.indoor_loc = locations["indoor"] 60 | self.outdoor_loc = locations["outdoor"] 61 | 62 | self.game.init() 63 | 64 | def _get_obs(self): 65 | return { 66 | "cur_loc": np.asarray(get_position(self.state)), 67 | "tar_loc": np.asarray(self.target_location) 68 | } 69 | 70 | def step(self, action_dict): 71 | action_vals = self._action_process(action_dict) 72 | self.game.make_action({0: action_vals}) 73 | self.state = self.game.get_state() 74 | done = self.game.is_episode_finished() 75 | 76 | cur_loc = get_position(self.state) 77 | tar_loc = self.target_location 78 | 79 | if get_distance(cur_loc, tar_loc) <= self.game.target_trigger_distance: 80 | reward = 100 81 | done = True 82 | else: 83 | reward = 0 84 | 85 | if done: 86 | if self.print_log: 87 | Start = np.round(np.asarray(self.start_location), 2).tolist() 88 | Target = np.round(np.asarray(self.target_location), 2).tolist() 89 | End = np.round(np.asarray(get_position(self.state)), 2).tolist() 90 | Step = self.running_steps 91 | Reward = reward 92 | print(f"{Start=}\t{Target=}\t{End=}\t{Step=}\t{Reward=}") 93 | 94 | self.running_steps += 1 95 | 96 | return self._get_obs(), reward, done, {} 97 | 98 | def reset(self): 99 | print("Reset for a new game ...") 100 | self.start_location = random.choice(self.outdoor_loc) 101 | self.target_location = random.choice(self.outdoor_loc) 102 | self.game.set_start_location(self.start_location) 103 | self.game.set_target_location(self.target_location) 104 | self.game.new_episode() 105 | self.state = self.game.get_state() 106 | self.running_steps = 0 107 | return self._get_obs() 108 | 109 | def close(self): 110 | self.game.close() 111 | return super().close() 112 | 113 | def _action_process(self, action: Dict[str, int]): 114 | walk_dir = self.ACT_VALS["walk_dir"][action["walk_dir"]] 115 | walk_speed = self.ACT_VALS["walk_speed"][action["walk_speed"]] 116 | return [walk_dir, walk_speed] 117 | --------------------------------------------------------------------------------
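For reference, here is a minimal training sketch for the `NavigationEnv` above, using the same legacy `ray.rllib.agents.ppo` API that `submission/agents.py` imports. This is an illustrative assumption rather than part of the repository: the `map_dir`/`engine_dir` paths, the iteration count, and the `detailed_log` flag are placeholders.

```python
import ray
import torch
from ray.rllib.agents import ppo

from submission.envs import NavigationEnv

ray.init()
trainer = ppo.PPOTrainer(
    env=NavigationEnv,
    config={
        "framework": "torch",
        "num_workers": 1,  # each rollout worker starts its own engine on BASE_PORT + worker_index
        "env_config": {
            "map_dir": "/path/to/map_data",      # placeholder path
            "engine_dir": "/path/to/fps_linux",  # placeholder path
            "map_id": 1,
            "timeout": 600,
            "random_seed": 0,
            "detailed_log": True,
        },
    },
)
for i in range(10):
    result = trainer.train()
    print(i, result["episode_reward_mean"])

# export the trained weights in the format AgentNavigation expects at MODEL_PATH
torch.save(trainer.get_policy().model.state_dict(), "submission/model.pth")
```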