└── LLM.py

/LLM.py:
--------------------------------------------------------------------------------
from settings import *

import csv
import json
import re

from dialog import Dialog

from distances import Point, find_closest_walkable, dijkstra_distance, mark_positions, place_characters, find_closest_room_center

import openai

openai.api_key = API_KEY


def place_tile(X, Y, Tile, rooms, filepath="map/Floor_1/map_1_Collisions.csv", filepath_to_modify="map/Floor_1/map_1_Interactions.csv"):
    # filepath (the collisions map) is accepted for signature compatibility but is not read here.
    print("place_tile called with:", X, Y, Tile)

    # Read the interactions csv file into a list of lists
    with open(filepath_to_modify, 'r') as f:
        reader = csv.reader(f)
        data = list(reader)

    for i in range(len(X)):
        try:
            x_val = int(X[i])
            y_val = int(Y[i])
            tile_val = int(Tile[i])
            start = Point(x_val, y_val)

            # Snap the requested position to the center of the closest room
            closest_walkable = find_closest_room_center(start, rooms)
            final_x = closest_walkable.x
            final_y = closest_walkable.y
            # Place the tile value at the adjusted location
            data[final_y][final_x] = tile_val

        except Exception:
            print("The model returned an invalid placement for index:", i)
            continue  # Skip to the next iteration if there's an error

    # Save the list of lists back to the csv file
    with open(filepath_to_modify, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(data)


def Create_story_LLM(story, size, genre):

    system_msg = {"role": "system", "content": f'You are an AI designed to help me build an interesting game based on a dungeon inside the tower in this story: {story}.\nThe map is a {size}x{size} tilemap of a {genre} game'}

    # Initialize the messages array
    messages = [system_msg]
    message = f"With ONLY those available tiles: {AVAILABLE_TILES}, create a basic plot for the level. The story should be less than 100 words long."

    messages.append({"role": "user", "content": message})
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=messages,
        )
        # Extract the assistant's message from the response
        assistant_msg = response['choices'][0]['message']

        # Print the model's response
        print('Story: ', assistant_msg.content)
        return assistant_msg.content
    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)
        return


def Create_Level_objective_LLM(level_story, size, genre):

    system_msg = {"role": "system", "content": f'You are an AI designed to help me build an interesting game based on a dungeon inside the tower in this story: {STORY}.\nThe map is a {size}x{size} tilemap of a {genre} game'}

    # Initialize the messages array
    messages = [system_msg]
    message = f"With ONLY those available tiles: {AVAILABLE_TILES}, and this story: {level_story}, provide, in less than 30 words, a 'clear condition' for the level. The portal will only activate if the condition is fulfilled, so make it something feasible. Examples: 'defeat at least N enemies', 'loot at least N treasure chests', or 'defeat the boss and escape'."

    messages.append({"role": "user", "content": message})
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=messages,
            functions=[
                {
                    "name": "set_level_objective",
                    "description": "Set a clear condition for the current level.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "story": {
                                "type": "string",
                                "description": "A good objective that is feasible on the current level",
                            },
                        },
                        "required": ["story"],
                    },
                }
            ],
            function_call={"name": "set_level_objective"},
        )
        # Extract the assistant's message from the response
        assistant_msg = response['choices'][0]['message']
        response_options = assistant_msg.to_dict()['function_call']['arguments']
        options = json.loads(response_options)

        # Print the model's response
        print('Objective: ', options["story"])
        return options["story"]
    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)
        return


def place_LLM_tiles(Level_story, player_pos, boss_pos, boss_name, size, genre, level, rooms, objective):

    system_msg = {"role": "system", "content": f'You are an AI designed to help me build an interesting game out of the available assets.\nThe map is a {size}x{size} tilemap of a {genre} game'}

    # Initialize the messages array
    messages = [system_msg]
    message = f"With ONLY those available tiles: {AVAILABLE_TILES}, create a placement for the tiles as (x,y) coordinates on the map to follow this story: {Level_story}, considering that the player starting position is ({player_pos.x},{player_pos.y}) and the boss ({boss_name}) position is ({boss_pos.x},{boss_pos.y})"
    print(message)
    messages.append({"role": "user", "content": message})

    # Ask GPT-4 for a tile placement via function calling
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=messages,
            functions=[
                {
                    "name": "place_tile",
                    "description": f"Place the tiles on the map at pos (x,y) by iterating over the given arrays; the X and Y values can't be greater than the size of the map. There must ALWAYS be one exit portal and not more than 2. Also, the objective must ABSOLUTELY be feasible: {objective}",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "X": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "An individual x position for a tile"
                                },
                                "description": f"List of X pos to place the objects on the map. Values can't exceed {size}"
                            },
                            "Y": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "An individual y position for a tile"
                                },
                                "description": f"List of Y pos to place the objects on the map. Values can't exceed {size}"
                            },
                            "Tiles": {
                                "type": "array",
                                "items": {
                                    "type": "string",
                                    "description": "An individual Tile value"
                                },
                                "description": "List of Tile numbers to place the objects on the map"
                            },
                        },
                        "required": ["X", "Y", "Tiles"],
                    },
                }
            ],
            function_call={"name": "place_tile"},
        )

        # Extract the assistant's message from the response
        assistant_msg = response['choices'][0]['message']

        # Print the model's response
        print('Tile placement: ', assistant_msg)

        # Step 2: check if GPT wanted to call a function
        if assistant_msg.get("function_call"):
            print("calling function")
            # Step 3: call the function
            # Note: the JSON response may not always be valid; be sure to handle errors
            available_functions = {
                "place_tile": place_tile,
            }  # only one function in this example, but you can have multiple
            function_name = assistant_msg["function_call"]["name"]
            function_to_call = available_functions[function_name]
            function_args = json.loads(assistant_msg["function_call"]["arguments"])
            print(function_args)
            function_to_call(
                X=function_args.get("X"),
                Y=function_args.get("Y"),
                Tile=function_args.get("Tiles"),
                filepath=f"map/Floor_{level}/map_{level}_Collisions.csv",
                filepath_to_modify=f"map/Floor_{level}/map_{level}_Interactions.csv",
                rooms=rooms
            )

    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)


def set_LLM_Mood(sentence):

    system_msg = {"role": "system", "content": 'You are an AI designed to help me answer a message with the correct tone.'}

    # Initialize the messages array
    messages = [system_msg]
    message = f"Find the correct tone to answer this: {sentence}"

    messages.append({"role": "user", "content": message})
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=messages,
            functions=[
                {
                    "name": "set_mood",
                    "description": "Set the mood of the NPC to which the sentence was said.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "mood": {
                                "type": "string",
                                "description": "A mood between: Neutral, Happy, Sad, Angry or Dull",
                            },
                        },
                        "required": ["mood"],
                    },
                }
            ],
            function_call={"name": "set_mood"},
        )
        # Extract the assistant's message from the response
        assistant_msg = response['choices'][0]['message']
        response_options = assistant_msg.to_dict()['function_call']['arguments']
        options = json.loads(response_options)

        # Print the model's response
        print('Mood: ', options["mood"])
        return options["mood"]
    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)
        return


def answer_with_mood_LLM(player_input, messages, mood):
    # The caller is expected to have already appended player_input to messages.
    try:
        # Ask GPT-4 for a response in the requested mood via function calling
        response = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=messages,
            functions=[
                {
                    "name": "get_varied_personality_responses",
                    "description": "Ingest the various personality responses",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            f"{mood}": {
                                "type": "string",
                                "description": f"A {mood} version of the response to a user's query",
                            },
                        },
                        "required": [f"{mood}"],
                    },
                }
            ],
            function_call={"name": "get_varied_personality_responses"},
        )

        reply_content = response.choices[0].message
        response_options = reply_content.to_dict()['function_call']['arguments']
        options = json.loads(response_options)
        moody_answer = options[f"{mood}"]

        # Print the model's response
        print('NPC: ', moody_answer)

        return moody_answer
    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)
        return


def function_call_create_item(rate):
    try:
        system_intel = "You are an assistant designed to help me build interesting items."
        prompt = f"Create an item corresponding to the following rate: {rate}"
        result = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=[{"role": "system", "content": system_intel},
                      {"role": "user", "content": prompt}],
            functions=[
                {
                    "name": "create_character_json",
                    "description": "Create an item given certain characteristics.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string",
                                "description": "the name of the object in less than 5 words.",
                            },
                            "description": {
                                "type": "string",
                                "description": "the description of the object in less than 15 words.",
                            },
                            "item_type": {
                                "type": "string",
                                "description": "the type of the object, between 'weapon','headset','torso','legs','boots','jewel'",
                            },
                            "health_bonus": {
                                "type": "string",
                                "description": "the health bonus that the created item gives, between 0 and 20",
                            },
                            # key spelling kept as-is in case other modules read 'enegy_bonus'
                            "enegy_bonus": {
                                "type": "string",
                                "description": "the energy bonus that the created item gives, between 0 and 20",
                            },
                            "magic_bonus": {
                                "type": "string",
                                "description": "the magic value of the created item, between 0 and 10",
                            },
                            "strength_bonus": {
                                "type": "string",
                                "description": "the strength value of the created item, between 0 and 10",
                            },
                            "speed_bonus": {
                                "type": "string",
                                "description": "the speed value of the created item, between 0 and 0.5",
                            },
                        },
                        "required": ["name", "description", "item_type", "health_bonus", "enegy_bonus", "magic_bonus", "strength_bonus", "speed_bonus"],
                    },
                }
            ],
            function_call={"name": "create_character_json"},
        )

        message = result["choices"][0]["message"]
        print(message)
        response_option = message.to_dict()['function_call']['arguments']
        print(response_option)
        options = json.loads(response_option)
        return options

    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)
        return


def function_call_create_item_from_NPC(rate, prefered_items):
    try:
        system_intel = "You are an assistant designed to help me build interesting items."
        prompt = f"Create an item corresponding to the following rate: {rate}. Given your status, you have a tendency to create more {prefered_items}"
        result = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=[{"role": "system", "content": system_intel},
                      {"role": "user", "content": prompt}],
            functions=[
                {
                    "name": "create_character_json",
                    "description": "Create an item given certain characteristics.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string",
                                "description": "the name of the object in less than 5 words.",
                            },
                            "description": {
                                "type": "string",
                                "description": "the description of the object in less than 15 words.",
                            },
                            "item_type": {
                                "type": "string",
                                "description": "the type of the object, between 'weapon','headset','torso','legs','boots','jewel'",
                            },
                            "health_bonus": {
                                "type": "string",
                                "description": "the health bonus that the created item gives, between 0 and 20",
                            },
                            "enegy_bonus": {
                                "type": "string",
                                "description": "the energy bonus that the created item gives, between 0 and 20",
                            },
                            "magic_bonus": {
                                "type": "string",
                                "description": "the magic value of the created item, between 0 and 10",
                            },
                            "strength_bonus": {
                                "type": "string",
                                "description": "the strength value of the created item, between 0 and 10",
                            },
                            "speed_bonus": {
                                "type": "string",
                                "description": "the speed value of the created item, between 0 and 0.5",
                            },
                        },
                        "required": ["name", "description", "item_type", "health_bonus", "enegy_bonus", "magic_bonus", "strength_bonus", "speed_bonus"],
                    },
                }
            ],
            function_call={"name": "create_character_json"},
        )

        message = result["choices"][0]["message"]
        print(message)
        response_option = message.to_dict()['function_call']['arguments']
        print(response_option)
        options = json.loads(response_option)
        return options

    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)
        return


def read_log_file(file_path):
    with open(file_path, 'r') as log_file:
        log_content = log_file.read()
    return log_content


def function_call_Grant_Portal_Acces(file_path, objective):
    try:
        logs = read_log_file(file_path)
        print('LOGS: ', logs)
        print('Objective: ', objective)
        system_intel = "You are an assistant designed to help me decide if an objective has been fulfilled."
        prompt = f"Given the following player logs: {logs}. Is this objective completed? Objective: {objective}\nShould I give access to the next floor? Don't be too severe in the decision. When you are called the player has reached the exit portal, so don't bother with logs about exit portal 444."
        result = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=[{"role": "system", "content": system_intel},
                      {"role": "user", "content": prompt}],
            functions=[
                {
                    "name": "Grant_acces",
                    "description": "A function to call to grant access to the next floor to the player or not.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "acces": {
                                "type": "string",
                                "description": "Does the portal open? 'Yes' or 'No'",
                            },
                        },
                        "required": ["acces"],
                    },
                }
            ],
            function_call={"name": "Grant_acces"},
        )

        message = result["choices"][0]["message"]
        print(message)
        response_option = message.to_dict()['function_call']['arguments']
        print(response_option)
        options = json.loads(response_option)
        return options

    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)
        return
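

# Hypothetical usage sketch, not part of the game loop: a minimal way to exercise
# the helpers above from the command line, assuming settings provides API_KEY,
# STORY and AVAILABLE_TILES (as this module already expects). The map size 50,
# genre "fantasy" and rate "rare" below are assumed example values, not constants
# defined anywhere in the project.
if __name__ == "__main__":
    demo_story = Create_story_LLM(STORY, 50, "fantasy")
    if demo_story:
        demo_objective = Create_Level_objective_LLM(demo_story, 50, "fantasy")
        print("Demo objective:", demo_objective)
    demo_item = function_call_create_item("rare")
    print("Demo item:", demo_item)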