├── minds ├── __init__.py ├── genes.py ├── jayshoo.py ├── mind2.py ├── mind1.py ├── crawling_chaos.py ├── mind3.py ├── japhet.py ├── zenergizer.py ├── evolving_chaos.py ├── ben.py ├── benvolution.py ├── benmark.py ├── benvolution_genetic.py ├── seken.py └── ben2.py ├── terrain ├── __init__.py └── generator.py ├── .gitignore ├── setup.py ├── CHEATING ├── README ├── cells_helpers.pyx ├── LICENSE ├── tournament.py ├── DOCUMENTATION └── cells.py /minds/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /terrain/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.pyc 3 | *.csv 4 | *.cfg 5 | .*.swp 6 | *.c 7 | *.so 8 | build 9 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | 5 | setup( 6 | cmdclass = {'build_ext': build_ext}, 7 | ext_modules = [Extension("cells_helpers", ["cells_helpers.pyx"])] 8 | ) 9 | -------------------------------------------------------------------------------- /CHEATING: -------------------------------------------------------------------------------- 1 | Some proposed ground rules: 2 | 3 | - No module-level or other shared state in "mind" modules. Agent state 4 | must be visible to only a single AgentMind object. 5 | 6 | - The act method may only access game state passed to it in its 7 | arguments. This current includes a WorldView and a MessageQueue. 8 | 9 | - Only interact with a MessageQueue through its send_message and 10 | get_messages methods. Do not call update and do not attempt to 11 | interact with its internal state. -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Cells is a multi agent programing game written in Python. 2 | 3 | For more information see: http://phonons.wordpress.com/2010/06/01/cells-a-massively-multi-agent-python-programming-game/ 4 | 5 | Join #pycells on irc.freenode.net to discuss Cells 6 | 7 | Requirements: 8 | pygame 9 | numpy 10 | 11 | Optional: 12 | psyco 13 | 14 | To run: 15 | python cells.py ... 16 | 17 | For example: 18 | 19 | python cells.py mind1 mind2 20 | 21 | For available minds, look in the minds/ folder. Also place your custom minds there. 
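A minimal custom mind, following the ground rules in CHEATING, might look like the sketch below (the file name, threshold and behaviour are illustrative; the API calls are the ones used by the bundled minds):

# minds/example.py -- illustrative sketch only, not part of the distribution
import random
import cells

class AgentMind(object):
    def __init__(self, args):
        # Keep all state on the instance; no module-level state (see CHEATING).
        self.hunger_threshold = 50

    def act(self, view, msg):
        me = view.get_me()
        (mx, my) = me.get_pos()
        # Attack the first visible enemy.
        for a in view.get_agents():
            if a.get_team() != me.get_team():
                return cells.Action(cells.ACT_ATTACK, a.get_pos())
        # Eat if standing on energy and hungry, otherwise wander one square.
        if view.get_energy().get(mx, my) > 0 and me.energy < self.hunger_threshold:
            return cells.Action(cells.ACT_EAT)
        return cells.Action(cells.ACT_MOVE,
                            (mx + random.randrange(-1, 2), my + random.randrange(-1, 2)))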
22 | -------------------------------------------------------------------------------- /cells_helpers.pyx: -------------------------------------------------------------------------------- 1 | cimport numpy 2 | 3 | 4 | def get_small_view_fast(self, int x, int y): 5 | cdef numpy.ndarray[object, ndim=2] values = self.values 6 | cdef int width = self.width 7 | cdef int height = self.height 8 | cdef int dr 9 | cdef int dc 10 | cdef int adj_x 11 | cdef int adj_y 12 | 13 | 14 | assert self.values.dtype == object 15 | ret = [] 16 | get = self.get 17 | 18 | for dr in xrange(-1,2): 19 | for dc in xrange(-1,2): 20 | if not dr and not dc: 21 | continue 22 | adj_x = x + dr 23 | if not 0 <= adj_x < width: 24 | continue 25 | adj_y = y + dc 26 | if not 0 <= adj_y < height: 27 | continue 28 | a = values[adj_x, adj_y] 29 | if a is not None: 30 | ret.append(a.get_view()) 31 | return ret 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010 Thomas McColgan 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /minds/genes.py: -------------------------------------------------------------------------------- 1 | '''Genes in asexual reproduction. 2 | 3 | Totally made-up, has no basis in genetic algorithms b/c I have no 4 | background in that area. 
5 | ''' 6 | 7 | import random 8 | 9 | class Gene(object): 10 | def __init__(self, parent): 11 | '''Clone this gene from the parent gene.''' 12 | self.val = parent.val 13 | 14 | def spawn(self): 15 | '''Copy this gene, introducing mutations probabilistically.''' 16 | new = self.__class__(self) 17 | new.mutate() 18 | return new 19 | 20 | def mutate(self): 21 | perturb = self.gen_perturb() 22 | val = self.val + perturb 23 | self.val = min(max(val, self.min_cap), self.max_cap) 24 | 25 | 26 | def make_normally_perturbed_gene(sigma, minc=0, maxc=1): 27 | class NormallyPerturbedGene(Gene): 28 | min_cap = minc 29 | max_cap = maxc 30 | def gen_perturb(self): 31 | return random.gauss(0, sigma) 32 | return NormallyPerturbedGene 33 | 34 | 35 | def make_drastic_mutation_gene(pr): 36 | '''Gene representing incompatible categories.''' 37 | class DrasticMutationGene(Gene): 38 | min_cap = 0 39 | max_cap = 100 40 | def gen_perturb(self): 41 | if random.random() < pr: 42 | return 1 if random.random() < 0.5 else -1 43 | else: 44 | return 0 45 | return DrasticMutationGene 46 | 47 | 48 | class InitializerGene(object): 49 | '''A fake gene, used to initialize things.''' 50 | def __init__(self, val): 51 | self.val = val 52 | -------------------------------------------------------------------------------- /minds/jayshoo.py: -------------------------------------------------------------------------------- 1 | # seriously stupid bot 2 | # eat, work out symmetric position, attack, lose to spreaded colonies 3 | 4 | import cells, random 5 | 6 | class AgentMind(object): 7 | def __init__(self, args): 8 | # init things 9 | self.home = None 10 | self.breeder = False 11 | 12 | # if called by a parent: 13 | if (args != None): 14 | self.home = args[0] 15 | 16 | def symmetricPos(self, pos): 17 | return (pos[1], pos[0]) 18 | 19 | def get_dir(self, myX, myY, targX, targY): 20 | resultX = 0 21 | resultY = 0 22 | if (myX > targX): resultX = myX+1 23 | if (myX < targX): resultX = myX-1 24 | if (myY > targY): resultY = myY-1 25 | if (myY < targY): resultY = myY+1 26 | return (resultX, resultY) 27 | 28 | def act(self, view, msg): 29 | me = view.get_me() 30 | my_pos = (mx,my) = me.get_pos() 31 | 32 | # first cell only store home plant and work out direction to symmetric team 33 | # TODO: handle view.get_plants() somehow not working for the first cell 34 | if (self.home == None): 35 | self.home = (view.get_plants()[0].x, view.get_plants()[0].y) 36 | self.breeder = True 37 | 38 | # eat 39 | if (view.get_energy().get(mx, my) > 0): 40 | if (me.energy < 50): 41 | return cells.Action(cells.ACT_EAT) 42 | 43 | # breed if designated 44 | if (self.breeder): 45 | return cells.Action(cells.ACT_SPAWN, (mx + random.randrange(-1,2), my + random.randrange(-1,2), self.home)) 46 | 47 | # fight if drunk 48 | nearby = view.get_agents() 49 | for a in nearby: 50 | if (a.team != me.team): 51 | return cells.Action(cells.ACT_ATTACK, a.get_pos()) 52 | 53 | # leave home 54 | return cells.Action(cells.ACT_MOVE, self.symmetricPos(self.home)) 55 | 56 | # die 57 | pass 58 | -------------------------------------------------------------------------------- /minds/mind2.py: -------------------------------------------------------------------------------- 1 | import random, cells 2 | 3 | 4 | class AgentMind(object): 5 | def __init__(self, junk): 6 | self.my_plant = None 7 | self.mode = 1 8 | self.target_range = random.randrange(50,200) 9 | 10 | def act(self,view,msg): 11 | x_sum = 0 12 | y_sum = 0 13 | dir = 1 14 | n = len(view.get_plants()) 15 | me = view.get_me() 16 | mp 
= (mx,my)= me.get_pos() 17 | for a in view.get_agents(): 18 | if (a.get_team()!=me.get_team()): 19 | return cells.Action(cells.ACT_ATTACK,a.get_pos()) 20 | 21 | for m in msg.get_messages(): 22 | if (random.random()>0.6) and self.my_plant: 23 | self.mode = 5 24 | (tx,ty) = m 25 | self.target = (tx+random.randrange(-3,4),ty+random.randrange(-3,4)) 26 | 27 | if(n>0): 28 | if (not self.my_plant): 29 | self.my_plant = view.get_plants()[0] 30 | elif self.my_plant.get_eff() dist*1.5: 37 | self.mode = 6 38 | 39 | if self.mode == 6: 40 | dist = max(abs(mx-self.target[0]),abs(my-self.target[1])) 41 | if dist > 4: 42 | return cells.Action(cells.ACT_MOVE,self.target) 43 | else: 44 | self.my_plant = None 45 | self.mode = 0 46 | 47 | if (me.energy < self.target_range) and (view.get_energy().get(mx, my) > 0): 48 | return cells.Action(cells.ACT_EAT) 49 | 50 | if self.my_plant: 51 | dist = max(abs(mx-self.my_plant.get_pos()[0]),abs(my-self.my_plant.get_pos()[1])) 52 | if me.energy < dist*1.5: 53 | (mx,my) = self.my_plant.get_pos() 54 | return cells.Action(cells.ACT_MOVE,(mx+random.randrange(-1,2),my+random.randrange(-1,2))) 55 | if (random.random()>0.9999): 56 | (mx,my) = self.my_plant.get_pos() 57 | msg.send_message((my,mx)) 58 | 59 | if (random.random()>0.9): 60 | return cells.Action(cells.ACT_SPAWN,(mx+random.randrange(-1,2),my+random.randrange(-1,2))) 61 | else: 62 | return cells.Action(cells.ACT_MOVE,(mx+random.randrange(-1,2),my+random.randrange(-1,2))) 63 | -------------------------------------------------------------------------------- /minds/mind1.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Defines an agent mind that attacks any opponent agents within its view, 3 | attaches itself to the strongest plant it finds, eats when its hungry, 4 | ''' 5 | 6 | import random, cells 7 | import math 8 | 9 | 10 | class AgentMind(object): 11 | def __init__(self, junk): 12 | self.my_plant = None 13 | self.mode = 1 14 | self.target_range = random.randrange(50, 1000) 15 | 16 | def length(self, a, b): 17 | return int(math.sqrt((a * a) + (b * b))) 18 | 19 | def act(self, view, msg): 20 | x_sum = 0 21 | y_sum = 0 22 | dir = 1 23 | me = view.get_me() 24 | mp = (mx, my)= me.get_pos() 25 | 26 | # Attack any opponents. 27 | for a in view.get_agents(): 28 | if a.get_team() != me.get_team(): 29 | return cells.Action(cells.ACT_ATTACK, a.get_pos()) 30 | 31 | # Attach to the strongest plant found. 32 | if view.get_plants(): 33 | plant = view.get_plants()[0] 34 | if not self.my_plant: 35 | self.my_plant = plant 36 | elif self.my_plant.eff < plant.eff: 37 | self.my_plant = plant 38 | 39 | # Eat if hungry or if this is an exceptionally energy-rich spot. 
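        # (precedence note: the test below reads as (hungry and food) or
        #  (energy_here > 100), since "and" binds tighter than "or")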
40 | hungry = (me.energy < self.target_range) 41 | energy_here = view.get_energy().get(mx, my) 42 | food = (energy_here > 0) 43 | if hungry and food or energy_here > 100: 44 | return cells.Action(cells.ACT_EAT) 45 | 46 | if self.my_plant: 47 | plant_pos = self.my_plant.get_pos() 48 | plant_dist = self.length( 49 | abs(mx - plant_pos[0]), 50 | abs(my - plant_pos[1])) 51 | 52 | if (not me.loaded and 53 | (plant_dist % 5 or abs(mx - plant_pos[0]) < 2) 54 | and random.random() > 0.5): 55 | return cells.Action(cells.ACT_LIFT) 56 | if me.loaded and plant_dist % 5 == 0 and abs(mx - plant_pos[0]) >= 2: 57 | return cells.Action(cells.ACT_DROP) 58 | if me.energy < plant_dist * 1.5: 59 | (mx, my) = plant_pos 60 | pos = (mx + random.randrange(-1, 2), my + random.randrange(-1, 2)) 61 | return cells.Action(cells.ACT_MOVE, pos) 62 | 63 | pos = (mx + random.randrange(-1, 2), my + random.randrange(-1, 2)) 64 | action = cells.ACT_SPAWN if random.random() > 0.9 else cells.ACT_MOVE 65 | return cells.Action(action, pos) 66 | -------------------------------------------------------------------------------- /tournament.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import ConfigParser 5 | from cells import Game 6 | 7 | config = ConfigParser.RawConfigParser() 8 | 9 | def get_mind(name): 10 | full_name = 'minds.' + name 11 | __import__(full_name) 12 | mind = sys.modules[full_name] 13 | mind.name = name 14 | return mind 15 | 16 | bounds = None # HACK 17 | symmetric = None 18 | mind_list = None 19 | 20 | def main(): 21 | global bounds, symmetric, mind_list 22 | try: 23 | config.read('tournament.cfg') 24 | bounds = config.getint('terrain', 'bounds') 25 | symmetric = config.getboolean('terrain', 'symmetric') 26 | minds_str = str(config.get('minds', 'minds')) 27 | 28 | except Exception as e: 29 | print 'Got error: %s' % e 30 | config.add_section('minds') 31 | config.set('minds', 'minds', 'mind1,mind2') 32 | config.add_section('terrain') 33 | config.set('terrain', 'bounds', '300') 34 | config.set('terrain', 'symmetric', 'true') 35 | 36 | with open('tournament.cfg', 'wb') as configfile: 37 | config.write(configfile) 38 | 39 | config.read('tournament.cfg') 40 | bounds = config.getint('terrain', 'bounds') 41 | symmetric = config.getboolean('terrain', 'symmetric') 42 | minds_str = str(config.get('minds', 'minds')) 43 | mind_list = [(n, get_mind(n)) for n in minds_str.split(',')] 44 | 45 | # accept command line arguments for the minds over those in the config 46 | try: 47 | if len(sys.argv)>2: 48 | mind_list = [(n,get_mind(n)) for n in sys.argv[1:] ] 49 | except (ImportError, IndexError): 50 | pass 51 | 52 | 53 | if __name__ == "__main__": 54 | main() 55 | scores = [0 for x in mind_list] 56 | tournament_list = [[mind_list[a], mind_list[b]] for a in range(len(mind_list)) for b in range (a)] 57 | for n in range(4): 58 | for pair in tournament_list: 59 | game = Game(bounds, pair, symmetric, 5000, headless = True) 60 | while game.winner == None: 61 | game.tick() 62 | if game.winner >= 0: 63 | idx = mind_list.index(pair[game.winner]) 64 | scores[idx] += 3 65 | if game.winner == -1: 66 | idx = mind_list.index(pair[0]) 67 | scores[idx] += 1 68 | idx = mind_list.index(pair[1]) 69 | scores[idx] += 1 70 | print scores 71 | print [m[0] for m in mind_list] 72 | names = [m[0] for m in mind_list] 73 | name_score = zip(names,scores) 74 | f = open("scores.csv",'w') 75 | srt = sorted(name_score,key=lambda ns: -ns[1]) 76 | for x in srt: 77 | 
f.write("%s;%s\n" %(x[0],str(x[1]))) 78 | f.close() 79 | -------------------------------------------------------------------------------- /minds/crawling_chaos.py: -------------------------------------------------------------------------------- 1 | import random,cells 2 | 3 | import cmath, math 4 | 5 | class AgentMind(object): 6 | def __init__(self, junk): 7 | self.my_plant = None 8 | self.mode = 1 9 | self.target_range = random.randrange(50,200) 10 | pass 11 | 12 | def act(self,view,msg): 13 | x_sum = 0 14 | y_sum = 0 15 | dir = 1 16 | n = len(view.get_plants()) 17 | me = view.get_me() 18 | mp = (mx,my)= me.get_pos() 19 | for a in view.get_agents(): 20 | if (a.get_team()!=me.get_team()): 21 | msg.send_message(mp) 22 | return cells.Action(cells.ACT_ATTACK,a.get_pos()) 23 | 24 | for m in msg.get_messages(): 25 | r = random.random() 26 | if ((self.my_plant and random.random()>0.6) or 27 | (not self.my_plant and random.random() > 0.5)): 28 | self.mode = 5 29 | (tx,ty) = m 30 | self.target = (tx+random.randrange(-3,4),ty+random.randrange(-3,4)) 31 | 32 | if n: 33 | best_plant = max(view.get_plants(), key=lambda x: x.eff) 34 | if not self.my_plant or self.my_plant.eff < best_plant.eff: 35 | self.my_plant = view.get_plants()[0] 36 | self.mode = 0 37 | 38 | if self.mode == 5: 39 | dist = max(abs(mx-self.target[0]),abs(my-self.target[1])) 40 | self.target_range = max(dist,self.target_range) 41 | if me.energy > dist*1.5: 42 | self.mode = 6 43 | 44 | if self.mode == 6: 45 | dist = max(abs(mx-self.target[0]),abs(my-self.target[1])) 46 | if dist > 4: 47 | return cells.Action(cells.ACT_MOVE,self.target) 48 | else: 49 | self.my_plant = None 50 | self.mode = 0 51 | 52 | if (me.energy < self.target_range) and (view.get_energy().get(mx, my) > 0): 53 | return cells.Action(cells.ACT_EAT) 54 | 55 | if self.my_plant: 56 | dist = max(abs(mx-self.my_plant.get_pos()[0]),abs(my-self.my_plant.get_pos()[1])) 57 | if me.energy < dist*1.5: 58 | (mx,my) = self.my_plant.get_pos() 59 | return cells.Action(cells.ACT_MOVE,(mx+random.randrange(-1,2),my+random.randrange(-1,2))) 60 | if (random.random()>0.9999): 61 | (mx,my) = self.my_plant.get_pos() 62 | dtheta = random.random() * 2 * math.pi 63 | dr = random.randrange(100) 64 | curr_r, curr_theta = cmath.polar(mx + my*1j) 65 | m = cmath.rect(curr_r + dr, curr_theta + dtheta) 66 | msg.send_message((m.real, m.imag)) 67 | 68 | if (random.random()>0.9 and me.energy >= 50): 69 | return cells.Action(cells.ACT_SPAWN,(mx+random.randrange(-1,2),my+random.randrange(-1,2))) 70 | else: 71 | return cells.Action(cells.ACT_MOVE,(mx+random.randrange(-1,2),my+random.randrange(-1,2))) 72 | -------------------------------------------------------------------------------- /DOCUMENTATION: -------------------------------------------------------------------------------- 1 | How to change what minds are fighting: 2 | 3 | After running python cells.py for the first time, a file named 4 | default.cfg will be created in the same directory, open this with your 5 | favorite text editor, and change the part that reads: 6 | 7 | [minds] 8 | minds = mind1,mind2 9 | 10 | to whatever mind names you want to have fight. Note that the mind 11 | names don't include the .py extension, and that the .py files should 12 | be located in the ./minds directory 13 | 14 | You can have from 2 to 4 minds specified, all comma-delimited 15 | 16 | How to build a Mind: 17 | 18 | make your own .py file with whatever name you like. 
19 | Import whatever modules you want to use (random, math, etc) 20 | and also import cells 21 | 22 | the only thing absolutely required for a mind that doesn't instantly 23 | lose is this: 24 | 25 | class AgentMind: 26 | def act(self, view, msg): 27 | return cells.Action(cells.ACT_EAT) 28 | 29 | 30 | Minds can have other class methods to assist in writing the act 31 | method, but remember, when calling another method to determine what 32 | action to take, you must have a chain of return statements leading all 33 | the way back to the AgentMind.act method. 34 | 35 | If you don't you'll get a an error that looks like this: 36 | 37 | Traceback (most recent call last): 38 | File "cells.py", line 570, in 39 | game.tick() 40 | File "cells.py", line 242, in tick 41 | self.run_agents() 42 | File "cells.py", line 158, in run_agents 43 | if action.type == ACT_MOVE: # Changes position of agent. 44 | AttributeError: 'NoneType' object has no attribute 'type' 45 | 46 | This is because your act method failed to explicitly return something, 47 | and so python helpfully passed a None instead. 48 | 49 | 50 | 51 | 52 | Allowed cell actions: 53 | 54 | These actions do not require any other arguments: 55 | 56 | cells.Action(cells.ACT_EAT) 57 | 58 | cells.Action(cells.ACT_LIFT) 59 | 60 | cells.Action(cells.ACT_DROP) 61 | 62 | All of these arguments have other requirements, usually a location, 63 | specified by a global location. Valid offsets are directly adjacent 64 | cells to the current position so (me.x +/- 1, me.y +/- 1) 65 | 66 | cells.Action(cells.ACT_SPAWN, (x, y), self) 67 | 68 | cells.Action(cells.ACT_MOVE, (x, y)) 69 | 70 | 71 | cells.Action(cells.ACT_ATTACK, (x, y)) 72 | 73 | 74 | msg.send_message(something) 75 | 76 | msg.get_messages(something) 77 | 78 | Commands in Game window: 79 | 80 | spacebar starts a new game. 81 | q quits the game. 82 | e toggles energy display. 83 | a toggles agent display. 
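As a worked example of the action calls and the return-chain requirement described above, a mind that splits its logic across helper methods still has to hand the Action back up through act(). A sketch (the helper names are made up; only the cells.Action calls are part of the game API):

import random
import cells

class AgentMind:
    def act(self, view, msg):
        me = view.get_me()
        (x, y) = me.get_pos()
        for agent in view.get_agents():
            if agent.get_team() != me.get_team():
                # act() returns whatever the helper returns -- no bare call.
                return self.fight(agent)
        return self.graze(view, x, y)

    def fight(self, agent):
        return cells.Action(cells.ACT_ATTACK, agent.get_pos())

    def graze(self, view, x, y):
        if view.get_energy().get(x, y) > 0:
            return cells.Action(cells.ACT_EAT)
        # Move targets must be directly adjacent squares.
        return cells.Action(cells.ACT_MOVE,
                            (x + random.randrange(-1, 2), y + random.randrange(-1, 2)))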
84 | 85 | Useful properties of the view and msg arguments: 86 | 87 | view: 88 | get_me() - return the object that is the current agent (see 'me' below) 89 | 90 | get_agents() - return a list of the agents that can be seen 91 | (see 'agent' below) 92 | 93 | get_plants() - return a list of the plants that can be seen 94 | (see 'plant' below) 95 | 96 | get_energy().get(x_pos, y_pos) - return the amount of energy at coordinates 97 | 98 | me: 99 | get_pos() 100 | 101 | get_team() 102 | 103 | get_view() 104 | 105 | act() 106 | 107 | energy - integer value indicating amount of energy 108 | 109 | loaded - boolean value indicating loaded status 110 | 111 | 112 | plant: 113 | get_pos() 114 | 115 | get_eff() - get the efficiency factor of the plant 116 | 117 | 118 | agent: 119 | get_pos() 120 | 121 | get_team() 122 | -------------------------------------------------------------------------------- /minds/mind3.py: -------------------------------------------------------------------------------- 1 | import random,cells 2 | #rylsan 3 | #phreeza 4 | 5 | 6 | ## Message Grammar 7 | ##sentence = [uniqueid,object_type,obj_instance) 8 | ##such that coords = (x,y) 9 | ##which means: "my name is uniqueid and I have found an obj_instance of an object" 10 | ##object_type=2 : plant 11 | ##object_type=3 : enemy 12 | ##2,3,5,7,11 are possible control vals 13 | 14 | class AgentMind(object): 15 | def __init__(self,junk): 16 | self.my_plant = None 17 | self.mode = 1 18 | self.target_range = random.randrange(50,200) 19 | 20 | self.memory=[] 21 | self.outmemory=[] 22 | 23 | self.uniqueid = 0 24 | 25 | def act(self,view,msg): 26 | x_sum = 0 27 | y_sum = 0 28 | dir = 1 29 | n = len(view.get_plants()) 30 | me = view.get_me() 31 | mp = (mx,my)= me.get_pos() 32 | 33 | #If I don't have an id yet, get one. 34 | if(self.uniqueid==0): 35 | self.uniqueid = self.GetID() 36 | 37 | for a in view.get_agents(): 38 | if (a.get_team()!=me.get_team()): 39 | #If I see an enemy, broadcast it, then attack it. 40 | sentence = [self.uniqueid,3,a] 41 | self.outmemory.append(sentence) 42 | if sentence not in self.outmemory: 43 | msg.send_message(sentence) 44 | return self.Attack(a) 45 | 46 | 47 | 48 | #Go through my messages, then memorize them 49 | for m in msg.get_messages(): 50 | self.memory.append(m) 51 | 52 | 53 | 54 | #Choosing a plant 55 | if(n>0): 56 | #If I see a plant, broadcast it. 57 | sentence = [self.uniqueid,2,view.get_plants()[0]] 58 | self.outmemory.append(sentence) 59 | if sentence not in self.outmemory: 60 | msg.send_message(sentence) 61 | 62 | if (not self.my_plant): 63 | #If I don't have a plant, get one. 64 | self.my_plant = view.get_plants()[0] 65 | elif self.my_plant.get_eff() dist*1.5: 82 | self.mode = 6 83 | 84 | if self.mode == 6: 85 | dist = max(abs(mx-self.target[0]),abs(my-self.target[1])) 86 | if dist > 4: 87 | return cells.Action(cells.ACT_MOVE,self.target) 88 | else: 89 | self.my_plant = None 90 | self.mode = 0 91 | 92 | 93 | if (view.get_me().energy < self.target_range) and (view.get_energy().get(mx,my) > 0): 94 | return self.Eat() 95 | 96 | #If I have a plant, move towards it if i need to. 97 | if self.my_plant: 98 | dist = max(abs(mx-self.my_plant.get_pos()[0]),abs(my-self.my_plant.get_pos()[1])) 99 | if view.get_me().energy < dist*1.5: 100 | (mx,my) = self.my_plant.get_pos() 101 | return self.Move(mx,my) 102 | 103 | #Spawn near my plant, or just move near it. 
104 | if (random.random()>0.9): 105 | return self.Spawn(mx,my) 106 | else: 107 | return self.Move(mx,my) 108 | 109 | 110 | def Spawn(self,x,y): 111 | return cells.Action(cells.ACT_SPAWN,(x+random.randrange(-1,2),y+random.randrange(-1,2))) 112 | 113 | def Move(self,x,y): 114 | return cells.Action(cells.ACT_MOVE,(x+random.randrange(-1,2),y+random.randrange(-1,2))) 115 | 116 | def Attack(self,a): 117 | return cells.Action(cells.ACT_ATTACK,a.get_pos()) 118 | 119 | def Eat(self): 120 | return cells.Action(cells.ACT_EAT) 121 | 122 | 123 | def GetID(self): 124 | ulist = [11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71, 125 | 73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173] 126 | 127 | r = random.randint(5,35) 128 | random.shuffle(ulist) 129 | uid = 1 130 | for i in range(0,r): 131 | uid *= ulist[i] 132 | uid=uid*3571 133 | return uid 134 | -------------------------------------------------------------------------------- /minds/japhet.py: -------------------------------------------------------------------------------- 1 | """ 2 | idea: spawn in waves. everyone saves up food from the plant until a certain time at which everyone spawns soldiers as fast as possible. 3 | Triggered by an attack. Everyone set their spawn requirement according to the distance to the attack so that the spawn will all reach the spot at the same time. 4 | idea: use avg pos for spawned soldier destination, local attack events for local maneuvering 5 | idea: attack messages puts everone in 'report' state, everyone sends report on how ready they are to save up. next tick everyone makes same decision based on reports 6 | """ 7 | 8 | 9 | import random,cells 10 | import math 11 | 12 | class Message: 13 | def __init__(self, pos): 14 | self.pos = pos 15 | 16 | # inspired by zenergizer 17 | diffs = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)] 18 | 19 | class AgentMind: 20 | def __init__(self, args): 21 | if args: 22 | soldier = args[0] 23 | else: 24 | soldier = False 25 | 26 | self.my_plant = None 27 | self.mode = 1 28 | self.moved = 0 29 | #self.target_range = random.randrange(50,1000) 30 | self.setDirection() 31 | self.avgEnemyPos = (0,0) 32 | self.weight = 0 33 | self.soldier = (random.random() < .6) or soldier 34 | self.spawner = False 35 | self.spawnRequirement = 65 36 | self.soldierDirected = None 37 | self.distanceToFight = None 38 | 39 | if self.soldier: 40 | self.energyNeeded = 100 # how much energy we need before we ignore food 41 | else: 42 | self.energyNeeded = 25 43 | self.prevEnergy = None 44 | 45 | 46 | def setDirection(self, rad = None): 47 | if rad != None: 48 | self.direction = rad 49 | else: 50 | self.direction = random.random()*math.pi*2 51 | self.cos = math.cos(self.direction) 52 | self.sin = math.sin(self.direction) 53 | 54 | if self.cos < 0: 55 | self.dx = -1 56 | else: 57 | self.dx = 1 58 | 59 | if self.sin < 0: 60 | self.dy = -1 61 | else: 62 | self.dy = 1 63 | 64 | 65 | def act(self,view,msg): 66 | me = view.get_me() 67 | pos = me.get_pos() 68 | (mx, my) = pos 69 | 70 | if len(view.get_plants()): 71 | self.soldier = False 72 | self.spawner = True 73 | self.spawnRequirement = 55 74 | 75 | 76 | # respond to nearby battles 77 | if self.soldier and len(msg.get_messages()): 78 | newPos = (0, 0) 79 | nCloseBy = 0 80 | for message in msg.get_messages(): 81 | enemyPos = message.pos 82 | cartesian = (enemyPos[0] - mx, enemyPos[1] - my) 83 | distance = max(abs(cartesian[0]), abs(cartesian[1])) 84 | if distance < 30: 85 | nCloseBy+=1 86 | newPos = (newPos[0] + 
enemyPos[0], newPos[1] + enemyPos[1]) 87 | 88 | # does the soldier have orders? any battle call will do 89 | if not self.soldierDirected: 90 | direction = math.atan2(cartesian[1], cartesian[0]) 91 | self.setDirection(direction) 92 | self.soldierDirected = True 93 | 94 | if nCloseBy: 95 | # find average of all close-by battle calls 96 | self.avgEnemyPos = ((self.avgEnemyPos[0] * float(self.weight) + newPos[0]) / float(self.weight + nCloseBy), 97 | (self.avgEnemyPos[1] * float(self.weight) + newPos[1]) / float(self.weight + nCloseBy)) 98 | self.weight = min(25, self.weight+nCloseBy) 99 | 100 | cartesian = (self.avgEnemyPos[0] - mx, self.avgEnemyPos[1] - my) 101 | direction = math.atan2(cartesian[1], cartesian[0]) 102 | self.setDirection(direction) 103 | self.distanceToFight = max(abs(cartesian[0]), abs(cartesian[1])) 104 | self.spawnRequirement = 50 + self.distanceToFight + 10 105 | 106 | # are we stuck? 107 | if self.moved and self.prevPos == pos: 108 | self.setDirection() 109 | self.soldierDirected = False 110 | self.moved = 0 111 | self.prevPos = None 112 | 113 | #attack? 114 | for a in view.get_agents(): 115 | if (a.get_team()!=me.get_team()): 116 | msg.send_message(Message(a.get_pos())) 117 | return cells.Action(cells.ACT_ATTACK,a.get_pos()) 118 | 119 | # freeSpots = where we can move/spawn 120 | freeSpots = diffs[:] 121 | for a in view.get_agents(): 122 | apos = a.get_pos() 123 | dpos = (apos[0] - pos[0], apos[1] - pos[1]) 124 | if dpos in freeSpots: 125 | freeSpots.remove(dpos) 126 | 127 | # see a ton of food nearby? 128 | if not self.spawner: 129 | for diff in diffs: 130 | target = (mx+diff[0], my+diff[1]) 131 | if view.get_energy().get(target[0], target[1]) > 50 and target in freeSpots: 132 | return cells.Action(cells.ACT_MOVE, (mx+diff[0], my+diff[1])) 133 | 134 | # spawn? 135 | if me.energy > self.spawnRequirement: 136 | if len(freeSpots): 137 | random.shuffle(freeSpots) 138 | spawn = freeSpots[0] 139 | spawnSoldier = None 140 | if self.distanceToFight and self.distanceToFight < 20: 141 | spawnSoldier = True 142 | else: 143 | spawnSoldier = False 144 | return cells.Action(cells.ACT_SPAWN, (mx+spawn[0], my+spawn[1], spawnSoldier)) 145 | 146 | 147 | # eat? 148 | if self.spawner or view.get_energy().get(mx, my) > 1 and (me.energy < self.energyNeeded): 149 | self.prevEnergy = me.energy 150 | return cells.Action(cells.ACT_EAT) 151 | 152 | 153 | # move as directed 154 | elif not self.spawner: 155 | dx = dy = 0 156 | while not self.moved: 157 | if random.random() < abs(self.cos): 158 | dx += self.dx 159 | if random.random() < abs(self.sin): 160 | dy += self.dy 161 | self.moved = dx or dy 162 | self.prevPos = pos 163 | return cells.Action(cells.ACT_MOVE, (mx+dx, my+dy)) 164 | 165 | -------------------------------------------------------------------------------- /minds/zenergizer.py: -------------------------------------------------------------------------------- 1 | # 2 | # zenergizer.py 3 | # 4 | # Seth Zenz 5 | # Email: cancatenate my first and last names, all lower case, at gmail.com 6 | # June 2, 2010 7 | # 8 | # There is a lot of ugly machinery in this guy, I made him from tinkering 9 | # around and never cleaned him up completely. Some of the things he does 10 | # I don't really understand why. Lost of numbers aren't tuned. 11 | # 12 | # But if you watch him work, you'll see that he demonstrates the value of 13 | # going for the biggest pile of energy around and eating it -- both in 14 | # exploration and in big melees. This isn't obvious but it works. 
15 | # 16 | 17 | import random,cells 18 | 19 | class AgentMind: 20 | def __init__(self, args): 21 | 22 | self.goto_war_at = 500 23 | self.diffs = [(-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)] 24 | self.mytime = 0 25 | self.am_warrior = False 26 | self.lastattack = (-1,-1,-1) 27 | 28 | if not args: 29 | self.gen = 0 30 | self.war_time = -1 31 | self.startdiff = random.choice(self.diffs) 32 | else: 33 | self.gen = args[0] 34 | self.war_time = args[1] 35 | self.startdiff = args[2] 36 | self.target_range = 5 37 | self.spawn_min = 50 38 | self.migrate_min = 70 39 | 40 | if ((random.random() < 0.7 and not (self.war_time>0)) or self.gen == 0): 41 | self.mygoaldir = (0,0) 42 | else: 43 | if (self.war_time > 0) and (random.random() > 0.1): 44 | self.am_warrior = True 45 | self.mygoaldir = (0,0) 46 | else: 47 | self.mygoaldir = self.startdiff 48 | self.questtime = 0 49 | self.last_x = 999 50 | self.last_y = 999 51 | pass 52 | 53 | def act(self,view,msg): 54 | me = view.get_me() 55 | mp = (mx,my)= me.get_pos() 56 | 57 | # If I think it's time to go to war, sound out a message 58 | if self.mytime > self.goto_war_at and not (self.war_time > 0): 59 | msg.send_message(("war",self.mytime)) 60 | 61 | # personal time counter 62 | self.mytime += 1 63 | 64 | # Interpret war-related messages 65 | for m in msg.get_messages(): 66 | if m[0] == "war" and not self.am_warrior: 67 | self.am_warrior = True 68 | self.war_time = self.mytime 69 | if m[0] == "attack": 70 | self.lastattack = (m[1],m[2],self.mytime) 71 | 72 | # Attack nearby enemies. This always gets done first 73 | for a in view.get_agents(): 74 | if (a.get_team()!=me.get_team()): 75 | msg.send_message(("attack",mx,my)) 76 | return cells.Action(cells.ACT_ATTACK,a.get_pos()) 77 | 78 | # Move if at war 79 | if self.am_warrior and (self.lastattack[2] > self.war_time - 50): 80 | go = True 81 | for plant in view.get_plants(): 82 | if (mx == plant.x and abs(my-plant.y) < 2) or (my == plant.y and abs(mx-plant.x) < 2): 83 | go = False 84 | if go: 85 | tx,ty = (self.lastattack[0],self.lastattack[1]) 86 | if mx != self.lastattack[0]: tx += random.randrange(15,40)*(self.lastattack[0]-mx)/abs((self.lastattack[0]-mx)) 87 | if my != self.lastattack[1]: ty += random.randrange(15,40)*(self.lastattack[1]-my)/abs((self.lastattack[1]-my)) 88 | tx += random.randrange(-4,5) 89 | ty += random.randrange(-4,5) 90 | return cells.Action(cells.ACT_MOVE,(tx,ty)) 91 | 92 | # If very hungry, eat 93 | if ((me.energy < self.target_range) and (view.get_energy().get(mx, my) > 0)): 94 | return cells.Action(cells.ACT_EAT) 95 | 96 | # If on a quest, move. Stop for nearby goodies or if I couldn't move last time. 
97 | if self.mygoaldir != (0,0): 98 | self.questtime += 1 99 | highenergy = False 100 | for diff in self.diffs: 101 | tx,ty = mx+diff[0],my+diff[1] 102 | if (view.get_energy().get(tx,ty) > 200): highenergy = True 103 | if ((len(view.get_plants()) > 0 or highenergy) and self.questtime > 5) or (mx == self.last_x and my == self.last_y): 104 | self.mygoaldir = (0,0) 105 | else: 106 | self.last_x = mx 107 | self.last_y = my 108 | for a in view.get_agents(): 109 | if a.x == mx+self.mygoaldir[0] and a.y == my+self.mygoaldir[1]: 110 | self.mygoaldir = random.choice(self.diffs) # change destination if blocked 111 | if random.random() < 0.9: 112 | return cells.Action(cells.ACT_MOVE,(mx+self.mygoaldir[0],my+self.mygoaldir[1])) 113 | else: 114 | return cells.Action(cells.ACT_MOVE,(mx+self.mygoaldir[0]+random.randrange(-1,2),my+self.mygoaldir[1]+random.randrange(-1,2))) 115 | 116 | # Spawn if I have the energy 117 | if me.energy > self.spawn_min: 118 | random.shuffle(self.diffs) 119 | for diff in self.diffs: 120 | sx,sy = (mx+diff[0],my+diff[1]) 121 | occupied = False 122 | for a in view.get_agents(): 123 | if a.x == sx and a.y == sy: 124 | occupied = True 125 | break 126 | if not occupied: 127 | return cells.Action(cells.ACT_SPAWN,(sx,sy,self.gen+1,self.war_time,diff)) 128 | 129 | # Start a quest if I have the energy and there's no war 130 | if me.energy > self.migrate_min and not self.am_warrior: 131 | self.mygoaldir = random.choice(self.diffs) 132 | self.questtime = 0 133 | self.last_x = -999 134 | self.last_y = -999 135 | return cells.Action(cells.ACT_MOVE,(mx+self.mygoaldir[0],my+self.mygoaldir[1])) 136 | 137 | # Find the highest energy square I can see. If I'm there, eat it. Otherwise move there. 138 | maxenergy = view.get_energy().get(mx,my) 139 | fx,fy = (mx,my) 140 | random.shuffle(self.diffs) 141 | for diff in self.diffs: 142 | tx,ty = (mx+diff[0],my+diff[1]) 143 | occupied = False 144 | for a in view.get_agents(): 145 | if a.x == tx and a.y == ty: 146 | occupied = True 147 | break 148 | if view.get_energy().get(tx,ty) > maxenergy and not occupied: 149 | maxenergy = view.get_energy().get(tx,ty) 150 | fx,fy = (tx,ty) 151 | if (mx,my) == (fx,fy): 152 | return cells.Action(cells.ACT_EAT) 153 | return cells.Action(cells.ACT_MOVE,(fx,fy)) 154 | -------------------------------------------------------------------------------- /minds/evolving_chaos.py: -------------------------------------------------------------------------------- 1 | import cells 2 | import genes 3 | 4 | import cmath 5 | import math 6 | from random import random, randrange 7 | 8 | CallForHelpGene = genes.make_normally_perturbed_gene(0.01) 9 | CallOfDutyGene = genes.make_normally_perturbed_gene(0.01) 10 | DraftDodgerGene = genes.make_normally_perturbed_gene(0.01) 11 | SpawnProbabilityGene = genes.make_normally_perturbed_gene(0.01) 12 | SpawnEnergyThresholdGene = genes.make_normally_perturbed_gene(5, 50, 5000) 13 | ColonizeProbabilityGene = genes.make_normally_perturbed_gene(0.01) 14 | 15 | CallTypeGene = genes.make_drastic_mutation_gene(0.01) 16 | 17 | MODE_NORMAL = 0 18 | MODE_PREP = 5 19 | MODE_ATTACK = 6 20 | MODE_COLONIZE = 7 21 | 22 | def fuzz_coord(c): 23 | return c + randrange(-1,2) 24 | 25 | 26 | class AgentMind(object): 27 | def __init__(self, args): 28 | self.my_plant = None 29 | self.mode = MODE_NORMAL 30 | self.target_range = randrange(50,200) 31 | if args is None: 32 | self.call_for_help = CallForHelpGene(genes.InitializerGene(0.25)) 33 | self.call_of_duty = CallOfDutyGene(genes.InitializerGene(0.75)) 34 | 
self.draft_dodger = DraftDodgerGene(genes.InitializerGene(0.75)) 35 | self.spawn_prob = SpawnProbabilityGene(genes.InitializerGene(0.1)) 36 | self.spawn_energy = SpawnEnergyThresholdGene(genes.InitializerGene(50)) 37 | self.call_type = CallTypeGene(genes.InitializerGene(0)) 38 | self.colonize_prob = ColonizeProbabilityGene(genes.InitializerGene(0.001)) 39 | else: 40 | parent = args[0] 41 | self.call_for_help = parent.call_for_help.spawn() 42 | self.call_of_duty = parent.call_of_duty.spawn() 43 | self.draft_dodger = parent.draft_dodger.spawn() 44 | self.spawn_prob = parent.spawn_prob.spawn() 45 | self.spawn_energy = parent.spawn_energy.spawn() 46 | self.call_type = parent.call_type.spawn() 47 | self.colonize_prob = parent.colonize_prob.spawn() 48 | 49 | def _colonize_from(self, mx, my, mapsize): 50 | tx = randrange(mapsize) 51 | ty = randrange(mapsize) 52 | self._set_target(MODE_COLONIZE, tx, ty, mapsize) 53 | 54 | def _set_target(self, next_mode, tx, ty, mapsize): 55 | self.mode = MODE_PREP 56 | self.next_mode = next_mode 57 | tx += randrange(-3, 4) 58 | ty += randrange(-3, 4) 59 | tx = min(max(tx, 0), mapsize) 60 | ty = min(max(ty, 0), mapsize) 61 | self.target = (tx, ty) 62 | 63 | def act(self,view,msg): 64 | x_sum = 0 65 | y_sum = 0 66 | dir = 1 67 | me = view.me 68 | mp = (mx,my)= (me.x, me.y) 69 | map_size = view.energy_map.width 70 | 71 | cfh_val = self.call_for_help.val 72 | for a in view.agent_views: 73 | if (a.team != me.team): 74 | if random() > cfh_val: 75 | msg.send_message((self.call_type.val, MODE_ATTACK, mp)) 76 | return cells.Action(cells.ACT_ATTACK, (a.x, a.y)) 77 | 78 | my_call_type = self.call_type.val 79 | my_plant = self.my_plant 80 | for message in msg.get_messages(): 81 | call_type, move_mode, m = message 82 | if call_type != my_call_type: 83 | continue 84 | if my_plant: 85 | my_team = me.team 86 | num_nearby = sum(1 for x in view.agent_views if x.team == my_team) 87 | if num_nearby > 1 and random() > self.draft_dodger.val: 88 | tx, ty = m 89 | self._set_target(move_mode, tx, ty, map_size) 90 | elif random() < self.call_of_duty.val: 91 | tx, ty = m 92 | self._set_target(move_mode, tx, ty, map_size) 93 | 94 | del my_plant # Might change later, don't confuse myself by caching it. 
95 | 96 | if view.plant_views: 97 | best_plant = max(view.plant_views, key=lambda x: x.eff) 98 | self.my_plant = best_plant 99 | self.mode = MODE_NORMAL 100 | 101 | if self.mode == MODE_PREP: 102 | dist = max(abs(mx-self.target[0]),abs(my-self.target[1])) 103 | self.target_range = max(dist,self.target_range) 104 | if me.energy > dist*1.5: 105 | self.mode = self.next_mode 106 | 107 | if self.mode == MODE_COLONIZE or self.mode == MODE_ATTACK: 108 | dist = abs(mx-self.target[0]) + abs(my-self.target[1]) 109 | my_team = me.team 110 | if (dist < 2 or 111 | (self.mode == MODE_COLONIZE and dist < 8 and 112 | sum(1 for a in view.agent_views 113 | if a.team == my_team) > 7)): 114 | self.my_plant = None 115 | self.mode = MODE_NORMAL 116 | else: 117 | return cells.Action(cells.ACT_MOVE,self.target) 118 | 119 | if me.energy < self.target_range: 120 | if view.energy_map.get(mx, my) > 0: 121 | return cells.Action(cells.ACT_EAT) 122 | elif self.my_plant is not None: 123 | mp = self.my_plant 124 | self._set_target(MODE_ATTACK, mp.x, mp.y, map_size) 125 | else: 126 | self._colonize_from(mx, my, map_size) 127 | 128 | my_plant = self.my_plant 129 | if my_plant is not None: 130 | dist = max(abs(mx-self.my_plant.get_pos()[0]),abs(my-self.my_plant.get_pos()[1])) 131 | if me.energy < dist*1.5: 132 | return cells.Action(cells.ACT_MOVE, 133 | (fuzz_coord(my_plant.x), fuzz_coord(my_plant.y))) 134 | if (random() < self.colonize_prob.val): 135 | self._colonize_from(my_plant.x, my_plant.y, map_size) 136 | 137 | if (random() < self.spawn_prob.val and 138 | me.energy >= self.spawn_energy.val): 139 | return cells.Action(cells.ACT_SPAWN, 140 | (fuzz_coord(mx), fuzz_coord(my), self)) 141 | else: 142 | return cells.Action(cells.ACT_MOVE, 143 | (fuzz_coord(mx), fuzz_coord(my))) 144 | -------------------------------------------------------------------------------- /minds/ben.py: -------------------------------------------------------------------------------- 1 | # 2 | # Benjamin C. Meyer 3 | # 4 | # Overall rules: 5 | # Agents at plants reproduce as much as possible 6 | # Agents are born with a random direction away from the plant 7 | # Agents send a message with they attack 8 | # Agents always attack 9 | # Agents goto the location of the attack, exception scouts that keep looking 10 | # 11 | # Results 12 | # Large growing swarm that explores that area for all plants as fast as possible 13 | # until the enemy is found. By the time the enemy is found everyone is spread out 14 | # Once the enemy is found everyone heads in that direction and if there are any 15 | # plants between the two they are usually taken before they enemy. 
16 | # Once a new plant is reached more are quickly spawned and that plant is overrun 17 | # From there it is simple attrition 18 | # 19 | 20 | import random, cells 21 | import numpy 22 | 23 | class MessageType(object): 24 | ATTACK = 0 25 | 26 | class AgentMind(object): 27 | def __init__(self, args): 28 | # The direction to walk in 29 | self.x = False 30 | # Don't come to the rescue, continue looking for plants & bad guys 31 | self.scout = (random.random() > 0.9) 32 | # Once we are attacked (mainly) those reproducing at plants should eat up a defense 33 | self.defense = 0 34 | # Don't have everyone walk on the same line to 1) eat as they walk and 2) find still hidden plants easier 35 | self.step = 0 36 | # reproduce for at least X children at a plant before going out and attacking 37 | self.children = 0 38 | self.my_plant = None 39 | pass 40 | def get_available_space_grid(self, me, view): 41 | grid = numpy.ones((3,3)) 42 | for agent in view.get_agents(): 43 | grid[agent.x - me.x + 1, agent.y - me.y + 1] = 0 44 | for plant in view.get_plants(): 45 | grid[plant.x - me.x + 1, plant.y - me.y + 1] = 0 46 | grid[1,1] = 0 47 | return grid 48 | 49 | def smart_spawn(self, me, view): 50 | grid = self.get_available_space_grid(me, view) 51 | for x in xrange(3): 52 | for y in range(3): 53 | if grid[x,y]: 54 | return (x-1, y-1) 55 | return (-1, -1) 56 | 57 | def choose_new_direction(self, view) : 58 | me = view.get_me() 59 | self.x = random.randrange(view.energy_map.width) - me.x 60 | self.y = random.randrange(view.energy_map.height) - me.y 61 | #self.x = random.randrange(-2, 2) 62 | #self.y = random.randrange(-2, 2) 63 | 64 | def act(self, view, msg): 65 | if not self.x: 66 | self.choose_new_direction(view) 67 | 68 | me = view.get_me() 69 | my_pos = (mx,my) = me.get_pos() 70 | 71 | # Attack anyone next to me, but first send out the distress message with my position 72 | for a in view.get_agents(): 73 | if (a.get_team() != me.get_team()): 74 | msg.send_message((MessageType.ATTACK, mx,my)) 75 | if (me.energy > 2000) : 76 | spawn_x, spawn_y = self.smart_spawn(me, view) 77 | return cells.Action(cells.ACT_SPAWN,(mx+spawn_x, my+spawn_y, self)) 78 | return cells.Action(cells.ACT_ATTACK, a.get_pos()) 79 | 80 | # Eat any energy I find until I am 'full' 81 | if (view.get_energy().get(mx, my) > 0) : 82 | if (me.energy < 50) : 83 | return cells.Action(cells.ACT_EAT) 84 | if (me.energy < self.defense and (random.random()>0.3)): 85 | return cells.Action(cells.ACT_EAT) 86 | 87 | if (self.scout and me.energy > 1000 and random.random()>0.5): 88 | spawn_x, spawn_y = self.smart_spawn(me, view) 89 | return cells.Action(cells.ACT_SPAWN,(mx + spawn_x, my + spawn_y, self)) 90 | 91 | # If there is a plant near by go to it and spawn all I can 92 | if (not self.my_plant) : 93 | plants = view.get_plants() 94 | if (len(plants) > 0) : 95 | self.my_plant = plants[0]; 96 | if (self.my_plant and (self.children < 50 or random.random()>0.9)): 97 | self.children += 1; 98 | spawn_x, spawn_y = self.smart_spawn(me, view) 99 | return cells.Action(cells.ACT_SPAWN,(mx + spawn_x, my + spawn_y, self)) 100 | 101 | # If I get the message of help go and rescue! 
102 | map_size = view.energy_map.width 103 | if (self.step == 0 and True != self.scout and (random.random()>0.2)) : 104 | ax = 0; 105 | ay = 0; 106 | best = view.energy_map.width + view.energy_map.height; 107 | message_count = len(msg.get_messages()); 108 | for m in msg.get_messages(): 109 | (type, ox,oy) = m 110 | if (type == MessageType.ATTACK) : 111 | dist = max(abs(mx-ax),abs(my-ay)) 112 | if dist < best: 113 | ax = ox 114 | ay = oy 115 | best = dist 116 | if (ax != 0 and ay != 0) : 117 | self.defense = 2000 118 | self.x = ax - mx 119 | self.y = ay - my 120 | if (message_count > 1) : 121 | # Attack the base, not the front 122 | agent_offset = random.randrange(1, 50) 123 | if (self.x > 0) : 124 | self.x += agent_offset 125 | else : 126 | self.x -= agent_offset 127 | if (self.y > 0) : 128 | self.y += agent_offset 129 | else : 130 | self.y -= agent_offset 131 | # Don't stand still once we get there 132 | if (self.x == 0 and self.y == 0) : 133 | self.choose_new_direction(view) 134 | self.step = random.randrange(3, 10); 135 | 136 | # hit world wall 137 | if mx <= 0 or mx >= map_size-1 or my <= 0 or my >= map_size-1 : 138 | self.choose_new_direction(view) 139 | 140 | # Back to step 0 we can change direction at the next attack 141 | if (self.step > 0): 142 | self.step -= 1; 143 | 144 | # Move quickly randomly in my birth direction 145 | return cells.Action(cells.ACT_MOVE,(mx+self.x+random.randrange(-1,1),my+self.y+random.randrange(-1,1))) 146 | -------------------------------------------------------------------------------- /minds/benvolution.py: -------------------------------------------------------------------------------- 1 | # 2 | # Benjamin C. Meyer 3 | # Modified by Scott Wolchok 4 | # 5 | # Overall rules: 6 | # Agents at plants reproduce as much as possible 7 | # Agents are born with a random direction away from the plant 8 | # Agents send a message with they attack 9 | # Agents always attack 10 | # Agents goto the location of the attack, exception scouts that keep looking 11 | # 12 | # Results 13 | # Large growing swarm that explores that area for all plants as fast as possible 14 | # until the enemy is found. By the time the enemy is found everyone is spread out 15 | # Once the enemy is found everyone heads in that direction and if there are any 16 | # plants between the two they are usually taken before they enemy. 17 | # Once a new plant is reached more are quickly spawned and that plant is overrun 18 | # From there it is simple attrition 19 | # 20 | 21 | import cmath 22 | import random, cells 23 | 24 | import numpy 25 | 26 | import genes 27 | 28 | class MessageType(object): 29 | ATTACK = 0 30 | 31 | class AgentMind(object): 32 | def __init__(self, args): 33 | # The direction to walk in 34 | self.x = None 35 | # Once we are attacked (mainly) those reproducing at plants should eat up a defense. 36 | self.defense = 0 37 | 38 | self.step = 0 39 | self.my_plant = None 40 | self.bumps = 0 41 | self.last_pos = (-1, -1) 42 | 43 | if args is None: 44 | self.strain = 0 45 | self.scout = False 46 | else: 47 | parent = args[0] 48 | self.strain = parent.strain 49 | # Don't come to the rescue, continue looking for plants & bad guys. 
50 | if parent.my_plant: 51 | self.scout = (random.random() > 0.9) 52 | else: 53 | self.scout = False 54 | 55 | 56 | def get_available_space_grid(self, me, view): 57 | grid = numpy.ones((3,3)) 58 | for agent in view.get_agents(): 59 | grid[agent.x - me.x + 1, agent.y - me.y + 1] = 0 60 | for plant in view.get_plants(): 61 | grid[plant.x - me.x + 1, plant.y - me.y + 1] = 0 62 | grid[1,1] = 0 63 | return grid 64 | 65 | def smart_spawn(self, me, view): 66 | grid = self.get_available_space_grid(me, view) 67 | for x in xrange(3): 68 | for y in range(3): 69 | if grid[x,y]: 70 | return (x-1, y-1) 71 | return (-1, -1) 72 | 73 | def would_bump(self, me, view, dir_x, dir_y): 74 | grid = self.get_available_space_grid(me, view) 75 | dx = numpy.sign(dir_x) 76 | dy = numpy.sign(dir_y) 77 | adj_dx = dx + 1 78 | adj_dy = dy + 1 79 | return grid[adj_dx,adj_dy] == 0 80 | 81 | 82 | def act(self, view, msg): 83 | ret = self.act_wrapper(view, msg) 84 | self.last_pos = view.me.get_pos() 85 | return ret 86 | 87 | def act_wrapper(self, view, msg): 88 | me = view.get_me() 89 | my_pos = (mx,my) = me.get_pos() 90 | if my_pos == self.last_pos: 91 | self.bumps += 1 92 | else: 93 | self.bumps = 0 94 | 95 | if self.x is None: 96 | self.x = random.randrange(view.energy_map.width) - me.x 97 | self.y = random.randrange(view.energy_map.height) - me.y 98 | # Attack anyone next to me, but first send out the distress message with my position 99 | for a in view.get_agents(): 100 | if (a.get_team() != me.get_team()): 101 | msg.send_message((self.strain, MessageType.ATTACK, mx,my)) 102 | return cells.Action(cells.ACT_ATTACK, a.get_pos()) 103 | 104 | # Eat any energy I find until I am 'full'. The cost of eating 105 | # is 1, so don't eat just 1 energy. 106 | if view.get_energy().get(mx, my) > 1: 107 | if (me.energy <= 50): 108 | return cells.Action(cells.ACT_EAT) 109 | if (me.energy < self.defense and (random.random()>0.3)): 110 | return cells.Action(cells.ACT_EAT) 111 | 112 | 113 | # If there is a plant near by go to it and spawn all I can 114 | if self.my_plant is None : 115 | plants = view.get_plants() 116 | if plants : 117 | self.my_plant = plants[0] 118 | self.x = self.y = 0 119 | self.strain = self.my_plant.x * 41 + self.my_plant.y 120 | 121 | # Current rules don't make carrying around excess energy 122 | # worthwhile. Generates a very nice "They eat their 123 | # wounded?!" effect. Also burns extra energy so the enemy 124 | # can't use it. 125 | # Spawning takes 25 of the energy and gives it 126 | # to the child and reserves the other 25 for the child's death 127 | # drop. In addition, the action costs 1 unit. Therefore, we 128 | # can't create energy by spawning... 129 | if me.energy >= 51: 130 | spawn_x, spawn_y = self.smart_spawn(me, view) 131 | return cells.Action(cells.ACT_SPAWN, 132 | (me.x + spawn_x, me.y + spawn_y, self)) 133 | 134 | # If I get the message of help go and rescue! 
135 | if not self.step and not self.scout and random.random() > 0.1: 136 | ax = 0; 137 | ay = 0; 138 | best = 500; 139 | message_count = len(msg.get_messages()); 140 | for m in msg.get_messages(): 141 | (strain, type, ox,oy) = m 142 | if strain != self.strain: 143 | continue 144 | if (type == MessageType.ATTACK) : 145 | dist = max(abs(mx-ax), abs(my-ay)) 146 | if dist < best: 147 | ax = ox 148 | ay = oy 149 | best = dist 150 | if ax and ay: 151 | self.defense = 200 152 | dir = ax-mx + (ay - my) * 1j 153 | r, theta = cmath.polar(dir) 154 | theta += 0.02 * random.random() - 0.5 155 | dir = cmath.rect(r, theta) 156 | self.x = dir.real 157 | self.y = dir.imag 158 | # if (message_count > 1) : 159 | # # Attack the base, not the front 160 | # agent_scale = 1 + random.random() 161 | # self.x *= agent_scale 162 | # self.y *= agent_scale 163 | # don't stand still once we get there 164 | if (self.x == 0 and self.y == 0) : 165 | self.x = random.randrange(-1, 2) 166 | self.y = random.randrange(-1, 2) 167 | self.step = random.randrange(20, 100); 168 | 169 | if self.bumps >= 2: 170 | self.x = random.randrange(-3,4) 171 | self.y = random.randrange(-3,4) 172 | self.bumps = 0 173 | 174 | 175 | # hit world wall 176 | map_size = view.energy_map.width 177 | if (mx == 0 or mx == map_size-1) : 178 | self.x = random.randrange(-1,2) 179 | if (my == 0 or my == map_size-1) : 180 | self.y = random.randrange(-1,2) 181 | 182 | # Back to step 0 we can change direction at the next attack. 183 | if self.step: 184 | self.step -= 1 185 | 186 | return cells.Action(cells.ACT_MOVE,(mx+self.x,my+self.y)) 187 | -------------------------------------------------------------------------------- /minds/benmark.py: -------------------------------------------------------------------------------- 1 | # 2 | # Benjamin C. Meyer, improved by Mark O'Connor 3 | # 4 | # Overall rules: 5 | # Agents at plants reproduce as much as possible 6 | # Agents are born with a random direction away from the plant 7 | # Agents send a message with they attack 8 | # Agents love to eat and reproduce (this is really brutal in long battles) 9 | # Agents always attack 10 | # Agents go to the location of the attack, exception scouts that keep looking 11 | # After a while the AI gets bored and rushes the enemy. Then it rests for a 12 | # while and tries again. 13 | # 14 | # Results 15 | # Grab plants quickly without being distracted by nearby enemies 16 | # Quickly convert battlefields into huge swarms of our cells 17 | # Once we think we've done enough expanding, make a concerted push at the enemy 18 | # Relax this after gaining some ground and build up more forces before a final push 19 | # Obliterates the standard AIs, ben and benvolution 20 | # 21 | # There is clearly a lot of room for improvement in plant finding, battle tactics 22 | # and energy management. 
23 | 24 | import random, cells, numpy 25 | from math import sqrt 26 | 27 | armageddon_declared = False 28 | 29 | class MessageType: 30 | ATTACK = 0 31 | 32 | class AgentMind: 33 | def __init__(self, parent_args): 34 | if parent_args == None: # initial instance 35 | self.game_age = 0 36 | else: 37 | self.game_age = parent_args[0].game_age 38 | # The direction to walk in 39 | self.x = random.randrange(-3,4) 40 | self.y = random.randrange(-3,4) 41 | # Don't come to the rescue, continue looking for plants & bad guys 42 | self.scout = random.randrange(0, self.game_age+1) < 200 43 | # Once we are attacked (mainly) those reproducing at plants should eat up a defense 44 | self.defense = 0 45 | # Don't have everyone walk on the same line to 1) eat as they walk and 2) find still hidden plants easier 46 | self.step = 0 47 | self.age = 0 48 | # reproduce for at least X children at a plant before going out and attacking 49 | self.children = 0 50 | self.my_plant = None 51 | self.bumps = 0 52 | self.last_pos = (-1, -1) 53 | 54 | def get_available_spaces(self, me, view): 55 | x, y = me.get_pos() 56 | agents = set((a.x - x, a.y - y) for a in view.get_agents()) 57 | plants = set((p.x - x, p.y - y) for p in view.get_plants()) 58 | my_pos = set((0, 0)) 59 | all = set((x,y) for x in xrange(-1, 2) for y in xrange(-1, 2)) 60 | return all - agents - plants - my_pos 61 | 62 | def smart_spawn(self, me, view): 63 | free = self.get_available_spaces(me, view) 64 | if len(free)>0: 65 | return free.pop() 66 | else: 67 | return None 68 | 69 | def act(self, view, msg): 70 | ret = self.act_wrapper(view, msg) 71 | self.last_pos = view.me.get_pos() 72 | return ret 73 | 74 | def act_wrapper(self, view, msg): 75 | global armageddon_declared 76 | me = view.get_me() 77 | my_pos = (mx,my) = me.get_pos() 78 | # after a while, armageddon! 79 | self.age += 1 80 | self.game_age += 1 81 | bored = (view.energy_map.width+view.energy_map.height) 82 | if self.game_age > bored and self.game_age <= bored*2 or self.game_age > bored*2.5: 83 | self.scout = False 84 | if not armageddon_declared: 85 | print "Mark declares armageddon!" 86 | armageddon_declared = True 87 | if self.game_age > bored*2 and self.game_age < bored*2.5 and armageddon_declared: 88 | print "Mark calls armageddon off..." 89 | armageddon_declared = False 90 | 91 | # Attack anyone next to me, but first send out the distress message with my position 92 | target = next((a for a in view.get_agents() if a.get_team() != me.get_team()), None) 93 | if target: 94 | msg.send_message((MessageType.ATTACK, mx, my)) 95 | return cells.Action(cells.ACT_ATTACK, target.get_pos()) 96 | 97 | # Eat any energy I find until I am 'full' 98 | if view.get_energy().get(mx, my) > 0: 99 | if (me.energy < 50): 100 | return cells.Action(cells.ACT_EAT) 101 | if (me.energy < self.defense):# and (random.random()>0.1)): 102 | return cells.Action(cells.ACT_EAT) 103 | 104 | # If there is a plant near by go to it and spawn all I can 105 | if not self.my_plant and len(view.get_plants())>0: 106 | self.my_plant = view.get_plants()[0] 107 | if self.my_plant: 108 | pos = self.smart_spawn(me, view) 109 | if pos: 110 | return cells.Action(cells.ACT_SPAWN, (me.x + pos[0], me.y + pos[1], self)) 111 | 112 | if me.energy > 50 or (armageddon_declared and me.energy > 400): 113 | pos = self.smart_spawn(me, view) 114 | if pos: 115 | return cells.Action(cells.ACT_SPAWN, (me.x + pos[0], me.y + pos[1], self)) 116 | 117 | # If I get the message of help go and rescue! 
118 | if (self.step == 0 and (random.random()>0.2)) : 119 | calls_to_arms = [((mx-ox)**2+(my-oy)**2, ox, oy) for t, ox, oy in msg.get_messages() if t == MessageType.ATTACK] 120 | if len(calls_to_arms)>0: 121 | best, ox, oy = min(calls_to_arms) 122 | if not self.scout or best < min(self.game_age, (view.energy_map.width/8)**2): 123 | self.defense = 2000 124 | self.x = ox - mx 125 | self.y = oy - my 126 | if (len(calls_to_arms) > 1) : 127 | # Attack the base, not the front 128 | agent_offset = random.randrange(1, view.energy_map.width/6) 129 | if (self.x > 0) : 130 | self.x += agent_offset 131 | else : 132 | self.x -= agent_offset 133 | if (self.y > 0) : 134 | self.y += agent_offset 135 | else : 136 | self.y -= agent_offset 137 | # don't all aim directly at the target 138 | roam = int(sqrt(best)) 139 | if roam > 1: 140 | self.x += random.randrange(-roam, roam+1) 141 | self.y += random.randrange(-roam, roam+1) 142 | # Don't stand still once we get there 143 | if (self.x == 0 and self.y == 0) : 144 | self.x = random.randrange(-3, 4) 145 | self.y = random.randrange(-3, 4) 146 | self.step = random.randrange(3, 30) 147 | 148 | # don't get stuck and die 149 | if self.bumps >= 2: 150 | self.x = random.randrange(-3,4) 151 | self.y = random.randrange(-3,4) 152 | self.bumps = 0 153 | 154 | # hit world wall 155 | if (mx == 0 or mx == view.energy_map.width-1): 156 | self.scout = False 157 | self.x *= -1 158 | self.bumps = 0 159 | if (my == 0 or my == view.energy_map.height-1): 160 | self.scout = False 161 | self.y *= -1 162 | self.bumps = 0 163 | 164 | # Back to step 0 we can change direction at the next attack 165 | if (self.step > 0): 166 | self.step -= 1; 167 | 168 | # Move quickly randomly in my birth direction 169 | return cells.Action(cells.ACT_MOVE,(mx+self.x+random.randrange(-1,2),my+self.y+random.randrange(-1,2))) 170 | -------------------------------------------------------------------------------- /terrain/generator.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import random 3 | import math 4 | 5 | class terrain_generator(): 6 | def create_random(self, size, range, symmetric=False): 7 | """Creates a random terrain map""" 8 | ret = numpy.random.random_integers(0, range, size) 9 | 10 | if symmetric: 11 | ret = self.make_symmetric(ret) 12 | return ret 13 | 14 | def create_streak(self, size, range, symmetric=False): 15 | """Creates a terrain map containing streaks that run from north-west to south-east 16 | 17 | Starts with a single point [[a]] and converts it into [[a, b], [c, d]] 18 | where: 19 | b = a + (random change) 20 | c = a + (random change) 21 | d = b + (random change) and c + (random change) 22 | 23 | Repeat untill size matches required size""" 24 | add_random_range = self.add_random_range 25 | 26 | # Creates the top row 27 | ret = [[add_random_range(0, 0, range)]] 28 | for x in xrange(size[0] - 1): 29 | pos_west = ret[0][-1] 30 | if pos_west <= 0: 31 | ret[0].append(add_random_range(pos_west, 0, 1)) 32 | elif pos_west >= range: 33 | ret[0].append(add_random_range(pos_west, -1, 0)) 34 | else: 35 | ret[0].append(add_random_range(pos_west, -1, 1)) 36 | 37 | # Create the next row down 38 | for y in xrange(size[1] - 1): 39 | pos_north = ret[-1][0] 40 | if pos_north <= 0: 41 | next_row = [add_random_range(pos_north, 0, 1)] 42 | elif pos_north >= range: 43 | next_row = [add_random_range(pos_north,-1, 0)] 44 | else: 45 | next_row = [add_random_range(pos_north, -1, 1)] 46 | 47 | for x in xrange(size[0] - 1): 48 | pos_north = 
ret[-1][x+1] 49 | pos_west = next_row[-1] 50 | if pos_west == pos_north: 51 | if pos_west <= 0: 52 | next_row.append(add_random_range(pos_west, 0, 1)) 53 | elif pos_west >= range: 54 | next_row.append(add_random_range(pos_west, -1, 0)) 55 | else: 56 | next_row.append(add_random_range(pos_west, -1, 1)) 57 | elif abs(pos_west - pos_north) == 2: 58 | next_row.append((pos_west + pos_north)/2) 59 | else: 60 | next_row.append(random.choice((pos_west, pos_north))) 61 | ret.append(next_row) 62 | 63 | if symmetric: 64 | ret = self.make_symmetric(ret) 65 | return numpy.array(ret) 66 | 67 | def create_simple(self, size, range, symmetric=False): 68 | """Creates a procedural terrain map 69 | 70 | Starts with corner points [[a, b], [c, d]] and converts it into [[a, e, b], [f, g, h], [c, i, d]] 71 | where: 72 | e = (a+b)/2 + (random change) 73 | f = (a+c)/2 + (random change) 74 | g = (a+b+c+d)/4 + (random change) 75 | h = (b+d)/2 + (random change) 76 | i = (c+d)/2 + (random change) 77 | 78 | Repeat untill size is greater than required and truncate""" 79 | add_random_range = self.add_random_range 80 | 81 | ret = [[add_random_range(0, 0, range), add_random_range(0, 0, range)], [add_random_range(0, 0, range), add_random_range(0, 0, range)]] 82 | 83 | while len(ret) <= size[0]: 84 | new_ret = [] 85 | 86 | for key_x, x in enumerate(ret): 87 | new_ret.append(x) 88 | 89 | if key_x != len(ret) - 1: 90 | next_row = [] 91 | for key_y, pos_south in enumerate(x): 92 | pos_north = ret[key_x+1][key_y] 93 | pos_avg = (pos_north + pos_south)/2 94 | if pos_avg <= 0: 95 | next_row.append(add_random_range(pos_avg, 0, 1)) 96 | elif pos_avg >= range: 97 | next_row.append(add_random_range(pos_avg, -1, 0)) 98 | else: 99 | next_row.append(add_random_range(pos_avg, -1, 1)) 100 | new_ret.append(next_row) 101 | ret = new_ret 102 | 103 | new_ret = [] 104 | for key_x, x in enumerate(ret): 105 | next_row = [x[0]] 106 | for key_y, pos_east in enumerate(x[1:]): 107 | pos_west = next_row[-1] 108 | if key_x % 2 and not key_y % 2: 109 | pos_north = ret[key_x-1][key_y+1] 110 | pos_south = ret[key_x+1][key_y+1] 111 | pos_avg = (pos_north + pos_south + pos_east + pos_west)/4 112 | if pos_avg <= 0: 113 | next_row.append(add_random_range(pos_avg, 0, 1)) 114 | elif pos_avg >= range: 115 | next_row.append(add_random_range(pos_avg, -1, 0)) 116 | else: 117 | next_row.append(add_random_range(pos_avg, -1, 1)) 118 | else: 119 | pos_avg = (pos_east + pos_west)/2 120 | if pos_avg <= 0: 121 | next_row.append(add_random_range(pos_avg, 0, 1)) 122 | elif pos_avg >= range: 123 | next_row.append(add_random_range(pos_avg, -1, 0)) 124 | else: 125 | next_row.append(add_random_range(pos_avg, -1, 1)) 126 | next_row.append(pos_east) 127 | new_ret.append(next_row) 128 | ret = new_ret 129 | 130 | ret = [x[:size[0]] for x in ret][:size[0]] 131 | 132 | if symmetric: 133 | ret = self.make_symmetric(ret) 134 | return numpy.array(ret) 135 | 136 | def create_perlin(self, size, roughness, symmetric = False): 137 | (width, height) = size 138 | values = numpy.zeros(size) 139 | noise = numpy.random.random_sample((width+1, height+1)) 140 | octaves = (256, 8, 2) 141 | for y in range(height): 142 | for x in range(width): 143 | if symmetric and x < y: 144 | values[x][y] = values[y][x] 145 | continue 146 | nr = 1 147 | for i in octaves: 148 | top = y/i 149 | left = x/i 150 | my = float(y % i) / i 151 | mx = float(x % i) / i 152 | values[x][y] += self.interpolate(noise[top][left], noise[top][left+1], noise[top+1][left], noise[top+1][left+1], mx, my) * math.pow(0.5, nr) 153 | nr += 
1 154 |                 values[x][y] = int(values[x][y] * roughness) 155 |         return numpy.array(values,dtype=int) 156 | 157 |     #Some helper functions. 158 |     def interpolate(self, p1, p2, p3, p4, x, y): 159 |         top = self.interpolate1d(p1, p2, x) 160 |         bottom = self.interpolate1d(p3, p4, x) 161 |         return self.interpolate1d(top, bottom, y) 162 | 163 |     def interpolate1d(self, p1, p2, mu): 164 |         return p1*(1-mu)+p2*mu 165 | 166 |     def add_random_range(self, x, rand_min, rand_max): 167 |         """Returns a number that is between x + rand_min and x + rand_max (inclusive)""" 168 |         return x + random.randrange(rand_min, rand_max + 1) 169 | 170 |     def make_symmetric(self, ret): 171 |         """Takes a 2-dimensional list and makes it symmetrical about the north-west / south-east axis""" 172 |         for x in xrange(len(ret)): 173 |             for y in xrange(x): 174 |                 ret[x][y] = ret[y][x] 175 | 176 |         return ret 177 | -------------------------------------------------------------------------------- /minds/benvolution_genetic.py: -------------------------------------------------------------------------------- 1 | # 2 | #  Benjamin C. Meyer 3 | #  Modified by Scott Wolchok 4 | # 5 | #  Overall rules: 6 | #  Agents at plants reproduce as much as possible 7 | #  Agents are born with a random direction away from the plant 8 | #  Agents send a message when they attack 9 | #  Agents always attack 10 | #  Agents go to the location of the attack, except scouts, which keep looking 11 | # 12 | #  Results 13 | #  Large growing swarm that explores the area for all plants as fast as possible 14 | #  until the enemy is found.  By the time the enemy is found everyone is spread out 15 | #  Once the enemy is found everyone heads in that direction and if there are any 16 | #  plants between the two they are usually taken before the enemy. 17 | #  Once a new plant is reached more are quickly spawned and that plant is overrun 18 | #  From there it is simple attrition 19 | # 20 | 21 | import cells 22 | 23 | from cells import Action 24 | from cells import ACT_SPAWN, ACT_MOVE, ACT_EAT, ACT_RELEASE, ACT_ATTACK 25 | from cells import ACT_LIFT, ACT_DROP 26 | 27 | import cmath 28 | from random import choice, random, randrange 29 | 30 | import numpy 31 | 32 | from genes import InitializerGene, make_normally_perturbed_gene 33 | 34 | 35 | DesiredEnergyGene = make_normally_perturbed_gene(5, cells.ATTACK_POWER, 36 |                                                  cells.ENERGY_CAP) 37 | FieldSpawnEnergyGene = make_normally_perturbed_gene(5, cells.SPAWN_MIN_ENERGY, 38 |                                                     cells.ENERGY_CAP) 39 | PlantSpawnEnergyGene = make_normally_perturbed_gene(5, cells.SPAWN_MIN_ENERGY, 40 |                                                     cells.ENERGY_CAP) 41 | 42 | 43 | def debug(s): 44 |     #print s 45 |     pass 46 | 47 | class MessageType(object): 48 |     ATTACK = 0 49 | 50 | size = 300 #cells.config.getint('terrain', 'bounds') 51 | 52 | class AgentMind(object): 53 |     def __init__(self, args): 54 |         # The direction to walk in 55 |         self.tx = randrange(size) 56 |         self.ty = randrange(size) 57 | 58 |         self.step = 0 59 |         self.my_plant = None 60 |         self.apoptosis = randrange(100, 201) 61 | 62 |         if args is None: 63 |             self.strain = 0 64 |             self.scout = False 65 |             self.genes = genes = {} 66 |             genes['desired_energy'] = DesiredEnergyGene( 67 |                 InitializerGene(2 * cells.SPAWN_MIN_ENERGY)) 68 |             genes['field_spawn_energy'] = FieldSpawnEnergyGene( 69 |                 InitializerGene(4 * cells.ENERGY_CAP / 5)) 70 |             genes['plant_spawn_energy'] = PlantSpawnEnergyGene( 71 |                 InitializerGene(2 * cells.SPAWN_MIN_ENERGY)) 72 |         else: 73 |             parent = args[0] 74 |             self.strain = parent.strain 75 |             # Don't come to the rescue, continue looking for plants & bad guys.
76 |             self.genes = dict((k, v.spawn()) for (k,v) in parent.genes.iteritems()) 77 |             if parent.my_plant is not None: 78 |                 self.scout = (random() > 0.9) 79 |             else: 80 |                 self.scout = False 81 | 82 | 83 |     def get_available_space_grid(self, me, view): 84 |         grid = numpy.ones((3,3)) 85 |         for agent in view.get_agents(): 86 |             grid[agent.x - me.x + 1, agent.y - me.y + 1] = 0 87 |         for plant in view.get_plants(): 88 |             grid[plant.x - me.x + 1, plant.y - me.y + 1] = 0 89 |         grid[1,1] = 0 90 |         return grid 91 | 92 |     def smart_spawn(self, me, view): 93 |         grid = self.get_available_space_grid(me, view) 94 |         ret = [] 95 |         for x in xrange(3): 96 |             for y in range(3): 97 |                 if grid[x,y]: 98 |                     ret.append((x-1, y-1)) 99 |         if ret: 100 |             return choice(ret) 101 |         return (-1, -1) 102 | 103 |     def would_bump(self, me, view, dir_x, dir_y): 104 |         grid = self.get_available_space_grid(me, view) 105 |         dx = numpy.sign(dir_x) 106 |         dy = numpy.sign(dir_y) 107 |         adj_dx = dx + 1 108 |         adj_dy = dy + 1 109 |         return grid[adj_dx,adj_dy] == 0 110 | 111 | 112 |     def act(self, view, msg): 113 |         me = view.me 114 |         mx = me.x 115 |         my = me.y 116 |         my_pos = mx, my 117 | 118 |         tx = self.tx 119 |         ty = self.ty 120 |         if mx == tx and my == ty: 121 |             self.tx = tx = randrange(tx - 5, tx + 6) 122 |             self.ty = ty = randrange(ty - 5, ty + 6) 123 |             self.step = 0 124 | 125 | 126 |         if self.apoptosis <= 0: 127 |             return Action(ACT_MOVE, (0, 0)) 128 | 129 |         # Attack anyone next to me, but first send out the distress message with my position 130 |         my_team = me.team 131 |         for a in view.agent_views: 132 |             if a.team != my_team: 133 |                 ax = a.x 134 |                 ay = a.y 135 |                 msg.send_message((self.strain, MessageType.ATTACK, ax, ay)) 136 |                 return Action(ACT_ATTACK, (ax, ay)) 137 | 138 |         # Eat any energy I find until I am 'full'. The cost of eating 139 |         # is 1, so don't eat just 1 energy. 140 |         my_energy = me.energy 141 |         if self.my_plant is None and view.energy_map.values[my_pos] > 1: 142 |             if my_energy <= self.genes['desired_energy'].val: 143 |                 return Action(ACT_EAT) 144 |             # else: 145 |             #     debug('Not eating. Have %s which is above %s' % 146 |             #           (my_energy, self.genes['desired_energy'].val)) 147 | 148 | 149 |         # If there is a plant nearby go to it and spawn all I can 150 |         if self.my_plant is None : 151 |             plants = view.get_plants() 152 |             if plants: 153 |                 self.my_plant = plants[0] 154 |                 self.tx = tx = mx 155 |                 self.ty = ty = my 156 |                 self.strain = self.my_plant.x * 41 + self.my_plant.y 157 |                 debug('attached to plant, strain %s' % self.strain) 158 |             else: 159 |                 self.apoptosis -= 1 160 |                 if self.apoptosis <= 0: 161 |                     self.my_plant = None 162 |                     return Action(ACT_RELEASE, (mx + 1, my, my_energy - 1)) 163 | 164 | 165 |         if self.my_plant is None: 166 |             spawn_threshold = self.genes['field_spawn_energy'].val 167 |         else: 168 |             spawn_threshold = self.genes['plant_spawn_energy'].val 169 |         if my_energy >= spawn_threshold: 170 |             spawn_x, spawn_y = self.smart_spawn(me, view) 171 |             return Action(ACT_SPAWN, 172 |                           (me.x + spawn_x, me.y + spawn_y, self)) 173 |         elif self.my_plant is not None: 174 |             return Action(ACT_EAT) 175 | 176 | 177 |         # If I get the message of help go and rescue!
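        # (Sketch of the message format handled below, matching the send site above:
        #  every message is a (strain, MessageType.ATTACK, x, y) tuple, and only
        #  calls to arms from the same strain -- i.e. from cells attached to the
        #  same plant -- are acted on.)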
178 | if (not self.step) and (not self.scout) and random() > 0.1: 179 | ax = 0; 180 | ay = 0; 181 | best = 500; 182 | message_count = len(msg.get_messages()); 183 | for strain, type, ox, oy in msg.get_messages(): 184 | if strain != self.strain: 185 | continue 186 | if (type == MessageType.ATTACK) : 187 | dist = max(abs(mx-ax), abs(my-ay)) 188 | if dist < best: 189 | ax = ox 190 | ay = oy 191 | best = dist 192 | if ax and ay: 193 | self.tx = tx = ax + randrange(-3, 4) 194 | self.ty = ty = ay + randrange(-3, 4) 195 | # if (message_count > 1) : 196 | # # Attack the base, not the front 197 | # agent_scale = 1 + random() 198 | # self.x *= agent_scale 199 | # self.y *= agent_scale 200 | # don't stand still once we get there 201 | self.step = randrange(20, 100); 202 | 203 | # Back to step 0 we can change direction at the next attack. 204 | if self.step: 205 | self.step -= 1 206 | 207 | return Action(ACT_MOVE, (tx, ty)) 208 | -------------------------------------------------------------------------------- /minds/seken.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Defines an agent mind that attacks any opponent agents within its view, 3 | attaches itself to the strongest plant it finds, eats when its hungry, 4 | ''' 5 | 6 | import random, cells 7 | import math, numpy 8 | 9 | class AgentType(object): 10 | QUEEN = 0 11 | WORKER = 1 12 | FIGHTER = 2 13 | BUILDER = 3 14 | 15 | class MessageType(object): 16 | FOUND = 0 17 | DEFEND = 1 18 | CLAIM = 2 19 | CLAIMED = 3 20 | 21 | def dist(a, b): 22 | return int(math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)) 23 | 24 | def length(xy): 25 | return dist(xy, (0, 0)) 26 | 27 | def offset(i): 28 | i = i % 9 29 | x = 0 30 | y = 0 31 | if i < 3: 32 | y = -1 33 | if i > 5: 34 | y = 1 35 | 36 | if i == 0 or i == 5 or i == 6: 37 | x = -1 38 | if i == 2 or i == 3 or i == 8: 39 | x = 1 40 | 41 | return (x, y) 42 | 43 | def get_available_space_grid(view, agent): 44 | grid = numpy.ones((3,3)) 45 | for a in view.get_agents(): 46 | grid[a.x - agent.x + 1, a.y - agent.y + 1] = 0 47 | for plant in view.get_plants(): 48 | grid[plant.x - agent.x + 1, plant.y - agent.y + 1] = 0 49 | grid[1,1] = 0 50 | return grid 51 | 52 | def spawnPos(i, type, view, agent): 53 | if type == AgentType.QUEEN: 54 | old = offset(i) 55 | return (-old[0], -old[1]) 56 | grid = get_available_space_grid(view, agent) 57 | for x in xrange(3): 58 | for y in range(3): 59 | if grid[x,y]: 60 | return (x-1, y-1) 61 | return (-1, -1) 62 | 63 | class AgentMind(object): 64 | 65 | def __init__(self, data): 66 | self.target_range = random.randrange(50, 1000) 67 | 68 | if data == None: 69 | self.type = AgentType.QUEEN 70 | self.ratios = (1,) 71 | else: 72 | self.type = data[0] 73 | self.ratios = (1, 1, 1, 2) 74 | 75 | if self.type == AgentType.QUEEN: 76 | self.plant = None 77 | self.claimed = False 78 | self.claiming = False 79 | self.position = 0 80 | self.count = 0 81 | self.directionOfAttack = None 82 | self.newborn = True 83 | self.age = 0 84 | 85 | if self.type == AgentType.WORKER: 86 | self.plantList = list() 87 | self.startPoint = data[1] 88 | 89 | if self.type == AgentType.BUILDER: 90 | self.radius = 10 91 | self.height = 4 92 | self.openings = 1 93 | 94 | self.skip = True 95 | 96 | if self.type == AgentType.FIGHTER and data[1]: 97 | self.direction = data[1] 98 | else: 99 | self.direction = (random.randrange(0, 300), random.randrange(0, 300)) 100 | 101 | def act(self, view, msg): 102 | agent = view.get_me() 103 | position = (x, y)= agent.get_pos() 104 | 105 | if 
dist(self.direction, position) < 2: 106 | self.direction = (random.randrange(0, view.energy_map.width), random.randrange(0, view.energy_map.height)) 107 | 108 | # Attack any opponents. 109 | for a in view.get_agents(): 110 | if a.get_team() != agent.get_team(): 111 | if self.type == AgentType.QUEEN: 112 | msg.send_message((MessageType.DEFEND, (x,y))) 113 | self.ratios = [0, 2, 2, 2] 114 | else: 115 | msg.send_message((MessageType.FOUND, a.get_pos())) 116 | return cells.Action(cells.ACT_ATTACK, a.get_pos()) 117 | 118 | # Process messages 119 | alreadyClaimed = 0 120 | distance = 1000000 121 | for message in msg.get_messages(): 122 | # Queen message behavior 123 | if message[0] == MessageType.CLAIM and self.type == AgentType.QUEEN: 124 | if self.plant != None and self.plant.get_pos() == message[1]: 125 | if self.claimed: 126 | self.newborn = False 127 | msg.send_message((MessageType.CLAIMED, message[1])) 128 | if message[0] == MessageType.CLAIMED and self.type == AgentType.QUEEN: 129 | if self.plant != None and self.plant.get_pos() == message[1]: 130 | if not self.claimed: 131 | alreadyClaimed += 1 132 | if message[0] == MessageType.FOUND and self.type == AgentType.QUEEN: 133 | if dist(message[1], position) < distance: 134 | self.directionOfAttack = message[1] 135 | distance = dist(message[1], position) 136 | 137 | # Worker message behavior 138 | if self.type == AgentType.WORKER: 139 | if message[0] == MessageType.CLAIM: 140 | found = False 141 | for p in self.plantList: 142 | if p == message[1]: 143 | found = True 144 | break 145 | if not found: 146 | self.plantList.append(message[1]) 147 | 148 | if message[0] == MessageType.DEFEND or message[0] == MessageType.FOUND: 149 | aDistance = dist(position, message[1]) 150 | if aDistance < 20 and aDistance < distance: 151 | self.type = AgentType.FIGHTER 152 | self.direction = message[1] 153 | distance = aDistance 154 | 155 | # Fighter message behavior 156 | if self.type == AgentType.FIGHTER: 157 | if message[0] == MessageType.DEFEND or message[0] == MessageType.FOUND: 158 | if distance > dist(position, message[1]): 159 | self.direction = message[1] 160 | distance = dist(position, message[1]) 161 | 162 | if self.type == AgentType.WORKER: 163 | if dist(position, self.startPoint) > 2: 164 | plants = view.get_plants() 165 | if plants: 166 | found = False 167 | for p in self.plantList: 168 | if p == plants[0].get_pos(): 169 | found = True 170 | break 171 | if not found: 172 | self.type = AgentType.QUEEN 173 | self.ratios = (1,1,1,2) 174 | self.newborn = True 175 | self.plant = None 176 | self.claimed = False 177 | self.claiming = False 178 | self.position = 0 179 | self.count = 0 180 | self.directionOfAttack = None 181 | self.age = 0 182 | del self.plantList 183 | 184 | # Eat if hungry. 
185 | hungry = (agent.energy < 50) 186 | energy_here = view.get_energy().get(x, y) 187 | food = (energy_here > 0) 188 | if hungry and food: 189 | return cells.Action(cells.ACT_EAT) 190 | 191 | if agent.energy > 500: 192 | sp = spawnPos(0, AgentType.WORKER, view, agent) 193 | sp = (sp[0]+x, sp[1]+y, AgentType.WORKER, (x, y)) 194 | return cells.Action(cells.ACT_SPAWN, sp) 195 | 196 | if random.random() < 0.65: 197 | if random.random() < 0.4: 198 | if view.get_energy().get(x, y) > 0: 199 | return cells.Action(cells.ACT_EAT) 200 | 201 | direction = [self.direction[0]-x, self.direction[1]-y] 202 | if direction[0] > 0: 203 | direction[0] = 1 204 | elif direction[0] == 0: 205 | direction[0] = 0 206 | else: 207 | direction[0] = -1 208 | 209 | if direction[1] > 0: 210 | direction[1] = 1 211 | elif direction[1] == 0: 212 | direction[1] = 0 213 | else: 214 | direction[1] = -1 215 | 216 | position = (position[0]+direction[0], position[1]+direction[1]) 217 | else: 218 | position = (x + random.randrange(-1, 2), y + random.randrange(-1, 2)) 219 | return cells.Action(cells.ACT_MOVE, position) 220 | 221 | if self.type == AgentType.FIGHTER: 222 | # Eat if hungry. 223 | hungry = (agent.energy < 100) 224 | energy_here = view.get_energy().get(x, y) 225 | food = (energy_here > 0) 226 | if hungry and food: 227 | return cells.Action(cells.ACT_EAT) 228 | 229 | if agent.energy > 1000: 230 | sp = spawnPos(0, AgentType.FIGHTER, view, agent) 231 | sp = (sp[0]+x, sp[1]+y, AgentType.FIGHTER, (x, y)) 232 | return cells.Action(cells.ACT_SPAWN, sp) 233 | 234 | if random.random() < 0.85 or dist(position, self.direction) < 8: 235 | direction = [self.direction[0]-x, self.direction[1]-y] 236 | if direction[0] > 0: 237 | direction[0] = 1 238 | elif direction[0] == 0: 239 | direction[0] = 0 240 | else: 241 | direction[0] = -1 242 | 243 | if direction[1] > 0: 244 | direction[1] = 1 245 | elif direction[1] == 0: 246 | direction[1] = 0 247 | else: 248 | direction[1] = -1 249 | 250 | position = (position[0]+direction[0], position[1]+direction[1]) 251 | else: 252 | position = (x + random.randrange(-1, 2), y + random.randrange(-1, 2)) 253 | return cells.Action(cells.ACT_MOVE, position) 254 | 255 | 256 | # Queen Stuff 257 | if self.type == AgentType.QUEEN: 258 | # Check claim 259 | if self.claiming: 260 | if self.skip: 261 | self.skip = False 262 | else: 263 | if alreadyClaimed > 39: 264 | # Try again 265 | self.plant = None 266 | self.claiming = False 267 | else: 268 | # We have a throne 269 | self.claimed = True 270 | self.claiming = False 271 | self.position = alreadyClaimed 272 | print alreadyClaimed 273 | self.skip = True 274 | 275 | # Get a plant 276 | if self.plant == None and view.get_plants(): 277 | self.age += 1 278 | if self.age > 5: 279 | self.type = AgentType.WORKER 280 | self.plantList = list() 281 | 282 | if view.get_plants(): 283 | plants = view.get_plants() 284 | bestPlant = plants[0] 285 | distance = dist(position, bestPlant.get_pos()) 286 | for plant in plants: 287 | if distance > dist(position, bestPlant.get_pos()): 288 | distance = dist(position, bestPlant.get_pos()) 289 | bestPlant = plant 290 | 291 | self.plant = bestPlant 292 | self.claiming = True 293 | msg.send_message((MessageType.CLAIM, self.plant.get_pos())) 294 | 295 | # Check position 296 | if self.claimed == False and self.claiming == False: 297 | # Move randomly 298 | if random.random() > 0.75: 299 | direction = [self.direction[0]-x, self.direction[1]-y] 300 | if direction[0] > 0: 301 | direction[0] = 1 302 | elif direction[0] == 0: 303 | direction[0] = 
0 304 | else: 305 | direction[0] = -1 306 | 307 | if direction[1] > 0: 308 | direction[1] = 1 309 | elif direction[1] == 0: 310 | direction[1] = 0 311 | else: 312 | direction[1] = -1 313 | 314 | position = (position[0]+direction[0], position[1]+direction[1]) 315 | else: 316 | position = (x + random.randrange(-1, 2), y + random.randrange(-1, 2)) 317 | return cells.Action(cells.ACT_MOVE, position) 318 | 319 | if self.claimed: 320 | # Move towards 321 | off = offset(self.position) 322 | pos = self.plant.get_pos() 323 | pos = (pos[0]+off[0], pos[1]+off[1]) 324 | distance = dist(pos, position) 325 | 326 | if distance > 0: 327 | if agent.energy > distance * 1.1: 328 | if random.random() > 0.6: 329 | pos = (x + random.randrange(-1, 2), y + random.randrange(-1, 2)) 330 | return cells.Action(cells.ACT_MOVE, pos) 331 | else: 332 | # Cannot move in one go eat if pos or move a bit 333 | if view.get_energy().get(x, y) > 0: 334 | return cells.Action(cells.ACT_EAT) 335 | mxy = [0, 0] 336 | if self.plant.get_pos()[0] > x: 337 | mxy[0] = 1 338 | elif self.plant.get_pos()[0] < x: 339 | mxy[0] = -1 340 | if self.plant.get_pos()[1] > y: 341 | mxy[1] = 1 342 | elif self.plant.get_pos()[1] < y: 343 | mxy[1] = -1 344 | 345 | mxy = (mxy[0]+x, mxy[1]+y) 346 | return cells.Action(cells.ACT_MOVE, mxy) 347 | 348 | # Breed or Eat 349 | nxt = self.ratios[self.count%len(self.ratios)] 350 | spawn = [x, y, nxt] 351 | spawning = False 352 | 353 | if self.newborn and agent.energy > 100: 354 | spawn = [x, y, AgentType.QUEEN] 355 | spawnOff = spawnPos(self.position, AgentType.QUEEN, view, agent) 356 | spawning = True 357 | if nxt == AgentType.QUEEN and agent.energy > 100: 358 | # Spawn new queen 359 | spawnOff = spawnPos(self.position, nxt, view, agent) 360 | spawning = True 361 | if nxt == AgentType.WORKER and agent.energy > 100: 362 | # Spawn new worker 363 | spawnOff = spawnPos(self.position, nxt, view, agent) 364 | spawn.append(position) 365 | spawning = True 366 | if nxt == AgentType.FIGHTER and agent.energy > 100: 367 | # Spawn new fighter 368 | spawnOff = spawnPos(self.position, nxt, view, agent) 369 | spawn.append(self.directionOfAttack) 370 | spawning = True 371 | if nxt == AgentType.BUILDER and agent.energy > 100: 372 | # Spawn new builder 373 | spawnOff = spawnPos(self.position, nxt, view, agent) 374 | spawning = True 375 | 376 | if spawning: 377 | spawn[0] += spawnOff[0] 378 | spawn[1] += spawnOff[1] 379 | self.count = self.count + 1 380 | return cells.Action(cells.ACT_SPAWN, spawn) 381 | 382 | # Must eat 383 | return cells.Action(cells.ACT_EAT) 384 | 385 | 386 | if random.random() > 0.75: 387 | direction = (self.direction[0]-x, self.direction[1]-y) 388 | if direction[0] > 0: 389 | direction[0] = 1 390 | elif direction[0] == 0: 391 | direction[0] = 0 392 | else: 393 | direction[0] = -1 394 | 395 | if direction[1] > 0: 396 | direction[1] = 1 397 | elif direction[1] == 0: 398 | direction[1] = 0 399 | else: 400 | direction[1] = -1 401 | 402 | position = (position[0]+direction[0], position[1]+direction[1]) 403 | else: 404 | position = (x + random.randrange(-1, 2), y + random.randrange(-1, 2)) 405 | return cells.Action(cells.ACT_MOVE, position) 406 | -------------------------------------------------------------------------------- /minds/ben2.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2010, Benjamin C. 
Meyer 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions 6 | # are met: 7 | # 1. Redistributions of source code must retain the above copyright 8 | # notice, this list of conditions and the following disclaimer. 9 | # 2. Redistributions in binary form must reproduce the above copyright 10 | # notice, this list of conditions and the following disclaimer in the 11 | # documentation and/or other materials provided with the distribution. 12 | # 3. Neither the name of the Benjamin Meyer nor the names of its contributors 13 | # may be used to endorse or promote products derived from this software 14 | # without specific prior written permission. 15 | # 16 | # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 17 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 | # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 20 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 | # SUCH DAMAGE. 27 | # 28 | 29 | # 30 | # Idea: 31 | # Keep track of how long we have been alive relative to our plant. 32 | # The more time has past, the farther away we will go on a rescue mission and 33 | # the more energy we will gather before heading out 34 | # 35 | # Result: 36 | # strong cells have a good chance of making it to another plant where there 37 | # are many attacks one after another causing the battle line to shift to a plant 38 | # 39 | # At the start (weak) cells goto closer attacks and not far away 40 | # At the end (strong) cells are sent straight to the (far away) attacking area 41 | # 42 | 43 | import random, cells 44 | import cmath, numpy 45 | 46 | class Type: 47 | PARENT = 0 48 | SCOUT = 1 49 | 50 | class MessageType: 51 | ATTACK = 0 52 | FOUNDPLANT = 1 53 | 54 | class AgentMind: 55 | def __init__(self, args): 56 | self.id = 0 57 | self.time = 0 58 | 59 | self.type = Type.SCOUT 60 | # scout vars 61 | self.x = None 62 | self.y = None 63 | self.search = (random.random() > 0.9) # AKA COW, mostly just go and eat up the world grass so the other team can't 64 | self.last_pos = (-1,-1) 65 | self.bumps = 0 66 | self.step = 0 67 | self.rescue = None 68 | # parent vars 69 | self.children = 0 70 | self.plant = None 71 | self.plants = [] 72 | if args: 73 | parent = args[0] 74 | self.time = parent.time 75 | self.plants = parent.plants 76 | if len(self.plants) > 7: 77 | self.id = random.randrange(0,1) 78 | if parent.search: 79 | self.search = (random.random() > 0.2) 80 | pass 81 | 82 | def choose_new_direction(self, view, msg): 83 | me = view.get_me() 84 | self.x = random.randrange(-9,9) 85 | self.y = random.randrange(-9,9) 86 | if self.x == 0 and self.y == 0: 87 | self.choose_new_direction(view, msg) 88 | self.step = 3 89 | self.bumps = 0 90 | 91 | def act_scout(self, view, msg): 92 | me = view.get_me() 93 | if self.x is None: 94 | self.choose_new_direction(view, msg) 95 | 96 | currentEnergy = view.get_energy().get(me.x, me.y) 97 | 98 | # Grabbing a 
plant is the most important thing, we get this we win 99 | plants = view.get_plants() 100 | if plants : 101 | plant = (plants[0]).get_pos() 102 | if plant != self.plant: 103 | if self.plants.count(plant) == 0: 104 | #print "Found a new plant, resetting time: " + str(len(self.plants)) 105 | msg.send_message((MessageType.FOUNDPLANT, 0, self.id, me.x, me.y)) 106 | self.plants.append(plant) 107 | self.time = 0 108 | self.plant = plant 109 | self.type = Type.PARENT 110 | self.search = None 111 | #print str(len(self.plants)) + " " + str(me.get_team()) 112 | return self.act_parent(view, msg) 113 | else: 114 | # Don't let this go to waste 115 | if currentEnergy >= 3: 116 | return cells.Action(cells.ACT_EAT) 117 | 118 | if self.search: 119 | if me.energy > 100: 120 | spawn_x, spawn_y = self.smart_spawn(me, view) 121 | return cells.Action(cells.ACT_SPAWN, (me.x + spawn_x, me.y + spawn_y, self)) 122 | if (currentEnergy > 3) : 123 | return cells.Action(cells.ACT_EAT) 124 | 125 | # Make sure we wont die 126 | if (me.energy < 25 and currentEnergy > 1) : 127 | return cells.Action(cells.ACT_EAT) 128 | 129 | # hit world wall, bounce back 130 | map_size = view.energy_map.width 131 | if me.x <= 0 or me.x >= map_size-1 or me.y <= 0 or me.y >= map_size-1 : 132 | self.choose_new_direction(view, msg) 133 | 134 | # If I get the message of help go and rescue! 135 | if self.step == 0 and (not self.search) and (random.random()>0.2): 136 | ax = 0; 137 | ay = 0; 138 | best = 300 + self.time / 2 139 | message_count = len(msg.get_messages()); 140 | for m in msg.get_messages(): 141 | (type, count, id, ox, oy) = m 142 | if (id == self.id and type == MessageType.ATTACK) : 143 | dist = abs(me.x-ax) + abs(me.y-ay) 144 | if count >= 2: 145 | dist /= count 146 | if dist < best and dist > 1: 147 | ax = ox 148 | ay = oy 149 | best = dist 150 | if (ax != 0 and ay != 0) : 151 | dir = ax-me.x + (ay - me.y) * 1j 152 | r, theta = cmath.polar(dir) 153 | theta += 0.1 * random.random() - 0.5 154 | dir = cmath.rect(r, theta) 155 | self.x = dir.real 156 | self.y = dir.imag 157 | # if (message_count > 1) : 158 | # # Attack the base, not the front 159 | # agent_scale = 1 + random.random() 160 | # self.x *= agent_scale 161 | # self.y *= agent_scale 162 | # don't stand still once we get there 163 | if (self.x == 0 and self.y == 0) : 164 | self.x = random.randrange(-2, 2) 165 | self.y = random.randrange(-2, 2) 166 | 167 | self.step = random.randrange(1, min(30, max(2,int((best+2)/2)))) 168 | self.rescue = True 169 | 170 | if not self.rescue and me.energy > cells.SPAWN_MIN_ENERGY and me.energy < 100: 171 | spawn_x, spawn_y = self.smart_spawn(me, view) 172 | return cells.Action(cells.ACT_SPAWN,(me.x + spawn_x, me.y + spawn_y, self)) 173 | 174 | # Back to step 0 we can change direction at the next attack. 
175 | if self.step: 176 | self.step -= 1 177 | 178 | return self.smart_move(view, msg) 179 | 180 | def get_available_space_grid(self, me, view): 181 | grid = numpy.ones((3,3)) 182 | grid[1,1] = 0 183 | for agent in view.get_agents(): 184 | grid[agent.x - me.x + 1, agent.y - me.y + 1] = 0 185 | for plant in view.get_plants(): 186 | grid[plant.x - me.x + 1, plant.y - me.y + 1] = 0 187 | return grid 188 | 189 | def smart_move(self, view, msg): 190 | me = view.get_me() 191 | 192 | # make sure we can actually move 193 | if me.get_pos() == self.last_pos: 194 | self.bumps += 1 195 | else: 196 | self.bumps = 0 197 | if self.bumps >= 2: 198 | self.choose_new_direction(view, msg) 199 | self.last_pos = view.me.get_pos() 200 | 201 | offsetx = 0 202 | offsety = 0 203 | if self.search: 204 | offsetx = random.randrange(-1, 1) 205 | offsety = random.randrange(-1, 1) 206 | 207 | wx = me.x + self.x + offsetx 208 | wy = me.y + self.y + offsety 209 | 210 | grid = self.get_available_space_grid(me, view) 211 | 212 | bestEnergy = 2 213 | bestEnergyX = -1 214 | bestEnergyY = -1 215 | 216 | for x in xrange(3): 217 | for y in range(3): 218 | if grid[x,y]: 219 | e = view.get_energy().get(me.x + x-1, me.y + y-1) 220 | if e > bestEnergy: 221 | bestEnergy = e; 222 | bestEnergyX = x 223 | bestEnergyY = y; 224 | 225 | # Check the desired location first 226 | if (wx < me.x) : bx = 0 227 | if (wx == me.x) : bx = 1 228 | if (wx > me.x) : bx = 2 229 | if (wy < me.y) : by = 0 230 | if (wy == me.y) : by = 1 231 | if (wy > me.y) : by = 2 232 | if bx == bestEnergyX and bestEnergy > 1: 233 | return cells.Action(cells.ACT_MOVE,(me.x + bestEnergyX-1, me.y + bestEnergyY-1)) 234 | if by == bestEnergyY and bestEnergy > 1: 235 | return cells.Action(cells.ACT_MOVE,(me.x + bestEnergyX-1, me.y + bestEnergyY-1)) 236 | 237 | if grid[bx,by]: 238 | return cells.Action(cells.ACT_MOVE,(wx, wy)) 239 | 240 | if bestEnergy > 1: 241 | return cells.Action(cells.ACT_MOVE,(me.x + bestEnergyX-1, me.y + bestEnergyY-1)) 242 | 243 | if grid[2,0] and random.random() > 0.5: 244 | return cells.Action(cells.ACT_MOVE,(me.x + 1, me.y - 1)) 245 | 246 | for x in xrange(3): 247 | for y in range(3): 248 | if grid[x,y]: 249 | return cells.Action(cells.ACT_MOVE,(x-1, y-1)) 250 | return cells.Action(cells.ACT_MOVE,(wx, wy)) 251 | 252 | def smart_spawn(self, me, view): 253 | grid = self.get_available_space_grid(me, view) 254 | 255 | # So we don't always spawn in our top left 256 | if grid[2,0] and random.random() > 0.8: 257 | return (1, -1) 258 | 259 | for x in xrange(3): 260 | for y in range(3): 261 | if grid[x,y]: 262 | return (x-1, y-1) 263 | return (-1, -1) 264 | 265 | def should_attack(self, view, msg): 266 | me = view.get_me() 267 | count = 0 268 | for a in view.get_agents(): 269 | if a.get_team() != me.get_team(): 270 | count += 1 271 | if count > 0: 272 | currentEnergy = view.get_energy().get(me.x, me.y) 273 | if currentEnergy > 20: 274 | return cells.Action(cells.ACT_EAT) 275 | if self.plant: 276 | count = 10 277 | msg.send_message((MessageType.ATTACK, count, self.id, me.x, me.y)) 278 | return cells.Action(cells.ACT_ATTACK, a.get_pos()) 279 | return None 280 | 281 | def check(self, x, y, view): 282 | plant_pos = (px, py) = self.plant 283 | me = view.get_me() 284 | oldx = x 285 | oldy = y 286 | x += me.x 287 | y += me.y 288 | # Make sure the plant is always populated 289 | grid = self.get_available_space_grid(me, view) 290 | 291 | if abs(px - x) <= 1 and abs(py - y) <= 1: 292 | grid = self.get_available_space_grid(me, view) 293 | if grid[oldx+1, oldy+1] == 1: 
294 | #print str(x) + " " + str(y) + " " + str(abs(px - x)) + " " + str(abs(py - y)) 295 | return True 296 | return None 297 | 298 | def act_parent(self, view, msg): 299 | me = view.get_me() 300 | plant_pos = (px, py) = self.plant 301 | 302 | # Make sure the plant is always populated 303 | grid = self.get_available_space_grid(me, view) 304 | xoffset = -2 305 | yoffset = -2 306 | if self.check( 1, 0, view): xoffset = 1; yoffset = 0; # right 307 | if self.check(-1, 0, view): xoffset = -1; yoffset = 0; # left 308 | if self.check( 0, 1, view): xoffset = 0; yoffset = 1; # down 309 | if self.check( 0, -1, view): xoffset = 0; yoffset = -1; # up 310 | if self.check( -1, -1, view): xoffset = -1; yoffset = -1; # diag left 311 | if self.check( -1, 1, view): xoffset = -1; yoffset = 1; # diag right 312 | if self.check( 1, -1, view): xoffset = 1; yoffset = -1; # diag left 313 | if self.check( 1, 1, view): xoffset = 1; yoffset = 1; # diag right 314 | if xoffset != -2: 315 | if me.energy < cells.SPAWN_MIN_ENERGY : return cells.Action(cells.ACT_EAT) 316 | # When we are populating plant cells we must spawn some children in case we are being attacked 317 | # When we are all alone we don't spawn any cheap children and only do high quality cells 318 | self.children += 1 319 | return cells.Action(cells.ACT_SPAWN, (me.x + xoffset, me.y + yoffset, self)) 320 | 321 | # When there are more then two plants always charge up and then leave 322 | # when there are less then two plants only half of the cells should charge up and then leave 323 | if self.children <= 0: 324 | if me.energy >= cells.ENERGY_CAP or me.energy > cells.SPAWN_MIN_ENERGY + self.time + random.randrange(-10,100): 325 | self.type = Type.SCOUT 326 | return self.act_scout(view, msg) 327 | return cells.Action(cells.ACT_EAT) 328 | 329 | if me.energy < cells.SPAWN_MIN_ENERGY : 330 | return cells.Action(cells.ACT_EAT) 331 | self.children -= 1 332 | spawn_x, spawn_y = self.smart_spawn(me, view) 333 | return cells.Action(cells.ACT_SPAWN,(me.x + spawn_x, me.y + spawn_y, self)) 334 | 335 | def act(self, view, msg): 336 | self.time += 1 337 | r = self.should_attack(view, msg) 338 | if r: return r 339 | 340 | if self.type == Type.PARENT: 341 | return self.act_parent(view, msg) 342 | if self.type == Type.SCOUT: 343 | return self.act_scout(view, msg) 344 | -------------------------------------------------------------------------------- /cells.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import ConfigParser 4 | import random 5 | import sys 6 | import time 7 | 8 | import numpy 9 | import pygame, pygame.locals 10 | 11 | from terrain.generator import terrain_generator 12 | 13 | if not pygame.font: print 'Warning, fonts disabled' 14 | 15 | try: 16 | import psyco 17 | psyco.full() 18 | except ImportError: 19 | pass 20 | 21 | 22 | def get_mind(name): 23 | full_name = 'minds.' + name 24 | __import__(full_name) 25 | mind = sys.modules[full_name] 26 | mind.name = name 27 | return mind 28 | 29 | 30 | 31 | STARTING_ENERGY = 20 32 | SCATTERED_ENERGY = 10 33 | 34 | #Plant energy output. Remember, this should always be less 35 | #than ATTACK_POWER, because otherwise cells sitting on the plant edge 36 | #might become invincible. 37 | PLANT_MAX_OUTPUT = 20 38 | PLANT_MIN_OUTPUT = 5 39 | 40 | #BODY_ENERGY is the amount of energy that a cells body contains 41 | #It can not be accessed by the cells, think of it as: they can't 42 | #eat their own body. It is released again at death. 
43 | BODY_ENERGY = 25 44 | ATTACK_POWER = 30 45 | #Amount by which attack power is modified for each 1 height difference. 46 | ATTACK_TERR_CHANGE = 2 47 | ENERGY_CAP = 2500 48 | 49 | #SPAWN_COST is the energy it takes to seperate two cells from each other. 50 | #It is lost forever, not to be confused with the BODY_ENERGY of the new cell. 51 | SPAWN_LOST_ENERGY = 20 52 | SUSTAIN_COST = 0 53 | MOVE_COST = 1 54 | #MESSAGE_COST = 0 55 | 56 | #BODY_ENERGY + SPAWN_COST is invested to create a new cell. What remains is split evenly. 57 | #With this model we only need to make sure a cell can't commit suicide by spawning. 58 | SPAWN_TOTAL_ENERGY = BODY_ENERGY + SPAWN_LOST_ENERGY 59 | 60 | TIMEOUT = None 61 | 62 | config = ConfigParser.RawConfigParser() 63 | 64 | 65 | def get_next_move(old_x, old_y, x, y): 66 | ''' Takes the current position, old_x and old_y, and a desired future position, x and y, 67 | and returns the position (x,y) resulting from a unit move toward the future position.''' 68 | dx = numpy.sign(x - old_x) 69 | dy = numpy.sign(y - old_y) 70 | return (old_x + dx, old_y + dy) 71 | 72 | 73 | class Game(object): 74 | ''' Represents a game between different minds. ''' 75 | def __init__(self, bounds, mind_list, symmetric, max_time, headless = False): 76 | self.size = self.width, self.height = (bounds, bounds) 77 | self.mind_list = mind_list 78 | self.messages = [MessageQueue() for x in mind_list] 79 | self.headless = headless 80 | if not self.headless: 81 | self.disp = Display(self.size, scale=2) 82 | self.time = 0 83 | self.clock = pygame.time.Clock() 84 | self.max_time = max_time 85 | self.tic = time.time() 86 | self.terr = ScalarMapLayer(self.size) 87 | self.terr.set_perlin(10, symmetric) 88 | self.minds = [m[1].AgentMind for m in mind_list] 89 | 90 | self.show_energy = True 91 | self.show_agents = True 92 | 93 | self.energy_map = ScalarMapLayer(self.size) 94 | self.energy_map.set_streak(SCATTERED_ENERGY, symmetric) 95 | 96 | self.plant_map = ObjectMapLayer(self.size) 97 | self.plant_population = [] 98 | 99 | self.agent_map = ObjectMapLayer(self.size) 100 | self.agent_population = [] 101 | self.winner = None 102 | if symmetric: 103 | self.n_plants = 7 104 | else: 105 | self.n_plants = 14 106 | 107 | # Add some randomly placed plants to the map. 108 | for x in xrange(self.n_plants): 109 | mx = random.randrange(1, self.width - 1) 110 | my = random.randrange(1, self.height - 1) 111 | eff = random.randrange(PLANT_MIN_OUTPUT, PLANT_MAX_OUTPUT) 112 | p = Plant(mx, my, eff) 113 | self.plant_population.append(p) 114 | if symmetric: 115 | p = Plant(my, mx, eff) 116 | self.plant_population.append(p) 117 | self.plant_map.lock() 118 | self.plant_map.insert(self.plant_population) 119 | self.plant_map.unlock() 120 | 121 | # Create an agent for each mind and place on map at a different plant. 122 | self.agent_map.lock() 123 | for idx in xrange(len(self.minds)): 124 | # BUG: Number of minds could exceed number of plants? 125 | (mx, my) = self.plant_population[idx].get_pos() 126 | fuzzed_x = mx 127 | fuzzed_y = my 128 | while fuzzed_x == mx and fuzzed_y == my: 129 | fuzzed_x = mx + random.randrange(-1, 2) 130 | fuzzed_y = my + random.randrange(-1, 2) 131 | self.agent_population.append(Agent(fuzzed_x, fuzzed_y, STARTING_ENERGY, idx, 132 | self.minds[idx], None)) 133 | self.agent_map.insert(self.agent_population) 134 | self.agent_map.unlock() 135 | 136 | def run_plants(self): 137 | ''' Increases energy at and around (adjacent position) for each plant. 138 | Increase in energy is equal to the eff(?) 
value of each plant.''' 139 |         for p in self.plant_population: 140 |             (x, y) = p.get_pos() 141 |             for dx in (-1, 0, 1): 142 |                 for dy in (-1, 0, 1): 143 |                     adj_x = x + dx 144 |                     adj_y = y + dy 145 |                     if self.energy_map.in_range(adj_x, adj_y): 146 |                         self.energy_map.change(adj_x, adj_y, p.get_eff()) 147 | 148 | 149 |     def add_agent(self, a): 150 |         ''' Adds an agent to the game. ''' 151 |         self.agent_population.append(a) 152 |         self.agent_map.set(a.x, a.y, a) 153 | 154 |     def del_agent(self, a): 155 |         ''' Kills the agent (if not already dead), removes them from the game and 156 |             drops any load they were carrying in their previously occupied position. ''' 157 |         self.agent_population.remove(a) 158 |         self.agent_map.set(a.x, a.y, None) 159 |         a.alive = False 160 |         if a.loaded: 161 |             a.loaded = False 162 |             self.terr.change(a.x, a.y, 1) 163 | 164 |     def move_agent(self, a, x, y): 165 |         ''' Moves agent, a, to new position (x,y) unless difference in terrain levels between 166 |             its current position and new position is greater than 4.''' 167 |         if abs(self.terr.get(x, y)-self.terr.get(a.x, a.y)) <= 4: 168 |             self.agent_map.set(a.x, a.y, None) 169 |             self.agent_map.set(x, y, a) 170 |             a.x = x 171 |             a.y = y 172 | 173 |     def run_agents(self): 174 |         # Create a list containing the view for each agent in the population. 175 |         views = [] 176 |         agent_map_get_small_view_fast = self.agent_map.get_small_view_fast 177 |         plant_map_get_small_view_fast = self.plant_map.get_small_view_fast 178 |         energy_map = self.energy_map 179 |         terr_map = self.terr 180 |         WV = WorldView 181 |         views_append = views.append 182 |         for a in self.agent_population: 183 |             x = a.x 184 |             y = a.y 185 |             agent_view = agent_map_get_small_view_fast(x, y) 186 |             plant_view = plant_map_get_small_view_fast(x, y) 187 |             world_view = WV(a, agent_view, plant_view, terr_map, energy_map) 188 |             views_append((a, world_view)) 189 | 190 |         # Create a list containing the action for each agent, where each agent 191 |         # determines its actions based on its view of the world and messages 192 |         # from its team. 193 |         messages = self.messages 194 |         actions = [(a, a.act(v, messages[a.team])) for (a, v) in views] 195 |         actions_dict = dict(actions) 196 |         random.shuffle(actions) 197 | 198 |         self.agent_map.lock() 199 |         # Apply the action for each agent - simply surviving the turn costs SUSTAIN_COST. 200 |         for (agent, action) in actions: 201 |             #This is the cost of mere survival 202 |             agent.energy -= SUSTAIN_COST 203 | 204 |             if action.type == ACT_MOVE: # Changes position of agent. 205 |                 act_x, act_y = action.get_data() 206 |                 (new_x, new_y) = get_next_move(agent.x, agent.y, 207 |                                                act_x, act_y) 208 |                 # Move to the new position if it is in range and it's not 209 |                 #currently occupied by another agent. 210 |                 if (self.agent_map.in_range(new_x, new_y) and 211 |                     not self.agent_map.get(new_x, new_y)): 212 |                     self.move_agent(agent, new_x, new_y) 213 |                     agent.energy -= MOVE_COST 214 |             elif action.type == ACT_SPAWN: # Creates a new agent; costs the parent SPAWN_TOTAL_ENERGY on top of the energy split.
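                # Spawn energy accounting, spelled out with the constants defined at the
                # top of this file: the parent first pays SPAWN_TOTAL_ENERGY =
                # BODY_ENERGY + SPAWN_LOST_ENERGY = 25 + 20 = 45, then its remaining
                # energy is halved and the child is created holding that half. For
                # example, a parent with 145 energy drops to 100, then to 50, and its
                # child also starts with 50.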
215 | act_x, act_y = action.get_data()[:2] 216 | (new_x, new_y) = get_next_move(agent.x, agent.y, 217 | act_x, act_y) 218 | if (self.agent_map.in_range(new_x, new_y) and 219 | not self.agent_map.get(new_x, new_y) and 220 | agent.energy >= SPAWN_TOTAL_ENERGY): 221 | agent.energy -= SPAWN_TOTAL_ENERGY 222 | agent.energy /= 2 223 | a = Agent(new_x, new_y, agent.energy, agent.get_team(), 224 | self.minds[agent.get_team()], 225 | action.get_data()[2:]) 226 | self.add_agent(a) 227 | elif action.type == ACT_EAT: 228 | #Eat only as much as possible. 229 | intake = min(self.energy_map.get(agent.x, agent.y), 230 | ENERGY_CAP - agent.energy) 231 | agent.energy += intake 232 | self.energy_map.change(agent.x, agent.y, -intake) 233 | elif action.type == ACT_RELEASE: 234 | #Dump some energy onto an adjacent field 235 | #No Seppuku 236 | output = action.get_data()[2] 237 | output = min(agent.energy - 1, output) 238 | act_x, act_y = action.get_data()[:2] 239 | #Use get_next_move to simplyfy things if you know 240 | #where the energy is supposed to end up. 241 | (out_x, out_y) = get_next_move(agent.x, agent.y, 242 | act_x, act_y) 243 | if (self.agent_map.in_range(out_x, out_y) and 244 | agent.energy >= 1): 245 | agent.energy -= output 246 | self.energy_map.change(out_x, out_y, output) 247 | elif action.type == ACT_ATTACK: 248 | #Make sure agent is attacking an adjacent field. 249 | act_x, act_y = act_data = action.get_data() 250 | next_pos = get_next_move(agent.x, agent.y, act_x, act_y) 251 | new_x, new_y = next_pos 252 | victim = self.agent_map.get(act_x, act_y) 253 | terr_delta = (self.terr.get(agent.x, agent.y) 254 | - self.terr.get(act_x, act_y)) 255 | if (victim is not None and victim.alive and 256 | next_pos == act_data): 257 | #If both agents attack each other, both loose double energy 258 | #Think twice before attacking 259 | try: 260 | contested = (actions_dict[victim].type == ACT_ATTACK) 261 | except: 262 | contested = False 263 | agent.attack(victim, terr_delta, contested) 264 | if contested: 265 | victim.attack(agent, -terr_delta, True) 266 | 267 | elif action.type == ACT_LIFT: 268 | if not agent.loaded and self.terr.get(agent.x, agent.y) > 0: 269 | agent.loaded = True 270 | self.terr.change(agent.x, agent.y, -1) 271 | 272 | elif action.type == ACT_DROP: 273 | if agent.loaded: 274 | agent.loaded = False 275 | self.terr.change(agent.x, agent.y, 1) 276 | 277 | # Kill all agents with negative energy. 278 | team = [0 for n in self.minds] 279 | for (agent, action) in actions: 280 | if agent.energy < 0 and agent.alive: 281 | self.energy_map.change(agent.x, agent.y, BODY_ENERGY) 282 | self.del_agent(agent) 283 | else : 284 | team[agent.team] += 1 285 | 286 | # Team wins (and game ends) if opposition team has 0 agents remaining. 287 | # Draw if time exceeds time limit. 288 | winner = 0 289 | alive = 0 290 | for t in team: 291 | if t != 0: 292 | alive += 1 293 | else: 294 | if alive == 0: 295 | winner += 1 296 | 297 | if alive == 1: 298 | colors = ["red", "white", "purple", "yellow"] 299 | print "Winner is %s (%s) in %s" % (self.mind_list[winner][1].name, 300 | colors[winner], str(self.time)) 301 | self.winner = winner 302 | 303 | if alive == 0 or (self.max_time > 0 and self.time > self.max_time): 304 | print "It's a draw!" 
305 | self.winner = -1 306 | 307 | self.agent_map.unlock() 308 | 309 | def tick(self): 310 | if not self.headless: 311 | # Space starts new game 312 | # q or close button will quit the game 313 | for event in pygame.event.get(): 314 | if event.type == pygame.locals.KEYUP: 315 | if event.key == pygame.locals.K_SPACE: 316 | self.winner = -1 317 | elif event.key == pygame.locals.K_q: 318 | sys.exit() 319 | elif event.key == pygame.locals.K_e: 320 | self.show_energy = not self.show_energy 321 | elif event.key == pygame.locals.K_a: 322 | self.show_agents = not self.show_agents 323 | elif event.type == pygame.locals.MOUSEBUTTONUP: 324 | if event.button == 1: 325 | print self.agent_map.get(event.pos[0]/2, 326 | event.pos[1]/2) 327 | elif event.type == pygame.QUIT: 328 | sys.exit() 329 | self.disp.update(self.terr, self.agent_population, 330 | self.plant_population, self.agent_map, 331 | self.plant_map, self.energy_map, self.time, 332 | len(self.minds), self.show_energy, 333 | self.show_agents) 334 | 335 | # test for spacebar pressed - if yes, restart 336 | for event in pygame.event.get(pygame.locals.KEYUP): 337 | if event.key == pygame.locals.K_SPACE: 338 | self.winner = -1 339 | if pygame.event.get(pygame.locals.QUIT): 340 | sys.exit() 341 | pygame.event.pump() 342 | self.disp.flip() 343 | 344 | self.run_agents() 345 | self.run_plants() 346 | for msg in self.messages: 347 | msg.update() 348 | self.time += 1 349 | self.tic = time.time() 350 | self.clock.tick() 351 | if self.time % 100 == 0: 352 | print 'FPS: %f' % self.clock.get_fps() 353 | 354 | 355 | class MapLayer(object): 356 | def __init__(self, size, val=0, valtype=numpy.object_): 357 | self.size = self.width, self.height = size 358 | self.values = numpy.empty(size, valtype) 359 | self.values.fill(val) 360 | 361 | def get(self, x, y): 362 | if y >= 0 and x >= 0: 363 | try: 364 | return self.values[x, y] 365 | except IndexError: 366 | return None 367 | return None 368 | 369 | def set(self, x, y, val): 370 | self.values[x, y] = val 371 | 372 | def in_range(self, x, y): 373 | return (0 <= x < self.width and 0 <= y < self.height) 374 | 375 | 376 | class ScalarMapLayer(MapLayer): 377 | def set_random(self, range, symmetric = True): 378 | self.values = terrain_generator().create_random(self.size, range, 379 | symmetric) 380 | 381 | def set_streak(self, range, symmetric = True): 382 | self.values = terrain_generator().create_streak(self.size, range, 383 | symmetric) 384 | 385 | def set_simple(self, range, symmetric = True): 386 | self.values = terrain_generator().create_simple(self.size, range, 387 | symmetric) 388 | 389 | def set_perlin(self, range, symmetric = True): 390 | self.values = terrain_generator().create_perlin(self.size, range, 391 | symmetric) 392 | 393 | 394 | def change(self, x, y, val): 395 | self.values[x, y] += val 396 | 397 | 398 | class ObjectMapLayer(MapLayer): 399 | def __init__(self, size): 400 | MapLayer.__init__(self, size, None, numpy.object_) 401 | self.surf = pygame.Surface(size) 402 | self.surf.set_colorkey((0,0,0)) 403 | self.surf.fill((0,0,0)) 404 | self.pixels = None 405 | # self.pixels = pygame.PixelArray(self.surf) 406 | 407 | def lock(self): 408 | self.pixels = pygame.surfarray.pixels2d(self.surf) 409 | 410 | def unlock(self): 411 | self.pixels = None 412 | 413 | def get_small_view_fast(self, x, y): 414 | ret = [] 415 | get = self.get 416 | append = ret.append 417 | width = self.width 418 | height = self.height 419 | for dx in (-1, 0, 1): 420 | for dy in (-1, 0, 1): 421 | if not (dx or dy): 422 | continue 423 | 
try: 424 | adj_x = x + dx 425 | if not 0 <= adj_x < width: 426 | continue 427 | adj_y = y + dy 428 | if not 0 <= adj_y < height: 429 | continue 430 | a = self.values[adj_x, adj_y] 431 | if a is not None: 432 | append(a.get_view()) 433 | except IndexError: 434 | pass 435 | return ret 436 | 437 | def get_view(self, x, y, r): 438 | ret = [] 439 | for x_off in xrange(-r, r + 1): 440 | for y_off in xrange(-r, r + 1): 441 | if x_off == 0 and y_off == 0: 442 | continue 443 | a = self.get(x + x_off, y + y_off) 444 | if a is not None: 445 | ret.append(a.get_view()) 446 | return ret 447 | 448 | def insert(self, list): 449 | for o in list: 450 | self.set(o.x, o.y, o) 451 | 452 | def set(self, x, y, val): 453 | MapLayer.set(self, x, y, val) 454 | if val is None: 455 | self.pixels[x][y] = 0 456 | # self.surf.set_at((x, y), 0) 457 | else: 458 | self.pixels[x][y] = val.color 459 | # self.surf.set_at((x, y), val.color) 460 | 461 | 462 | # Use Cython version of get_small_view_fast if available. 463 | # Otherwise, don't bother folks about it. 464 | try: 465 | import cells_helpers 466 | import types 467 | ObjectMapLayer.get_small_view_fast = types.MethodType( 468 | cells_helpers.get_small_view_fast, None, ObjectMapLayer) 469 | except ImportError: 470 | pass 471 | 472 | TEAM_COLORS = [(255, 0, 0), (255, 255, 255), (255, 0, 255), (255, 255, 0)] 473 | TEAM_COLORS_FAST = [0xFF0000, 0xFFFFFF, 0xFF00FF, 0xFFFF00] 474 | 475 | class Agent(object): 476 | __slots__ = ['x', 'y', 'mind', 'energy', 'alive', 'team', 'loaded', 'color', 477 | 'act'] 478 | def __init__(self, x, y, energy, team, AgentMind, cargs): 479 | self.x = x 480 | self.y = y 481 | self.mind = AgentMind(cargs) 482 | self.energy = energy 483 | self.alive = True 484 | self.team = team 485 | self.loaded = False 486 | self.color = TEAM_COLORS_FAST[team % len(TEAM_COLORS_FAST)] 487 | self.act = self.mind.act 488 | def __str__(self): 489 | return "Agent from team %i, energy %i" % (self.team,self.energy) 490 | def attack(self, other, offset = 0, contested = False): 491 | if not other: 492 | return False 493 | max_power = ATTACK_POWER + ATTACK_TERR_CHANGE * offset 494 | if contested: 495 | other.energy -= min(self.energy, max_power) 496 | else: 497 | other.energy -= max_power 498 | return other.energy <= 0 499 | 500 | def get_team(self): 501 | return self.team 502 | 503 | def get_pos(self): 504 | return (self.x, self.y) 505 | 506 | def set_pos(self, x, y): 507 | self.x = x 508 | self.y = y 509 | 510 | def get_view(self): 511 | return AgentView(self) 512 | 513 | # Actions available to an agent on each turn. 514 | ACT_SPAWN, ACT_MOVE, ACT_EAT, ACT_RELEASE, ACT_ATTACK, ACT_LIFT, ACT_DROP = range(7) 515 | 516 | class Action(object): 517 | ''' 518 | A class for passing an action around. 
519 | ''' 520 | def __init__(self, action_type, data=None): 521 | self.type = action_type 522 | self.data = data 523 | 524 | def get_data(self): 525 | return self.data 526 | 527 | def get_type(self): 528 | return self.type 529 | 530 | 531 | class PlantView(object): 532 | def __init__(self, p): 533 | self.x = p.x 534 | self.y = p.y 535 | self.eff = p.get_eff() 536 | 537 | def get_pos(self): 538 | return (self.x, self.y) 539 | 540 | def get_eff(self): 541 | return self.eff 542 | 543 | 544 | class AgentView(object): 545 | def __init__(self, agent): 546 | (self.x, self.y) = agent.get_pos() 547 | self.team = agent.get_team() 548 | 549 | def get_pos(self): 550 | return (self.x, self.y) 551 | 552 | def get_team(self): 553 | return self.team 554 | 555 | 556 | class WorldView(object): 557 | def __init__(self, me, agent_views, plant_views, terr_map, energy_map): 558 | self.agent_views = agent_views 559 | self.plant_views = plant_views 560 | self.energy_map = energy_map 561 | self.terr_map = terr_map 562 | self.me = me 563 | 564 | def get_me(self): 565 | return self.me 566 | 567 | def get_agents(self): 568 | return self.agent_views 569 | 570 | def get_plants(self): 571 | return self.plant_views 572 | 573 | def get_terr(self): 574 | return self.terr_map 575 | 576 | def get_energy(self): 577 | return self.energy_map 578 | 579 | 580 | class Display(object): 581 | black = (0, 0, 0) 582 | red = (255, 0, 0) 583 | green = (0, 255, 0) 584 | yellow = (255, 255, 0) 585 | 586 | def __init__(self, size, scale=2): 587 | self.width, self.height = size 588 | self.scale = scale 589 | self.size = (self.width * scale, self.height * scale) 590 | pygame.init() 591 | self.screen = pygame.display.set_mode(self.size) 592 | self.surface = self.screen 593 | pygame.display.set_caption("Cells") 594 | 595 | self.background = pygame.Surface(self.screen.get_size()) 596 | self.background = self.background.convert() 597 | self.background.fill((150,150,150)) 598 | 599 | self.text = [] 600 | 601 | if pygame.font: 602 | def show_text(self, text, color, topleft): 603 | font = pygame.font.Font(None, 24) 604 | text = font.render(text, 1, color) 605 | textpos = text.get_rect() 606 | textpos.topleft = topleft 607 | self.text.append((text, textpos)) 608 | else: 609 | def show_text(self, text, color, topleft): 610 | pass 611 | 612 | def update(self, terr, pop, plants, agent_map, plant_map, energy_map, 613 | ticks, nteams, show_energy, show_agents): 614 | # Slower version: 615 | # img = ((numpy.minimum(150, 20 * terr.values) << 16) + 616 | # ((numpy.minimum(150, 10 * terr.values + 10.energy_map.values)) << 8)) 617 | 618 | r = numpy.minimum(150, 20 * terr.values) 619 | r <<= 16 620 | 621 | # g = numpy.minimum(150, 10 * terr.values + 10 * energy_map.values) 622 | if show_energy: 623 | g = terr.values + energy_map.values 624 | g *= 10 625 | g = numpy.minimum(150, g) 626 | g <<= 8 627 | 628 | img = r 629 | if show_energy: 630 | img += g 631 | # b = numpy.zeros_like(terr.values) 632 | 633 | img_surf = pygame.Surface((self.width, self.height)) 634 | pygame.surfarray.blit_array(img_surf, img) 635 | if show_agents: 636 | img_surf.blit(agent_map.surf, (0,0)) 637 | img_surf.blit(plant_map.surf, (0,0)) 638 | 639 | scale = self.scale 640 | pygame.transform.scale(img_surf, 641 | self.size, self.screen) 642 | if not ticks % 60: 643 | #todo: find out how many teams are playing 644 | team_pop = [0] * nteams 645 | 646 | for team in xrange(nteams): 647 | team_pop[team] = sum(1 for a in pop if a.team == team) 648 | 649 | self.text = [] 650 | drawTop = 0 651 | 
for t in xrange(nteams): 652 | drawTop += 20 653 | self.show_text(str(team_pop[t]), TEAM_COLORS[t], (10, drawTop)) 654 | 655 | for text, textpos in self.text: 656 | self.surface.blit(text, textpos) 657 | 658 | def flip(self): 659 | pygame.display.flip() 660 | 661 | 662 | class Plant(object): 663 | color = 0x00FF00 664 | 665 | def __init__(self, x, y, eff): 666 | self.x = x 667 | self.y = y 668 | self.eff = eff 669 | 670 | def get_pos(self): 671 | return (self.x, self.y) 672 | 673 | def get_eff(self): 674 | return self.eff 675 | 676 | def get_view(self): 677 | return PlantView(self) 678 | 679 | 680 | class MessageQueue(object): 681 | def __init__(self): 682 | self.__inlist = [] 683 | self.__outlist = [] 684 | 685 | def update(self): 686 | self.__outlist = self.__inlist 687 | self.__inlist = [] 688 | 689 | def send_message(self, m): 690 | self.__inlist.append(m) 691 | 692 | def get_messages(self): 693 | return self.__outlist 694 | 695 | 696 | class Message(object): 697 | def __init__(self, message): 698 | self.message = message 699 | def get_message(self): 700 | return self.message 701 | 702 | 703 | def main(): 704 | global bounds, symmetric, mind_list 705 | 706 | try: 707 | config.read('default.cfg') 708 | bounds = config.getint('terrain', 'bounds') 709 | symmetric = config.getboolean('terrain', 'symmetric') 710 | minds_str = str(config.get('minds', 'minds')) 711 | except Exception as e: 712 | print 'Got error: %s' % e 713 | config.add_section('minds') 714 | config.set('minds', 'minds', 'mind1,mind2') 715 | config.add_section('terrain') 716 | config.set('terrain', 'bounds', '300') 717 | config.set('terrain', 'symmetric', 'true') 718 | 719 | with open('default.cfg', 'wb') as configfile: 720 | config.write(configfile) 721 | 722 | config.read('default.cfg') 723 | bounds = config.getint('terrain', 'bounds') 724 | symmetric = config.getboolean('terrain', 'symmetric') 725 | minds_str = str(config.get('minds', 'minds')) 726 | mind_list = [(n, get_mind(n)) for n in minds_str.split(',')] 727 | 728 | # accept command line arguments for the minds over those in the config 729 | try: 730 | if len(sys.argv)>2: 731 | mind_list = [(n,get_mind(n)) for n in sys.argv[1:] ] 732 | except (ImportError, IndexError): 733 | pass 734 | 735 | 736 | if __name__ == "__main__": 737 | main() 738 | while True: 739 | game = Game(bounds, mind_list, symmetric, -1) 740 | while game.winner is None: 741 | game.tick() 742 | --------------------------------------------------------------------------------
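cells.py reads (and, if it is missing, writes) a default.cfg in the working directory. For reference, the defaults generated by main() above correspond to a file of roughly this shape (the generated key order may differ):

[minds]
minds = mind1,mind2

[terrain]
bounds = 300
symmetric = true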