├── .gitignore
├── README.rst
├── animation
│   ├── README.md
│   ├── bootstrap.py
│   ├── config.py
│   ├── data.py
│   ├── display.py
│   └── params.py
├── doom
│   └── README.rst
├── images
│   ├── LICENSE.txt
│   ├── README.rst
│   ├── content
│   │   ├── Cat.jpg
│   │   ├── Freddie.jpg
│   │   ├── Freddie_sem.png
│   │   ├── Village.jpg
│   │   ├── Village.noise128.jpg
│   │   └── Village.noise64.jpg
│   └── style
│       ├── charcoal1.jpg
│       ├── charcoal2.jpg
│       ├── charcoal2_sem.png
│       ├── charcoal3.jpg
│       ├── charcoal4.jpg
│       ├── charcoal5.jpg
│       ├── mosaic1.jpg
│       ├── mosaic2.jpg
│       ├── mosaic3.jpg
│       ├── paint1.jpg
│       ├── paint2.jpg
│       ├── paint3.jpg
│       ├── paint4.jpg
│       ├── paint5.jpg
│       ├── paint6.jpg
│       ├── sketch2.jpg
│       ├── sketch3.jpg
│       └── sketch4.jpg
└── lstm
    ├── README.rst
    ├── data.py
    ├── docker.df
    ├── reply.py
    ├── s2s.py
    └── tweet.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.DS_Store
3 | *__pycache__
4 |
5 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | nuclai.16 Hands-On Workshops
2 | ============================
3 |
4 | This repository contains the source code and data to participate in the workshops at the nucl.ai Conference 2016. Programs should work on Windows, Linux and Mac OSX with Python.
5 |
6 | You'll find multiple folders here:
7 |
8 | 1. **images —** On Tuesday 19th, a workshop about image synthesis with convolutional networks.
9 | 2. **lstm —** On Wednesday 20th, a workshop about generating text with recurrent networks.
10 | 3. **animation —** On Wednesday 20th, a beginner tutorial about motion matching.
11 | 4. ...
12 |
13 | For most demos, we recommend installing `Miniconda `_. This will allow you to create new Python environments easily and install the appropriate packages:
14 |
15 | .. code:: bash
16 |     conda create -n py35 python=3.5
17 |     activate py35
18 |
19 | See each sub-folder for further details and instructions.
--------------------------------------------------------------------------------
/animation/README.md:
--------------------------------------------------------------------------------
1 | # nucl.ai Workshops 2016
2 |
3 | This folder contains the source code and data for the motion matching tutorial at the nucl.ai Conference 2016. Programs should work on Windows, Linux and Mac OSX with Python 3.5.
4 |
5 | ## Installation & Dependencies
6 |
7 | 1. `conda create -n py35 python=3.5 numpy`
8 | 2. `activate py35` (on Linux and Mac OSX: `source activate py35`)
9 | 3. `pip install -e git+https://github.com/vispy/vispy#egg=vispy-dev`
10 | 4. (Windows) Download GLFW and add its `lib-mingw-w64` folder to `%PATH%`.
11 |
12 |
13 | ## Running demos
14 |
15 | .. code:: bash
16 |
17 |     python display_todo.py
18 |
19 |
20 | .. code:: bash
21 |
22 |     python display_done.py
23 |
24 |
--------------------------------------------------------------------------------
/animation/bootstrap.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | # Setup the path to known locations of GLFW's DLL on Windows
5 | if sys.platform == 'win32':
6 |     import platform
7 |     bits = platform.architecture()[0][:2]
8 |     os.environ['PATH'] += os.pathsep + r'C:\ProgramData\chocolatey\msvc120-{}\bin'.format(bits)
9 |
10 | # Inject support for local font loading into VisPy.
11 | def _get_vispy_font_filename(face, bold, italic):
12 |     return os.path.join(os.path.dirname(__file__), 'data/questrial.ttf')
13 |
14 | # Fonts on Mac OSX.
15 | if sys.platform == 'darwin': 16 | from vispy.util.fonts import _quartz 17 | _quartz._vispy_fonts = ('Questrial',) 18 | _quartz._get_vispy_font_filename = _get_vispy_font_filename 19 | del _quartz 20 | 21 | # Fonts on Windows and Linux. 22 | if sys.platform in ['win32', 'linux']: 23 | from vispy.util.fonts import _freetype 24 | _freetype._vispy_fonts = ('Questrial',) 25 | _freetype._get_vispy_font_filename = _get_vispy_font_filename 26 | del _freetype 27 | -------------------------------------------------------------------------------- /animation/config.py: -------------------------------------------------------------------------------- 1 | data_file = "./csv/data.csv" -------------------------------------------------------------------------------- /animation/data.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import math 3 | import random 4 | 5 | class Data(object): 6 | 7 | def __init__(self, data_file, params): 8 | 9 | self.params = params 10 | self.data = {} 11 | self.user_team_lookup = {} 12 | # if advancing 13 | self.selected_paths = [] 14 | self.advance_point = 0 15 | self.player_position = numpy.asarray([0,0]) 16 | 17 | self.previous_path = [] 18 | 19 | for idx, row in enumerate(numpy.genfromtxt(data_file, delimiter=',')): 20 | if idx == 0: continue 21 | hero_id = int(row[1]) 22 | coordinates = numpy.array(row[2:4]) 23 | coordinates *= self.params.SCALE_FACTOR 24 | if hero_id in self.data: self.data[hero_id].append(numpy.array(coordinates)) 25 | else: self.data[hero_id] = [coordinates] 26 | # players 0 - 4 belong to the first team 5 - 9 to the seond one - it comes from a replay data format 27 | if not hero_id in self.user_team_lookup: self.user_team_lookup[hero_id] = 1 if len(self.user_team_lookup.keys()) <= 4 else 2 28 | 29 | # append offset 30 | for hero_id in self.data.keys(): 31 | for idx_point, point in enumerate(self.data[hero_id]): 32 | if idx_point == 0: offset = [0,0] 33 | else: offset = [point[0] - self.data[hero_id][idx_point-1][0], point[1] - self.data[hero_id][idx_point-1][1]] 34 | self.data[hero_id][idx_point] = numpy.append(point, numpy.array(offset)) 35 | 36 | # prepare smaller segments 37 | self.segments = {} 38 | for hero_id in self.data.keys(): 39 | self.segments[hero_id] = numpy.array_split( self.data[hero_id], math.ceil(len(self.data[hero_id]) / float(self.params.SEGMENT_SIZE)) ) 40 | for idx, segment in enumerate(self.segments[hero_id]): 41 | for idx_point, point in enumerate(segment): 42 | if idx_point == 0: continue 43 | if math.hypot(point[2], point[3]) > self.params.TELEPORT_THRESHOLD: 44 | self.segments[hero_id][idx] = [] # skip teleports 45 | continue 46 | 47 | 48 | def advance(self): 49 | self.advance_point += 1 50 | self.player_position = self.player_position + self.selected_paths[0][4][self.advance_point][0:2] - self.selected_paths[0][4][self.advance_point-1][0:2] 51 | 52 | def get_paths(self): 53 | 54 | go_to = self.mouse_xy 55 | selected_paths = [] 56 | player_point = self.player_position * self.params.SCALE_FACTOR 57 | new_path = False 58 | 59 | def append_path(rendom_path, path_idx, hero_id, investigated_point_idx, path_advance): 60 | if len(random_path) and random_path[0][0] != random_path[-1][0] and random_path[0][1] != random_path[-1][1]: 61 | refering_point = random_path[path_advance % self.params.SEGMENT_SIZE][0:2] # refering point is the player position in the path 62 | if investigated_point_idx >= len(random_path): investigated_point_idx = len(random_path) - 1 # edge case - path 
shorter than expected 63 | point_distance = math.hypot(random_path[investigated_point_idx][0] - refering_point[0] + player_point[0] - go_to[0], 64 | random_path[investigated_point_idx][1] - refering_point[1] + player_point[1] - go_to[1]) 65 | selected_paths.append([point_distance, path_idx, hero_id, refering_point, random_path, self.player_position]) 66 | 67 | for i in range(self.params.SAMPLE_SIZE): 68 | hero_id = random.choice(list(self.data.keys())) 69 | path_idx = numpy.random.random_integers(0, (len(self.segments[hero_id])-1)) 70 | random_path = self.segments[hero_id][path_idx] 71 | append_path(random_path, path_idx, hero_id, self.params.MOVE_ALONG_STEP_SIZE, 0) 72 | 73 | if len(self.selected_paths): 74 | # try to sample the current path by advancinf it 75 | random_path = self.segments[self.selected_paths[0][2]][self.selected_paths[0][1]] 76 | investigated_point_idx = self.params.MOVE_ALONG_STEP_SIZE + self.advance_point + 1 77 | if investigated_point_idx >= len(random_path): 78 | # get into next segment 79 | # check which one and if it exists 80 | segments_jump = (self.params.MOVE_ALONG_STEP_SIZE + self.advance_point + 1) // self.params.SEGMENT_SIZE 81 | if self.selected_paths[0][1] < len(self.segments[self.selected_paths[0][2]]) and len(self.segments[self.selected_paths[0][2]][self.selected_paths[0][1] + segments_jump]): 82 | random_path = self.segments[self.selected_paths[0][2]][self.selected_paths[0][1] + segments_jump] 83 | investigated_point_idx = investigated_point_idx % self.params.SEGMENT_SIZE 84 | else: 85 | random_path = [] # the path has ended, it empty 86 | append_path(random_path, self.selected_paths[0][1], self.selected_paths[0][2], investigated_point_idx, self.advance_point) 87 | selected_paths.sort(key=lambda x: x[0]) 88 | 89 | if len(self.selected_paths) == 0 or selected_paths[0][1] != self.selected_paths[0][1] or selected_paths[0][2] != self.selected_paths[0][2]: 90 | # new path - 91 | # reset the pointer and keep the previous one to keep the history 92 | if len(self.selected_paths) > 0: 93 | if self.advance_point > 0: 94 | self.previous_path = (self.selected_paths[0], self.advance_point + 1) 95 | new_path = True 96 | self.advance_point = 0 97 | self.selected_paths = selected_paths 98 | return (selected_paths, new_path) 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /animation/display.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import bootstrap # Demonstration specific setup. 4 | import vispy.scene # Canvas & visuals for rendering. 
5 | import numpy 6 | 7 | import collections 8 | import math 9 | 10 | import data 11 | import params 12 | 13 | # style 14 | SELECTED_PATH_WIDTH = 5 15 | HISTORY_PATH_WIDTH = 3 16 | PATH_WIDTH = 1 17 | SELECTED_ARROW_SIZE = 20.0 18 | ARROW_SIZE = 14.0 19 | # colors 20 | COLOR_NEUTRAL = numpy.asarray([0.5,0.5,0.5]) 21 | COLOR_SELECTED = numpy.asarray([0.8,0.2,0.8]) 22 | 23 | class Application(object): 24 | 25 | def __init__(self, title='nucl.ai Motion Matching'): 26 | self.canvas = vispy.scene.SceneCanvas( 27 | title=title, 28 | size=(1280, 720), 29 | bgcolor='black', 30 | show=False, 31 | keys='interactive') 32 | 33 | self.params = params.Params() 34 | self.widget = self.canvas.central_widget 35 | self.view = self.canvas.central_widget.add_view() 36 | self.marker = vispy.scene.Markers(pos=numpy.asarray([[0,0]]), face_color='red', size=0, parent=self.view.scene) 37 | self.select = True # identify if the current tick select or advance the path 38 | # prepare display 39 | self.lines = [] 40 | self.colors = [] 41 | self.history = [] 42 | self.history_pointer = 0 43 | for i in range(self.params.HISTORY_SIZE): 44 | line = vispy.scene.Line(parent=self.view.scene, color=COLOR_NEUTRAL, connect='strip', method='agg', width=HISTORY_PATH_WIDTH) 45 | line.transform = vispy.visuals.transforms.MatrixTransform() 46 | self.history.append(line) 47 | 48 | for i in range(self.params.TOP_PATHS_NUMBER): 49 | path_width = SELECTED_PATH_WIDTH if i == 0 else PATH_WIDTH 50 | arrow_size = SELECTED_ARROW_SIZE if i == 0 else ARROW_SIZE 51 | color = COLOR_SELECTED if i == 0 else COLOR_NEUTRAL 52 | # color = numpy.random.rand(3) # using fixed colors now 53 | self.colors.append(color) 54 | line = vispy.scene.Line(parent=self.view.scene, color=color, connect='strip', method='agg', width=path_width) 55 | line.transform = vispy.visuals.transforms.MatrixTransform() 56 | self.lines.append(line) 57 | 58 | self.timer_toggle = True 59 | self.player_position = numpy.asarray([0,0]) 60 | if not os.path.exists('dota2.csv'): 61 | print("ERROR: Please download and extract this file...\nhttps://github.com/aigamedev/nuclai16/releases/download/0.0/dota2.csv.bz2\n") 62 | sys.exit(-1) 63 | self.data = data.Data('dota2.csv', self.params) 64 | # init the searched point with some random value - after first mouse move it's a 65 | self.data.mouse_xy = ( ( numpy.random.rand(2) * 10 - 5 ) - numpy.asarray(self.canvas.size) / 2 ) * self.params.SCALE_FACTOR 66 | 67 | self.grid = vispy.scene.visuals.GridLines(parent=self.view.scene, color=(1, 1, 1, 1)) 68 | self.grid.transform = vispy.visuals.transforms.MatrixTransform() 69 | self.grid.transform.translate(numpy.asarray(self.canvas.size) / 2) 70 | self.canvas.show(visible=True) 71 | # HACK: Bug in VisPy 0.5.0-dev requires a click for layout to occur. 
72 | self.canvas.events.mouse_press() 73 | 74 | 75 | @self.canvas.events.key_press.connect 76 | def on_key_press(event): 77 | if event.key.name == ' ': 78 | if self.timer_toggle: self.timer.stop() 79 | else: self.timer.start() 80 | self.timer_toggle = not self.timer_toggle 81 | 82 | 83 | @self.canvas.events.resize.connect 84 | def on_resize(event): 85 | self.grid.transform.reset() 86 | self.grid.transform.translate(numpy.asarray(self.canvas.size) / 2) 87 | # @TODO: translate paths 88 | 89 | 90 | @self.canvas.events.mouse_move.connect 91 | def on_mouse_move(event): 92 | self.data.mouse_xy = (numpy.asarray(self.view.camera.transform.imap(event.pos)) - numpy.asarray(self.canvas.size) / 2) * self.params.SCALE_FACTOR 93 | 94 | 95 | @self.canvas.events.draw.connect 96 | def on_draw(event): 97 | pass 98 | 99 | def draw_current_path_advance(self, ev): 100 | if self.select: 101 | _, new_path = self.data.get_paths() 102 | else: 103 | self.data.advance() 104 | new_path = False 105 | for i in range(self.params.TOP_PATHS_NUMBER): 106 | if i != 0 and not self.select: continue # just advancing, no need to redraw all selection 107 | if i >= len(self.data.selected_paths): 108 | # clear and skip 109 | self.lines[i].set_data(pos=numpy.asarray([[0,0],[0,0]])) 110 | continue 111 | 112 | current = self.data.selected_paths[i][4] 113 | draw_to = self.params.MOVE_ALONG_STEP_SIZE 114 | 115 | if i == 0: # i == 0 is the current / best scored path 116 | draw_to += self.data.advance_point 117 | marker_point = current[self.data.advance_point][0:2] 118 | 119 | current = current[0:draw_to] 120 | self.lines[i].set_data(pos=current[:,[0,1]]) 121 | if self.select: # in the selection the transorm must be updated 122 | self.lines[i].transform.reset() 123 | self.lines[i].transform.translate((self.data.selected_paths[i][3] * -1)) # path starting point 124 | self.lines[i].transform.translate(self.data.player_position) # current player position 125 | # to have [0,0] in the screen center 126 | self.lines[i].transform.translate(numpy.asarray(self.canvas.size) / 2) # moving [0,0] to screen center 127 | 128 | if i == 0: 129 | self.marker.set_data(pos=numpy.asarray([marker_point]), face_color=self.colors[i], size=15) 130 | self.marker.transform = self.lines[i].transform 131 | 132 | if new_path: 133 | # append history 134 | self.history[self.history_pointer].transform.reset() 135 | self.history[self.history_pointer].transform = vispy.visuals.transforms.MatrixTransform() 136 | current = self.data.previous_path[0][4][0:self.data.previous_path[1]] 137 | self.history[self.history_pointer].set_data(current[:,[0,1]]) 138 | self.history[self.history_pointer].transform.translate((self.data.previous_path[0][3] * -1)) # path starting point 139 | self.history[self.history_pointer].transform.translate(self.data.previous_path[0][5]) # the player position in the path 140 | self.history[self.history_pointer].transform.translate(numpy.asarray(self.canvas.size) / 2) # moving [0,0] to screen center 141 | self.history_pointer += 1 142 | if self.history_pointer == self.params.HISTORY_SIZE: self.history_pointer = 0 143 | 144 | self.select = not self.select 145 | 146 | 147 | def process(self, _): 148 | return 149 | 150 | 151 | def run(self): 152 | self.timer = vispy.app.Timer(interval=1.0 / 30.0) 153 | self.timer.connect(self.draw_current_path_advance) 154 | self.timer.start(0.033) # 30 FPS 155 | vispy.app.run() 156 | 157 | 158 | if __name__ == "__main__": 159 | vispy.set_log_level('WARNING') 160 | vispy.use(app='glfw') 161 | app = Application() 162 | 
app.run()
163 |
--------------------------------------------------------------------------------
/animation/params.py:
--------------------------------------------------------------------------------
1 | class Params:
2 |     SEGMENT_SIZE = 10
3 |     TOP_PATHS_NUMBER = 5
4 |     SAMPLE_SIZE = 150
5 |     SCALE_FACTOR = 200
6 |     TELEPORT_THRESHOLD = 40  # distance above which we eliminate a segment - this skips all teleports
7 |     HISTORY_SIZE = 100
8 |
9 |     def __init__(self):
10 |         self.MOVE_ALONG_STEP_SIZE = int(self.SEGMENT_SIZE / 2)
11 |
--------------------------------------------------------------------------------
/doom/README.rst:
--------------------------------------------------------------------------------
1 | nucl.ai '16 VizDoom Workshop
2 | ============================
3 |
4 | 1. Clone the repository: `git clone https://github.com/Marqt/ViZDoom`
5 |
6 | 2. `Download WAD files `_ and extract into `scenarios`.
7 |
8 | 3. Build or download the binaries; see the `detailed instructions `_.
9 |
10 | 4. Set up Python; version 2.7 is recommended if you downloaded the binaries:
11 |
12 | .. code:: bash
13 |
14 |     conda create -n py27 mingw libpython numpy scikit-image
15 |     activate py27
16 |     pip install Theano git+https://github.com/Lasagne/Lasagne
17 |     set PYTHONPATH=../../bin/python/
18 |
19 | 5. Run a basic script from the examples folder:
20 |
21 | .. code:: bash
22 |
23 |     cd examples/python
24 |     python basic.py
25 |
26 | 6. Follow the `workshop-specific exercises `_.
--------------------------------------------------------------------------------
/images/LICENSE.txt:
--------------------------------------------------------------------------------
1 | These thumbnails are collected from image search previews, and used
2 | here under Fair Use and for educational reasons only.
3 |
4 | To submit a take-down notice, submit a Pull Request.
5 |
--------------------------------------------------------------------------------
/images/README.rst:
--------------------------------------------------------------------------------
1 | Image Synthesis with Neural Networks
2 | ====================================
3 |
4 | 0. Installation
5 | ---------------
6 |
7 | We suggest installing `Docker `_ to run the code. On Windows it's the only option; on Linux and Mac OSX it's optional but recommended. Then you can set up an alias command from bash:
8 |
9 | .. code:: bash
10 |
11 |     alias doodle="docker run -v $(pwd)/style:/nd/style -v $(pwd)/content:/nd/content \
12 |                              -v $(pwd)/output:/nd/output -v $(pwd)/frames:/nd/frames \
13 |                              -it alexjc/neural-doodle:fast"
14 |
15 | The various folders named ``style``, ``content``, ``output`` or ``frames`` are taken from the current folder, presumably ``nuclai16/images``, and will be used to pass files back and forth to the container.
16 |
17 |
18 | 1. Image Reconstruction
19 | -----------------------
20 |
21 | .. code:: bash
22 |
23 |     doodle --content content/Village.noise64.jpg --style content/Village.jpg \
24 |            --passes 1 --layers 4 --iterations 1 --frames
25 |
26 |
27 | 2. Texture Synthesis
28 | --------------------
29 |
30 | .. code:: bash
31 |
32 |     doodle --style style/sketch4.jpg --output-size=512x512 \
33 |            --passes 2 --layers 5 4 3 --iterations 4 3 2
34 |
35 |     doodle --style style/paint4.jpg --output-size=512x512 \
36 |            --passes 2 --layers 5 4 3 --iterations 4 3 2
37 |
38 |
39 | 3. Style Transfer
40 | -----------------
41 |
42 | ..
code:: bash 43 | 44 | doodle --content content/Freddie.jpg --style style/charcoal1.jpg \ 45 | --passes 2 --layers 5 4 --iterations 3 3 \ 46 | --variety 20 10 0 --content-weight 0.3 0.1 0.0 47 | 48 | 49 | 4. Neural Doodle 50 | ---------------- 51 | 52 | .. code:: bash 53 | 54 | doodle --content content/Freddie.jpg --style style/charcoal2.jpg --semantic-weight=1.0 \ 55 | --passes 2 --layers 5 4 --iterations 3 2 \ 56 | --variety 100 0 --content-weight 0.1 0.0 --noise-weight 0.1 0.0 57 | -------------------------------------------------------------------------------- /images/content/Cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/content/Cat.jpg -------------------------------------------------------------------------------- /images/content/Freddie.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/content/Freddie.jpg -------------------------------------------------------------------------------- /images/content/Freddie_sem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/content/Freddie_sem.png -------------------------------------------------------------------------------- /images/content/Village.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/content/Village.jpg -------------------------------------------------------------------------------- /images/content/Village.noise128.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/content/Village.noise128.jpg -------------------------------------------------------------------------------- /images/content/Village.noise64.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/content/Village.noise64.jpg -------------------------------------------------------------------------------- /images/style/charcoal1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/charcoal1.jpg -------------------------------------------------------------------------------- /images/style/charcoal2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/charcoal2.jpg -------------------------------------------------------------------------------- /images/style/charcoal2_sem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/charcoal2_sem.png -------------------------------------------------------------------------------- /images/style/charcoal3.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/charcoal3.jpg -------------------------------------------------------------------------------- /images/style/charcoal4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/charcoal4.jpg -------------------------------------------------------------------------------- /images/style/charcoal5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/charcoal5.jpg -------------------------------------------------------------------------------- /images/style/mosaic1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/mosaic1.jpg -------------------------------------------------------------------------------- /images/style/mosaic2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/mosaic2.jpg -------------------------------------------------------------------------------- /images/style/mosaic3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/mosaic3.jpg -------------------------------------------------------------------------------- /images/style/paint1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/paint1.jpg -------------------------------------------------------------------------------- /images/style/paint2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/paint2.jpg -------------------------------------------------------------------------------- /images/style/paint3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/paint3.jpg -------------------------------------------------------------------------------- /images/style/paint4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/paint4.jpg -------------------------------------------------------------------------------- /images/style/paint5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/paint5.jpg -------------------------------------------------------------------------------- /images/style/paint6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/paint6.jpg 
-------------------------------------------------------------------------------- /images/style/sketch2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/sketch2.jpg -------------------------------------------------------------------------------- /images/style/sketch3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/sketch3.jpg -------------------------------------------------------------------------------- /images/style/sketch4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aigamedev/nuclai16/ef014bd398c481323022a693c2173f165bdf87d3/images/style/sketch4.jpg -------------------------------------------------------------------------------- /lstm/README.rst: -------------------------------------------------------------------------------- 1 | Recurrent Neural Networks 2 | ========================= 3 | 4 | 0. Installation 5 | --------------- 6 | 7 | We suggest installing `Docker `_ to run the code. Then you can enter a bash prompt within the image as follows: 8 | 9 | .. code:: bash 10 | 11 | docker run -it alexjc/nuclai16:lstm 12 | 13 | There is no output from this docker container, just text printed on the console. 14 | 15 | 16 | 1. Long Short-Term Memory 17 | ------------------------- 18 | 19 | Using pre-trained data: 20 | 21 | .. code:: bash 22 | 23 | python tweet.py --load lstm512 24 | 25 | python tweet.py --load lstm256x3 26 | 27 | Training it yourself: 28 | 29 | .. code:: bash 30 | 31 | python tweet.py --train --save lstm64 32 | 33 | 34 | 2. Simple Sequence To Sequence 35 | ------------------------------ 36 | 37 | .. code:: bash 38 | 39 | python s2s.py 40 | 41 | 42 | 3. Complex Sequence To Sequence 43 | ------------------------------- 44 | 45 | .. code:: bash 46 | 47 | python reply.py 48 | 49 | -------------------------------------------------------------------------------- /lstm/data.py: -------------------------------------------------------------------------------- 1 | import re 2 | import collections 3 | 4 | STX = u"\2" 5 | ETX = u"\3" 6 | UNK = u"\4" 7 | 8 | def read(filename, minlen=40, maxlen=None, trim_hash=False, shorten=False): 9 | all_tweets = set() 10 | text = "" 11 | with open(filename, 'r', encoding='utf8') as f: 12 | for t in f.readlines(): 13 | tweet = STX + t.strip() + ETX 14 | if trim_hash: tweet = re.sub(r"#\S+[\W+]?", '', tweet) # remove hash - not a word 15 | if shorten and len(tweet) > maxlen: 16 | i = 1 17 | tweet_shorten = tweet[:len("".join(tweet.split(".")[:i])) + i] 18 | while len(tweet[:len("".join(tweet.split(".")[:i + 1])) + i ]) < maxlen: 19 | i += 1 20 | tweet_shorten = tweet[:len("".join(tweet.split(".")[:i])) + i] 21 | tweet = tweet_shorten 22 | if len(tweet) < minlen: continue # not a great input - trim it 23 | if maxlen != None and len(tweet) > maxlen: continue # too long 24 | all_tweets.add(tweet) 25 | text += tweet 26 | 27 | chars = set(text) 28 | 29 | # Remove extremely infrequent characters. 30 | chars_frequency = collections.Counter(text) 31 | threshold = round(len(text) * 0.0005) # 0,05% 32 | 33 | text = "" 34 | tweets = [] 35 | 36 | # Replace unusual characters. 
37 | for t in all_tweets: 38 | for c in set(t): 39 | if chars_frequency[c] <= threshold: 40 | t = t.replace(c, UNK) 41 | tweets.append(t) 42 | text += t 43 | 44 | chars = sorted(list(set(text))) 45 | return (tweets, text, chars) 46 | -------------------------------------------------------------------------------- /lstm/docker.df: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | # Install dependencies 4 | RUN apt-get -qq update && \ 5 | apt-get -qq install --assume-yes \ 6 | "build-essential" \ 7 | "git" \ 8 | "vim" \ 9 | "zip" \ 10 | "wget" \ 11 | "pkg-config" && \ 12 | rm -rf /var/lib/apt/lists/* 13 | 14 | # Miniconda. 15 | RUN wget --quiet https://repo.continuum.io/miniconda/Miniconda3-4.0.5-Linux-x86_64.sh -O ~/miniconda.sh && \ 16 | /bin/bash ~/miniconda.sh -b -p /opt/conda && \ 17 | rm ~/miniconda.sh 18 | 19 | # Install requirements before copying project files 20 | WORKDIR /lstm 21 | RUN /opt/conda/bin/conda install -q -y conda numpy pip h5py 22 | RUN /opt/conda/bin/python3 -m pip install theano==0.8.2 keras==1.0.3 23 | RUN /opt/conda/bin/python3 -m pip install git+https://github.com/farizrahman4u/seq2seq 24 | RUN unlink python; ln -s /opt/conda/bin/python3 /bin/python 25 | 26 | # Copy only required project files 27 | COPY data.py . 28 | COPY tweet.py . 29 | COPY s2s.py . 30 | COPY reply.py . 31 | 32 | # Pre-trained network. 33 | RUN wget -q "https://github.com/aigamedev/nuclai16/releases/download/0.0/lstm512.zip" 34 | RUN wget -q "https://github.com/aigamedev/nuclai16/releases/download/0.0/lstm256x3.zip" 35 | RUN wget -q "https://github.com/aigamedev/nuclai16/releases/download/0.0/tweets78k.txt.bz2" 36 | RUN bunzip2 tweets78k.txt.bz2 ; unzip lstm512.zip ; unzip lstm256x3.zip ; rm *.zip 37 | 38 | # Set an entrypoint to the main doodle.py script 39 | ENTRYPOINT ["/bin/bash"] 40 | -------------------------------------------------------------------------------- /lstm/reply.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import itertools 3 | 4 | import numpy as np 5 | from seq2seq.models import SimpleSeq2seq 6 | 7 | import data 8 | 9 | MAXLEN = 40 10 | 11 | tweets, text, chars = data.read('tweets78k.txt', minlen=20, maxlen=MAXLEN, trim_hash=True, shorten=True) 12 | num_chars = len(chars) 13 | num_tweets = len(tweets) 14 | 15 | print('Number of characters:', len(chars)) 16 | print('Number of sequences:', len(tweets)) 17 | 18 | 19 | model = SimpleSeq2seq(input_dim=num_chars, hidden_dim=256, output_length=MAXLEN, output_dim=num_chars, depth=1) 20 | model.compile(loss='mse', optimizer='rmsprop') 21 | 22 | 23 | char_indices = dict((c, i) for i, c in enumerate(chars)) 24 | indices_char = dict((i, c) for i, c in enumerate(chars)) 25 | 26 | 27 | print('Vectorization...') 28 | X = np.zeros((num_tweets, MAXLEN, num_chars), dtype=np.float32) 29 | for i, tweet in enumerate(tweets): 30 | sentence = list(itertools.chain(*itertools.repeat(tweet.replace('\4', '').replace('\2', '').replace('\3', ''), times=2)))[:MAXLEN] 31 | for t, char in enumerate(sentence): 32 | X[i, t, char_indices[char]] = 1.0 33 | 34 | model.fit(X, X, batch_size=1024, nb_epoch=20, verbose=1) 35 | 36 | y = model.predict(X[:8]) 37 | 38 | def sample(a, temperature=1.0): 39 | # Helper function to sample an index from a probability array. 
40 | a = np.log(a) / temperature 41 | a = np.exp(a) / np.sum(np.exp(a)) 42 | return np.argmax(np.random.multinomial(1, a, 1)) 43 | 44 | for i in range(8): 45 | print(y[i].shape) 46 | for t, a in enumerate(y[i]): 47 | j = sample(a+a.min(), temperature=1.0) 48 | # print(j, end=' ') 49 | print(ord(indices_char[j]), end=' ') 50 | sys.stdout.write(indices_char[j]) 51 | sys.stdout.write('\n') 52 | sys.stdout.flush() 53 | 54 | -------------------------------------------------------------------------------- /lstm/s2s.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import itertools 3 | import numpy as np 4 | 5 | import seq2seq 6 | from seq2seq.models import SimpleSeq2seq 7 | 8 | 9 | model = SimpleSeq2seq(input_dim=1, hidden_dim=12, output_length=8, output_dim=1, depth=1) 10 | model.compile(loss='mse', optimizer='rmsprop') 11 | 12 | X = np.zeros((256, 8, 1), dtype=np.float32) 13 | X[:,:,0] = list(itertools.product([0.0, 1.0], repeat=8)) 14 | 15 | model.fit(X, X, batch_size=32, nb_epoch=1000, verbose=1) 16 | 17 | y = model.predict(X) 18 | 19 | for i in range(16): 20 | print(X[i,:,0], y[i,:,0] > 0.5) 21 | -------------------------------------------------------------------------------- /lstm/tweet.py: -------------------------------------------------------------------------------- 1 | # This code was originally derived from Keras' LSTM examples. 2 | 3 | import sys 4 | import json 5 | import random 6 | 7 | from keras.models import Sequential, model_from_json 8 | from keras.layers import Dense, Activation, Dropout 9 | from keras.layers import LSTM 10 | from keras.utils.data_utils import get_file 11 | 12 | import h5py 13 | import numpy as np 14 | 15 | import data 16 | 17 | seqlen = 40 18 | seqstep = 3 19 | 20 | 21 | # The tweet length should be above the learned sequence length. 22 | tweets, text, chars = data.read('tweets78k.txt', minlen=seqlen) 23 | print('Number of characters:', len(chars)) 24 | 25 | char_indices = dict((c, i) for i, c in enumerate(chars)) 26 | indices_char = dict((i, c) for i, c in enumerate(chars)) 27 | 28 | 29 | # Cut the text in semi-redundant sequences of seqlen characters. 30 | 31 | sentences = [] 32 | next_chars = [] 33 | for i in range(0, len(text) - seqlen, seqstep): 34 | sentences.append(text[i: i + seqlen]) 35 | next_chars.append(text[i + seqlen]) 36 | print('Number of sequences:', len(sentences)) 37 | 38 | 39 | def build_model(model_in=None, model_out=None): 40 | if model_in: 41 | print("Reading model...") 42 | with open(model_in+'.json', 'r') as model_file: 43 | model = model_from_json(model_file.read()) 44 | 45 | else: 46 | print('Build model...') 47 | model = Sequential() 48 | model.add(LSTM(64, return_sequences=True, input_shape=(seqlen, len(chars)))) 49 | model.add(Dropout(0.2)) 50 | model.add(LSTM(64, return_sequences=False)) 51 | model.add(Dropout(0.2)) 52 | model.add(Dense(len(chars))) 53 | model.add(Activation('softmax')) 54 | 55 | model.compile(loss='categorical_crossentropy', optimizer='rmsprop') 56 | 57 | if model_out: 58 | print("Saving model...") 59 | with open(model_out+'.json', 'w') as model_file: 60 | model_file.write(model.to_json()) 61 | 62 | return model 63 | 64 | 65 | def sample(a, temperature=1.0): 66 | # Helper function to sample an index from a probability array. 
67 | a = np.log(a) / temperature 68 | a = np.exp(a) / np.sum(np.exp(a)) 69 | return np.argmax(np.random.multinomial(1, a, 1)) 70 | 71 | 72 | def generate_tweets(model): 73 | for diversity in [0.2, 0.5, 1.0, 1.2]: 74 | print() 75 | print('----- Diversity:', diversity) 76 | 77 | sentence = random.choice(tweets)[:seqlen-1] + data.STX 78 | print('----- Generating with seed: "' + sentence + '"') 79 | 80 | for i in range(140): 81 | x = np.zeros((1, seqlen, len(chars))) 82 | for t, char in enumerate(sentence): 83 | x[0, t, char_indices[char]] = 1. 84 | 85 | preds = model.predict(x, verbose=0)[0] 86 | next_index = sample(preds, diversity) 87 | next_char = indices_char[next_index] 88 | 89 | sentence = sentence[1:] + next_char 90 | sys.stdout.write(next_char) 91 | sys.stdout.flush() 92 | print() 93 | 94 | 95 | def train(model): 96 | print('Vectorization...') 97 | X = np.zeros((len(sentences), seqlen, len(chars)), dtype=np.bool) 98 | y = np.zeros((len(sentences), len(chars)), dtype=np.bool) 99 | for i, sentence in enumerate(sentences): 100 | for t, char in enumerate(sentence): 101 | X[i, t, char_indices[char]] = 1.0 102 | y[i, char_indices[next_chars[i]]] = 1.0 103 | 104 | # Train the model, output generated text after each iteration. 105 | try: 106 | 107 | for iteration in range(1, 25): 108 | print() 109 | print('-' * 50) 110 | print('Iteration', iteration) 111 | 112 | idx = np.random.randint(X.shape[0], size=10000) 113 | model.fit(X[idx], y[idx], batch_size=128, nb_epoch=1) 114 | generate_tweets(model) 115 | 116 | except KeyboardInterrupt: 117 | pass # quit nicely 118 | 119 | 120 | if __name__ == "__main__": 121 | 122 | import argparse 123 | parser = argparse.ArgumentParser(description='nucl.ai16') 124 | parser.add_argument('--load', help='The name for model input file', default=None, type=str) 125 | parser.add_argument('--save', help='The name for model output file', default=None, type=str) 126 | parser.add_argument('--train', default=False, action='store_true') 127 | args = parser.parse_args() 128 | 129 | model = build_model(args.load, args.save) 130 | 131 | if args.load: 132 | print("Reading weights...") 133 | model.load_weights(args.load+'.h5f') 134 | 135 | if not args.train: 136 | generate_tweets(model) 137 | sys.exit(-1) 138 | 139 | train(model) 140 | if args.save: 141 | print("Saving weights...") 142 | model.save_weights(args.save+'.h5f') 143 | --------------------------------------------------------------------------------
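A closing note on the ``sample()`` helper that appears in both ``reply.py`` and ``tweet.py``: it rescales the network's predicted character probabilities by a temperature (called ``diversity`` in ``generate_tweets``) before drawing one character, so low temperatures concentrate draws on the most likely character while high temperatures flatten the distribution. The snippet below is a standalone sketch of that behaviour; the ``probs`` array is invented purely for illustration and is not part of the repository.

.. code:: python

    import numpy as np

    def sample(a, temperature=1.0):
        # Same helper as in tweet.py / reply.py: rescale log-probabilities by the
        # temperature, renormalise with a softmax, then draw a single index.
        a = np.log(a) / temperature
        a = np.exp(a) / np.sum(np.exp(a))
        return np.argmax(np.random.multinomial(1, a, 1))

    # Hypothetical next-character distribution over four characters (illustration only).
    probs = np.asarray([0.5, 0.3, 0.15, 0.05])

    for t in [0.2, 0.5, 1.0, 1.2]:
        draws = [sample(probs, temperature=t) for _ in range(1000)]
        freq = np.bincount(draws, minlength=len(probs)) / 1000.0
        print(t, freq)  # low t piles mass on index 0; high t spreads it out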