├── .gitignore ├── initializer.py ├── LICENSE ├── step.py ├── pattern.py ├── README.md └── lego.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | rect.json 3 | -------------------------------------------------------------------------------- /initializer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import json 4 | 5 | import cv2 6 | 7 | WINDOW = 'hello' 8 | 9 | 10 | class Initializer(object): 11 | def __init__(self): 12 | self.rect = [] 13 | self.capture = cv2.VideoCapture(0) 14 | 15 | def on_mouse(event, x, y, unused, user_data): 16 | if event == cv2.EVENT_LBUTTONDOWN: 17 | self.click(x, y) 18 | 19 | cv2.namedWindow(WINDOW) 20 | cv2.setMouseCallback(WINDOW, on_mouse) 21 | 22 | def click(self, x, y): 23 | self.rect.append([x, y]) 24 | 25 | def run(self): 26 | while len(self.rect) < 4: 27 | success, frame = self.capture.read() 28 | if success: 29 | cv2.imshow(WINDOW, frame) 30 | if cv2.waitKey(100) != -1: 31 | break 32 | 33 | 34 | if __name__ == '__main__': 35 | initializer = Initializer() 36 | initializer.run() 37 | with open('rect.json', 'w') as f: 38 | json.dump(initializer.rect, f) 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 Guido Lorenz 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 
11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /step.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import pypm 4 | 5 | from pattern import Pattern, PatternListener 6 | 7 | LATENCY = 8 8 | 9 | 10 | class StepSequencer(object): 11 | def __init__(self, pattern=Pattern()): 12 | pypm.Initialize() 13 | self.bpm = 120 14 | self.pattern = pattern 15 | self.output = pypm.Output(pypm.GetDefaultOutputDeviceID(), LATENCY) 16 | 17 | @property 18 | def bpm(self): 19 | return 15000.0 / self._step_time 20 | 21 | @bpm.setter 22 | def bpm(self, bpm): 23 | self._step_time = 15000.0 / bpm 24 | 25 | def play(self): 26 | next_time = pypm.Time() 27 | step = -1 28 | while True: 29 | if pypm.Time() >= next_time: 30 | step = (step + 1) % 16 31 | self.trigger_step(step, next_time) 32 | if pypm.Time() - next_time > LATENCY: 33 | print 'WARNING: Inaccurate timing. Increase LATENCY.' 
34 | next_time += self._step_time 35 | 36 | def trigger_step(self, step, timestamp): 37 | for track, note_on in enumerate(self.pattern.steps[step]): 38 | if note_on and not self.pattern.muted[track]: 39 | self.output.Write([[[0x90, 36 + track, 100], timestamp]]) 40 | self.output.Write([[[0x80, 36 + track], timestamp]]) 41 | 42 | 43 | if __name__ == '__main__': 44 | pattern_listener = PatternListener() 45 | pattern_listener.start() 46 | step = StepSequencer(pattern_listener.pattern) 47 | step.play() 48 | -------------------------------------------------------------------------------- /pattern.py: -------------------------------------------------------------------------------- 1 | import liblo 2 | import numpy 3 | 4 | 5 | class Pattern(object): 6 | def __init__(self, tracks=8, steps=16): 7 | self.steps = numpy.zeros((steps, tracks), bool) 8 | self.muted = numpy.zeros(tracks, bool) 9 | 10 | @property 11 | def num_tracks(self): 12 | return self.steps.shape[1] 13 | 14 | @property 15 | def num_steps(self): 16 | return self.steps.shape[0] 17 | 18 | def set_step(self, track, step): 19 | self.steps[step, track] = True 20 | 21 | def clear_step(self, track, step): 22 | self.steps[step, track] = False 23 | 24 | def mute(self, track): 25 | self.muted[track] = True 26 | 27 | def unmute(self, track): 28 | self.muted[track] = False 29 | 30 | def print_(self): 31 | for track in range(self.num_tracks): 32 | for step in range(self.num_steps): 33 | if self.steps[step, track]: 34 | print '*', 35 | else: 36 | print ' ', 37 | print 38 | 39 | 40 | class SharedPattern(Pattern): 41 | def __init__(self, address=8765): 42 | Pattern.__init__(self) 43 | self.target = liblo.Address(address) 44 | 45 | def set_step(self, track, step): 46 | if not self.steps[step, track]: 47 | liblo.send(self.target, '/pattern/set', track, step) 48 | Pattern.set_step(self, track, step) 49 | 50 | def clear_step(self, track, step): 51 | if self.steps[step, track]: 52 | liblo.send(self.target, '/pattern/clear', track, 
step) 53 | Pattern.clear_step(self, track, step) 54 | 55 | def mute(self, track): 56 | if not self.muted[track]: 57 | liblo.send(self.target, '/pattern/mute', track) 58 | Pattern.mute(self, track) 59 | 60 | def unmute(self, track): 61 | if self.muted[track]: 62 | liblo.send(self.target, '/pattern/unmute', track) 63 | Pattern.unmute(self, track) 64 | 65 | 66 | class PatternListener(liblo.ServerThread): 67 | def __init__(self, address=8765): 68 | liblo.ServerThread.__init__(self, address) 69 | self.pattern = Pattern() 70 | 71 | @liblo.make_method('/pattern/set', 'ii') 72 | def set_callback(self, path, args): 73 | track, step = args 74 | self.pattern.set_step(track, step) 75 | 76 | @liblo.make_method('/pattern/clear', 'ii') 77 | def clear_callback(self, path, args): 78 | track, step = args 79 | self.pattern.clear_step(track, step) 80 | 81 | @liblo.make_method('/pattern/mute', 'i') 82 | def mute_callback(self, path, track): 83 | self.pattern.mute(track) 84 | 85 | @liblo.make_method('/pattern/unmute', 'i') 86 | def unmute_callback(self, path, track): 87 | self.pattern.unmute(track) 88 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Beat Bricks 2 | 3 | A LEGO Step Sequencer made at [ADVANCE HACKATHON][1]. There's a [video][2]. 4 | 5 | [1]: http://hackathon.advance-conference.com/ 6 | [2]: https://vimeo.com/45026119 7 | 8 | 9 | ## Disclaimer 10 | 11 | This is a hackathon project. It was made during a weekend and a few evenings the week after. 12 | It's basically a proof of concept, so please don't expect it to be easy to set up or even 13 | ready for production use. You'll probably have to tweak the code a little to adapt it to the 14 | camera you're using and the lighting conditions. Even if you know what you're doing, it takes 15 | some time to get it running. 
I still think that you can have some fun playing with it, like I
had creating it.


## How It Works

In a video image of the LEGO plate, every potential brick position is evaluated for its color.
Depending on the detected color, a note in the step sequencer pattern is added or removed. These
changes to the step sequencer pattern are published as OSC messages. The actual sequencer is a
separate process that listens to these OSC messages, recreates the pattern from them and sends
out MIDI notes for the pattern at 120 bpm.

In case you're wondering: I took the detour through OSC and a separate process for latency
reasons. In the separate process the MIDI messages can be triggered with much lower latency than
in a "green" Python thread.


## What You Need

* A 32x32 pin LEGO plate
* Some 2x2 LEGO bricks
* A webcam (I used a Logitech C270)
* Adhesive tape, preferably double-faced
* Some basic hacking skills

* A MIDI loopback device (IAC, MIDI Yoke etc.)
* A MIDI-based sound generator (Ableton Live, pure data, Rosegarden etc.)

* Python
* OpenCV Python module
* pyliblo
* pyPortMidi


## How To Set It Up

1. Tape the webcam to a wall cupboard in your kitchen.
2. Tape the LEGO plate to the countertop below the webcam.
3. Make sure that the plate is well-lit (bright, from above, without shadows).
4. Connect the webcam to your laptop.
5. Run `python initializer.py` to mark the position of the LEGO plate in the webcam image:
   In the window that appears, click the four corners of the plate counterclockwise.
   Start with the corner that is on the lower left when you're standing in front of it.
   That's not necessarily the lower left corner in the image, since it might be rotated.
65 | After the fourth click, the program should end and create a file called `rect.json`. 66 | 6. Run `python step.py`. This is the actual sequencer that listens to the OSC messages and 67 | sends out MIDI notes. 68 | 7. Run `python lego.py` in a separate terminal. This is the program that detects bricks in 69 | the camera image and creates an OSC message for every change. In the window that opens, 70 | you should see a square picture of the LEGO plate. 71 | 72 | 73 | ## Troubleshooting 74 | 75 | * Instead of running `python step.py` you can run `oscdump 8765` (it comes with pyliblo) at 76 | first. If you don't see it printing messages like `/pattern/set 1 4` when putting a brick 77 | on the plate, the brick detection in `lego.py` isn't working correctly. If you see these 78 | messages, but don't hear any sound, there's a problem with you MIDI setup and sound device. 79 | -------------------------------------------------------------------------------- /lego.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import json 4 | 5 | import cv2 6 | import numpy 7 | 8 | from pattern import SharedPattern 9 | 10 | WINDOW = 'beat it' 11 | CELL_SIZE = 16 12 | GRID_SIZE = 16 * CELL_SIZE 13 | 14 | 15 | def cell_start_end(id): 16 | start = id * CELL_SIZE + CELL_SIZE / 4 17 | end = start + CELL_SIZE / 2 18 | return start, end 19 | 20 | 21 | def average_cell_color_hsv(img, y, x): 22 | y_start, y_end = cell_start_end(y) 23 | x_start, x_end = cell_start_end(x) 24 | cell = img[ 25 | y_start:y_end, 26 | x_start:x_end, 27 | :] 28 | return bgr2hsv(numpy.average(numpy.average(cell, axis=0), axis=0)) 29 | 30 | 31 | def is_note_color_hsv(color): 32 | h, s, v = color 33 | return ( 34 | (-0.3 < h < 0.1 and s > 0.6 and v > 200) or # red brick 35 | (0.8 < h < 1.2 and s > 0.3 and v > 220) or # yellow brick 36 | (3.2 < h < 3.6 and s > 0.9 and v > 180) or # blue brick 37 | (s < 0.1 and v > 250)) # white brick 38 | 39 | 40 | def 
def is_clear_color_hsv(color):
    """Return True if *color* (h, s, v) matches the 'clear this step' brick."""
    h, s, v = color
    return 2.5 < h < 2.9 and s > 0.7 and v > 100


def bgr2hsv(color):
    """Convert a (b, g, r) triple to an (h, s, v) triple.

    h is expressed in units of 60 degrees (range roughly -1..5, not
    scaled to 360), s in 0..1, v keeps the scale of the input channels.
    """
    b, g, r = color
    v = max(b, g, r)
    m = min(b, g, r)
    if v > 0:
        s = (v - m) / v
    else:
        s = 0
    # Fix: guard the achromatic case (b == g == r).  The original divided
    # by (v - m) unconditionally and crashed with ZeroDivisionError for
    # gray/white cells; hue is undefined there, so use 0 by convention.
    if v == m:
        h = 0
    elif v == r:
        h = (g - b) / (v - m)
    elif v == g:
        h = 2 + (b - r) / (v - m)
    else:
        h = 4 + (r - g) / (v - m)
    return (h, s, v)


class LegoPatternDetector(object):
    """Detects bricks on the rectified plate image and mirrors them into a
    SharedPattern, which publishes every change as an OSC message."""

    def __init__(self):
        self.homography = self.compute_homography()
        self.pattern = SharedPattern()

    def compute_homography(self):
        """Map the four plate corners from rect.json onto a square grid."""
        # Fix: close rect.json after reading -- the original leaked the
        # handle via json.load(open(...)).
        with open('rect.json') as f:
            src_points = json.load(f)
        dst_points = [
            [0, 0],
            [GRID_SIZE, 0],
            [GRID_SIZE, GRID_SIZE],
            [0, GRID_SIZE]]
        return cv2.findHomography(
            numpy.asarray(src_points, float),
            numpy.asarray(dst_points, float))[0]

    def process_image(self, img):
        """Rectify the camera frame, update notes and mutes, return the warp."""
        img = cv2.warpPerspective(img, self.homography, (GRID_SIZE, GRID_SIZE))
        self.update_notes(img)
        self.mute_tracks(img)
        return img

    def update_notes(self, img):
        # Tracks are rows, steps are columns; a 'clear' brick removes the
        # note, any note-colored brick sets it, anything else is ignored.
        for track in range(self.pattern.num_tracks):
            for step in range(self.pattern.num_steps):
                color = average_cell_color_hsv(img, track, step)
                if is_clear_color_hsv(color):
                    self.pattern.clear_step(track, step)
                elif is_note_color_hsv(color):
                    self.pattern.set_step(track, step)

    def mute_tracks(self, img):
        # Column 0 of rows 8..15 holds one mute cell per track: a 'clear'
        # brick unmutes, anything else mutes.  (Assumes the 8-track
        # default Pattern, hence the +8 row offset -- confirm if the
        # track count ever changes.)
        for track in range(self.pattern.num_tracks):
            color = average_cell_color_hsv(img, track + 8, 0)
            if is_clear_color_hsv(color):
                self.pattern.unmute(track)
            else:
                self.pattern.mute(track)


if __name__ == '__main__':
    capture = cv2.VideoCapture(0)
    cv2.namedWindow(WINDOW)
    pattern_detector = LegoPatternDetector()

    try:
        while True:
            success, frame = capture.read()
            if success:
                img = pattern_detector.process_image(frame)
                cv2.imshow(WINDOW, img)
            if cv2.waitKey(1) == 27:  # ESC quits
                break
    finally:
        # Fix: release the camera and close windows on exit; the original
        # left the device claimed.
        capture.release()
        cv2.destroyAllWindows()