├── .github
└── FUNDING.yml
├── OpenCVAnim.py
├── LICENSE
├── Test.py
├── README.md
└── OpenCVAnimOperator.py
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4 | patreon: joevenner
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: joevenner
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | otechie: # Replace with a single Otechie username
12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
13 |
--------------------------------------------------------------------------------
/OpenCVAnim.py:
--------------------------------------------------------------------------------
1 | import bpy
2 |
class OBJECT_MT_OpenCVPanel(bpy.types.WorkSpaceTool):
    """Workspace tool shown in the 3D View (Object mode) toolbar.

    Exposes a single "Capture" button that launches the modal
    ``wm.opencv_operator`` facial-capture operator.
    """
    bl_space_type = 'VIEW_3D'
    bl_context_mode = 'OBJECT'
    bl_idname = "ui_plus.opencv"
    bl_label = "OpenCV Animation"
    bl_options = {'REGISTER'}
    bl_icon = "ops.generic.select_circle"

    def draw_settings(context, layout, tool):
        # NOTE: Blender invokes draw_settings on WorkSpaceTool subclasses
        # without a `self` argument, hence the unusual signature.
        row = layout.row()
        row.operator("wm.opencv_operator", text="Capture", icon="OUTLINER_OB_CAMERA")
16 |
def register():
    """Add the OpenCV tool to the toolbar, in its own group after a separator."""
    bpy.utils.register_tool(OBJECT_MT_OpenCVPanel, separator=True, group=True)
19 |
def unregister():
    """Remove the OpenCV tool from the toolbar again."""
    bpy.utils.unregister_tool(OBJECT_MT_OpenCVPanel)
22 |
# Allow running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 JoeVenner
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Test.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2 as cv
import time

# OpenCV facial-capture test: grabs webcam frames, detects the largest face,
# fits 68 LBF facial landmarks to it and draws them until ESC is pressed.

# Path to the trained landmark model (lbfmodel.yaml).
# Download it from https://github.com/kurnianggoro/GSOC2017/tree/master/data
# and update this path to point at the file.
landmark_model_path = "C:\\Users\\Joe\\Documents\\AnimationUsingPython\\data\\lbfmodel.yaml"

_cap = cv.VideoCapture(0)
_cap.set(cv.CAP_PROP_FRAME_WIDTH, 512)
_cap.set(cv.CAP_PROP_FRAME_HEIGHT, 512)
_cap.set(cv.CAP_PROP_BUFFERSIZE, 1)
time.sleep(0.5)  # give the camera a moment to warm up

facemark = cv.face.createFacemarkLBF()

# error detection: without the model the landmark fit below cannot work,
# so bail out early instead of crashing later in the loop
try:
    facemark.loadModel(landmark_model_path)
except cv.error:
    print("Model not found")
    _cap.release()
    exit()

cascade = cv.CascadeClassifier(cv.data.haarcascades + "haarcascade_frontalface_alt.xml")
if cascade.empty():
    print("cascade not found")
    _cap.release()
    exit()

print("Press ESC to stop")

# capture loop: runs until ESC (keycode 27) or the camera stops delivering frames
while True:
    ret, frame = _cap.read()
    if not ret:  # camera read failed; stop instead of crashing on a None frame
        break

    # positional args: (image, scaleFactor, minNeighbors, flags, minSize)
    faces = cascade.detectMultiScale(frame, 1.05, 6, cv.CASCADE_SCALE_IMAGE, (130, 130))

    # find biggest face (by width), and only keep it
    if isinstance(faces, np.ndarray) and faces.size > 0:
        biggestFace = np.zeros(shape=(1, 4))
        for face in faces:
            if face[2] > biggestFace[0][2]:
                biggestFace[0] = face

        # find landmarks on the biggest face
        ok, landmarks = facemark.fit(frame, faces=biggestFace)

        # draw landmarks (fit returns float coords; cv.circle needs ints)
        if ok:
            for marks in landmarks:
                for (x, y) in marks[0]:
                    cv.circle(frame, (int(x), int(y)), 2, (0, 255, 255), -1)

        # draw every detected face with a label
        for i, (x, y, w, h) in enumerate(faces):
            cv.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 1)
            cv.putText(frame, "Face #{}".format(i), (x - 10, y - 10),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)

    cv.imshow("Image Landmarks", frame)
    if cv.waitKey(1) == 27:  # ESC
        break

# release the camera and close windows instead of hard-exiting mid-loop
_cap.release()
cv.destroyAllWindows()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | ## - Demo
20 |
21 |
22 |
23 |
24 |
25 |
26 | ## - About :
27 | It's a Python script to control a 3D character's face movements by capturing the user's face landmarks.
28 |
29 | ## - Install prerequisites :
30 |
31 | 1. Download Blender 2.82 [HERE](https://download.blender.org/release/Blender2.82/blender-2.82-windows64.msi)
32 | 2. Install the required modules
33 | Windows : Open Command Prompt as Administrator
34 |
` cd "C:\Program Files\Blender Foundation\Blender 2.82\2.82\python\bin"`
35 |
`python -m pip install --upgrade pip `
36 |
`python -m pip install opencv-contrib-python numpy `
37 | 3. Download Character file [HERE](https://cloud.blender.org/p/characters/5718a967c379cf04929a4247)
38 | 4. Download Project Repo [HERE](https://github.com/joeVenner/control-3d-character-using-python/archive/master.zip)
39 | 5. Download Face Landmarks Model [HERE](https://github.com/kurnianggoro/GSOC2017/archive/master.zip)
40 |
41 |
42 | ## - Youtube Video :
43 | In this tutorial I explain how the script works and walk you through the installation process.
44 | Check it out!
Link: https://youtu.be/tEmdLULBUTQ
45 |
46 |
47 | ## - License :
48 | Please see the **[LICENSE](LICENSE)** included in this repository for a full copy of the MIT license, which this project is licensed under.
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/OpenCVAnimOperator.py:
--------------------------------------------------------------------------------
1 | import bpy
2 | import cv2
3 | import time
4 | import numpy
5 |
6 |
7 | # Download trained model (lbfmodel.yaml)
8 | # https://github.com/kurnianggoro/GSOC2017/tree/master/data
9 |
10 | # Install prerequisites:
11 |
12 | # Linux: (may vary between distro's and installation methods)
13 | # This is for manjaro with Blender installed from the package manager
14 | # python3 -m ensurepip
15 | # python3 -m pip install --upgrade pip --user
16 | # python3 -m pip install opencv-contrib-python numpy --user
17 |
18 | # MacOS
19 | # open the Terminal
20 | # cd /Applications/Blender.app/Contents/Resources/2.81/python/bin
21 | # ./python3.7m -m ensurepip
22 | # ./python3.7m -m pip install --upgrade pip --user
23 | # ./python3.7m -m pip install opencv-contrib-python numpy --user
24 |
25 | # Windows:
26 | # Open Command Prompt as Administrator
27 | # cd "C:\Program Files\Blender Foundation\Blender 2.82\2.82\python\bin"
28 | # python -m pip install --upgrade pip
29 | # python -m pip install opencv-contrib-python numpy
30 |
class OpenCVAnimOperator(bpy.types.Operator):
    """Modal operator that re-runs itself from a window-manager timer.

    On every timer tick it reads a webcam frame, detects the largest face
    (Haar cascade), fits 68 LBF facial landmarks, estimates the head pose
    with solvePnP, and keyframes the head/mouth/brow/eyelid bones of the
    "RIG-Vincent" armature. Stops on ESC, right mouse button, or when the
    `stop` flag is set to True.
    """
    bl_idname = "wm.opencv_operator"
    bl_label = "OpenCV Animation Operator"

    # Set paths to trained models downloaded above
    face_detect_path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
    #landmark_model_path = "./data/lbfmodel.yaml" #Linux
    #landmark_model_path = "./data/lbfmodel.yaml" #Mac
    landmark_model_path = "C:\\Users\\Joe\\Documents\\AnimationUsingPython\\data\\lbfmodel.yaml" #Windows

    # Load models once at class-definition time (shared by all instances)
    fm = cv2.face.createFacemarkLBF()
    fm.loadModel(landmark_model_path)
    cas = cv2.CascadeClassifier(face_detect_path)

    _timer = None   # window-manager timer created in execute()
    _cap = None     # cv2.VideoCapture handle, opened lazily by init_camera()
    stop = False    # set to True externally to make modal() cancel


    # Webcam resolution:
    width = 640
    height = 480

    # 3D model points: a generic head in arbitrary model units, matching the
    # six 2D landmarks picked out of the 68-point fit in modal() below.
    model_points = numpy.array([
                            (0.0, 0.0, 0.0),             # Nose tip
                            (0.0, -330.0, -65.0),        # Chin
                            (-225.0, 170.0, -135.0),     # Left eye left corner
                            (225.0, 170.0, -135.0),      # Right eye right corner
                            (-150.0, -150.0, -125.0),    # Left Mouth corner
                            (150.0, -150.0, -125.0)      # Right mouth corner
                        ], dtype = numpy.float32)
    # Camera internals: pinhole matrix with the focal length approximated by
    # the image height and the principal point at the image centre.
    camera_matrix = numpy.array(
                        [[height, 0.0, width/2],
                        [0.0, height, height/2],
                        [0.0, 0.0, 1.0]], dtype = numpy.float32
                        )

    # Keeps a moving average of given length
    def smooth_value(self, name, length, value):
        """Push `value` into the rolling window `name` (at most `length`
        samples, newest first) and return the window's average."""
        if not hasattr(self, 'smooth'):
            self.smooth = {}
        if not name in self.smooth:
            self.smooth[name] = numpy.array([value])
        else:
            self.smooth[name] = numpy.insert(arr=self.smooth[name], obj=0, values=value)
            if self.smooth[name].size > length:
                # drop the oldest sample to keep the window bounded
                self.smooth[name] = numpy.delete(self.smooth[name], self.smooth[name].size-1, 0)
        sum = 0
        for val in self.smooth[name]:
            sum += val
        return sum / self.smooth[name].size

    # Keeps min and max values, then returns the value in a range 0 - 1
    def get_range(self, name, value):
        """Track the min/max ever observed under `name` and return where
        `value` sits in that range, normalised to 0..1 (0.0 while the
        range is still empty)."""
        if not hasattr(self, 'range'):
            self.range = {}
        if not name in self.range:
            self.range[name] = numpy.array([value, value])
        else:
            self.range[name] = numpy.array([min(value, self.range[name][0]), max(value, self.range[name][1])] )
        val_range = self.range[name][1] - self.range[name][0]
        if val_range != 0:
            return (value - self.range[name][0]) / val_range
        else:
            return 0.0

    # The main "loop": called by Blender for every event while modal
    def modal(self, context, event):
        """Handle one modal event; on each TIMER tick capture a frame and
        key the rig bones from the detected face."""
        if (event.type in {'RIGHTMOUSE', 'ESC'}) or self.stop == True:
            self.cancel(context)
            return {'CANCELLED'}

        if event.type == 'TIMER':
            self.init_camera()
            _, image = self._cap.read()
            #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            #gray = cv2.equalizeHist(gray)

            # find faces
            faces = self.cas.detectMultiScale(image,
                scaleFactor=1.05,
                minNeighbors=3,
                flags=cv2.CASCADE_SCALE_IMAGE,
                minSize=(int(self.width/5), int(self.width/5)))

            #find biggest face, and only keep it
            if type(faces) is numpy.ndarray and faces.size > 0:
                biggestFace = numpy.zeros(shape=(1,4))
                for face in faces:
                    if face[2] > biggestFace[0][2]:  # compare widths
                        print(face)
                        biggestFace[0] = face

                # find the landmarks.
                _, landmarks = self.fm.fit(image, faces=biggestFace)
                for mark in landmarks:
                    shape = mark[0]  # landmark coordinates for this face

                    #2D image points. If you change the image, you need to change vector
                    image_points = numpy.array([shape[30],     # Nose tip - 31
                                                shape[8],      # Chin - 9
                                                shape[36],     # Left eye left corner - 37
                                                shape[45],     # Right eye right corner - 46
                                                shape[48],     # Left Mouth corner - 49
                                                shape[54]      # Right mouth corner - 55
                                            ], dtype = numpy.float32)

                    dist_coeffs = numpy.zeros((4,1)) # Assuming no lens distortion

                    # determine head rotation; once a solution exists, reuse it
                    # as the initial guess for temporal stability
                    if hasattr(self, 'rotation_vector'):
                        (success, self.rotation_vector, self.translation_vector) = cv2.solvePnP(self.model_points,
                            image_points, self.camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE,
                            rvec=self.rotation_vector, tvec=self.translation_vector,
                            useExtrinsicGuess=True)
                    else:
                        (success, self.rotation_vector, self.translation_vector) = cv2.solvePnP(self.model_points,
                            image_points, self.camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE,
                            useExtrinsicGuess=False)

                    # remember the first solved pose as the neutral/rest angle
                    if not hasattr(self, 'first_angle'):
                        self.first_angle = numpy.copy(self.rotation_vector)

                    # set bone rotation/positions
                    bones = bpy.data.objects["RIG-Vincent"].pose.bones

                    # head rotation: smoothed delta from the neutral angle;
                    # the divisors damp the motion per axis
                    bones["head_fk"].rotation_euler[0] = self.smooth_value("h_x", 5, (self.rotation_vector[0] - self.first_angle[0])) / 1   # Up/Down
                    bones["head_fk"].rotation_euler[2] = self.smooth_value("h_y", 5, -(self.rotation_vector[1] - self.first_angle[1])) / 1.5  # Rotate
                    bones["head_fk"].rotation_euler[1] = self.smooth_value("h_z", 5, (self.rotation_vector[2] - self.first_angle[2])) / 1.3   # Left/Right

                    bones["head_fk"].keyframe_insert(data_path="rotation_euler", index=-1)

                    # mouth position: openness from landmarks 62-66, width from
                    # 48-54, normalised via get_range then scaled to bone units
                    bones["mouth_ctrl"].location[2] = self.smooth_value("m_h", 2, -self.get_range("mouth_height", numpy.linalg.norm(shape[62] - shape[66])) * 0.06 )
                    bones["mouth_ctrl"].location[0] = self.smooth_value("m_w", 2, (self.get_range("mouth_width", numpy.linalg.norm(shape[54] - shape[48])) - 0.5) * -0.04)

                    bones["mouth_ctrl"].keyframe_insert(data_path="location", index=-1)

                    #eyebrows: distance from brow points (19/24) to nose bridge (27)
                    bones["brow_ctrl_L"].location[2] = self.smooth_value("b_l", 3, (self.get_range("brow_left", numpy.linalg.norm(shape[19] - shape[27])) -0.5) * 0.04)
                    bones["brow_ctrl_R"].location[2] = self.smooth_value("b_r", 3, (self.get_range("brow_right", numpy.linalg.norm(shape[24] - shape[27])) -0.5) * 0.04)

                    bones["brow_ctrl_L"].keyframe_insert(data_path="location", index=2)
                    bones["brow_ctrl_R"].keyframe_insert(data_path="location", index=2)

                    # eyelids
                    # NOTE(review): landmark pairs (48,44) and (41,39) look unusual
                    # for eye openness (the 68-point convention uses 43-47 / 37-41)
                    # — confirm against the landmark layout before changing.
                    l_open = self.smooth_value("e_l", 2, self.get_range("l_open", -numpy.linalg.norm(shape[48] - shape[44])) )
                    r_open = self.smooth_value("e_r", 2, self.get_range("r_open", -numpy.linalg.norm(shape[41] - shape[39])) )
                    eyes_open = (l_open + r_open) / 2.0 # looks weird if both eyes aren't the same...
                    bones["eyelid_up_ctrl_R"].location[2] = -eyes_open * 0.025 + 0.005
                    bones["eyelid_low_ctrl_R"].location[2] = eyes_open * 0.025 - 0.005
                    bones["eyelid_up_ctrl_L"].location[2] = -eyes_open * 0.025 + 0.005
                    bones["eyelid_low_ctrl_L"].location[2] = eyes_open * 0.025 - 0.005

                    bones["eyelid_up_ctrl_R"].keyframe_insert(data_path="location", index=2)
                    bones["eyelid_low_ctrl_R"].keyframe_insert(data_path="location", index=2)
                    bones["eyelid_up_ctrl_L"].keyframe_insert(data_path="location", index=2)
                    bones["eyelid_low_ctrl_L"].keyframe_insert(data_path="location", index=2)

                # draw face markers
                for (x, y) in shape:
                    cv2.circle(image, (int(x), int(y)), 2, (0, 255, 255), -1)

            # draw detected face
            for (x,y,w,h) in faces:
                cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),1)

            # Show camera image in a window
            cv2.imshow("Output", image)
            cv2.waitKey(1)

        return {'PASS_THROUGH'}

    def init_camera(self):
        """Open the webcam on first use and configure resolution/buffering."""
        if self._cap == None:
            self._cap = cv2.VideoCapture(0)
            self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
            self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
            self._cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
            time.sleep(1.0)  # give the camera time to warm up

    def stop_playback(self, scene):
        """frame_change_pre handler: cancel animation playback at the last frame."""
        print(format(scene.frame_current) + " / " + format(scene.frame_end))
        if scene.frame_current == scene.frame_end:
            bpy.ops.screen.animation_cancel(restore_frame=False)

    def execute(self, context):
        """Start the operator: install the playback handler, add a 10 ms
        window-manager timer, and run modally until cancelled."""
        bpy.app.handlers.frame_change_pre.append(self.stop_playback)

        wm = context.window_manager
        self._timer = wm.event_timer_add(0.01, window=context.window)
        wm.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        """Tear down: remove the timer, close OpenCV windows, release the camera.

        NOTE(review): assumes init_camera() has run (self._cap not None) —
        holds when cancel follows at least one TIMER tick; confirm otherwise.
        """
        wm = context.window_manager
        wm.event_timer_remove(self._timer)
        cv2.destroyAllWindows()
        self._cap.release()
        self._cap = None
237 |
def register():
    """Register the OpenCV animation operator class with Blender."""
    bpy.utils.register_class(OpenCVAnimOperator)
240 |
def unregister():
    """Remove the OpenCV animation operator class from Blender."""
    bpy.utils.unregister_class(OpenCVAnimOperator)
243 |
# Allow running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
246 |
247 | # test call
248 | #bpy.ops.wm.opencv_operator()
249 |
250 |
251 |
252 |
--------------------------------------------------------------------------------