├── .gitignore
├── FaceSwap
│   ├── FaceRendering.py
│   ├── FaceRendering.pyc
│   ├── FaceSwap.pyproj
│   ├── ImageProcessing.py
│   ├── ImageProcessing.pyc
│   ├── NonLinearLeastSquares.py
│   ├── NonLinearLeastSquares.pyc
│   ├── drawing.py
│   ├── drawing.pyc
│   ├── models.py
│   ├── models.pyc
│   ├── utils.py
│   ├── utils.pyc
│   ├── zad1.py
│   └── zad2.py
├── LICENSE
├── README.md
├── candide.npz
├── data
│   ├── brad pitt.jpg
│   ├── einstein.jpg
│   ├── equation.png
│   ├── eye.png
│   ├── hand.png
│   ├── jolie.jpg
│   └── mask.png
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 |
4 | # User-specific files
5 | *.suo
6 | *.user
7 | *.userosscache
8 | *.sln.docstates
9 |
10 | # User-specific files (MonoDevelop/Xamarin Studio)
11 | *.userprefs
12 |
13 | # Build results
14 | [Dd]ebug/
15 | [Dd]ebugPublic/
16 | [Rr]elease/
17 | [Rr]eleases/
18 | x64/
19 | x86/
20 | bld/
21 | [Bb]in/
22 | [Oo]bj/
23 | [Ll]og/
24 |
25 | # Visual Studio 2015 cache/options directory
26 | .vs/
27 | # Uncomment if you have tasks that create the project's static files in wwwroot
28 | #wwwroot/
29 |
30 | # MSTest test Results
31 | [Tt]est[Rr]esult*/
32 | [Bb]uild[Ll]og.*
33 |
34 | # NUNIT
35 | *.VisualState.xml
36 | TestResult.xml
37 |
38 | # Build Results of an ATL Project
39 | [Dd]ebugPS/
40 | [Rr]eleasePS/
41 | dlldata.c
42 |
43 | # DNX
44 | project.lock.json
45 | artifacts/
46 |
47 | *_i.c
48 | *_p.c
49 | *_i.h
50 | *.ilk
51 | *.meta
52 | *.obj
53 | *.pch
54 | *.pdb
55 | *.pgc
56 | *.pgd
57 | *.rsp
58 | *.sbr
59 | *.tlb
60 | *.tli
61 | *.tlh
62 | *.tmp
63 | *.tmp_proj
64 | *.log
65 | *.vspscc
66 | *.vssscc
67 | .builds
68 | *.pidb
69 | *.svclog
70 | *.scc
71 |
72 | # Chutzpah Test files
73 | _Chutzpah*
74 |
75 | # Visual C++ cache files
76 | ipch/
77 | *.aps
78 | *.ncb
79 | *.opendb
80 | *.opensdf
81 | *.sdf
82 | *.cachefile
83 | *.VC.db
84 | *.VC.VC.opendb
85 |
86 | # Visual Studio profiler
87 | *.psess
88 | *.vsp
89 | *.vspx
90 | *.sap
91 |
92 | # TFS 2012 Local Workspace
93 | $tf/
94 |
95 | # Guidance Automation Toolkit
96 | *.gpState
97 |
98 | # ReSharper is a .NET coding add-in
99 | _ReSharper*/
100 | *.[Rr]e[Ss]harper
101 | *.DotSettings.user
102 |
103 | # JustCode is a .NET coding add-in
104 | .JustCode
105 |
106 | # TeamCity is a build add-in
107 | _TeamCity*
108 |
109 | # DotCover is a Code Coverage Tool
110 | *.dotCover
111 |
112 | # NCrunch
113 | _NCrunch_*
114 | .*crunch*.local.xml
115 | nCrunchTemp_*
116 |
117 | # MightyMoose
118 | *.mm.*
119 | AutoTest.Net/
120 |
121 | # Web workbench (sass)
122 | .sass-cache/
123 |
124 | # Installshield output folder
125 | [Ee]xpress/
126 |
127 | # DocProject is a documentation generator add-in
128 | DocProject/buildhelp/
129 | DocProject/Help/*.HxT
130 | DocProject/Help/*.HxC
131 | DocProject/Help/*.hhc
132 | DocProject/Help/*.hhk
133 | DocProject/Help/*.hhp
134 | DocProject/Help/Html2
135 | DocProject/Help/html
136 |
137 | # Click-Once directory
138 | publish/
139 |
140 | # Publish Web Output
141 | *.[Pp]ublish.xml
142 | *.azurePubxml
143 | # TODO: Comment the next line if you want to check in your web deploy settings,
144 | # but database connection strings (with potential passwords) will be unencrypted
145 | *.pubxml
146 | *.publishproj
147 |
148 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
149 | # check in your Azure Web App publish settings, but sensitive information contained
150 | # in these scripts will be unencrypted
151 | PublishScripts/
152 |
153 | # NuGet Packages
154 | *.nupkg
155 | # The packages folder can be ignored because of Package Restore
156 | **/packages/*
157 | # except build/, which is used as an MSBuild target.
158 | !**/packages/build/
159 | # Uncomment if necessary; however, it will generally be regenerated when needed
160 | #!**/packages/repositories.config
161 | # NuGet v3's project.json files produce more ignorable files
162 | *.nuget.props
163 | *.nuget.targets
164 |
165 | # Microsoft Azure Build Output
166 | csx/
167 | *.build.csdef
168 |
169 | # Microsoft Azure Emulator
170 | ecf/
171 | rcf/
172 |
173 | # Windows Store app package directories and files
174 | AppPackages/
175 | BundleArtifacts/
176 | Package.StoreAssociation.xml
177 | _pkginfo.txt
178 |
179 | # Visual Studio cache files
180 | # files ending in .cache can be ignored
181 | *.[Cc]ache
182 | # but keep track of directories ending in .cache
183 | !*.[Cc]ache/
184 |
185 | # Others
186 | ClientBin/
187 | ~$*
188 | *~
189 | *.dbmdl
190 | *.dbproj.schemaview
191 | *.pfx
192 | *.publishsettings
193 | node_modules/
194 | orleans.codegen.cs
195 |
196 | # Since there are multiple workflows, uncomment next line to ignore bower_components
197 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
198 | #bower_components/
199 |
200 | # RIA/Silverlight projects
201 | Generated_Code/
202 |
203 | # Backup & report files from converting an old project file
204 | # to a newer Visual Studio version. Backup files are not needed,
205 | # because we have git ;-)
206 | _UpgradeReport_Files/
207 | Backup*/
208 | UpgradeLog*.XML
209 | UpgradeLog*.htm
210 |
211 | # SQL Server files
212 | *.mdf
213 | *.ldf
214 |
215 | # Business Intelligence projects
216 | *.rdl.data
217 | *.bim.layout
218 | *.bim_*.settings
219 |
220 | # Microsoft Fakes
221 | FakesAssemblies/
222 |
223 | # GhostDoc plugin setting file
224 | *.GhostDoc.xml
225 |
226 | # Node.js Tools for Visual Studio
227 | .ntvs_analysis.dat
228 |
229 | # Visual Studio 6 build log
230 | *.plg
231 |
232 | # Visual Studio 6 workspace options file
233 | *.opt
234 |
235 | # Visual Studio LightSwitch build output
236 | **/*.HTMLClient/GeneratedArtifacts
237 | **/*.DesktopClient/GeneratedArtifacts
238 | **/*.DesktopClient/ModelManifest.xml
239 | **/*.Server/GeneratedArtifacts
240 | **/*.Server/ModelManifest.xml
241 | _Pvt_Extensions
242 |
243 | # Paket dependency manager
244 | .paket/paket.exe
245 | paket-files/
246 |
247 | # FAKE - F# Make
248 | .fake/
249 |
250 | # JetBrains Rider
251 | .idea/
252 | *.sln.iml
253 |
--------------------------------------------------------------------------------
/FaceSwap/FaceRendering.py:
--------------------------------------------------------------------------------
1 | import pygame
2 | from pygame.locals import *
3 |
4 | from OpenGL.GL import *
5 | from OpenGL.GLU import *
6 |
7 | import numpy as np
8 |
9 |
10 | def setOrtho(w, h):
11 | glMatrixMode(GL_PROJECTION)
12 | glLoadIdentity()
13 | glOrtho(0, w, h, 0, -1000, 1000)
14 | glMatrixMode(GL_MODELVIEW)
15 |
16 | def addTexture(img):
17 | textureId = glGenTextures(1)
18 | glBindTexture(GL_TEXTURE_2D, textureId)
19 | glPixelStorei(GL_UNPACK_ALIGNMENT,1)
20 | glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.shape[1], img.shape[0], 0, GL_BGR, GL_UNSIGNED_BYTE, img)
21 |
22 | glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
23 | glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
24 | glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
25 |
26 | return textureId
27 |
28 | class FaceRenderer:
29 | def __init__(self, targetImg, textureImg, textureCoords, mesh):
30 | self.h = targetImg.shape[0]
31 | self.w = targetImg.shape[1]
32 |
33 | pygame.init()
34 | pygame.display.set_mode((self.w, self.h), DOUBLEBUF|OPENGL)
35 | setOrtho(self.w, self.h)
36 |
37 | glEnable(GL_DEPTH_TEST)
38 | glEnable(GL_TEXTURE_2D)
39 |
40 | self.textureCoords = textureCoords
41 | self.textureCoords[0, :] /= textureImg.shape[1]
42 | self.textureCoords[1, :] /= textureImg.shape[0]
43 |
44 | self.faceTexture = addTexture(textureImg)
45 | self.renderTexture = addTexture(targetImg)
46 |
47 | self.mesh = mesh
48 |
49 | def drawFace(self, vertices):
50 | glBindTexture(GL_TEXTURE_2D, self.faceTexture)
51 |
52 | glBegin(GL_TRIANGLES)
53 | for triangle in self.mesh:
54 | for vertex in triangle:
55 | glTexCoord2fv(self.textureCoords[:, vertex])
56 | glVertex3fv(vertices[:, vertex])
57 |
58 | glEnd()
59 |
60 | def render(self, vertices):
61 | glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
62 | self.drawFace(vertices)
63 |
64 | data = glReadPixels(0, 0, self.w, self.h, GL_BGR, GL_UNSIGNED_BYTE)
65 |         renderedImg = np.frombuffer(data, dtype=np.uint8)  #np.fromstring is deprecated for binary data
66 |         renderedImg = renderedImg.reshape((self.h, self.w, 3))
67 |         #OpenGL reads pixels bottom-up, so flip the image vertically
68 |         renderedImg = np.flipud(renderedImg)
69 |
70 | pygame.display.flip()
71 | return renderedImg
72 |
--------------------------------------------------------------------------------
/FaceSwap/FaceRendering.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/FaceSwap/FaceRendering.pyc
--------------------------------------------------------------------------------
/FaceSwap/FaceSwap.pyproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Debug
5 | 2.0
6 | 860b1202-bec1-4b56-8eee-7c5943ce2f82
7 | .
8 | zad2.py
9 |
10 |
11 | .
12 | .
13 | Laboratorium 2
14 | Laboratorium 2
15 | Standard Python launcher
16 |
17 |
18 |
19 |
20 | False
21 |
22 |
23 | true
24 | false
25 |
26 |
27 | true
28 | false
29 |
30 |
31 | 10.0
32 | $(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\Python Tools\Microsoft.PythonTools.targets
33 |
34 |
35 |
36 |
37 | Code
38 |
39 |
40 |
41 | Code
42 |
43 |
44 | Code
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
--------------------------------------------------------------------------------
/FaceSwap/ImageProcessing.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 |
4 | #here src is the image whose pixels will be pasted into the dst image
5 | #featherAmount is a fraction that controls the size of the area that will be feathered (weighted)
6 | def blendImages(src, dst, mask, featherAmount=0.2):
7 |     #indices of the mask's non-black pixels
8 |     maskIndices = np.where(mask != 0)
9 |     #the same indices, but combined into one matrix where each row is one pixel (x, y)
10 | maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis]))
11 | faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0)
12 | featherAmount = featherAmount * np.max(faceSize)
13 |
14 | hull = cv2.convexHull(maskPts)
15 | dists = np.zeros(maskPts.shape[0])
16 | for i in range(maskPts.shape[0]):
17 | dists[i] = cv2.pointPolygonTest(hull, (maskPts[i, 0], maskPts[i, 1]), True)
18 |
19 | weights = np.clip(dists / featherAmount, 0, 1)
20 |
21 | composedImg = np.copy(dst)
22 | composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]]
23 |
24 | return composedImg
25 |
26 | #note: here src is the image from which the color will be taken
27 | def colorTransfer(src, dst, mask):
28 | transferredDst = np.copy(dst)
29 |     #indices of the mask's non-black pixels
30 |     maskIndices = np.where(mask != 0)
31 |     #src[maskIndices[0], maskIndices[1]] returns the pixels in the mask's non-black area
32 |
33 | maskedSrc = src[maskIndices[0], maskIndices[1]].astype(np.int32)
34 | maskedDst = dst[maskIndices[0], maskIndices[1]].astype(np.int32)
35 |
36 | meanSrc = np.mean(maskedSrc, axis=0)
37 | meanDst = np.mean(maskedDst, axis=0)
38 |
39 | maskedDst = maskedDst - meanDst
40 | maskedDst = maskedDst + meanSrc
41 | maskedDst = np.clip(maskedDst, 0, 255)
42 |
43 | transferredDst[maskIndices[0], maskIndices[1]] = maskedDst
44 |
45 | return transferredDst
46 |
47 |
--------------------------------------------------------------------------------
/FaceSwap/ImageProcessing.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/FaceSwap/ImageProcessing.pyc
--------------------------------------------------------------------------------
/FaceSwap/NonLinearLeastSquares.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy import optimize
3 |
4 | def LineSearchFun(alpha, x, d, fun, args):
5 | r = fun(x + alpha * d, *args)
6 | return np.sum(r**2)
7 |
8 | def GaussNewton(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
9 | x = np.array(x0, dtype=np.float64)
10 |
11 | oldCost = -1
12 | for i in range(maxIter):
13 | r = fun(x, *args)
14 | cost = np.sum(r**2)
15 |
16 | if verbose > 0:
17 | print("Cost at iteration " + str(i) + ": " + str(cost))
18 |
19 | if (cost < eps or abs(cost - oldCost) < eps):
20 | break
21 | oldCost = cost
22 |
23 | J = funJack(x, *args)
24 | grad = np.dot(J.T, r)
25 | H = np.dot(J.T, J)
26 | direction = np.linalg.solve(H, grad)
27 |
28 |         #step length optimization (line search)
29 |         lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
30 |         #step length
31 | alpha = lineSearchRes["x"]
32 |
33 | x = x + alpha * direction
34 |
35 | if verbose > 0:
36 | print("Gauss Netwon finished after " + str(i + 1) + " iterations")
37 | r = fun(x, *args)
38 | cost = np.sum(r**2)
39 | print("cost = " + str(cost))
40 | print("x = " + str(x))
41 |
42 | return x
43 |
44 | def SteepestDescent(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
45 | x = np.array(x0, dtype=np.float64)
46 |
47 | oldCost = -1
48 | for i in range(maxIter):
49 | r = fun(x, *args)
50 | cost = np.sum(r**2)
51 |
52 | if verbose > 0:
53 | print("Cost at iteration " + str(i) + ": " + str(cost))
54 |
55 |         #stopping conditions
56 | if (cost < eps or abs(cost - oldCost) < eps):
57 | break
58 | oldCost = cost
59 |
60 | J = funJack(x, *args)
61 | grad = 2 * np.dot(J.T, r)
62 | direction = grad
63 |
64 |         #step length optimization (line search)
65 |         lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
66 |         #step length
67 | alpha = lineSearchRes["x"]
68 |
69 | x = x + alpha * direction
70 |
71 | if verbose > 0:
72 | print("Steepest Descent finished after " + str(i + 1) + " iterations")
73 | r = fun(x, *args)
74 | cost = np.sum(r**2)
75 | print("cost = " + str(cost))
76 | print("x = " + str(x))
77 |
78 |
79 | return x
80 |
--------------------------------------------------------------------------------
/FaceSwap/NonLinearLeastSquares.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/FaceSwap/NonLinearLeastSquares.pyc
--------------------------------------------------------------------------------
/FaceSwap/drawing.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | def drawPoints(img, points, color=(0, 255, 0)):
5 | for point in points:
6 | cv2.circle(img, (int(point[0]), int(point[1])), 2, color)
7 |
8 | def drawCross(img, params, center=(100, 100), scale=30.0):
9 | R = cv2.Rodrigues(params[1:4])[0]
10 |
11 | points = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
12 | points = np.dot(points, R.T)
13 | points2D = points[:, :2]
14 |
15 | points2D = (points2D * scale + center).astype(np.int32)
16 |
17 | cv2.line(img, (center[0], center[1]), (points2D[0, 0], points2D[0, 1]), (255, 0, 0), 3)
18 | cv2.line(img, (center[0], center[1]), (points2D[1, 0], points2D[1, 1]), (0, 255, 0), 3)
19 | cv2.line(img, (center[0], center[1]), (points2D[2, 0], points2D[2, 1]), (0, 0, 255), 3)
20 |
21 | def drawMesh(img, shape, mesh, color=(255, 0, 0)):
22 | for triangle in mesh:
23 | point1 = shape[triangle[0]].astype(np.int32)
24 | point2 = shape[triangle[1]].astype(np.int32)
25 | point3 = shape[triangle[2]].astype(np.int32)
26 |
27 |         cv2.line(img, (point1[0], point1[1]), (point2[0], point2[1]), color, 1)
28 |         cv2.line(img, (point2[0], point2[1]), (point3[0], point3[1]), color, 1)
29 |         cv2.line(img, (point3[0], point3[1]), (point1[0], point1[1]), color, 1)
30 |
31 | def drawProjectedShape(img, x, projection, mesh, params, lockedTranslation=False):
32 | localParams = np.copy(params)
33 |
34 | if lockedTranslation:
35 | localParams[4] = 100
36 | localParams[5] = 200
37 |
38 | projectedShape = projection.fun(x, localParams)
39 |
40 | drawPoints(img, projectedShape.T, (0, 0, 255))
41 | drawMesh(img, projectedShape.T, mesh)
42 | drawCross(img, params)
43 |
--------------------------------------------------------------------------------
/FaceSwap/drawing.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/FaceSwap/drawing.pyc
--------------------------------------------------------------------------------
/FaceSwap/models.py:
--------------------------------------------------------------------------------
1 | from abc import ABCMeta, abstractmethod
2 | import numpy as np
3 | import cv2
4 |
5 | class Model:
6 | __metaclass__ = ABCMeta
7 |
8 | nParams = 0
9 |
10 |     #returns the residual vector for the given model parameters, input vector and expected output vector
11 | def residual(self, params, x, y):
12 | r = y - self.fun(x, params)
13 | r = r.flatten()
14 |
15 | return r
16 |
17 |     #returns the model output for the given parameters and input vector
18 | @abstractmethod
19 | def fun(self, x, params):
20 | pass
21 |
22 |     #returns the Jacobian
23 | @abstractmethod
24 | def jacobian(self, params, x, y):
25 | pass
26 |
27 |     #returns a set of example model parameters
28 | @abstractmethod
29 | def getExampleParameters(self):
30 | pass
31 |
32 |     #returns another set of example parameters
33 | @abstractmethod
34 | def getInitialParameters(self):
35 | pass
36 |
37 | class OrthographicProjectionBlendshapes(Model):
38 | nParams = 6
39 |
40 | def __init__(self, nBlendshapes):
41 | self.nBlendshapes = nBlendshapes
42 | self.nParams += nBlendshapes
43 |
44 | def fun(self, x, params):
45 |         #scaling
46 |         s = params[0]
47 |         #rotation
48 |         r = params[1:4]
49 |         #translation
50 |         t = params[4:6]
51 | w = params[6:]
52 |
53 | mean3DShape = x[0]
54 | blendshapes = x[1]
55 |
56 |         #rotation matrix from the rotation vector (Rodrigues' formula)
57 | R = cv2.Rodrigues(r)[0]
58 | P = R[:2]
59 | shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)
60 |
61 | projected = s * np.dot(P, shape3D) + t[:, np.newaxis]
62 |
63 | return projected
64 |
65 | def jacobian(self, params, x, y):
66 | s = params[0]
67 | r = params[1:4]
68 | t = params[4:6]
69 | w = params[6:]
70 |
71 | mean3DShape = x[0]
72 | blendshapes = x[1]
73 |
74 | R = cv2.Rodrigues(r)[0]
75 | P = R[:2]
76 | shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)
77 |
78 | nPoints = mean3DShape.shape[1]
79 |
80 |         #nPoints * 2 because each point has two dimensions (x and y)
81 | jacobian = np.zeros((nPoints * 2, self.nParams))
82 |
83 | jacobian[:, 0] = np.dot(P, shape3D).flatten()
84 |
85 | stepSize = 10e-4
86 | step = np.zeros(self.nParams)
87 |         step[1] = stepSize
88 |         jacobian[:, 1] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()
89 |         step = np.zeros(self.nParams)
90 |         step[2] = stepSize
91 |         jacobian[:, 2] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()
92 |         step = np.zeros(self.nParams)
93 |         step[3] = stepSize
94 | jacobian[:, 3] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()
95 |
96 | jacobian[:nPoints, 4] = 1
97 | jacobian[nPoints:, 5] = 1
98 |
99 | startIdx = self.nParams - self.nBlendshapes
100 | for i in range(self.nBlendshapes):
101 | jacobian[:, i + startIdx] = s * np.dot(P, blendshapes[i]).flatten()
102 |
103 | return jacobian
104 |
105 |     #not used
106 | def getExampleParameters(self):
107 | params = np.zeros(self.nParams)
108 | params[0] = 1
109 |
110 | return params
111 |
112 | def getInitialParameters(self, x, y):
113 | mean3DShape = x.T
114 | shape2D = y.T
115 |
116 | shape3DCentered = mean3DShape - np.mean(mean3DShape, axis=0)
117 | shape2DCentered = shape2D - np.mean(shape2D, axis=0)
118 |
119 | scale = np.linalg.norm(shape2DCentered) / np.linalg.norm(shape3DCentered[:, :2])
120 | t = np.mean(shape2D, axis=0) - np.mean(mean3DShape[:, :2], axis=0)
121 |
122 | params = np.zeros(self.nParams)
123 | params[0] = scale
124 | params[4] = t[0]
125 | params[5] = t[1]
126 |
127 | return params
128 |
--------------------------------------------------------------------------------
/FaceSwap/models.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/FaceSwap/models.pyc
--------------------------------------------------------------------------------
/FaceSwap/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import models
4 | from dlib import rectangle
5 | import NonLinearLeastSquares
6 |
7 | def getNormal(triangle):
8 | a = triangle[:, 0]
9 | b = triangle[:, 1]
10 | c = triangle[:, 2]
11 |
12 | axisX = b - a
13 | axisX = axisX / np.linalg.norm(axisX)
14 | axisY = c - a
15 | axisY = axisY / np.linalg.norm(axisY)
16 | axisZ = np.cross(axisX, axisY)
17 | axisZ = axisZ / np.linalg.norm(axisZ)
18 |
19 | return axisZ
20 |
21 | def flipWinding(triangle):
22 | return [triangle[1], triangle[0], triangle[2]]
23 |
24 | def fixMeshWinding(mesh, vertices):
25 | for i in range(mesh.shape[0]):
26 | triangle = mesh[i]
27 | normal = getNormal(vertices[:, triangle])
28 | if normal[2] > 0:
29 | mesh[i] = flipWinding(triangle)
30 |
31 | return mesh
32 |
33 | def getShape3D(mean3DShape, blendshapes, params):
34 |     #scaling
35 |     s = params[0]
36 |     #rotation
37 |     r = params[1:4]
38 |     #translation
39 |     t = params[4:6]
40 | w = params[6:]
41 |
42 |     #rotation matrix from the rotation vector (Rodrigues' formula)
43 | R = cv2.Rodrigues(r)[0]
44 | shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)
45 |
46 | shape3D = s * np.dot(R, shape3D)
47 | shape3D[:2, :] = shape3D[:2, :] + t[:, np.newaxis]
48 |
49 | return shape3D
50 |
51 | def getMask(renderedImg):
52 |     mask = np.zeros(renderedImg.shape[:2], dtype=np.uint8)
53 |     mask[np.any(renderedImg > 0, axis=2)] = 255  #assumed intent: non-black pixels are foreground; the original body had no return
54 |     return mask
55 | 
54 | def load3DFaceModel(filename):
55 | faceModelFile = np.load(filename)
56 | mean3DShape = faceModelFile["mean3DShape"]
57 | mesh = faceModelFile["mesh"]
58 | idxs3D = faceModelFile["idxs3D"]
59 | idxs2D = faceModelFile["idxs2D"]
60 | blendshapes = faceModelFile["blendshapes"]
61 | mesh = fixMeshWinding(mesh, mean3DShape)
62 |
63 | return mean3DShape, blendshapes, mesh, idxs3D, idxs2D
64 |
65 | def getFaceKeypoints(img, detector, predictor, maxImgSizeForDetection=640):
66 | imgScale = 1
67 | scaledImg = img
68 | if max(img.shape) > maxImgSizeForDetection:
69 | imgScale = maxImgSizeForDetection / float(max(img.shape))
70 | scaledImg = cv2.resize(img, (int(img.shape[1] * imgScale), int(img.shape[0] * imgScale)))
71 |
72 |
73 |     #face detection
74 | dets = detector(scaledImg, 1)
75 |
76 | if len(dets) == 0:
77 | return None
78 |
79 | shapes2D = []
80 | for det in dets:
81 | faceRectangle = rectangle(int(det.left() / imgScale), int(det.top() / imgScale), int(det.right() / imgScale), int(det.bottom() / imgScale))
82 |
83 |         #facial landmark detection
84 | dlibShape = predictor(img, faceRectangle)
85 |
86 | shape2D = np.array([[p.x, p.y] for p in dlibShape.parts()])
87 |         #transpose so the shape is 2 x n rather than n x 2, which simplifies later computations
88 | shape2D = shape2D.T
89 |
90 | shapes2D.append(shape2D)
91 |
92 | return shapes2D
93 |
94 |
95 | def getFaceTextureCoords(img, mean3DShape, blendshapes, idxs2D, idxs3D, detector, predictor):
96 | projectionModel = models.OrthographicProjectionBlendshapes(blendshapes.shape[0])
97 |
98 | keypoints = getFaceKeypoints(img, detector, predictor)[0]
99 | modelParams = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], keypoints[:, idxs2D])
100 | modelParams = NonLinearLeastSquares.GaussNewton(modelParams, projectionModel.residual, projectionModel.jacobian, ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], keypoints[:, idxs2D]), verbose=0)
101 | textureCoords = projectionModel.fun([mean3DShape, blendshapes], modelParams)
102 |
103 | return textureCoords
--------------------------------------------------------------------------------
/FaceSwap/utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/FaceSwap/utils.pyc
--------------------------------------------------------------------------------
/FaceSwap/zad1.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 |
4 | import ImageProcessing
5 |
6 | handImg = cv2.imread("../data/hand.png")
7 | eyeImg = cv2.imread("../data/eye.png")
8 | maskImg = cv2.imread("../data/mask.png")
9 |
10 | #convert the color image to grayscale
11 | mask = np.mean(maskImg, axis=2)
12 |
13 | eyeImg = ImageProcessing.colorTransfer(handImg, eyeImg, mask)
14 | blendedImg = ImageProcessing.blendImages(eyeImg, handImg, mask)
15 |
16 | cv2.imwrite("../eyeHandBlend.jpg", blendedImg)
--------------------------------------------------------------------------------
/FaceSwap/zad2.py:
--------------------------------------------------------------------------------
1 | import os
2 | import dlib
3 | import cv2
4 | import numpy as np
5 |
6 | import models
7 | import NonLinearLeastSquares
8 | import ImageProcessing
9 |
10 | from drawing import *
11 |
12 | import FaceRendering
13 | import utils
14 |
15 | print("Press T to draw the keypoints and the 3D model")
16 | print("Press R to start recording to a video file")
17 |
18 | #you need to download shape_predictor_68_face_landmarks.dat from the link below and unpack it where the solution file is
19 | #http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
20 |
21 | #loading the keypoint detection model, the image and the 3D model
22 | predictor_path = os.path.join(os.path.dirname(__file__), "..", "shape_predictor_68_face_landmarks.dat")
23 | image_name = os.path.join(os.path.dirname(__file__), "..", "data", "jolie.jpg")
24 | #the smaller this value gets the faster the detection will work
25 | #if it is too small, the user's face might not be detected
26 | maxImageSizeForDetection = 320
27 |
28 | detector = dlib.get_frontal_face_detector()
29 | predictor = dlib.shape_predictor(predictor_path)
30 | mean3DShape, blendshapes, mesh, idxs3D, idxs2D = utils.load3DFaceModel( os.path.join(os.path.dirname(__file__), "..", "candide.npz"))
31 |
32 | projectionModel = models.OrthographicProjectionBlendshapes(blendshapes.shape[0])
33 |
34 | modelParams = None
35 | lockedTranslation = False
36 | drawOverlay = False
37 | cap = cv2.VideoCapture(0)
38 | writer = None
39 | cameraImg = cap.read()[1]
40 |
41 | textureImg = cv2.imread(image_name)
42 | textureCoords = utils.getFaceTextureCoords(textureImg, mean3DShape, blendshapes, idxs2D, idxs3D, detector, predictor)
43 | renderer = FaceRendering.FaceRenderer(cameraImg, textureImg, textureCoords, mesh)
44 |
45 | while True:
46 | cameraImg = cap.read()[1]
47 | shapes2D = utils.getFaceKeypoints(cameraImg, detector, predictor, maxImageSizeForDetection)
48 |
49 | if shapes2D is not None:
50 | for shape2D in shapes2D:
51 | #3D model parameter initialization
52 | modelParams = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], shape2D[:, idxs2D])
53 |
54 | #3D model parameter optimization
55 | modelParams = NonLinearLeastSquares.GaussNewton(modelParams, projectionModel.residual, projectionModel.jacobian, ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], shape2D[:, idxs2D]), verbose=0)
56 |
57 | #rendering the model to an image
58 | shape3D = utils.getShape3D(mean3DShape, blendshapes, modelParams)
59 | renderedImg = renderer.render(shape3D)
60 |
61 | #blending of the rendered face with the image
62 | mask = np.copy(renderedImg[:, :, 0])
63 | renderedImg = ImageProcessing.colorTransfer(cameraImg, renderedImg, mask)
64 | cameraImg = ImageProcessing.blendImages(renderedImg, cameraImg, mask)
65 |
66 |
67 | #drawing of the mesh and keypoints
68 | if drawOverlay:
69 | drawPoints(cameraImg, shape2D.T)
70 | drawProjectedShape(cameraImg, [mean3DShape, blendshapes], projectionModel, mesh, modelParams, lockedTranslation)
71 |
72 | if writer is not None:
73 | writer.write(cameraImg)
74 |
75 | cv2.imshow('image', cameraImg)
76 | key = cv2.waitKey(1)
77 |
78 | if key == 27:
79 | break
80 | if key == ord('t'):
81 | drawOverlay = not drawOverlay
82 | if key == ord('r'):
83 | if writer is None:
84 | print("Starting video writer")
85 | writer = cv2.VideoWriter(os.path.join(os.path.dirname(__file__), "..", "out.avi"),
86 | cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'),
87 | 25,
88 | (cameraImg.shape[1], cameraImg.shape[0]))
89 |
90 | if writer.isOpened():
91 | print("Writer succesfully opened")
92 | else:
93 | writer = None
94 | print("Writer opening failed")
95 | else:
96 | print("Stopping video writer")
97 | writer.release()
98 | writer = None
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Marek Kowalski
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FaceSwap #
2 | FaceSwap is an app that I originally created as an exercise for my students in the "Mathematics in Multimedia" course at the Warsaw University of Technology.
3 | The app is written in Python and uses face alignment, Gauss-Newton optimization and image blending to swap the face of a person seen by the camera with the face of a person in a provided image.
4 |
5 | You will find a short presentation of the program's capabilities in the video below (click to go to YouTube):
6 | [![FaceSwap demo](http://img.youtube.com/vi/yZRuSsyxvos/0.jpg)](http://www.youtube.com/watch?v=yZRuSsyxvos)
7 |
8 | ## How to use it ##
9 | To start the program, run the Python script named zad2.py ("zad" is short for the Polish word for exercise). You need Python 3 and some additional libraries installed. Once Python is on your machine, you should be able to install the libraries automatically by running `pip install -r requirements.txt` in the repo's root directory.
10 |
11 | You will also have to download the face alignment model from here: http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2 and unpack it to the main project directory.
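
A minimal end-to-end setup might look like this (a sketch assuming the commands are run from the repo's root directory, which is where zad2.py expects the unpacked landmark file):

```
pip install -r requirements.txt
python FaceSwap/zad2.py
```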
12 |
13 | ## A faster and more stable version ##
14 | A faster and more stable version of FaceSwap is available on Dropbox [here](https://www.dropbox.com/sh/yfq0hcu0we2hed0/AABXckFonehfgLfdzicjqqJYa?dl=0). This new version is based on the [Deep Alignment Network](https://github.com/MarekKowalski/DeepAlignmentNetwork) method, which is faster than the currently used method if run on a GPU and provides more stable and more precise facial landmarks. Please see the [GitHub repository of Deep Alignment Network](https://github.com/MarekKowalski/DeepAlignmentNetwork) for setup instructions.
15 |
16 | I hope to find time to include this faster version in the repo code soon.
17 |
18 | ## How it works ##
19 | The general outline of the method is as follows:
20 |
21 | First we take the input image (the image of a person we want to see on our own face) and find the face region and its landmarks. Once we have that, we fit the 3D model to those landmarks (more on that later); the vertices of that model, projected into the image space, become our texture coordinates.
22 |
23 | Once that is finished and everything is initialized, the camera starts capturing images. For each captured image, the following steps are taken:
24 |
25 | 1. The face region is detected and the facial landmarks are located.
26 | 2. The 3D model is fitted to the located landmarks.
27 | 3. The 3D model is rendered using pygame with the texture obtained during initialization.
28 | 4. The image of the rendered model is blended with the image obtained from the camera using feathering (alpha blending) and very simple color correction.
29 | 5. The final image is shown to the user.
30 |
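The five steps map directly onto the repo's own helpers. Below is a condensed sketch of the capture loop, essentially an abridged zad2.py; the relative paths assume the script sits inside the FaceSwap directory like zad2.py does, and keyboard handling, overlay drawing and video recording are omitted:

```python
import cv2
import dlib
import numpy as np

import models
import utils
import NonLinearLeastSquares
import ImageProcessing
import FaceRendering

#initialization, as in zad2.py
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("../shape_predictor_68_face_landmarks.dat")
mean3DShape, blendshapes, mesh, idxs3D, idxs2D = utils.load3DFaceModel("../candide.npz")
projectionModel = models.OrthographicProjectionBlendshapes(blendshapes.shape[0])

cap = cv2.VideoCapture(0)
cameraImg = cap.read()[1]
textureImg = cv2.imread("../data/jolie.jpg")
textureCoords = utils.getFaceTextureCoords(textureImg, mean3DShape, blendshapes, idxs2D, idxs3D, detector, predictor)
renderer = FaceRendering.FaceRenderer(cameraImg, textureImg, textureCoords, mesh)

while True:
    cameraImg = cap.read()[1]
    #1. face detection and landmark localization
    shapes2D = utils.getFaceKeypoints(cameraImg, detector, predictor)
    if shapes2D is not None:
        for shape2D in shapes2D:
            #2. fit the 3D model to the located landmarks
            params = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], shape2D[:, idxs2D])
            params = NonLinearLeastSquares.GaussNewton(params, projectionModel.residual, projectionModel.jacobian,
                ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], shape2D[:, idxs2D]), verbose=0)
            #3. render the textured model in the fitted pose
            renderedImg = renderer.render(utils.getShape3D(mean3DShape, blendshapes, params))
            #4. color correction + feathered alpha blending
            mask = np.copy(renderedImg[:, :, 0])
            renderedImg = ImageProcessing.colorTransfer(cameraImg, renderedImg, mask)
            cameraImg = ImageProcessing.blendImages(renderedImg, cameraImg, mask)
    #5. show the final image
    cv2.imshow("image", cameraImg)
    if cv2.waitKey(1) == 27:  #Esc
        break
```
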
31 | The most crucial element of the entire process is the fitting of the 3D model. The model itself consists of:
32 | * the 3D shape (set of vertices) of a neutral face,
33 | * a number of blendshapes that can be added to the neutral face to produce mouth opening, eyebrow raising, etc.,
34 | * a set of triplets of indices into the face shape that form the triangular mesh of the face,
35 | * two sets of indices which establish correspondence between the landmarks found by the landmark localizer and the vertices of the 3D face shape.
36 |
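In code, these components are simply the arrays stored in candide.npz, unpacked by utils.load3DFaceModel; the shapes noted below are not documented in the file itself, they are inferred from how the rest of the code indexes the arrays:

```python
import utils

mean3DShape, blendshapes, mesh, idxs3D, idxs2D = utils.load3DFaceModel("candide.npz")

#mean3DShape: 3 x nVertices array holding the neutral face shape
#blendshapes: nBlendshapes x 3 x nVertices array holding the blendshape offsets
#mesh: nTriangles x 3 array of vertex-index triplets forming the triangular mesh
#idxs3D, idxs2D: matching index sets linking model vertices to the 68 dlib landmarks
```
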
37 | The model is projected into the image space using the following equation:
38 |
39 | ![equation](data/equation.png)
40 |
41 | where *s* is the projected shape, *a* is the scaling parameter, *P* are the first two rows of a rotation matrix that rotates the 3D face shape, *S_0* is the neutral face shape, *w_1-n* are the blendshape weights, *S_1-n* are the blendshapes, *t* is a 2D translation vector and *n* is the number of blendshapes.
42 |
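In plain text, this is the same operation that models.py implements as `projected = s * np.dot(P, shape3D) + t[:, np.newaxis]` (note that the code calls the scaling parameter `s`, while the equation above calls it *a*):

```
s = a * P * (S_0 + w_1*S_1 + ... + w_n*S_n) + t
```
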
43 | The model fitting is accomplished by minimizing the difference between the projected shape and the localized landmarks. The minimization is performed with respect to the blendshape weights, scaling, rotation and translation, using the [Gauss Newton method](https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm).
44 |
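The core of each Gauss-Newton iteration, as implemented in NonLinearLeastSquares.py, reduces to a few lines (abridged from the loop body; alpha comes from scipy's minimize_scalar line search and the stopping conditions are omitted):

```python
r = projectionModel.residual(params, x, y)                   #residuals: landmarks minus projected shape
J = projectionModel.jacobian(params, x, y)                   #Jacobian of the model at the current parameters
direction = np.linalg.solve(np.dot(J.T, J), np.dot(J.T, r))  #solve the normal equations for the step
params = params + alpha * direction                          #alpha chosen by a scalar line search
```
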
45 | ## Licensing ##
46 | The code is licensed under the MIT license; some of the data in the project was downloaded from third-party websites:
47 | * brad pitt.jpg - https://en.wikipedia.org/wiki/Brad_Pitt#/media/File:Brad_Pitt_Fury_2014.jpg
48 | * einstein.jpg - https://www.viewfoo.com/uploads/images/702_1433440837_albert-einstein.jpg
49 | * jolie.jpg - http://cdni.condenast.co.uk/720x1080/a_c/Angelina-Jolie_glamour_2mar14_rex_b_720x1080.jpg
50 | * hand.png - http://pngimg.com/upload/hands_PNG905.png
51 | * eye.png - http://cache4.asset-cache.net/xd/521276062.jpg?v=1&c=IWSAsset&k=2&d=62CA815BFB1CE4807BD8B4D34504661CD6D7111452E48A17257DA6DB0BD6EA6DE35742C781328F67
52 | * candide 3D face model source - http://www.icg.isy.liu.se/candide/
53 |
54 | ## Contact ##
55 | If you need help or have found the app useful, do not hesitate to let me know.
56 |
57 | Marek Kowalski, homepage: http://home.elka.pw.edu.pl/~mkowals6/
58 |
--------------------------------------------------------------------------------
/candide.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/candide.npz
--------------------------------------------------------------------------------
/data/brad pitt.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/data/brad pitt.jpg
--------------------------------------------------------------------------------
/data/einstein.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/data/einstein.jpg
--------------------------------------------------------------------------------
/data/equation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/data/equation.png
--------------------------------------------------------------------------------
/data/eye.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/data/eye.png
--------------------------------------------------------------------------------
/data/hand.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/data/hand.png
--------------------------------------------------------------------------------
/data/jolie.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/data/jolie.jpg
--------------------------------------------------------------------------------
/data/mask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MarekKowalski/FaceSwap/1fdbd28c8413db42adca5b02d5bdff6d2766c7e7/data/mask.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | dlib == 19.*
2 | pygame == 2.0.1
3 | numpy >= 1.16.2
4 | opencv-python == 4.5.*
5 | PyOpenGL == 3.1.5
--------------------------------------------------------------------------------