├── .gitignore
├── LICENSE
├── afy
│   ├── arguments.py
│   ├── cam_fomm.py
│   ├── camera_selector.py
│   ├── predictor_local.py
│   ├── utils.py
│   └── videocaptureasync.py
├── avatars
│   ├── 1.png
│   ├── 2.png
│   ├── 3.png
│   ├── 4.png
│   ├── 5.png
│   ├── 6.png
│   ├── 7.png
│   ├── 8.png
│   └── 9.png
├── client.js
├── faces.py
├── index.html
├── main.py
├── readme.md
├── requirements.txt
├── run.sh
├── run_windows.bat
└── webmeeting
    ├── README.md
    ├── package.json
    ├── public
    │   ├── CNAME
    │   ├── favicon.ico
    │   ├── index.html
    │   └── manifest.json
    ├── src
    │   ├── App.test.tsx
    │   ├── App.tsx
    │   ├── PreJoinPage.tsx
    │   ├── RoomPage.tsx
    │   ├── SelectAvatar.tsx
    │   ├── chatui-theme.css
    │   ├── index.css
    │   ├── index.tsx
    │   ├── meeting
    │   │   ├── AudioSelectButton.tsx
    │   │   ├── ControlButton.tsx
    │   │   ├── ControlsView.tsx
    │   │   ├── DisplayContext.tsx
    │   │   ├── ParticipantView.tsx
    │   │   ├── ScreenShareView.tsx
    │   │   ├── StageProps.ts
    │   │   ├── StageUtils.ts
    │   │   ├── StageView.tsx
    │   │   ├── VideoSelectButton.tsx
    │   │   ├── desktop
    │   │   │   ├── GridStage.tsx
    │   │   │   ├── SpeakerStage.tsx
    │   │   │   └── styles.module.css
    │   │   ├── mobile
    │   │   │   ├── MobileStage.tsx
    │   │   │   └── styles.module.css
    │   │   ├── static
    │   │   │   ├── connection-quality-1.svg
    │   │   │   ├── connection-quality-2.svg
    │   │   │   └── connection-quality-3.svg
    │   │   └── styles.module.css
    │   ├── react-app-env.d.ts
    │   ├── setupProxy.js
    │   ├── setupTests.ts
    │   └── webrtc
    │       └── WebrtcCli.tsx
    └── tsconfig.json
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 | /meeting/node_modules
131 | /meeting/src/config.tsx
132 | /webmeeting/node_modules
133 | /webmeeting/src/config.tsx
134 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 git-cloner
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/afy/arguments.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 |
3 | parser = ArgumentParser()
4 | parser.add_argument("--config", help="path to config")
5 | parser.add_argument("--checkpoint", default='vox-cpk.pth.tar', help="path to checkpoint to restore")
6 |
7 | parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
8 | parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
9 | parser.add_argument("--no-pad", dest="no_pad", action="store_true", help="don't pad output image")
10 | parser.add_argument("--enc_downscale", default=1, type=float, help="Downscale factor for encoder input. Improves performance with cost of quality.")
11 |
12 | parser.add_argument("--virt-cam", type=int, default=0, help="Virtualcam device ID")
13 | parser.add_argument("--no-stream", action="store_true", help="On Linux, force no streaming")
14 |
15 | parser.add_argument("--verbose", action="store_true", help="Print additional information")
16 | parser.add_argument("--hide-rect", action="store_true", default=False, help="Hide the helper rectangle in preview window")
17 |
18 | parser.add_argument("--avatars", default="./avatars", help="path to avatars directory")
19 |
20 | parser.add_argument("--is-worker", action="store_true", help="Whether to run this process as a remote GPU worker")
21 | parser.add_argument("--is-client", action="store_true", help="Whether to run this process as a client")
22 | parser.add_argument("--in-port", type=int, default=5557, help="Remote worker input port")
23 | parser.add_argument("--out-port", type=int, default=5558, help="Remote worker output port")
24 | parser.add_argument("--in-addr", type=str, default=None, help="Socket address for incoming messages, like example.com:5557")
25 | parser.add_argument("--out-addr", type=str, default=None, help="Socker address for outcoming messages, like example.com:5558")
26 | parser.add_argument("--jpg_quality", type=int, default=95, help="Jpeg copression quality for image transmission")
27 |
28 | parser.set_defaults(relative=False)
29 | parser.set_defaults(adapt_scale=False)
30 | parser.set_defaults(no_pad=False)
31 |
32 | opt = parser.parse_args()
33 |
34 | if opt.is_client and (opt.in_addr is None or opt.out_addr is None):
35 | raise ValueError("You have to set --in-addr and --out-addr")
36 |
--------------------------------------------------------------------------------
/afy/cam_fomm.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from sys import platform as _platform
4 | import glob
5 | import time
6 | import dlib
7 | import subprocess
8 | import threading
9 | import urllib
10 | import numpy as np
11 | import cv2
12 | import torch
13 | from afy.videocaptureasync import VideoCaptureAsync
14 | from afy.arguments import opt
15 | from afy.utils import info, Once, Tee, crop, pad_img, resize, TicToc
16 | import afy.camera_selector as cam_selector
17 | from afy import predictor_local
18 | from PIL import Image, ImageDraw, ImageFont
19 |
20 |
21 | # Where to split an array from face_alignment to separate each landmark
22 | LANDMARK_SLICE_ARRAY = np.array([17, 22, 27, 31, 36, 42, 48, 60])
23 | face_detector = None
24 | lock = None
25 |
26 | def InitLiveKitCli(room,name,filename):
27 | api_key = os.getenv('livekit_api_key')
28 | api_secret = os.getenv('livekit_api_secret')
29 | command = 'timeout=3600 ./livekit-cli join-room --room ' + room + ' --identity ' + name + 'A --publish h264://' + filename + \
30 | ' --url http://classnotfound.com.cn:7880 --api-key ' + api_key + ' --api-secret ' + api_secret
31 | return command
32 |
33 | def InitOutPipe(fileName):
34 | if (_platform == 'win32'):
35 | rtmp = "rtmp://172.16.62.88:1935/live/" + fileName.replace("/tmp/", "").replace(".h264.sock", "") + "_"
36 | command = ['ffmpeg',
37 | '-y',
38 | '-f', 'rawvideo',
39 | '-vcodec', 'rawvideo',
40 | '-pix_fmt', 'bgr24',
41 | '-s', '256*256',
42 | '-r', '7', # 7 fps!!!!
43 | '-i', '-',
44 | '-c:v', 'h264',
45 | '-pix_fmt', 'yuv420p',
46 | '-preset', 'ultrafast',
47 | '-f', 'flv',
48 | '-color_primaries', 'bt709',
49 | '-color_trc', 'bt709',
50 | '-colorspace', 'bt709',
51 | #'-loglevel', 'quiet',
52 | # '-flvflags','no_duration_filesize',
53 | rtmp]
54 | else:
55 | rtmp = 'unix:' + fileName
56 | command = ['ffmpeg',
57 | '-y',
58 | '-f', 'rawvideo',
59 | '-vcodec', 'rawvideo',
60 | '-max_delay', '0',
61 | '-pix_fmt', 'bgr24',
62 | '-s', '256*256',
63 | '-r', '7',
64 | '-i', '-',
65 | '-c:v', 'h264',
66 | '-pix_fmt', 'yuv420p',
67 | '-preset', 'ultrafast',
68 | '-listen', '1',
69 | '-f', 'h264',
70 | '-color_primaries', 'bt709',
71 | '-color_trc', 'bt709',
72 | '-colorspace', 'bt709',
73 | '-fflags', 'nobuffer',
74 | #'-loglevel', 'quiet',
75 | rtmp]
76 | print(command)
77 | pipe = subprocess.Popen(command, stdin=subprocess.PIPE)
78 | return pipe
79 |
80 |
81 | def runcmd(command):
82 | ret = subprocess.Popen(command, shell=True)
83 | return ret
84 |
85 |
86 | def detect_face(image):
87 | global face_detector
88 | if face_detector is None:
89 | face_detector = dlib.get_frontal_face_detector()
90 | # Convert image into grayscale
91 | img_gray = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2GRAY)
92 | # Use detector to find landmarks
93 | faces = face_detector(img_gray, 0)
94 | if len(faces) > 0:
95 | l = faces[0].left()
96 | t = faces[0].top()
97 | r = faces[0].right()
98 | b = faces[0].bottom()
99 | # init
100 | x = l
101 | y = t
102 | w = r - l
103 | h = b - y
104 | # adjust
105 | e = int(h / 3)
106 | y = y - e
107 | h = h + 2 * e
108 | f = int(w / 3)
109 | x = x - f
110 | w = w + 2 * f
111 | return [(x, y, w, h)]
112 | return []
113 |
114 |
115 | def cut_image(img, box):
116 | (x, y, w, h) = box
117 | cropped = img[int(y):int(y + h), int(x):int(x + w)] # [y0:y1, x0:x1]
118 | return cropped
119 |
120 |
121 | def crop_face(image, face, last_x, last_y, last_w, last_h):
122 | shape = image.shape
123 | (x, y, w, h) = face
124 | if last_x > 0 and last_y > 0:
125 | if abs(last_x - x) < 60 and abs(last_y - y) < 60:
126 | x = last_x
127 | y = last_y
128 | w = last_w
129 | h = last_h
130 | face_image = cut_image(image, (x, y, w, h))
131 | return face_image, x, y, w, h
132 |
133 |
134 | def is_new_frame_better(source, driving, predictor, avatar_kp):
135 | if avatar_kp is None:
136 | return False
137 |
138 | if predictor.get_start_frame() is None:
139 | return True
140 |
141 | driving_smaller = resize(driving, (128, 128))[..., :3]
142 | new_kp = predictor.get_frame_kp(driving)
143 |
144 | if new_kp is not None:
145 | new_norm = (np.abs(avatar_kp - new_kp) ** 2).sum()
146 | old_norm = (
147 | np.abs(avatar_kp - predictor.get_start_frame_kp()) ** 2).sum()
148 |
149 | out_string = "{0} : {1}".format(
150 | int(new_norm * 100), int(old_norm * 100))
151 |
152 | return new_norm < old_norm
153 | else:
154 | return False
155 |
156 |
157 | def load_custom_avatar(images_list, avatarnum):
158 | if avatarnum.isdigit():
159 | return
160 | url_request = "https://classnotfound.com.cn/aiit/avatar/" + avatarnum + ".jpg"
161 | fileName = "./avatars/temp/" + avatarnum + ".jpg"
162 | avatar_basic_auth = os.getenv('avatar_basic_auth')
163 | opener = urllib.request.build_opener()
164 | opener.addheaders = [
165 | ('Authorization', 'Basic ' + avatar_basic_auth)]
166 | urllib.request.install_opener(opener)
167 | try:
168 | urllib.request.urlretrieve(url=url_request, filename=fileName)
169 | images_list[-1] = fileName
170 | except Exception as e:
171 | print(e)
172 | urllib.request.urlcleanup()
173 |
174 |
175 | def load_images(avatarnum):
176 | IMG_SIZE = 256
177 | avatars = []
178 | filenames = []
179 | images_list = sorted(glob.glob(f'./avatars/*.*'))
180 | if not avatarnum.isdigit():
181 | load_custom_avatar(images_list, avatarnum)
182 | for i, f in enumerate(images_list):
183 | if f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.png'):
184 | img = cv2.imread(f)
185 | if img is None:
186 | print("Failed to open image: {}".format(f))
187 | continue
188 |
189 | if img.ndim == 2:
190 | img = np.tile(img[..., None], [1, 1, 3])
191 | img = img[..., :3][..., ::-1]
192 | img = resize(img, (IMG_SIZE, IMG_SIZE))
193 | avatars.append(img)
194 | filenames.append(f)
195 | return avatars, filenames
196 |
197 |
198 | def change_avatar(predictor, new_avatar):
199 | avatar_kp = predictor.get_frame_kp(new_avatar)
200 | predictor.set_source_image(new_avatar)
201 | return avatar_kp
202 |
203 |
204 | def kp_to_pixels(arr):
205 | '''Convert normalized landmark locations to screen pixels'''
206 | return ((arr + 1) * 127).astype(np.int32)
207 |
208 |
209 | def fomm_load_predictor(avatarnum):
210 | print("torch.cuda.is_available:",
211 | torch.cuda.is_available(), torch.cuda.device_count())
212 | print('Loading Predictor')
213 | predictor_args = {
214 | 'config_path': 'fomm/config/vox-adv-256.yaml',
215 | 'checkpoint_path': 'vox-adv-cpk.pth.tar',
216 | 'relative': True,
217 | 'adapt_movement_scale': True,
218 | 'enc_downscale': 1
219 | }
220 | avatars, avatar_names = load_images(avatarnum)
221 | predictor = predictor_local.PredictorLocal(**predictor_args)
222 | # custom avatar
223 | cur_ava = 0
224 | if avatarnum.isdigit():
225 | cur_ava = int(avatarnum)
226 | if cur_ava > 0:
227 | cur_ava = cur_ava - 1
228 | else:
229 | if len(avatars) == 9:
230 | cur_ava = 8
231 | else:
232 | cur_ava = 0
233 | avatar_kp = change_avatar(predictor, avatars[cur_ava])
234 | return predictor, avatar_kp
235 |
236 |
237 | def fomm_change_face(predictor, avatar_kp):
238 | is_debug = True
239 | last_x = 0
240 | last_y = 0
241 | last_w = 0
242 | last_h = 0
243 | skip_frame = 0
244 | is_detectface = False
245 | # init lock
246 | global lock
247 | if lock is None:
248 | lock = threading.Lock()
249 | # init windows
250 | if is_debug:
251 | cv2.namedWindow('cam', cv2.WINDOW_GUI_NORMAL)
252 | cv2.moveWindow('cam', 500, 250)
253 | # load cam
254 | cap = VideoCaptureAsync(0)
255 | cap.start()
256 | while True:
257 | ret, frame = cap.read()
258 | if not ret:
259 | print("Can't receive frame (stream end?). Exiting ...")
260 | break
261 | is_detectface = (skip_frame == 0)
262 | out, last_x, last_y, last_w, last_h = fomm_change_frame(
263 | predictor, avatar_kp, frame, last_x, last_y, last_w, last_h, is_detectface)
264 | skip_frame = skip_frame + 1
265 | skip_frame = skip_frame % 10
266 | preview_frame = frame[..., ::-1].copy()
267 | if is_debug:
268 | key = cv2.waitKey(1)
269 | if key == ord('q'):
270 | break
271 | cv2.imshow('cam', preview_frame[..., ::-1])
272 | if out is not None:
273 | cv2.imshow('avatarify', out[..., ::-1])
274 | cap.stop()
275 | cv2.destroyAllWindows()
276 |
277 | fontStyle = ImageFont.truetype("font/simsun.ttc", 16, encoding="utf-8")
278 |
279 | def cv2ImgAddText(img, text, left, top, textColor=(255, 0, 0)):
280 | #start_time = time.time()
281 | if (isinstance(img, np.ndarray)):
282 | img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
283 | draw = ImageDraw.Draw(img)
284 | draw.text((left, top), text, textColor, font=fontStyle)
285 | #end_time = time.time()
286 | #run_time = end_time - start_time
287 | #print(run_time)
288 | return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
289 |
290 | def fomm_change_frame(predictor, avatar_kp, frame, last_x, last_y, last_w, last_h, is_detectface):
291 | IMG_SIZE = 256
292 | # init var
293 | avatar = None
294 | frame_proportion = 0.9
295 | frame_offset_x = 0
296 | frame_offset_y = 0
297 | find_keyframe = False
298 | is_calibrated = False
299 | # lock
300 | global lock
301 | if lock is None:
302 | lock = threading.Lock()
303 | try:
304 | frame = frame[..., ::-1]
305 | if last_x == 0 and last_y == 0 and last_w == 0 and last_h == 0:
306 | is_detectface = True
307 | # detect faces
308 | if is_detectface:
309 | faces = detect_face(frame)
310 | predictor.reset_frames()
311 | if(len(faces) > 0):
312 | frame, last_x, last_y, last_w, last_h = crop_face(
313 | frame, faces[0], last_x, last_y, last_w, last_h)
314 | else:
315 | frame = None
316 | return None, 0, 0, 0, 0
317 | else:
318 | faces = [(last_x, last_y, last_w, last_h)]
319 | frame, last_x, last_y, last_w, last_h = crop_face(
320 | frame, faces[0], last_x, last_y, last_w, last_h)
321 | # resize face
322 | try:
323 | frame = resize(frame, (IMG_SIZE, IMG_SIZE))[..., :3]
324 | except Exception as e:
325 | last_x = 0
326 | last_y = 0
327 | last_w = 0
328 | last_h = 0
329 | return None, 0, 0, 0, 0
330 | # find key frame
331 | if find_keyframe:
332 | if is_new_frame_better(avatar, frame, predictor, avatar_kp):
333 | print("Taking new frame!")
334 | predictor.reset_frames()
335 |
336 | # change face
337 | lock.acquire()
338 | try:
339 | out = predictor.predict(frame)
340 | out = cv2ImgAddText(out, "AI生成", 10, 10)
341 | except Exception as e:
342 | print(e)
343 | finally:
344 | lock.release()
345 | if out is None:
346 | print('predict returned None')
347 | return out, last_x, last_y, last_w, last_h
348 | except Exception as e:
349 | print(e)
350 | return None, 0, 0, 0, 0
351 |
352 |
353 | def fomm_test_predictor():
354 | predictor, avatar_kp = fomm_load_predictor("1")
355 | fomm_change_face(predictor, avatar_kp)
356 |
357 |
358 | if __name__ == "__main__":
359 | fomm_test_predictor()
360 |
--------------------------------------------------------------------------------
/afy/camera_selector.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import yaml
4 |
5 | from afy.utils import log
6 |
7 |
8 | g_selected_cam = None
9 |
10 |
11 | def query_cameras(n_cams):
12 | cam_frames = {}
13 | cap = None
14 | for camid in range(n_cams):
15 | log(f"Trying camera with id {camid}")
16 | cap = cv2.VideoCapture(camid)
17 |
18 | if not cap.isOpened():
19 | log(f"Camera with id {camid} is not available")
20 | continue
21 |
22 | ret, frame = cap.read()
23 |
24 | if not ret or frame is None:
25 | log(f"Could not read from camera with id {camid}")
26 | cap.release()
27 | continue
28 |
29 | for i in range(10):
30 | ret, frame = cap.read()
31 |
32 | cam_frames[camid] = frame.copy()
33 |
34 | cap.release()
35 |
36 | return cam_frames
37 |
38 |
39 | def make_grid(images, cell_size=(320, 240), cols=2):
40 | w0, h0 = cell_size
41 | _rows = len(images) // cols + int(len(images) % cols)
42 | _cols = min(len(images), cols)
43 | grid = np.zeros((h0 * _rows, w0 * _cols, 3), dtype=np.uint8)
44 | for i, (camid, img) in enumerate(images.items()):
45 | img = cv2.resize(img, (w0, h0))
46 | # add rect
47 | img = cv2.rectangle(img, (1, 1), (w0 - 1, h0 - 1), (0, 0, 255), 2)
48 | # add id
49 | img = cv2.putText(img, f'Camera {camid}', (10, 30), 0, 1, (0, 255, 0), 2)
50 | c = i % cols
51 | r = i // cols
52 | grid[r * h0:(r + 1) * h0, c * w0:(c + 1) * w0] = img[..., :3]
53 | return grid
54 |
55 |
56 | def mouse_callback(event, x, y, flags, userdata):
57 | global g_selected_cam
58 | if event == 1:
59 | cell_size, grid_cols, cam_frames = userdata
60 | c = x // cell_size[0]
61 | r = y // cell_size[1]
62 | camid = r * grid_cols + c
63 | if camid < len(cam_frames):
64 | g_selected_cam = camid
65 |
66 |
67 | def select_camera(cam_frames, window="Camera selector"):
68 | cell_size = 320, 240
69 | grid_cols = 2
70 | grid = make_grid(cam_frames, cols=grid_cols)
71 |
72 | # to fit the text if only one cam available
73 | if grid.shape[1] == 320:
74 | cell_size = 640, 480
75 | grid = cv2.resize(grid, cell_size)
76 |
77 | cv2.putText(grid, f'Click on the web camera to use', (10, grid.shape[0] - 30), 0, 0.7, (200, 200, 200), 2)
78 |
79 | cv2.namedWindow(window)
80 | cv2.setMouseCallback(window, mouse_callback, (cell_size, grid_cols, cam_frames))
81 | cv2.imshow(window, grid)
82 |
83 | while True:
84 | key = cv2.waitKey(10)
85 |
86 | if g_selected_cam is not None:
87 | break
88 |
89 | if key == 27:
90 | break
91 |
92 | cv2.destroyAllWindows()
93 |
94 | if g_selected_cam is not None:
95 | return list(cam_frames)[g_selected_cam]
96 | else:
97 | return list(cam_frames)[0]
98 |
99 |
100 | if __name__ == '__main__':
101 | with open('config.yaml', 'r') as f:
102 | config = yaml.load(f, Loader=yaml.FullLoader)
103 |
104 | cam_frames = query_cameras(config['query_n_cams'])
105 |
106 | if cam_frames:
107 | selected_cam = select_camera(cam_frames)
108 | print(f"Selected camera {selected_cam}")
109 | else:
110 | log("No cameras are available")
111 |
112 |
--------------------------------------------------------------------------------
/afy/predictor_local.py:
--------------------------------------------------------------------------------
1 | from scipy.spatial import ConvexHull
2 | import torch
3 | import yaml
4 | from modules.keypoint_detector import KPDetector
5 | from modules.generator_optim import OcclusionAwareGenerator
6 | from sync_batchnorm import DataParallelWithCallback
7 | import numpy as np
8 | import face_alignment
9 |
10 |
11 | def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
12 | use_relative_movement=False, use_relative_jacobian=False):
13 | if adapt_movement_scale:
14 | source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
15 | driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
16 | adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
17 | else:
18 | adapt_movement_scale = 1
19 |
20 | kp_new = {k: v for k, v in kp_driving.items()}
21 |
22 | if use_relative_movement:
23 | kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
24 | kp_value_diff *= adapt_movement_scale
25 | kp_new['value'] = kp_value_diff + kp_source['value']
26 |
27 | if use_relative_jacobian:
28 | jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
29 | kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
30 |
31 | return kp_new
32 |
33 |
34 | def to_tensor(a):
35 | return torch.tensor(a[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) / 255
36 |
37 |
38 | class PredictorLocal:
39 | def __init__(self, config_path, checkpoint_path, relative=False, adapt_movement_scale=False, device=None, enc_downscale=1):
40 | self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')
41 | self.relative = relative
42 | self.adapt_movement_scale = adapt_movement_scale
43 | self.start_frame = None
44 | self.start_frame_kp = None
45 | self.kp_driving_initial = None
46 | self.config_path = config_path
47 | self.checkpoint_path = checkpoint_path
48 | self.generator, self.kp_detector = self.load_checkpoints()
49 | self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True, device=self.device)
50 | self.source = None
51 | self.kp_source = None
52 | self.enc_downscale = enc_downscale
53 |
54 | def load_checkpoints(self):
55 | with open(self.config_path) as f:
56 | config = yaml.load(f, Loader=yaml.FullLoader)
57 |
58 | generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
59 | **config['model_params']['common_params'])
60 | generator.to(self.device)
61 |
62 | kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
63 | **config['model_params']['common_params'])
64 | kp_detector.to(self.device)
65 |
66 | checkpoint = torch.load(self.checkpoint_path, map_location=self.device)
67 | generator.load_state_dict(checkpoint['generator'])
68 | kp_detector.load_state_dict(checkpoint['kp_detector'])
69 |
70 | generator.eval()
71 | kp_detector.eval()
72 |
73 | return generator, kp_detector
74 |
75 | def reset_frames(self):
76 | self.kp_driving_initial = None
77 |
78 | def set_source_image(self, source_image):
79 | self.source = to_tensor(source_image).to(self.device)
80 | self.kp_source = self.kp_detector(self.source)
81 |
82 | if self.enc_downscale > 1:
83 | h, w = int(self.source.shape[2] / self.enc_downscale), int(self.source.shape[3] / self.enc_downscale)
84 | source_enc = torch.nn.functional.interpolate(self.source, size=(h, w), mode='bilinear')
85 | else:
86 | source_enc = self.source
87 |
88 | self.generator.encode_source(source_enc)
89 |
90 | def predict(self, driving_frame):
91 | assert self.kp_source is not None, "call set_source_image()"
92 |
93 | with torch.no_grad():
94 | driving = to_tensor(driving_frame).to(self.device)
95 |
96 | if self.kp_driving_initial is None:
97 | self.kp_driving_initial = self.kp_detector(driving)
98 | self.start_frame = driving_frame.copy()
99 | self.start_frame_kp = self.get_frame_kp(driving_frame)
100 |
101 | kp_driving = self.kp_detector(driving)
102 | kp_norm = normalize_kp(kp_source=self.kp_source, kp_driving=kp_driving,
103 | kp_driving_initial=self.kp_driving_initial, use_relative_movement=self.relative,
104 | use_relative_jacobian=self.relative, adapt_movement_scale=self.adapt_movement_scale)
105 |
106 | out = self.generator(self.source, kp_source=self.kp_source, kp_driving=kp_norm)
107 |
108 | out = np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]
109 | out = (np.clip(out, 0, 1) * 255).astype(np.uint8)
110 |
111 | return out
112 |
113 | def get_frame_kp(self, image):
114 | kp_landmarks = self.fa.get_landmarks(image)
115 | if kp_landmarks:
116 | kp_image = kp_landmarks[0]
117 | kp_image = self.normalize_alignment_kp(kp_image)
118 | return kp_image
119 | else:
120 | return None
121 |
122 | @staticmethod
123 | def normalize_alignment_kp(kp):
124 | kp = kp - kp.mean(axis=0, keepdims=True)
125 | area = ConvexHull(kp[:, :2]).volume
126 | area = np.sqrt(area)
127 | kp[:, :2] = kp[:, :2] / area
128 | return kp
129 |
130 | def get_start_frame(self):
131 | return self.start_frame
132 |
133 | def get_start_frame_kp(self):
134 | return self.start_frame_kp
135 |
--------------------------------------------------------------------------------
/afy/utils.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import time
3 | from collections import defaultdict
4 |
5 | import numpy as np
6 | import cv2
7 |
8 |
9 | def log(*args, file=sys.stderr, **kwargs):
10 | time_str = f'{time.time():.6f}'
11 | print(f'[{time_str}]', *args, file=file, **kwargs)
12 |
13 |
14 | def info(*args, file=sys.stdout, **kwargs):
15 | print(*args, file=file, **kwargs)
16 |
17 |
18 | class Tee(object):
19 | def __init__(self, filename, mode='w', terminal=sys.stderr):
20 | self.file = open(filename, mode, buffering=1)
21 | self.terminal = terminal
22 |
23 | def __del__(self):
24 | self.file.close()
25 |
26 | def write(self, *args, **kwargs):
27 | log(*args, file=self.file, **kwargs)
28 | log(*args, file=self.terminal, **kwargs)
29 |
30 | def __call__(self, *args, **kwargs):
31 | return self.write(*args, **kwargs)
32 |
33 | def flush(self):
34 | self.file.flush()
35 |
36 |
37 | class Logger():
38 | def __init__(self, filename, verbose=True):
39 | self.tee = Tee(filename)
40 | self.verbose = verbose
41 |
42 | def __call__(self, *args, important=False, **kwargs):
43 | if not self.verbose and not important:
44 | return
45 |
46 | self.tee(*args, **kwargs)
47 |
48 |
49 | class Once():
50 | _id = {}
51 |
52 | def __init__(self, what, who=log, per=1e12):
53 | """ Do who(what) once per seconds.
54 | what: args for who
55 | who: callable
56 | per: frequency in seconds.
57 | """
58 | assert callable(who)
59 | now = time.time()
60 | if what not in Once._id or now - Once._id[what] > per:
61 | who(what)
62 | Once._id[what] = now
63 |
64 |
65 | class TicToc:
66 | def __init__(self):
67 | self.t = None
68 | self.t_init = time.time()
69 |
70 | def tic(self):
71 | self.t = time.time()
72 |
73 | def toc(self, total=False):
74 | if total:
75 | return (time.time() - self.t_init) * 1000
76 |
77 | assert self.t, 'You forgot to call tic()'
78 | return (time.time() - self.t) * 1000
79 |
80 | def tocp(self, str):
81 | t = self.toc()
82 | log(f"{str} took {t:.4f}ms")
83 | return t
84 |
85 |
86 | class AccumDict:
87 | def __init__(self, num_f=3):
88 | self.d = defaultdict(list)
89 | self.num_f = num_f
90 |
91 | def add(self, k, v):
92 | self.d[k] += [v]
93 |
94 | def __dict__(self):
95 | return self.d
96 |
97 | def __getitem__(self, key):
98 | return self.d[key]
99 |
100 | def __str__(self):
101 | s = ''
102 | for k in self.d:
103 | if not self.d[k]:
104 | continue
105 | cur = self.d[k][-1]
106 | avg = np.mean(self.d[k])
107 | format_str = '{:.%df}' % self.num_f
108 | cur_str = format_str.format(cur)
109 | avg_str = format_str.format(avg)
110 | s += f'{k} {cur_str} ({avg_str})\t\t'
111 | return s
112 |
113 | def __repr__(self):
114 | return self.__str__()
115 |
116 |
117 | def clamp(value, min_value, max_value):
118 | return max(min(value, max_value), min_value)
119 |
120 |
121 | def crop(img, p=0.7, offset_x=0, offset_y=0):
122 | h, w = img.shape[:2]
123 | x = int(min(w, h) * p)
124 | l = (w - x) // 2
125 | r = w - l
126 | u = (h - x) // 2
127 | d = h - u
128 |
129 | offset_x = clamp(offset_x, -l, w - r)
130 | offset_y = clamp(offset_y, -u, h - d)
131 |
132 | l += offset_x
133 | r += offset_x
134 | u += offset_y
135 | d += offset_y
136 |
137 | return img[u:d, l:r], (offset_x, offset_y)
138 |
139 |
140 | def pad_img(img, target_size, default_pad=0):
141 | sh, sw = img.shape[:2]
142 | w, h = target_size
143 | pad_w, pad_h = default_pad, default_pad
144 | if w / h > 1:
145 | pad_w += int(sw * (w / h) - sw) // 2
146 | else:
147 | pad_h += int(sh * (h / w) - sh) // 2
148 | out = np.pad(img, [[pad_h, pad_h], [pad_w, pad_w], [0,0]], 'constant')
149 | return out
150 |
151 |
152 | def resize(img, size, version='cv'):
153 | return cv2.resize(img, size)
154 |
--------------------------------------------------------------------------------
/afy/videocaptureasync.py:
--------------------------------------------------------------------------------
1 | # https://github.com/gilbertfrancois/video-capture-async
2 |
3 | import threading
4 | import cv2
5 | import time
6 |
7 |
8 | WARMUP_TIMEOUT = 10.0
9 |
10 |
11 | class VideoCaptureAsync:
12 | def __init__(self, src=0, width=640, height=480):
13 | self.src = src
14 |
15 | self.cap = cv2.VideoCapture(self.src)
16 | if not self.cap.isOpened():
17 | raise RuntimeError("Cannot open camera")
18 |
19 | self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
20 | self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
21 | self.grabbed, self.frame = self.cap.read()
22 | self.started = False
23 | self.read_lock = threading.Lock()
24 |
25 | def set(self, var1, var2):
26 | self.cap.set(var1, var2)
27 |
28 | def isOpened(self):
29 | return self.cap.isOpened()
30 |
31 | def start(self):
32 | if self.started:
33 | print('[!] Asynchronous video capturing has already been started.')
34 | return None
35 | self.started = True
36 | self.thread = threading.Thread(target=self.update, args=(), daemon=True)
37 | self.thread.start()
38 |
39 | # (warmup) wait for the first successfully grabbed frame
40 | warmup_start_time = time.time()
41 | while not self.grabbed:
42 | warmup_elapsed_time = (time.time() - warmup_start_time)
43 | if warmup_elapsed_time > WARMUP_TIMEOUT:
44 | raise RuntimeError(f"Failed to successfully grab frame from the camera (timeout={WARMUP_TIMEOUT}s). Try to restart.")
45 |
46 | time.sleep(0.5)
47 |
48 | return self
49 |
50 | def update(self):
51 | while self.started:
52 | grabbed, frame = self.cap.read()
53 | if not grabbed or frame is None or frame.size == 0:
54 | continue
55 | with self.read_lock:
56 | self.grabbed = grabbed
57 | self.frame = frame
58 |
59 | def read(self):
60 | while True:
61 | with self.read_lock:
62 | frame = self.frame.copy()
63 | grabbed = self.grabbed
64 | break
65 | return grabbed, frame
66 |
67 | def stop(self):
68 | self.started = False
69 | self.thread.join()
70 |
71 | def __exit__(self, exec_type, exc_value, traceback):
72 | self.cap.release()
73 |
--------------------------------------------------------------------------------
/avatars/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/1.png
--------------------------------------------------------------------------------
/avatars/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/2.png
--------------------------------------------------------------------------------
/avatars/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/3.png
--------------------------------------------------------------------------------
/avatars/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/4.png
--------------------------------------------------------------------------------
/avatars/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/5.png
--------------------------------------------------------------------------------
/avatars/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/6.png
--------------------------------------------------------------------------------
/avatars/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/7.png
--------------------------------------------------------------------------------
/avatars/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/8.png
--------------------------------------------------------------------------------
/avatars/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/avatars/9.png
--------------------------------------------------------------------------------
/client.js:
--------------------------------------------------------------------------------
1 | // get DOM elements
2 | var iceConnectionLog = document.getElementById('ice-connection-state'),
3 | iceGatheringLog = document.getElementById('ice-gathering-state'),
4 | signalingLog = document.getElementById('signaling-state');
5 |
6 | // peer connection
7 | var pc = null;
8 |
9 | // data channel
10 | var dc = null, dcInterval = null;
11 |
12 | function createPeerConnection() {
13 | var config = {
14 | sdpSemantics: 'unified-plan',
15 | iceCandidatePoolSize: 10
16 | };
17 |
18 | var url = window.location.host;
19 | if (url.includes("127.0.0.1")){
20 | config.iceServers = [] ;
21 | }else{
22 | config.iceServers = [{
23 | //urls: 'stun:stun1.l.google.com:19302'
24 | urls: "turn:gitclone.com:3478",
25 | username: "webrtc",
26 | credential: "Webrtc987123654"
27 | }];
28 | }
29 |
30 | pc = new RTCPeerConnection(config);
31 |
32 | // register some listeners to help debugging
33 | pc.addEventListener('icegatheringstatechange', function () {
34 | iceGatheringLog.textContent += ' -> ' + pc.iceGatheringState;
35 | }, false);
36 | iceGatheringLog.textContent = pc.iceGatheringState;
37 |
38 | pc.addEventListener('iceconnectionstatechange', function () {
39 | iceConnectionLog.textContent += ' -> ' + pc.iceConnectionState;
40 | }, false);
41 | iceConnectionLog.textContent = pc.iceConnectionState;
42 |
43 | pc.addEventListener('signalingstatechange', function () {
44 | signalingLog.textContent += ' -> ' + pc.signalingState;
45 | }, false);
46 | signalingLog.textContent = pc.signalingState;
47 |
48 | // connect audio / video
49 | pc.addEventListener('track', function (evt) {
50 | if (evt.track.kind == 'video')
51 | document.getElementById('video').srcObject = evt.streams[0];
52 | });
53 |
54 | return pc;
55 | }
56 |
57 | function negotiate() {
58 | return pc.createOffer().then(function (offer) {
59 | return pc.setLocalDescription(offer);
60 | }).then(function () {
61 | // wait for ICE gathering to complete
62 | return new Promise(function (resolve) {
63 | if (pc.iceGatheringState === 'complete') {
64 | resolve();
65 | } else {
66 | function checkState() {
67 | if (pc.iceGatheringState === 'complete') {
68 | pc.removeEventListener('icegatheringstatechange', checkState);
69 | resolve();
70 | }
71 | }
72 | pc.addEventListener('icegatheringstatechange', checkState);
73 | }
74 | });
75 | }).then(function () {
76 | var offer = pc.localDescription;
77 | var codec;
78 |
79 | codec = document.getElementById('video-codec').value;
80 | if (codec !== 'default') {
81 | offer.sdp = sdpFilterCodec('video', codec, offer.sdp);
82 | }
83 |
84 | //document.getElementById('offer-sdp').textContent = offer.sdp;
85 | var avatar_type = '0' ;
86 | if (document.getElementById('avatar_type1').checked){
87 | avatar_type = '1' ;
88 | }
89 | return fetch('/offer', {
90 | body: JSON.stringify({
91 | sdp: offer.sdp,
92 | type: offer.type,
93 | avatar: document.getElementById('avatar').value + "|" + avatar_type
94 | }),
95 | headers: {
96 | 'Content-Type': 'application/json'
97 | },
98 | method: 'POST'
99 | });
100 | }).then(function (response) {
101 | return response.json();
102 | }).then(function (answer) {
103 | //document.getElementById('answer-sdp').textContent = answer.sdp;
104 | return pc.setRemoteDescription(answer);
105 | }).catch(function (e) {
106 | alert(e);
107 | });
108 | }
109 |
110 | function start() {
111 | document.getElementById('start').style.display = 'none';
112 | pc = createPeerConnection();
113 |
114 | var time_start = null;
115 |
116 | function current_stamp() {
117 | if (time_start === null) {
118 | time_start = new Date().getTime();
119 | return 0;
120 | } else {
121 | return new Date().getTime() - time_start;
122 | }
123 | }
124 |
125 | var constraints = {
126 | audio: false,
127 | video: true
128 | };
129 |
130 | var resolution = document.getElementById('video-resolution').value;
131 | if (resolution) {
132 | resolution = resolution.split('x');
133 | constraints.video = {
134 | width: parseInt(resolution[0], 10),
135 | height: parseInt(resolution[1], 10)
136 | };
137 | } else {
138 | constraints.video = true;
139 | }
140 |
141 | if (constraints.audio || constraints.video) {
142 | if (constraints.video) {
143 | document.getElementById('media').style.display = 'block';
144 | }
145 | navigator.mediaDevices.getUserMedia(constraints).then(function (stream) {
146 | if (stream) {
147 | document.getElementById('video_local').srcObject = stream;
148 | }
149 | stream.getTracks().forEach(function (track) {
150 | pc.addTrack(track, stream);
151 | });
152 | return negotiate();
153 | }, function (err) {
154 | alert('Could not acquire media: ' + err);
155 | });
156 | } else {
157 | negotiate();
158 | }
159 |
160 | document.getElementById('stop').style.display = 'inline-block';
161 | }
162 |
163 | function stop() {
164 | document.getElementById('stop').style.display = 'none';
165 |
166 |
167 | // close data channel
168 | if (dc) {
169 | dc.close();
170 | }
171 |
172 | // close transceivers
173 | if (pc.getTransceivers) {
174 | pc.getTransceivers().forEach(function (transceiver) {
175 | if (transceiver.stop) {
176 | transceiver.stop();
177 | }
178 | });
179 | }
180 |
181 | // close local audio / video
182 | pc.getSenders().forEach(function (sender) {
183 | sender.track.stop();
184 | });
185 |
186 | // close peer connection
187 | setTimeout(function () {
188 | pc.close();
189 | }, 500);
190 | document.getElementById('start').style.display = 'inline-block';
191 | }
192 |
193 | function sdpFilterCodec(kind, codec, realSdp) {
194 | var allowed = []
195 | var rtxRegex = new RegExp('a=fmtp:(\\d+) apt=(\\d+)\r$');
196 | var codecRegex = new RegExp('a=rtpmap:([0-9]+) ' + escapeRegExp(codec))
197 | var videoRegex = new RegExp('(m=' + kind + ' .*?)( ([0-9]+))*\\s*$')
198 |
199 | var lines = realSdp.split('\n');
200 |
201 | var isKind = false;
202 | for (var i = 0; i < lines.length; i++) {
203 | if (lines[i].startsWith('m=' + kind + ' ')) {
204 | isKind = true;
205 | } else if (lines[i].startsWith('m=')) {
206 | isKind = false;
207 | }
208 |
209 | if (isKind) {
210 | var match = lines[i].match(codecRegex);
211 | if (match) {
212 | allowed.push(parseInt(match[1]));
213 | }
214 |
215 | match = lines[i].match(rtxRegex);
216 | if (match && allowed.includes(parseInt(match[2]))) {
217 | allowed.push(parseInt(match[1]));
218 | }
219 | }
220 | }
221 |
222 | var skipRegex = 'a=(fmtp|rtcp-fb|rtpmap):([0-9]+)';
223 | var sdp = '';
224 |
225 | isKind = false;
226 | for (var i = 0; i < lines.length; i++) {
227 | if (lines[i].startsWith('m=' + kind + ' ')) {
228 | isKind = true;
229 | } else if (lines[i].startsWith('m=')) {
230 | isKind = false;
231 | }
232 |
233 | if (isKind) {
234 | var skipMatch = lines[i].match(skipRegex);
235 | if (skipMatch && !allowed.includes(parseInt(skipMatch[2]))) {
236 | continue;
237 | } else if (lines[i].match(videoRegex)) {
238 | sdp += lines[i].replace(videoRegex, '$1 ' + allowed.join(' ')) + '\n';
239 | } else {
240 | sdp += lines[i] + '\n';
241 | }
242 | } else {
243 | sdp += lines[i] + '\n';
244 | }
245 | }
246 |
247 | return sdp;
248 | }
249 |
250 | function escapeRegExp(string) {
251 | return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
252 | }
253 |
254 | function onAvatarClick(avatar) {
255 | document.getElementById('avatar').value = avatar;
256 | }
257 |
--------------------------------------------------------------------------------
/faces.py:
--------------------------------------------------------------------------------
1 | import dlib
2 | import cv2
3 | import numpy as np
4 | import time
5 | import logging
6 | import os
7 | import ssl
8 | import uuid
9 | import urllib
10 | from PIL import Image, ImageDraw, ImageFont
11 |
12 | detector_face = None
13 | predictor_face = None
14 |
15 |
16 | def get_image_size(image):
17 | image_size = (image.shape[0], image.shape[1])
18 | return image_size
19 |
20 |
21 | def get_face_landmarks(image, face_detector, shape_predictor):
22 | dets = face_detector(image, 1)
23 | if dets is None:
24 | return None
25 | if len(dets) == 0:
26 | return None
27 | shape = shape_predictor(image, dets[0])
28 | face_landmarks = np.array([[p.x, p.y] for p in shape.parts()])
29 | return face_landmarks
30 |
31 |
32 | def get_face_mask(image_size, face_landmarks):
33 | mask = np.zeros(image_size, dtype=np.uint8)
34 | points = np.concatenate([face_landmarks[0:16], face_landmarks[26:17:-1]])
35 | cv2.fillPoly(img=mask, pts=[points], color=255)
36 | return mask
37 |
38 |
39 | def get_affine_image(image1, image2, face_landmarks1, face_landmarks2):
40 | three_points_index = [18, 8, 25]
41 | M = cv2.getAffineTransform(face_landmarks1[three_points_index].astype(np.float32),
42 | face_landmarks2[three_points_index].astype(np.float32))
43 | dsize = (image2.shape[1], image2.shape[0])
44 | affine_image = cv2.warpAffine(image1, M, dsize)
45 | return affine_image.astype(np.uint8)
46 |
47 |
48 | def get_mask_center_point(image_mask):
49 | image_mask_index = np.argwhere(image_mask > 0)
50 | miny, minx = np.min(image_mask_index, axis=0)
51 | maxy, maxx = np.max(image_mask_index, axis=0)
52 | center_point = ((maxx + minx) // 2, (maxy + miny) // 2)
53 | return center_point
54 |
55 |
56 | def get_mask_union(mask1, mask2):
57 | mask = np.min([mask1, mask2], axis=0)
58 | mask = ((cv2.blur(mask, (5, 5)) == 255) * 255).astype(np.uint8)
59 | mask = cv2.blur(mask, (3, 3)).astype(np.uint8)
60 | return mask
61 |
62 |
63 | def skin_color_adjustment(im1, im2, mask=None):
64 | if mask is None:
65 | im1_ksize = 55
66 | im2_ksize = 55
67 | im1_factor = cv2.GaussianBlur(
68 | im1, (im1_ksize, im1_ksize), 0).astype(np.float64)
69 | im2_factor = cv2.GaussianBlur(
70 | im2, (im2_ksize, im2_ksize), 0).astype(np.float64)
71 | else:
72 | im1_face_image = cv2.bitwise_and(im1, im1, mask=mask)
73 | im2_face_image = cv2.bitwise_and(im2, im2, mask=mask)
74 | im1_factor = np.mean(im1_face_image, axis=(0, 1))
75 | im2_factor = np.mean(im2_face_image, axis=(0, 1))
76 |
77 | im1 = np.clip((im1.astype(np.float64) * im2_factor /
78 | np.clip(im1_factor, 1e-6, None)), 0, 255).astype(np.uint8)
79 | return im1
80 |
81 |
82 | def load_detector():
83 | global detector_face
84 | global predictor_face
85 | if detector_face == None:
86 | detector_face = dlib.get_frontal_face_detector()
87 | if predictor_face == None:
88 | predictor_face = dlib.shape_predictor(
89 | r'./model/shape_predictor_68_face_landmarks.dat')
90 |
91 |
92 | def load_custom_avatar(avatar):
93 | if avatar.isdigit():
94 | return None
95 | url_request = "https://classnotfound.com.cn/aiit/avatar/" + avatar + ".jpg"
96 | fileName = "./avatars/temp/" + avatar + ".jpg"
97 | avatar_basic_auth = os.getenv('avatar_basic_auth')
98 | opener = urllib.request.build_opener()
99 | opener.addheaders = [
100 | ('Authorization', 'Basic ' + avatar_basic_auth)]
101 | urllib.request.install_opener(opener)
102 | try:
103 | urllib.request.urlretrieve(url=url_request, filename=fileName)
104 | return fileName
105 | except Exception as e:
106 | print(e)
107 | return None
108 | urllib.request.urlcleanup()
109 |
110 |
111 | def load_landmarks(avatar):
112 | global detector_face
113 | global predictor_face
114 | if avatar.isdigit():
115 | im1 = cv2.imread('./avatars/' + avatar + '.png')
116 | else:
117 | fileName = load_custom_avatar(avatar)
118 | if fileName is None:
119 | im1 = cv2.imread('./avatars/1.png')
120 | else:
121 | im1 = cv2.imread(fileName)
122 | im1 = cv2.resize(im1, (480, im1.shape[0] * 640 // im1.shape[1]))
123 | landmarks1 = get_face_landmarks(im1, detector_face, predictor_face)
124 | im1_size = get_image_size(im1)
125 | im1_mask = get_face_mask(im1_size, landmarks1)
126 | return im1, landmarks1, im1_mask
127 |
128 | fontStyle = ImageFont.truetype("font/simsun.ttc", 20, encoding="utf-8")
129 |
130 | def cv2ImgAddText(img, text, left, top, textColor=(255, 255, 0)):
131 | #start_time = time.time()
132 | if (isinstance(img, np.ndarray)):
133 | img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
134 | draw = ImageDraw.Draw(img)
135 | draw.text((left, top), text, textColor, font=fontStyle)
136 | #end_time = time.time()
137 | #run_time = end_time - start_time
138 | #print(run_time)
139 | return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
140 |
141 | def face_avatar(frame, im1, landmarks1, im1_mask):
142 | global detector_face
143 | global predictor_face
144 | time_start = time.perf_counter()
145 | im2 = frame
146 | img_gray = cv2.cvtColor(src=im2, code=cv2.COLOR_BGR2GRAY)
147 | landmarks2 = get_face_landmarks(
148 | img_gray, detector_face, predictor_face) # 68_face_landmarks
149 | time_end = time.perf_counter()
150 | #print(time_end - time_start)
151 | if landmarks2 is not None:
152 | im2_size = get_image_size(im2)
153 | im2_mask = get_face_mask(im2_size, landmarks2)
154 |
155 | affine_im1 = get_affine_image(im1, im2, landmarks1, landmarks2)
156 | affine_im1_mask = get_affine_image(
157 | im1_mask, im2, landmarks1, landmarks2)
158 |
159 | union_mask = get_mask_union(im2_mask, affine_im1_mask)
160 | #time_start = time.clock()
161 | #affine_im1 = skin_color_adjustment(affine_im1, im2, mask=union_mask)
162 | #time_end = time.clock()
163 | #print(time_end - time_start)
164 | point = get_mask_center_point(affine_im1_mask)
165 | seamless_im = cv2.seamlessClone(
166 | affine_im1, im2, mask=union_mask, p=point, flags=cv2.NORMAL_CLONE)
167 | return cv2ImgAddText(seamless_im, "AI生成", 10, 10)
168 | else:
169 | return None
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | avatarify-webrtc demo
7 |
62 |
63 |
64 |
65 | avatarify-webrtc demo [If opened in WeChat, please switch to your phone's browser]
66 |
67 |
68 | Meeting with Avatar
69 |
70 | Download App
71 |
72 | Source code on Github
73 |
74 |
75 |
76 |
83 |
84 |
85 |
90 |
91 |
92 |
103 |
104 |
105 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 | Media
127 |
133 | State
134 |
135 | ICE gathering state:
136 |
137 |
138 | ICE connection state:
139 |
140 |
141 | Signaling state:
142 |
143 |
149 |
150 |
151 |
152 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import asyncio
3 | import json
4 | import logging
5 | import os
6 | import ssl
7 | import uuid
8 | import urllib
9 | import subprocess
10 | import threading
11 | import time
12 | from aiohttp import web
13 | from av import VideoFrame
14 | import aiohttp_cors
15 | from aiortc import MediaStreamTrack, RTCPeerConnection, RTCSessionDescription, RTCConfiguration, RTCIceServer
16 | from aiortc.contrib.media import MediaBlackhole, MediaRecorder, MediaRelay
17 | from afy.cam_fomm import fomm_load_predictor, fomm_change_face, fomm_change_frame, fomm_test_predictor, InitOutPipe, InitLiveKitCli
18 | from faces import load_detector, load_landmarks, face_avatar
19 |
20 | global RUN_LIVEKIT_CLI
21 |
22 |
23 | def parseTransParams(transform):
24 | params = transform.split("|")
25 | avatar = "1"
26 | avatar_type = "0"
27 | avatar_room = ""
28 | avatar_id = ""
29 |
30 | if len(params) == 0:
31 | avatar = "1"
32 | if len(params) >= 1:
33 | avatar = params[0]
34 | if len(params) >= 2:
35 | avatar_type = params[1]
36 | if len(params) >= 3:
37 | avatar_room = params[2]
38 | if len(params) >= 4:
39 | avatar_id = params[3]
40 | return avatar, avatar_type, avatar_room, avatar_id
41 |
42 |
43 | def publishAvatar(filename, commond):
44 | global RUN_LIVEKIT_CLI
45 | i = 0
46 | while True:
47 | print("Wait for ", filename)
48 | if os.path.exists(filename):
49 | break
50 | # wait up to 1 minute
51 | i = i + 1
52 | if i == 60:
53 | return
54 | time.sleep(1)
55 | process = subprocess.Popen(commond, shell=True)
56 | if process.pid != 0:
57 | RUN_LIVEKIT_CLI[filename] = process
58 |
59 |
60 | ROOT = os.path.dirname(__file__)
61 |
62 | logger = logging.getLogger("pc")
63 | pcs = set()
64 | relay = MediaRelay()
65 |
66 |
67 | class VideoTransformTrack(MediaStreamTrack):
68 |
69 | kind = "video"
70 |
71 | def __init__(self, track, transform):
72 | super().__init__() # don't forget this!
73 | # parse params
74 | self.avatar, self.avatar_type, self.avatar_room, self.avatar_id = parseTransParams(
75 | transform)
76 | self.track = track
77 | # load model
78 | if self.avatar_type == "0":
79 | self.im1, self.landmarks1, self.im1_mask = load_landmarks(
80 | self.avatar)
81 | elif self.avatar_type == "1":
82 | self.fomm_predictor, self.avatar_kp = fomm_load_predictor(
83 | self.avatar)
84 | else:
85 | self.fomm_predictor, self.avatar_kp = None, None
86 | # init var
87 | self.skip_frame = 0
88 | self.skip_detectface = 0
89 | self.new_frame = None
90 | self.last_x = 0
91 | self.last_y = 0
92 | self.last_w = 0
93 | self.last_h = 0
94 | # init pipe
95 | self.filename = ""
96 | self.pipe = None
97 | # support livekit
98 | if self.avatar_room != "":
99 | self.filename = '/tmp/' + self.avatar_room + '__' + \
100 | self.avatar_id + '__' + self.avatar + '.h264.sock'
101 | self.pipe = InitOutPipe(self.filename)
102 | commond = InitLiveKitCli(
103 | self.avatar_room, self.avatar_id, self.filename)
104 | threading.Thread(target=publishAvatar,
105 | args=(self.filename, commond)).start()
106 |
107 | def __del__(self):
108 | global RUN_LIVEKIT_CLI
109 | if self.pipe is not None:
110 | self.pipe.kill()
111 | print("ffmpeg pipe be killed")
112 | if self.filename != "":
113 | if self.filename in RUN_LIVEKIT_CLI.keys():
114 | process = RUN_LIVEKIT_CLI.get(self.filename)
115 | if process is not None:
116 | process.kill()
117 | RUN_LIVEKIT_CLI.pop(self.filename)
118 | print("livekit-cli process be killed")
119 | if os.path.exists(self.filename):
120 | os.remove(self.filename)
121 | print("remove sock file")
122 |
123 | async def recv(self):
124 | frame = await self.track.recv()
125 | if self.avatar_type == "0":
126 | if self.skip_frame == 0:
127 | img = frame.to_ndarray(format="bgr24")
128 | try:
129 | img = face_avatar(
130 | img, self.im1, self.landmarks1, self.im1_mask)
131 | except Exception as e:
132 | print(e)
133 | img = None
134 | if img is not None:
135 | self.new_frame = VideoFrame.from_ndarray(
136 | img, format="bgr24")
137 | self.new_frame.pts = frame.pts
138 | self.new_frame.time_base = frame.time_base
139 | self.skip_frame = self.skip_frame + 1
140 | self.skip_frame = self.skip_frame % 10
141 | if self.new_frame is None:
142 | return frame
143 | else:
144 | return self.new_frame
145 | elif self.avatar_type == "1":
146 | if self.skip_frame == 0:
147 | is_detectface = (self.skip_detectface == 0)
148 | img = frame.to_ndarray(format="bgr24")
149 | try:
150 | img, self.last_x, self.last_y, self.last_w, self.last_h = fomm_change_frame(
151 | self.fomm_predictor, self.avatar_kp, img, self.last_x, self.last_y, self.last_w, self.last_h, is_detectface)
152 | except Exception as e:
153 | print(e)
154 | img = None
155 | if img is not None:
156 | self.new_frame = VideoFrame.from_ndarray(
157 | img[..., ::-1], format="bgr24")
158 | if self.pipe is not None:
159 | try:
160 | self.pipe.stdin.write(img[..., ::-1].tobytes())
161 | except Exception as e:
162 | print(e)
163 | self.new_frame.pts = frame.pts
164 | self.new_frame.time_base = frame.time_base
165 | self.skip_frame = self.skip_frame + 1
166 | self.skip_frame = self.skip_frame % 3
167 | self.skip_detectface = self.skip_detectface + 1
168 | self.skip_detectface = self.skip_detectface % 30
169 | if self.new_frame is None:
170 | return frame
171 | else:
172 | return self.new_frame
173 | else:
174 | return frame
175 |
176 |
177 | async def index(request):
178 | content = open(os.path.join(ROOT, "index.html"),
179 | "r", encoding='utf-8').read()
180 | logger.info("index for %s", request.remote)
181 | return web.Response(content_type="text/html", text=content)
182 |
183 |
184 | async def javascript(request):
185 | content = open(os.path.join(ROOT, "client.js"),
186 | "r", encoding='utf-8').read()
187 | return web.Response(content_type="application/javascript", text=content)
188 |
189 |
190 | async def offer(request):
191 | params = await request.json()
192 | offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
193 |
194 | configuration = RTCConfiguration([
195 | # RTCIceServer("stun:stun1.l.google.com:19302")
196 | RTCIceServer("turn:gitclone.com:3478", "webrtc", "Webrtc987123654")
197 | ])
198 |
199 | pc = RTCPeerConnection(configuration)
200 | pc_id = "PeerConnection(%s)" % uuid.uuid4()
201 | pcs.add(pc)
202 |
203 | def log_info(msg, *args):
204 | logger.info(pc_id + " " + msg, *args)
205 |
206 | log_info("Created for %s", request.remote)
207 |
208 | @pc.on("datachannel")
209 | def on_datachannel(channel):
210 | @channel.on("message")
211 | def on_message(message):
212 | if isinstance(message, str) and message.startswith("ping"):
213 | channel.send("pong" + message[4:])
214 |
215 | @pc.on("connectionstatechange")
216 | async def on_connectionstatechange():
217 | log_info("Connection state is %s", pc.connectionState)
218 | if pc.connectionState == "failed":
219 | await pc.close()
220 | pcs.discard(pc)
221 |
222 | @pc.on("track")
223 | def on_track(track):
224 | log_info("Track %s received", track.kind)
225 | if track.kind == "video":
226 | pc.addTrack(
227 | VideoTransformTrack(
228 | relay.subscribe(track), transform=params["avatar"]
229 | )
230 | )
231 |
232 | @track.on("ended")
233 | async def on_ended():
234 | log_info("Track %s ended", track.kind)
235 |
236 | # handle offer
237 | await pc.setRemoteDescription(offer)
238 |
239 | # send answer
240 | answer = await pc.createAnswer()
241 | await pc.setLocalDescription(answer)
242 |
243 | return web.Response(
244 | content_type="application/json",
245 | text=json.dumps(
246 | {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
247 | ),
248 | )
249 |
250 |
251 | async def on_shutdown(app):
252 | # close peer connections
253 | coros = [pc.close() for pc in pcs]
254 | await asyncio.gather(*coros)
255 | pcs.clear()
256 |
257 |
258 | app = web.Application()
259 | cors = aiohttp_cors.setup(app)
260 | app.on_shutdown.append(on_shutdown)
261 | app.router.add_get("/", index)
262 | app.router.add_get("/client.js", javascript)
263 | app.router.add_post("/offer", offer)
264 | app.router.add_static('/avatars/',
265 | path='avatars',
266 | name='avatars')
267 |
268 | for route in list(app.router.routes()):
269 | cors.add(route, {
270 | "*": aiohttp_cors.ResourceOptions(
271 | allow_credentials=True,
272 | expose_headers="*",
273 | allow_headers="*",
274 | allow_methods="*"
275 | )
276 | })
277 |
278 | if __name__ == "__main__":
279 | # fomm_test_predictor()
280 | RUN_LIVEKIT_CLI = dict()
281 | load_detector()
282 | logging.basicConfig(
283 | level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%m-%d %H:%M:%S',)
284 | web.run_app(
285 | app, access_log=None, host="0.0.0.0", port=8080, ssl_context=None
286 | )
287 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # Avatarify-webrtc
2 |
3 | Built on aiortc (a Python implementation of WebRTC) and dlib, this project uses server-side GPU compute to apply a dynamic face-swap effect, making effective use of cloud computing power.
4 |
5 | Simple demo: https://gitclone.com/aiit/avatarify-webrtc/
6 |
7 | ==**Meeting with avatar demo**==: https://gitclone.com/aiit/meeting
8 |
9 | You can download the app from https://aiit.gitclone.com/ to see other face effects.
10 |
11 | ## I. How it works
12 |
13 | ### 1. webrtc
14 |
15 | - The web client sends an /offer request to negotiate the WebRTC transport parameters with the aiortc service, then adds its camera stream to a track on the PeerConnection.
16 |
17 | - The aiortc side receives the video stream from that track, hands it to the face-swap model for frame-by-frame conversion, and adds the converted frames to a track on its own PeerConnection; the web client receives that track and displays it.
18 |
19 | Based in part on https://github.com/jcrisp88/flutter-webrtc_python-aiortc-opencv. A minimal sketch of the /offer negotiation is shown below.
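As a rough illustration (not the project's client code), the same negotiation can also be driven from Python with aiortc acting as the client. The endpoint, port and the `sdp`/`type`/`avatar` payload fields mirror main.py; the dummy track is only a placeholder for the real camera stream sent by the browser:

```python
# Minimal sketch of negotiating with the /offer endpoint, with aiortc acting as the client.
import asyncio
import aiohttp
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.mediastreams import VideoStreamTrack

async def negotiate(url="http://127.0.0.1:8080/offer"):
    pc = RTCPeerConnection()
    pc.addTrack(VideoStreamTrack())  # dummy video source, just to open a video transceiver
    await pc.setLocalDescription(await pc.createOffer())
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json={
            "sdp": pc.localDescription.sdp,
            "type": pc.localDescription.type,
            "avatar": "0",  # avatar type, read as params["avatar"] in main.py
        }) as resp:
            answer = await resp.json()
    await pc.setRemoteDescription(
        RTCSessionDescription(sdp=answer["sdp"], type=answer["type"]))
    return pc

# asyncio.run(negotiate())
```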
20 |
21 | ### 2. Face swapping
22 |
23 | The shape_predictor_68_face_landmarks model is used to detect the 68 facial landmarks, and the avatar face image is blended onto the original video. Based in part on https://blog.csdn.net/weixin_44152939/article/details/123866639.
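For illustration, a minimal landmark-detection sketch with dlib, assuming the model file has been placed under `model/` as described in the setup section (the blending step onto the video is omitted):

```python
# Minimal sketch: detect the 68 facial landmarks with dlib.
# Assumes model/shape_predictor_68_face_landmarks.dat exists (see the setup section).
import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")

def get_landmarks(bgr_image):
    """Return the 68 (x, y) landmarks of the first detected face, or None."""
    gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)
    if not faces:
        return None
    shape = predictor(gray, faces[0])
    return [(shape.part(i).x, shape.part(i).y) for i in range(68)]

# Example: landmarks = get_landmarks(cv2.imread("avatars/1.png"))
```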
24 |
25 | ### 3. Expression following
26 |
27 | Uses the technique from https://github.com/alievk/avatarify-python, applying the first-order-model.
28 |
29 | ### 4. UDP traversal
30 |
31 | The hardest part of WebRTC is UDP traversal: WebRTC is peer-to-peer and runs over UDP, and most devices sit behind firewalls without a public IP, so in practice you need STUN (to discover the public IP and open a UDP port) and TURN (to relay messages). With STUN alone, UDP traversal only succeeds roughly 50% of the time, so this demo relays traffic through a coturn server. A minimal sketch of the TURN configuration is shown below.
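For reference, pointing aiortc at a TURN server is a one-line configuration; the URL and credentials below are the ones that appear in main.py and should be replaced with your own coturn deployment:

```python
# Minimal sketch: configure aiortc to use a coturn (TURN) relay.
from aiortc import RTCConfiguration, RTCIceServer, RTCPeerConnection

config = RTCConfiguration(iceServers=[
    # RTCIceServer("stun:stun1.l.google.com:19302"),  # STUN only: traversal succeeds ~50% of the time
    RTCIceServer("turn:gitclone.com:3478", "webrtc", "Webrtc987123654"),  # TURN relay via coturn
])
pc = RTCPeerConnection(configuration=config)
```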
32 |
33 | ## II. Environment setup
34 |
35 | ### 1. Install the GPU driver and conda environment
36 |
37 | Refer to the dependencies section of https://zhuanlan.zhihu.com/p/477687451.
38 |
39 | ### 2. Download the code and install dependencies
40 |
41 | ```shell
42 | # clone source code
43 | git clone https://gitclone.com/github.com/git-cloner/avatarify-webrtc
44 | cd avatarify-webrtc
45 | git clone https://github.com/alievk/first-order-model.git fomm
46 | # download models
47 | # The model files are large: download https://gitclone.com/download1/model/shape_predictor_68_face_landmarks.dat and place it in the model directory,
48 | # then download https://gitclone.com/download1/model/vox-adv-cpk.pth.tar and place it in the project root.
49 | # create avatarify envs,install requirements
50 | conda create -n avatarify python=3.7
51 | conda activate avatarify
52 | pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
53 | pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host=pypi.douban.com
54 | pip install cmake -i http://pypi.douban.com/simple --trusted-host=pypi.douban.com
55 | conda install -c conda-forge dlib
56 | pip install cryptography==38.0.0 -i http://pypi.douban.com/simple --trusted-host=pypi.douban.com
57 | cp simsun.ttc /usr/share/fonts/msfonts
58 | conda deactivate
59 | ```
60 |
61 | ### 3. Run and test
62 |
63 | ```shell
64 | windows:run_windows.bat
65 | linux:./run.sh
66 | # then open http://127.0.0.1:8080 in Chrome
67 | ```
68 |
69 | Note: when testing on the local machine you must use 127.0.0.1, not the machine's real address, because Chrome restricts camera permissions. If the client and server are on different machines, or the connection does not use HTTPS, whitelist the origin as follows:
70 |
71 | - Open chrome://flags/#unsafely-treat-insecure-origin-as-secure
72 | - Find "Insecure origins treated as secure"
73 | - Change Disabled to Enabled and enter the URL(s); separate multiple URLs with commas
74 | - Relaunch the browser for the change to take effect
75 |
76 | ## III. Common problems and solutions
77 |
78 | | Problem | Solution |
79 | | --------------- | ------------------------------------------------------------ |
80 | | UDP traversal | Use a coturn server, combining STUN and TURN |
81 | | Compute performance | Skip some frames so the generated video can keep up with the original frames |
82 | | torch version | `pip install torch` installs the CPU-only build; to use the GPU, install torch as described above, then verify it |
83 | | | (see the verification snippet after this table) |
84 | | opencv installation | Installing opencv depends on the gcc version and cmake; check the dependencies it reports during installation |
85 |
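Verification snippet referenced in the table above (run inside the `avatarify` environment):

```python
# Check whether the GPU build of torch is installed and how many CUDA devices are visible.
import torch
print("torch.cuda.is_available:", torch.cuda.is_available(), torch.cuda.device_count())
```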
86 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-python==4.2.0.34
2 | face-alignment==1.3.3
3 | pyzmq==20.0.0
4 | msgpack-numpy==0.4.7.1
5 | pyyaml==5.3.1
6 | requests==2.25.1
7 | pyfakewebcam==0.1.0
8 | av==8.0.3
9 | aiohttp==3.7.4.post0
10 | aiohttp_cors
11 | aiortc==1.3.2
12 | aioice==0.7.5
13 |
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # pkill -9 -f "python -u main.py"
4 | source $(conda info --base)/etc/profile.d/conda.sh
5 | conda activate avatarify
6 | export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/fomm
7 | export CUDA_VISIBLE_DEVICES=0,1
8 | export KMP_DUPLICATE_LIB_OK=TRUE
9 | # nohup python -u main.py avatarify.log 2>&1
10 | # tail -f avatarify.log
11 | python main.py
12 |
--------------------------------------------------------------------------------
/run_windows.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | call conda activate avatarify
4 | set PYTHONPATH=%PYTHONPATH%;%CD%;%CD%/fomm
5 | set KMP_DUPLICATE_LIB_OK=TRUE
6 | call python main.py
7 |
--------------------------------------------------------------------------------
/webmeeting/README.md:
--------------------------------------------------------------------------------
1 | # Aiit Web
2 |
3 | ## Install
4 |
5 | ```shell
6 | yarn install
7 | ```
8 |
9 | ## Test
10 |
11 | ```shell
12 | yarn start
13 | ```
14 |
15 | ## Build
16 |
17 | ```shell
18 | yarn build
19 | ```
20 |
21 |
--------------------------------------------------------------------------------
/webmeeting/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "aiit-webmeeting",
3 | "homepage": ".",
4 | "version": "1.1.0",
5 | "private": true,
6 | "scripts": {
7 | "start": "react-scripts start",
8 | "build": "react-scripts build",
9 | "test": "react-scripts test --passWithNoTests",
10 | "eject": "react-scripts eject",
11 | "deploy": "gh-pages -d build"
12 | },
13 | "dependencies": {
14 | "@chatui/core": "^2.4.2",
15 | "@emotion/react": "^11.10.5",
16 | "@emotion/styled": "^11.10.5",
17 | "@fortawesome/free-solid-svg-icons": "^6.1.1",
18 | "@livekit/react-components": "^1.0.1",
19 | "@mui/icons-material": "^5.10.9",
20 | "@mui/material": "^5.10.12",
21 | "http-proxy-middleware": "^2.0.6",
22 | "livekit-client": "1.2.9",
23 | "react": "^18.1.0",
24 | "react-aspect-ratio": "^1.0.50",
25 | "react-code-input": "^3.10.1",
26 | "react-dom": "^18.1.0",
27 | "react-router-dom": "^6.3.0"
28 | },
29 | "devDependencies": {
30 | "@fortawesome/fontawesome-common-types": "^0.2.36",
31 | "@testing-library/jest-dom": "^4.2.4",
32 | "@testing-library/react": "^9.5.0",
33 | "@testing-library/user-event": "^7.2.1",
34 | "@types/jest": "^25.1.4",
35 | "@types/node": "^12.12.38",
36 | "@types/react": "^18.0.0",
37 | "@types/react-dom": "^18.0.0",
38 | "cross-env": "^7.0.2",
39 | "gh-pages": "^2.2.0",
40 | "microbundle-crl": "^0.13.10",
41 | "npm-run-all": "^4.1.5",
42 | "prettier": "^2.3.1",
43 | "react": "^18.1.0",
44 | "react-dom": "^18.1.0",
45 | "react-scripts": "^5.0.1",
46 | "typescript": "^4.2.3"
47 | },
48 | "eslintConfig": {
49 | "extends": "react-app"
50 | },
51 | "browserslist": [
52 | ">0.2%",
53 | "not dead",
54 | "not op_mini all"
55 | ]
56 | }
57 |
--------------------------------------------------------------------------------
/webmeeting/public/CNAME:
--------------------------------------------------------------------------------
1 | example.livekit.io
--------------------------------------------------------------------------------
/webmeeting/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/git-cloner/avatarify-webrtc/769ef2a1dbd1e0fdaddad65990753f874f25d31c/webmeeting/public/favicon.ico
--------------------------------------------------------------------------------
/webmeeting/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
13 |
14 |
15 |
24 | Aiit meeting with avatar
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/webmeeting/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "aiit-web",
3 | "name": "aiit-web",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | }
10 | ],
11 | "start_url": ".",
12 | "display": "standalone",
13 | "theme_color": "#000000",
14 | "background_color": "#ffffff"
15 | }
16 |
--------------------------------------------------------------------------------
/webmeeting/src/App.test.tsx:
--------------------------------------------------------------------------------
1 | import ReactDOM from 'react-dom';
2 | import App from './App';
3 |
4 | it('renders without crashing', () => {
5 | const div = document.createElement('div');
6 | ReactDOM.render(<App />, div);
7 | ReactDOM.unmountComponentAtNode(div);
8 | });
9 |
--------------------------------------------------------------------------------
/webmeeting/src/App.tsx:
--------------------------------------------------------------------------------
1 | import '@livekit/react-components/dist/index.css';
2 | import { HashRouter as Router, Route, Routes } from 'react-router-dom';
3 | import { PreJoinPage } from './PreJoinPage';
4 | import { RoomPage } from './RoomPage';
5 | import './chatui-theme.css';
6 |
7 | const App = () => {
8 | return (
9 | <div className="container">
10 | <Router>
11 | <Routes>
12 | <Route path="/" element={<PreJoinPage />} />
13 | <Route path="/room" element={<RoomPage />} />
14 | </Routes>
15 | </Router>
16 | </div>
17 | );
18 | };
19 |
20 | export default App;
21 |
--------------------------------------------------------------------------------
/webmeeting/src/PreJoinPage.tsx:
--------------------------------------------------------------------------------
1 | import { useEffect, useState, useRef } from 'react';
2 | import { useNavigate } from 'react-router-dom';
3 | import ReactCodeInput from 'react-code-input';
4 | import Button from '@mui/material/Button';
5 | import Stack from '@mui/material/Stack';
6 | import MicIcon from '@mui/icons-material/Mic';
7 | import MicOffIcon from "@mui/icons-material/MicOff";
8 | import VideoCameraFrontIcon from '@mui/icons-material/VideoCameraFront';
9 | import VideocamOffIcon from '@mui/icons-material/VideocamOff';
10 | import AddCircleOutlineIcon from '@mui/icons-material/AddCircleOutline';
11 | import Input from '@mui/material/Input';
12 | import AccountCircle from '@mui/icons-material/AccountCircle';
13 | import InputAdornment from '@mui/material/InputAdornment';
14 | import FaceIcon from '@mui/icons-material/Face';
15 | import ConnectWithoutContactIcon from '@mui/icons-material/ConnectWithoutContact';
16 | import { authtoken } from './config';
17 | import SelectAvatarDialog from './SelectAvatar';
18 | import { WebrtcCli } from './webrtc/WebrtcCli';
19 |
20 | export const PreJoinPage = () => {
21 | const [username, setUsername] = useState('');
22 | const [roomnum, setRoomnum] = useState('');
23 | const [roomnum1, setRoomnum1] = useState('');
24 | const [videoEnabled, setVideoEnabled] = useState(true);
25 | const [adaptiveStream] = useState(true);
26 | const [audioEnabled, setAudioEnabled] = useState(true);
27 | const [connectDisabled, setConnectDisabled] = useState(true);
28 | const navigate = useNavigate();
29 | const [mutebuttonText, setMutebuttonText] = useState('Mute');
30 | const [vediobuttonText, setVediobuttonText] = useState('Disable Video');
31 | const usernameRef = useRef();
32 | const videoRefLocal = useRef();
33 | const videoRefRemote = useRef();
34 | const [open, setOpen] = useState(false);
35 | const [selectedValue, setSelectedValue] = useState("0");
36 | const [avatarStatus, setAvatarStatus] = useState("Test avatar");
37 | const [webrtccli, setWebrtccli] = useState();
38 |
39 | const handleClickOpen = () => {
40 | setOpen(true);
41 | };
42 |
43 | const handleClose = (value: string) => {
44 | if (avatarStatus === "Test avatar") {
45 | setOpen(false);
46 | setSelectedValue(value);
47 | if (value === "0") {
48 | return;
49 | }
50 | setAvatarStatus("Stop avatar");
51 | let _webrtccli = new WebrtcCli({
52 | source: videoRefLocal.current,
53 | destination: videoRefRemote.current,
54 | debug: false,
55 | audio: false,
56 | video: true,
57 | avator: value
58 | });
59 | setWebrtccli(_webrtccli);
60 | _webrtccli.startRecording();
61 | } ;
62 | };
63 |
64 | useEffect(() => {
65 | var _roomnum = roomnum;
66 | if (roomnum1) {
67 | if (!(roomnum1 === "")) {
68 | _roomnum = roomnum1;
69 | }
70 | }
71 | if (username && _roomnum) {
72 | setConnectDisabled(false);
73 | } else {
74 | setConnectDisabled(true);
75 | }
76 |
77 | }, [username, roomnum, roomnum1]);
78 |
79 | const handleChange = (event: React.ChangeEvent) => {
80 | setUsername(event.target.value);
81 | };
82 |
83 | const handleChange1 = (value: string) => {
84 | setRoomnum1(value);
85 | };
86 |
87 | const toggleAudio = () => {
88 | if (audioEnabled) {
89 | setAudioEnabled(false);
90 | setMutebuttonText("Unmute");
91 | } else {
92 | setAudioEnabled(true);
93 | setMutebuttonText("Mute");
94 | }
95 | };
96 |
97 | const toggleVedio = () => {
98 | if (videoEnabled) {
99 | setVideoEnabled(false);
100 | setVediobuttonText("Enable Video");
101 | } else {
102 | setVideoEnabled(true);
103 | setVediobuttonText("Disable Video");
104 | }
105 | };
106 |
107 | const toggleNewRoom = async () => {
108 | try {
109 | const response = await fetch('/api/aiit/meeting', {
110 | method: 'POST',
111 | body: JSON.stringify({
112 | params: 'create-room'
113 | }),
114 | headers: {
115 | 'Content-Type': 'application/json',
116 | Accept: 'application/json',
117 | 'Authorization': 'Basic ' + authtoken
118 | },
119 | });
120 | if (!response.ok) {
121 | throw new Error(`Error! status: ${response.status}`);
122 | }
123 | const rnt = (await response.json());
124 | if (rnt.code === "0") {
125 | setRoomnum(rnt.result.roomnum);
126 | setRoomnum1("");
127 | (usernameRef?.current as any).querySelector("input").focus();
128 | }
129 | else {
130 | throw new Error(`Error! status: ${rnt.message}`);
131 | }
132 | } catch (error) {
133 | throw new Error(`Error! status: ${error}`);
134 | }
135 | }
136 |
137 | const createToken = async () => {
138 | try {
139 | var _roomnum = roomnum;
140 | if (roomnum1) {
141 | if (!(roomnum1 === "")) {
142 | _roomnum = roomnum1;
143 | }
144 | }
145 | const response = await fetch('/api/aiit/meeting', {
146 | method: 'POST',
147 | body: JSON.stringify({
148 | params: 'create-token --room ' + _roomnum + ' --join -i ' + username + ' -p 0000'
149 | }),
150 | headers: {
151 | 'Content-Type': 'application/json',
152 | Accept: 'application/json',
153 | 'Authorization': 'Basic ' + authtoken
154 | },
155 | });
156 | if (!response.ok) {
157 | throw new Error(`Error! status: ${response.status}`);
158 | }
159 | const rnt = (await response.json());
160 | if (rnt.code === "0") {
161 | return rnt.result.token;
162 | }
163 | else {
164 | throw new Error(`Error! status: ${rnt.message}`);
165 | }
166 | } catch (error) {
167 | throw new Error(`Error! status: ${error}`);
168 | }
169 | };
170 |
171 | const connectToRoom = async () => {
172 | var url = "wss://classnotfound.com.cn/wss";
173 | createToken().then((token: any) => {
174 | const params: { [key: string]: string } = {
175 | url,
176 | token,
177 | videoEnabled: videoEnabled ? '1' : '0',
178 | audioEnabled: audioEnabled ? '1' : '0',
179 | simulcast: '1',
180 | dynacast: '1',
181 | adaptiveStream: adaptiveStream ? '1' : '0',
182 | };
183 | navigate({
184 | pathname: '/room',
185 | search: '?' + new URLSearchParams(params).toString(),
186 | });
187 | }).catch((e) => {
188 | alert(e);
189 | });
190 |
191 | };
192 |
193 | async function toggleAvatar() {
194 | if (avatarStatus === "Test avatar") {
195 | handleClickOpen();
196 | }else{
197 | webrtccli.stopRecording();
198 | setAvatarStatus("Test avatar");
199 | }
200 | }
201 |
202 | return (
203 |
204 |
205 | Aiit meeting with avatar
206 |
207 |
208 |
Room Number
209 |
210 |
211 |
212 |
214 |
215 |
216 | } />
217 |
218 |
219 |
220 | } size="small" onClick={toggleAvatar}>{avatarStatus}
221 | } size="small" onClick={toggleNewRoom}>new room
222 | } size="small" onClick={connectToRoom} disabled={connectDisabled}>Join meeting
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 | : } size="small" onClick={toggleAudio}>{mutebuttonText}
231 | : } size="small" onClick={toggleVedio}>{vediobuttonText}
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
241 |
245 |
246 |
247 |
255 |
256 | );
257 | };
258 |
--------------------------------------------------------------------------------
/webmeeting/src/RoomPage.tsx:
--------------------------------------------------------------------------------
1 | import { faSquare, faThLarge, faUserFriends } from '@fortawesome/free-solid-svg-icons';
2 | import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
3 | import { DataPacket_Kind, Room, RoomEvent, setLogLevel, VideoPresets } from 'livekit-client';
4 | import { DisplayContext, DisplayOptions, LiveKitRoom, ParticipantProps, StageProps } from '@livekit/react-components';
5 | import { useRef, useState } from 'react';
6 | import 'react-aspect-ratio/aspect-ratio.css';
7 | import { useNavigate, useLocation } from 'react-router-dom';
8 | import { ParticipantView } from './meeting/ParticipantView';
9 | import { StageView } from './meeting/StageView';
10 | import { ControlsProps, ControlsView } from './meeting/ControlsView';
11 | import SelectAvatarDialog from './SelectAvatar';
12 | import { WebrtcCli } from './webrtc/WebrtcCli';
13 | import Chat, { Bubble, useMessages } from '@chatui/core';
14 | import '@chatui/core/dist/index.css';
15 |
16 | export const RoomPage = () => {
17 | const [numParticipants, setNumParticipants] = useState(0);
18 | const [selectedValue, setSelectedValue] = useState("0");
19 | const [open, setOpen] = useState(false);
20 | const [displayOptions, setDisplayOptions] = useState({
21 | stageLayout: 'grid',
22 | showStats: false,
23 | });
24 | const navigate = useNavigate();
25 | const query = new URLSearchParams(useLocation().search);
26 | const url = query.get('url');
27 | const token = query.get('token');
28 | const recorder = query.get('recorder');
29 | const [roomname, setRoomname] = useState('');
30 | const [userid, setUserid] = useState('');
31 | const videoRefLocal = useRef();
32 | const videoRefRemote = useRef();
33 | const [webrtccli, setWebrtccli] = useState();
34 | const [avatarStatus, setAvatarStatus] = useState("Avatar");
35 | const encoder = new TextEncoder()
36 | const decoder = new TextDecoder()
37 | const { messages, appendMsg } = useMessages([]);
38 |
39 | if (!url || !token) {
40 | return <div>url and token are required</div>;
41 | }
42 |
43 | const onLeave = () => {
44 | navigate('/');
45 | };
46 |
47 | const onAvatar = () => {
48 | if (avatarStatus === "Avatar") {
49 | handleClickOpen();
50 | } else {
51 | webrtccli.stopRecording();
52 | setAvatarStatus("Avatar");
53 | }
54 | };
55 |
56 | const handleClickOpen = () => {
57 | setOpen(true);
58 | };
59 |
60 | const handleClose = (value: string) => {
61 | if (avatarStatus === "Avatar") {
62 | setOpen(false);
63 | setSelectedValue(value);
64 | if (value === "0") {
65 | return;
66 | }
67 | setAvatarStatus("Stop avatar");
68 | let _webrtccli = new WebrtcCli({
69 | source: videoRefLocal.current,
70 | destination: videoRefRemote.current,
71 | debug: false,
72 | audio: false,
73 | video: true,
74 | avator: value + '|1|' + roomname + '|' + userid
75 | });
76 | setWebrtccli(_webrtccli);
77 | _webrtccli.startRecording();
78 | };
79 | };
80 |
81 | const updateParticipantSize = (room: Room) => {
82 | setNumParticipants(room.participants.size + 1);
83 | setUserid(room.localParticipant.name ? room.localParticipant.name : "");
84 | setRoomname(room.name);
85 | };
86 |
87 | const onParticipantDisconnected = (room: Room) => {
88 | updateParticipantSize(room);
89 |
90 | /* Special rule for recorder */
91 | if (recorder && parseInt(recorder, 10) === 1 && room.participants.size === 0) {
92 | console.log('END_RECORDING');
93 | }
94 | };
95 |
96 | const updateOptions = (options: DisplayOptions) => {
97 | setDisplayOptions({
98 | ...displayOptions,
99 | ...options,
100 | });
101 | };
102 |
103 | const participantRenderer = (props: ParticipantProps) => {
104 | return ParticipantView(props);
105 | };
106 |
107 | const stageRenderer = (props: StageProps) => {
108 | return StageView(props);
109 | }
110 |
111 | const controlRenderer = (props: ControlsProps) => {
112 | return
113 | }
114 |
115 | function handleRecieveMessage(payload: any) {
116 | const strData = decoder.decode(payload);
117 | var recieveData = JSON.parse(strData);
118 | appendMsg({
119 | type: 'text',
120 | content: { text: "(" + recieveData.user + ")" + recieveData.data },
121 | position: 'left',
122 | user: { avatar: '//gitclone.com/download1/user1.png' }
123 | });
124 | }
125 |
126 | function handleSendMessage(type: any, val: any) {
127 | if (type === 'text' && val.trim()) {
128 | const strData = JSON.stringify({ "user": userid, "data": val })
129 | const senddata = encoder.encode(strData);
130 | (window as any).currentRoom.localParticipant.publishData(senddata, DataPacket_Kind.LOSSY)
131 | appendMsg({
132 | type: 'text',
133 | content: { text: val },
134 | position: 'right',
135 | user: { avatar: '//gitclone.com/download1/user.png' }
136 | });
137 | }
138 | }
139 |
140 | function renderMessageContent(msg: any) {
141 | const { content } = msg;
142 | return ;
143 | }
144 |
145 | return (
146 |
147 |
148 |
149 |
150 |
151 |
152 | updateOptions({ showStats: e.target.checked })}
156 | />
157 |
158 |
159 |
160 |
169 |
178 |
179 |
180 |
181 | {numParticipants}
182 |
183 |
184 |
185 |
186 |
{
190 | setLogLevel('info');
191 | onConnected(room, query);
192 | room.on(RoomEvent.ParticipantConnected, () => updateParticipantSize(room));
193 | room.on(RoomEvent.ParticipantDisconnected, () => onParticipantDisconnected(room));
194 | room.on(RoomEvent.DataReceived, (payload: Uint8Array) => handleRecieveMessage(payload));
195 | updateParticipantSize(room);
196 | }}
197 | roomOptions={{
198 | adaptiveStream: isSet(query, 'adaptiveStream'),
199 | dynacast: isSet(query, 'dynacast'),
200 | videoCaptureDefaults: {
201 | resolution: VideoPresets.h720.resolution,
202 | },
203 | }}
204 | participantRenderer={(props: ParticipantProps) => {
205 | return participantRenderer(props);
206 | }}
207 | stageRenderer={(props: StageProps) => {
208 | return stageRenderer(props);
209 | }}
210 |
211 | controlRenderer={(props: ControlsProps) => {
212 | //props.onAvatar = on
213 | return controlRenderer(props);
214 | }}
215 | onLeave={onLeave}
216 | />
217 |
218 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
235 |
236 |
237 |
238 | );
239 |
240 | };
241 |
242 | async function onConnected(room: Room, query: URLSearchParams) {
243 | // make it easier to debug
244 | (window as any).currentRoom = room;
245 |
246 | if (isSet(query, 'audioEnabled')) {
247 | const audioDeviceId = query.get('audioDeviceId');
248 | if (audioDeviceId && room.options.audioCaptureDefaults) {
249 | room.options.audioCaptureDefaults.deviceId = audioDeviceId;
250 | }
251 | await room.localParticipant.setMicrophoneEnabled(true);
252 | }
253 |
254 | if (isSet(query, 'videoEnabled')) {
255 | const videoDeviceId = query.get('videoDeviceId');
256 | if (videoDeviceId && room.options.videoCaptureDefaults) {
257 | room.options.videoCaptureDefaults.deviceId = videoDeviceId;
258 | }
259 | await room.localParticipant.setCameraEnabled(true);
260 | }
261 | }
262 |
263 | function isSet(query: URLSearchParams, key: string): boolean {
264 | return query.get(key) === '1' || query.get(key) === 'true';
265 | }
266 |
--------------------------------------------------------------------------------
/webmeeting/src/SelectAvatar.tsx:
--------------------------------------------------------------------------------
1 | import DialogTitle from '@mui/material/DialogTitle';
2 | import Dialog from '@mui/material/Dialog';
3 | import DialogActions from '@mui/material/DialogActions';
4 | import Button from '@mui/material/Button';
5 | import ImageList from '@mui/material/ImageList';
6 | import ImageListItem from '@mui/material/ImageListItem';
7 |
8 | export interface SelectAvatarDialogProps {
9 | open: boolean;
10 | selectedValue: string;
11 | onClose: (value: string) => void;
12 | }
13 |
14 | export default function SelectAvatarDialog(props: SelectAvatarDialogProps) {
15 | const { onClose, selectedValue, open } = props;
16 |
17 | const handleClose = () => {
18 | onClose(selectedValue);
19 | };
20 |
21 | const handleListItemClick = (value: string) => {
22 | onClose(value);
23 | };
24 |
25 | const itemData = [
26 | {
27 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/1.png',
28 | title: '1',
29 | },
30 | {
31 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/2.png',
32 | title: '2',
33 | },
34 | {
35 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/3.png',
36 | title: '3',
37 | },
38 | {
39 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/4.png',
40 | title: '4',
41 | },
42 | {
43 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/5.png',
44 | title: '5',
45 | },
46 | {
47 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/6.png',
48 | title: '6',
49 | },
50 | {
51 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/7.png',
52 | title: '7',
53 | },
54 | {
55 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/8.png',
56 | title: '8',
57 | },
58 | {
59 | img: 'https://gitclone.com/aiit/avatarify-webrtc/avatars/9.png',
60 | title: '9',
61 | }
62 | ];
63 |
64 | return (
65 |
84 | );
85 | }
--------------------------------------------------------------------------------
/webmeeting/src/chatui-theme.css:
--------------------------------------------------------------------------------
1 | :root {
2 | font-size: 12px;
3 | line-height:11px ;
4 | }
5 | .ChatApp,
6 | .Bubble{
7 | max-width: 100vw;
8 | }
9 | .MessageContainer,
10 | .Navbar,
11 | .Message .Bubble,
12 | .QuickReplies,
13 | .ChatFooter {
14 | background-repeat: no-repeat;
15 | background-size: cover;
16 | }
17 |
--------------------------------------------------------------------------------
/webmeeting/src/index.css:
--------------------------------------------------------------------------------
1 | body {
2 | padding: 0;
3 | font-family: Avenir, -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu',
4 | 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif;
5 | -webkit-font-smoothing: antialiased;
6 | -moz-osx-font-smoothing: grayscale;
7 | background: white;
8 | color: black;
9 | margin: 0;
10 | /* overflow: hidden; */
11 | height: 100vh;
12 | font-size: 12px;
13 | }
14 |
15 | code {
16 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace;
17 | }
18 |
19 | a {
20 | color: #5a8bff;
21 | }
22 |
23 | a:hover {
24 | opacity: 0.8;
25 | }
26 |
27 | .container {
28 | box-sizing: border-box;
29 | height: 100vh;
30 | padding: 15px;
31 | }
32 |
33 | .prejoin {
34 | display: grid;
35 | align-items: center;
36 | justify-items: center;
37 | }
38 |
39 | .prejoin main {
40 | width: 750px;
41 | }
42 |
43 | .prejoin h2 {
44 | text-align: center;
45 | margin-top: 30px;
46 | }
47 |
48 | .prejoin hr {
49 | border: 1px solid rgba(255, 255, 255, 0.1);
50 | }
51 |
52 | .prejoin footer {
53 | font-size: 14px;
54 | margin-top: 50px;
55 | margin-bottom: 40px;
56 | }
57 |
58 | .entrySection {
59 | display: grid;
60 | grid-template-columns: 1fr;
61 | margin-top: 40px;
62 | gap: 0.8rem;
63 | }
64 |
65 | @media only screen and (max-width: 800px) {
66 | .container {
67 | padding: 8px;
68 | }
69 |
70 | .entrySection {
71 | grid-template-columns: 1fr;
72 | margin-top: 20px;
73 | gap: 20px;
74 | }
75 |
76 | .prejoin main {
77 | width: 100%;
78 | }
79 | }
80 |
81 | .entrySection .label {
82 | font-weight: 900;
83 | font-size: 16px;
84 | margin-bottom: 10px;
85 | }
86 |
87 | .entrySection input {
88 | border: 1px solid rgba(255, 255, 255, 0.2);
89 | border-radius: 4px;
90 | font-size: 16px;
91 | background-color: transparent;
92 | color: black;
93 | padding: 8px;
94 | }
95 |
96 | .entrySection input[type='text'] {
97 | width: calc(100% - 16px);
98 | }
99 |
100 | .entrySection div {
101 | vertical-align: middle;
102 | text-align: center;
103 | margin-bottom: 10px;
104 | }
105 |
106 | .videoSection {
107 | width: 100%;
108 | aspect-ratio: 16 / 9;
109 | margin-top: 20px;
110 | }
111 |
112 | .videoSection video {
113 | width: 100%;
114 | height: 100%;
115 | border-radius: 4px;
116 | }
117 |
118 | .videoSection .placeholder {
119 | width: 100%;
120 | height: 100%;
121 | border-radius: 4px;
122 | background: #2f2f2f;
123 | }
124 |
125 | .controlSection {
126 | margin-top: 30px;
127 | display: grid;
128 | grid-template-columns: auto;
129 | }
130 |
131 | .controlSection div {
132 | text-align: center;
133 | }
134 |
135 | .controlSection .right {
136 | grid-column: 3 / 4;
137 | }
138 |
139 | .roomContainer {
140 | height: calc(100vh - 38px);
141 | }
142 |
143 | .topBar {
144 | display: grid;
145 | grid-template-columns: auto auto;
146 | justify-content: space-between;
147 | align-items: center;
148 | }
149 |
150 | .topBar .right {
151 | display: grid;
152 | grid-template-columns: auto auto auto auto;
153 | align-items: center;
154 | gap: 16px;
155 | }
156 |
157 | .topBar .label {
158 | font-size: 14px;
159 | font-family: Arial, Helvetica, sans-serif;
160 | font-weight: bold;
161 | color: blue ;
162 | }
163 |
164 | .roomPanal {
165 | display: flex;
166 | height: calc(100vh - 38px);
167 | }
168 |
169 | .roomPanal .roomVedio {
170 | width: 240px;
171 | border: 1px solid lightgray;
172 | margin-top: 3px;
173 | border-radius: 4px;
174 | height: calc(100vh - 140px);
175 | }
176 |
177 | .roomPanal .roomVedio .vedio {
178 | margin-left: 15px;
179 | }
180 |
181 | .iconButton {
182 | cursor: pointer;
183 | }
184 |
185 | .iconButton:disabled {
186 | cursor: auto;
187 | }
188 |
189 | .participantCount {
190 | font-size: 14px;
191 | }
192 |
193 | .participantCount span {
194 | display: inline-block;
195 | margin-left: 5px;
196 | }
197 |
198 | .options {
199 | display: grid;
200 | grid-template-columns: repeat(2, fit-content(120px)) auto;
201 | gap: 8px;
202 | }
203 |
204 | .options label {
205 | text-align: center;
206 | margin-left: 0.2rem;
207 | font-size: 0.9rem;
208 | }
209 |
--------------------------------------------------------------------------------
/webmeeting/src/index.tsx:
--------------------------------------------------------------------------------
1 | import { createRoot } from 'react-dom/client';
2 | import App from './App';
3 | import './index.css';
4 |
5 | const root = createRoot(document.getElementById('root') as Element);
6 | root.render(<App />);
7 |
--------------------------------------------------------------------------------
/webmeeting/src/meeting/AudioSelectButton.tsx:
--------------------------------------------------------------------------------
1 | import { faMicrophone, faMicrophoneSlash } from '@fortawesome/free-solid-svg-icons';
2 | import { Room } from 'livekit-client';
3 | import { useCallback, useEffect, useState } from 'react';
4 | import { ControlButton, MenuItem } from './ControlButton';
5 |
6 | export interface AudioSelectButtonProps {
7 | isMuted: boolean;
8 | onClick?: () => void;
9 | onSourceSelected?: (device: MediaDeviceInfo) => void;
10 | isButtonDisabled?: boolean;
11 | muteText?: string;
12 | unmuteText?: string;
13 | className?: string;
14 | popoverContainerClassName?: string;
15 | popoverTriggerBtnClassName?: string;
16 | popoverTriggerBtnSeparatorClassName?: string;
17 | }
18 |
19 | export const AudioSelectButton = ({
20 | isMuted,
21 | onClick,
22 | onSourceSelected,
23 | isButtonDisabled,
24 | muteText = 'Mute',
25 | unmuteText = 'Unmute',
26 | className,
27 | popoverContainerClassName,
28 | popoverTriggerBtnClassName,
29 | popoverTriggerBtnSeparatorClassName,
30 | }: AudioSelectButtonProps) => {
31 | const [sources, setSources] = useState([]);
32 | const [menuItems, setMenuItems] = useState